future) throws Exception {
});
}
}
- endFileLease(client, stat);
}
}
}
@@ -654,12 +638,13 @@ public static boolean shouldRetryCreate(RemoteException e) {
return e.getClassName().endsWith("RetryStartFileException");
}
- static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName,
- ExtendedBlock block, HdfsFileStatus stat) {
+ static void completeFile(FanOutOneBlockAsyncDFSOutput output, DFSClient client,
+ ClientProtocol namenode, String src, String clientName, ExtendedBlock block,
+ HdfsFileStatus stat) {
for (int retry = 0;; retry++) {
try {
if (namenode.complete(src, clientName, block, stat.getFileId())) {
- endFileLease(client, stat);
+ endFileLease(output);
return;
} else {
LOG.warn("complete file " + src + " not finished, retry = " + retry);
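For orientation: the new overload threads the output object through so that completing the file also drops the lease entry that was registered for the output's dummy stream. A minimal sketch of the assumed shape, using the pre-3.4 Hadoop API where DFSClient keys leases by inode id; getClient() and getDummyStream() are illustrative accessors, not necessarily the patch's names:

    static void endFileLease(FanOutOneBlockAsyncDFSOutput output) {
      // Unregister the dummy stream so the DFSClient's background LeaseRenewer
      // stops renewing the lease for this file. endFileLease is package-private
      // in DFSClient, which is why the helpers live in org.apache.hadoop.hdfs.
      DFSClient client = output.getClient();                     // assumed accessor
      client.endFileLease(output.getDummyStream().getFileId());  // assumed accessor
    }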
diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hdfs/DummyDFSOutputStream.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hdfs/DummyDFSOutputStream.java
new file mode 100644
index 000000000000..c92ff416b0cb
--- /dev/null
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hdfs/DummyDFSOutputStream.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A dummy DFSOutputStream which is mainly used for lease renewal.
+ *
+ * We have to put it in this package because we need to override a package-private method.
+ */
+@InterfaceAudience.Private
+public final class DummyDFSOutputStream extends DFSOutputStream {
+
+ private final AsyncFSOutput delegate;
+
+ public DummyDFSOutputStream(AsyncFSOutput output, DFSClient dfsClient, String src,
+ HdfsFileStatus stat, EnumSet<CreateFlag> flag, DataChecksum checksum) {
+ super(dfsClient, src, stat, flag, null, checksum, null, false);
+ this.delegate = output;
+ }
+
+ // public for testing
+ @Override
+ public void abort() throws IOException {
+ delegate.close();
+ }
+
+ @Override
+ public void close() throws IOException {
+ delegate.close();
+ }
+}
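For context, a hedged sketch of how the dummy would plausibly be registered at create time so that the client's LeaseRenewer thread keeps the lease alive. beginFileLease is package-private in DFSClient and its signature differs across Hadoop versions, so treat the call below, and the src/stat/checksum variables, as assumptions:

    // Wrap the async output in a dummy stream and register it for lease renewal
    // (pre-3.4 Hadoop signature shown; 3.4+ keys leases by string instead).
    DummyDFSOutputStream dummy = new DummyDFSOutputStream(output, client, src, stat,
      EnumSet.of(CreateFlag.CREATE), checksum);
    client.beginFileLease(stat.getFileId(), dummy);
    // The LeaseRenewer now sees an open stream for this file; when it later
    // closes or aborts the stream, the overrides above delegate to the real output.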
diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLeaseRenewal.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLeaseRenewal.java
new file mode 100644
index 000000000000..e8f7188518d0
--- /dev/null
+++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLeaseRenewal.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.asyncfs;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mockConstruction;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.Optional;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.DummyDFSOutputStream;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.MockedConstruction;
+
+import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
+import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
+
+/**
+ * Make sure lease renewal works. Since renewal happens in a background thread, a normal
+ * read/write test can not verify it.
+ *
+ * See HBASE-28955 for more details.
+ */
+@Category({ MiscTests.class, MediumTests.class })
+public class TestLeaseRenewal extends AsyncFSTestBase {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestLeaseRenewal.class);
+
+ private static DistributedFileSystem FS;
+ private static EventLoopGroup EVENT_LOOP_GROUP;
+ private static Class<? extends Channel> CHANNEL_CLASS;
+ private static StreamSlowMonitor MONITOR;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ startMiniDFSCluster(3);
+ FS = CLUSTER.getFileSystem();
+ EVENT_LOOP_GROUP = new NioEventLoopGroup();
+ CHANNEL_CLASS = NioSocketChannel.class;
+ MONITOR = StreamSlowMonitor.create(UTIL.getConfiguration(), "testMonitor");
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ if (EVENT_LOOP_GROUP != null) {
+ EVENT_LOOP_GROUP.shutdownGracefully().get();
+ }
+ shutdownMiniDFSCluster();
+ }
+
+ private FanOutOneBlockAsyncDFSOutput create(String file)
+ throws IllegalArgumentException, IOException {
+ EventLoop eventLoop = EVENT_LOOP_GROUP.next();
+ return FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, new Path(file), true, false,
+ (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR, true);
+ }
+
+ @Test
+ public void testLeaseRenew() throws IOException {
+ DFSClient client = FS.getClient();
+ assertFalse(client.renewLease());
+
+ FanOutOneBlockAsyncDFSOutput out = create("/test_lease_renew");
+ assertTrue(client.renewLease());
+ client.closeAllFilesBeingWritten(false);
+ assertTrue(out.isClosed());
+
+ assertFalse(client.renewLease());
+
+ out = create("/test_lease_renew");
+ assertTrue(client.renewLease());
+ client.closeAllFilesBeingWritten(true);
+ assertTrue(out.isClosed());
+ }
+
+ private Optional<Method> getUniqKeyMethod() {
+ try {
+ return Optional.of(DFSOutputStream.class.getMethod("getUniqKey"));
+ } catch (NoSuchMethodException e) {
+ // should be hadoop 3.3 or below
+ return Optional.empty();
+ }
+ }
+
+ @Test
+ public void testEnsureMethodsCalledWhenLeaseRenewal() throws Exception {
+ try (MockedConstruction<DummyDFSOutputStream> mocked =
+ mockConstruction(DummyDFSOutputStream.class)) {
+ try (FanOutOneBlockAsyncDFSOutput out = create("/methods_for_lease_renewal")) {
+ DummyDFSOutputStream dummy = mocked.constructed().get(0);
+ assertTrue(FS.getClient().renewLease());
+ Optional<Method> getUniqKeyMethod = getUniqKeyMethod();
+ if (getUniqKeyMethod.isPresent()) {
+ getUniqKeyMethod.get().invoke(verify(dummy));
+ Method getNamespaceMethod = DFSOutputStream.class.getMethod("getNamespace");
+ getNamespaceMethod.invoke(verify(dummy));
+ } else {
+ verify(dummy).getFileId();
+ }
+ verifyNoMoreInteractions(dummy);
+ }
+ }
+ }
+
+ private void verifyGetUniqKey(DummyDFSOutputStream dummy) throws Exception {
+ Optional<Method> getUniqKeyMethod = getUniqKeyMethod();
+ if (getUniqKeyMethod.isPresent()) {
+ getUniqKeyMethod.get().invoke(verify(dummy));
+ } else {
+ verify(dummy).getFileId();
+ }
+ }
+
+ @Test
+ public void testEnsureMethodsCalledWhenClosing() throws Exception {
+ try (MockedConstruction<DummyDFSOutputStream> mocked =
+ mockConstruction(DummyDFSOutputStream.class)) {
+ try (FanOutOneBlockAsyncDFSOutput out = create("/methods_for_closing")) {
+ DummyDFSOutputStream dummy = mocked.constructed().get(0);
+ verifyGetUniqKey(dummy);
+ FS.getClient().closeAllFilesBeingWritten(false);
+ verify(dummy).close();
+
+ verifyNoMoreInteractions(dummy);
+ }
+ }
+ }
+
+ @Test
+ public void testEnsureMethodsCalledWhenAborting() throws Exception {
+ try (MockedConstruction<DummyDFSOutputStream> mocked =
+ mockConstruction(DummyDFSOutputStream.class)) {
+ try (FanOutOneBlockAsyncDFSOutput out = create("/methods_for_aborting")) {
+ DummyDFSOutputStream dummy = mocked.constructed().get(0);
+ verifyGetUniqKey(dummy);
+ FS.getClient().closeAllFilesBeingWritten(true);
+ verify(dummy).abort();
+ verifyNoMoreInteractions(dummy);
+ }
+ }
+ }
+}
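The reflection in getUniqKeyMethod() above exists because the key that the LeaseRenewer uses for open streams changed across Hadoop lines: 3.3 and earlier key by getFileId(), while newer versions key by getUniqKey() (plus getNamespace()). A hedged sketch of a version-agnostic key lookup in the same spirit as the test; the exact version boundary is an assumption:

    // Resolve the lease key the way the renewer would, across Hadoop versions.
    static Object leaseKey(DFSOutputStream stream) throws Exception {
      try {
        // Newer Hadoop (assumed 3.4+): string-keyed leases.
        return DFSOutputStream.class.getMethod("getUniqKey").invoke(stream);
      } catch (NoSuchMethodException e) {
        // Hadoop 3.3 and below: inode-id-keyed leases.
        return stream.getFileId();
      }
    }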
diff --git a/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
index 0ca7ecc58175..c6a923fb9022 100644
--- a/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
+++ b/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
@@ -75,6 +75,8 @@ allowed_expr="(^org/$|^org/apache/$|^org/apache/hadoop/$"
allowed_expr+="|^org/apache/hadoop/hbase"
# * classes in packages that start with org.apache.hbase
allowed_expr+="|^org/apache/hbase/"
+# We have a dummy DFSOutputStream implementation in hbase
+allowed_expr+="|^org/apache/hadoop/hdfs/$|^org/apache/hadoop/hdfs/DummyDFSOutputStream.class"
# * whatever in the "META-INF" directory
allowed_expr+="|^META-INF/"
# * the folding tables from jcodings
diff --git a/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
index 0ca7ecc58175..c6a923fb9022 100644
--- a/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
+++ b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
@@ -75,6 +75,8 @@ allowed_expr="(^org/$|^org/apache/$|^org/apache/hadoop/$"
allowed_expr+="|^org/apache/hadoop/hbase"
# * classes in packages that start with org.apache.hbase
allowed_expr+="|^org/apache/hbase/"
+# We have a dummy DFSOutputStream implementation in hbase
+allowed_expr+="|^org/apache/hadoop/hdfs/$|^org/apache/hadoop/hdfs/DummyDFSOutputStream.class"
# * whatever in the "META-INF" directory
allowed_expr+="|^META-INF/"
# * the folding tables from jcodings