HADOOP-19672. Network switchover when the Apache client throws an error #7967
```diff
@@ -68,6 +68,9 @@ public AbfsClientHandler(final URL baseUrl,
       final SASTokenProvider sasTokenProvider,
       final EncryptionContextProvider encryptionContextProvider,
       final AbfsClientContext abfsClientContext) throws IOException {
+    // This will initialize the default and ingress service types.
+    // This is needed before creating the clients so that we can do cache
+    // warmup only for the default client.
     initServiceType(abfsConfiguration);
     this.dfsAbfsClient = createDfsClient(baseUrl, sharedKeyCredentials,
         abfsConfiguration, null, sasTokenProvider, encryptionContextProvider,
```

Contributor: Why this change?

Contributor (author): This will initialize the default and ingress service types. This is needed before creating the clients so that we can do cache warmup only for the default client.
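To make the ordering constraint in that comment concrete, here is a minimal, self-contained sketch; all names are illustrative placeholders, not the actual Hadoop classes. The point is that the default service type must be resolved before either client is constructed, because the constructor uses it to decide which client warms the keep-alive cache.

```java
// Hypothetical sketch of the ordering constraint; NOT the real AbfsClientHandler.
public class ClientHandlerSketch {

  enum ServiceType { DFS, BLOB }

  static final class Client {
    Client(ServiceType type, boolean warmupCache) {
      if (warmupCache) {
        // Only the default client pre-populates the connection cache.
        System.out.println("Warming keep-alive cache for " + type);
      }
    }
  }

  private final ServiceType defaultServiceType;
  private final Client dfsClient;
  private final Client blobClient;

  ClientHandlerSketch(boolean blobIsDefault) {
    // Step 1: resolve the service types BEFORE creating any client.
    this.defaultServiceType = blobIsDefault ? ServiceType.BLOB : ServiceType.DFS;
    // Step 2: warm the cache only for the default client.
    this.dfsClient = new Client(ServiceType.DFS,
        defaultServiceType == ServiceType.DFS);
    this.blobClient = new Client(ServiceType.BLOB,
        defaultServiceType == ServiceType.BLOB);
  }

  public static void main(String[] args) {
    new ClientHandlerSketch(false); // warms the cache for DFS only
  }
}
```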
```diff
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest;
 import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsDriverException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
 import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory;
 import org.apache.hadoop.util.functional.Tuples;
 import org.apache.http.HttpHost;
@@ -45,8 +46,10 @@
 import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
 import org.apache.http.impl.conn.DefaultHttpClientConnectionOperator;

+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.APACHE_IMPL;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.COLON;
 import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING;
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.JDK_FALLBACK;
 import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.KEEP_ALIVE_CACHE_CLOSED;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_METRIC_FORMAT;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_NETWORKING_LIBRARY;
@@ -74,7 +77,9 @@ public void testKacIsClosed() throws Throwable {
     configuration.unset(FS_AZURE_METRIC_FORMAT);
     try (AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.newInstance(
         configuration)) {
-      KeepAliveCache kac = fs.getAbfsStore().getClientHandler().getIngressClient()
+      KeepAliveCache kac = fs.getAbfsStore()
+          .getClientHandler()
+          .getIngressClient()
           .getKeepAliveCache();
       kac.close();
       AbfsDriverException ex = intercept(AbfsDriverException.class,
```
```diff
@@ -149,10 +154,33 @@ public void testApacheClientFallbackDuringConnectionWarmup()
     Assertions.assertThat(AbfsApacheHttpClient.usable())
         .describedAs("Apache HttpClient should be not usable")
         .isFalse();
+    // Make a rest API call to verify that the client falls back to JDK client.
+    AzureBlobFileSystem fs = getFileSystem();
+    verifyClientRequestId(fs, JDK_FALLBACK);
+    AbfsApacheHttpClient.setUsable();
+    verifyClientRequestId(fs, APACHE_IMPL);
   }
 }

+  /**
+   * Verify that the client request id contains the expected client.
+   * @param fs AzureBlobFileSystem instance.
+   * @param expectedClient Expected client in the client request id.
+   * @throws AzureBlobFileSystemException if any failure occurs during the operation.
+   */
+  private void verifyClientRequestId(AzureBlobFileSystem fs,
+      String expectedClient)
+      throws AzureBlobFileSystemException {
+    AbfsRestOperation op = fs.getAbfsStore()
+        .getClient()
+        .getFilesystemProperties(getTestTracingContext(fs, true));
+    String[] clientRequestIdList = op.getResult()
+        .getClientRequestId().split(COLON);
+    Assertions.assertThat(clientRequestIdList[clientRequestIdList.length - 1])
+        .describedAs("Http Client in use should be %s", expectedClient)
+        .isEqualTo(expectedClient);
+  }

   private Map.Entry<HttpRoute, AbfsManagedApacheHttpConnection> getTestConnection()
       throws IOException {
     HttpHost host = new HttpHost(getFileSystem().getUri().getHost(),
```

Contributor: Can you make one HTTP call and validate that the JDK client is now being used? The user agent can be used for validation.

Contributor (author): Sure, will do that.
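For readers following the validation approach agreed above: the test derives the HTTP client implementation from the last colon-separated token of the client request id. A standalone illustration of that parsing logic, using a made-up id value (the real id is assembled by the ABFS driver per request):

```java
// Standalone illustration of the suffix check in verifyClientRequestId;
// the id value below is invented for the example.
public class ClientRequestIdSuffixDemo {
  public static void main(String[] args) {
    String clientRequestId = "request-id:operation-id:JDK_FALLBACK";
    String[] parts = clientRequestId.split(":");
    // The last token names the HTTP client that served the call.
    String httpClientInUse = parts[parts.length - 1];
    System.out.println("HTTP client in use: " + httpClientInUse);
  }
}
```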
Contributor: As we were discussing last time, if we keep it tied to the read timeout, the 100-continue timeout would become 30 seconds. This should be a separate config for the 100-continue timeout.

Contributor: +1

Contributor (author): Sure, will create a new config for the read timeout and use that when 100-continue is enabled.
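To make the proposal concrete, a hedged sketch of what a dedicated 100-continue timeout setting could look like. The key name and default below are placeholders, not the actual configuration introduced by this PR:

```java
import org.apache.hadoop.conf.Configuration;

public class HundredContinueTimeoutSketch {
  // Placeholder key and default; the real names chosen in the PR may differ.
  static final String HUNDRED_CONTINUE_TIMEOUT_KEY =
      "fs.azure.http.expect.100.continue.timeout.ms";
  static final int DEFAULT_HUNDRED_CONTINUE_TIMEOUT_MS = 5_000;

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A dedicated knob keeps the 100-continue wait independent of the
    // 30-second read timeout discussed above.
    int timeoutMs = conf.getInt(HUNDRED_CONTINUE_TIMEOUT_KEY,
        DEFAULT_HUNDRED_CONTINUE_TIMEOUT_MS);
    System.out.println("100-continue wait timeout: " + timeoutMs + " ms");
  }
}
```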