Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
66 commits
Select commit Hold shift + click to select a range
e2ffb8c
ABFS: Added changes for expect hundred continue header with append re…
anmolasrani123 Feb 28, 2022
d110408
ABFS: Added changes for expect hundred continue
anmolasrani123 Mar 2, 2022
a29faa8
ABFS: Added changes for expect hundred continue
anmolasrani123 Mar 2, 2022
dacfde0
ABFS: Added changes for expect hundred continue
anmolasrani123 Mar 2, 2022
c14f458
Added retry mechanism for certain HTTP errors
anmolasrani123 Mar 4, 2022
7e40f07
Added retry mechanism for certain HTTP errors
anmolasrani123 Mar 4, 2022
ef67598
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Mar 4, 2022
93a77a7
Added retry mechanism for certain HTTP errors
anmolasrani123 Mar 30, 2022
9b43316
ABFS: Added changes for expect hundred continue header with append re…
anmolasrani123 Feb 28, 2022
899b40b
ABFS: Added changes for expect hundred continue
anmolasrani123 Mar 2, 2022
2af317d
ABFS: Added changes for expect hundred continue
anmolasrani123 Mar 2, 2022
cc9fcdb
ABFS: Added changes for expect hundred continue
anmolasrani123 Mar 2, 2022
9fc9c99
Added retry mechanism for certain HTTP errors
anmolasrani123 Mar 4, 2022
56eda26
Added retry mechanism for certain HTTP errors
anmolasrani123 Mar 4, 2022
091f1d4
Added retry mechanism for certain HTTP errors
anmolasrani123 Mar 30, 2022
3709619
Fix trunk conflict
anmolasrani123 May 12, 2022
fe33f93
Merge branch 'HADOOP-18146' of https://github.com/anmolanmol1234/hado…
anmolasrani123 Jul 20, 2022
94397c7
Added config details in md file
anmolasrani123 Jul 20, 2022
9c8f7d0
Merge branch 'trunk' into HADOOP-18146
anmolanmol1234 Jul 20, 2022
5f26061
Changing class modifier
anmolasrani123 Jul 20, 2022
f2ffb23
Merge branch 'HADOOP-18146' of https://github.com/anmolanmol1234/hado…
anmolasrani123 Jul 20, 2022
0f18d94
Spot bugs and checkstyle fixes
anmolasrani123 Aug 2, 2022
83fbd8c
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Aug 2, 2022
58c1123
remove unused imports
anmolasrani123 Aug 2, 2022
aab3128
Fix imports
anmolasrani123 Aug 22, 2022
d13f8bd
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Sep 19, 2022
478021a
Separate out account throttling
anmolasrani123 Sep 19, 2022
9fbb4de
Documentation added
anmolasrani123 Oct 25, 2022
7c43202
Formatting
anmolasrani123 Oct 25, 2022
75a3332
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Oct 25, 2022
3fc18b9
Addressed PR comments
anmolasrani123 Oct 28, 2022
aaca1c1
Addressed PR comments
anmolasrani123 Oct 28, 2022
135e04f
Addressed PR comments
anmolasrani123 Oct 28, 2022
eaf9dfc
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Nov 29, 2022
2a1834f
Addressed PR comments
anmolasrani123 Nov 29, 2022
3f37058
Merge branch 'HADOOP-18146' of https://github.com/anmolanmol1234/hado…
anmolasrani123 Nov 29, 2022
443e263
Fix for exception
anmolasrani123 Nov 29, 2022
e694264
Merge branch 'trunk' into HADOOP-18146
anmolanmol1234 Dec 5, 2022
7961d08
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Dec 5, 2022
05df41c
Update AbfsConfiguration.java
anmolanmol1234 Dec 5, 2022
457fda0
Changes for exception handling
anmolasrani123 Dec 6, 2022
f2e6f52
String correction
anmolasrani123 Dec 7, 2022
fd006bd
Tests for hundred continue
anmolasrani123 Dec 15, 2022
baf9ec7
Add tests for 100 continue
anmolasrani123 Dec 15, 2022
fe8deea
Add tests for hundred continue
anmolasrani123 Dec 15, 2022
cafd409
Parameters for test
anmolasrani123 Dec 19, 2022
f17c15a
Tests for expect header
anmolasrani123 Dec 19, 2022
61138e9
Update metrics fix
anmolasrani123 Dec 20, 2022
ac3e973
Metric update changes
anmolasrani123 Dec 20, 2022
36ec260
Tests for metric updation verification
anmolasrani123 Dec 20, 2022
8283ef2
Update md file
anmolasrani123 Dec 20, 2022
9611999
Remove unused imports
anmolasrani123 Dec 20, 2022
93003b2
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Dec 20, 2022
7899d1c
Checkstyle fixes
anmolasrani123 Dec 20, 2022
5398f2a
Checkstyle fixes
anmolasrani123 Dec 21, 2022
796b774
PR comments addressing
anmolasrani123 Dec 22, 2022
115b0b6
PR comments
anmolasrani123 Dec 22, 2022
db89c78
remove setter for connection
anmolasrani123 Dec 23, 2022
0fb9067
Update AbfsClient.java
anmolanmol1234 Dec 26, 2022
f3e2e14
Addressing PR comments
anmolanmol1234 Mar 16, 2023
51bdece
String fix
anmolanmol1234 Mar 16, 2023
83f14fb
Merge branch 'apache:trunk' into HADOOP-18146
anmolanmol1234 Mar 16, 2023
c3268dc
Remove unused imports
anmolanmol1234 Mar 16, 2023
675687c
Import fix
anmolanmol1234 Mar 16, 2023
99a9377
Checkstyle fixes
anmolanmol1234 Mar 16, 2023
e210b04
Build fixed
anmolanmol1234 Mar 17, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,11 @@ public final class AbfsHttpConstants {
public static final String HTTP_METHOD_PATCH = "PATCH";
public static final String HTTP_METHOD_POST = "POST";
public static final String HTTP_METHOD_PUT = "PUT";
/**
* All status codes less than http 100 signify error
* and should qualify for retry.
*/
public static final int HTTP_CONTINUE = 100;

// Abfs generic constants
public static final String SINGLE_WHITE_SPACE = " ";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,10 @@ public final class ConfigurationKeys {
* path to determine HNS status.
*/
public static final String FS_AZURE_ACCOUNT_IS_HNS_ENABLED = "fs.azure.account.hns.enabled";
/**
* Enable or disable expect hundred continue header.
* Value: {@value}.
*/
public static final String FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED = "fs.azure.account.expect.header.enabled";
public static final String FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME = "fs.azure.account.key";
public static final String FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME_REGX = "fs\\.azure\\.account\\.key\\.(.*)";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -686,8 +686,13 @@ public AbfsRestOperation append(final String path, final byte[] buffer,

final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = getAbfsRestOperationForAppend(AbfsRestOperationType.Append,
HTTP_METHOD_PUT, url, requestHeaders, buffer, reqParams.getoffset(),
reqParams.getLength(), sasTokenForReuse);
HTTP_METHOD_PUT,
url,
requestHeaders,
buffer,
reqParams.getoffset(),
reqParams.getLength(),
sasTokenForReuse);
try {
op.execute(tracingContext);
} catch (AzureBlobFileSystemException e) {
Expand All @@ -702,7 +707,7 @@ public AbfsRestOperation append(final String path, final byte[] buffer,
*/
int responseStatusCode = ((AbfsRestOperationException) e).getStatusCode();
if (checkUserError(responseStatusCode) && reqParams.isExpectHeaderEnabled()) {
LOG.debug("User error, retrying without 100 continue enabled");
LOG.debug("User error, retrying without 100 continue enabled for the given path " + path);
reqParams.setExpectHeaderEnabled(false);
return this.append(path, buffer, reqParams, cachedSasToken,
tracingContext);
Expand All @@ -715,9 +720,14 @@ public AbfsRestOperation append(final String path, final byte[] buffer,
&& appendSuccessCheckOp(op, path,
(reqParams.getPosition() + reqParams.getLength()), tracingContext)) {
final AbfsRestOperation successOp = getAbfsRestOperationForAppend(
AbfsRestOperationType.Append, HTTP_METHOD_PUT, url, requestHeaders,
buffer, reqParams.getoffset(), reqParams.getLength(),
sasTokenForReuse);
AbfsRestOperationType.Append,
HTTP_METHOD_PUT,
url,
requestHeaders,
buffer,
reqParams.getoffset(),
reqParams.getLength(),
sasTokenForReuse);
successOp.hardSetResult(HttpURLConnection.HTTP_OK);
return successOp;
}
Expand All @@ -728,7 +738,7 @@ && appendSuccessCheckOp(op, path,
}

/**
* Returns the rest operation for append
* Returns the rest operation for append.
* @param operationType The AbfsRestOperationType.
* @param httpMethod specifies the httpMethod.
* @param url specifies the url.
Expand All @@ -737,7 +747,7 @@ && appendSuccessCheckOp(op, path,
* @param bufferOffset The buffer offset.
* @param bufferLength The buffer Length.
* @param sasTokenForReuse The sasToken.
* @return AbfsRestOperation op
* @return AbfsRestOperation op.
*/
@VisibleForTesting
AbfsRestOperation getAbfsRestOperationForAppend(final AbfsRestOperationType operationType,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ public AbfsClientThrottlingIntercept(String accountName, AbfsConfiguration abfsC

// Hide default constructor
private AbfsClientThrottlingIntercept(AbfsConfiguration abfsConfiguration) {
//Account name is kept as empty as same instance is shared across all accounts.
// Account name is kept as empty as same instance is shared across all accounts.
this.accountName = "";
this.readThrottler = setAnalyzer("read", abfsConfiguration);
this.writeThrottler = setAnalyzer("write", abfsConfiguration);
Expand Down Expand Up @@ -117,11 +117,11 @@ static AbfsClientThrottlingIntercept initializeSingleton(AbfsConfiguration abfsC
}

/**
* Updates the metrics for the case when getOutputStream() caught an IOException
* and response code signifies throttling.
* Updates the metrics for the case when response code signifies throttling
* but there are some expected bytes to be sent.
* @param isThrottledOperation returns true if status code is HTTP_UNAVAILABLE
* @param abfsHttpOperation Used for status code and data transferred.
* @return
* @return true if the operation is throttled and has some bytes to transfer.
*/
private boolean updateBytesTransferred(boolean isThrottledOperation,
AbfsHttpOperation abfsHttpOperation) {
Expand All @@ -148,7 +148,7 @@ public void updateMetrics(AbfsRestOperationType operationType,
boolean isFailedOperation = (status < HttpURLConnection.HTTP_OK
|| status >= HttpURLConnection.HTTP_INTERNAL_ERROR);

// If status code is 503, it considered a throttled operation.
// If status code is 503, it is considered as a throttled operation.
boolean isThrottledOperation = (status == HTTP_UNAVAILABLE);

switch (operationType) {
Expand All @@ -160,7 +160,7 @@ public void updateMetrics(AbfsRestOperationType operationType,
throttling but there were some expectedBytesToBeSent.
*/
if (updateBytesTransferred(isThrottledOperation, abfsHttpOperation)) {
LOG.debug("Updating metrics due to throttling");
LOG.debug("Updating metrics due to throttling for path " + abfsHttpOperation.getConnUrl().getPath());
contentLength = abfsHttpOperation.getExpectedBytesToBeSent();
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@
import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding;

import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.HTTP_CONTINUE;

/**
* The AbfsRestOperation for Rest AbfsClient.
*/
Expand All @@ -61,9 +63,6 @@ public class AbfsRestOperation {
// Used only by AbfsInputStream/AbfsOutputStream to reuse SAS tokens.
private final String sasToken;

// All status codes less than http 100 signify error.
private static final int HTTP_CONTINUE = 100;

private static final Logger LOG = LoggerFactory.getLogger(AbfsClient.class);

// For uploads, this is the request entity body. For downloads,
Expand Down Expand Up @@ -250,6 +249,14 @@ private void completeExecute(TracingContext tracingContext)
LOG.trace("{} REST operation complete", operationType);
}

/**
* Returns a new object of AbfsHttpOperation.
* @param url The url for the operation.
* @param method The http method.
* @param requestHeaders The request headers for the operation.
* @return AbfsHttpOperation object.
* @throws IOException
*/
AbfsHttpOperation getHttpOperation(final URL url, final String method,
final List<AbfsHttpHeader> requestHeaders) throws IOException {
return new AbfsHttpOperation(url, method, requestHeaders);
Expand Down Expand Up @@ -341,7 +348,7 @@ private boolean executeHttpOperation(final int retryCount,
int status = httpOperation.getStatusCode();
/*
A status less than 300 (2xx range) or greater than or equal
to 500 (5xx range) should contribute to throttling metrics updation.
to 500 (5xx range) should contribute to throttling metrics being updated.
Less than 200 or greater than or equal to 500 show failed operations. 2xx
range contributes to successful operations. 3xx range is for redirects
and 4xx range is for user errors. These should not be a part of
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
import org.apache.hadoop.fs.azurebfs.AbfsConfiguration;
import org.apache.hadoop.classification.VisibleForTesting;

import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.HTTP_CONTINUE;

/**
* Retry policy used by AbfsClient.
* */
Expand Down Expand Up @@ -56,12 +58,6 @@ public class ExponentialRetryPolicy {
*/
private static final double MAX_RANDOM_RATIO = 1.2;

/**
* All status codes less than http 100 signify error
* and should qualify for retry.
*/
private static final int HTTP_CONTINUE = 100;

/**
* Holds the random number generator used to calculate randomized backoff intervals
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream;
import org.apache.hadoop.fs.azurebfs.services.AuthType;
import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient;
import org.apache.hadoop.fs.azurebfs.services.ITestAbfsClient;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
Expand Down Expand Up @@ -254,7 +254,7 @@ public Hashtable<String, String> call() throws Exception {
}

public AccessTokenProvider getAccessTokenProvider(final AzureBlobFileSystem fs) {
return TestAbfsClient.getAccessTokenProvider(fs.getAbfsStore().getClient());
return ITestAbfsClient.getAccessTokenProvider(fs.getAbfsStore().getClient());
}

public void loadConfiguredFileSystem() throws Exception {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient;
import org.apache.hadoop.fs.azurebfs.services.ITestAbfsClient;
import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;

Expand Down Expand Up @@ -362,7 +362,7 @@ public void testNegativeScenariosForCreateOverwriteDisabled()
// Get mock AbfsClient with current config
AbfsClient
mockClient
= TestAbfsClient.getMockAbfsClient(
= ITestAbfsClient.getMockAbfsClient(
fs.getAbfsStore().getClient(),
fs.getAbfsStore().getAbfsConfiguration());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient;
import org.apache.hadoop.fs.azurebfs.services.ITestAbfsClient;
import org.apache.hadoop.fs.azurebfs.services.TestAbfsPerfTracker;
import org.apache.hadoop.fs.azurebfs.utils.TestMockHelpers;
import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
Expand Down Expand Up @@ -176,7 +176,7 @@ public void testDeleteIdempotency() throws Exception {

final AzureBlobFileSystem fs = getFileSystem();
AbfsClient abfsClient = fs.getAbfsStore().getClient();
AbfsClient testClient = TestAbfsClient.createTestClientFromCurrentContext(
AbfsClient testClient = ITestAbfsClient.createTestClientFromCurrentContext(
abfsClient,
abfsConfig);

Expand Down Expand Up @@ -223,7 +223,7 @@ public void testDeleteIdempotency() throws Exception {
public void testDeleteIdempotencyTriggerHttp404() throws Exception {

final AzureBlobFileSystem fs = getFileSystem();
AbfsClient client = TestAbfsClient.createTestClientFromCurrentContext(
AbfsClient client = ITestAbfsClient.createTestClientFromCurrentContext(
fs.getAbfsStore().getClient(),
this.getConfiguration());

Expand All @@ -242,7 +242,7 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {
getTestTracingContext(fs, true)));

// mock idempotency check to mimic retried case
AbfsClient mockClient = TestAbfsClient.getMockAbfsClient(
AbfsClient mockClient = ITestAbfsClient.getMockAbfsClient(
fs.getAbfsStore().getClient(),
this.getConfiguration());
AzureBlobFileSystemStore mockStore = mock(AzureBlobFileSystemStore.class);
Expand All @@ -257,10 +257,10 @@ public void testDeleteIdempotencyTriggerHttp404() throws Exception {

// Case 2: Mimic retried case
// Idempotency check on Delete always returns success
AbfsRestOperation idempotencyRetOp = TestAbfsClient.getRestOp(
AbfsRestOperation idempotencyRetOp = ITestAbfsClient.getRestOp(
DeletePath, mockClient, HTTP_METHOD_DELETE,
TestAbfsClient.getTestUrl(mockClient, "/NonExistingPath"),
TestAbfsClient.getTestRequestHeaders(mockClient));
ITestAbfsClient.getTestUrl(mockClient, "/NonExistingPath"),
ITestAbfsClient.getTestRequestHeaders(mockClient));
idempotencyRetOp.hardSetResult(HTTP_OK);

doReturn(idempotencyRetOp).when(mockClient).deleteIdempotencyCheckOp(any());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@
* Test useragent of abfs client.
*
*/
public final class TestAbfsClient extends AbstractAbfsIntegrationTest {
public final class ITestAbfsClient extends AbstractAbfsIntegrationTest {

private static final String ACCOUNT_NAME = "bogusAccountName.dfs.core.windows.net";
private static final String FS_AZURE_USER_AGENT_PREFIX = "Partner Service";
Expand All @@ -94,7 +94,7 @@ public final class TestAbfsClient extends AbstractAbfsIntegrationTest {

private final Pattern userAgentStringPattern;

public TestAbfsClient() throws Exception {
public ITestAbfsClient() throws Exception {
StringBuilder regEx = new StringBuilder();
regEx.append("^");
regEx.append(APN_VERSION);
Expand Down Expand Up @@ -340,30 +340,30 @@ public static AbfsClient getMockAbfsClient(AbfsClient baseAbfsClientInstance,
abfsConfig.getAccountName().substring(0,
abfsConfig.getAccountName().indexOf(DOT)), abfsConfig));
// override baseurl
client = TestAbfsClient.setAbfsClientField(client, "abfsConfiguration",
client = ITestAbfsClient.setAbfsClientField(client, "abfsConfiguration",
abfsConfig);

// override baseurl
client = TestAbfsClient.setAbfsClientField(client, "baseUrl",
client = ITestAbfsClient.setAbfsClientField(client, "baseUrl",
baseAbfsClientInstance.getBaseUrl());

// override auth provider
if (currentAuthType == AuthType.SharedKey) {
client = TestAbfsClient.setAbfsClientField(client, "sharedKeyCredentials",
client = ITestAbfsClient.setAbfsClientField(client, "sharedKeyCredentials",
new SharedKeyCredentials(
abfsConfig.getAccountName().substring(0,
abfsConfig.getAccountName().indexOf(DOT)),
abfsConfig.getStorageAccountKey()));
} else {
client = TestAbfsClient.setAbfsClientField(client, "tokenProvider",
client = ITestAbfsClient.setAbfsClientField(client, "tokenProvider",
abfsConfig.getTokenProvider());
}

// override user agent
String userAgent = "APN/1.0 Azure Blob FS/3.4.0-SNAPSHOT (PrivateBuild "
+ "JavaJRE 1.8.0_252; Linux 5.3.0-59-generic/amd64; openssl-1.0; "
+ "UNKNOWN/UNKNOWN) MSFT";
client = TestAbfsClient.setAbfsClientField(client, "userAgent", userAgent);
client = ITestAbfsClient.setAbfsClientField(client, "userAgent", userAgent);

return client;
}
Expand Down Expand Up @@ -469,7 +469,7 @@ public void testExpectHundredContinue() throws Exception {

// Gets the client.
AbfsClient testClient = Mockito.spy(
TestAbfsClient.createTestClientFromCurrentContext(
ITestAbfsClient.createTestClientFromCurrentContext(
abfsClient,
abfsConfig));

Expand All @@ -489,7 +489,7 @@ public void testExpectHundredContinue() throws Exception {

// Creates a list of request headers.
final List<AbfsHttpHeader> requestHeaders
= TestAbfsClient.getTestRequestHeaders(testClient);
= ITestAbfsClient.getTestRequestHeaders(testClient);
requestHeaders.add(
new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, HTTP_METHOD_PATCH));
if (appendRequestParameters.isExpectHeaderEnabled()) {
Expand Down Expand Up @@ -566,9 +566,7 @@ public void testExpectHundredContinue() throws Exception {
TracingHeaderFormat.ALL_ID_FORMAT, null));

// Check that expect header is enabled before the append call.
Assertions.assertThat(appendRequestParameters.isExpectHeaderEnabled())
.describedAs("The expect header is not true before the append call")
.isEqualTo(true);
assertTrue(appendRequestParameters.isExpectHeaderEnabled());

intercept(AzureBlobFileSystemException.class,
() -> testClient.append(finalTestPath, buffer, appendRequestParameters, null, tracingContext));
Expand All @@ -579,8 +577,6 @@ public void testExpectHundredContinue() throws Exception {
.isEqualTo(0);

// Verify that the same request was retried with expect header disabled.
Assertions.assertThat(appendRequestParameters.isExpectHeaderEnabled())
.describedAs("The expect header is not false")
.isEqualTo(false);
assertFalse(appendRequestParameters.isExpectHeaderEnabled());
}
}
Loading