diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java index 4a4d91b2ff73..a946fe54cb14 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java @@ -18,17 +18,23 @@ package org.apache.hadoop.hdds.scm; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + /** * The information of the request of pipeline. */ public final class PipelineRequestInformation { private final long size; + private final Set datacenters; /** * Builder for PipelineRequestInformation. */ public static class Builder { private long size; + private Set datacenters = Collections.emptySet(); public static Builder getBuilder() { return new Builder(); @@ -44,16 +50,31 @@ public Builder setSize(long sz) { return this; } + /** + * sets the datacenters. + * @param datacenters request datacenters + * @return Builder for PipelineRequestInformation + */ + public Builder setDatacenters(Set datacenters) { + this.datacenters = new HashSet<>(datacenters); + return this; + } + public PipelineRequestInformation build() { - return new PipelineRequestInformation(size); + return new PipelineRequestInformation(size, datacenters); } } - private PipelineRequestInformation(long size) { + private PipelineRequestInformation(long size, Set datacenters) { this.size = size; + this.datacenters = datacenters; } public long getSize() { return size; } + + public Set getDatacenters() { + return datacenters; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 52e2a26f6431..ddcc75a1e9b7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -617,6 +617,13 @@ public final class ScmConfigKeys { "ozone.scm.ha.dbtransactionbuffer.flush.interval"; public static final long OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL_DEFAULT = 600 * 1000L; + + public static final String OZONE_SCM_DC_DATANODE_MAPPING_KEY = + "ozone.scm.dc.datanode.mapping"; + + public static final String OZONE_SCM_DC_DATANODE_MAPPING_DEFAULT = + ""; + /** * Never constructed. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index b11428581e7b..06fc7bf8104b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hdds.scm.container; -import java.time.Clock; -import java.time.Instant; -import java.util.Comparator; - +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -29,12 +28,16 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto2Codec; +import org.apache.ratis.util.Preconditions; + +import java.time.Clock; +import java.time.Instant; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.Set; -import com.fasterxml.jackson.annotation.JsonIgnore; import static java.lang.Math.max; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.ratis.util.Preconditions; /** * Class wraps ozone container info. @@ -89,6 +92,7 @@ public static Codec getCodec() { // The sequenceId of a close container cannot change, and all the // container replica should have the same sequenceId. 
private long sequenceId; + private final Set datacenters; @SuppressWarnings("parameternumber") private ContainerInfo( @@ -102,7 +106,8 @@ private ContainerInfo( long deleteTransactionId, long sequenceId, ReplicationConfig repConfig, - Clock clock) { + Clock clock, + Set datacenters) { this.containerID = ContainerID.valueOf(containerID); this.pipelineID = pipelineID; this.usedBytes = usedBytes; @@ -115,6 +120,7 @@ private ContainerInfo( this.sequenceId = sequenceId; this.replicationConfig = repConfig; this.clock = clock; + this.datacenters = datacenters; } public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { @@ -131,13 +137,14 @@ public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { .setDeleteTransactionId(info.getDeleteTransactionId()) .setReplicationConfig(config) .setSequenceId(info.getSequenceId()) + .setDatacenters(new HashSet<>(info.getDatacentersList())) .build(); if (info.hasPipelineID()) { builder.setPipelineID(PipelineID.getFromProtobuf(info.getPipelineID())); } - return builder.build(); + return builder.build(); } /** @@ -229,6 +236,10 @@ public long getSequenceId() { return sequenceId; } + public Set getDatacenters() { + return datacenters; + } + public void updateDeleteTransactionId(long transactionId) { deleteTransactionId = max(transactionId, deleteTransactionId); } @@ -267,7 +278,8 @@ public HddsProtos.ContainerInfoProto getProtobuf() { .setDeleteTransactionId(getDeleteTransactionId()) .setOwner(getOwner()) .setSequenceId(getSequenceId()) - .setReplicationType(getReplicationType()); + .setReplicationType(getReplicationType()) + .addAllDatacenters(getDatacenters()); if (replicationConfig instanceof ECReplicationConfig) { builder.setEcReplicationConfig(((ECReplicationConfig) replicationConfig) @@ -282,6 +294,7 @@ public HddsProtos.ContainerInfoProto getProtobuf() { if (getPipelineID() != null) { builder.setPipelineID(getPipelineID().getProtobuf()); } + return builder.build(); } @@ -383,6 +396,7 @@ public static class Builder { private long sequenceId; private PipelineID pipelineID; private ReplicationConfig replicationConfig; + private Set datacenters = Collections.emptySet(); public Builder setPipelineID(PipelineID pipelineId) { this.pipelineID = pipelineId; @@ -444,10 +458,15 @@ public Builder setClock(Clock clock) { return this; } + public Builder setDatacenters(Set datacenters) { + this.datacenters = datacenters; + return this; + } + public ContainerInfo build() { return new ContainerInfo(containerID, state, pipelineID, used, keys, stateEnterTime, owner, deleteTransactionId, - sequenceId, replicationConfig, clock); + sequenceId, replicationConfig, clock, datacenters); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java index 1cebd3296e34..d7a2fc3f79d7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java @@ -139,6 +139,7 @@ public enum ResultCodes { CA_ROTATION_IN_PROGRESS, CA_ROTATION_IN_POST_PROGRESS, CONTAINER_ALREADY_CLOSED, - CONTAINER_ALREADY_CLOSING + CONTAINER_ALREADY_CLOSING, + FAILED_TO_FIND_ENOUGH_NODES_WITHIN_DATACENTER, } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 
0d7a85198355..c7e5f3d04814 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; @@ -93,6 +94,8 @@ public static Codec getCodec() { private final Instant stateEnterTime; + private final Set datacenters; + /** * The immutable properties of pipeline object is used in * ContainerStateManager#getMatchingContainerByPipeline to take a lock on @@ -118,6 +121,7 @@ private Pipeline(Builder b) { replicaIndexes = b.replicaIndexes != null ? ImmutableMap.copyOf(b.replicaIndexes) : ImmutableMap.of(); creationTimestamp = b.creationTimestamp != null ? b.creationTimestamp : Instant.now(); stateEnterTime = Instant.now(); + datacenters = b.datacenters; } /** @@ -160,6 +164,10 @@ public Instant getStateEnterTime() { return stateEnterTime; } + public Set getDatacenters() { + return datacenters; + } + /** * Return the suggested leaderId which has a high priority among DNs of the * pipeline. @@ -379,7 +387,8 @@ public HddsProtos.Pipeline getProtobufMessage(int clientVersion) .setLeaderID(leaderId != null ? leaderId.toString() : "") .setCreationTimeStamp(creationTimestamp.toEpochMilli()) .addAllMembers(members) - .addAllMemberReplicaIndexes(memberReplicaIndexes); + .addAllMemberReplicaIndexes(memberReplicaIndexes) + .addAllDatacenters(datacenters); if (replicationConfig instanceof ECReplicationConfig) { builder.setEcReplicationConfig(((ECReplicationConfig) replicationConfig) @@ -479,7 +488,8 @@ public static Builder toBuilder(HddsProtos.Pipeline pipeline) .setLeaderId(leaderId) .setSuggestedLeaderId(suggestedLeaderId) .setNodeOrder(pipeline.getMemberOrdersList()) - .setCreateTimestamp(pipeline.getCreationTimeStamp()); + .setCreateTimestamp(pipeline.getCreationTimeStamp()) + .setDatacenters(new HashSet<>(pipeline.getDatacentersList())); } public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) @@ -555,6 +565,7 @@ public static class Builder { private Instant creationTimestamp = null; private UUID suggestedLeaderId = null; private Map replicaIndexes; + private Set datacenters = new HashSet<>(); public Builder() { } @@ -567,6 +578,7 @@ public Builder(Pipeline pipeline) { this.leaderId = pipeline.getLeaderId(); this.creationTimestamp = pipeline.getCreationTimestamp(); this.suggestedLeaderId = pipeline.getSuggestedLeaderId(); + this.datacenters = pipeline.getDatacenters(); if (nodeStatus != null) { replicaIndexes = new HashMap<>(); for (DatanodeDetails dn : nodeStatus.keySet()) { @@ -635,6 +647,10 @@ public Builder setSuggestedLeaderId(UUID uuid) { return this; } + public Builder setDatacenters(Set datacenters) { + this.datacenters = datacenters; + return this; + } public Builder setReplicaIndexes(Map indexes) { this.replicaIndexes = indexes; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 70566767eaba..c27aec605e09 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -407,6 +407,9 @@ private OzoneConsts() { public static final String GDPR_SECRET = "secret"; public static final String GDPR_ALGORITHM = "algorithm"; + // DATACENTERS 
+ public static final String DATACENTERS = "datacenters"; + /** * Block key name as illegal characters * diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 6f02f542e63c..4784294ee1d2 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -4349,4 +4349,16 @@ A key of the cache entry is a pair of bucket and the requested key path. </description> </property> + + <property> + <name>ozone.scm.dc.datanode.mapping</name> + <value/> + <tag>SCM</tag> + <description> + This property defines a mapping that associates each DataNode with its corresponding data center. + The mapping is specified as a comma-separated list of pairs in the format `hostname:ratis_port=dc_name`. + By establishing this association, Ozone can identify which data center a DataNode belongs to, + enabling more efficient storage management and data placement across data centers. + </description> + </property> </configuration> diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java index c75bdc4f4af1..62c59d758b54 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java @@ -26,6 +26,7 @@ import java.time.Duration; import java.time.Instant; +import java.util.Collections; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; @@ -116,6 +117,7 @@ public static ContainerInfo.Builder newBuilderForTest() { .setContainerID(1234) .setPipelineID(PipelineID.randomId()) .setState(OPEN) - .setOwner("scm"); + .setOwner("scm") + .setDatacenters(Collections.emptySet()); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java index b23325432c7e..2db8d82296e4 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -72,6 +73,7 @@ public static Pipeline createPipeline(Iterable<DatanodeDetails> ids) { .setReplicationConfig( StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE)) .setNodes(dns) + .setDatacenters(Collections.emptySet()) .build(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java index ef2585488faa..e96d2c76df88 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java @@ -19,20 +19,21 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; +import org.apache.hadoop.security.KerberosInfo; import java.io.Closeable; import java.io.IOException; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeoutException; /** @@ -87,7 +88,7 @@ default List allocateBlock(long size, int numBlocks, ReplicationConfig replicationConfig, String owner, ExcludeList excludeList) throws IOException { return allocateBlock(size, numBlocks, replicationConfig, owner, - excludeList, null); + excludeList, null, null); } /** @@ -109,7 +110,7 @@ default List allocateBlock(long size, int numBlocks, */ List allocateBlock(long size, int numBlocks, ReplicationConfig replicationConfig, String owner, - ExcludeList excludeList, String clientMachine) throws IOException; + ExcludeList excludeList, String clientMachine, Set datacenters) throws IOException; /** * Delete blocks for a set of object keys. diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 2e724969998b..9cd8255a0907 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -16,12 +16,9 @@ */ package org.apache.hadoop.hdds.scm.protocolPB; -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - +import com.google.common.base.Preconditions; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ContainerBlockID; @@ -31,19 +28,17 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; import 
org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .SortDatanodesRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .SortDatanodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type; import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; @@ -60,9 +55,12 @@ import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status.OK; @@ -153,7 +151,7 @@ public List allocateBlock( long size, int num, ReplicationConfig replicationConfig, String owner, ExcludeList excludeList, - String clientMachine + String clientMachine, Set datacenters ) throws IOException { Preconditions.checkArgument(size > 0, "block size must be greater than 0"); @@ -163,7 +161,8 @@ public List allocateBlock( .setNumBlocks(num) .setType(replicationConfig.getReplicationType()) .setOwner(owner) - .setExcludeList(excludeList.getProtoBuf()); + .setExcludeList(excludeList.getProtoBuf()) + .addAllDatacenters(datacenters); if (StringUtils.isNotEmpty(clientMachine)) { requestBuilder.setClient(clientMachine); diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 3f346300b3ed..cdaa394c850a 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -129,6 +129,7 @@ message Pipeline { optional ECReplicationConfig ecReplicationConfig = 11; // TODO(runzhiwang): when leaderID is gone, specify 6 as the index of leaderID128 optional UUID leaderID128 = 100; + repeated string datacenters = 103; } message KeyValue { @@ -239,6 +240,7 @@ message ContainerInfoProto { optional ReplicationFactor replicationFactor = 10; required ReplicationType replicationType = 11; optional ECReplicationConfig ecReplicationConfig = 12; + repeated string datacenters = 13; } message ContainerWithPipeline { diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto index 307c23a56202..2f3e0b824264 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto @@ -137,6 +137,7 @@ enum Status { CA_ROTATION_IN_POST_PROGRESS = 44; CONTAINER_ALREADY_CLOSED = 45; CONTAINER_ALREADY_CLOSING = 46; + FAILED_TO_FIND_ENOUGH_NODES_WITHIN_DATACENTER=47; } /** @@ -154,6 +155,7 @@ message AllocateScmBlockRequestProto { 
optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 8; optional string client = 9; + repeated string datacenters = 10; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java index a792e2cea6b7..9cb37bd13e58 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java @@ -34,10 +34,11 @@ public interface PlacementPolicy { default List chooseDatanodes( List excludedNodes, - List favoredNodes, int nodesRequired, + List favoredNodes, + Set datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws IOException { return this.chooseDatanodes(Collections.emptyList(), excludedNodes, - favoredNodes, nodesRequired, metadataSizeRequired, + favoredNodes, datacenters, nodesRequired, metadataSizeRequired, dataSizeRequired); } /** @@ -47,6 +48,7 @@ default List chooseDatanodes( * @param usedNodes - List of nodes already chosen for pipeline * @param excludedNodes - list of nodes to be excluded. * @param favoredNodes - list of nodes preferred. + * @param datacenters - set of preferred datacenters. * @param nodesRequired - number of datanodes required. * @param dataSizeRequired - size required for the container. * @param metadataSizeRequired - size required for Ratis metadata. @@ -56,8 +58,8 @@ default List chooseDatanodes( List chooseDatanodes(List usedNodes, List excludedNodes, List favoredNodes, - int nodesRequired, long metadataSizeRequired, - long dataSizeRequired) throws IOException; + Set datacenters, int nodesRequired, + long metadataSizeRequired, long dataSizeRequired) throws IOException; /** * Given a list of datanode and the number of replicas required, return diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 471a94794122..464a96933bd0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -134,11 +134,13 @@ public ConfigurationSource getConf() { @Override public final List chooseDatanodes( List excludedNodes, - List favoredNodes, int nodesRequired, + List favoredNodes, + Set datacenters, + int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { return this.chooseDatanodes(UNSET_USED_NODES, excludedNodes, - favoredNodes, nodesRequired, metadataSizeRequired, + favoredNodes, datacenters, nodesRequired, metadataSizeRequired, dataSizeRequired); } @@ -191,6 +193,7 @@ public final List chooseDatanodes( List usedNodes, List excludedNodes, List favoredNodes, + Set datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { /* @@ -207,7 +210,7 @@ object of DatanodeDetails(with Topology Information) while trying to get the random node from NetworkTopology should fix this. 
Check HDDS-7015 */ return chooseDatanodesInternal(validateDatanodes(usedNodes), - validateDatanodes(excludedNodes), favoredNodes, nodesRequired, + validateDatanodes(excludedNodes), favoredNodes, datacenters, nodesRequired, metadataSizeRequired, dataSizeRequired); } @@ -225,7 +228,7 @@ object of DatanodeDetails(with Topology Information) while trying to get the */ protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, + List favoredNodes, Set datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List healthyNodes = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java index 27a97a0349d3..0aace88c2ba2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java @@ -18,16 +18,12 @@ package org.apache.hadoop.hdds.scm; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.BlockingQueue; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.safemode.Precheck; - import org.apache.hadoop.hdds.scm.security.RootCARotationManager; import org.apache.hadoop.hdds.scm.server.ContainerReportQueue; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReport; @@ -41,8 +37,15 @@ import java.io.File; import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.OptionalInt; +import java.util.concurrent.BlockingQueue; +import java.util.stream.Collectors; import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; @@ -61,6 +64,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DC_DATANODE_MAPPING_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DC_DATANODE_MAPPING_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_CONTAINER_REPORT_QUEUE_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_PREFIX; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT; @@ -248,4 +253,14 @@ public static void checkIfCertSignRequestAllowed( } } } + + public static Map getDcMapping(ConfigurationSource conf) { + final String dcMappingStr = conf.get(OZONE_SCM_DC_DATANODE_MAPPING_KEY, OZONE_SCM_DC_DATANODE_MAPPING_DEFAULT); + if (dcMappingStr == null || dcMappingStr.trim().isEmpty()) { + return Collections.emptyMap(); + } + return Arrays.stream(dcMappingStr.split(",")) + .map(s -> s.split("=", 2)) + .collect(Collectors.toMap(parts -> parts[0], parts -> parts[1])); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java index 094b085af507..3dd2a7fc6328 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java @@ -17,16 +17,17 @@ package org.apache.hadoop.hdds.scm.block; -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.TimeoutException; - import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.common.BlockGroup; +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeoutException; + /** * * Block APIs. @@ -44,7 +45,7 @@ public interface BlockManager extends Closeable { */ AllocatedBlock allocateBlock(long size, ReplicationConfig replicationConfig, String owner, - ExcludeList excludeList) throws IOException, TimeoutException; + ExcludeList excludeList, Set datacenters) throws IOException, TimeoutException; /** * Deletes a list of blocks in an atomic operation. Internally, SCM diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 1260ea6a006b..0a6e4c7bddf2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -16,14 +16,6 @@ */ package org.apache.hadoop.hdds.scm.block; -import javax.management.ObjectName; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -44,13 +36,21 @@ import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.management.ObjectName; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE; import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.LOCAL_ID; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** Block Manager manages the block access for SCM. 
*/ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { @@ -148,7 +148,7 @@ public void stop() throws IOException { @Override public AllocatedBlock allocateBlock(final long size, ReplicationConfig replicationConfig, - String owner, ExcludeList excludeList) + String owner, ExcludeList excludeList, Set<String> datacenters) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Size : {} , replicationConfig: {}", size, replicationConfig); @@ -164,7 +164,7 @@ public AllocatedBlock allocateBlock(final long size, } ContainerInfo containerInfo = writableContainerFactory.getContainer( - size, replicationConfig, owner, excludeList); + size, replicationConfig, owner, excludeList, datacenters); if (containerInfo != null) { return newBlock(containerInfo); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 53fc615cca2f..b2399c35dc18 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -250,7 +250,8 @@ private ContainerInfo allocateContainer(final Pipeline pipeline, .setOwner(owner) .setContainerID(containerID.getId()) .setDeleteTransactionId(0) - .setReplicationType(pipeline.getType()); + .setReplicationType(pipeline.getType()) + .addAllDatacenters(pipeline.getDatacenters()); if (pipeline.getReplicationConfig() instanceof ECReplicationConfig) { containerInfoBuilder.setEcReplicationConfig( @@ -340,10 +341,10 @@ public ContainerInfo getMatchingContainer(final long size, final String owner, ContainerInfo containerInfo; try { synchronized (pipeline.getId()) { - containerIDs = getContainersForOwner(pipeline, owner); + containerIDs = getContainersForOwnerAndDcs(pipeline, owner); if (containerIDs.size() < getOpenContainerCountPerPipeline(pipeline)) { allocateContainer(pipeline, owner); - containerIDs = getContainersForOwnerAndDcs(pipeline, owner); } containerIDs.removeAll(excludedContainerIDs); containerInfo = containerStateManager.getMatchingContainer( @@ -368,12 +369,12 @@ private int getOpenContainerCountPerPipeline(Pipeline pipeline) { } /** - * Returns the container ID's matching with specified owner. + * Returns the container IDs matching the specified owner and allowed datacenters.
* @param pipeline * @param owner * @return NavigableSet */ - private NavigableSet getContainersForOwner( + private NavigableSet getContainersForOwnerAndDcs( Pipeline pipeline, String owner) throws IOException { NavigableSet containerIDs = pipelineManager.getContainersInPipeline(pipeline.getId()); @@ -381,7 +382,9 @@ private NavigableSet getContainersForOwner( while (containerIDIterator.hasNext()) { ContainerID cid = containerIDIterator.next(); try { - if (!getContainer(cid).getOwner().equals(owner)) { + ContainerInfo containerInfo = getContainer(cid); + if (!containerInfo.getOwner().equals(owner) || + !containerInfo.getDatacenters().equals(pipeline.getDatacenters())) { containerIDIterator.remove(); } } catch (ContainerNotFoundException e) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java index 3ac5bcbe5522..c3110096cdf0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java @@ -19,6 +19,7 @@ import java.util.List; +import java.util.Set; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; @@ -102,10 +103,11 @@ public SCMContainerPlacementCapacity(final NodeManager nodeManager, protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, List favoredNodes, + Set datacenters, final int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List healthyNodes = super.chooseDatanodesInternal( - usedNodes, excludedNodes, favoredNodes, nodesRequired, + usedNodes, excludedNodes, favoredNodes, datacenters, nodesRequired, metadataSizeRequired, dataSizeRequired); if (healthyNodes.size() == nodesRequired) { return healthyNodes; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 1c2b5a3be395..e9b68b8beebd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -30,12 +30,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.Arrays; -import java.util.List; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; /** @@ -106,8 +106,9 @@ public SCMContainerPlacementRackAware(final NodeManager nodeManager, protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, int nodesRequired, - long metadataSizeRequired, long dataSizeRequired) + List favoredNodes, + Set datacenters, + int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { Map mapSizeRequired = new HashMap<>(); mapSizeRequired.put(META_DATA_SIZE_REQUIRED, metadataSizeRequired); diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java index 141d85fd5e10..8f7d49ec1421 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java @@ -209,6 +209,7 @@ protected List chooseDatanodesInternal( List usedNodes, final List excludedNodes, final List favoredNodes, + final Set datacenters, final int nodesRequired, final long metadataSizeRequired, final long dataSizeRequired) throws SCMException { if (nodesRequired <= 0) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java index cdfd57d1d09b..8be942cfb46c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java @@ -19,16 +19,17 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; +import java.util.Set; /** * Container placement policy that randomly chooses healthy datanodes. 
@@ -74,11 +75,12 @@ public SCMContainerPlacementRandom(final NodeManager nodeManager, protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, final int nodesRequired, - long metadataSizeRequired, long dataSizeRequired) + List favoredNodes, + Set datacenters, + final int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List healthyNodes = - super.chooseDatanodesInternal(usedNodes, excludedNodes, favoredNodes, + super.chooseDatanodesInternal(usedNodes, excludedNodes, favoredNodes, datacenters, nodesRequired, metadataSizeRequired, dataSizeRequired); if (healthyNodes.size() == nodesRequired) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java index 3dcd6aa23ba4..20c1960ea5b8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; @@ -85,10 +86,10 @@ public static List getTargetDatanodes(PlacementPolicy policy, while (mutableRequiredNodes > 0) { try { if (usedNodes == null) { - return policy.chooseDatanodes(excludedNodes, null, + return policy.chooseDatanodes(excludedNodes, null, Collections.emptySet(), mutableRequiredNodes, 0, dataSizeRequired); } else { - return policy.chooseDatanodes(usedNodes, excludedNodes, null, + return policy.chooseDatanodes(usedNodes, excludedNodes, null, Collections.emptySet(), mutableRequiredNodes, 0, dataSizeRequired); } } catch (IOException e) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java index 4e68605c6831..e4667304e9f2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMService; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -36,7 +37,10 @@ import java.io.IOException; import java.time.Clock; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; @@ -88,6 +92,7 @@ public class BackgroundPipelineCreator implements SCMService { private final AtomicBoolean running = new AtomicBoolean(false); private final long intervalInMillis; private final Clock clock; + private final Set datacenters; BackgroundPipelineCreator(PipelineManager pipelineManager, @@ -96,6 +101,7 @@ public class BackgroundPipelineCreator implements SCMService { this.conf = conf; 
this.scmContext = scmContext; this.clock = clock; + this.datacenters = new HashSet<>(ScmUtils.getDcMapping(conf).values()); this.createPipelineInSafeMode = conf.getBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, @@ -232,7 +238,8 @@ private void createPipelines() throws RuntimeException { (ReplicationConfig) it.next(); try { - Pipeline pipeline = pipelineManager.createPipeline(replicationConfig); + Pipeline pipeline = pipelineManager.createPipeline(replicationConfig, Collections.emptyList(), + Collections.emptyList(), datacenters); LOG.info("Created new pipeline {}", pipeline); } catch (IOException ioe) { it.remove(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/ECPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/ECPipelineProvider.java index c7bf6819a73a..931f085035ad 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/ECPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/ECPipelineProvider.java @@ -71,15 +71,15 @@ public ECPipelineProvider(NodeManager nodeManager, public synchronized Pipeline create(ECReplicationConfig replicationConfig) throws IOException { return create(replicationConfig, Collections.emptyList(), - Collections.emptyList()); + Collections.emptyList(), Collections.emptySet()); } @Override protected Pipeline create(ECReplicationConfig replicationConfig, - List excludedNodes, List favoredNodes) + List excludedNodes, List favoredNodes, Set datacenters) throws IOException { List dns = placementPolicy - .chooseDatanodes(excludedNodes, favoredNodes, + .chooseDatanodes(excludedNodes, favoredNodes, datacenters, replicationConfig.getRequiredNodes(), 0, this.containerSizeBytes); return create(replicationConfig, dns); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index 604c3466c340..c83a0042d01d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -83,10 +83,10 @@ void setProvider( public Pipeline create( ReplicationConfig replicationConfig, List excludedNodes, - List favoredNodes) + List favoredNodes, Set datacenters) throws IOException { Pipeline pipeline = providers.get(replicationConfig.getReplicationType()) - .create(replicationConfig, excludedNodes, favoredNodes); + .create(replicationConfig, excludedNodes, favoredNodes, datacenters); checkPipeline(pipeline); return pipeline; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java index 15b0f408c560..f254220541b4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java @@ -41,7 +41,8 @@ Pipeline createPipeline(ReplicationConfig replicationConfig) Pipeline createPipeline(ReplicationConfig replicationConfig, List excludedNodes, - List favoredNodes) + List favoredNodes, + Set datacenters) throws IOException; Pipeline buildECPipeline(ReplicationConfig replicationConfig, diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java index 000d3e73633f..1237a1c56eb5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java @@ -214,7 +214,7 @@ public Pipeline buildECPipeline(ReplicationConfig replicationConfig, } checkIfPipelineCreationIsAllowed(replicationConfig); return pipelineFactory.create(replicationConfig, excludedNodes, - favoredNodes); + favoredNodes, Collections.emptySet()); } /** @@ -239,12 +239,12 @@ public void addEcPipeline(Pipeline pipeline) public Pipeline createPipeline(ReplicationConfig replicationConfig) throws IOException { return createPipeline(replicationConfig, Collections.emptyList(), - Collections.emptyList()); + Collections.emptyList(), Collections.emptySet()); } @Override public Pipeline createPipeline(ReplicationConfig replicationConfig, - List excludedNodes, List favoredNodes) + List excludedNodes, List favoredNodes, Set datacenters) throws IOException { checkIfPipelineCreationIsAllowed(replicationConfig); @@ -253,7 +253,7 @@ public Pipeline createPipeline(ReplicationConfig replicationConfig, try { try { pipeline = pipelineFactory.create(replicationConfig, - excludedNodes, favoredNodes); + excludedNodes, favoredNodes, datacenters); } catch (IOException e) { metrics.incNumPipelineCreationFailed(); throw e; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java index 6c13960e179e..c854de8a0c97 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java @@ -25,8 +25,9 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -37,6 +38,8 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; /** @@ -58,6 +61,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy { private final ConfigurationSource conf; private final int heavyNodeCriteria; private static final int REQUIRED_RACKS = 2; + private final Map dcMapping; public static final String MULTIPLE_RACK_PIPELINE_MSG = "The cluster has multiple racks, but all nodes with available " + @@ -81,6 +85,7 @@ public PipelinePlacementPolicy(final NodeManager nodeManager, this.stateManager = stateManager; String dnLimit = conf.get(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT); this.heavyNodeCriteria = dnLimit == null ? 
0 : Integer.parseInt(dnLimit); + this.dcMapping = ScmUtils.getDcMapping(conf); } public static int currentRatisThreePipelineCount( @@ -248,7 +253,7 @@ private boolean multipleRacksAvailable(List<DatanodeDetails> dns) { @Override protected List<DatanodeDetails> chooseDatanodesInternal( List<DatanodeDetails> usedNodes, List<DatanodeDetails> excludedNodes, - List<DatanodeDetails> favoredNodes, + List<DatanodeDetails> favoredNodes, Set<String> datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { // Get a list of viable nodes based on criteria @@ -256,6 +261,10 @@ protected List<DatanodeDetails> chooseDatanodesInternal( List<DatanodeDetails> healthyNodes = filterViableNodes(excludedNodes, usedNodes, nodesRequired, metadataSizeRequired, dataSizeRequired); + if (!datacenters.isEmpty()) { + return this.getResultSetWithDatacenters(nodesRequired, healthyNodes, usedNodes, datacenters); + } + // Randomly picks nodes when all nodes are equal or factor is ONE. // This happens when network topology is absent or // all nodes are on the same rack. @@ -283,6 +292,54 @@ DatanodeDetails fallBackPickNodes( return node; } + /** + * Get result set based on the pipeline placement algorithm which considers datacenters. + * @param nodesRequired - Number of datanodes required. + * @param healthyNodes - List of candidate healthy nodes. + * @param usedNodes - List of used Nodes. + * @param datacenters - Set of datacenter names. + * @return a list of datanodes + * @throws SCMException SCMException + */ + private List<DatanodeDetails> getResultSetWithDatacenters( + int nodesRequired, List<DatanodeDetails> healthyNodes, + List<DatanodeDetails> usedNodes, Set<String> datacenters) + throws SCMException { + Preconditions.checkNotNull(usedNodes); + Preconditions.checkNotNull(healthyNodes); + Preconditions.checkState(nodesRequired >= 1); + + if (nodesRequired % datacenters.size() != 0) { + String msg = String.format("Number of nodes required (%d) must be a multiple of the number of requested datacenters (%d).", + nodesRequired, datacenters.size()); + throw new SCMException(msg, SCMException.ResultCodes.INVALID_CAPACITY); + } + + int nodesRequiredPerDc = nodesRequired / datacenters.size(); + Map<String, List<DatanodeDetails>> nodesPerDatacenter = datacenters.stream() + .collect(Collectors.toMap(dc -> dc, dc -> new ArrayList<>())); + for (DatanodeDetails node : healthyNodes) { + String nodeDc = dcMapping.get(node.getHostName() + ":" + + node.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + if (nodesPerDatacenter.containsKey(nodeDc) && nodesPerDatacenter.get(nodeDc).size() < nodesRequiredPerDc) { + nodesPerDatacenter.get(nodeDc).add(node); + } + } + + List<DatanodeDetails> datanodesWithinDatacenters = new ArrayList<>(); + for (List<DatanodeDetails> nodes : nodesPerDatacenter.values()) { + if (nodes.size() < nodesRequiredPerDc) { + String msg = String.format("Unable to find enough nodes in requested datacenter. " + + "Requested %d nodes, found %d nodes.", nodesRequiredPerDc, nodes.size()); + LOG.warn(msg); + throw new SCMException(msg, + SCMException.ResultCodes.FAILED_TO_FIND_ENOUGH_NODES_WITHIN_DATACENTER); + } + datanodesWithinDatacenters.addAll(nodes); + } + return datanodesWithinDatacenters; + } + /** * Get result set based on the pipeline placement algorithm which considers * network topology and rack awareness.
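A minimal, self-contained sketch (separate from the patch) of how the `ozone.scm.dc.datanode.mapping` value is parsed and how healthy candidate nodes could be grouped per requested datacenter, mirroring ScmUtils.getDcMapping and PipelinePlacementPolicy.getResultSetWithDatacenters above. The hostnames, port and datacenter names are invented for the example, and plain host:port strings stand in for DatanodeDetails:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public final class DcGroupingSketch {

  // Mirrors ScmUtils.getDcMapping: comma-separated "host:port=dc" pairs.
  static Map<String, String> parseMapping(String value) {
    if (value == null || value.trim().isEmpty()) {
      return Collections.emptyMap();
    }
    return Arrays.stream(value.split(","))
        .map(s -> s.split("=", 2))
        .collect(Collectors.toMap(p -> p[0].trim(), p -> p[1].trim()));
  }

  public static void main(String[] args) {
    Map<String, String> dcMapping = parseMapping(
        "dn1.example.com:9858=dc1,dn2.example.com:9858=dc1,dn3.example.com:9858=dc2");
    Set<String> requestedDcs = new HashSet<>(Arrays.asList("dc1", "dc2"));
    List<String> healthyNodes = Arrays.asList(
        "dn1.example.com:9858", "dn2.example.com:9858", "dn3.example.com:9858");

    int nodesRequired = 2;  // must be a multiple of the number of requested datacenters
    int perDc = nodesRequired / requestedDcs.size();

    // Group candidates by datacenter, keeping at most perDc nodes per datacenter.
    Map<String, List<String>> perDatacenter = new HashMap<>();
    requestedDcs.forEach(dc -> perDatacenter.put(dc, new ArrayList<>()));
    for (String node : healthyNodes) {
      String dc = dcMapping.get(node);
      if (perDatacenter.containsKey(dc) && perDatacenter.get(dc).size() < perDc) {
        perDatacenter.get(dc).add(node);
      }
    }
    // Prints e.g. {dc1=[dn1.example.com:9858], dc2=[dn3.example.com:9858]};
    // if any datacenter ends up with fewer than perDc nodes, the placement fails.
    System.out.println(perDatacenter);
  }
}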
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index 0ec74d2405c4..f97693e1f5f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -68,7 +68,7 @@ protected abstract Pipeline create(REPLICATION_CONFIG replicationConfig) throws IOException; protected abstract Pipeline create(REPLICATION_CONFIG replicationConfig, - List excludedNodes, List favoredNodes) + List excludedNodes, List favoredNodes, Set datacenters) throws IOException; protected abstract Pipeline create( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 8336bce5eae7..5774432375b3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -142,12 +142,12 @@ public LeaderChoosePolicy getLeaderChoosePolicy() { public synchronized Pipeline create(RatisReplicationConfig replicationConfig) throws IOException { return create(replicationConfig, Collections.emptyList(), - Collections.emptyList()); + Collections.emptyList(), Collections.emptySet()); } @Override public synchronized Pipeline create(RatisReplicationConfig replicationConfig, - List excludedNodes, List favoredNodes) + List excludedNodes, List favoredNodes, Set datacenters) throws IOException { if (exceedPipelineNumberLimit(replicationConfig)) { throw new SCMException("Ratis pipeline number meets the limit: " + @@ -162,6 +162,9 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, replicationConfig.getReplicationFactor(); switch (factor) { case ONE: + if (!datacenters.isEmpty()) { + throw new IllegalStateException("Datacenter locality is not supported for replication factor " + factor.name()); + } dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes, containerSizeBytes, conf); break; @@ -175,7 +178,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, } } dns = placementPolicy.chooseDatanodes(excludedNodes, - favoredNodes, factor.getNumber(), minRatisVolumeSizeBytes, + favoredNodes, datacenters, factor.getNumber(), minRatisVolumeSizeBytes, containerSizeBytes); break; default: @@ -191,6 +194,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, .setNodes(dns) .setSuggestedLeaderId( suggestedLeader != null ? 
suggestedLeader.getUuid() : null) + .setDatacenters(datacenters) .build(); // Send command to datanodes to create pipeline diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java index ac390ea3664d..a7cbd994d72d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java @@ -45,12 +45,12 @@ public SimplePipelineProvider(NodeManager nodeManager, public Pipeline create(StandaloneReplicationConfig replicationConfig) throws IOException { return create(replicationConfig, Collections.emptyList(), - Collections.emptyList()); + Collections.emptyList(), Collections.emptySet()); } @Override public Pipeline create(StandaloneReplicationConfig replicationConfig, - List excludedNodes, List favoredNodes) + List excludedNodes, List favoredNodes, Set datacenters) throws IOException { List dns = pickNodesNotUsed(replicationConfig); int available = dns.size(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java index 81189538b5a7..53d25037dc89 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import java.io.IOException; +import java.util.Collections; +import java.util.Set; import static org.apache.hadoop.hdds.conf.StorageUnit.BYTES; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; @@ -47,7 +49,9 @@ public WritableContainerFactory(StorageContainerManager scm) { this.ratisProvider = new WritableRatisContainerProvider( conf, scm.getPipelineManager(), - scm.getContainerManager(), scm.getPipelineChoosePolicy()); + scm.getContainerManager(), + scm.getPipelineChoosePolicy() + ); this.standaloneProvider = ratisProvider; WritableECContainerProviderConfig ecProviderConfig = @@ -64,17 +68,17 @@ public WritableContainerFactory(StorageContainerManager scm) { } public ContainerInfo getContainer(final long size, - ReplicationConfig repConfig, String owner, ExcludeList excludeList) + ReplicationConfig repConfig, String owner, ExcludeList excludeList, Set datacenters) throws IOException { switch (repConfig.getReplicationType()) { case STAND_ALONE: return standaloneProvider - .getContainer(size, repConfig, owner, excludeList); + .getContainer(size, repConfig, owner, excludeList, Collections.emptySet()); case RATIS: - return ratisProvider.getContainer(size, repConfig, owner, excludeList); + return ratisProvider.getContainer(size, repConfig, owner, excludeList, datacenters); case EC: return ecProvider.getContainer(size, (ECReplicationConfig)repConfig, - owner, excludeList); + owner, excludeList, null); default: throw new IOException(repConfig.getReplicationType() + " is an invalid replication type"); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerProvider.java index 628aba629ad6..834413a6494d 100644 
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerProvider.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import java.io.IOException; +import java.util.Set; /** * Interface used by the WritableContainerFactory to obtain a writable container @@ -52,7 +53,7 @@ public interface WritableContainerProvider { * @throws IOException */ ContainerInfo getContainer(long size, T repConfig, - String owner, ExcludeList excludeList) + String owner, ExcludeList excludeList, Set datacenters) throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java index b528a30197ad..076bec5b14ba 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java @@ -43,6 +43,7 @@ import java.util.Collections; import java.util.List; import java.util.NavigableSet; +import java.util.Set; import static org.apache.hadoop.hdds.conf.ConfigTag.SCM; import static org.apache.hadoop.hdds.scm.node.NodeStatus.inServiceHealthy; @@ -93,7 +94,7 @@ public WritableECContainerProvider(WritableECContainerProviderConfig config, */ @Override public ContainerInfo getContainer(final long size, - ECReplicationConfig repConfig, String owner, ExcludeList excludeList) + ECReplicationConfig repConfig, String owner, ExcludeList excludeList, Set datacenters) throws IOException { int maximumPipelines = getMaximumPipelines(repConfig); int openPipelineCount; @@ -204,7 +205,7 @@ private ContainerInfo allocateContainer(ReplicationConfig repConfig, } Pipeline newPipeline = pipelineManager.createPipeline(repConfig, - excludedNodes, Collections.emptyList()); + excludedNodes, Collections.emptyList(), Collections.emptySet()); ContainerInfo container = containerManager.getMatchingContainer(size, owner, newPipeline); pipelineManager.openPipeline(newPipeline.getId()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java index f9fc651f2faa..96249842f319 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java @@ -34,7 +34,9 @@ import javax.annotation.Nullable; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.stream.Collectors; /** @@ -64,7 +66,7 @@ public WritableRatisContainerProvider(ConfigurationSource conf, @Override public ContainerInfo getContainer(final long size, - ReplicationConfig repConfig, String owner, ExcludeList excludeList) + ReplicationConfig repConfig, String owner, ExcludeList excludeList, Set datacenters) throws IOException { /* Here is the high level logic. @@ -84,12 +86,10 @@ public ContainerInfo getContainer(final long size, //TODO we need to continue the refactor to use repConfig everywhere //in downstream managers. 
+ PipelineRequestInformation req = PipelineRequestInformation.Builder.getBuilder() + .setSize(size).setDatacenters(datacenters).build(); - PipelineRequestInformation req = - PipelineRequestInformation.Builder.getBuilder().setSize(size).build(); - - ContainerInfo containerInfo = - getContainer(repConfig, owner, excludeList, req); + ContainerInfo containerInfo = getContainer(repConfig, owner, excludeList, req); if (containerInfo != null) { return containerInfo; } @@ -97,7 +97,9 @@ public ContainerInfo getContainer(final long size, try { // TODO: #CLUTIL Remove creation logic when all replication types // and factors are handled by pipeline creator - Pipeline pipeline = pipelineManager.createPipeline(repConfig); + // TODO: the pipeline is created without accounting for the excludeList; verify whether it should be passed here + Pipeline pipeline = pipelineManager.createPipeline(repConfig, Collections.emptyList(), + Collections.emptyList(), datacenters); // wait until pipeline is ready pipelineManager.waitPipelineReady(pipeline.getId(), 0); @@ -189,6 +191,12 @@ private List findPipelinesByState( Pipeline pipeline = pipelineChoosePolicy.choosePipeline( availablePipelines, req); + // no pipeline matches the request (e.g. the requested datacenters); stop searching + if (pipeline == null) { + availablePipelines.clear(); + break; + } + // look for OPEN containers that match the criteria. final ContainerInfo containerInfo = containerManager.getMatchingContainer( req.getSize(), owner, pipeline, excludeList.getContainerIds()); @@ -202,5 +210,4 @@ private List findPipelinesByState( return null; } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java index ea6a0ee70eb2..e884cdae761e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java @@ -21,7 +21,9 @@ import org.apache.hadoop.hdds.scm.PipelineRequestInformation; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import java.util.ArrayList; import java.util.List; +import java.util.Set; /** * Random choose policy that randomly chooses pipeline. @@ -34,7 +36,11 @@ public class RandomPipelineChoosePolicy implements PipelineChoosePolicy { @SuppressWarnings("java:S2245") // no need for secure random public Pipeline choosePipeline(List pipelineList, PipelineRequestInformation pri) { - return pipelineList.get(choosePipelineIndex(pipelineList, pri)); + int pipelineIndex = choosePipelineIndex(pipelineList, pri); + if (pipelineIndex == -1) { + return null; + } + return pipelineList.get(pipelineIndex); } /** @@ -45,8 +51,20 @@ public Pipeline choosePipeline(List pipelineList, * could be selected.
*/ @Override - public int choosePipelineIndex(List pipelineList, - PipelineRequestInformation pri) { - return (int) (Math.random() * pipelineList.size()); + public int choosePipelineIndex(List pipelineList, PipelineRequestInformation pri) { + Set requestedDatacenters = pri.getDatacenters(); + + List matchingIndices = new ArrayList<>(); + for (int i = 0; i < pipelineList.size(); i++) { + if (pipelineList.get(i).getDatacenters().equals(requestedDatacenters)) { + matchingIndices.add(i); + } + } + + if (matchingIndices.isEmpty()) { + return -1; + } + + return matchingIndices.get((int) (Math.random() * matchingIndices.size())); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index 0914cdd90b22..acff96eb8f14 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.protocol; import java.io.IOException; +import java.util.HashSet; import java.util.List; import java.util.stream.Collectors; @@ -197,7 +198,7 @@ public AllocateScmBlockResponseProto allocateScmBlock( request.getEcReplicationConfig()), request.getOwner(), ExcludeList.getFromProtoBuf(request.getExcludeList()), - request.getClient()); + request.getClient(), new HashSet<>(request.getDatacentersList())); AllocateScmBlockResponseProto.Builder builder = AllocateScmBlockResponseProto.newBuilder(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index e80208de8d71..1cee9d89914e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -21,15 +21,9 @@ */ package org.apache.hadoop.hdds.scm.server; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.TimeoutException; - +import com.google.common.collect.Maps; +import com.google.protobuf.BlockingService; +import com.google.protobuf.ProtocolMessageEnum; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -48,8 +42,10 @@ import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -63,12 +59,19 @@ import org.apache.hadoop.ozone.audit.SCMAction; import org.apache.hadoop.ozone.common.BlockGroup; import 
org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeoutException; -import com.google.common.collect.Maps; -import com.google.protobuf.BlockingService; -import com.google.protobuf.ProtocolMessageEnum; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.IO_EXCEPTION; @@ -78,9 +81,6 @@ import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * SCM block protocol is the protocol used by Namenode and OzoneManager to get * blocks from the SCM. @@ -184,7 +184,7 @@ public List allocateBlock( long size, int num, ReplicationConfig replicationConfig, String owner, ExcludeList excludeList, - String clientMachine + String clientMachine, Set datacenters ) throws IOException { Map auditMap = Maps.newHashMap(); auditMap.put("size", String.valueOf(size)); @@ -201,7 +201,7 @@ public List allocateBlock( try { for (int i = 0; i < num; i++) { AllocatedBlock block = scm.getScmBlockManager() - .allocateBlock(size, replicationConfig, owner, excludeList); + .allocateBlock(size, replicationConfig, owner, excludeList, datacenters); if (block != null) { // Sort the datanodes if client machine is specified final Node client = getClientNode(clientMachine); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index 87497a9f0709..ec9e3bef781b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -451,15 +451,14 @@ protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, List favoredNodes, + Set datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) { usedNodesIdentity.set(usedNodesPassed(usedNodes)); return null; } }; - dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1); - assertFalse(usedNodesIdentity.get()); - dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1); + dummyPlacementPolicy.chooseDatanodes(null, null, null, null, 1, 1, 1); Assertions.assertTrue(usedNodesIdentity.get()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 6bbbaa1df90c..50bac04a9516 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -23,6 +23,7 @@ import java.nio.file.Path; import java.time.ZoneOffset; import java.util.ArrayList; +import 
java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -207,7 +208,7 @@ public void testAllocateBlock() throws Exception { pipelineManager.createPipeline(replicationConfig); HddsTestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - replicationConfig, OzoneConsts.OZONE, new ExcludeList()); + replicationConfig, OzoneConsts.OZONE, new ExcludeList(), Collections.emptySet()); assertNotNull(block); } @@ -226,7 +227,7 @@ public void testAllocateBlockWithExclusion() throws Exception { .get(0).getId()); AllocatedBlock block = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - excludeList); + excludeList, Collections.emptySet()); assertNotNull(block); for (PipelineID id : excludeList.getPipelineIds()) { Assertions.assertNotEquals(block.getPipeline().getId(), id); @@ -237,7 +238,7 @@ public void testAllocateBlockWithExclusion() throws Exception { } block = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - excludeList); + excludeList, Collections.emptySet()); assertNotNull(block); Assertions.assertTrue( excludeList.getPipelineIds().contains(block.getPipeline().getId())); @@ -260,7 +261,7 @@ public void testAllocateBlockInParallel() { future.complete(blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList())); + new ExcludeList(), Collections.emptySet())); } catch (IOException e) { future.completeExceptionally(e); } @@ -301,7 +302,7 @@ public void testBlockDistribution() throws Exception { AllocatedBlock block = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); long containerId = block.getBlockID().getContainerID(); if (!allocatedBlockMap.containsKey(containerId)) { blockList = new ArrayList<>(); @@ -362,7 +363,7 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { AllocatedBlock block = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); long containerId = block.getBlockID().getContainerID(); if (!allocatedBlockMap.containsKey(containerId)) { blockList = new ArrayList<>(); @@ -430,7 +431,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { AllocatedBlock block = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); long containerId = block.getBlockID().getContainerID(); if (!allocatedBlockMap.containsKey(containerId)) { blockList = new ArrayList<>(); @@ -473,7 +474,7 @@ public void testAllocateOversizedBlock() { long size = 6 * GB; Throwable t = Assertions.assertThrows(IOException.class, () -> blockManager.allocateBlock(size, - replicationConfig, OzoneConsts.OZONE, new ExcludeList())); + replicationConfig, OzoneConsts.OZONE, new ExcludeList(), Collections.emptySet())); Assertions.assertEquals("Unsupported block size: " + size, t.getMessage()); } @@ -486,7 +487,7 @@ public void testAllocateBlockFailureInSafeMode() { // Test1: In safe mode expect an SCMException. 
Throwable t = Assertions.assertThrows(IOException.class, () -> blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - replicationConfig, OzoneConsts.OZONE, new ExcludeList())); + replicationConfig, OzoneConsts.OZONE, new ExcludeList(), Collections.emptySet())); Assertions.assertEquals("SafeModePrecheck failed for allocateBlock", t.getMessage()); } @@ -495,7 +496,7 @@ public void testAllocateBlockFailureInSafeMode() { public void testAllocateBlockSucInSafeMode() throws Exception { // Test2: Exit safe mode and then try allocateBock again. assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - replicationConfig, OzoneConsts.OZONE, new ExcludeList())); + replicationConfig, OzoneConsts.OZONE, new ExcludeList(), Collections.emptySet())); } @Test @@ -509,14 +510,14 @@ public void testMultipleBlockAllocation() AllocatedBlock allocatedBlock = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); // block should be allocated in different pipelines GenericTestUtils.waitFor(() -> { try { AllocatedBlock block = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); return !block.getPipeline().getId() .equals(allocatedBlock.getPipeline().getId()); } catch (IOException e) { @@ -563,7 +564,7 @@ public void testMultipleBlockAllocationWithClosedContainer() blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); } catch (IOException e) { } return verifyNumberOfContainersInPipelines( @@ -588,7 +589,7 @@ public void testMultipleBlockAllocationWithClosedContainer() blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList()); + new ExcludeList(), Collections.emptySet()); } catch (IOException e) { } return verifyNumberOfContainersInPipelines( @@ -607,7 +608,7 @@ public void testBlockAllocationWithNoAvailablePipelines() pipelineManager.getPipelines(replicationConfig).size()); assertNotNull(blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, - new ExcludeList())); + new ExcludeList(), Collections.emptySet())); } private class DatanodeCommandHandler implements diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java index 447c93ae9854..2db8cc8640f3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -152,7 +152,7 @@ public void testRackAwarePolicy() throws IOException { int nodeNum = 3; List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 15, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1))); @@ -190,6 +190,7 @@ public List chooseDatanodes( List usedNodes, List excludedNodes, List favoredNodes, + Set datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) { return null; } diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index e51f9731ad4a..a7016dff8af8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -18,6 +18,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -133,7 +134,7 @@ public void chooseDatanodes() throws SCMException { //when List datanodeDetails = scmContainerPlacementRandom - .chooseDatanodes(existingNodes, null, 1, 15, 15); + .chooseDatanodes(existingNodes, null, Collections.emptySet(), 1, 15, 15); //then Assertions.assertEquals(1, datanodeDetails.size()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 575460f3053f..b1abe3d95e92 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -18,6 +18,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Random; import java.util.stream.IntStream; @@ -208,19 +209,19 @@ public void chooseNodeWithNoExcludedNodes(int datanodeCount) // 1 replica int nodeNum = 1; List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 0, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // 2 replicas nodeNum = 2; - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)) || (datanodeCount % NODE_PER_RACK == 1)); // 3 replicas nodeNum = 3; - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // requires at least 2 racks for following statement assumeTrue(datanodeCount > NODE_PER_RACK && @@ -234,7 +235,7 @@ public void chooseNodeWithNoExcludedNodes(int datanodeCount) // 4 replicas nodeNum = 4; - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // requires at least 2 racks and enough datanodes for following statement assumeTrue(datanodeCount > NODE_PER_RACK + 1); @@ -260,7 +261,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); excludedNodes.add(datanodes.get(1)); List 
datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertFalse(cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(0))); @@ -272,7 +273,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.clear(); excludedNodes.add(datanodes.get(0)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent( datanodeDetails.get(0), excludedNodes.get(0)) || @@ -284,7 +285,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); excludedNodes.add(datanodes.get(5)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent( datanodeDetails.get(0), excludedNodes.get(0)) || @@ -301,7 +302,7 @@ public void testSingleNodeRack(int datanodeCount) throws SCMException { excludeNodes.add(datanodes.get(datanodeCount - 1)); excludeNodes.add(datanodes.get(0)); List chooseDatanodes = - policy.chooseDatanodes(excludeNodes, null, 1, 0, 0); + policy.chooseDatanodes(excludeNodes, null, Collections.emptySet(), 1, 0, 0); assertEquals(1, chooseDatanodes.size()); // the selected node should be on the same rack as the second exclude node Assertions.assertTrue( @@ -320,7 +321,7 @@ public void testFallback(int datanodeCount) throws SCMException { setup(datanodeCount); int nodeNum = 5; List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 0, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1))); @@ -355,7 +356,7 @@ public void testNoFallback(int datanodeCount) { // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. 
int nodeNum = 5; try { - policyNoFallback.chooseDatanodes(null, null, nodeNum, 0, 15); + policyNoFallback.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); fail("Fallback prohibited, this call should fail"); } catch (Exception e) { assertEquals("SCMException", e.getClass().getSimpleName()); @@ -385,7 +386,7 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) // no excludedNodes, only favoredNodes favoredNodes.add(datanodes.get(0)); List datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 0, 15); + excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(datanodeDetails.get(0).getNetworkFullPath(), favoredNodes.get(0).getNetworkFullPath()); @@ -397,7 +398,7 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); favoredNodes.add(datanodes.get(2)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 0, 15); + excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(datanodeDetails.get(0).getNetworkFullPath(), favoredNodes.get(0).getNetworkFullPath()); @@ -409,7 +410,7 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); favoredNodes.add(datanodes.get(0)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 0, 15); + excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertNotEquals(datanodeDetails.get(0).getNetworkFullPath(), favoredNodes.get(0).getNetworkFullPath()); @@ -423,7 +424,7 @@ public void testNoInfiniteLoop(int datanodeCount) { try { // request storage space larger than node capability - policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, STORAGE_CAPACITY + 0, 15); fail("Storage requested exceeds capacity, this call should fail"); } catch (Exception e) { assertTrue(e.getClass().getSimpleName().equals("SCMException")); @@ -488,7 +489,7 @@ public void testDatanodeWithDefaultNetworkLocation(int datanodeCount) new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true, metrics); List datanodeDetails = - newPolicy.chooseDatanodes(null, null, nodeNum, 0, 15); + newPolicy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1))); @@ -625,7 +626,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) { dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy()); try { List datanodeDetails = - policy.chooseDatanodes(null, null, 1, 0, 0); + policy.chooseDatanodes(null, null, Collections.emptySet(), 1, 0, 0); Assertions.assertEquals(dnInfos.get(index), datanodeDetails.get(0)); } catch (SCMException e) { // If we get SCMException: No satisfied datanode to meet the ... 
this is @@ -651,7 +652,7 @@ public void chooseNodeWithUsedNodesMultipleRack(int datanodeCount) usedNodes.add(datanodes.get(1)); List datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // New DN should be on different rack than DN0 & DN1 @@ -667,7 +668,7 @@ public void chooseNodeWithUsedNodesMultipleRack(int datanodeCount) usedNodes.add(datanodes.get(5)); datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // New replica should be either on rack0 or rack1 @@ -691,7 +692,7 @@ public void chooseSingleNodeRackWithUsedAndExcludeNodes() excludedNodes.add(datanodes.get(2)); List datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent( @@ -707,7 +708,7 @@ public void chooseSingleNodeRackWithUsedAndExcludeNodes() usedNodes.add(datanodes.get(0)); datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(excludedNodes.get(0).getUuid() != @@ -719,7 +720,7 @@ public void chooseSingleNodeRackWithUsedAndExcludeNodes() usedNodes.clear(); datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(excludedNodes.get(0).getUuid() != @@ -743,7 +744,7 @@ public void chooseNodeWithUsedAndExcludeNodesMultipleRack(int datanodeCount) excludedNodes.add(datanodes.get(1)); List datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); @@ -761,7 +762,7 @@ public void chooseNodeWithUsedAndExcludeNodesMultipleRack(int datanodeCount) excludedNodes.add(datanodes.get(2)); datanodeDetails = policy.chooseDatanodes(usedNodes, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); @@ -786,7 +787,7 @@ public void chooseNodeWithOnlyExcludeAndNoUsedNodes(int datanodeCount) excludedNodes.add(datanodes.get(1)); List datanodeDetails = policy.chooseDatanodes(null, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); @@ -801,7 +802,7 @@ public void chooseNodeWithOnlyExcludeAndNoUsedNodes(int datanodeCount) excludedNodes.add(datanodes.get(2)); datanodeDetails = policy.chooseDatanodes(null, - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); @@ -827,7 +828,7 @@ public void testNoFallbackWithUsedNodes(int datanodeCount) { // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. 
int nodeNum = 5; try { - policyNoFallback.chooseDatanodes(usedNodes, null, null, nodeNum, 0, 15); + policyNoFallback.chooseDatanodes(usedNodes, null, null, Collections.emptySet(), nodeNum, 0, 15); fail("Fallback prohibited, this call should fail"); } catch (Exception e) { assertEquals("SCMException", e.getClass().getSimpleName()); @@ -861,7 +862,7 @@ public void chooseNodeWithUsedAndFavouredNodesMultipleRack() favouredNodes.add(datanodes.get(2)); List datanodeDetails = policy.chooseDatanodes(usedNodes, - null, favouredNodes, nodeNum, 0, 5); + null, favouredNodes, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // Favoured node should not be returned, @@ -874,7 +875,7 @@ public void chooseNodeWithUsedAndFavouredNodesMultipleRack() favouredNodes.add(datanodes.get(6)); datanodeDetails = policy.chooseDatanodes(usedNodes, - null, favouredNodes, nodeNum, 0, 5); + null, favouredNodes, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java index b5453a5be8db..40fc7f1363c9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java @@ -278,12 +278,12 @@ public void chooseNodeWithNoExcludedNodes(int datanodeCount) // 1 replica int nodeNum = 1; List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 0, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // 2 replicas nodeNum = 2; - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(!cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)) || (datanodeCount <= NODE_PER_RACK)); @@ -291,7 +291,7 @@ public void chooseNodeWithNoExcludedNodes(int datanodeCount) // 3 replicas nodeNum = 3; if (datanodeCount > nodeNum) { - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails), Math.min(nodeNum, rackNum)); @@ -304,10 +304,10 @@ public void chooseNodeWithNoExcludedNodes(int datanodeCount) if (datanodeCount == 6) { int finalNodeNum = nodeNum; SCMException e = assertThrows(SCMException.class, - () -> policy.chooseDatanodes(null, null, finalNodeNum, 0, 15)); + () -> policy.chooseDatanodes(null, null, Collections.emptySet(), finalNodeNum, 0, 15)); assertEquals(FAILED_TO_FIND_HEALTHY_NODES, e.getResult()); } else { - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails), Math.min(nodeNum, rackNum)); @@ -321,10 +321,10 @@ 
public void chooseNodeWithNoExcludedNodes(int datanodeCount) if (datanodeCount == 11) { int finalNodeNum = nodeNum; SCMException e = assertThrows(SCMException.class, - () -> policy.chooseDatanodes(null, null, finalNodeNum, 0, 15)); + () -> policy.chooseDatanodes(null, null, Collections.emptySet(), finalNodeNum, 0, 15)); assertEquals(FAILED_TO_FIND_HEALTHY_NODES, e.getResult()); } else { - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 0, 15); + datanodeDetails = policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails), Math.min(nodeNum, rackNum)); @@ -349,7 +349,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); excludedNodes.add(datanodes.get(1)); List datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertFalse(cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(0))); @@ -362,7 +362,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.clear(); excludedNodes.add(datanodes.get(0)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails, excludedNodes), Math.min(totalNum, rackNum)); @@ -374,7 +374,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); excludedNodes.add(datanodes.get(5)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails, excludedNodes), Math.min(totalNum, rackNum)); @@ -387,12 +387,12 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) if (datanodeCount == 6) { int finalNodeNum = nodeNum; SCMException e = assertThrows(SCMException.class, - () -> policy.chooseDatanodes(excludedNodes, null, - finalNodeNum, 0, 15)); + () -> policy.chooseDatanodes(excludedNodes, null, Collections.emptySet(), + finalNodeNum, 0, 15)); assertEquals(FAILED_TO_FIND_HEALTHY_NODES, e.getResult()); } else { datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails, excludedNodes), Math.min(totalNum, rackNum)); @@ -406,7 +406,7 @@ public void chooseNodeWithExcludedNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); excludedNodes.add(datanodes.get(5)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 15); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(getRackSize(datanodeDetails, excludedNodes), Math.min(totalNum, rackNum)); @@ -424,7 +424,7 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) // no excludedNodes, only favoredNodes favoredNodes.add(datanodes.get(0)); List datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 0, 15); + excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 
0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(datanodeDetails.get(0).getNetworkFullPath(), favoredNodes.get(0).getNetworkFullPath()); @@ -436,7 +436,7 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); favoredNodes.add(datanodes.get(1)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 0, 15); + excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(datanodeDetails.get(0).getNetworkFullPath(), favoredNodes.get(0).getNetworkFullPath()); @@ -448,7 +448,7 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) excludedNodes.add(datanodes.get(0)); favoredNodes.add(datanodes.get(0)); datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 0, 15); + excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertNotEquals(datanodeDetails.get(0).getNetworkFullPath(), favoredNodes.get(0).getNetworkFullPath()); @@ -462,7 +462,7 @@ public void testNoInfiniteLoop(int datanodeCount) { try { // request storage space larger than node capability - policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, STORAGE_CAPACITY + 0, 15); fail("Storage requested exceeds capacity, this call should fail"); } catch (Exception e) { assertEquals("SCMException", e.getClass().getSimpleName()); @@ -527,7 +527,7 @@ public void testDatanodeWithDefaultNetworkLocation(int datanodeCount) new SCMContainerPlacementRackScatter(nodeManager, conf, clusterMap, true, metrics); List datanodeDetails = - newPolicy.chooseDatanodes(null, null, nodeNum, 0, 15); + newPolicy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 0, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertEquals(1, getRackSize(datanodeDetails)); } @@ -662,7 +662,7 @@ public void testPipelineProviderRackScatter() throws SCMException { List usedDns = new ArrayList<>(); List excludedDns = new ArrayList<>(); List additionalNodes = policy.chooseDatanodes(usedDns, - excludedDns, null, 3, 0, 5); + excludedDns, null, Collections.emptySet(), 3, 0, 5); assertPlacementPolicySatisfied(usedDns, additionalNodes, excludedDns, 3, true, 0); } @@ -680,13 +680,13 @@ public void testPipelineProviderRackScatterFallback() throws SCMException { List usedDns = new ArrayList<>(); List excludedDns = new ArrayList<>(); List additionalNodes = policy.chooseDatanodes(usedDns, - excludedDns, null, 3, 0, 5); + excludedDns, null, Collections.emptySet(), 3, 0, 5); assertPlacementPolicySatisfied(usedDns, additionalNodes, excludedDns, 3, true, 0); setup(3, 3); additionalNodes = policy.chooseDatanodes(usedDns, - excludedDns, null, 3, 0, 5); + excludedDns, null, Collections.emptySet(), 3, 0, 5); assertPlacementPolicySatisfied(usedDns, additionalNodes, excludedDns, 3, true, 0); } @@ -699,7 +699,7 @@ public void testValidChooseNodesWithUsedNodes() throws SCMException { List usedDns = getDatanodes(Lists.newArrayList(0, 1)); List excludedDns = getDatanodes(Lists.newArrayList(2)); List additionalNodes = policy.chooseDatanodes(usedDns, - excludedDns, null, 2, 0, 5); + excludedDns, null, Collections.emptySet(), 2, 0, 5); assertPlacementPolicySatisfied(usedDns, additionalNodes, excludedDns, 4, true, 0); } @@ -718,7 +718,7 @@ public void 
shouldChooseNodeIfNodesRequiredLessThanAdditionalRacksRequired() List chosenNodes = policy.chooseDatanodes(usedDns, excludedDns, - null, 1, 0, 5); + null, Collections.emptySet(), 1, 0, 5); Assertions.assertEquals(1, chosenNodes.size()); /* The chosen node should be node4 from the third rack because we prefer to @@ -752,7 +752,7 @@ public void shouldChooseNodeWhenOneNodeRequiredAndTwoRacksRequired() List chosenNode = policy.chooseDatanodes(usedDns, excludedDns, - null, 1, 0, 5); + null, Collections.emptySet(), 1, 0, 5); Assertions.assertEquals(1, chosenNode.size()); Assertions.assertTrue(chosenNode.get(0).equals(datanodes.get(3)) || chosenNode.get(0).equals(datanodes.get(4))); @@ -765,7 +765,7 @@ public void testChooseNodesWithInsufficientNodesAvailable() { List excludedDns = getDatanodes(Lists.newArrayList(2)); SCMException exception = Assertions.assertThrows(SCMException.class, () -> policy.chooseDatanodes(usedDns, excludedDns, - null, 3, 0, 5)); + null, Collections.emptySet(), 3, 0, 5)); assertThat(exception.getMessage(), matchesPattern("^No enough datanodes to choose.*")); assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, @@ -787,7 +787,7 @@ public void chooseNodesOnTheSameRackWhenInSufficientRacks() List excludedDns = getDatanodes(Lists.newArrayList(5)); List chosenDatanodes = - policy.chooseDatanodes(usedDns, excludedDns, null, 2, 0, 5); + policy.chooseDatanodes(usedDns, excludedDns, null, Collections.emptySet(), 2, 0, 5); Assertions.assertEquals(2, chosenDatanodes.size()); for (DatanodeDetails dn : chosenDatanodes) { @@ -846,7 +846,7 @@ public void testExcludedNodesOverlapsOutOfServiceNodes() throws SCMException { excludedNodes.add(datanodes.get(5)); List datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 0, 5); + excludedNodes, null, Collections.emptySet(), nodeNum, 0, 5); Assertions.assertEquals(nodeNum, datanodeDetails.size()); } @@ -874,7 +874,7 @@ public void testAllNodesOnRackExcludedReducesRackCount() List chosenNodes = policy.chooseDatanodes(usedDns, excludedDns, - null, 1, 0, 5); + null, Collections.emptySet(), 1, 0, 5); Assertions.assertEquals(1, chosenNodes.size()); } @@ -895,7 +895,7 @@ public void testAllNodesOnRackExcludedReducesRackCount2() List chosenNodes = policy.chooseDatanodes(usedDns, excludedDns, - null, 1, 0, 5); + null, Collections.emptySet(), 1, 0, 5); Assertions.assertEquals(1, chosenNodes.size()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index acf9dbc03c52..5cc2e8b33345 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -18,6 +18,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -100,7 +101,7 @@ public void chooseDatanodes() throws SCMException { for (int i = 0; i < 100; i++) { //when List datanodeDetails = scmContainerPlacementRandom - .chooseDatanodes(existingNodes, null, 1, 15, 15); + .chooseDatanodes(existingNodes, null, Collections.emptySet(), 1, 15, 15); //then Assertions.assertEquals(1, datanodeDetails.size()); diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java index 3f0f0e78a218..628ab0276cf5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java @@ -257,8 +257,9 @@ public static PlacementPolicy getSimpleTestPlacementPolicy( protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, int nodesRequiredToChoose, - long metadataSizeRequired, long dataSizeRequired) { + List favoredNodes, + Set datacenters, + int nodesRequiredToChoose, long metadataSizeRequired, long dataSizeRequired) { List dns = new ArrayList<>(); for (int i = 0; i < nodesRequiredToChoose; i++) { dns.add(MockDatanodeDetails.randomDatanodeDetails()); @@ -287,8 +288,9 @@ public static PlacementPolicy getSameNodeTestPlacementPolicy( protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, int nodesRequiredToChoose, - long metadataSizeRequired, long dataSizeRequired) + List favoredNodes, + Set datacenters, + int nodesRequiredToChoose, long metadataSizeRequired, long dataSizeRequired) throws SCMException { if (nodesRequiredToChoose > 1) { throw new IllegalArgumentException("Only one node is allowed"); @@ -315,8 +317,9 @@ public static PlacementPolicy getNoNodesTestPlacementPolicy( protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, int nodesRequiredToChoose, - long metadataSizeRequired, long dataSizeRequired) + List favoredNodes, + Set datacenters, + int nodesRequiredToChoose, long metadataSizeRequired, long dataSizeRequired) throws SCMException { throw new SCMException("No nodes available", FAILED_TO_FIND_SUITABLE_NODE); @@ -342,8 +345,9 @@ public static PlacementPolicy getInsufficientNodesTestPlacementPolicy( protected List chooseDatanodesInternal( List usedNodes, List excludedNodes, - List favoredNodes, int nodesRequiredToChoose, - long metadataSizeRequired, long dataSizeRequired) + List favoredNodes, + Set datacenters, + int nodesRequiredToChoose, long metadataSizeRequired, long dataSizeRequired) throws SCMException { if (nodesRequiredToChoose >= throwWhenThisOrMoreNodesRequested) { throw new SCMException("No nodes available", diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java index ead2fec3ce0e..99a0629e7849 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java @@ -91,7 +91,7 @@ public void testMisReplicationWithNoNodesReturned() throws IOException { Mockito.when(placementPolicy.validateContainerPlacement(anyList(), anyInt())).thenReturn(mockedContainerPlacementStatus); Mockito.when(placementPolicy.chooseDatanodes( - any(), any(), any(), + any(), any(), any(), Mockito.anySet(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenThrow(new IOException("No nodes found")); assertThrows(SCMException.class, () -> testMisReplication( @@ -210,7 +210,7 @@ 
public void commandsForFewerThanRequiredNodes() throws IOException { List targetDatanodes = singletonList( availableReplicas.iterator().next().getDatanodeDetails()); Mockito.when(placementPolicy.chooseDatanodes( - any(), any(), any(), + any(), any(), any(), Mockito.anySet(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenReturn(targetDatanodes); assertThrows(InsufficientDatanodesException.class, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java index 4f3c0702d712..56ecc94e642d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java @@ -91,6 +91,7 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.clearInvocations; @@ -1039,9 +1040,9 @@ public void testMaintenanceDoesNotRequestZeroNodes() throws IOException { Pair.of(IN_SERVICE, 5)); when(ecPlacementPolicy.chooseDatanodes(anyList(), anyList(), - isNull(), anyInt(), anyLong(), anyLong())) + isNull(), anySet(), anyInt(), anyLong(), anyLong())) .thenAnswer(invocationOnMock -> { - int numNodes = invocationOnMock.getArgument(3); + int numNodes = invocationOnMock.getArgument(4); List targets = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { targets.add(MockDatanodeDetails.randomDatanodeDetails()); @@ -1059,7 +1060,7 @@ public void testMaintenanceDoesNotRequestZeroNodes() throws IOException { emptyList(), result, 1); assertEquals(1, commandsSent.size()); verify(ecPlacementPolicy, times(0)) - .chooseDatanodes(anyList(), isNull(), eq(0), anyLong(), + .chooseDatanodes(anyList(), isNull(), anySet(), eq(0), anyLong(), anyLong()); } @@ -1083,7 +1084,7 @@ public void testDatanodesPendingAddAreNotSelectedAsTargets() contain the DN pending ADD. */ when(ecPlacementPolicy.chooseDatanodes(anyList(), anyList(), - isNull(), anyInt(), anyLong(), anyLong())) + isNull(), anySet(), anyInt(), anyLong(), anyLong())) .thenAnswer(invocationOnMock -> { List usedList = invocationOnMock.getArgument(0); List excludeList = invocationOnMock.getArgument(1); @@ -1208,7 +1209,7 @@ private DeleteContainerCommand createDeleteContainerCommand( /** * Helper to mock and verify calls to - * {@link PlacementPolicy#chooseDatanodes(List, List, int, long, long)}. + * {@link PlacementPolicy#chooseDatanodes(List, List, Set, int, long, long)}.
*/ private static class PlacementPolicySpy { @@ -1220,11 +1221,11 @@ private static class PlacementPolicySpy { PlacementPolicySpy(PlacementPolicy placementPolicy, int totalNodes) throws IOException { when(placementPolicy.chooseDatanodes(any(), any(), - any(), anyInt(), anyLong(), anyLong()) + any(), anySet(), anyInt(), anyLong(), anyLong()) ).thenAnswer(invocation -> { final Collection used = invocation.getArgument(0); final Collection excluded = invocation.getArgument(1); - final int nodesRequired = invocation.getArgument(3); + final int nodesRequired = invocation.getArgument(4); final int availableNodes = totalNodes - excluded.size() - used.size(); usedNodesLists.add(new ArrayList<>(used)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java index c9bd4bddbda0..dcc62c605fae 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java @@ -208,10 +208,10 @@ void setup() throws IOException, InterruptedException, ecContainerPlacementPolicy = Mockito.mock(PlacementPolicy.class); Mockito.when(ratisContainerPlacementPolicy.chooseDatanodes( - Mockito.any(), Mockito.any(), Mockito.anyInt(), + Mockito.any(), Mockito.any(), Mockito.anySet(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenAnswer(invocation -> { - int count = (int) invocation.getArguments()[2]; + int count = (int) invocation.getArguments()[3]; return IntStream.range(0, count) .mapToObj(i -> randomDatanodeDetails()) .collect(Collectors.toList()); @@ -1424,14 +1424,14 @@ public void testUnderReplicationBlockedByUnhealthyReplicas() of required targets. 
*/ Mockito.when(ratisContainerPlacementPolicy.chooseDatanodes( - Mockito.any(), Mockito.any(), Mockito.anyInt(), + Mockito.any(), Mockito.any(), Mockito.anySet(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenAnswer(invocation -> { throw new SCMException( SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); }) .thenAnswer(invocation -> { - int nodesRequired = invocation.getArgument(2); + int nodesRequired = invocation.getArgument(3); List nodes = new ArrayList<>(nodesRequired); while (nodesRequired != 0) { nodes.add(MockDatanodeDetails.randomDatanodeDetails()); @@ -1509,7 +1509,7 @@ public void testUnderReplicationBlockedByUnhealthyReplicas() public void testUnderRepQuasiClosedContainerBlockedByUnhealthyReplicas() throws IOException, TimeoutException { Mockito.when(ratisContainerPlacementPolicy.chooseDatanodes( - Mockito.anyList(), Mockito.any(), Mockito.anyInt(), + Mockito.anyList(), Mockito.any(), Mockito.anySet(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenAnswer(invocation -> { List excluded = invocation.getArgument(0); @@ -1517,7 +1517,7 @@ public void testUnderRepQuasiClosedContainerBlockedByUnhealthyReplicas() throw new SCMException( SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); } else { - int nodesRequired = invocation.getArgument(2); + int nodesRequired = invocation.getArgument(3); List nodes = new ArrayList<>(nodesRequired); while (nodesRequired != 0) { DatanodeDetails dn = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java index 9c2874740f04..8e5b871ad5c5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java @@ -59,6 +59,7 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.eq; /** @@ -201,7 +202,7 @@ protected void testMisReplication(Set availableReplicas, .collect(Collectors.toList()); if (expectedNumberOfNodes > 0) { Mockito.when(mockedPlacementPolicy.chooseDatanodes( - any(), any(), any(), + any(), any(), any(), anySet(), eq(copy.size()), Mockito.anyLong(), Mockito.anyLong())) .thenAnswer(invocation -> { List datanodeDetails = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java index 275c2f26b6d0..7747a1f36ba1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java @@ -97,7 +97,7 @@ public void testMisReplicationWithNoNodesReturned() throws IOException { Mockito.when(placementPolicy.validateContainerPlacement(anyList(), anyInt())).thenReturn(mockedContainerPlacementStatus); Mockito.when(placementPolicy.chooseDatanodes( - Mockito.any(), Mockito.any(), Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.any(), Mockito.anySet(), 
Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenThrow(new IOException("No nodes found")); Assertions.assertThrows(SCMException.class, () -> testMisReplication( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java index ce9dfb3f3dbe..b04b2a78d73d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java @@ -69,6 +69,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.Mockito.times; /** @@ -466,7 +467,7 @@ public void testOnlyHighestBcsidShouldBeASource() throws IOException { @Test public void testCorrectUsedAndExcludedNodesPassed() throws IOException { PlacementPolicy mockPolicy = Mockito.mock(PlacementPolicy.class); - Mockito.when(mockPolicy.chooseDatanodes(any(), any(), any(), + Mockito.when(mockPolicy.chooseDatanodes(any(), any(), any(), anySet(), anyInt(), anyLong(), anyLong())) .thenReturn(Collections.singletonList( MockDatanodeDetails.randomDatanodeDetails())); @@ -512,7 +513,7 @@ public void testCorrectUsedAndExcludedNodesPassed() throws IOException { Mockito.verify(mockPolicy, times(1)).chooseDatanodes( - usedNodesCaptor.capture(), excludedNodesCaptor.capture(), any(), + usedNodesCaptor.capture(), excludedNodesCaptor.capture(), any(), anySet(), anyInt(), anyLong(), anyLong()); List usedNodes = usedNodesCaptor.getValue(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 4c916023f904..a3d16dec1173 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -99,6 +99,7 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -509,7 +510,7 @@ public void testQuasiClosedContainerWithVulnerableUnhealthyReplica() assertEquals(1, repQueue.underReplicatedQueueSize()); assertEquals(0, repQueue.overReplicatedQueueSize()); - when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), + when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), anySet(), eq(1), anyLong(), anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails())); when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) .thenAnswer(invocation -> { @@ -569,7 +570,7 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit assertEquals(0, repQueue.overReplicatedQueueSize()); // next, this test sets up some mocks to test if 
RatisUnderReplicationHandler will handle this container correctly - when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), + when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), anySet(), eq(1), anyLong(), anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails())); when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) .thenAnswer(invocation -> { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java index 6ece2ecb88fc..048ec6438710 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java @@ -64,12 +64,12 @@ public MockPipelineManager(DBStore dbStore, SCMHAManager scmhaManager, public Pipeline createPipeline(ReplicationConfig replicationConfig) throws IOException { return createPipeline(replicationConfig, Collections.emptyList(), - Collections.emptyList()); + Collections.emptyList(), Collections.emptySet()); } @Override public Pipeline createPipeline(ReplicationConfig replicationConfig, - List excludedNodes, List favoredNodes) + List excludedNodes, List favoredNodes, Set datacenters) throws IOException { Pipeline pipeline; if (replicationConfig.getReplicationType() diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java index f18704b50d51..3506084dd02c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java @@ -39,6 +39,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -79,11 +80,11 @@ public void setup() throws IOException, NodeNotFoundException { StorageUnit.BYTES); // Placement policy will always return EC number of random nodes. 
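// Editorial sketch, not part of the patch: the stubs in these placement tests repeat one pattern
// after the new datacenters argument shifted nodesRequired from index 2 to index 3. A reusable
// helper (hypothetical name; assumes the excluded/favored/datacenters chooseDatanodes overload
// and the test's existing Mockito, List and MockDatanodeDetails imports) could look like this:
private static void stubRandomPlacement(PlacementPolicy policy) throws IOException {
  Mockito.when(policy.chooseDatanodes(Mockito.anyList(), Mockito.anyList(), Mockito.anySet(),
      Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong()))
      .thenAnswer(invocation -> {
        int required = invocation.getArgument(3); // nodesRequired, previously argument 2
        List<DatanodeDetails> nodes = new ArrayList<>(required);
        for (int i = 0; i < required; i++) {
          nodes.add(MockDatanodeDetails.randomDatanodeDetails());
        }
        return nodes;
      });
}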
when(placementPolicy.chooseDatanodes(Mockito.anyList(), - Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong(), + Mockito.anyList(), Mockito.anySet(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())) .thenAnswer(invocation -> { List dns = new ArrayList<>(); - for (int i = 0; i < (int) invocation.getArguments()[2]; i++) { + for (int i = 0; i < (int) invocation.getArguments()[3]; i++) { dns.add(MockDatanodeDetails.randomDatanodeDetails()); } return dns; @@ -195,12 +196,12 @@ public void testExcludedAndFavoredNodesPassedToPlacementPolicy() List favoredNodes = new ArrayList<>(); favoredNodes.add(MockDatanodeDetails.randomDatanodeDetails()); - Pipeline pipeline = provider.create(ecConf, excludedNodes, favoredNodes); + Pipeline pipeline = provider.create(ecConf, excludedNodes, favoredNodes, Collections.emptySet()); Assertions.assertEquals(EC, pipeline.getType()); Assertions.assertEquals(ecConf.getData() + ecConf.getParity(), pipeline.getNodes().size()); - verify(placementPolicy).chooseDatanodes(excludedNodes, favoredNodes, + verify(placementPolicy).chooseDatanodes(excludedNodes, favoredNodes, Collections.emptySet(), ecConf.getRequiredNodes(), 0, containerSizeBytes); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 48f82b5cc958..e420eaa47da3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -95,11 +95,12 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.doAnswer; @@ -887,7 +888,7 @@ public void testWaitForAllocatedPipeline() // Throw on pipeline creates, so no new pipelines can be created doThrow(SCMException.class).when(pipelineManagerSpy) - .createPipeline(any(), any(), anyList()); + .createPipeline(any(), any(), anyList(), anySet()); provider = new WritableRatisContainerProvider( conf, pipelineManagerSpy, containerManager, pipelineChoosingPolicy); @@ -929,7 +930,7 @@ public void testWaitForAllocatedPipeline() ContainerInfo c = provider.getContainer(1, repConfig, - owner, new ExcludeList()); + owner, new ExcludeList(), Collections.emptySet()); Assertions.assertTrue(c.equals(container), "Expected container was returned"); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java index 4288676f6727..028ca9fd8c80 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.io.File; +import java.util.Collections; import java.util.List; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -174,7 +175,7 @@ public void testDefaultPipelineProviderRackPlacement() throws Exception { int nodeNum = 3; List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 15, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2))); @@ -199,7 +200,7 @@ public void testRackScatterPipelineProviderRackPlacement() throws Exception { List excludedNodes = new ArrayList<>(); List favoredNodes = new ArrayList<>(); List datanodeDetails = - policy.chooseDatanodes(excludedNodes, excludedNodes, favoredNodes, + policy.chooseDatanodes(excludedNodes, excludedNodes, favoredNodes, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertFalse(cluster.isSameParent(datanodeDetails.get(0), @@ -224,7 +225,7 @@ public void testPipelineProviderRackPlacementAnchorChange() int nodeNum = 3; List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 15, 15); + policy.chooseDatanodes(null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // First anchor will be Node0, Since there is no more node available @@ -252,7 +253,7 @@ public void testPipelineProviderRackPlacementWithUsedNodes() usedNodes.add(datanodes.get(0)); int nodeNum = 2; List datanodeDetails = - policy.chooseDatanodes(usedNodes, null, null, nodeNum, 15, 15); + policy.chooseDatanodes(usedNodes, null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); Assertions.assertTrue(cluster.isSameParent(usedNodes.get(0), @@ -266,7 +267,7 @@ public void testPipelineProviderRackPlacementWithUsedNodes() nodeNum = 1; datanodeDetails = - policy.chooseDatanodes(usedNodes, null, null, nodeNum, 15, 15); + policy.chooseDatanodes(usedNodes, null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // Node return by policy should have different parent as node0 and node1 Assertions.assertFalse(cluster.isSameParent(usedNodes.get(0), @@ -280,7 +281,7 @@ public void testPipelineProviderRackPlacementWithUsedNodes() nodeNum = 1; datanodeDetails = - policy.chooseDatanodes(usedNodes, null, null, nodeNum, 15, 15); + policy.chooseDatanodes(usedNodes, null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // Node return by policy should have same parent as node0 or node3 Assertions.assertTrue(cluster.isSameParent(usedNodes.get(0), @@ -291,7 +292,7 @@ public void testPipelineProviderRackPlacementWithUsedNodes() usedNodes.clear(); nodeNum = 3; datanodeDetails = - policy.chooseDatanodes(usedNodes, null, null, nodeNum, 15, 15); + policy.chooseDatanodes(usedNodes, null, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); } @@ -312,7 +313,7 @@ public void testPipelineProviderRackPlacementWithUsedAndExcludeNodes() excludeNodes.add(datanodes.get(3)); int nodeNum = 2; List datanodeDetails = - policy.chooseDatanodes(usedNodes, excludeNodes, null, nodeNum, 15, 15); + 
policy.chooseDatanodes(usedNodes, excludeNodes, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // policy should not return any of excluded node Assertions.assertNotSame(datanodeDetails.get(0).getUuid(), @@ -336,7 +337,7 @@ public void testPipelineProviderRackPlacementWithUsedAndExcludeNodes() excludeNodes.add(datanodes.get(2)); nodeNum = 1; datanodeDetails = - policy.chooseDatanodes(usedNodes, excludeNodes, null, nodeNum, 15, 15); + policy.chooseDatanodes(usedNodes, excludeNodes, null, Collections.emptySet(), nodeNum, 15, 15); Assertions.assertEquals(nodeNum, datanodeDetails.size()); // policy should not return any of excluded node Assertions.assertNotSame(datanodeDetails.get(0).getUuid(), diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 325cb671afed..6f02c457e3c1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -20,6 +20,7 @@ import java.io.File; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -206,6 +207,7 @@ public void testChooseNodeWithSingleNodeRack() throws IOException { List results = localPlacementPolicy.chooseDatanodes( new ArrayList<>(datanodes.size()), new ArrayList<>(datanodes.size()), + Collections.emptySet(), nodesRequired, 0, 0); Assertions.assertEquals(nodesRequired, results.size()); @@ -246,7 +248,7 @@ public void testChooseNodeNotEnoughSpace() throws IOException { try { // A huge container size localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), - new ArrayList<>(datanodes.size()), nodesRequired, + new ArrayList<>(datanodes.size()), Collections.emptySet(), nodesRequired, 0, 10 * OzoneConsts.TB); Assertions.fail("SCMException should have been thrown."); } catch (SCMException ex) { @@ -256,7 +258,7 @@ public void testChooseNodeNotEnoughSpace() throws IOException { try { // a huge free space min configured localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), - new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB, + new ArrayList<>(datanodes.size()), Collections.emptySet(), nodesRequired, 10 * OzoneConsts.TB, 0); Assertions.fail("SCMException should have been thrown."); } catch (SCMException ex) { @@ -274,7 +276,7 @@ public void testPickLowestLoadAnchor() throws IOException, TimeoutException { for (int i = 0; i < maxPipelineCount; i++) { try { List nodes = placementPolicy.chooseDatanodes(null, - null, HddsProtos.ReplicationFactor.THREE.getNumber(), 0, 0); + null, Collections.emptySet(), HddsProtos.ReplicationFactor.THREE.getNumber(), 0, 0); Pipeline pipeline = Pipeline.newBuilder() .setId(PipelineID.randomId()) @@ -435,6 +437,7 @@ public void testHeavyNodeShouldBeExcludedWithMinorityHeavy() List pickedNodes1 = placementPolicy.chooseDatanodes( new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), + Collections.emptySet(), nodesRequired, 0, 0); // modify node to pipeline mapping. 
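// Editorial sketch, not part of the patch: every call above passes Collections.emptySet(), i.e.
// "no datacenter constraint". A constrained request would pass the allowed datacenter names
// instead ("dc1" and "dc2" are hypothetical labels; assumes the same placementPolicy instance
// and a test method that declares throws IOException):
Set<String> allowedDatacenters = new HashSet<>(Arrays.asList("dc1", "dc2"));
List<DatanodeDetails> constrainedPick = placementPolicy.chooseDatanodes(
    new ArrayList<>(), new ArrayList<>(), allowedDatacenters, 3, 0, 0);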
insertHeavyNodesIntoNodeManager(healthyNodes, minorityHeavy); @@ -451,6 +454,7 @@ public void testHeavyNodeShouldBeExcludedWithMinorityHeavy() placementPolicy.chooseDatanodes( new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), + Collections.emptySet(), nodesRequired, 0, 0)); } @@ -468,6 +472,7 @@ public void testHeavyNodeShouldBeExcludedWithMajorityHeavy() placementPolicy.chooseDatanodes( new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), + Collections.emptySet(), nodesRequired, 0, 0)); } @@ -560,7 +565,7 @@ public void test3NodesInSameRackReturnedWhenOnlyOneHealthyRackIsPresent() // As there is only 1 rack alive, the 3 DNs on /rack2 should be returned List pickedDns = placementPolicy.chooseDatanodes( - new ArrayList<>(), new ArrayList<>(), nodesRequired, 0, 0); + new ArrayList<>(), new ArrayList<>(), Collections.emptySet(), nodesRequired, 0, 0); Assertions.assertEquals(3, pickedDns.size()); Assertions.assertTrue(pickedDns.contains(dns.get(1))); @@ -581,7 +586,7 @@ public void testExceptionIsThrownWhenRackAwarePipelineCanNotBeCreated() Throwable t = Assertions.assertThrows(SCMException.class, () -> placementPolicy.chooseDatanodes( - new ArrayList<>(), new ArrayList<>(), nodesRequired, 0, 0)); + new ArrayList<>(), new ArrayList<>(), Collections.emptySet(), nodesRequired, 0, 0)); Assertions.assertEquals(PipelinePlacementPolicy.MULTIPLE_RACK_PIPELINE_MSG, t.getMessage()); } @@ -601,7 +606,7 @@ public void testExceptionThrownRackAwarePipelineCanNotBeCreatedExcludedNode() excluded.add(dns.get(0)); Throwable t = Assertions.assertThrows(SCMException.class, () -> placementPolicy.chooseDatanodes( - excluded, new ArrayList<>(), nodesRequired, 0, 0)); + excluded, new ArrayList<>(), Collections.emptySet(), nodesRequired, 0, 0)); Assertions.assertEquals(PipelinePlacementPolicy.MULTIPLE_RACK_PIPELINE_MSG, t.getMessage()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index fa96f9ed0cbb..5985fff4cccf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -307,7 +307,7 @@ public void testCreateFactorTHREEPipelineWithExcludedDatanodes() Pipeline pipeline1 = provider.create( RatisReplicationConfig.getInstance(ReplicationFactor.THREE), - excludedNodes, Collections.EMPTY_LIST); + excludedNodes, Collections.EMPTY_LIST, Collections.emptySet()); for (DatanodeDetails dn : pipeline1.getNodes()) { assertFalse(excludedNodes.contains(dn)); @@ -329,7 +329,7 @@ public void testFactorTHREEPipelineRackScatterEngagement() Assertions.assertThrows(SCMException.class, () -> provider.create(RatisReplicationConfig .getInstance(ReplicationFactor.THREE), - excludedNodes, Collections.EMPTY_LIST)); + excludedNodes, Collections.EMPTY_LIST, Collections.emptySet())); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java index bbedb502fc9a..4c5f916a1214 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java @@ -189,7 +189,7 @@ private Set assertDistinctContainers(int n) Set allocatedContainers = new HashSet<>(); for (int i = 0; i < n; i++) { ContainerInfo container = - provider.getContainer(1, repConfig, OWNER, new ExcludeList()); + provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null); assertFalse(allocatedContainers.contains(container), "Provided existing container for request " + i); allocatedContainers.add(container); @@ -201,7 +201,7 @@ private void assertReusesExisting(Set existing, int n) throws IOException { for (int i = 0; i < 3 * n; i++) { ContainerInfo container = - provider.getContainer(1, repConfig, OWNER, new ExcludeList()); + provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null); assertTrue(existing.contains(container), "Provided new container for request " + i); } @@ -215,7 +215,7 @@ public void testPiplineLimitIgnoresExcludedPipelines( Set allocatedContainers = new HashSet<>(); for (int i = 0; i < providerConf.getMinimumPipelines(); i++) { ContainerInfo container = provider.getContainer( - 1, repConfig, OWNER, new ExcludeList()); + 1, repConfig, OWNER, new ExcludeList(), null); allocatedContainers.add(container); } // We have the min limit of pipelines, but then exclude one. It should use @@ -226,7 +226,7 @@ public void testPiplineLimitIgnoresExcludedPipelines( .stream().findFirst().get().getPipelineID(); exclude.addPipeline(excludedID); - ContainerInfo c = provider.getContainer(1, repConfig, OWNER, exclude); + ContainerInfo c = provider.getContainer(1, repConfig, OWNER, exclude, null); assertNotEquals(excludedID, c.getPipelineID()); assertTrue(allocatedContainers.contains(c)); } @@ -241,7 +241,7 @@ public void testNewPipelineNotCreatedIfAllPipelinesExcluded( Set allocatedContainers = new HashSet<>(); for (int i = 0; i < providerConf.getMinimumPipelines(); i++) { ContainerInfo container = provider.getContainer( - 1, repConfig, OWNER, new ExcludeList()); + 1, repConfig, OWNER, new ExcludeList(), null); allocatedContainers.add(container); } // We have the min limit of pipelines, but then exclude them all @@ -250,7 +250,7 @@ public void testNewPipelineNotCreatedIfAllPipelinesExcluded( exclude.addPipeline(c.getPipelineID()); } assertThrows(IOException.class, () -> provider.getContainer( - 1, repConfig, OWNER, exclude)); + 1, repConfig, OWNER, exclude, null)); } @ParameterizedTest @@ -261,7 +261,7 @@ void newPipelineCreatedIfSoftLimitReached(PipelineChoosePolicy policy) providerConf.setMinimumPipelines(1); provider = createSubject(policy); ContainerInfo container = provider.getContainer( - 1, repConfig, OWNER, new ExcludeList()); + 1, repConfig, OWNER, new ExcludeList(), null); ExcludeList exclude = new ExcludeList(); exclude.addPipeline(container.getPipelineID()); @@ -269,7 +269,7 @@ void newPipelineCreatedIfSoftLimitReached(PipelineChoosePolicy policy) pipelineManager.getPipeline(container.getPipelineID()).getFirstNode()); ContainerInfo newContainer = provider.getContainer( - 1, repConfig, OWNER, exclude); + 1, repConfig, OWNER, exclude, null); assertNotSame(container, newContainer); } @@ -283,7 +283,7 @@ public void testNewPipelineNotCreatedIfAllContainersExcluded( Set allocatedContainers = new HashSet<>(); for (int i = 0; i < providerConf.getMinimumPipelines(); i++) { ContainerInfo container = provider.getContainer( - 1, repConfig, OWNER, new ExcludeList()); + 1, repConfig, OWNER, new ExcludeList(), null); 
allocatedContainers.add(container); } // We have the min limit of pipelines, but then exclude all the associated @@ -293,7 +293,7 @@ public void testNewPipelineNotCreatedIfAllContainersExcluded( exclude.addConatinerId(c.containerID()); } assertThrows(IOException.class, () -> provider.getContainer( - 1, repConfig, OWNER, exclude)); + 1, repConfig, OWNER, exclude, null)); } @ParameterizedTest @@ -305,14 +305,14 @@ public void testUnableToCreateAnyPipelinesThrowsException( @Override public Pipeline createPipeline(ReplicationConfig repConf, List excludedNodes, - List favoredNodes) throws IOException { + List favoredNodes, Set datacenters) throws IOException { throw new IOException("Cannot create pipelines"); } }; provider = createSubject(policy); IOException ioException = assertThrows(IOException.class, - () -> provider.getContainer(1, repConfig, OWNER, new ExcludeList())); + () -> provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null)); assertThat(ioException.getMessage(), containsString("Cannot create pipelines")); } @@ -329,7 +329,7 @@ public void testExistingPipelineReturnedWhenNewCannotBeCreated( @Override public Pipeline createPipeline(ReplicationConfig repConf, List excludedNodes, - List favoredNodes) + List favoredNodes, Set datacenters) throws IOException { if (throwError) { throw new IOException("Cannot create pipelines"); @@ -341,13 +341,13 @@ public Pipeline createPipeline(ReplicationConfig repConf, provider = createSubject(policy); IOException ioException = assertThrows(IOException.class, - () -> provider.getContainer(1, repConfig, OWNER, new ExcludeList())); + () -> provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null)); assertThat(ioException.getMessage(), containsString("Cannot create pipelines")); for (int i = 0; i < 5; i++) { ioException = assertThrows(IOException.class, - () -> provider.getContainer(1, repConfig, OWNER, new ExcludeList())); + () -> provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null)); assertThat(ioException.getMessage(), containsString("Cannot create pipelines")); } @@ -370,13 +370,13 @@ public void testNewContainerAllocatedAndPipelinesClosedIfNoSpaceInExisting( // We ask for a space of 50 MB, and will actually need 50 MB space. ContainerInfo newContainer = provider.getContainer(50 * 1024 * 1024, repConfig, OWNER, - new ExcludeList()); + new ExcludeList(), null); assertNotNull(newContainer); assertTrue(allocatedContainers.contains(newContainer)); // Now get a new container where there is not enough space in the existing // and ensure a new container gets created. 
newContainer = provider.getContainer( - 128 * 1024 * 1024, repConfig, OWNER, new ExcludeList()); + 128 * 1024 * 1024, repConfig, OWNER, new ExcludeList(), null); assertNotNull(newContainer); assertFalse(allocatedContainers.contains(newContainer)); // The original pipelines should all be closed, triggered by the lack of @@ -410,7 +410,7 @@ public NavigableSet getContainersInPipeline( // Now attempt to get a container - any attempt to use an existing with // throw PNF and then we must allocate a new one ContainerInfo newContainer = - provider.getContainer(1, repConfig, OWNER, new ExcludeList()); + provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null); assertNotNull(newContainer); assertFalse(allocatedContainers.contains(newContainer)); } @@ -430,7 +430,7 @@ public void testContainerNotFoundWhenAttemptingToUseExisting( }).when(containerManager).getContainer(Mockito.any(ContainerID.class)); ContainerInfo newContainer = - provider.getContainer(1, repConfig, OWNER, new ExcludeList()); + provider.getContainer(1, repConfig, OWNER, new ExcludeList(), null); assertNotNull(newContainer); assertFalse(allocatedContainers.contains(newContainer)); @@ -452,7 +452,7 @@ public void testPipelineOpenButContainerRemovedFromIt( Set allocatedContainers = new HashSet<>(); for (int i = 0; i < providerConf.getMinimumPipelines(); i++) { ContainerInfo container = provider.getContainer( - 1, repConfig, OWNER, new ExcludeList()); + 1, repConfig, OWNER, new ExcludeList(), null); assertFalse(allocatedContainers.contains(container)); allocatedContainers.add(container); // Remove the container from the pipeline to simulate closing it @@ -460,7 +460,7 @@ public void testPipelineOpenButContainerRemovedFromIt( container.getPipelineID(), container.containerID()); } ContainerInfo newContainer = provider.getContainer( - 1, repConfig, OWNER, new ExcludeList()); + 1, repConfig, OWNER, new ExcludeList(), null); assertFalse(allocatedContainers.contains(newContainer)); for (ContainerInfo c : allocatedContainers) { Pipeline pipeline = pipelineManager.getPipeline(c.getPipelineID()); @@ -497,7 +497,7 @@ public void testExcludedOpenPipelineWithClosedContainerIsClosed( // expecting a new container to be created ContainerInfo containerInfo = provider.getContainer(1, repConfig, OWNER, - excludeList); + excludeList, null); assertFalse(allocated.contains(containerInfo)); for (ContainerInfo c : allocated) { Pipeline pipeline = pipelineManager.getPipeline(c.getPipelineID()); @@ -515,11 +515,11 @@ public void testExcludedNodesPassedToCreatePipelineIfProvided( // EmptyList should be passed if there are no nodes excluded. ContainerInfo container = provider.getContainer( - 1, repConfig, OWNER, excludeList); + 1, repConfig, OWNER, excludeList, null); assertNotNull(container); verify(pipelineManagerSpy).createPipeline(repConfig, - Collections.emptyList(), Collections.emptyList()); + Collections.emptyList(), Collections.emptyList(), Collections.emptySet()); // If nodes are excluded then the excluded nodes should be passed through to // the create pipeline call. 
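// Editorial sketch, not part of the patch: the verify(...) calls above assume the new
// four-argument PipelineManager.createPipeline overload. A caller with a datacenter constraint
// would forward a non-empty set in the last argument ("dc1" is a hypothetical name; assumes a
// pipelineManager in scope and IOException handled by the caller):
Pipeline constrainedPipeline = pipelineManager.createPipeline(
    RatisReplicationConfig.getInstance(ReplicationFactor.THREE),
    Collections.emptyList(), Collections.emptyList(),
    Collections.singleton("dc1"));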
@@ -528,10 +528,10 @@ public void testExcludedNodesPassedToCreatePipelineIfProvided( new ArrayList<>(excludeList.getDatanodes()); container = provider.getContainer( - 1, repConfig, OWNER, excludeList); + 1, repConfig, OWNER, excludeList, null); assertNotNull(container); verify(pipelineManagerSpy).createPipeline(repConfig, excludedNodes, - Collections.emptyList()); + Collections.emptyList(), Collections.emptySet()); } private ContainerInfo createContainer(Pipeline pipeline, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java index d5d9208adae4..2c7ac9832421 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy; import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; @@ -37,6 +38,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicLong; @@ -72,6 +74,9 @@ class TestWritableRatisContainerProvider { @Mock private ContainerManager containerManager; + @Mock + private NodeManager scmNodeManager; + @Test void returnsExistingContainer() throws Exception { Pipeline pipeline = MockPipeline.createPipeline(3); @@ -79,7 +84,8 @@ void returnsExistingContainer() throws Exception { existingPipelines(pipeline); - ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION); + ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION, + Collections.emptySet()); assertSame(existingContainer, container); verifyPipelineNotCreated(); @@ -93,7 +99,8 @@ void skipsPipelineWithoutContainer() throws Exception { Pipeline pipelineWithoutContainer = MockPipeline.createPipeline(3); existingPipelines(pipelineWithoutContainer, pipeline); - ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION); + ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION, + Collections.emptySet()); assertSame(existingContainer, container); verifyPipelineNotCreated(); @@ -103,7 +110,8 @@ void skipsPipelineWithoutContainer() throws Exception { void createsNewContainerIfNoneFound() throws Exception { ContainerInfo newContainer = createNewContainerOnDemand(); - ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION); + ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION, + Collections.emptySet()); assertSame(newContainer, container); verifyPipelineCreated(); @@ -114,7 +122,8 @@ void failsIfContainerCannotBeCreated() throws Exception { throwWhenCreatePipeline(); assertThrows(IOException.class, - () -> createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, 
NO_EXCLUSION)); + () -> createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION, + Collections.emptySet())); verifyPipelineCreated(); } @@ -144,6 +153,8 @@ private ContainerInfo createNewContainerOnDemand() throws IOException { Pipeline newPipeline = MockPipeline.createPipeline(3); when(pipelineManager.createPipeline(REPLICATION_CONFIG)) .thenReturn(newPipeline); + when(pipelineManager.createPipeline(REPLICATION_CONFIG, emptyList(), emptyList(), emptySet())) + .thenReturn(newPipeline); when(pipelineManager.getPipelines(REPLICATION_CONFIG, OPEN, emptySet(), emptySet())) .thenReturn(emptyList()) @@ -153,7 +164,7 @@ private ContainerInfo createNewContainerOnDemand() throws IOException { } private void throwWhenCreatePipeline() throws IOException { - when(pipelineManager.createPipeline(REPLICATION_CONFIG)) + when(pipelineManager.createPipeline(REPLICATION_CONFIG, emptyList(), emptyList(), emptySet())) .thenThrow(new SCMException(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE)); } @@ -166,7 +177,7 @@ private void verifyPipelineCreated() throws IOException { verify(pipelineManager, times(2)) .getPipelines(REPLICATION_CONFIG, OPEN, emptySet(), emptySet()); verify(pipelineManager) - .createPipeline(REPLICATION_CONFIG); + .createPipeline(REPLICATION_CONFIG, emptyList(), emptyList(), emptySet()); } private void verifyPipelineNotCreated() throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index 7e5fab8a6f86..7a0b0c99d801 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -34,20 +34,19 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.net.StaticMapping; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.container.common.SCMTestUtils; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -62,6 +61,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; @@ -100,7 +100,7 @@ private static class BlockManagerStub implements BlockManager { @Override public AllocatedBlock allocateBlock(long size, ReplicationConfig replicationConfig, String owner, - ExcludeList excludeList) throws 
IOException, TimeoutException { + ExcludeList excludeList, Set datacenters) throws IOException, TimeoutException { List nodes = new ArrayList<>(datanodes); Collections.shuffle(nodes); Pipeline pipeline; @@ -311,7 +311,7 @@ void testAllocateBlockWithClientMachine() throws IOException { List allocatedBlocks = server.allocateBlock( blockSize, numOfBlocks, replicationConfig, "o", - new ExcludeList(), clientAddress); + new ExcludeList(), clientAddress, Collections.emptySet()); Assertions.assertEquals(numOfBlocks, allocatedBlocks.size()); for (AllocatedBlock allocatedBlock: allocatedBlocks) { List nodesInOrder = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java index 6bb641e62b2c..02c8046a0824 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.placement; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Random; @@ -88,12 +89,12 @@ public void testCapacityPlacementYieldsBetterDataDistribution() throws long containerSize = random.nextInt(10) * OzoneConsts.GB; long metadataSize = random.nextInt(10) * OzoneConsts.GB; List nodesCapacity = - capacityPlacer.chooseDatanodes(new ArrayList<>(), null, nodesRequired, + capacityPlacer.chooseDatanodes(new ArrayList<>(), null, Collections.emptySet(), nodesRequired, metadataSize, containerSize); assertEquals(nodesRequired, nodesCapacity.size()); List nodesRandom = - randomPlacer.chooseDatanodes(nodesCapacity, null, nodesRequired, + randomPlacer.chooseDatanodes(nodesCapacity, null, Collections.emptySet(), nodesRequired, metadataSize, containerSize); // One fifth of all calls are delete diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java index f285abfaf75b..9666d88572dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.net.URI; +import java.util.Collections; import java.util.function.Function; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; @@ -120,7 +121,7 @@ private void testSafeMode(Function fsRoot) RatisReplicationConfig.getInstance(THREE); assertThrows(IOException.class, () -> cluster.getStorageContainerManager() .getWritableContainerFactory() - .getContainer(MB, replication, OZONE, new ExcludeList())); + .getContainer(MB, replication, OZONE, new ExcludeList(), Collections.emptySet())); } finally { IOUtils.closeQuietly(fs); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index b878f920193f..a0298441fc44 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -219,7 +219,8 @@ public static void setUp() 
throws Exception { any(ReplicationConfig.class), Mockito.anyString(), any(ExcludeList.class), - Mockito.anyString())).thenThrow( + Mockito.anyString(), + Mockito.anySet())).thenThrow( new SCMException("SafeModePrecheck failed for allocateBlock", ResultCodes.SAFE_MODE_EXCEPTION)); createVolume(VOLUME_NAME); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index 26380a1ad567..84e6165c736e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -114,6 +114,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.doAnswer; @@ -700,7 +701,8 @@ private void mockScmAllocationOnDn1(long containerID, any(ReplicationConfig.class), anyString(), any(ExcludeList.class), - anyString())) + anyString(), + anySet())) .thenReturn(Collections.singletonList(block)); } @@ -716,7 +718,8 @@ private void mockScmAllocationEcPipeline(long containerID, long localId) any(ECReplicationConfig.class), anyString(), any(ExcludeList.class), - anyString())) + anyString(), + anySet())) .thenReturn(Collections.singletonList(block)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java index 62c31010d25d..8ad586e4741e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java @@ -37,6 +37,7 @@ import org.junit.jupiter.api.Timeout; import java.io.IOException; +import java.util.Collections; import java.util.Optional; import java.util.concurrent.TimeoutException; @@ -105,7 +106,7 @@ public void testNumBlocksAllocated() throws IOException, TimeoutException { cluster.getStorageContainerManager().getScmBlockManager() .allocateBlock(5, RatisReplicationConfig.getInstance(ReplicationFactor.ONE), - "Test", new ExcludeList()); + "Test", new ExcludeList(), Collections.emptySet()); MetricsRecordBuilder metrics = getMetrics(SCMPipelineMetrics.class.getSimpleName()); Pipeline pipeline = block.getPipeline(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 3dea33f4bc01..d167d9b63e7c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -292,8 +292,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // return response. 
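// Editorial sketch, not part of the patch: with the extended allocateBlock signature exercised
// above, a datacenter-constrained allocation mirrors the existing test calls but passes a
// non-empty set ("dc1" is a hypothetical datacenter name; assumes the MiniOzoneCluster handle
// used elsewhere in TestSCMPipelineMetrics):
AllocatedBlock block = cluster.getStorageContainerManager().getScmBlockManager()
    .allocateBlock(5, RatisReplicationConfig.getInstance(ReplicationFactor.ONE),
        "Test", new ExcludeList(), Collections.singleton("dc1"));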
if (exception == null) { - LOG.info("created bucket: {} of layout {} in volume: {}", bucketName, - omBucketInfo.getBucketLayout(), volumeName); + LOG.info("created bucket: {} of layout {} in volume: {}, allowed dc: {}", bucketName, + omBucketInfo.getBucketLayout(), volumeName, omBucketInfo.getMetadata().get(OzoneConsts.DATACENTERS)); omMetrics.incNumBuckets(); if (isECBucket(bucketInfo)) { omMetrics.incEcBucketsTotal(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 1e6cc944fe19..34c5c5615d3b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -18,60 +18,62 @@ package org.apache.hadoop.ozone.om.request.file; -import java.io.IOException; -import java.nio.file.InvalidPathException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OzoneConfigUtil; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; -import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; -import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; -import org.apache.hadoop.ozone.om.request.validation.ValidationCondition; -import org.apache.hadoop.ozone.om.request.validation.ValidationContext; -import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.utils.UniqueId; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; +import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; +import 
org.apache.hadoop.ozone.om.request.validation.ValidationCondition; +import org.apache.hadoop.ozone.om.request.validation.ValidationContext; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.UniqueId; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.InvalidPathException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateFile; /** @@ -125,6 +127,13 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final OmBucketInfo bucketInfo = ozoneManager .getBucketInfo(keyArgs.getVolumeName(), keyArgs.getBucketName()); + + Set datacenters = Collections.emptySet(); + final String datacentersMetadata = bucketInfo.getMetadata().get(OzoneConsts.DATACENTERS); + if (StringUtils.isNotEmpty(datacentersMetadata)) { + datacenters = Arrays.stream(datacentersMetadata.split(",")).collect(Collectors.toSet()); + } + final ReplicationConfig repConfig = OzoneConfigUtil .resolveReplicationConfigPreference(type, factor, keyArgs.getEcReplicationConfig(), @@ -143,7 +152,9 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.getOMServiceId(), ozoneManager.getMetrics(), keyArgs.getSortDatanodes(), - userInfo); + datacenters, + userInfo + ); KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() .setModificationTime(Time.now()).setType(type).setFactor(factor) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 321b9b9a270b..6410fa650dc7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -18,31 +18,12 @@ package org.apache.hadoop.ozone.om.request.key; -import java.io.IOException; -import java.nio.file.InvalidPathException; -import java.util.Collections; -import java.util.List; -import java.util.Map; - import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.QuotaUtil; -import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; -import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; -import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; -import org.apache.hadoop.ozone.om.request.validation.ValidationCondition; -import org.apache.hadoop.ozone.om.request.validation.ValidationContext; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -50,18 +31,40 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.QuotaUtil; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; +import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; +import org.apache.hadoop.ozone.om.request.validation.ValidationCondition; +import org.apache.hadoop.ozone.om.request.validation.ValidationContext; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; +import 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.InvalidPathException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -98,6 +101,15 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ExcludeList.getFromProtoBuf(allocateBlockRequest.getExcludeList()); } + OmBucketInfo omBucketInfo = getBucketInfo(ozoneManager.getMetadataManager(), keyArgs.getVolumeName(), + keyArgs.getBucketName()); + + Set datacenters = Collections.emptySet(); + String datacentersMetadata = omBucketInfo != null ? omBucketInfo.getMetadata().get(OzoneConsts.DATACENTERS) : null; + if (StringUtils.isNotEmpty(datacentersMetadata)) { + datacenters = Arrays.stream(datacentersMetadata.split(",")).collect(Collectors.toSet()); + } + // TODO: Here we are allocating block with out any check for key exist in // open table or not and also with out any authorization checks. // Assumption here is that allocateBlocks with out openKey will be less. @@ -119,7 +131,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.getPreallocateBlocksMax(), ozoneManager.isGrpcBlockTokenEnabled(), ozoneManager.getOMServiceId(), ozoneManager.getMetrics(), - keyArgs.getSortDatanodes(), userInfo); + keyArgs.getSortDatanodes(), datacenters, userInfo); // Set modification time and normalize key if required. KeyArgs.Builder newKeyArgs = @@ -148,10 +160,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex) { - OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest = + AllocateBlockRequest allocateBlockRequest = getOmRequest().getAllocateBlockRequest(); - OzoneManagerProtocolProtos.KeyArgs keyArgs = + KeyArgs keyArgs = allocateBlockRequest.getKeyArgs(); OzoneManagerProtocolProtos.KeyLocation blockLocation = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index fbaf987ebc8b..01d17e6abf11 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -18,21 +18,25 @@ package org.apache.hadoop.ozone.om.request.key; -import java.io.IOException; -import java.nio.file.InvalidPathException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.audit.OMAction; import 
org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneConfigUtil; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; @@ -41,32 +45,32 @@ import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; import org.apache.hadoop.ozone.om.request.validation.ValidationCondition; import org.apache.hadoop.ozone.om.request.validation.ValidationContext; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.UniqueId; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.InvalidPathException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; @@ -131,6 +135,13 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final OmBucketInfo bucketInfo = ozoneManager .getBucketInfo(keyArgs.getVolumeName(), keyArgs.getBucketName()); + + Set datacenters = Collections.emptySet(); + final 
String datacentersMetadata = bucketInfo.getMetadata().get(OzoneConsts.DATACENTERS); + if (StringUtils.isNotEmpty(datacentersMetadata)) { + datacenters = Arrays.stream(datacentersMetadata.split(",")).collect(Collectors.toSet()); + } + final ReplicationConfig repConfig = OzoneConfigUtil .resolveReplicationConfigPreference(type, factor, keyArgs.getEcReplicationConfig(), @@ -151,6 +162,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.getOMServiceId(), ozoneManager.getMetrics(), keyArgs.getSortDatanodes(), + datacenters, userInfo); newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index b031c6106ef2..e2ac78a8b3ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -18,35 +18,33 @@ package org.apache.hadoop.ozone.om.request.key; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; +import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PrefixManager; import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.ScmClient; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; @@ -60,44 +58,41 @@ import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; import 
org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension - .EncryptedKeyVersion; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ScmClient; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.util.Time.monotonicNow; @@ -144,7 +139,7 @@ protected List< OmKeyLocationInfo > allocateBlock(ScmClient scmClient, ReplicationConfig replicationConfig, ExcludeList excludeList, long requestedSize, long scmBlockSize, int preallocateBlocksMax, boolean grpcBlockTokenEnabled, String serviceID, OMMetrics omMetrics, - boolean shouldSortDatanodes, UserInfo userInfo) + boolean shouldSortDatanodes, Set datacenters, UserInfo userInfo) throws IOException { int dataGroupSize = replicationConfig instanceof ECReplicationConfig ? 
((ECReplicationConfig) replicationConfig).getData() : 1; @@ -162,7 +157,7 @@ protected List< OmKeyLocationInfo > allocateBlock(ScmClient scmClient, try { allocatedBlocks = scmClient.getBlockClient() .allocateBlock(scmBlockSize, numBlocks, replicationConfig, serviceID, - excludeList, clientMachine); + excludeList, clientMachine, datacenters); } catch (SCMException ex) { omMetrics.incNumBlockAllocateCallFails(); if (ex.getResult() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java index 8847a2d51e3f..76732e9780fc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java @@ -44,15 +44,13 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.success; -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.unknownFailure; +import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; +import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.success; +import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.unknownFailure; /** * This is a testing client that allows us to intercept calls from OzoneManager @@ -121,10 +119,10 @@ public ScmBlockLocationTestingClient(String clusterID, String scmId, @Override public List allocateBlock(long size, int num, ReplicationConfig config, - String owner, ExcludeList excludeList, String clientMachine) + String owner, ExcludeList excludeList, String clientMachine, Set datacenters) throws IOException { DatanodeDetails datanodeDetails = randomDatanodeDetails(); - Pipeline pipeline = createPipeline(datanodeDetails); + Pipeline pipeline = createPipeline(datanodeDetails, datacenters); long containerID = Time.monotonicNow(); long localID = Time.monotonicNow(); AllocatedBlock.Builder abb = @@ -134,7 +132,7 @@ public List allocateBlock(long size, int num, return Collections.singletonList(abb.build()); } - private Pipeline createPipeline(DatanodeDetails datanode) { + private Pipeline createPipeline(DatanodeDetails datanode, Set datacenters) { List dns = new ArrayList<>(); dns.add(datanode); Pipeline pipeline = Pipeline.newBuilder() @@ -143,6 +141,7 @@ private Pipeline createPipeline(DatanodeDetails datanode) { .setReplicationConfig( StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE)) .setNodes(dns) + .setDatacenters(datacenters) .build(); return pipeline; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 178134e6941e..e1dc1a31faf0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -18,26 +18,38 @@ package org.apache.hadoop.ozone.om.request.key; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.UUID; - import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OMPerformanceMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmMetadataReader; import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; import org.apache.hadoop.ozone.om.ResolvedBucket; -import org.apache.hadoop.ozone.om.KeyManager; -import org.apache.hadoop.ozone.om.KeyManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.ScmClient; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; @@ -45,6 +57,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.AfterEach; @@ -52,32 +65,19 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; import org.mockito.Mockito; - -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import 
org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ScmClient; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; -import org.apache.hadoop.util.Time; import org.slf4j.event.Level; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.UUID; + import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.setupReplicationConfigValidation; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -193,7 +193,7 @@ public void setup() throws Exception { when(scmBlockLocationProtocol.allocateBlock(anyLong(), anyInt(), any(ReplicationConfig.class), anyString(), any(ExcludeList.class), - anyString())).thenAnswer(invocation -> { + anyString(), anySet())).thenAnswer(invocation -> { int num = invocation.getArgument(1); List allocatedBlocks = new ArrayList<>(num); for (int i = 0; i < num; i++) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java index 1f4fb530985a..b7454ad0a152 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java @@ -55,7 +55,8 @@ public Pipeline create(ReplicationConfig config) { @Override public Pipeline create(ReplicationConfig config, List excludedNodes, - List favoredNodes) { + List favoredNodes, + Set datacenters) { // We don't expect this to be called at all. But adding this as a red // flag for troubleshooting. 
throw new UnsupportedOperationException( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java index b1dfed992986..d152e7cb1325 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestBlocksEndPoint.java @@ -37,9 +37,9 @@ import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.io.TempDir; import javax.ws.rs.core.Response; @@ -47,6 +47,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.TimeoutException; @@ -277,6 +278,7 @@ protected ContainerWithPipeline getTestContainer( .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setOwner("test") .setState(state) + .setDatacenters(Collections.emptySet()) .build(); return new ContainerWithPipeline(containerInfo, localPipeline); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 38522a7b82f0..68a72982f40c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -1093,6 +1093,7 @@ protected ContainerWithPipeline getTestContainer( .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setOwner("test") .setState(state) + .setDatacenters(Collections.emptySet()) .build(); return new ContainerWithPipeline(containerInfo, localPipeline); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 4d391feaae29..314890299830 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -541,7 +541,7 @@ public void setMisRepWhenDnPresent(UUID dn) { @Override public List chooseDatanodes( List usedNodes, List excludedNodes, - List favoredNodes, + List favoredNodes, Set datacenters, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws IOException { return null; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index eb62b7d3ece1..549e67429651 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.time.Clock; import java.time.ZoneId; 
+import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.concurrent.TimeoutException; @@ -215,6 +216,7 @@ protected ContainerWithPipeline getTestContainer(LifeCycleState state) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setOwner("test") .setState(state) + .setDatacenters(Collections.emptySet()) .build(); return new ContainerWithPipeline(containerInfo, pipeline); } @@ -233,6 +235,7 @@ protected ContainerWithPipeline getTestContainer(long id, .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setOwner("test") .setState(state) + .setDatacenters(Collections.emptySet()) .build(); return new ContainerWithPipeline(containerInfo, pipeline); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java index 277c5afff217..b6227c62fe6c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java @@ -59,6 +59,10 @@ public class CreateBucketHandler extends BucketHandler { " user if not specified") private String ownerName; + @Option(names = {"--datacenters", "-dc"}, + description = "Comma-separated list of datacenters to store the bucket in") + private String datacenters; + enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE, LEGACY } @Option(names = { "--layout", "-l" }, @@ -96,6 +100,13 @@ public void execute(OzoneClient client, OzoneAddress address) bb.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(isGdprEnforced)); } + if (datacenters != null && !datacenters.isEmpty()) { + if (!datacenters.matches("^\\w+(,\\w+)*$")) { + throw new IllegalArgumentException("Invalid datacenters format. 
Expected a comma separated string."); + } + bb.addMetadata(OzoneConsts.DATACENTERS, datacenters); + } + if (bekName != null) { if (!bekName.isEmpty()) { bb.setBucketEncryptionKey(bekName); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java index 7ba62a5ce1cf..c70960783c0b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.ozone.shell.bucket; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; - import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -38,6 +38,11 @@ public class UpdateBucketHandler extends BucketHandler { description = "Owner of the bucket to set") private String ownerName; + @Option(names = {"--datacenters", "-dc"}, + description = "Comma-separated list of datacenters to store the bucket in") + private String datacenters; + + @Override protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { @@ -55,6 +60,13 @@ protected void execute(OzoneClient client, OzoneAddress address) } } + if (datacenters != null && !datacenters.isEmpty()) { + if (!datacenters.matches("^\\w+(,\\w+)*$")) { + throw new IllegalArgumentException("Invalid datacenters format. Expected a comma separated string."); + } + bucket.getMetadata().put(OzoneConsts.DATACENTERS, datacenters); + } + OzoneBucket updatedBucket = client.getObjectStore().getVolume(volumeName) .getBucket(bucketName); printObjectAsJson(updatedBucket);
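
Note (appended after the diff, not part of the patch): the hunks above wire a per-bucket "datacenters" metadata entry from the bucket shell handlers through OMKeyCreateRequest/OMAllocateBlockRequest into SCM's allocateBlock call. The following self-contained sketch mirrors that parsing and format check under simplified names; DatacentersMetadataSketch, parseDatacenters, and the literal "datacenters" key are illustrative stand-ins (the patch uses OzoneConsts.DATACENTERS, whose value is not visible in this diff), so treat it as a minimal sketch rather than the actual OM code path.

// Illustrative only: turns the bucket's datacenters metadata into the Set that
// the OM request classes in this patch hand to allocateBlock, using the same
// comma-separated format enforced by CreateBucketHandler/UpdateBucketHandler.
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public final class DatacentersMetadataSketch {

  // Same pattern the shell handler hunks validate against, e.g. "dc1,dc2".
  private static final String DATACENTERS_PATTERN = "^\\w+(,\\w+)*$";

  static Set<String> parseDatacenters(Map<String, String> bucketMetadata) {
    String raw = bucketMetadata.get("datacenters");
    if (raw == null || raw.isEmpty()) {
      // No datacenter restriction requested for this bucket.
      return Collections.emptySet();
    }
    if (!raw.matches(DATACENTERS_PATTERN)) {
      throw new IllegalArgumentException(
          "Invalid datacenters format. Expected a comma separated string.");
    }
    return Arrays.stream(raw.split(",")).collect(Collectors.toSet());
  }

  public static void main(String[] args) {
    Map<String, String> metadata =
        Collections.singletonMap("datacenters", "dc1,dc2");
    // Prints the parsed set, e.g. [dc1, dc2].
    System.out.println(parseDatacenters(metadata));
  }

  private DatacentersMetadataSketch() { }
}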
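
A second sketch, also outside the patch: reading the stored value back through the client API that UpdateBucketHandler already uses, assuming a bucket previously created or updated with --datacenters. The helper class and the volume/bucket parameters are hypothetical; only the getObjectStore()/getVolume()/getBucket()/getMetadata() chain and OzoneConsts.DATACENTERS come from the hunks above.

import java.io.IOException;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientException;

public final class DatacentersReadBackSketch {

  /**
   * Returns the raw comma-separated datacenters string stored on the bucket,
   * or null when the bucket carries no such metadata entry.
   */
  static String readDatacenters(OzoneClient client, String volume, String bucket)
      throws IOException, OzoneClientException {
    OzoneBucket ozoneBucket = client.getObjectStore()
        .getVolume(volume)
        .getBucket(bucket);
    return ozoneBucket.getMetadata().get(OzoneConsts.DATACENTERS);
  }

  private DatacentersReadBackSketch() { }
}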