RpcCall.java
@@ -18,16 +18,19 @@
package org.apache.hadoop.hbase.ipc;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.security.User;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;

import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;

/**
@@ -83,7 +86,17 @@ public interface RpcCall extends RpcCallContext {
/** Returns The request header of this call. */
RequestHeader getHeader();

ConnectionHeader getConnectionHeader();
/**
* Returns the map of attributes specified when building the Connection See the Map argument on
* {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration, ExecutorService, User, Map)}
*/
Map<String, byte[]> getConnectionAttributes();

Contributor:

nitpick: since you already have to do one more revision, can you fix this javadoc and the one below to at least add a period before "See the..."? You could also add a <br> if you want them on the next line, or better yet use @see, i.e.:

/**
 * Returns the map of attributes specified when building the Connection.
 * @see ConnectionFactory#createConnection(Configuration, ExecutorService, User, Map)
 */

The @see will automatically also do the linking.

/**
* Returns the map of attributes specified when building the request See
* {@link org.apache.hadoop.hbase.client.TableBuilder#setRequestAttribute(String, byte[])}
*/
Map<String, byte[]> getRequestAttributes();

/** Returns Port of remote address in this call */
int getRemotePort();
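The pair of accessors above gives server-side code a direct view of what the client attached at connection-build time and per request. A minimal client-side sketch of how the two kinds of attributes are set, assuming the ConnectionFactory#createConnection overload and TableBuilder#setRequestAttribute referenced in the javadoc (the class, table, and attribute names here are illustrative):

import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;

public class AttributesUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Connection attributes are sent once, in the ConnectionHeader.
    Map<String, byte[]> connAttrs =
      Collections.singletonMap("clientId", Bytes.toBytes("reporting-app"));
    try (Connection conn =
      ConnectionFactory.createConnection(conf, null, User.getCurrent(), connAttrs)) {
      // Request attributes are sent in the RequestHeader of every call made
      // through a Table built with them.
      try (Table table = conn.getTableBuilder(TableName.valueOf("t"), null)
        .setRequestAttribute("requestId", Bytes.toBytes("abc-123")).build()) {
        table.get(new Get(Bytes.toBytes("row")));
      }
    }
  }
}

Server side, a handler or coprocessor can then read both maps through RpcCall#getConnectionAttributes() and RpcCall#getRequestAttributes(), as the test coprocessor further down in this diff does.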
ServerCall.java
@@ -24,9 +24,12 @@
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.curator.shaded.com.google.common.collect.Maps;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseServerException;
@@ -48,8 +51,8 @@
import org.apache.hbase.thirdparty.com.google.protobuf.Message;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
@@ -99,6 +102,7 @@ public abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall
// cumulative size of serialized exceptions
private long exceptionSize = 0;
private final boolean retryImmediatelySupported;
private Map<String, byte[]> requestAttributes;

// This is a dirty hack to address HBASE-22539. The highest bit is for rpc ref and cleanup, and
// the rest of the bits are for WAL reference count. We can only call release if all of them are
@@ -209,8 +213,24 @@ public RequestHeader getHeader() {
}

@Override
public RPCProtos.ConnectionHeader getConnectionHeader() {
return this.connection.connectionHeader;
public Map<String, byte[]> getConnectionAttributes() {
return this.connection.connectionAttributes;
}

@Override
public Map<String, byte[]> getRequestAttributes() {
if (this.requestAttributes == null) {
if (header.getAttributeList().isEmpty()) {
this.requestAttributes = Collections.emptyMap();
} else {
this.requestAttributes = Maps.newHashMapWithExpectedSize(header.getAttributeList().size());
for (HBaseProtos.NameBytesPair nameBytesPair : header.getAttributeList()) {
this.requestAttributes.put(nameBytesPair.getName(),
nameBytesPair.getValue().toByteArray());
}
}
}
return this.requestAttributes;
}

Contributor:

I don't think we need to synchronize this method, given the low chance of threading issues and the low cost of the computation, but we should consider thread safety here nonetheless.

At the very least, we should make this.requestAttributes volatile. Then we should construct a local map here, add the values to it, and only assign this.requestAttributes = localAttributes at the end. That way, if there did happen to be two concurrent callers, neither would get an incomplete view of the request attributes.
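A minimal sketch of the safe-publication pattern the reviewer describes, assuming the field is declared volatile (the names mirror the surrounding diff; the local map is fully populated before the single assignment that makes it visible):

private volatile Map<String, byte[]> requestAttributes;

@Override
public Map<String, byte[]> getRequestAttributes() {
  if (this.requestAttributes == null) {
    Map<String, byte[]> localAttributes;
    if (header.getAttributeList().isEmpty()) {
      localAttributes = Collections.emptyMap();
    } else {
      localAttributes = Maps.newHashMapWithExpectedSize(header.getAttributeList().size());
      for (HBaseProtos.NameBytesPair nameBytesPair : header.getAttributeList()) {
        // Build up the local map; nothing is visible to other threads yet.
        localAttributes.put(nameBytesPair.getName(), nameBytesPair.getValue().toByteArray());
      }
    }
    // One volatile write publishes a fully-built map: a racing caller sees
    // either null (and rebuilds the same content) or the complete map.
    this.requestAttributes = localAttributes;
  }
  return this.requestAttributes;
}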

@Override
Expand Down
ServerRpcConnection.java
@@ -31,11 +31,14 @@
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import org.apache.commons.crypto.cipher.CryptoCipherFactory;
import org.apache.commons.crypto.random.CryptoRandom;
import org.apache.commons.crypto.random.CryptoRandomFactory;
import org.apache.curator.shaded.com.google.common.collect.Maps;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
Expand Down Expand Up @@ -75,6 +78,7 @@
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
Expand Down Expand Up @@ -103,6 +107,7 @@ abstract class ServerRpcConnection implements Closeable {
protected int remotePort;
protected InetAddress addr;
protected ConnectionHeader connectionHeader;
protected Map<String, byte[]> connectionAttributes;

/**
* Codec the client asked use.
@@ -405,6 +410,16 @@ private CodedInputStream createCis(ByteBuff buf) {
// Reads the connection header following version
private void processConnectionHeader(ByteBuff buf) throws IOException {
this.connectionHeader = ConnectionHeader.parseFrom(createCis(buf));
if (connectionHeader.getAttributeList().isEmpty()) {
this.connectionAttributes = Collections.emptyMap();
} else {
this.connectionAttributes =
Maps.newHashMapWithExpectedSize(connectionHeader.getAttributeList().size());
for (HBaseProtos.NameBytesPair nameBytesPair : connectionHeader.getAttributeList()) {
this.connectionAttributes.put(nameBytesPair.getName(),
nameBytesPair.getValue().toByteArray());
}
}
String serviceName = connectionHeader.getServiceName();
if (serviceName == null) {
throw new EmptyServiceNameException();

Contributor:

Can you add a comment on the attribute-copying block above, noting that we copy the attributes before releasing the buffer so that they don't get corrupted down the line when the buffer's underlying memory is reused for some other call?
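A sketch of how the copy block might read with the requested comment, assuming the eager-copy rationale the reviewer gives (toByteArray() makes a defensive copy, detaching each value from the request buffer):

// Copy the connection attributes out of the buffer now, before it is
// released: the buffer's underlying memory may be reused for another call,
// which would corrupt any attribute values still backed by it.
if (connectionHeader.getAttributeList().isEmpty()) {
  this.connectionAttributes = Collections.emptyMap();
} else {
  this.connectionAttributes =
    Maps.newHashMapWithExpectedSize(connectionHeader.getAttributeList().size());
  for (HBaseProtos.NameBytesPair nameBytesPair : connectionHeader.getAttributeList()) {
    // toByteArray() copies the bytes; the map never aliases the buffer.
    this.connectionAttributes.put(nameBytesPair.getName(),
      nameBytesPair.getValue().toByteArray());
  }
}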
TestRequestAndConnectionAttributes.java
@@ -27,6 +27,7 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -59,8 +60,6 @@

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;

import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

@Category({ ClientTests.class, MediumTests.class })
public class TestRequestAndConnectionAttributes {

@@ -101,15 +100,22 @@ public void setup() {
}

@Test
public void testConnectionAttributes() throws IOException {
public void testConnectionHeaderOverwrittenAttributesRemain() throws IOException {
TableName tableName = TableName.valueOf("testConnectionAttributes");
TEST_UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("0") }, 1,
HConstants.DEFAULT_BLOCKSIZE, AttributesCoprocessor.class.getName());
byte[] cf = Bytes.toBytes("0");
TEST_UTIL.createTable(tableName, new byte[][] { cf }, 1, HConstants.DEFAULT_BLOCKSIZE,
AttributesCoprocessor.class.getName());

Configuration conf = TEST_UTIL.getConfiguration();
try (Connection conn = ConnectionFactory.createConnection(conf, null,
AuthUtil.loginClient(conf), CONNECTION_ATTRIBUTES); Table table = conn.getTable(tableName)) {
Result result = table.get(new Get(Bytes.toBytes(0)));

// submit a 300 byte rowkey here to encourage netty's allocator to overwrite the connection
// header
byte[] bytes = new byte[300];
new Random().nextBytes(bytes);
Result result = table.get(new Get(bytes));

assertEquals(CONNECTION_ATTRIBUTES.size(), result.size());
for (Map.Entry<String, byte[]> attr : CONNECTION_ATTRIBUTES.entrySet()) {
byte[] val = result.getValue(Bytes.toBytes("c"), Bytes.toBytes(attr.getKey()));
@@ -270,15 +276,15 @@ public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,

// for connection attrs test
RpcCall rpcCall = RpcServer.getCurrentCall().get();
for (HBaseProtos.NameBytesPair attr : rpcCall.getHeader().getAttributeList()) {
for (Map.Entry<String, byte[]> attr : rpcCall.getRequestAttributes().entrySet()) {
result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow())
.setFamily(Bytes.toBytes("r")).setQualifier(Bytes.toBytes(attr.getName()))
.setValue(attr.getValue().toByteArray()).setType(Cell.Type.Put).setTimestamp(1).build());
.setFamily(Bytes.toBytes("r")).setQualifier(Bytes.toBytes(attr.getKey()))
.setValue(attr.getValue()).setType(Cell.Type.Put).setTimestamp(1).build());
}
for (HBaseProtos.NameBytesPair attr : rpcCall.getConnectionHeader().getAttributeList()) {
for (Map.Entry<String, byte[]> attr : rpcCall.getConnectionAttributes().entrySet()) {
result.add(c.getEnvironment().getCellBuilder().clear().setRow(get.getRow())
.setFamily(Bytes.toBytes("c")).setQualifier(Bytes.toBytes(attr.getName()))
.setValue(attr.getValue().toByteArray()).setType(Cell.Type.Put).setTimestamp(1).build());
.setFamily(Bytes.toBytes("c")).setQualifier(Bytes.toBytes(attr.getKey()))
.setValue(attr.getValue()).setType(Cell.Type.Put).setTimestamp(1).build());
}
result.sort(CellComparator.getInstance());
c.bypass();
@@ -299,15 +305,15 @@ public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WAL

private void validateRequestAttributes() {
RpcCall rpcCall = RpcServer.getCurrentCall().get();
List<HBaseProtos.NameBytesPair> attrs = rpcCall.getHeader().getAttributeList();
Map<String, byte[]> attrs = rpcCall.getRequestAttributes();
if (attrs.size() != REQUEST_ATTRIBUTES.size()) {
return;
}
for (HBaseProtos.NameBytesPair attr : attrs) {
if (!REQUEST_ATTRIBUTES.containsKey(attr.getName())) {
for (Map.Entry<String, byte[]> attr : attrs.entrySet()) {
if (!REQUEST_ATTRIBUTES.containsKey(attr.getKey())) {
return;
}
if (!Arrays.equals(REQUEST_ATTRIBUTES.get(attr.getName()), attr.getValue().toByteArray())) {
if (!Arrays.equals(REQUEST_ATTRIBUTES.get(attr.getKey()), attr.getValue())) {
return;
}
}
@@ -24,6 +24,7 @@
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
@@ -695,7 +696,12 @@ public RPCProtos.RequestHeader getHeader() {
}

@Override
public RPCProtos.ConnectionHeader getConnectionHeader() {
public Map<String, byte[]> getConnectionAttributes() {
return null;
}

@Override
public Map<String, byte[]> getRequestAttributes() {
return null;
}

@@ -24,6 +24,7 @@
import java.io.IOException;
import java.net.InetAddress;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.hadoop.hbase.CellScanner;
@@ -222,7 +223,12 @@ public RPCProtos.RequestHeader getHeader() {
}

@Override
public RPCProtos.ConnectionHeader getConnectionHeader() {
public Map<String, byte[]> getConnectionAttributes() {
return null;
}

@Override
public Map<String, byte[]> getRequestAttributes() {
return null;
}
