Hadoop 中的 Router 是通过自己实现的连接池（ConnectionPool）来维护与 NameNode 的连接，从而响应客户端请求的。
package org.apache.hadoop.hdfs.server.federation.router;
public class ConnectionPool {
  /**
   * Picks a connection from this pool using a round-robin scan.
   *
   * <p>The scan starts at the next shared-counter position and walks the
   * whole pool once, returning the first usable connection it finds. If
   * none is usable, the last connection inspected is returned anyway
   * (possibly active), or {@code null} when the pool is empty.
   *
   * @return a usable connection if one exists; otherwise the last
   *         connection examined, or {@code null} for an empty pool
   */
  protected ConnectionContext getConnection() {
    // Record pool activity; presumably used for idle-pool cleanup
    // elsewhere — the consumer is outside this chunk.
    this.lastActiveTime = Time.now();

    // Take a snapshot reference so a concurrent replacement of the
    // list does not affect this scan.
    List<ConnectionContext> snapshot = this.connections;
    int poolSize = snapshot.size();

    // Advance the shared counter and mask off the sign bit so the
    // starting offset stays a non-negative int even after overflow.
    int start = this.clientIndex.getAndIncrement() & 0x7FFFFFFF;

    ConnectionContext candidate = null;
    for (int offset = 0; offset < poolSize; offset++) {
      candidate = snapshot.get((start + offset) % poolSize);
      if (candidate != null && candidate.isUsable()) {
        return candidate;
      }
    }

    // No usable connection found: hand back the last one inspected
    // even if it is busy, or null if the pool was empty.
    return candidate;
  }
}
ConnectionPoolId
是由 ugi、NameNode 地址（addr）和协议（protocol）三者共同确定的：
this.connectionPoolId =
new ConnectionPoolId(this.ugi, this.namenodeAddress, this.protocol);
该连接池的设计与《并发编程的艺术》第 4 章的线程应用实例几乎一致，值得学习。