msn: [email protected]
Source: http://yfydz.cublog.cn
6. IPVS connection management

Like netfilter's connection tracking, connection management is an important part of IPVS, although IPVS connections are somewhat simpler than netfilter's.

6.1 The connection five-tuple

The basic requirement of connection-oriented processing is to look up a connection from the contents of a packet. As in netfilter, the key that distinguishes each IPVS connection is the five-tuple: IP protocol, source address, source port, destination address and destination port. Unlike netfilter, however, IPVS defines no notion of direction, so the request direction and the reply direction are handled by different lookup functions. Since IPVS processes requests at the INPUT hook and replies at the FORWARD hook, request and reply packets are never handled at the same hook, so a direction concept is unnecessary.

Inbound (request) direction:

/*
 *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
 *  Called for pkts coming from OUTside-to-INside.
 *  s_addr, s_port: pkt source address (foreign host)
 *  d_addr, d_port: pkt dest address (load balancer)
 */
static inline struct ip_vs_conn *__ip_vs_conn_in_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port)
{
    unsigned hash;
    struct ip_vs_conn *cp;

    // in the inbound (request) direction the hash value is computed from the
    // source triple: IP protocol, source address and source port
    hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);

    ct_read_lock(hash);

    list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
        // caddr/cport are the client address and port recorded in the connection
        if (s_addr==cp->caddr && s_port==cp->cport &&
            d_port==cp->vport && d_addr==cp->vaddr &&
            // handles the case where the connection's client port is 0,
            // which practically never happens
            ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
            protocol==cp->protocol) {
            /* HIT */
            // take a reference on the connection
            atomic_inc(&cp->refcnt);
            ct_read_unlock(hash);
            return cp;
        }
    }

    ct_read_unlock(hash);

    return NULL;
}

struct ip_vs_conn *ip_vs_conn_in_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port)
{
    struct ip_vs_conn *cp;

    cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port);
    if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt))
        // if the normal lookup finds nothing, retry once with s_port set to 0
        cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port);

    IP_VS_DBG(9, "lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
              ip_vs_proto_name(protocol),
              NIPQUAD(s_addr), ntohs(s_port),
              NIPQUAD(d_addr), ntohs(d_port),
              cp?"hit":"not hit");

    return cp;
}

There is also a function that looks up connection templates; it has no special handling for s_port == 0 and is used when looking up persistent (template) connections:

/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port)
{
    unsigned hash;
    struct ip_vs_conn *cp;

    hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);

    ct_read_lock(hash);

    list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
        if (s_addr==cp->caddr && s_port==cp->cport &&
            d_port==cp->vport && d_addr==cp->vaddr &&
            cp->flags & IP_VS_CONN_F_TEMPLATE &&
            protocol==cp->protocol) {
            /* HIT */
            atomic_inc(&cp->refcnt);
            goto out;
        }
    }
    cp = NULL;

  out:
    ct_read_unlock(hash);

    IP_VS_DBG(9, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
              ip_vs_proto_name(protocol),
              NIPQUAD(s_addr), ntohs(s_port),
              NIPQUAD(d_addr), ntohs(d_port),
              cp?"hit":"not hit");

    return cp;
}

Lookup in the outbound (reply) direction:

/*
 *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
 *  Called for pkts coming from inside-to-OUTside.
 *  s_addr, s_port: pkt source address (inside host)
 *  d_addr, d_port: pkt dest address (foreign host)
 */
struct ip_vs_conn *ip_vs_conn_out_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port)
{
    unsigned hash;
    struct ip_vs_conn *cp, *ret=NULL;

    /*
     *  Check for "full" addressed entries
     */
    // in the outbound (reply) direction the hash value is computed from the
    // destination triple: IP protocol, destination address and destination port;
    // for the same connection this gives the same value as the inbound computation
    hash = ip_vs_conn_hashkey(protocol, d_addr, d_port);

    ct_read_lock(hash);

    list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
        if (d_addr == cp->caddr && d_port == cp->cport &&
            s_port == cp->dport && s_addr == cp->daddr &&
            protocol == cp->protocol) {
            /* HIT */
            atomic_inc(&cp->refcnt);
            ret = cp;
            break;
        }
    }

    ct_read_unlock(hash);

    IP_VS_DBG(9, "lookup/out %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
              ip_vs_proto_name(protocol),
              NIPQUAD(s_addr), ntohs(s_port),
              NIPQUAD(d_addr), ntohs(d_port),
              ret?"hit":"not hit");

    return ret;
}
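To make the keying concrete: the inbound lookup hashes the packet's source triple, the outbound lookup hashes the packet's destination triple, and for the same connection both are the client's (protocol, caddr, cport), so the two directions always land in the same bucket. Below is a minimal userspace sketch of this, not kernel code; conn_hashkey() is a toy stand-in I made up for illustration, it is not the kernel's ip_vs_conn_hashkey().

/* Userspace sketch: both lookup directions key on the client triple.
 * conn_hashkey() is an invented stand-in for ip_vs_conn_hashkey(). */
#include <stdint.h>
#include <stdio.h>

#define TAB_BITS 12
#define TAB_MASK ((1u << TAB_BITS) - 1)

struct pkt { uint32_t saddr; uint16_t sport; uint32_t daddr; uint16_t dport; };

static unsigned conn_hashkey(unsigned proto, uint32_t addr, uint16_t port)
{
    /* toy multiplicative hash, for illustration only */
    return (addr * 2654435761u + port * 40503u + proto) & TAB_MASK;
}

int main(void)
{
    unsigned proto = 6;                                  /* TCP */
    uint32_t caddr = 0xc0a80164, vaddr = 0x0a000001, daddr = 0x0a000002;
    uint16_t cport = 40000, vport = 80, dport = 8080;

    struct pkt request = { caddr, cport, vaddr, vport }; /* client -> VIP, seen at INPUT   */
    struct pkt reply   = { daddr, dport, caddr, cport }; /* RS -> client,  seen at FORWARD */

    /* ip_vs_conn_in_get() hashes the packet's SOURCE triple ... */
    unsigned h_in  = conn_hashkey(proto, request.saddr, request.sport);
    /* ... while ip_vs_conn_out_get() hashes the packet's DESTINATION triple */
    unsigned h_out = conn_hashkey(proto, reply.daddr, reply.dport);

    printf("request bucket = %u, reply bucket = %u\n", h_in, h_out);  /* always equal */
    return 0;
}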
6.2 The connection hash table

Like netfilter, the IPVS connection table is implemented as a hash table. Unlike netfilter, however, its size is fixed and chosen as a kernel configuration parameter, rather than computed dynamically from the amount of system memory as netfilter does:

// the default table size is 1<<12 = 4096
/*
 *  IPVS connection entry hash table
 */
#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS   12
#endif
// if set at kernel configuration time, the minimum is 1<<8 and the maximum 1<<20
/* make sure that IP_VS_CONN_TAB_BITS is located in [8, 20] */
#if CONFIG_IP_VS_TAB_BITS < 8
#define IP_VS_CONN_TAB_BITS     8
#endif
#if CONFIG_IP_VS_TAB_BITS > 20
#define IP_VS_CONN_TAB_BITS     20
#endif
#if 8 <= CONFIG_IP_VS_TAB_BITS && CONFIG_IP_VS_TAB_BITS <= 20
#define IP_VS_CONN_TAB_BITS     CONFIG_IP_VS_TAB_BITS
#endif
#define IP_VS_CONN_TAB_SIZE     (1 << IP_VS_CONN_TAB_BITS)
#define IP_VS_CONN_TAB_MASK     (IP_VS_CONN_TAB_SIZE - 1)

The connection hash table array is allocated in the connection initialization function:

int ip_vs_conn_init(void)
{
    ......
    ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head));

Unlike netfilter, which protects its connection table with a single read-write lock, IPVS uses an array of locks; the lock protecting a given connection is selected by taking the connection's hash value modulo the number of locks, which reduces contention when connections are read and written:

// the number of locks is 1<<4 = 16
/*
 *  Fine locking granularity for big connection hash table
 */
#define CT_LOCKARRAY_BITS  4
#define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)

struct ip_vs_aligned_lock
{
    rwlock_t    l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

// the lock array for the connection table
/* lock array for conn table */
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;

// when reading or writing, the connection's hash value is taken modulo the
// number of locks to select the corresponding lock
static inline void ct_read_lock(unsigned key)
{
    read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
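The bucket-to-lock mapping is easy to see with a small userspace sketch (my own illustration, not kernel code): with the default 12 table bits and 4 lock bits there are 4096 buckets protected by 16 locks, so each lock covers 256 buckets, and the lock index is simply hash & CT_LOCKARRAY_MASK.

/* Userspace sketch of the bucket-to-lock mapping used by ct_read_lock()/ct_write_lock().
 * The macro values mirror the kernel defaults quoted above. */
#include <stdio.h>

#define IP_VS_CONN_TAB_BITS 12
#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS)   /* 4096 buckets */
#define CT_LOCKARRAY_BITS   4
#define CT_LOCKARRAY_SIZE   (1 << CT_LOCKARRAY_BITS)     /* 16 locks */
#define CT_LOCKARRAY_MASK   (CT_LOCKARRAY_SIZE - 1)

int main(void)
{
    printf("buckets=%d locks=%d buckets per lock=%d\n",
           IP_VS_CONN_TAB_SIZE, CT_LOCKARRAY_SIZE,
           IP_VS_CONN_TAB_SIZE / CT_LOCKARRAY_SIZE);

    /* a few hash values and the lock each of them selects; buckets 0 and 16
     * share lock 0, showing that one lock covers many buckets */
    unsigned samples[] = { 0, 1, 16, 17, 4095 };
    for (unsigned i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
        printf("hash %4u -> lock %u\n", samples[i], samples[i] & CT_LOCKARRAY_MASK);
    return 0;
}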
6.3 Creating a connection

/*
 *  Create a new connection entry and hash it into the ip_vs_conn_tab
 */
struct ip_vs_conn *
ip_vs_conn_new(int proto, __u32 caddr, __u16 cport, __u32 vaddr, __u16 vport,
               __u32 daddr, __u16 dport, unsigned flags,
               struct ip_vs_dest *dest)
{
    struct ip_vs_conn *cp;
    struct ip_vs_protocol *pp = ip_vs_proto_get(proto);

    // allocate the connection from the slab cache
    cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
    if (cp == NULL) {
        IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
        return NULL;
    }

    memset(cp, 0, sizeof(*cp));
    INIT_LIST_HEAD(&cp->c_list);
    init_timer(&cp->timer);
    cp->timer.data     = (unsigned long)cp;
    // the connection expiry (timeout) function
    cp->timer.function = ip_vs_conn_expire;
    // fill in the basic connection parameters
    cp->protocol       = proto;
    cp->caddr          = caddr;
    cp->cport          = cport;
    cp->vaddr          = vaddr;
    cp->vport          = vport;
    cp->daddr          = daddr;
    cp->dport          = dport;
    // connection flags: 0 for an ordinary connection;
    // IP_VS_CONN_F_NO_CPORT when cport is 0;
    // IP_VS_CONN_F_TEMPLATE for a persistent (template) connection;
    // IP_VS_CONN_F_BYPASS when the destination server dest is NULL
    cp->flags          = flags;
    spin_lock_init(&cp->lock);

    /*
     * Set the entry is referenced by the current thread before hashing
     * it in the table, so that other thread run ip_vs_random_dropentry
     * but cannot drop this entry.
     */
    // the initial reference count is 1
    atomic_set(&cp->refcnt, 1);

    // the controlled (child) connection count starts at 0
    atomic_set(&cp->n_control, 0);
    atomic_set(&cp->in_pkts, 0);

    // increase the global IPVS connection count; arguably this would be better
    // done when the connection is added to the connection table
    atomic_inc(&ip_vs_conn_count);
    if (flags & IP_VS_CONN_F_NO_CPORT)
        atomic_inc(&ip_vs_conn_no_cport_cnt);

    /* Bind the connection with a destination server */
    // bind the connection to its destination server
    ip_vs_bind_dest(cp, dest);

    /* Set its state and timeout */
    // the initial connection state is 0
    cp->state = 0;
    // the default timeout is 3 seconds
    cp->timeout = 3*HZ;

    /* Bind its packet transmitter */
    // bind the connection's packet transmit method
    ip_vs_bind_xmit(cp);

    // bind the protocol application; currently FTP over TCP is the only one,
    // hence the unlikely()
    if (unlikely(pp && atomic_read(&pp->appcnt)))
        ip_vs_bind_app(cp, pp);

    /* Hash it in the ip_vs_conn_tab finally */
    // add this connection entry to the IPVS connection table
    ip_vs_conn_hash(cp);

    // return; curiously, add_timer() is never called here to start the timer
    return cp;
}

Binding the connection to its destination server:

/*
 *  Bind a connection entry with a virtual service destination
 *  Called just after a new connection entry is created.
 */
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
    /* if dest is NULL, then return directly */
    if (!dest)
        return;

    /* Increase the refcnt counter of the dest */
    // take a reference on the destination server
    atomic_inc(&dest->refcnt);

    /* Bind with the destination and its corresponding transmitter */
    // set connection flags according to the server, mainly to determine the
    // transmit method for this connection's packets
    cp->flags |= atomic_read(&dest->conn_flags);
    // point the connection at the destination server
    cp->dest = dest;

    IP_VS_DBG(7, "Bind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
              "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
              "dest->refcnt:%d\n",
              ip_vs_proto_name(cp->protocol),
              NIPQUAD(cp->caddr), ntohs(cp->cport),
              NIPQUAD(cp->vaddr), ntohs(cp->vport),
              NIPQUAD(cp->daddr), ntohs(cp->dport),
              ip_vs_fwd_tag(cp), cp->state,
              cp->flags, atomic_read(&cp->refcnt),
              atomic_read(&dest->refcnt));

    /* Update the connection counters */
    if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
        /* It is a normal connection, so increase the inactive
           connection counter because it is in TCP SYNRECV state
           (inactive) or other protocol inacive state */
        // increase the server's inactive-connection count; at this point the
        // connection still counts as inactive
        atomic_inc(&dest->inactconns);
    } else {
        /* It is a persistent connection/template, so increase
           the peristent connection counter */
        // a persistent connection/template: increase the server's
        // persistent-connection count
        atomic_inc(&dest->persistconns);
    }

    // check whether the destination server's connection count makes it overloaded
    if (dest->u_threshold != 0 &&
        ip_vs_dest_totalconns(dest) >= dest->u_threshold)
        dest->flags |= IP_VS_DEST_F_OVERLOAD;
}

Binding the protocol application:

/*
 *  Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
    // call the protocol's app_conn_bind member function; for TCP this is
    // tcp_app_conn_bind(), which is only effective in NAT mode.
    // It checks whether the port belongs to a multi-connection application
    // protocol and, if so, binds that application to the connection,
    // the equivalent of a netfilter connection helper
    return pp->app_conn_bind(cp);
}

Binding the transmit method:

/*
 *  Bind a connection entry with the corresponding packet_xmit.
 *  Called by ip_vs_conn_new.
 */
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
    // the connection's forwarding-method flags were set when it was bound to
    // the destination server
    switch (IP_VS_FWD_METHOD(cp)) {
    case IP_VS_CONN_F_MASQ:
        // NAT transmit
        cp->packet_xmit = ip_vs_nat_xmit;
        break;

    case IP_VS_CONN_F_TUNNEL:
        // TUNNEL transmit
        cp->packet_xmit = ip_vs_tunnel_xmit;
        break;

    case IP_VS_CONN_F_DROUTE:
        // direct-routing (DR) transmit
        cp->packet_xmit = ip_vs_dr_xmit;
        break;

    case IP_VS_CONN_F_LOCALNODE:
        // local packets
        cp->packet_xmit = ip_vs_null_xmit;
        break;

    case IP_VS_CONN_F_BYPASS:
        // bypass transmit
        cp->packet_xmit = ip_vs_bypass_xmit;
        break;
    }
}
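The net effect of ip_vs_bind_xmit() is simply to turn the forwarding-method bits copied from dest->conn_flags into a per-connection function pointer that every later packet calls. Here is a minimal userspace sketch of the same dispatch pattern; the enum values and function names are invented for illustration and are not the kernel's constants.

/* Userspace sketch of binding a per-connection transmit callback from a
 * forwarding-method flag, mirroring the structure of ip_vs_bind_xmit().
 * Flag values and names are illustrative only. */
#include <stdio.h>

enum fwd_method { FWD_MASQ, FWD_TUNNEL, FWD_DROUTE };

struct conn {
    enum fwd_method fwd;
    int (*packet_xmit)(struct conn *c);   /* bound once, used for every packet */
};

static int nat_xmit(struct conn *c)    { (void)c; puts("NAT: rewrite daddr/dport, forward"); return 0; }
static int tunnel_xmit(struct conn *c) { (void)c; puts("TUN: encapsulate in IPIP, forward");  return 0; }
static int dr_xmit(struct conn *c)     { (void)c; puts("DR:  rewrite MAC only, forward");     return 0; }

static void bind_xmit(struct conn *c)
{
    switch (c->fwd) {
    case FWD_MASQ:   c->packet_xmit = nat_xmit;    break;
    case FWD_TUNNEL: c->packet_xmit = tunnel_xmit; break;
    case FWD_DROUTE: c->packet_xmit = dr_xmit;     break;
    }
}

int main(void)
{
    struct conn c = { FWD_DROUTE, NULL };
    bind_xmit(&c);          /* done once when the connection is created */
    c.packet_xmit(&c);      /* later packets just call the bound method */
    return 0;
}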
Adding the connection entry to the connection hash table:

/*
 *  Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
 *  returns bool success.
 */
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
    unsigned hash;
    int ret;

    // why not check the flags first? that would avoid computing the hash and
    // taking/releasing the lock when the entry is already hashed
    /* Hash by protocol, client address and port */
    hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport);

    ct_write_lock(hash);

    if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
        list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
        // set the HASHED flag
        cp->flags |= IP_VS_CONN_F_HASHED;
        // take another reference on the connection
        atomic_inc(&cp->refcnt);
        ret = 1;
    } else {
        IP_VS_ERR("ip_vs_conn_hash(): request for already hashed, "
                  "called from %p\n", __builtin_return_address(0));
        ret = 0;
    }

    ct_write_unlock(hash);

    return ret;
}

6.4 Releasing a connection

The connection expiry (timeout) function:

static void ip_vs_conn_expire(unsigned long data)
{
    struct ip_vs_conn *cp = (struct ip_vs_conn *)data;

    // set the connection timeout to 60 seconds
    cp->timeout = 60*HZ;

    /*
     *  hey, I'm using it
     */
    // take another reference
    atomic_inc(&cp->refcnt);

    /*
     *  do I control anybody?
     */
    // if there are controlled (child) connections, defer expiry
    if (atomic_read(&cp->n_control))
        goto expire_later;

    /*
     *  unhash it if it is hashed in the conn table
     */
    // unlink the connection entry from the connection hash table
    if (!ip_vs_conn_unhash(cp))
        goto expire_later;

    /*
     *  refcnt==1 implies I'm the only one referrer
     */
    // a reference count of 1 means the connection can be deleted now
    if (likely(atomic_read(&cp->refcnt) == 1)) {
        /* delete the timer if it is activated by other users */
        // delete the timer; if we got here because the timer fired,
        // it has already been removed
        if (timer_pending(&cp->timer))
            del_timer(&cp->timer);

        /* does anybody control me? */
        // if this connection is the child of another connection,
        // detach it from its controlling (master) connection
        if (cp->control)
            ip_vs_control_del(cp);

        // unbind the protocol application; currently only FTP exists
        if (unlikely(cp->app != NULL))
            ip_vs_unbind_app(cp);
        // unbind from the destination server
        ip_vs_unbind_dest(cp);
        // if the client port is 0, decrease the zero-port counter
        if (cp->flags & IP_VS_CONN_F_NO_CPORT)
            atomic_dec(&ip_vs_conn_no_cport_cnt);
        // decrease the global IPVS connection count
        atomic_dec(&ip_vs_conn_count);

        // free the connection back to the slab cache
        kmem_cache_free(ip_vs_conn_cachep, cp);
        return;
    }

    /* hash it back to the table */
    // the connection cannot be deleted yet; put it back into the
    // connection hash table
    ip_vs_conn_hash(cp);

  expire_later:
    IP_VS_DBG(7, "delayed: conn->refcnt-1=%d conn->n_control=%d\n",
              atomic_read(&cp->refcnt)-1,
              atomic_read(&cp->n_control));

    // re-arm the connection timer and drop the reference taken above
    ip_vs_conn_put(cp);
}
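The deletion decision hinges on the reference count: the handler frees the connection only when, after unhashing, its own reference is the last one; otherwise it hashes the entry back and re-arms the timer through ip_vs_conn_put(). Below is a minimal userspace sketch of that decision. The bookkeeping, in particular the assumption that an idle hashed connection holds a single (hash-table) reference, is my simplification for illustration, not kernel code.

/* Userspace sketch of the refcount-gated deletion decision in ip_vs_conn_expire().
 * The struct and helpers are simplified illustrations, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct conn {
    int refcnt;      /* assumed: 1 held by the hash table, +1 per in-flight user */
    int n_control;   /* number of controlled (child) connections */
    int hashed;
};

static void conn_hash(struct conn *cp) { cp->hashed = 1; cp->refcnt++; }

static int conn_unhash(struct conn *cp)
{
    if (!cp->hashed)
        return 0;
    cp->hashed = 0;
    cp->refcnt--;                    /* drop the table's reference */
    return 1;
}

/* returns 1 if the connection was freed, 0 if expiry was deferred */
static int conn_expire(struct conn *cp)
{
    cp->refcnt++;                    /* "hey, I'm using it" */
    if (cp->n_control == 0 && conn_unhash(cp)) {
        if (cp->refcnt == 1) {       /* our reference is the only one left */
            free(cp);
            return 1;
        }
        conn_hash(cp);               /* still referenced elsewhere: put it back */
    }
    cp->refcnt--;                    /* like ip_vs_conn_put(): drop our reference */
    return 0;
}

int main(void)
{
    struct conn *cp = calloc(1, sizeof(*cp));
    cp->refcnt = 1;                  /* held by the hash table */
    cp->hashed = 1;

    cp->refcnt++;                    /* a packet lookup is holding a reference */
    printf("expire while referenced: freed=%d\n", conn_expire(cp));  /* 0: deferred */
    cp->refcnt--;                    /* the lookup releases its reference */
    printf("expire when idle:        freed=%d\n", conn_expire(cp));  /* 1: freed */
    return 0;
}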
Unlinking from the connection hash table:

/*
 *  UNhashes ip_vs_conn from ip_vs_conn_tab.
 *  returns bool success.
 */
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
    unsigned hash;
    int ret;

    // why not check the flags first? that would avoid computing the hash and
    // taking/releasing the lock when the entry is not hashed
    /* unhash it and decrease its reference counter */
    hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport);

    ct_write_lock(hash);

    if (cp->flags & IP_VS_CONN_F_HASHED) {
        // remove it from the list
        list_del(&cp->c_list);
        cp->flags &= ~IP_VS_CONN_F_HASHED;
        // drop the connection reference taken when it was hashed
        atomic_dec(&cp->refcnt);
        ret = 1;
    } else
        ret = 0;

    ct_write_unlock(hash);

    return ret;
}

Detaching from the controlling (master) connection:

static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
    // ctl_cp is the controlling (master) connection
    struct ip_vs_conn *ctl_cp = cp->control;
    if (!ctl_cp) {
        IP_VS_ERR("request control DEL for uncontrolled: "
                  "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
                  NIPQUAD(cp->caddr),ntohs(cp->cport),
                  NIPQUAD(cp->vaddr),ntohs(cp->vport));
        return;
    }

    IP_VS_DBG(7, "DELeting control for: "
              "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n",
              NIPQUAD(cp->caddr),ntohs(cp->cport),
              NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport));

    // clear the connection's pointer to its controlling connection
    cp->control = NULL;
    if (atomic_read(&ctl_cp->n_control) == 0) {
        IP_VS_ERR("BUG control DEL with n=0 : "
                  "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n",
                  NIPQUAD(cp->caddr),ntohs(cp->cport),
                  NIPQUAD(cp->vaddr),ntohs(cp->vport));
        return;
    }
    // decrease the controlling connection's count of child connections
    atomic_dec(&ctl_cp->n_control);
}

Unbinding from the application:

/*
 *  Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
    // the application pointer
    struct ip_vs_app *inc = cp->app;

    if (!inc)
        return;

    // call the application's unbind handler; for FTP this function is NULL
    if (inc->unbind_conn)
        inc->unbind_conn(inc, cp);
    // call the application's connection-done handler; for FTP this is
    // ip_vs_ftp_done_conn
    if (inc->done_conn)
        inc->done_conn(inc, cp);

    // decrease the application's usage count
    ip_vs_app_inc_put(inc);

    // clear the connection's application pointer
    cp->app = NULL;
}

Unbinding the connection from its destination server:

/*
 *  Unbind a connection entry with its VS destination
 *  Called by the ip_vs_conn_expire function.
 */
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
    struct ip_vs_dest *dest = cp->dest;

    if (!dest)
        return;

    IP_VS_DBG(7, "Unbind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
              "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
              "dest->refcnt:%d\n",
              ip_vs_proto_name(cp->protocol),
              NIPQUAD(cp->caddr), ntohs(cp->cport),
              NIPQUAD(cp->vaddr), ntohs(cp->vport),
              NIPQUAD(cp->daddr), ntohs(cp->dport),
              ip_vs_fwd_tag(cp), cp->state,
              cp->flags, atomic_read(&cp->refcnt),
              atomic_read(&dest->refcnt));

    /* Update the connection counters */
    if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
        // an ordinary connection
        /* It is a normal connection, so decrease the inactconns
           or activeconns counter */
        if (cp->flags & IP_VS_CONN_F_INACTIVE) {
            // the connection is still inactive: decrease the server's
            // inactive-connection count
            atomic_dec(&dest->inactconns);
        } else {
            // the connection is active: decrease the server's
            // active-connection count
            atomic_dec(&dest->activeconns);
        }
    } else {
        /* It is a persistent connection/template, so decrease
           the peristent connection counter */
        // a persistent connection/template: decrease the server's
        // persistent-connection count
        atomic_dec(&dest->persistconns);
    }

    // clear the server's overload flag once its connection count has dropped
    // below a certain limit
    if (dest->l_threshold != 0) {
        // check whether the server's connection count has fallen below the
        // lower threshold
        if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
            dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
    } else if (dest->u_threshold != 0) {
        // check whether the server's connection count has fallen below 3/4 of
        // the upper threshold
        if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
            dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
    } else {
        // no thresholds configured: if the server is marked overloaded,
        // clear the flag
        if (dest->flags & IP_VS_DEST_F_OVERLOAD)
            dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
    }

    /*
     * Simply decrease the refcnt of the dest, because the
     * dest will be either in service's destination list
     * or in the trash.
     */
    // drop the reference on the destination server
    atomic_dec(&dest->refcnt);
}
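Taken together with the check at the end of ip_vs_bind_dest(), this gives a simple hysteresis on IP_VS_DEST_F_OVERLOAD: the flag is set when the total connection count reaches u_threshold and cleared only when the count falls below l_threshold, or below 3/4 of u_threshold when no lower threshold is configured. Below is a userspace sketch of that rule; the struct and helper names are simplified illustrations, not kernel code.

/* Userspace sketch of the overload hysteresis applied in ip_vs_bind_dest() /
 * ip_vs_unbind_dest(). The struct is a simplified illustration. */
#include <stdio.h>

struct dest {
    int totalconns;     /* active + inactive + persistent */
    int u_threshold;    /* upper threshold */
    int l_threshold;    /* lower threshold */
    int overloaded;     /* stands in for IP_VS_DEST_F_OVERLOAD */
};

static void on_conn_bound(struct dest *d)     /* mirrors the check in ip_vs_bind_dest() */
{
    d->totalconns++;
    if (d->u_threshold != 0 && d->totalconns >= d->u_threshold)
        d->overloaded = 1;
}

static void on_conn_unbound(struct dest *d)   /* mirrors the check in ip_vs_unbind_dest() */
{
    d->totalconns--;
    if (d->l_threshold != 0) {
        if (d->totalconns < d->l_threshold)
            d->overloaded = 0;
    } else if (d->u_threshold != 0) {
        if (d->totalconns * 4 < d->u_threshold * 3)
            d->overloaded = 0;
    } else {
        d->overloaded = 0;
    }
}

int main(void)
{
    struct dest d = { 0, 10, 0, 0 };          /* u_threshold=10, no l_threshold */
    for (int i = 0; i < 10; i++)
        on_conn_bound(&d);
    printf("after 10 binds: overloaded=%d\n", d.overloaded);   /* 1 */
    on_conn_unbound(&d);
    printf("at 9 conns:     overloaded=%d\n", d.overloaded);   /* still 1: 9*4 >= 10*3 */
    on_conn_unbound(&d);
    on_conn_unbound(&d);
    printf("at 7 conns:     overloaded=%d\n", d.overloaded);   /* 0: 7*4 < 10*3 */
    return 0;
}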
6.5 Other connection-release functions

6.5.1 Flushing all connections

Called when the IPVS module is removed; it works by forcing every connection timer to expire immediately so that the expiry function runs for each entry:

/*
 *      Flush all the connection entries in the ip_vs_conn_tab
 */
static void ip_vs_conn_flush(void)
{
    int idx;
    struct ip_vs_conn *cp;

  flush_again:
    // walk every bucket of the connection hash table
    for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) {
        /*
         *  Lock is actually needed in this loop.
         */
        ct_write_lock_bh(idx);

        // walk the bucket's list
        list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {

            IP_VS_DBG(4, "del connection\n");
            // make the timer expire immediately
            ip_vs_conn_expire_now(cp);
            // if there is a controlling connection, expire its timer
            // immediately as well
            if (cp->control) {
                IP_VS_DBG(4, "del conn template\n");
                ip_vs_conn_expire_now(cp->control);
            }
        }
        ct_write_unlock_bh(idx);
    }

    /* the counter may be not NULL, because maybe some conn entries
       are run by slow timer handler or unhashed but still referred */
    if (atomic_read(&ip_vs_conn_count) != 0) {
        // the connection count is not yet 0: reschedule and flush again
        schedule();
        goto flush_again;
    }
}

Forcing a connection timer to expire immediately:

void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
    if (del_timer(&cp->timer))
        mod_timer(&cp->timer, jiffies);
}

6.5.2 Periodic random dropping of connections

This function is called periodically from the IPVS defense timer handler defense_work_handler():

/* Called from keventd and must protect itself from softirqs */
void ip_vs_random_dropentry(void)
{
    int idx;
    struct ip_vs_conn *cp;

    /*
     * Randomly scan 1/32 of the whole table every second
     */
    for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) {
        // pick a random hash bucket
        unsigned hash = net_random() & IP_VS_CONN_TAB_MASK;

        /*
         *  Lock is actually needed in this loop.
         */
        ct_write_lock_bh(hash);

        list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
            // never drop template connections
            if (cp->flags & IP_VS_CONN_F_TEMPLATE)
                /* connection template */
                continue;

            if (cp->protocol == IPPROTO_TCP) {
                switch(cp->state) {
                case IP_VS_TCP_S_SYN_RECV:
                case IP_VS_TCP_S_SYNACK:
                    // TCP half-open connections: candidates for dropping
                    break;

                case IP_VS_TCP_S_ESTABLISHED:
                    // check whether this connection should be dropped
                    if (todrop_entry(cp))
                        break;
                    continue;

                default:
                    // connections in other TCP states are not dropped
                    continue;
                }
            } else {
                // for other protocols, just check whether to drop the connection
                if (!todrop_entry(cp))
                    continue;
            }

            IP_VS_DBG(4, "del connection\n");
            // expire the connection now
            ip_vs_conn_expire_now(cp);
            if (cp->control) {
                IP_VS_DBG(4, "del conn template\n");
                // expire its controlling connection as well
                ip_vs_conn_expire_now(cp->control);
            }
        }
        ct_write_unlock_bh(hash);
    }
}

Deciding whether a connection should be dropped:

/*
 *  Randomly drop connection entries before running out of memory
 */
static inline int todrop_entry(struct ip_vs_conn *cp)
{
    /*
     * The drop rate array needs tuning for real environments.
     * Called from timer bh only => no locking
     */
    static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    static char todrop_counter[9] = {0};
    int i;

    /* if the conn entry hasn't lasted for 60 seconds, don't drop it.
       This will leave enough time for normal connection to get
       through. */
    // the connection was created or refreshed less than 60 seconds ago:
    // don't drop it
    if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
        return 0;

    /* Don't drop the entry if its number of incoming packets is not
       located in [0, 8] */
    i = atomic_read(&cp->in_pkts);
    // connections with more than 8 incoming packets are not dropped;
    // a value below 0 is practically impossible
    if (i > 8 || i < 0) return 0;

    // todrop_rate[i] is 0 only for i == 0, so that entry is of little use
    if (!todrop_rate[i]) return 0;
    // the drop countdown has not reached 0 yet: don't drop
    if (--todrop_counter[i] > 0) return 0;

    // reload the drop countdown
    todrop_counter[i] = todrop_rate[i];
    return 1;
}
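The drop-rate table means: a qualifying connection that has seen i incoming packets (0 <= i <= 8) is dropped once every todrop_rate[i] calls, so connections that have carried fewer packets are dropped more aggressively, and a connection with 0 packets is never dropped by this path. A small userspace simulation of the counter mechanism (illustration only, not kernel code):

/* Userspace simulation of todrop_entry()'s rate counters: for i incoming
 * packets, a candidate connection is dropped once every todrop_rate[i] calls. */
#include <stdio.h>

static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
static char todrop_counter[9] = {0};

static int todrop(int in_pkts)
{
    int i = in_pkts;
    if (i > 8 || i < 0) return 0;        /* only connections with 0..8 packets qualify */
    if (!todrop_rate[i]) return 0;       /* i == 0: never dropped */
    if (--todrop_counter[i] > 0) return 0;
    todrop_counter[i] = todrop_rate[i];  /* reload the countdown */
    return 1;
}

int main(void)
{
    /* call todrop() 16 times for connections that have seen 1, 4 and 8 packets */
    int samples[] = {1, 4, 8};
    for (int s = 0; s < 3; s++) {
        int pkts = samples[s], dropped = 0;
        for (int call = 0; call < 16; call++)
            dropped += todrop(pkts);
        printf("in_pkts=%d: dropped %2d of 16 candidates\n", pkts, dropped);
    }
    return 0;
}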
6.6 Other connection-related functions

/*
 *  Fill a no_client_port connection with a client port number
 */
// supplies a client port for a connection that was created without one;
// called from ip_vs_nat_xmit()
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __u16 cport)
{
    // only proceed if the connection can be unlinked from the connection hash
    // table (it is re-hashed below under the new key)
    if (ip_vs_conn_unhash(cp)) {
        // arguably this lock would be better taken inside the check below
        spin_lock(&cp->lock);
        // only act on connections with the NO_CPORT flag set
        if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
            // decrease the NO_CPORT counter
            atomic_dec(&ip_vs_conn_no_cport_cnt);
            // clear the NO_CPORT flag
            cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
            // set the connection's client port
            cp->cport = cport;
        }
        spin_unlock(&cp->lock);

        /* hash on new dport */
        ip_vs_conn_hash(cp);
    }
}

/*
 *  Checking if the destination of a connection template is available.
 *  If available, return 1, otherwise invalidate this connection
 *  template and return 0.
 */
// checks whether the destination server of a connection template is still
// usable; called from ip_vs_sched_persist()
int ip_vs_check_template(struct ip_vs_conn *ct)
{
    // the template's destination server
    struct ip_vs_dest *dest = ct->dest;

    /*
     * Checking the dest server status.
     */
    // 1. the server does not exist, or
    if ((dest == NULL) ||
        // 2. the server is not available, or
        !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
        // 3. the server's weight is 0 and the sysctl
        //    sysctl_ip_vs_expire_quiescent_template is non-zero
        (sysctl_ip_vs_expire_quiescent_template &&
         (atomic_read(&dest->weight) == 0))) {
        IP_VS_DBG(9, "check_template: dest not available for "
                  "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
                  "-> d:%u.%u.%u.%u:%d\n",
                  ip_vs_proto_name(ct->protocol),
                  NIPQUAD(ct->caddr), ntohs(ct->cport),
                  NIPQUAD(ct->vaddr), ntohs(ct->vport),
                  NIPQUAD(ct->daddr), ntohs(ct->dport));

        /*
         * Invalidate the connection template
         */
        // if the virtual service port is not 65535, rewrite the connection
        // parameters so the template can no longer match
        if (ct->vport != 65535) {
            if (ip_vs_conn_unhash(ct)) {
                ct->dport = 65535;
                ct->vport = 65535;
                ct->cport = 0;
                ip_vs_conn_hash(ct);
            }
        }

        /*
         * Simply decrease the refcnt of the template,
         * don't restart its timer.
         */
        // drop the template's reference count, since its destination server
        // is no longer usable
        atomic_dec(&ct->refcnt);
        return 0;
    }
    return 1;
}

...... to be continued ......