while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
        if (state == TCP_LISTEN)
                unix_release_sock(skb->sk, 1);
        /* passed fds are erased in the kfree_skb hook */
        kfree_skb(skb);
}
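/*
 * Background for the recursion above: when a listening AF_UNIX socket
 * is released, each skb still sitting on its receive queue carries an
 * embryo socket (skb->sk) created by a connect() that was never
 * accept()ed, so those sockets have to be released as well before the
 * skb itself is freed.
 */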
if (dentry) {
        dput(dentry);
        mntput(mnt);
}
sock_put(sk);
/* ---- Socket is dead now and most probably destroyed ---- */
/*
 * Fixme: BSD difference: In BSD all sockets connected to us get
 *        ECONNRESET and we die on the spot. In Linux we behave
 *        like files and pipes do and wait for the last
 *        dereference.
 *
 * Can't we simply set sock->err?
 *
 *        What the above comment does talk about? --ANK(980817)
 */
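/*
 * Illustration (userspace sketch, not part of this kernel source):
 * the Linux behaviour described above, as seen by an application.
 * After the peer of an AF_UNIX stream socket is closed, read()
 * returns 0 (EOF) and write() fails with EPIPE, rather than the
 * socket dying with ECONNRESET on the spot.  The function name
 * demo_peer_close is ours, for illustration only.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void demo_peer_close(void)
{
        int sv[2];
        char c;

        signal(SIGPIPE, SIG_IGN);       /* report EPIPE instead of a fatal signal */
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                return;
        close(sv[1]);                                   /* the peer goes away */
        printf("read  -> %zd\n", read(sv[0], &c, 1));   /* 0: EOF, not an error */
        printf("write -> %zd\n", write(sv[0], "x", 1)); /* -1, errno == EPIPE */
        close(sv[0]);
}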
/* Avoid a recursive GC. */
if (gc_in_progress)
        goto out;
gc_in_progress = true;
/*
 * First, select candidates for garbage collection.  Only
 * in-flight sockets are considered, and from those only ones
 * which don't have any external reference.
 *
 * Holding unix_gc_lock will protect these candidates from
 * being detached, and hence from gaining an external
 * reference.  This also means, that since there are no
 * possible receivers, the receive queues of these sockets are
 * static during the GC, even though the dequeue is done
 * before the detach without atomicity guarantees.
 */
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
        int total_refs;
        int inflight_refs;

        total_refs = file_count(u->sk.sk_socket->file);
        inflight_refs = atomic_read(&u->inflight);

        /* A socket is a candidate iff every reference to its file
         * comes from an in-flight SCM_RIGHTS message. */
        BUG_ON(inflight_refs < 1);
        BUG_ON(total_refs < inflight_refs);
        if (total_refs == inflight_refs) {
                list_move_tail(&u->link, &gc_candidates);
                u->gc_candidate = 1;
        }
}
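/*
 * Illustration (userspace sketch, not part of this kernel source):
 * how a socket ends up "in-flight".  Passing an fd with SCM_RIGHTS
 * leaves a struct file reference queued inside a receive queue; if
 * such messages form a cycle (e.g. one end of a socketpair is sent
 * into itself and then closed), the sockets become unreachable from
 * userspace and only this GC can reclaim them.  The helper name
 * send_fd is ours, for illustration only.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_fd(int via, int fd)
{
        char dummy = '*';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {                 /* cmsg buffer, properly aligned */
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type  = SCM_RIGHTS;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(via, &msg, 0);   /* fd is in-flight until received */
}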
/*
 * Now remove all internal in-flight reference to children of
 * the candidates.
 */
list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, dec_inflight, NULL);

/*
 * Restore the references for children of all candidates,
 * which have remaining references.  Do this recursively, so
 * only those remain, which form cyclic references.
 *
 * Use a "cursor" link, to make the list traversal safe, even
 * though elements might be moved about.
 */
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
        u = list_entry(cursor.next, struct unix_sock, link);

        /* Move cursor to after the current position. */
        list_move(&cursor, &u->link);

        if (atomic_read(&u->inflight) > 0) {
                list_move_tail(&u->link, &gc_inflight_list);
                u->gc_candidate = 0;
                scan_children(&u->sk, inc_inflight_move_tail, NULL);
        }
}
list_del(&cursor);
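/*
 * Note on the cursor trick above: list_for_each_entry_safe() only
 * tolerates removal of the *current* entry, not entries being moved
 * to other lists by the scan callbacks.  Threading a dummy list_head
 * through the list and advancing it by hand keeps the traversal
 * position valid no matter how the surrounding entries get
 * reshuffled, at the cost of a final list_del() of the cursor.
 */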
/*
 * Now gc_candidates contains only garbage.  Restore original
 * inflight counters for these as well, and remove the skbuffs
 * which are creating the cycle(s).
 */
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
        scan_children(&u->sk, inc_inflight, &hitlist);
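/*
 * Why restore the counters before freeing anything: each skb moved
 * to the hitlist still owns references to the cyclic sockets, and
 * freeing it will drop them again through the skb destructor.
 * Re-incrementing first keeps the in-flight accounting balanced, so
 * the counts simply return to zero as the hitlist is purged instead
 * of underflowing.
 */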
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
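/*
 * The purge runs with unix_gc_lock dropped on purpose: kfree_skb()
 * on these skbs ends up in the unix fd-detach path, which does its
 * own in-flight bookkeeping under unix_gc_lock, and it may also
 * release the cyclic sockets themselves.  Purging under the lock
 * would deadlock.
 */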
spin_lock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;

out:
        spin_unlock(&unix_gc_lock);
}
/*
 * For a listening socket collect the queued embryos
 * and perform a scan on them as well.
 */
spin_lock(&x->sk_receive_queue.lock);
receive_queue_for_each_skb(x, next, skb) {
        u = unix_sk(skb->sk);

        /*
         * An embryo cannot be in-flight, so it's safe
         * to use the list link.
         */
        BUG_ON(!list_empty(&u->link));
        list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);

/* Now scan the collected embryos with the queue lock dropped. */
while (!list_empty(&embryos)) {
        u = list_entry(embryos.next, struct unix_sock, link);
        scan_inflight(&u->sk, func, hitlist);
        list_del_init(&u->link);
}
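/*
 * The two-phase shape (collect onto a private list under x's queue
 * lock, scan only after dropping it) matters: scan_inflight() takes
 * the embryo's own sk_receive_queue.lock, and scanning directly from
 * inside the loop would nest two locks of the same lock class.
 */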
spin_lock(&x->sk_receive_queue.lock);
receive_queue_for_each_skb(x, next, skb) {
        /*
         * Do we have file descriptors ?
         */
        if (UNIXCB(skb).fp) {
                bool hit = false;
                /*
                 * Process the descriptors of this socket
                 */
                int nfd = UNIXCB(skb).fp->count;
                struct file **fp = UNIXCB(skb).fp->fp;

                while (nfd--) {
                        /*
                         * Get the socket the fd matches
                         * if it indeed does so
                         */
                        struct sock *sk = unix_get_socket(*fp++);

                        if (sk) {
                                hit = true;
                                func(unix_sk(sk));
                        }
                }
                if (hit && hitlist != NULL) {
                        __skb_unlink(skb, &x->sk_receive_queue);
                        __skb_queue_tail(hitlist, skb);
                }
        }
}
spin_unlock(&x->sk_receive_queue.lock);
}
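/*
 * func here is one of the small callbacks the GC passes in:
 * dec_inflight (forget phase, hitlist == NULL), inc_inflight_move_tail
 * (rescue phase, hitlist == NULL), or inc_inflight (kill phase, with a
 * real hitlist, so that cycle-forming skbs are also unlinked into it).
 */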
static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_inc(&u->inflight);
        /*
         * If this is still a candidate, move it to the end of the
         * list, so that it's checked even if it was already passed
         * over
         */
        if (u->gc_candidate)
                list_move_tail(&u->link, &gc_candidates);
}
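/*
 * For reference, the two sibling callbacks used alongside this one
 * are plain counter updates with no list manipulation (a sketch
 * matching how they are invoked above):
 */
static void dec_inflight(struct unix_sock *usk)
{
        atomic_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_inc(&usk->inflight);
}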