ATH9K Driver Learning Part VII: Transmission Tasklet and Interrupts

Wireless transmission and reception on a host are driven by tasklets and interrupts. At certain points in time a tasklet and a series of interrupts are triggered to carry out specific operations. This article focuses on how the ath9k driver drives packet transmission, and on when and where the driver can determine whether a packet was transmitted successfully. The transmission tasklet and interrupt flow is as follows:

ath9k_tasklet() (main.c) -> ath_tx_edma_tasklet() (xmit.c) ->
ath_tx_process_buffer() (xmit.c) -> ath_tx_complete_aggr() (xmit.c) (if an A-MPDU
is used) -> ath_tx_complete_buf() (xmit.c) -> ath_tx_complete() (xmit.c) ->
ath_txq_skb_done() (xmit.c)

ath9k_tasklet

The ath9k_tasklet function is the starting point for most of the ath9k driver's work. It is defined in ath9k/main.c and registered in ath9k/init.c, which shows how central it is. A tasklet is essentially a softirq: the Linux kernel runs it at a suitable point after it has been scheduled. When it runs, the function checks whether TSF sync looks correct and whether the pending work is transmission or reception. For transmission, it checks whether the hardware supports EDMA: if so it calls ath_tx_edma_tasklet, otherwise ath_tx_tasklet. This article focuses on the EDMA case. Although ath_tx_edma_tasklet and ath_tx_tasklet differ internally, both eventually reach the same function: ath_tx_process_buffer.
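
Before looking at the body of ath9k_tasklet, it helps to recall how this kind of tasklet is wired up: init.c binds the handler to the driver context with tasklet_init, and the hard-IRQ handler saves the interrupt status into sc->intrstatus and schedules the tasklet, so the heavy lifting below runs later in softirq context. The following is a minimal sketch of that pattern using hypothetical names (my_softc, my_tasklet, my_isr), not the actual ath9k code:

/* Minimal sketch of the tasklet pattern (hypothetical names). */
#include <linux/types.h>
#include <linux/interrupt.h>

struct my_softc {
	struct tasklet_struct intr_tq;	/* deferred-work handle */
	u32 intrstatus;			/* status bits saved by the ISR */
};

static void my_tasklet(unsigned long data)
{
	struct my_softc *sc = (struct my_softc *)data;
	u32 status = sc->intrstatus;	/* consume what the ISR recorded */

	/* ... dispatch rx/tx processing based on the status bits ... */
	(void)status;
}

static void my_init_tasklet(struct my_softc *sc)
{
	/* Bind the handler; the driver context is passed as opaque data. */
	tasklet_init(&sc->intr_tq, my_tasklet, (unsigned long)sc);
}

static irqreturn_t my_isr(struct my_softc *sc, u32 hw_status)
{
	/* Hard-IRQ path: record the status, defer everything else. */
	sc->intrstatus = hw_status;
	tasklet_schedule(&sc->intr_tq);
	return IRQ_HANDLED;
}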

/ath9k/main.c
void ath9k_tasklet(unsigned long data)
{
   struct ath_softc *sc = (struct ath_softc *)data;
   struct ath_hw *ah = sc->sc_ah;
   struct ath_common *common = ath9k_hw_common(ah);
   enum ath_reset_type type;
   unsigned long flags;
   u32 status = sc->intrstatus;
   u32 rxmask;

   ath9k_ps_wakeup(sc);
   spin_lock(&sc->sc_pcu_lock);

   if ((status & ATH9K_INT_FATAL) ||
       (status & ATH9K_INT_BB_WATCHDOG)) {

   	if (status & ATH9K_INT_FATAL)
   		type = RESET_TYPE_FATAL_INT;
   	else
   		type = RESET_TYPE_BB_WATCHDOG;

   	ath9k_queue_reset(sc, type);

   	/*
   	 * Increment the ref. counter here so that
   	 * interrupts are enabled in the reset routine.
   	 */
   	atomic_inc(&ah->intr_ref_cnt);
   	ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
   	goto out;
   }

   spin_lock_irqsave(&sc->sc_pm_lock, flags); // Temporarily disable local interrupts and take sc_pm_lock, saving the interrupt state in flags
   if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
   	/*
   	 * TSF sync does not look correct; remain awake to sync with
   	 * the next Beacon.
   	 */
   	ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
   	sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
   }
   spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

   if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
   	rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
   		  ATH9K_INT_RXORN);
   else
   	rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);

   if (status & rxmask) { // If it is time to receive a packet
   	/* Check for high priority Rx first */
   	if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
   	    (status & ATH9K_INT_RXHP))
   		ath_rx_tasklet(sc, 0, true);

   	ath_rx_tasklet(sc, 0, false);
   }

   if (status & ATH9K_INT_TX) {  // If it is time to transmit a packet, EDMA-> ath_tx_edma_tasklet; else ath_tx_tasklet
   	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
   		ath_tx_edma_tasklet(sc);
   	else
   		ath_tx_tasklet(sc);
   }
   spin_unlock(&sc->sc_pcu_lock);
   ath9k_ps_restore(sc);
}

ath_tx_edma_tasklet

This function runs an infinite loop. It only breaks out when the hardware is being reset, when the hardware is still busy with the current descriptor, or when reading the tx status fails; otherwise it keeps processing transmitted packets. If the hardware is handling a beacon, the completion of other packets is deferred until the beacon is done. The first step in handling a completed packet is to fetch the corresponding transmission queue:

txq = &sc->tx.txq[ts.qid]; // The packet transmission queue in the ath9k driver is struct ath_txq

After locking the txq, the function takes the completed packet bf off the FIFO list and, if further frames are still queued in software, pushes them to the hardware with ath_tx_txqaddbuf, which was analyzed in Learning Part II; please refer to that article. Finally, ath_tx_edma_tasklet calls ath_tx_process_buffer to evaluate the transmission result of bf.
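
One small idiom in the listing below: for EDMA, each queue's tx FIFO (txq->txq_fifo[]) is a small ring walked with a tail index that wraps at ATH_TXFIFO_DEPTH via the INCR() macro. A self-contained sketch of that wrap-around, with a hypothetical depth value standing in for the real constant:

/* Sketch of the ring-index wrap performed by INCR(txq->txq_tailidx,
 * ATH_TXFIFO_DEPTH) in the listing below (MY_TXFIFO_DEPTH is a
 * hypothetical stand-in for the real depth). */
#define MY_TXFIFO_DEPTH	8

static int my_fifo_advance(int tailidx)
{
	/* Move to the next FIFO slot, wrapping back to 0 at the end. */
	return (tailidx + 1) % MY_TXFIFO_DEPTH;
}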

/ath9k/xmit.c
void ath_tx_edma_tasklet(struct ath_softc *sc) // EDMA: enhanced DMA
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) // If the hardware is being reset, stop processing
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts); // IMPORTANT: read the next tx completion status from the hardware into ts
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) { // If the hardware is handling a beacon
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

			ath9k_csa_is_finished(sc);
			continue;
		}

		txq = &sc->tx.txq[ts.qid]; // The packet transmission queue in the ath9k driver is struct ath_txq.
					   // Using the queue id reported in ts, find the corresponding txq
					   // (ath_txq) in the software context; it holds the buffers whose
					   // transmission has just been completed by the hardware.


		ath_txq_lock(sc, txq); 	   // Lock this ath_txq while its buffers are being processed.

		TX_STAT_INC(txq->axq_qnum, txprocdesc);  // axq_qnum: ath9k hardware queue number 

		fifo_list = &txq->txq_fifo[txq->txq_tailidx]; // fifo_list points at the EDMA tx FIFO slot at the queue's tail index (see ath_tx_txqaddbuf)
		if (list_empty(fifo_list)) {				  // If the txq (ath_txq) declared in ts struct has no content.
			ath_txq_unlock(sc, txq);				  // Do nothing and return
			return;
		}

		bf = list_first_entry(fifo_list, struct ath_buf, list);	// The FIFO list is not empty here; take its first ath_buf as bf
		if (bf->bf_state.stale) { // stale means old
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);  // Push the pending software queue (axq_q) to the hardware for transmission
			}
		} else {
			lastbf->bf_state.stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);				 // Unlock this ath_txq and flush its completion queue.
	}
}


ath_tx_process_buffer

ath_tx_process_buffer is a very important dispatch ("relay") function. Do not make careless changes here: the author once put a printk directly into this path and crashed the kernel so badly that the machine would not boot and the system had to be reinstalled. Its logic is simple. It first reads some physical-layer information, then calls different functions depending on whether the buffer is an A-MPDU. If it is not, it copies the packet's transmit-rate information into info->control.rates and calls ath_tx_rc_status to record the transmission rate that finally succeeded and the number of retransmissions. ath_tx_rc_status is therefore the key place to look when you want to know whether a packet was transmitted successfully; it is analyzed next. If the buffer is an A-MPDU, ath_tx_process_buffer calls ath_tx_complete_aggr instead, which also uses ath_tx_rc_status internally.
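
If you do need to instrument this path, it is safer to use the driver's own masked debug helper (ath_dbg, already used in the functions above) or a rate-limited printk than an unconditional printk on every completion, since this code runs in softirq context for every transmitted frame. A hedged sketch, where debug_tx_status is a hypothetical helper placed in xmit.c:

/* Sketch: instrumenting the completion path without flooding the log.
 * ath_dbg() does nothing unless the ath debug support is built in, and
 * is filtered by the debug mask at runtime. */
static void debug_tx_status(struct ath_softc *sc, struct ath_tx_status *ts)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	ath_dbg(common, XMIT, "qid %d status 0x%x rateindex %d longretry %d\n",
		ts->qid, ts->ts_status, ts->ts_rateindex, ts->ts_longretry);

	/* Alternative: a rate-limited printk, usable without the ath
	 * debug machinery. */
	printk_ratelimited(KERN_DEBUG "tx status 0x%x\n", ts->ts_status);
}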

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	struct ieee80211_tx_info *info;
	bool txok, flush;
	// ath_tx_status stores physical layer information
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	txq->axq_tx_inprogress = false;

	txq->axq_depth--;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		if (!flush) {
			info = IEEE80211_SKB_CB(bf->bf_mpdu);
			memcpy(info->control.rates, bf->rates, //void * memcpy ( void * destination, const void * source, size_t num );
			       sizeof(info->control.rates));
			ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		}
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);

	if (!flush)
		ath_txq_schedule(sc, txq);
}

ath_tx_rc_status

In short, ath_tx_rc_status copies the hardware-derived information from the ath_tx_status structure into the ieee80211_tx_info structure. Why is this step needed? Because ieee80211_tx_info lives in the packet's control buffer: writing to ieee80211_tx_info is effectively writing into the packet itself, so later functions can read the packet's transmission status directly from it. For details on transmission rates and retry counts, see Part VI.
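
To see what this buys later consumers: once the skb has been reported upward (for example via ieee80211_tx_status), any code holding it can recover the ACK flag, the final rate index, and the total attempt count from the very fields ath_tx_rc_status fills in. A minimal sketch with a hypothetical helper, not driver code:

/* Sketch: read back what ath_tx_rc_status wrote into the skb's
 * control buffer. */
#include <linux/printk.h>
#include <net/mac80211.h>

static void inspect_tx_status(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int i, attempts = 0, final_idx = -1;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (info->status.rates[i].idx < 0)
			break;			/* idx == -1 marks the end of the chain */
		attempts += info->status.rates[i].count;
		final_idx = info->status.rates[i].idx;
	}

	pr_debug("acked=%d final_rate_idx=%d attempts=%d\n",
		 !!(info->flags & IEEE80211_TX_STAT_ACK), final_idx, attempts);
}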

/ath9k/xmit.c
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
// IMPORTANT: ath_tx_status is the key structure here; it carries the final rate index and the retry counts
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;			// IMPORTANT: ts->ts_rateindex is the index of the rate in use when transmission finished
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
		                             ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;   
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {  // Clear the rate entries after the one actually used; they were never tried
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; // IMPORTANT: record the number of attempts at the final rate (long retries + 1)
}

ath_tx_complete_aggr

This function has a relatively complex structure, mainly because it has to handle aggregation (A-MPDU and BlockAck bookkeeping). Apart from that, its logic is clear: in the end it also uses ath_tx_rc_status to obtain the packet transmission status and eventually enters ath_tx_complete_buf.
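
One detail worth calling out before the listing: the total number of transmission attempts is reconstructed from the multi-rate-retry chain. The hardware reports the rate index in use when transmission finished (ts_rateindex) and the long-retry count at that rate (ts_longretry); attempts at the earlier rates come from the per-rate count fields. A small self-contained sketch of that arithmetic (total_tx_attempts is a hypothetical helper mirroring the computation a few lines into the listing):

/* Sketch: reconstruct the total attempt count the way
 * ath_tx_complete_aggr does below.
 *
 * Example: rates[] = { {count 2}, {count 2}, ... }, ts_rateindex = 1,
 * ts_longretry = 0 -> 2 attempts at rates[0] + (0 + 1) at rates[1] = 3.
 */
#include <net/mac80211.h>

static int total_tx_attempts(const struct ieee80211_tx_rate *rates,
			     int ts_rateindex, int ts_longretry)
{
	int i, retries = ts_longretry + 1;	/* attempts at the final rate */

	for (i = 0; i < ts_rateindex; i++)
		retries += rates[i].count;	/* attempts at the earlier rates */

	return retries;
}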

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));   //void * memcpy ( void * destination, const void * source, size_t num );
											   // Get the rate information in bf

	retries = ts->ts_longretry + 1;            // IMPORTANT: calculate the retry times
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); // look up the destination station by its MAC addresses
	if (!sta) { // If no station can be found
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);  // Initialize the list headed by &bf_head
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_state.stale || bf_next != NULL)// unless this is the stale holding descriptor at the end of the chain
				list_move_tail(&bf->list, &bf_head); //Move &bf->list to the end of the list headed by &bf_head

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ath_get_skb_tid(sc, an, skb);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tid->tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
		    !tid->active) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if (bf_next != NULL || !bf_last->bf_state.stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
				tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
				ieee80211_sta_eosp(sta);
			}
			/* retry the un-acked ones */
			if (bf->bf_next == NULL && bf_last->bf_state.stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice_tail(&bf_pending, &tid->retry_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}


ath_tx_complete_buf

This function checks whether the packet has timed out (this applies to PAPRD calibration frames); a timed-out packet is forcibly freed. Otherwise it calls the ath_tx_complete function.
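
The timeout test below uses the standard jiffies idiom. A minimal self-contained sketch of that pattern, with hypothetical names (MY_TIMEOUT_MS stands in for ATH_PAPRD_TIMEOUT in the bfs_paprd branch):

/* Sketch of the jiffies timeout idiom used in the bfs_paprd branch. */
#include <linux/types.h>
#include <linux/jiffies.h>

#define MY_TIMEOUT_MS	100	/* hypothetical value; ATH_PAPRD_TIMEOUT plays this role below */

static bool my_frame_timed_out(unsigned long sent_timestamp)
{
	/* time_after() handles jiffies wrap-around correctly, which a
	 * plain ">" comparison would not. */
	return time_after(jiffies,
			  sent_timestamp + msecs_to_jiffies(MY_TIMEOUT_MS));
}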

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0; /* physical addr of data buffer, for DMA */
	if (sc->tx99_state)
		goto skip_tx_complete;

	if (bf->bf_state.bfs_paprd) { // if bfs_paprd is set, check whether this skb has timed out
		if (time_after(jiffies,
				bf->bf_state.bfs_paprd_timestamp +
				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);  // drop this skb
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
skip_tx_complete:
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later; ath_tx_complete has already handed it off.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

ath_tx_complete & ath_txq_skb_done

ath_tx_complete checks whether the hardware received an ACK for the packet that was sent and, if so, marks the frame as acknowledged; if the driver was only staying awake to wait for this TX status, it can go back to sleep. It finally calls ath_txq_skb_done to fully finish the transmission process.
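
ath_txq_skb_done is also one half of a simple flow-control scheme: the transmit path stops a mac80211 queue when too many frames are pending on the matching hardware queue, and this completion path wakes it again once the backlog drains. A minimal sketch of that stop/wake pattern, with a hypothetical per-queue state struct (the real ath_txq has the same pending_frames and stopped fields, plus much more):

/* Sketch of the stop/wake flow control around pending_frames. */
#include <linux/types.h>
#include <net/mac80211.h>

struct my_txq_state {
	int pending_frames;	/* frames handed to the driver, not yet completed */
	bool stopped;		/* true while mac80211 is back-pressured */
};

static void my_tx_enqueue(struct ieee80211_hw *hw, struct my_txq_state *txq,
			  int q, int max_pending)
{
	/* Submission side: apply back-pressure when the backlog is full. */
	if (++txq->pending_frames >= max_pending && !txq->stopped) {
		ieee80211_stop_queue(hw, q);
		txq->stopped = true;
	}
}

static void my_tx_complete(struct ieee80211_hw *hw, struct my_txq_state *txq,
			   int q, int max_pending)
{
	/* Completion side (the role ath_txq_skb_done plays): wake the
	 * queue once the backlog has drained below the threshold. */
	if (--txq->pending_frames < max_pending && txq->stopped) {
		ieee80211_wake_queue(hw, q);
		txq->stopped = false;
	}
}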

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);
	ath_txq_skb_done(sc, txq, skb);
}

static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	int q;

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[q];

	if (txq != sc->tx.txq_map[q])
		return;

	if (WARN_ON(--txq->pending_frames < 0))
		txq->pending_frames = 0;

	if (txq->stopped &&
	    txq->pending_frames < sc->tx.txq_max_pending[q]) {
		ieee80211_wake_queue(sc->hw, q);
		txq->stopped = false;
	}
}