	qlge: Add tx multiqueue support.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 22bdd4f599
commit 1e213303d8
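In short, the patch switches qlge from the single legacy TX queue interface to the per-queue netdev API: the net_device is allocated with alloc_etherdev_mq() (one TX queue per online CPU, capped at MAX_CPUS), qlge_send() picks its hardware tx_ring from the queue index the stack records in skb->queue_mapping, and flow control uses netif_stop_subqueue()/netif_wake_subqueue() instead of stopping the whole device. The fragment below is a minimal sketch of that transmit-side pattern, not the qlge code itself; my_adapter, my_tx_ring, MY_MAX_QUEUES and free_slots are illustrative stand-ins for the driver's own structures.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_MAX_QUEUES 8                 /* illustrative cap, like qlge's MAX_CPUS */

struct my_tx_ring {
        u16 ring_id;                    /* doubles as the netdev subqueue index */
        atomic_t free_slots;            /* descriptors still available */
};

struct my_adapter {
        struct net_device *ndev;
        struct my_tx_ring tx_ring[MY_MAX_QUEUES];
};

/* Probe time: ask the stack for one TX queue per online CPU. */
static struct net_device *my_alloc_netdev(void)
{
        return alloc_etherdev_mq(sizeof(struct my_adapter),
                                 min(MY_MAX_QUEUES, (int)num_online_cpus()));
}

/* ndo_start_xmit: the core has already chosen skb->queue_mapping. */
static int my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct my_adapter *adapter = netdev_priv(ndev);
        struct my_tx_ring *tx_ring = &adapter->tx_ring[skb->queue_mapping];

        if (atomic_read(&tx_ring->free_slots) == 0) {
                /* Stop only this subqueue; the other rings keep transmitting. */
                netif_stop_subqueue(ndev, tx_ring->ring_id);
                return NETDEV_TX_BUSY;
        }
        atomic_dec(&tx_ring->free_slots);
        /* ... build descriptors and ring the doorbell for this tx_ring ... */
        return NETDEV_TX_OK;
}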
@@ -1627,14 +1627,12 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 /* Fire up a handler to reset the MPI processor. */
 void ql_queue_fw_error(struct ql_adapter *qdev)
 {
-        netif_stop_queue(qdev->ndev);
         netif_carrier_off(qdev->ndev);
         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
 }
 
 void ql_queue_asic_error(struct ql_adapter *qdev)
 {
-        netif_stop_queue(qdev->ndev);
         netif_carrier_off(qdev->ndev);
         ql_disable_interrupts(qdev);
         /* Clear adapter up bit to signal the recovery
@@ -1689,6 +1687,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
         struct ob_mac_iocb_rsp *net_rsp = NULL;
         int count = 0;
 
+        struct tx_ring *tx_ring;
         /* While there are entries in the completion queue. */
         while (prod != rx_ring->cnsmr_idx) {
 
@@ -1714,15 +1713,16 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
         }
         ql_write_cq_idx(rx_ring);
-        if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
-                struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+        tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+        if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
+                                        net_rsp != NULL) {
                 if (atomic_read(&tx_ring->queue_stopped) &&
                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
                         /*
                          * The queue got stopped because the tx_ring was full.
                          * Wake it up, because it's now at least 25% empty.
                          */
-                        netif_wake_queue(qdev->ndev);
+                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
         }
 
         return count;
@@ -2054,7 +2054,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
         struct ql_adapter *qdev = netdev_priv(ndev);
         int tso;
         struct tx_ring *tx_ring;
-        u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+        u32 tx_ring_idx = (u32) skb->queue_mapping;
 
         tx_ring = &qdev->tx_ring[tx_ring_idx];
 
@@ -2062,7 +2062,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
                 QPRINTK(qdev, TX_QUEUED, INFO,
                         "%s: shutting down tx queue %d du to lack of resources.\n",
                         __func__, tx_ring_idx);
-                netif_stop_queue(ndev);
+                netif_stop_subqueue(ndev, tx_ring->wq_id);
                 atomic_inc(&tx_ring->queue_stopped);
                 return NETDEV_TX_BUSY;
         }
@@ -3192,12 +3192,10 @@ static void ql_display_dev_info(struct net_device *ndev)
 
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
-        struct net_device *ndev = qdev->ndev;
         int i, status = 0;
         struct rx_ring *rx_ring;
 
-        netif_stop_queue(ndev);
-        netif_carrier_off(ndev);
+        netif_carrier_off(qdev->ndev);
 
         /* Don't kill the reset worker thread if we
          * are in the process of recovery.
@@ -3261,12 +3259,11 @@ static int ql_adapter_up(struct ql_adapter *qdev)
         spin_unlock(&qdev->hw_lock);
         set_bit(QL_ADAPTER_UP, &qdev->flags);
         ql_alloc_rx_buffers(qdev);
+        if ((ql_read32(qdev, STS) & qdev->port_init))
+                netif_carrier_on(qdev->ndev);
         ql_enable_interrupts(qdev);
         ql_enable_all_completion_interrupts(qdev);
-        if ((ql_read32(qdev, STS) & qdev->port_init)) {
-                netif_carrier_on(qdev->ndev);
-                netif_start_queue(qdev->ndev);
-        }
+        netif_tx_start_all_queues(qdev->ndev);
 
         return 0;
 err_init:
@@ -3354,6 +3351,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
          * completion handler rx_rings.
          */
         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+        netif_set_gso_max_size(qdev->ndev, 65536);
 
         for (i = 0; i < qdev->tx_ring_count; i++) {
                 tx_ring = &qdev->tx_ring[i];
@@ -3829,7 +3827,8 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
         static int cards_found = 0;
         int err = 0;
 
-        ndev = alloc_etherdev(sizeof(struct ql_adapter));
+        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+                        min(MAX_CPUS, (int)num_online_cpus()));
         if (!ndev)
                 return -ENOMEM;
 
@@ -3872,7 +3871,6 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
                 return err;
         }
         netif_carrier_off(ndev);
-        netif_stop_queue(ndev);
         ql_display_dev_info(ndev);
         cards_found++;
         return 0;
@@ -3926,7 +3924,6 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
         pci_set_master(pdev);
 
         netif_carrier_off(ndev);
-        netif_stop_queue(ndev);
         ql_adapter_reset(qdev);
 
         /* Make sure the EEPROM is good */
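The completion path mirrors this: ql_clean_outbound_rx_ring() now looks up the tx_ring named by the completion (net_rsp->txq_idx) and wakes only that subqueue, and only once the ring is at least 25% empty, so the queue does not bounce between stopped and running on every reclaimed descriptor. A hedged sketch of that wake-up logic, continuing the illustrative names from the sketch above (MY_RING_LEN stands in for qlge's wq_len):

#define MY_RING_LEN 256                 /* illustrative ring size */

static void my_tx_complete(struct my_adapter *adapter,
                           struct my_tx_ring *tx_ring)
{
        /* ... reclaim finished descriptors, incrementing free_slots ... */

        /*
         * Wake the subqueue only if it was stopped and at least a quarter
         * of the ring is free again, matching the 25% threshold above.
         */
        if (__netif_subqueue_stopped(adapter->ndev, tx_ring->ring_id) &&
            atomic_read(&tx_ring->free_slots) > (MY_RING_LEN / 4))
                netif_wake_subqueue(adapter->ndev, tx_ring->ring_id);
}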