diff --git a/package/kernel/linux/modules/netdevices.mk b/package/kernel/linux/modules/netdevices.mk index 64dd128237..5c66c99c07 100644 --- a/package/kernel/linux/modules/netdevices.mk +++ b/package/kernel/linux/modules/netdevices.mk @@ -1755,6 +1755,24 @@ endef $(eval $(call KernelPackage,mhi-wwan-mbim)) +define KernelPackage/mtk-t7xx + SUBMENU:=$(NETWORK_DEVICES_MENU) + TITLE:=MediaTek PCIe 5G WWAN modem T7xx device + DEPENDS:=@PCI_SUPPORT +kmod-wwan + KCONFIG:=CONFIG_MTK_T7XX + FILES:=$(LINUX_DIR)/drivers/net/wwan/t7xx/mtk_t7xx.ko + AUTOLOAD:=$(call AutoProbe,mtk_t7xx) +endef + +define KernelPackage/mtk-t7xx/description + Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device. + Adapts WWAN framework and provides network interface like wwan0 + and tty interfaces like wwan0at0 (AT protocol), wwan0mbim0 + (MBIM protocol), etc. +endef + +$(eval $(call KernelPackage,mtk-t7xx)) + define KernelPackage/atlantic SUBMENU:=$(NETWORK_DEVICES_MENU) TITLE:=Aquantia AQtion 10Gbps Ethernet NIC diff --git a/target/linux/generic/backport-5.15/620-v5.19-list-Add-list_next_entry_circular-and.patch b/target/linux/generic/backport-5.15/620-v5.19-list-Add-list_next_entry_circular-and.patch new file mode 100644 index 0000000000..339d917e86 --- /dev/null +++ b/target/linux/generic/backport-5.15/620-v5.19-list-Add-list_next_entry_circular-and.patch @@ -0,0 +1,59 @@ +From 2fbdf45d7d26361a0c3ec8833fd96edf0f5812da Mon Sep 17 00:00:00 2001 +From: Ricardo Martinez +Date: Fri, 6 May 2022 11:12:57 -0700 +Subject: [PATCH] list: Add list_next_entry_circular() and + list_prev_entry_circular() + +Add macros to get the next or previous entries and wraparound if +needed. For example, calling list_next_entry_circular() on the last +element should return the first element in the list. + +Signed-off-by: Ricardo Martinez +Reviewed-by: Andy Shevchenko +Signed-off-by: David S. Miller +--- + include/linux/list.h | 26 ++++++++++++++++++++++++++ + 1 file changed, 26 insertions(+) + +--- a/include/linux/list.h ++++ b/include/linux/list.h +@@ -562,6 +562,19 @@ static inline void list_splice_tail_init + list_entry((pos)->member.next, typeof(*(pos)), member) + + /** ++ * list_next_entry_circular - get the next element in list ++ * @pos: the type * to cursor. ++ * @head: the list head to take the element from. ++ * @member: the name of the list_head within the struct. ++ * ++ * Wraparound if pos is the last element (return the first element). ++ * Note, that list is expected to be not empty. ++ */ ++#define list_next_entry_circular(pos, head, member) \ ++ (list_is_last(&(pos)->member, head) ? \ ++ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member)) ++ ++/** + * list_prev_entry - get the prev element in list + * @pos: the type * to cursor + * @member: the name of the list_head within the struct. +@@ -570,6 +583,19 @@ static inline void list_splice_tail_init + list_entry((pos)->member.prev, typeof(*(pos)), member) + + /** ++ * list_prev_entry_circular - get the prev element in list ++ * @pos: the type * to cursor. ++ * @head: the list head to take the element from. ++ * @member: the name of the list_head within the struct. ++ * ++ * Wraparound if pos is the first element (return the last element). ++ * Note, that list is expected to be not empty. ++ */ ++#define list_prev_entry_circular(pos, head, member) \ ++ (list_is_first(&(pos)->member, head) ? 
\ ++ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member)) ++ ++/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. diff --git a/target/linux/generic/backport-5.15/621-v5.19-01-net-wwan-t7xx-Add-control-DMA-interface.patch b/target/linux/generic/backport-5.15/621-v5.19-01-net-wwan-t7xx-Add-control-DMA-interface.patch new file mode 100644 index 0000000000..8cfa341fa7 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-01-net-wwan-t7xx-Add-control-DMA-interface.patch @@ -0,0 +1,1884 @@ +From 39d439047f1dc88f98b755d6f3a53a4ef8f0de21 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:12:59 -0700 +Subject: [PATCH] net: wwan: t7xx: Add control DMA interface +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Cross Layer DMA (CLDMA) Hardware interface (HIF) enables the control +path of Host-Modem data transfers. CLDMA HIF layer provides a common +interface to the Port Layer. + +CLDMA manages 8 independent RX/TX physical channels with data flow +control in HW queues. CLDMA uses ring buffers of General Packet +Descriptors (GPD) for TX/RX. GPDs can represent multiple or single +data buffers (DB). + +CLDMA HIF initializes GPD rings, registers ISR handlers for CLDMA +interrupts, and initializes CLDMA HW registers. + +CLDMA TX flow: +1. Port Layer write +2. Get DB address +3. Configure GPD +4. Triggering processing via HW register write + +CLDMA RX flow: +1. CLDMA HW sends a RX "done" to host +2. Driver starts thread to safely read GPD +3. DB is sent to Port layer +4. Create a new buffer for GPD ring + +Note: This patch does not enable compilation since it has dependencies +such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and +struct t7xx_pci_dev which are added by the core patch. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Ilpo Järvinen +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_cldma.c | 281 ++++++ + drivers/net/wwan/t7xx/t7xx_cldma.h | 180 ++++ + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 1192 ++++++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 126 +++ + drivers/net/wwan/t7xx/t7xx_reg.h | 33 + + 5 files changed, 1812 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_reg.h + +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_cldma.c +@@ -0,0 +1,281 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Andy Shevchenko ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_cldma.h" ++ ++#define ADDR_SIZE 8 ++ ++void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info) ++{ ++ u32 val; ++ ++ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); ++ val |= IP_BUSY_WAKEUP; ++ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); ++} ++ ++/** ++ * t7xx_cldma_hw_restore() - Restore CLDMA HW registers. 
++ * @hw_info: Pointer to struct t7xx_cldma_hw. ++ * ++ * Restore HW after resume. Writes uplink configuration for CLDMA HW. ++ */ ++void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info) ++{ ++ u32 ul_cfg; ++ ++ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); ++ ul_cfg &= ~UL_CFG_BIT_MODE_MASK; ++ ++ if (hw_info->hw_mode == MODE_BIT_64) ++ ul_cfg |= UL_CFG_BIT_MODE_64; ++ else if (hw_info->hw_mode == MODE_BIT_40) ++ ul_cfg |= UL_CFG_BIT_MODE_40; ++ else if (hw_info->hw_mode == MODE_BIT_36) ++ ul_cfg |= UL_CFG_BIT_MODE_36; ++ ++ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); ++ /* Disable TX and RX invalid address check */ ++ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); ++ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); ++} ++ ++void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD : ++ hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD; ++ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); ++ iowrite32(val, reg); ++} ++ ++void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info) ++{ ++ /* Enable the TX & RX interrupts */ ++ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); ++ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); ++ /* Enable the empty queue interrupt */ ++ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); ++ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); ++} ++ ++void t7xx_cldma_hw_reset(void __iomem *ao_base) ++{ ++ u32 val; ++ ++ val = ioread32(ao_base + REG_INFRA_RST2_SET); ++ val |= RST2_PMIC_SW_RST_SET; ++ iowrite32(val, ao_base + REG_INFRA_RST2_SET); ++ val = ioread32(ao_base + REG_INFRA_RST4_SET); ++ val |= RST4_CLDMA1_SW_RST_SET; ++ iowrite32(val, ao_base + REG_INFRA_RST4_SET); ++ udelay(1); ++ ++ val = ioread32(ao_base + REG_INFRA_RST4_CLR); ++ val |= RST4_CLDMA1_SW_RST_CLR; ++ iowrite32(val, ao_base + REG_INFRA_RST4_CLR); ++ val = ioread32(ao_base + REG_INFRA_RST2_CLR); ++ val |= RST2_PMIC_SW_RST_CLR; ++ iowrite32(val, ao_base + REG_INFRA_RST2_CLR); ++} ++ ++bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) ++{ ++ u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; ++ ++ return ioread64(hw_info->ap_pdn_base + offset); ++} ++ ++void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, ++ enum mtk_txrx tx_rx) ++{ ++ u32 offset = qno * ADDR_SIZE; ++ void __iomem *reg; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 : ++ hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0; ++ iowrite64(address, reg + offset); ++} ++ ++void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx) ++{ ++ void __iomem *base = hw_info->ap_pdn_base; ++ ++ if (tx_rx == MTK_RX) ++ iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD); ++ else ++ iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD); ++} ++ ++unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 mask, val; ++ ++ mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); ++ reg = tx_rx == MTK_RX ? 
hw_info->ap_ao_base + REG_CLDMA_DL_STATUS : ++ hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS; ++ val = ioread32(reg); ++ ++ return val & mask; ++} ++ ++void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) ++{ ++ unsigned int ch_id; ++ ++ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); ++ ch_id &= bitmask; ++ /* Clear the ch IDs in the TX interrupt status register */ ++ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); ++ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); ++} ++ ++void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) ++{ ++ unsigned int ch_id; ++ ++ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); ++ ch_id &= bitmask; ++ /* Clear the ch IDs in the RX interrupt status register */ ++ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); ++ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); ++} ++ ++unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, ++ enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 : ++ hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0; ++ val = ioread32(reg); ++ return val & bitmask; ++} ++ ++void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : ++ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; ++ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); ++ iowrite32(val, reg); ++} ++ ++void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : ++ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; ++ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); ++ iowrite32(val << EQ_STA_BIT_OFFSET, reg); ++} ++ ++void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : ++ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; ++ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); ++ iowrite32(val, reg); ++} ++ ++void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : ++ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; ++ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); ++ iowrite32(val << EQ_STA_BIT_OFFSET, reg); ++} ++ ++/** ++ * t7xx_cldma_hw_init() - Initialize CLDMA HW. ++ * @hw_info: Pointer to struct t7xx_cldma_hw. ++ * ++ * Write uplink and downlink configuration to CLDMA HW. 
++ */ ++void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info) ++{ ++ u32 ul_cfg, dl_cfg; ++ ++ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); ++ dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG); ++ /* Configure the DRAM address mode */ ++ ul_cfg &= ~UL_CFG_BIT_MODE_MASK; ++ dl_cfg &= ~DL_CFG_BIT_MODE_MASK; ++ ++ if (hw_info->hw_mode == MODE_BIT_64) { ++ ul_cfg |= UL_CFG_BIT_MODE_64; ++ dl_cfg |= DL_CFG_BIT_MODE_64; ++ } else if (hw_info->hw_mode == MODE_BIT_40) { ++ ul_cfg |= UL_CFG_BIT_MODE_40; ++ dl_cfg |= DL_CFG_BIT_MODE_40; ++ } else if (hw_info->hw_mode == MODE_BIT_36) { ++ ul_cfg |= UL_CFG_BIT_MODE_36; ++ dl_cfg |= DL_CFG_BIT_MODE_36; ++ } ++ ++ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); ++ dl_cfg |= DL_CFG_UP_HW_LAST; ++ iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG); ++ iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK); ++ iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK); ++ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); ++ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); ++} ++ ++void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD : ++ hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD; ++ iowrite32(CLDMA_ALL_Q, reg); ++} ++ ++void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) ++{ ++ void __iomem *reg; ++ ++ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : ++ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; ++ iowrite32(TXRX_STATUS_BITMASK, reg); ++ iowrite32(EMPTY_STATUS_BITMASK, reg); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_cldma.h +@@ -0,0 +1,180 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Andy Shevchenko ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_CLDMA_H__ ++#define __T7XX_CLDMA_H__ ++ ++#include ++#include ++ ++#define CLDMA_TXQ_NUM 8 ++#define CLDMA_RXQ_NUM 8 ++#define CLDMA_ALL_Q GENMASK(7, 0) ++ ++/* Interrupt status bits */ ++#define EMPTY_STATUS_BITMASK GENMASK(15, 8) ++#define TXRX_STATUS_BITMASK GENMASK(7, 0) ++#define EQ_STA_BIT_OFFSET 8 ++#define L2_INT_BIT_COUNT 16 ++#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK) ++ ++#define TQ_ERR_INT_BITMASK GENMASK(23, 16) ++#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) ++ ++#define RQ_ERR_INT_BITMASK GENMASK(23, 16) ++#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) ++ ++#define CLDMA0_AO_BASE 0x10049000 ++#define CLDMA0_PD_BASE 0x1021d000 ++#define CLDMA1_AO_BASE 0x1004b000 ++#define CLDMA1_PD_BASE 0x1021f000 ++ ++#define CLDMA_R_AO_BASE 0x10023000 ++#define CLDMA_R_PD_BASE 0x1023d000 ++ ++/* CLDMA TX */ ++#define REG_CLDMA_UL_START_ADDRL_0 0x0004 ++#define REG_CLDMA_UL_START_ADDRH_0 0x0008 ++#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044 ++#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048 ++#define REG_CLDMA_UL_STATUS 0x0084 ++#define REG_CLDMA_UL_START_CMD 0x0088 ++#define REG_CLDMA_UL_RESUME_CMD 0x008c ++#define REG_CLDMA_UL_STOP_CMD 0x0090 ++#define REG_CLDMA_UL_ERROR 0x0094 ++#define REG_CLDMA_UL_CFG 0x0098 ++#define UL_CFG_BIT_MODE_36 BIT(5) ++#define UL_CFG_BIT_MODE_40 BIT(6) ++#define UL_CFG_BIT_MODE_64 BIT(7) ++#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5) ++ ++#define REG_CLDMA_UL_MEM 0x009c ++#define UL_MEM_CHECK_DIS BIT(0) ++ ++/* CLDMA RX */ ++#define REG_CLDMA_DL_START_CMD 0x05bc ++#define REG_CLDMA_DL_RESUME_CMD 0x05c0 ++#define REG_CLDMA_DL_STOP_CMD 0x05c4 ++#define REG_CLDMA_DL_MEM 0x0508 ++#define DL_MEM_CHECK_DIS BIT(0) ++ ++#define REG_CLDMA_DL_CFG 0x0404 ++#define DL_CFG_UP_HW_LAST BIT(2) ++#define DL_CFG_BIT_MODE_36 BIT(10) ++#define DL_CFG_BIT_MODE_40 BIT(11) ++#define DL_CFG_BIT_MODE_64 BIT(12) ++#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10) ++ ++#define REG_CLDMA_DL_START_ADDRL_0 0x0478 ++#define REG_CLDMA_DL_START_ADDRH_0 0x047c ++#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8 ++#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc ++#define REG_CLDMA_DL_STATUS 0x04f8 ++ ++/* CLDMA MISC */ ++#define REG_CLDMA_L2TISAR0 0x0810 ++#define REG_CLDMA_L2TISAR1 0x0814 ++#define REG_CLDMA_L2TIMR0 0x0818 ++#define REG_CLDMA_L2TIMR1 0x081c ++#define REG_CLDMA_L2TIMCR0 0x0820 ++#define REG_CLDMA_L2TIMCR1 0x0824 ++#define REG_CLDMA_L2TIMSR0 0x0828 ++#define REG_CLDMA_L2TIMSR1 0x082c ++#define REG_CLDMA_L3TISAR0 0x0830 ++#define REG_CLDMA_L3TISAR1 0x0834 ++#define REG_CLDMA_L2RISAR0 0x0850 ++#define REG_CLDMA_L2RISAR1 0x0854 ++#define REG_CLDMA_L3RISAR0 0x0870 ++#define REG_CLDMA_L3RISAR1 0x0874 ++#define REG_CLDMA_IP_BUSY 0x08b4 ++#define IP_BUSY_WAKEUP BIT(0) ++#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0) ++#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0) ++ ++/* CLDMA MISC */ ++#define REG_CLDMA_L2RIMR0 0x0858 ++#define REG_CLDMA_L2RIMR1 0x085c ++#define REG_CLDMA_L2RIMCR0 0x0860 ++#define REG_CLDMA_L2RIMCR1 0x0864 ++#define REG_CLDMA_L2RIMSR0 0x0868 ++#define REG_CLDMA_L2RIMSR1 0x086c ++#define REG_CLDMA_BUSY_MASK 0x0954 ++#define BUSY_MASK_PCIE BIT(0) ++#define BUSY_MASK_AP BIT(1) ++#define BUSY_MASK_MD BIT(2) ++ ++#define REG_CLDMA_INT_MASK 0x0960 ++ ++/* CLDMA RESET */ ++#define REG_INFRA_RST4_SET 0x0730 ++#define RST4_CLDMA1_SW_RST_SET 
BIT(20) ++ ++#define REG_INFRA_RST4_CLR 0x0734 ++#define RST4_CLDMA1_SW_RST_CLR BIT(20) ++ ++#define REG_INFRA_RST2_SET 0x0140 ++#define RST2_PMIC_SW_RST_SET BIT(18) ++ ++#define REG_INFRA_RST2_CLR 0x0144 ++#define RST2_PMIC_SW_RST_CLR BIT(18) ++ ++enum mtk_txrx { ++ MTK_TX, ++ MTK_RX, ++}; ++ ++enum t7xx_hw_mode { ++ MODE_BIT_32, ++ MODE_BIT_36, ++ MODE_BIT_40, ++ MODE_BIT_64, ++}; ++ ++struct t7xx_cldma_hw { ++ enum t7xx_hw_mode hw_mode; ++ void __iomem *ap_ao_base; ++ void __iomem *ap_pdn_base; ++ u32 phy_interrupt_id; ++}; ++ ++void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx); ++unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info); ++void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info); ++void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); ++void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); ++void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, ++ unsigned int qno, u64 address, enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_reset(void __iomem *ao_base); ++void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); ++unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, ++ enum mtk_txrx tx_rx); ++void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info); ++void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info); ++bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno); ++#endif +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -0,0 +1,1192 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Andy Shevchenko ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_cldma.h" ++#include "t7xx_hif_cldma.h" ++#include "t7xx_mhccif.h" ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_reg.h" ++#include "t7xx_state_monitor.h" ++ ++#define MAX_TX_BUDGET 16 ++#define MAX_RX_BUDGET 16 ++ ++#define CHECK_Q_STOP_TIMEOUT_US 1000000 ++#define CHECK_Q_STOP_STEP_US 10000 ++ ++#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */ ++ ++static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, ++ enum mtk_txrx tx_rx, unsigned int index) ++{ ++ queue->dir = tx_rx; ++ queue->index = index; ++ queue->md_ctrl = md_ctrl; ++ queue->tr_ring = NULL; ++ queue->tr_done = NULL; ++ queue->tx_next = NULL; ++} ++ ++static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, ++ enum mtk_txrx tx_rx, unsigned int index) ++{ ++ md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index); ++ init_waitqueue_head(&queue->req_wq); ++ spin_lock_init(&queue->ring_lock); ++} ++ ++static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr) ++{ ++ gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr)); ++ gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr)); ++} ++ ++static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr) ++{ ++ gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr)); ++ gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr)); ++} ++ ++static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, ++ size_t size) ++{ ++ req->skb = __dev_alloc_skb(size, GFP_KERNEL); ++ if (!req->skb) ++ return -ENOMEM; ++ ++ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, ++ skb_data_area_size(req->skb), DMA_FROM_DEVICE); ++ if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { ++ dev_kfree_skb_any(req->skb); ++ req->skb = NULL; ++ req->mapped_buff = 0; ++ dev_err(md_ctrl->dev, "DMA mapping failed\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ unsigned int hwo_polling_count = 0; ++ struct t7xx_cldma_hw *hw_info; ++ bool rx_not_done = true; ++ unsigned long flags; ++ int count = 0; ++ ++ hw_info = &md_ctrl->hw_info; ++ ++ do { ++ struct cldma_request *req; ++ struct cldma_gpd *gpd; ++ struct sk_buff *skb; ++ int ret; ++ ++ req = queue->tr_done; ++ if (!req) ++ return -ENODATA; ++ ++ gpd = req->gpd; ++ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { ++ dma_addr_t gpd_addr; ++ ++ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) { ++ dev_err(md_ctrl->dev, "PCIe Link disconnected\n"); ++ return -ENODEV; ++ } ++ ++ gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 + ++ queue->index * sizeof(u64)); ++ if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100) ++ return 0; ++ ++ udelay(1); ++ continue; ++ } ++ ++ hwo_polling_count = 0; ++ skb = req->skb; ++ ++ if (req->mapped_buff) { ++ dma_unmap_single(md_ctrl->dev, 
req->mapped_buff, ++ skb_data_area_size(skb), DMA_FROM_DEVICE); ++ req->mapped_buff = 0; ++ } ++ ++ skb->len = 0; ++ skb_reset_tail_pointer(skb); ++ skb_put(skb, le16_to_cpu(gpd->data_buff_len)); ++ ++ ret = md_ctrl->recv_skb(queue, skb); ++ /* Break processing, will try again later */ ++ if (ret < 0) ++ return ret; ++ ++ req->skb = NULL; ++ t7xx_cldma_gpd_set_data_ptr(gpd, 0); ++ ++ spin_lock_irqsave(&queue->ring_lock, flags); ++ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ req = queue->rx_refill; ++ ++ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size); ++ if (ret) ++ return ret; ++ ++ gpd = req->gpd; ++ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); ++ gpd->data_buff_len = 0; ++ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; ++ ++ spin_lock_irqsave(&queue->ring_lock, flags); ++ queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ ++ rx_not_done = ++count < budget || !need_resched(); ++ } while (rx_not_done); ++ ++ *over_budget = true; ++ return 0; ++} ++ ++static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ struct t7xx_cldma_hw *hw_info; ++ unsigned int pending_rx_int; ++ bool over_budget = false; ++ unsigned long flags; ++ int ret; ++ ++ hw_info = &md_ctrl->hw_info; ++ ++ do { ++ ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget); ++ if (ret == -ENODATA) ++ return 0; ++ else if (ret) ++ return ret; ++ ++ pending_rx_int = 0; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ if (md_ctrl->rxq_active & BIT(queue->index)) { ++ if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX)) ++ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX); ++ ++ pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index), ++ MTK_RX); ++ if (pending_rx_int) { ++ t7xx_cldma_hw_rx_done(hw_info, pending_rx_int); ++ ++ if (over_budget) { ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ return -EAGAIN; ++ } ++ } ++ } ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ } while (pending_rx_int); ++ ++ return 0; ++} ++ ++static void t7xx_cldma_rx_done(struct work_struct *work) ++{ ++ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ int value; ++ ++ value = t7xx_cldma_gpd_rx_collect(queue, queue->budget); ++ if (value && md_ctrl->rxq_active & BIT(queue->index)) { ++ queue_work(queue->worker, &queue->cldma_work); ++ return; ++ } ++ ++ t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); ++ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); ++ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); ++} ++ ++static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ unsigned int dma_len, count = 0; ++ struct cldma_request *req; ++ struct cldma_gpd *gpd; ++ unsigned long flags; ++ dma_addr_t dma_free; ++ struct sk_buff *skb; ++ ++ while (!kthread_should_stop()) { ++ spin_lock_irqsave(&queue->ring_lock, flags); ++ req = queue->tr_done; ++ if (!req) { ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ break; ++ } ++ gpd = req->gpd; ++ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ break; ++ } ++ queue->budget++; ++ dma_free = req->mapped_buff; ++ dma_len = 
le16_to_cpu(gpd->data_buff_len); ++ skb = req->skb; ++ req->skb = NULL; ++ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ ++ count++; ++ dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE); ++ dev_kfree_skb_any(skb); ++ } ++ ++ if (count) ++ wake_up_nr(&queue->req_wq, count); ++ ++ return count; ++} ++ ++static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ struct cldma_request *req; ++ dma_addr_t ul_curr_addr; ++ unsigned long flags; ++ bool pending_gpd; ++ ++ if (!(md_ctrl->txq_active & BIT(queue->index))) ++ return; ++ ++ spin_lock_irqsave(&queue->ring_lock, flags); ++ req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry); ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ ++ pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ if (pending_gpd) { ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ ++ /* Check current processing TGPD, 64-bit address is in a table by Q index */ ++ ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + ++ queue->index * sizeof(u64)); ++ if (req->gpd_addr != ul_curr_addr) { ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", ++ md_ctrl->hif_id, queue->index); ++ return; ++ } ++ ++ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX); ++ } ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static void t7xx_cldma_tx_done(struct work_struct *work) ++{ ++ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ struct t7xx_cldma_hw *hw_info; ++ unsigned int l2_tx_int; ++ unsigned long flags; ++ ++ hw_info = &md_ctrl->hw_info; ++ t7xx_cldma_gpd_tx_collect(queue); ++ l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index), ++ MTK_TX); ++ if (l2_tx_int & EQ_STA_BIT(queue->index)) { ++ t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index)); ++ t7xx_cldma_txq_empty_hndl(queue); ++ } ++ ++ if (l2_tx_int & BIT(queue->index)) { ++ t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index)); ++ queue_work(queue->worker, &queue->cldma_work); ++ return; ++ } ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ if (md_ctrl->txq_active & BIT(queue->index)) { ++ t7xx_cldma_clear_ip_busy(hw_info); ++ t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX); ++ t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); ++ } ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, ++ struct cldma_ring *ring, enum dma_data_direction tx_rx) ++{ ++ struct cldma_request *req_cur, *req_next; ++ ++ list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { ++ if (req_cur->mapped_buff && req_cur->skb) { ++ dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, ++ skb_data_area_size(req_cur->skb), tx_rx); ++ req_cur->mapped_buff = 0; ++ } ++ ++ dev_kfree_skb_any(req_cur->skb); ++ ++ if (req_cur->gpd) ++ dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr); ++ ++ list_del(&req_cur->entry); ++ kfree(req_cur); ++ } ++} ++ ++static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size) ++{ ++ struct cldma_request *req; ++ int val; ++ ++ req = kzalloc(sizeof(*req), GFP_KERNEL); ++ if (!req) ++ 
return NULL; ++ ++ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); ++ if (!req->gpd) ++ goto err_free_req; ++ ++ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size); ++ if (val) ++ goto err_free_pool; ++ ++ return req; ++ ++err_free_pool: ++ dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr); ++ ++err_free_req: ++ kfree(req); ++ ++ return NULL; ++} ++ ++static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) ++{ ++ struct cldma_request *req; ++ struct cldma_gpd *gpd; ++ int i; ++ ++ INIT_LIST_HEAD(&ring->gpd_ring); ++ ring->length = MAX_RX_BUDGET; ++ ++ for (i = 0; i < ring->length; i++) { ++ req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); ++ if (!req) { ++ t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE); ++ return -ENOMEM; ++ } ++ ++ gpd = req->gpd; ++ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); ++ gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size); ++ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; ++ INIT_LIST_HEAD(&req->entry); ++ list_add_tail(&req->entry, &ring->gpd_ring); ++ } ++ ++ /* Link previous GPD to next GPD, circular */ ++ list_for_each_entry(req, &ring->gpd_ring, entry) { ++ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); ++ gpd = req->gpd; ++ } ++ ++ return 0; ++} ++ ++static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl) ++{ ++ struct cldma_request *req; ++ ++ req = kzalloc(sizeof(*req), GFP_KERNEL); ++ if (!req) ++ return NULL; ++ ++ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); ++ if (!req->gpd) { ++ kfree(req); ++ return NULL; ++ } ++ ++ return req; ++} ++ ++static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) ++{ ++ struct cldma_request *req; ++ struct cldma_gpd *gpd; ++ int i; ++ ++ INIT_LIST_HEAD(&ring->gpd_ring); ++ ring->length = MAX_TX_BUDGET; ++ ++ for (i = 0; i < ring->length; i++) { ++ req = t7xx_alloc_tx_request(md_ctrl); ++ if (!req) { ++ t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE); ++ return -ENOMEM; ++ } ++ ++ gpd = req->gpd; ++ gpd->flags = GPD_FLAGS_IOC; ++ INIT_LIST_HEAD(&req->entry); ++ list_add_tail(&req->entry, &ring->gpd_ring); ++ } ++ ++ /* Link previous GPD to next GPD, circular */ ++ list_for_each_entry(req, &ring->gpd_ring, entry) { ++ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); ++ gpd = req->gpd; ++ } ++ ++ return 0; ++} ++ ++/** ++ * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values. ++ * @queue: Pointer to the queue structure. 
++ * ++ * Called with ring_lock (unless called during initialization phase) ++ */ ++static void t7xx_cldma_q_reset(struct cldma_queue *queue) ++{ ++ struct cldma_request *req; ++ ++ req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry); ++ queue->tr_done = req; ++ queue->budget = queue->tr_ring->length; ++ ++ if (queue->dir == MTK_TX) ++ queue->tx_next = req; ++ else ++ queue->rx_refill = req; ++} ++ ++static void t7xx_cldma_rxq_init(struct cldma_queue *queue) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ ++ queue->dir = MTK_RX; ++ queue->tr_ring = &md_ctrl->rx_ring[queue->index]; ++ t7xx_cldma_q_reset(queue); ++} ++ ++static void t7xx_cldma_txq_init(struct cldma_queue *queue) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ ++ queue->dir = MTK_TX; ++ queue->tr_ring = &md_ctrl->tx_ring[queue->index]; ++ t7xx_cldma_q_reset(queue); ++} ++ ++static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl) ++{ ++ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); ++} ++ ++static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl) ++{ ++ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); ++} ++ ++static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) ++{ ++ unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val; ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ int i; ++ ++ /* L2 raw interrupt status */ ++ l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); ++ l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); ++ l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0); ++ l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0); ++ l2_tx_int &= ~l2_tx_int_msk; ++ l2_rx_int &= ~l2_rx_int_msk; ++ ++ if (l2_tx_int) { ++ if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) { ++ /* Read and clear L3 TX interrupt status */ ++ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); ++ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); ++ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); ++ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); ++ } ++ ++ t7xx_cldma_hw_tx_done(hw_info, l2_tx_int); ++ if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { ++ for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { ++ if (i < CLDMA_TXQ_NUM) { ++ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); ++ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); ++ queue_work(md_ctrl->txq[i].worker, ++ &md_ctrl->txq[i].cldma_work); ++ } else { ++ t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]); ++ } ++ } ++ } ++ } ++ ++ if (l2_rx_int) { ++ if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) { ++ /* Read and clear L3 RX interrupt status */ ++ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); ++ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); ++ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); ++ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); ++ } ++ ++ t7xx_cldma_hw_rx_done(hw_info, l2_rx_int); ++ if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { ++ l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; ++ for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { ++ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); ++ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); ++ queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); ++ } ++ } ++ } ++} ++ ++static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl) ++{ ++ struct t7xx_cldma_hw 
*hw_info = &md_ctrl->hw_info; ++ unsigned int tx_active; ++ unsigned int rx_active; ++ ++ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) ++ return false; ++ ++ tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX); ++ rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX); ++ ++ return tx_active || rx_active; ++} ++ ++/** ++ * t7xx_cldma_stop() - Stop CLDMA. ++ * @md_ctrl: CLDMA context structure. ++ * ++ * Stop TX and RX queues. Disable L1 and L2 interrupts. ++ * Clear status registers. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code from polling cldma_queues_active. ++ */ ++int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl) ++{ ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ bool active; ++ int i, ret; ++ ++ md_ctrl->rxq_active = 0; ++ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); ++ md_ctrl->txq_active = 0; ++ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); ++ md_ctrl->txq_started = 0; ++ t7xx_cldma_disable_irq(md_ctrl); ++ t7xx_cldma_hw_stop(hw_info, MTK_RX); ++ t7xx_cldma_hw_stop(hw_info, MTK_TX); ++ t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK); ++ t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK); ++ ++ if (md_ctrl->is_late_init) { ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) ++ flush_work(&md_ctrl->txq[i].cldma_work); ++ ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) ++ flush_work(&md_ctrl->rxq[i].cldma_work); ++ } ++ ++ ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US, ++ CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl); ++ if (ret) ++ dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id); ++ ++ return ret; ++} ++ ++static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl) ++{ ++ int i; ++ ++ if (!md_ctrl->is_late_init) ++ return; ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) ++ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); ++ ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) ++ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE); ++ ++ dma_pool_destroy(md_ctrl->gpd_dmapool); ++ md_ctrl->gpd_dmapool = NULL; ++ md_ctrl->is_late_init = false; ++} ++ ++void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl) ++{ ++ unsigned long flags; ++ int i; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ md_ctrl->txq_active = 0; ++ md_ctrl->rxq_active = 0; ++ t7xx_cldma_disable_irq(md_ctrl); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) { ++ cancel_work_sync(&md_ctrl->txq[i].cldma_work); ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ } ++ ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) { ++ cancel_work_sync(&md_ctrl->rxq[i].cldma_work); ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ } ++ ++ t7xx_cldma_late_release(md_ctrl); ++} ++ ++/** ++ * t7xx_cldma_start() - Start CLDMA. ++ * @md_ctrl: CLDMA context structure. ++ * ++ * Set TX/RX start address. ++ * Start all RX queues and enable L2 interrupt. 
++ */ ++void t7xx_cldma_start(struct cldma_ctrl *md_ctrl) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ if (md_ctrl->is_late_init) { ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ int i; ++ ++ t7xx_cldma_enable_irq(md_ctrl); ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) { ++ if (md_ctrl->txq[i].tr_done) ++ t7xx_cldma_hw_set_start_addr(hw_info, i, ++ md_ctrl->txq[i].tr_done->gpd_addr, ++ MTK_TX); ++ } ++ ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) { ++ if (md_ctrl->rxq[i].tr_done) ++ t7xx_cldma_hw_set_start_addr(hw_info, i, ++ md_ctrl->rxq[i].tr_done->gpd_addr, ++ MTK_RX); ++ } ++ ++ /* Enable L2 interrupt */ ++ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); ++ t7xx_cldma_hw_start(hw_info); ++ md_ctrl->txq_started = 0; ++ md_ctrl->txq_active |= TXRX_STATUS_BITMASK; ++ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; ++ } ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum) ++{ ++ struct cldma_queue *txq = &md_ctrl->txq[qnum]; ++ struct cldma_request *req; ++ struct cldma_gpd *gpd; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&txq->ring_lock, flags); ++ t7xx_cldma_q_reset(txq); ++ list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) { ++ gpd = req->gpd; ++ gpd->flags &= ~GPD_FLAGS_HWO; ++ t7xx_cldma_gpd_set_data_ptr(gpd, 0); ++ gpd->data_buff_len = 0; ++ dev_kfree_skb_any(req->skb); ++ req->skb = NULL; ++ } ++ spin_unlock_irqrestore(&txq->ring_lock, flags); ++} ++ ++static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) ++{ ++ struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; ++ struct cldma_request *req; ++ struct cldma_gpd *gpd; ++ unsigned long flags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&rxq->ring_lock, flags); ++ t7xx_cldma_q_reset(rxq); ++ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { ++ gpd = req->gpd; ++ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; ++ gpd->data_buff_len = 0; ++ ++ if (req->skb) { ++ req->skb->len = 0; ++ skb_reset_tail_pointer(req->skb); ++ } ++ } ++ ++ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { ++ if (req->skb) ++ continue; ++ ++ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size); ++ if (ret) ++ break; ++ ++ t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff); ++ } ++ spin_unlock_irqrestore(&rxq->ring_lock, flags); ++ ++ return ret; ++} ++ ++void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) ++{ ++ int i; ++ ++ if (tx_rx == MTK_TX) { ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) ++ t7xx_cldma_clear_txq(md_ctrl, i); ++ } else { ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) ++ t7xx_cldma_clear_rxq(md_ctrl, i); ++ } ++} ++ ++void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) ++{ ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx); ++ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx); ++ if (tx_rx == MTK_RX) ++ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; ++ else ++ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; ++ t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req, ++ struct sk_buff *skb) ++{ ++ struct cldma_ctrl *md_ctrl = queue->md_ctrl; ++ struct cldma_gpd *gpd = tx_req->gpd; ++ unsigned long flags; ++ ++ /* Update GPD */ ++ 
tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE); ++ ++ if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) { ++ dev_err(md_ctrl->dev, "DMA mapping failed\n"); ++ return -ENOMEM; ++ } ++ ++ t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff); ++ gpd->data_buff_len = cpu_to_le16(skb->len); ++ ++ /* This lock must cover TGPD setting, as even without a resume operation, ++ * CLDMA can send next HWO=1 if last TGPD just finished. ++ */ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ if (md_ctrl->txq_active & BIT(queue->index)) ++ gpd->flags |= GPD_FLAGS_HWO; ++ ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ tx_req->skb = skb; ++ return 0; ++} ++ ++/* Called with cldma_lock */ ++static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, ++ struct cldma_request *prev_req) ++{ ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ ++ /* Check whether the device was powered off (CLDMA start address is not set) */ ++ if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) { ++ t7xx_cldma_hw_init(hw_info); ++ t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX); ++ md_ctrl->txq_started &= ~BIT(qno); ++ } ++ ++ if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) { ++ if (md_ctrl->txq_started & BIT(qno)) ++ t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX); ++ else ++ t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX); ++ ++ md_ctrl->txq_started |= BIT(qno); ++ } ++} ++ ++/** ++ * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets. ++ * @md_ctrl: CLDMA context structure. ++ * @recv_skb: Receiving skb callback. ++ */ ++void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, ++ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) ++{ ++ md_ctrl->recv_skb = recv_skb; ++} ++ ++/** ++ * t7xx_cldma_send_skb() - Send control data to modem. ++ * @md_ctrl: CLDMA context structure. ++ * @qno: Queue number. ++ * @skb: Socket buffer. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ENOMEM - Allocation failure. ++ * * -EINVAL - Invalid queue request. ++ * * -EIO - Queue is not active. ++ * * -ETIMEDOUT - Timeout waiting for the device to wake up. ++ */ ++int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) ++{ ++ struct cldma_request *tx_req; ++ struct cldma_queue *queue; ++ unsigned long flags; ++ int ret; ++ ++ if (qno >= CLDMA_TXQ_NUM) ++ return -EINVAL; ++ ++ queue = &md_ctrl->txq[qno]; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ if (!(md_ctrl->txq_active & BIT(qno))) { ++ ret = -EIO; ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ goto allow_sleep; ++ } ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ do { ++ spin_lock_irqsave(&queue->ring_lock, flags); ++ tx_req = queue->tx_next; ++ if (queue->budget > 0 && !tx_req->skb) { ++ struct list_head *gpd_ring = &queue->tr_ring->gpd_ring; ++ ++ queue->budget--; ++ t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb); ++ queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ ++ /* Protect the access to the modem for queues operations (resume/start) ++ * which access shared locations by all the queues. ++ * cldma_lock is independent of ring_lock which is per queue. 
++ */ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ break; ++ } ++ spin_unlock_irqrestore(&queue->ring_lock, flags); ++ ++ if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ } ++ ++ ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0); ++ } while (!ret); ++ ++allow_sleep: ++ return ret; ++} ++ ++static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) ++{ ++ char dma_pool_name[32]; ++ int i, j, ret; ++ ++ if (md_ctrl->is_late_init) { ++ dev_err(md_ctrl->dev, "CLDMA late init was already done\n"); ++ return -EALREADY; ++ } ++ ++ snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id); ++ ++ md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev, ++ sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0); ++ if (!md_ctrl->gpd_dmapool) { ++ dev_err(md_ctrl->dev, "DMA pool alloc fail\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) { ++ ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]); ++ if (ret) { ++ dev_err(md_ctrl->dev, "control TX ring init fail\n"); ++ goto err_free_tx_ring; ++ } ++ } ++ ++ for (j = 0; j < CLDMA_RXQ_NUM; j++) { ++ md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU; ++ ++ if (j == CLDMA_RXQ_NUM - 1) ++ md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ; ++ ++ ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); ++ if (ret) { ++ dev_err(md_ctrl->dev, "Control RX ring init fail\n"); ++ goto err_free_rx_ring; ++ } ++ } ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) ++ t7xx_cldma_txq_init(&md_ctrl->txq[i]); ++ ++ for (j = 0; j < CLDMA_RXQ_NUM; j++) ++ t7xx_cldma_rxq_init(&md_ctrl->rxq[j]); ++ ++ md_ctrl->is_late_init = true; ++ return 0; ++ ++err_free_rx_ring: ++ while (j--) ++ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE); ++ ++err_free_tx_ring: ++ while (i--) ++ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); ++ ++ return ret; ++} ++ ++static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr) ++{ ++ return addr + phy_addr - addr_trs1; ++} ++ ++static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) ++{ ++ struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr; ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ u32 phy_ao_base, phy_pd_base; ++ ++ if (md_ctrl->hif_id != CLDMA_ID_MD) ++ return; ++ ++ phy_ao_base = CLDMA1_AO_BASE; ++ phy_pd_base = CLDMA1_PD_BASE; ++ hw_info->phy_interrupt_id = CLDMA1_INT; ++ hw_info->hw_mode = MODE_BIT_64; ++ hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, ++ pbase->pcie_dev_reg_trsl_addr, phy_ao_base); ++ hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, ++ pbase->pcie_dev_reg_trsl_addr, phy_pd_base); ++} ++ ++static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) ++{ ++ dev_kfree_skb_any(skb); ++ return 0; ++} ++ ++int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct device *dev = &t7xx_dev->pdev->dev; ++ struct cldma_ctrl *md_ctrl; ++ ++ md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); ++ if (!md_ctrl) ++ return -ENOMEM; ++ ++ md_ctrl->t7xx_dev = t7xx_dev; ++ md_ctrl->dev = dev; ++ md_ctrl->hif_id = hif_id; ++ md_ctrl->recv_skb = 
t7xx_cldma_default_recv_skb; ++ t7xx_hw_info_init(md_ctrl); ++ t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; ++ return 0; ++} ++ ++void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) ++{ ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_stop(hw_info, MTK_TX); ++ t7xx_cldma_hw_stop(hw_info, MTK_RX); ++ t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); ++ t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); ++ t7xx_cldma_hw_init(hw_info); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data) ++{ ++ struct cldma_ctrl *md_ctrl = data; ++ u32 interrupt; ++ ++ interrupt = md_ctrl->hw_info.phy_interrupt_id; ++ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt); ++ t7xx_cldma_irq_work_cb(md_ctrl); ++ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt); ++ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt); ++ return IRQ_HANDLED; ++} ++ ++static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) ++{ ++ int i; ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) { ++ if (md_ctrl->txq[i].worker) { ++ destroy_workqueue(md_ctrl->txq[i].worker); ++ md_ctrl->txq[i].worker = NULL; ++ } ++ } ++ ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) { ++ if (md_ctrl->rxq[i].worker) { ++ destroy_workqueue(md_ctrl->rxq[i].worker); ++ md_ctrl->rxq[i].worker = NULL; ++ } ++ } ++} ++ ++/** ++ * t7xx_cldma_init() - Initialize CLDMA. ++ * @md_ctrl: CLDMA context structure. ++ * ++ * Initialize HIF TX/RX queue structure. ++ * Register CLDMA callback ISR with PCIe driver. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code from failure sub-initializations. ++ */ ++int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) ++{ ++ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ++ int i; ++ ++ md_ctrl->txq_active = 0; ++ md_ctrl->rxq_active = 0; ++ md_ctrl->is_late_init = false; ++ ++ spin_lock_init(&md_ctrl->cldma_lock); ++ ++ for (i = 0; i < CLDMA_TXQ_NUM; i++) { ++ md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); ++ md_ctrl->txq[i].worker = ++ alloc_workqueue("md_hif%d_tx%d_worker", ++ WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 
0 : WQ_HIGHPRI), ++ 1, md_ctrl->hif_id, i); ++ if (!md_ctrl->txq[i].worker) ++ goto err_workqueue; ++ ++ INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done); ++ } ++ ++ for (i = 0; i < CLDMA_RXQ_NUM; i++) { ++ md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); ++ INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); ++ ++ md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", ++ WQ_UNBOUND | WQ_MEM_RECLAIM, ++ 1, md_ctrl->hif_id, i); ++ if (!md_ctrl->rxq[i].worker) ++ goto err_workqueue; ++ } ++ ++ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); ++ md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler; ++ md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL; ++ md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl; ++ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); ++ return 0; ++ ++err_workqueue: ++ t7xx_cldma_destroy_wqs(md_ctrl); ++ return -ENOMEM; ++} ++ ++void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl) ++{ ++ t7xx_cldma_late_release(md_ctrl); ++ t7xx_cldma_late_init(md_ctrl); ++} ++ ++void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) ++{ ++ t7xx_cldma_stop(md_ctrl); ++ t7xx_cldma_late_release(md_ctrl); ++ t7xx_cldma_destroy_wqs(md_ctrl); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +@@ -0,0 +1,126 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ */ ++ ++#ifndef __T7XX_HIF_CLDMA_H__ ++#define __T7XX_HIF_CLDMA_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_cldma.h" ++#include "t7xx_pci.h" ++ ++/** ++ * enum cldma_id - Identifiers for CLDMA HW units. ++ * @CLDMA_ID_MD: Modem control channel. ++ * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). ++ * @CLDMA_NUM: Number of CLDMA HW units available. 
++ */ ++enum cldma_id { ++ CLDMA_ID_MD, ++ CLDMA_ID_AP, ++ CLDMA_NUM ++}; ++ ++struct cldma_gpd { ++ u8 flags; ++ u8 not_used1; ++ __le16 rx_data_allow_len; ++ __le32 next_gpd_ptr_h; ++ __le32 next_gpd_ptr_l; ++ __le32 data_buff_bd_ptr_h; ++ __le32 data_buff_bd_ptr_l; ++ __le16 data_buff_len; ++ __le16 not_used2; ++}; ++ ++struct cldma_request { ++ struct cldma_gpd *gpd; /* Virtual address for CPU */ ++ dma_addr_t gpd_addr; /* Physical address for DMA */ ++ struct sk_buff *skb; ++ dma_addr_t mapped_buff; ++ struct list_head entry; ++}; ++ ++struct cldma_ring { ++ struct list_head gpd_ring; /* Ring of struct cldma_request */ ++ unsigned int length; /* Number of struct cldma_request */ ++ int pkt_size; ++}; ++ ++struct cldma_queue { ++ struct cldma_ctrl *md_ctrl; ++ enum mtk_txrx dir; ++ unsigned int index; ++ struct cldma_ring *tr_ring; ++ struct cldma_request *tr_done; ++ struct cldma_request *rx_refill; ++ struct cldma_request *tx_next; ++ int budget; /* Same as ring buffer size by default */ ++ spinlock_t ring_lock; ++ wait_queue_head_t req_wq; /* Only for TX */ ++ struct workqueue_struct *worker; ++ struct work_struct cldma_work; ++}; ++ ++struct cldma_ctrl { ++ enum cldma_id hif_id; ++ struct device *dev; ++ struct t7xx_pci_dev *t7xx_dev; ++ struct cldma_queue txq[CLDMA_TXQ_NUM]; ++ struct cldma_queue rxq[CLDMA_RXQ_NUM]; ++ unsigned short txq_active; ++ unsigned short rxq_active; ++ unsigned short txq_started; ++ spinlock_t cldma_lock; /* Protects CLDMA structure */ ++ /* Assumes T/R GPD/BD/SPD have the same size */ ++ struct dma_pool *gpd_dmapool; ++ struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; ++ struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; ++ struct t7xx_cldma_hw hw_info; ++ bool is_late_init; ++ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); ++}; ++ ++#define GPD_FLAGS_HWO BIT(0) ++#define GPD_FLAGS_IOC BIT(7) ++#define GPD_DMAPOOL_ALIGN 16 ++ ++#define CLDMA_MTU 3584 /* 3.5kB */ ++ ++int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev); ++void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl); ++int t7xx_cldma_init(struct cldma_ctrl *md_ctrl); ++void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl); ++void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl); ++void t7xx_cldma_start(struct cldma_ctrl *md_ctrl); ++int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl); ++void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl); ++void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, ++ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)); ++int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb); ++void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); ++void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); ++ ++#endif /* __T7XX_HIF_CLDMA_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_reg.h +@@ -0,0 +1,33 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Chiranjeevi Rapolu ++ * ++ * Contributors: ++ * Amir Hanania ++ * Andy Shevchenko ++ * Eliot Lee ++ * Moises Veleta ++ * Ricardo Martinez ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_REG_H__ ++#define __T7XX_REG_H__ ++ ++enum t7xx_int { ++ DPMAIF_INT, ++ CLDMA0_INT, ++ CLDMA1_INT, ++ CLDMA2_INT, ++ MHCCIF_INT, ++ DPMAIF2_INT, ++ SAP_RGU_INT, ++ CLDMA3_INT, ++}; ++ ++#endif /* __T7XX_REG_H__ */ diff --git a/target/linux/generic/backport-5.15/621-v5.19-02-net-wwan-t7xx-Add-core-components.patch b/target/linux/generic/backport-5.15/621-v5.19-02-net-wwan-t7xx-Add-core-components.patch new file mode 100644 index 0000000000..6c8bdce138 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-02-net-wwan-t7xx-Add-core-components.patch @@ -0,0 +1,2223 @@ +From 13e920d93e37fcaef4a9309515798a3cae9dcf19 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:00 -0700 +Subject: [PATCH] net: wwan: t7xx: Add core components +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Registers the t7xx device driver with the kernel. Setup all the core +components: PCIe layer, Modem Host Cross Core Interface (MHCCIF), +modem control operations, modem state machine, and build +infrastructure. + +* PCIe layer code implements driver probe and removal. +* MHCCIF provides interrupt channels to communicate events + such as handshake, PM and port enumeration. +* Modem control implements the entry point for modem init, + reset and exit. +* The modem status monitor is a state machine used by modem control + to complete initialization and stop. It is used also to propagate + exception events reported by other components. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Ilpo Järvinen +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/Kconfig | 14 + + drivers/net/wwan/Makefile | 1 + + drivers/net/wwan/t7xx/Makefile | 12 + + drivers/net/wwan/t7xx/t7xx_mhccif.c | 102 ++++ + drivers/net/wwan/t7xx/t7xx_mhccif.h | 37 ++ + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 498 +++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_modem_ops.h | 85 ++++ + drivers/net/wwan/t7xx/t7xx_pci.c | 225 +++++++++ + drivers/net/wwan/t7xx/t7xx_pci.h | 64 +++ + drivers/net/wwan/t7xx/t7xx_pcie_mac.c | 262 ++++++++++ + drivers/net/wwan/t7xx/t7xx_pcie_mac.h | 31 ++ + drivers/net/wwan/t7xx/t7xx_reg.h | 104 ++++ + drivers/net/wwan/t7xx/t7xx_state_monitor.c | 540 +++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_state_monitor.h | 133 +++++ + 14 files changed, 2108 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/Makefile + create mode 100644 drivers/net/wwan/t7xx/t7xx_mhccif.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_mhccif.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_modem_ops.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_modem_ops.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_pci.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_pci.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_pcie_mac.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_pcie_mac.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_state_monitor.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_state_monitor.h + +--- a/drivers/net/wwan/Kconfig ++++ b/drivers/net/wwan/Kconfig +@@ -79,6 +79,20 @@ config IOSM + + If unsure, say N. 
+ ++config MTK_T7XX ++ tristate "MediaTek PCIe 5G WWAN modem T7xx device" ++ depends on PCI ++ help ++ Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device. ++ Adapts WWAN framework and provides network interface like wwan0 ++ and tty interfaces like wwan0at0 (AT protocol), wwan0mbim0 ++ (MBIM protocol), etc. ++ ++ To compile this driver as a module, choose M here: the module will be ++ called mtk_t7xx. ++ ++ If unsure, say N. ++ + endif # WWAN + + endmenu +--- a/drivers/net/wwan/Makefile ++++ b/drivers/net/wwan/Makefile +@@ -12,3 +12,4 @@ obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ + obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o + obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o + obj-$(CONFIG_IOSM) += iosm/ ++obj-$(CONFIG_MTK_T7XX) += t7xx/ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -0,0 +1,12 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++ccflags-y += -Werror ++ ++obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o ++mtk_t7xx-y:= t7xx_pci.o \ ++ t7xx_pcie_mac.o \ ++ t7xx_mhccif.o \ ++ t7xx_state_monitor.o \ ++ t7xx_modem_ops.o \ ++ t7xx_cldma.o \ ++ t7xx_hif_cldma.o \ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c +@@ -0,0 +1,102 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Amir Hanania ++ * Ricardo Martinez ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_mhccif.h" ++#include "t7xx_modem_ops.h" ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_reg.h" ++ ++static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) ++{ ++ void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; ++ ++ /* Clear level 2 interrupt */ ++ iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK); ++ /* Ensure write is complete */ ++ t7xx_mhccif_read_sw_int_sts(t7xx_dev); ++ /* Clear level 1 interrupt */ ++ t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT); ++} ++ ++static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data) ++{ ++ struct t7xx_pci_dev *t7xx_dev = data; ++ u32 int_status, val; ++ ++ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); ++ iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ ++ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); ++ if (int_status & D2H_SW_INT_MASK) { ++ int ret = t7xx_pci_mhccif_isr(t7xx_dev); ++ ++ if (ret) ++ dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret); ++ } ++ ++ t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); ++ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); ++ return IRQ_HANDLED; ++} ++ ++u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev) ++{ ++ return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS); ++} ++ ++void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val) ++{ ++ iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET); ++} ++ ++void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val) ++{ ++ iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR); ++} ++ ++u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev) ++{ ++ return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK); ++} ++ ++static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev) ++{ ++ t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base 
+ ++ MHCCIF_RC_DEV_BASE - ++ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; ++ ++ t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler; ++ t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread; ++ t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev; ++} ++ ++void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel) ++{ ++ void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; ++ ++ iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY); ++ iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h +@@ -0,0 +1,37 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Amir Hanania ++ * Ricardo Martinez ++ */ ++ ++#ifndef __T7XX_MHCCIF_H__ ++#define __T7XX_MHCCIF_H__ ++ ++#include ++ ++#include "t7xx_pci.h" ++#include "t7xx_reg.h" ++ ++#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT | \ ++ D2H_INT_EXCEPTION_INIT_DONE | \ ++ D2H_INT_EXCEPTION_CLEARQ_DONE | \ ++ D2H_INT_EXCEPTION_ALLQ_RESET | \ ++ D2H_INT_PORT_ENUM | \ ++ D2H_INT_ASYNC_MD_HK) ++ ++void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); ++void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val); ++u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev); ++u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel); ++ ++#endif /*__T7XX_MHCCIF_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -0,0 +1,498 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Eliot Lee ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_cldma.h" ++#include "t7xx_hif_cldma.h" ++#include "t7xx_mhccif.h" ++#include "t7xx_modem_ops.h" ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_reg.h" ++#include "t7xx_state_monitor.h" ++ ++#define RGU_RESET_DELAY_MS 10 ++#define PORT_RESET_DELAY_MS 2000 ++#define EX_HS_TIMEOUT_MS 5000 ++#define EX_HS_POLL_DELAY_MS 10 ++ ++static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) ++{ ++ return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; ++} ++ ++/** ++ * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts. ++ * @t7xx_dev: MTK device. ++ * ++ * Check the interrupt status and queue commands accordingly. ++ * ++ * Returns: ++ ** 0 - Success. ++ ** -EINVAL - Failure to get FSM control. 
++ */ ++int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_modem *md = t7xx_dev->md; ++ struct t7xx_fsm_ctl *ctl; ++ unsigned int int_sta; ++ int ret = 0; ++ u32 mask; ++ ++ ctl = md->fsm_ctl; ++ if (!ctl) { ++ dev_err_ratelimited(&t7xx_dev->pdev->dev, ++ "MHCCIF interrupt received before initializing MD monitor\n"); ++ return -EINVAL; ++ } ++ ++ spin_lock_bh(&md->exp_lock); ++ int_sta = t7xx_get_interrupt_status(t7xx_dev); ++ md->exp_id |= int_sta; ++ if (md->exp_id & D2H_INT_EXCEPTION_INIT) { ++ if (ctl->md_state == MD_STATE_INVALID || ++ ctl->md_state == MD_STATE_WAITING_FOR_HS1 || ++ ctl->md_state == MD_STATE_WAITING_FOR_HS2 || ++ ctl->md_state == MD_STATE_READY) { ++ md->exp_id &= ~D2H_INT_EXCEPTION_INIT; ++ ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX); ++ } ++ } else if (md->exp_id & D2H_INT_PORT_ENUM) { ++ md->exp_id &= ~D2H_INT_PORT_ENUM; ++ ++ if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START || ++ ctl->curr_state == FSM_STATE_STOPPED) ++ ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM); ++ } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) { ++ mask = t7xx_mhccif_mask_get(t7xx_dev); ++ if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { ++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ queue_work(md->handshake_wq, &md->handshake_work); ++ } ++ } ++ spin_unlock_bh(&md->exp_lock); ++ ++ return ret; ++} ++ ++static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr; ++ void __iomem *reset_pcie_reg; ++ u32 val; ++ ++ reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA - ++ pbase_addr->pcie_dev_reg_trsl_addr; ++ val = ioread32(reset_pcie_reg); ++ iowrite32(val, reset_pcie_reg); ++} ++ ++void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev) ++{ ++ /* Clear L2 */ ++ t7xx_clr_device_irq_via_pcie(t7xx_dev); ++ /* Clear L1 */ ++ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); ++} ++ ++static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) ++{ ++#ifdef CONFIG_ACPI ++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; ++ struct device *dev = &t7xx_dev->pdev->dev; ++ acpi_status acpi_ret; ++ acpi_handle handle; ++ ++ handle = ACPI_HANDLE(dev); ++ if (!handle) { ++ dev_err(dev, "ACPI handle not found\n"); ++ return -EFAULT; ++ } ++ ++ if (!acpi_has_method(handle, fn_name)) { ++ dev_err(dev, "%s method not found\n", fn_name); ++ return -EFAULT; ++ } ++ ++ acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer); ++ if (ACPI_FAILURE(acpi_ret)) { ++ dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret)); ++ return -EFAULT; ++ } ++ ++#endif ++ return 0; ++} ++ ++int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) ++{ ++ return t7xx_acpi_reset(t7xx_dev, "_RST"); ++} ++ ++static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) ++{ ++ u32 val; ++ ++ val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); ++ if (val & MISC_RESET_TYPE_PLDR) ++ t7xx_acpi_reset(t7xx_dev, "MRST._RST"); ++ else if (val & MISC_RESET_TYPE_FLDR) ++ t7xx_acpi_fldr_func(t7xx_dev); ++} ++ ++static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) ++{ ++ struct t7xx_pci_dev *t7xx_dev = data; ++ ++ msleep(RGU_RESET_DELAY_MS); ++ t7xx_reset_device_via_pmic(t7xx_dev); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data) ++{ ++ struct t7xx_pci_dev *t7xx_dev = data; ++ struct t7xx_modem *modem; ++ ++ t7xx_clear_rgu_irq(t7xx_dev); 
++ if (!t7xx_dev->rgu_pci_irq_en) ++ return IRQ_HANDLED; ++ ++ modem = t7xx_dev->md; ++ modem->rgu_irq_asserted = true; ++ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); ++ return IRQ_WAKE_THREAD; ++} ++ ++static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev) ++{ ++ /* Registers RGU callback ISR with PCIe driver */ ++ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); ++ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); ++ ++ t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler; ++ t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread; ++ t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev; ++ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); ++} ++ ++/** ++ * t7xx_cldma_exception() - CLDMA exception handler. ++ * @md_ctrl: modem control struct. ++ * @stage: exception stage. ++ * ++ * Part of the modem exception recovery. ++ * Stages are one after the other as describe below: ++ * HIF_EX_INIT: Disable and clear TXQ. ++ * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. ++ * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. ++ */ ++ ++/* Modem Exception Handshake Flow ++ * ++ * Modem HW Exception interrupt received ++ * (MD_IRQ_CCIF_EX) ++ * | ++ * +---------v--------+ ++ * | HIF_EX_INIT | : Disable and clear TXQ ++ * +------------------+ ++ * | ++ * +---------v--------+ ++ * | HIF_EX_INIT_DONE | : Wait for the init to be done ++ * +------------------+ ++ * | ++ * +---------v--------+ ++ * |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ ++ * +------------------+ : Flush TX/RX workqueues ++ * | ++ * +---------v--------+ ++ * |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA ++ * +------------------+ ++ */ ++static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage) ++{ ++ switch (stage) { ++ case HIF_EX_INIT: ++ t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX); ++ t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX); ++ break; ++ ++ case HIF_EX_CLEARQ_DONE: ++ /* We do not want to get CLDMA IRQ when MD is ++ * resetting CLDMA after it got clearq_ack. ++ */ ++ t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX); ++ t7xx_cldma_stop(md_ctrl); ++ ++ if (md_ctrl->hif_id == CLDMA_ID_MD) ++ t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base); ++ ++ t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX); ++ break; ++ ++ case HIF_EX_ALLQ_RESET: ++ t7xx_cldma_hw_init(&md_ctrl->hw_info); ++ t7xx_cldma_start(md_ctrl); ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) ++{ ++ struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev; ++ ++ if (stage == HIF_EX_CLEARQ_DONE) { ++ /* Give DHL time to flush data */ ++ msleep(PORT_RESET_DELAY_MS); ++ } ++ ++ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); ++ ++ if (stage == HIF_EX_INIT) ++ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); ++ else if (stage == HIF_EX_CLEARQ_DONE) ++ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK); ++} ++ ++static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id) ++{ ++ unsigned int waited_time_ms = 0; ++ ++ do { ++ if (md->exp_id & event_id) ++ return 0; ++ ++ waited_time_ms += EX_HS_POLL_DELAY_MS; ++ msleep(EX_HS_POLL_DELAY_MS); ++ } while (waited_time_ms < EX_HS_TIMEOUT_MS); ++ ++ return -EFAULT; ++} ++ ++static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev) ++{ ++ /* Register the MHCCIF ISR for MD exception, port enum and ++ * async handshake notifications. 
++ */ ++ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); ++ t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM); ++ ++ /* Register RGU IRQ handler for sAP exception notification */ ++ t7xx_dev->rgu_pci_irq_en = true; ++ t7xx_pcie_register_rgu_isr(t7xx_dev); ++} ++ ++static void t7xx_md_hk_wq(struct work_struct *work) ++{ ++ struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ ++ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); ++ md->core_md.ready = true; ++ wake_up(&ctl->async_hk_wq); ++} ++ ++void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) ++{ ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ void __iomem *mhccif_base; ++ unsigned int int_sta; ++ unsigned long flags; ++ ++ switch (evt_id) { ++ case FSM_PRE_START: ++ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); ++ break; ++ ++ case FSM_START: ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); ++ ++ spin_lock_irqsave(&md->exp_lock, flags); ++ int_sta = t7xx_get_interrupt_status(md->t7xx_dev); ++ md->exp_id |= int_sta; ++ if (md->exp_id & D2H_INT_EXCEPTION_INIT) { ++ ctl->exp_flg = true; ++ md->exp_id &= ~D2H_INT_EXCEPTION_INIT; ++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ } else if (ctl->exp_flg) { ++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { ++ queue_work(md->handshake_wq, &md->handshake_work); ++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; ++ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ } else { ++ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ } ++ spin_unlock_irqrestore(&md->exp_lock, flags); ++ ++ t7xx_mhccif_mask_clr(md->t7xx_dev, ++ D2H_INT_EXCEPTION_INIT | ++ D2H_INT_EXCEPTION_INIT_DONE | ++ D2H_INT_EXCEPTION_CLEARQ_DONE | ++ D2H_INT_EXCEPTION_ALLQ_RESET); ++ break; ++ ++ case FSM_READY: ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++void t7xx_md_exception_handshake(struct t7xx_modem *md) ++{ ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ int ret; ++ ++ t7xx_md_exception(md, HIF_EX_INIT); ++ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE); ++ if (ret) ++ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE); ++ ++ t7xx_md_exception(md, HIF_EX_INIT_DONE); ++ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE); ++ if (ret) ++ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE); ++ ++ t7xx_md_exception(md, HIF_EX_CLEARQ_DONE); ++ ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET); ++ if (ret) ++ dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET); ++ ++ t7xx_md_exception(md, HIF_EX_ALLQ_RESET); ++} ++ ++static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct device *dev = &t7xx_dev->pdev->dev; ++ struct t7xx_modem *md; ++ ++ md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL); ++ if (!md) ++ return NULL; ++ ++ md->t7xx_dev = t7xx_dev; ++ t7xx_dev->md = md; ++ spin_lock_init(&md->exp_lock); ++ md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, ++ 0, "md_hk_wq"); ++ if (!md->handshake_wq) ++ return NULL; ++ ++ INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); ++ return md; ++} ++ ++int 
t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_modem *md = t7xx_dev->md; ++ ++ md->md_init_finish = false; ++ md->exp_id = 0; ++ t7xx_fsm_reset(md); ++ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); ++ md->md_init_finish = true; ++ return 0; ++} ++ ++/** ++ * t7xx_md_init() - Initialize modem. ++ * @t7xx_dev: MTK device. ++ * ++ * Allocate and initialize MD control block, and initialize data path. ++ * Register MHCCIF ISR and RGU ISR, and start the state machine. ++ * ++ * Return: ++ ** 0 - Success. ++ ** -ENOMEM - Allocation failure. ++ */ ++int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_modem *md; ++ int ret; ++ ++ md = t7xx_md_alloc(t7xx_dev); ++ if (!md) ++ return -ENOMEM; ++ ++ ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev); ++ if (ret) ++ goto err_destroy_hswq; ++ ++ ret = t7xx_fsm_init(md); ++ if (ret) ++ goto err_destroy_hswq; ++ ++ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); ++ if (ret) ++ goto err_uninit_fsm; ++ ++ ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); ++ if (ret) /* fsm_uninit flushes cmd queue */ ++ goto err_uninit_md_cldma; ++ ++ t7xx_md_sys_sw_init(t7xx_dev); ++ md->md_init_finish = true; ++ return 0; ++ ++err_uninit_md_cldma: ++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); ++ ++err_uninit_fsm: ++ t7xx_fsm_uninit(md); ++ ++err_destroy_hswq: ++ destroy_workqueue(md->handshake_wq); ++ dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n"); ++ return ret; ++} ++ ++void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_modem *md = t7xx_dev->md; ++ ++ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); ++ ++ if (!md->md_init_finish) ++ return; ++ ++ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); ++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_fsm_uninit(md); ++ destroy_workqueue(md->handshake_wq); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h +@@ -0,0 +1,85 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Eliot Lee ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_MODEM_OPS_H__ ++#define __T7XX_MODEM_OPS_H__ ++ ++#include ++#include ++#include ++ ++#include "t7xx_hif_cldma.h" ++#include "t7xx_pci.h" ++ ++#define FEATURE_COUNT 64 ++ ++/** ++ * enum hif_ex_stage - HIF exception handshake stages with the HW. ++ * @HIF_EX_INIT: Disable and clear TXQ. ++ * @HIF_EX_INIT_DONE: Polling for initialization to be done. ++ * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. ++ * @HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. 
++ */ ++enum hif_ex_stage { ++ HIF_EX_INIT, ++ HIF_EX_INIT_DONE, ++ HIF_EX_CLEARQ_DONE, ++ HIF_EX_ALLQ_RESET, ++}; ++ ++struct mtk_runtime_feature { ++ u8 feature_id; ++ u8 support_info; ++ u8 reserved[2]; ++ __le32 data_len; ++ __le32 data[]; ++}; ++ ++enum md_event_id { ++ FSM_PRE_START, ++ FSM_START, ++ FSM_READY, ++}; ++ ++struct t7xx_sys_info { ++ bool ready; ++}; ++ ++struct t7xx_modem { ++ struct cldma_ctrl *md_ctrl[CLDMA_NUM]; ++ struct t7xx_pci_dev *t7xx_dev; ++ struct t7xx_sys_info core_md; ++ bool md_init_finish; ++ bool rgu_irq_asserted; ++ struct workqueue_struct *handshake_wq; ++ struct work_struct handshake_work; ++ struct t7xx_fsm_ctl *fsm_ctl; ++ struct port_proxy *port_prox; ++ unsigned int exp_id; ++ spinlock_t exp_lock; /* Protects exception events */ ++}; ++ ++void t7xx_md_exception_handshake(struct t7xx_modem *md); ++void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id); ++int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev); ++int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev); ++int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev); ++int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev); ++ ++#endif /* __T7XX_MODEM_OPS_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -0,0 +1,225 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Ricardo Martinez ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Amir Hanania ++ * Andy Shevchenko ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Moises Veleta ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_mhccif.h" ++#include "t7xx_modem_ops.h" ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_reg.h" ++ ++#define T7XX_PCI_IREG_BASE 0 ++#define T7XX_PCI_EREG_BASE 2 ++ ++static int t7xx_request_irq(struct pci_dev *pdev) ++{ ++ struct t7xx_pci_dev *t7xx_dev; ++ int ret, i; ++ ++ t7xx_dev = pci_get_drvdata(pdev); ++ ++ for (i = 0; i < EXT_INT_NUM; i++) { ++ const char *irq_descr; ++ int irq_vec; ++ ++ if (!t7xx_dev->intr_handler[i]) ++ continue; ++ ++ irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", ++ dev_driver_string(&pdev->dev), i); ++ if (!irq_descr) { ++ ret = -ENOMEM; ++ break; ++ } ++ ++ irq_vec = pci_irq_vector(pdev, i); ++ ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], ++ t7xx_dev->intr_thread[i], 0, irq_descr, ++ t7xx_dev->callback_param[i]); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); ++ break; ++ } ++ } ++ ++ if (ret) { ++ while (i--) { ++ if (!t7xx_dev->intr_handler[i]) ++ continue; ++ ++ free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); ++ } ++ } ++ ++ return ret; ++} ++ ++static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct pci_dev *pdev = t7xx_dev->pdev; ++ int ret; ++ ++ /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */ ++ ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); ++ return ret; ++ } ++ ++ ret = t7xx_request_irq(pdev); ++ if (ret) { ++ pci_free_irq_vectors(pdev); ++ return ret; ++ } ++ ++ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); ++ return 0; ++} ++ ++static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev) 
++{ ++ int ret, i; ++ ++ if (!t7xx_dev->pdev->msix_cap) ++ return -EINVAL; ++ ++ ret = t7xx_setup_msix(t7xx_dev); ++ if (ret) ++ return ret; ++ ++ /* IPs enable interrupts when ready */ ++ for (i = 0; i < EXT_INT_NUM; i++) ++ t7xx_pcie_mac_set_int(t7xx_dev, i); ++ ++ return 0; ++} ++ ++static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev) ++{ ++ t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + ++ INFRACFG_AO_DEV_CHIP - ++ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; ++} ++ ++static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ++{ ++ struct t7xx_pci_dev *t7xx_dev; ++ int ret; ++ ++ t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); ++ if (!t7xx_dev) ++ return -ENOMEM; ++ ++ pci_set_drvdata(pdev, t7xx_dev); ++ t7xx_dev->pdev = pdev; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pdev); ++ ++ ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE), ++ pci_name(pdev)); ++ if (ret) { ++ dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); ++ return -ENOMEM; ++ } ++ ++ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); ++ if (ret) { ++ dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); ++ return ret; ++ } ++ ++ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); ++ if (ret) { ++ dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); ++ return ret; ++ } ++ ++ IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; ++ t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; ++ ++ t7xx_pcie_mac_atr_init(t7xx_dev); ++ t7xx_pci_infracfg_ao_calc(t7xx_dev); ++ t7xx_mhccif_init(t7xx_dev); ++ ++ ret = t7xx_md_init(t7xx_dev); ++ if (ret) ++ return ret; ++ ++ t7xx_pcie_mac_interrupts_dis(t7xx_dev); ++ ++ ret = t7xx_interrupt_init(t7xx_dev); ++ if (ret) { ++ t7xx_md_exit(t7xx_dev); ++ return ret; ++ } ++ ++ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); ++ t7xx_pcie_mac_interrupts_en(t7xx_dev); ++ ++ return 0; ++} ++ ++static void t7xx_pci_remove(struct pci_dev *pdev) ++{ ++ struct t7xx_pci_dev *t7xx_dev; ++ int i; ++ ++ t7xx_dev = pci_get_drvdata(pdev); ++ t7xx_md_exit(t7xx_dev); ++ ++ for (i = 0; i < EXT_INT_NUM; i++) { ++ if (!t7xx_dev->intr_handler[i]) ++ continue; ++ ++ free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); ++ } ++ ++ pci_free_irq_vectors(t7xx_dev->pdev); ++} ++ ++static const struct pci_device_id t7xx_pci_table[] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) }, ++ { } ++}; ++MODULE_DEVICE_TABLE(pci, t7xx_pci_table); ++ ++static struct pci_driver t7xx_pci_driver = { ++ .name = "mtk_t7xx", ++ .id_table = t7xx_pci_table, ++ .probe = t7xx_pci_probe, ++ .remove = t7xx_pci_remove, ++}; ++ ++module_pci_driver(t7xx_pci_driver); ++ ++MODULE_AUTHOR("MediaTek Inc"); ++MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver"); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_pci.h +@@ -0,0 +1,64 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Ricardo Martinez ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Moises Veleta ++ */ ++ ++#ifndef __T7XX_PCI_H__ ++#define __T7XX_PCI_H__ ++ ++#include ++#include ++#include ++ ++#include "t7xx_reg.h" ++ ++/* struct t7xx_addr_base - holds base addresses ++ * @pcie_mac_ireg_base: PCIe MAC register base ++ * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers ++ * @pcie_dev_reg_trsl_addr: used to calculate the register base address ++ * @infracfg_ao_base: base address used in CLDMA reset operations ++ * @mhccif_rc_base: host view of MHCCIF rc base addr ++ */ ++struct t7xx_addr_base { ++ void __iomem *pcie_mac_ireg_base; ++ void __iomem *pcie_ext_reg_base; ++ u32 pcie_dev_reg_trsl_addr; ++ void __iomem *infracfg_ao_base; ++ void __iomem *mhccif_rc_base; ++}; ++ ++typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); ++ ++/* struct t7xx_pci_dev - MTK device context structure ++ * @intr_handler: array of handler function for request_threaded_irq ++ * @intr_thread: array of thread_fn for request_threaded_irq ++ * @callback_param: array of cookie passed back to interrupt functions ++ * @pdev: PCI device ++ * @base_addr: memory base addresses of HW components ++ * @md: modem interface ++ * @ccmni_ctlb: context structure used to control the network data path ++ * @rgu_pci_irq_en: RGU callback ISR registered and active ++ */ ++struct t7xx_pci_dev { ++ t7xx_intr_callback intr_handler[EXT_INT_NUM]; ++ t7xx_intr_callback intr_thread[EXT_INT_NUM]; ++ void *callback_param[EXT_INT_NUM]; ++ struct pci_dev *pdev; ++ struct t7xx_addr_base base_addr; ++ struct t7xx_modem *md; ++ struct t7xx_ccmni_ctrl *ccmni_ctlb; ++ bool rgu_pci_irq_en; ++}; ++ ++#endif /* __T7XX_PCI_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c +@@ -0,0 +1,262 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Moises Veleta ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Ricardo Martinez ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_reg.h" ++ ++#define T7XX_PCIE_REG_BAR 2 ++#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0 ++#define T7XX_PCIE_REG_TABLE_NUM 0 ++#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0 ++ ++#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0 ++#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2 ++#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0 ++#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0 ++#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0 ++#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1 ++#define T7XX_PCIE_DEV_DMA_SIZE 0 ++ ++enum t7xx_atr_src_port { ++ ATR_SRC_PCI_WIN0, ++ ATR_SRC_PCI_WIN1, ++ ATR_SRC_AXIS_0, ++ ATR_SRC_AXIS_1, ++ ATR_SRC_AXIS_2, ++ ATR_SRC_AXIS_3, ++}; ++ ++enum t7xx_atr_dst_port { ++ ATR_DST_PCI_TRX, ++ ATR_DST_PCI_CONFIG, ++ ATR_DST_AXIM_0 = 4, ++ ATR_DST_AXIM_1, ++ ATR_DST_AXIM_2, ++ ATR_DST_AXIM_3, ++}; ++ ++struct t7xx_atr_config { ++ u64 src_addr; ++ u64 trsl_addr; ++ u64 size; ++ u32 port; ++ u32 table; ++ enum t7xx_atr_dst_port trsl_id; ++ u32 transparent; ++}; ++ ++static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port) ++{ ++ void __iomem *reg; ++ int i, offset; ++ ++ for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) { ++ offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i; ++ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; ++ iowrite64(0, reg); ++ } ++} ++ ++static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg) ++{ ++ struct device *dev = &t7xx_dev->pdev->dev; ++ void __iomem *pbase = IREG_BASE(t7xx_dev); ++ int atr_size, pos, offset; ++ void __iomem *reg; ++ u64 value; ++ ++ if (cfg->transparent) { ++ /* No address conversion is performed */ ++ atr_size = ATR_TRANSPARENT_SIZE; ++ } else { ++ if (cfg->src_addr & (cfg->size - 1)) { ++ dev_err(dev, "Source address is not aligned to size\n"); ++ return -EINVAL; ++ } ++ ++ if (cfg->trsl_addr & (cfg->size - 1)) { ++ dev_err(dev, "Translation address %llx is not aligned to size %llx\n", ++ cfg->trsl_addr, cfg->size - 1); ++ return -EINVAL; ++ } ++ ++ pos = __ffs64(cfg->size); ++ ++ /* HW calculates the address translation space as 2^(atr_size + 1) */ ++ atr_size = pos - 1; ++ } ++ ++ offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table; ++ ++ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset; ++ value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT; ++ iowrite64(value, reg); ++ ++ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset; ++ iowrite32(cfg->trsl_id, reg); ++ ++ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; ++ value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0); ++ iowrite64(value, reg); ++ ++ /* Ensure ATR is set */ ++ ioread64(reg); ++ return 0; ++} ++ ++/** ++ * t7xx_pcie_mac_atr_init() - Initialize address translation. ++ * @t7xx_dev: MTK device. ++ * ++ * Setup ATR for ports & device. 
++ */ ++void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_atr_config cfg; ++ u32 i; ++ ++ /* Disable for all ports */ ++ for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++) ++ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i); ++ ++ memset(&cfg, 0, sizeof(cfg)); ++ /* Config ATR for RC to access device's register */ ++ cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR); ++ cfg.size = T7XX_PCIE_REG_SIZE_CHIP; ++ cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP; ++ cfg.port = T7XX_PCIE_REG_PORT; ++ cfg.table = T7XX_PCIE_REG_TABLE_NUM; ++ cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT; ++ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port); ++ t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg); ++ ++ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP; ++ ++ /* Config ATR for EP to access RC's memory */ ++ for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) { ++ cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR; ++ cfg.size = T7XX_PCIE_DEV_DMA_SIZE; ++ cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR; ++ cfg.port = i; ++ cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM; ++ cfg.trsl_id = ATR_DST_PCI_TRX; ++ cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT; ++ t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port); ++ t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg); ++ } ++} ++ ++/** ++ * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts. ++ * @t7xx_dev: MTK device. ++ * @enable: Enable/disable. ++ * ++ * Enable or disable device interrupts. ++ */ ++static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable) ++{ ++ u32 value; ++ ++ value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL); ++ ++ if (enable) ++ value &= ~ISTAT_HST_CTRL_DIS; ++ else ++ value |= ISTAT_HST_CTRL_DIS; ++ ++ iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL); ++} ++ ++void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev) ++{ ++ t7xx_pcie_mac_enable_disable_int(t7xx_dev, true); ++} ++ ++void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev) ++{ ++ t7xx_pcie_mac_enable_disable_int(t7xx_dev, false); ++} ++ ++/** ++ * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type. ++ * @t7xx_dev: MTK device. ++ * @int_type: Interrupt type. ++ * @clear: Clear/set. ++ * ++ * Clear or set device interrupt by type. ++ */ ++static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev, ++ enum t7xx_int int_type, bool clear) ++{ ++ void __iomem *reg; ++ u32 val; ++ ++ if (clear) ++ reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0; ++ else ++ reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0; ++ ++ val = BIT(EXT_INT_START + int_type); ++ iowrite32(val, reg); ++} ++ ++void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) ++{ ++ t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true); ++} ++ ++void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) ++{ ++ t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false); ++} ++ ++/** ++ * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type. ++ * @t7xx_dev: MTK device. ++ * @int_type: Interrupt type. ++ * ++ * Enable or disable device interrupts' status by type. ++ */ ++void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) ++{ ++ void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0; ++ u32 val = BIT(EXT_INT_START + int_type); ++ ++ iowrite32(val, reg); ++} ++ ++/** ++ * t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration. 
++ * @t7xx_dev: MTK device. ++ * @irq_count: Number of MSIX IRQ vectors. ++ * ++ * Write IRQ count to device. ++ */ ++void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count) ++{ ++ u32 val = ffs(irq_count) * 2 - 1; ++ ++ iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h +@@ -0,0 +1,31 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Sreehari Kancharla ++ * ++ * Contributors: ++ * Moises Veleta ++ * Ricardo Martinez ++ */ ++ ++#ifndef __T7XX_PCIE_MAC_H__ ++#define __T7XX_PCIE_MAC_H__ ++ ++#include "t7xx_pci.h" ++#include "t7xx_reg.h" ++ ++#define IREG_BASE(t7xx_dev) ((t7xx_dev)->base_addr.pcie_mac_ireg_base) ++ ++void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); ++void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); ++void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); ++void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count); ++ ++#endif /* __T7XX_PCIE_MAC_H__ */ +--- a/drivers/net/wwan/t7xx/t7xx_reg.h ++++ b/drivers/net/wwan/t7xx/t7xx_reg.h +@@ -19,6 +19,110 @@ + #ifndef __T7XX_REG_H__ + #define __T7XX_REG_H__ + ++#include ++ ++/* Device base address offset */ ++#define MHCCIF_RC_DEV_BASE 0x10024000 ++ ++#define REG_RC2EP_SW_BSY 0x04 ++#define REG_RC2EP_SW_INT_START 0x08 ++ ++#define REG_RC2EP_SW_TCHNUM 0x0c ++#define H2D_CH_EXCEPTION_ACK 1 ++#define H2D_CH_EXCEPTION_CLEARQ_ACK 2 ++#define H2D_CH_DS_LOCK 3 ++/* Channels 4-8 are reserved */ ++#define H2D_CH_SUSPEND_REQ 9 ++#define H2D_CH_RESUME_REQ 10 ++#define H2D_CH_SUSPEND_REQ_AP 11 ++#define H2D_CH_RESUME_REQ_AP 12 ++#define H2D_CH_DEVICE_RESET 13 ++#define H2D_CH_DRM_DISABLE_AP 14 ++ ++#define REG_EP2RC_SW_INT_STS 0x10 ++#define REG_EP2RC_SW_INT_ACK 0x14 ++#define REG_EP2RC_SW_INT_EAP_MASK 0x20 ++#define REG_EP2RC_SW_INT_EAP_MASK_SET 0x30 ++#define REG_EP2RC_SW_INT_EAP_MASK_CLR 0x40 ++ ++#define D2H_INT_DS_LOCK_ACK BIT(0) ++#define D2H_INT_EXCEPTION_INIT BIT(1) ++#define D2H_INT_EXCEPTION_INIT_DONE BIT(2) ++#define D2H_INT_EXCEPTION_CLEARQ_DONE BIT(3) ++#define D2H_INT_EXCEPTION_ALLQ_RESET BIT(4) ++#define D2H_INT_PORT_ENUM BIT(5) ++/* Bits 6-10 are reserved */ ++#define D2H_INT_SUSPEND_ACK BIT(11) ++#define D2H_INT_RESUME_ACK BIT(12) ++#define D2H_INT_SUSPEND_ACK_AP BIT(13) ++#define D2H_INT_RESUME_ACK_AP BIT(14) ++#define D2H_INT_ASYNC_SAP_HK BIT(15) ++#define D2H_INT_ASYNC_MD_HK BIT(16) ++ ++/* Register base */ ++#define INFRACFG_AO_DEV_CHIP 0x10001000 ++ ++/* ATR setting */ ++#define T7XX_PCIE_REG_TRSL_ADDR_CHIP 0x10000000 ++#define T7XX_PCIE_REG_SIZE_CHIP 0x00400000 ++ ++/* Reset Generic Unit (RGU) */ ++#define TOPRGU_CH_PCIE_IRQ_STA 0x1000790c ++ ++#define ATR_PORT_OFFSET 0x100 ++#define ATR_TABLE_OFFSET 0x20 ++#define ATR_TABLE_NUM_PER_ATR 8 ++#define ATR_TRANSPARENT_SIZE 0x3f ++ ++/* PCIE_MAC_IREG Register Definition */ ++ ++#define ISTAT_HST_CTRL 0x01ac ++#define ISTAT_HST_CTRL_DIS BIT(0) ++ ++#define T7XX_PCIE_MISC_CTRL 0x0348 ++#define T7XX_PCIE_MISC_MAC_SLEEP_DIS BIT(7) ++ ++#define T7XX_PCIE_CFG_MSIX 0x03ec ++#define ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR 0x0600 
++#define ATR_PCIE_WIN0_T0_TRSL_ADDR 0x0608 ++#define ATR_PCIE_WIN0_T0_TRSL_PARAM 0x0610 ++#define ATR_PCIE_WIN0_ADDR_ALGMT GENMASK_ULL(63, 12) ++ ++#define ATR_SRC_ADDR_INVALID 0x007f ++ ++#define T7XX_PCIE_PM_RESUME_STATE 0x0d0c ++ ++enum t7xx_pm_resume_state { ++ PM_RESUME_REG_STATE_L3, ++ PM_RESUME_REG_STATE_L1, ++ PM_RESUME_REG_STATE_INIT, ++ PM_RESUME_REG_STATE_EXP, ++ PM_RESUME_REG_STATE_L2, ++ PM_RESUME_REG_STATE_L2_EXP, ++}; ++ ++#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c ++#define MISC_STAGE_MASK GENMASK(2, 0) ++#define MISC_RESET_TYPE_PLDR BIT(26) ++#define MISC_RESET_TYPE_FLDR BIT(27) ++#define LINUX_STAGE 4 ++ ++#define T7XX_PCIE_RESOURCE_STATUS 0x0d28 ++#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0) ++ ++#define DISABLE_ASPM_LOWPWR 0x0e50 ++#define ENABLE_ASPM_LOWPWR 0x0e54 ++#define T7XX_L1_BIT(i) BIT((i) * 4 + 1) ++#define T7XX_L1_1_BIT(i) BIT((i) * 4 + 2) ++#define T7XX_L1_2_BIT(i) BIT((i) * 4 + 3) ++ ++#define MSIX_ISTAT_HST_GRP0_0 0x0f00 ++#define IMASK_HOST_MSIX_SET_GRP0_0 0x3000 ++#define IMASK_HOST_MSIX_CLR_GRP0_0 0x3080 ++#define EXT_INT_START 24 ++#define EXT_INT_NUM 8 ++#define MSIX_MSK_SET_ALL GENMASK(31, 24) ++ + enum t7xx_int { + DPMAIF_INT, + CLDMA0_INT, +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c +@@ -0,0 +1,540 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Eliot Lee ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_hif_cldma.h" ++#include "t7xx_mhccif.h" ++#include "t7xx_modem_ops.h" ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_reg.h" ++#include "t7xx_state_monitor.h" ++ ++#define FSM_DRM_DISABLE_DELAY_MS 200 ++#define FSM_EVENT_POLL_INTERVAL_MS 20 ++#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000 ++#define FSM_MD_EX_PASS_TIMEOUT_MS 45000 ++#define FSM_CMD_TIMEOUT_MS 2000 ++ ++void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) ++{ ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctl->notifier_lock, flags); ++ list_add_tail(¬ifier->entry, &ctl->notifier_list); ++ spin_unlock_irqrestore(&ctl->notifier_lock, flags); ++} ++ ++void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) ++{ ++ struct t7xx_fsm_notifier *notifier_cur, *notifier_next; ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctl->notifier_lock, flags); ++ list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { ++ if (notifier_cur == notifier) ++ list_del(¬ifier->entry); ++ } ++ spin_unlock_irqrestore(&ctl->notifier_lock, flags); ++} ++ ++static void fsm_state_notify(struct t7xx_modem *md, enum md_state state) ++{ ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ struct t7xx_fsm_notifier *notifier; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctl->notifier_lock, flags); ++ list_for_each_entry(notifier, &ctl->notifier_list, entry) { ++ spin_unlock_irqrestore(&ctl->notifier_lock, flags); ++ if (notifier->notifier_fn) ++ notifier->notifier_fn(state, notifier->data); ++ ++ spin_lock_irqsave(&ctl->notifier_lock, flags); ++ } ++ spin_unlock_irqrestore(&ctl->notifier_lock, flags); 
++} ++ ++void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) ++{ ++ ctl->md_state = state; ++ fsm_state_notify(ctl->md, state); ++} ++ ++static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) ++{ ++ if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { ++ *cmd->ret = result; ++ complete_all(cmd->done); ++ } ++ ++ kfree(cmd); ++} ++ ++static void fsm_del_kf_event(struct t7xx_fsm_event *event) ++{ ++ list_del(&event->entry); ++ kfree(event); ++} ++ ++static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) ++{ ++ struct device *dev = &ctl->md->t7xx_dev->pdev->dev; ++ struct t7xx_fsm_event *event, *evt_next; ++ struct t7xx_fsm_command *cmd, *cmd_next; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctl->command_lock, flags); ++ list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { ++ dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id); ++ list_del(&cmd->entry); ++ fsm_finish_command(ctl, cmd, -EINVAL); ++ } ++ spin_unlock_irqrestore(&ctl->command_lock, flags); ++ ++ spin_lock_irqsave(&ctl->event_lock, flags); ++ list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { ++ dev_warn(dev, "Unhandled event %d\n", event->event_id); ++ fsm_del_kf_event(event); ++ } ++ spin_unlock_irqrestore(&ctl->event_lock, flags); ++} ++ ++static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, ++ enum t7xx_fsm_event_state event_ignore, int retries) ++{ ++ struct t7xx_fsm_event *event; ++ bool event_received = false; ++ unsigned long flags; ++ int cnt = 0; ++ ++ while (cnt++ < retries && !event_received) { ++ bool sleep_required = true; ++ ++ if (kthread_should_stop()) ++ return; ++ ++ spin_lock_irqsave(&ctl->event_lock, flags); ++ event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); ++ if (event) { ++ event_received = event->event_id == event_expected; ++ if (event_received || event->event_id == event_ignore) { ++ fsm_del_kf_event(event); ++ sleep_required = false; ++ } ++ } ++ spin_unlock_irqrestore(&ctl->event_lock, flags); ++ ++ if (sleep_required) ++ msleep(FSM_EVENT_POLL_INTERVAL_MS); ++ } ++} ++ ++static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, ++ enum t7xx_ex_reason reason) ++{ ++ struct device *dev = &ctl->md->t7xx_dev->pdev->dev; ++ ++ if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { ++ if (cmd) ++ fsm_finish_command(ctl, cmd, -EINVAL); ++ ++ return; ++ } ++ ++ ctl->curr_state = FSM_STATE_EXCEPTION; ++ ++ switch (reason) { ++ case EXCEPTION_HS_TIMEOUT: ++ dev_err(dev, "Boot Handshake failure\n"); ++ break; ++ ++ case EXCEPTION_EVENT: ++ dev_err(dev, "Exception event\n"); ++ t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); ++ t7xx_md_exception_handshake(ctl->md); ++ ++ fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, ++ FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); ++ fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, ++ FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); ++ break; ++ ++ default: ++ dev_err(dev, "Exception %d\n", reason); ++ break; ++ } ++ ++ if (cmd) ++ fsm_finish_command(ctl, cmd, 0); ++} ++ ++static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) ++{ ++ ctl->curr_state = FSM_STATE_STOPPED; ++ ++ t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); ++ return t7xx_md_reset(ctl->md->t7xx_dev); ++} ++ ++static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) ++{ ++ 
if (ctl->curr_state == FSM_STATE_STOPPED) { ++ fsm_finish_command(ctl, cmd, -EINVAL); ++ return; ++ } ++ ++ fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); ++} ++ ++static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) ++{ ++ struct t7xx_pci_dev *t7xx_dev; ++ struct cldma_ctrl *md_ctrl; ++ int err; ++ ++ if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { ++ fsm_finish_command(ctl, cmd, -EINVAL); ++ return; ++ } ++ ++ md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; ++ t7xx_dev = ctl->md->t7xx_dev; ++ ++ ctl->curr_state = FSM_STATE_STOPPING; ++ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); ++ t7xx_cldma_stop(md_ctrl); ++ ++ if (!ctl->md->rgu_irq_asserted) { ++ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); ++ /* Wait for the DRM disable to take effect */ ++ msleep(FSM_DRM_DISABLE_DELAY_MS); ++ ++ err = t7xx_acpi_fldr_func(t7xx_dev); ++ if (err) ++ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); ++ } ++ ++ fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); ++} ++ ++static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) ++{ ++ if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) ++ return; ++ ++ ctl->md_state = MD_STATE_READY; ++ ++ fsm_state_notify(ctl->md, MD_STATE_READY); ++} ++ ++static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) ++{ ++ struct t7xx_modem *md = ctl->md; ++ ++ ctl->curr_state = FSM_STATE_READY; ++ t7xx_fsm_broadcast_ready_state(ctl); ++ t7xx_md_event_notify(md, FSM_READY); ++} ++ ++static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) ++{ ++ struct t7xx_modem *md = ctl->md; ++ struct device *dev; ++ ++ ctl->curr_state = FSM_STATE_STARTING; ++ ++ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); ++ t7xx_md_event_notify(md, FSM_START); ++ ++ wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, ++ HZ * 60); ++ dev = &md->t7xx_dev->pdev->dev; ++ ++ if (ctl->exp_flg) ++ dev_err(dev, "MD exception is captured during handshake\n"); ++ ++ if (!md->core_md.ready) { ++ dev_err(dev, "MD handshake timeout\n"); ++ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); ++ return -ETIMEDOUT; ++ } ++ ++ fsm_routine_ready(ctl); ++ return 0; ++} ++ ++static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) ++{ ++ struct t7xx_modem *md = ctl->md; ++ u32 dev_status; ++ int ret; ++ ++ if (!md) ++ return; ++ ++ if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && ++ ctl->curr_state != FSM_STATE_STOPPED) { ++ fsm_finish_command(ctl, cmd, -EINVAL); ++ return; ++ } ++ ++ ctl->curr_state = FSM_STATE_PRE_START; ++ t7xx_md_event_notify(md, FSM_PRE_START); ++ ++ ret = read_poll_timeout(ioread32, dev_status, ++ (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000, ++ false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); ++ if (ret) { ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ ++ fsm_finish_command(ctl, cmd, -ETIMEDOUT); ++ dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK); ++ return; ++ } ++ ++ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); ++ fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); ++} ++ ++static int fsm_main_thread(void *data) ++{ ++ struct t7xx_fsm_ctl *ctl = data; ++ struct t7xx_fsm_command *cmd; ++ unsigned long flags; ++ ++ while (!kthread_should_stop()) { ++ if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || ++ kthread_should_stop())) ++ continue; ++ ++ if 
(kthread_should_stop()) ++ break; ++ ++ spin_lock_irqsave(&ctl->command_lock, flags); ++ cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); ++ list_del(&cmd->entry); ++ spin_unlock_irqrestore(&ctl->command_lock, flags); ++ ++ switch (cmd->cmd_id) { ++ case FSM_CMD_START: ++ fsm_routine_start(ctl, cmd); ++ break; ++ ++ case FSM_CMD_EXCEPTION: ++ fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); ++ break; ++ ++ case FSM_CMD_PRE_STOP: ++ fsm_routine_stopping(ctl, cmd); ++ break; ++ ++ case FSM_CMD_STOP: ++ fsm_routine_stopped(ctl, cmd); ++ break; ++ ++ default: ++ fsm_finish_command(ctl, cmd, -EINVAL); ++ fsm_flush_event_cmd_qs(ctl); ++ break; ++ } ++ } ++ ++ return 0; ++} ++ ++int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) ++{ ++ DECLARE_COMPLETION_ONSTACK(done); ++ struct t7xx_fsm_command *cmd; ++ unsigned long flags; ++ int ret; ++ ++ cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL); ++ if (!cmd) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&cmd->entry); ++ cmd->cmd_id = cmd_id; ++ cmd->flag = flag; ++ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { ++ cmd->done = &done; ++ cmd->ret = &ret; ++ } ++ ++ spin_lock_irqsave(&ctl->command_lock, flags); ++ list_add_tail(&cmd->entry, &ctl->command_queue); ++ spin_unlock_irqrestore(&ctl->command_lock, flags); ++ ++ wake_up(&ctl->command_wq); ++ ++ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { ++ unsigned long wait_ret; ++ ++ wait_ret = wait_for_completion_timeout(&done, ++ msecs_to_jiffies(FSM_CMD_TIMEOUT_MS)); ++ if (!wait_ret) ++ return -ETIMEDOUT; ++ ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, ++ unsigned char *data, unsigned int length) ++{ ++ struct device *dev = &ctl->md->t7xx_dev->pdev->dev; ++ struct t7xx_fsm_event *event; ++ unsigned long flags; ++ ++ if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) { ++ dev_err(dev, "Invalid event %d\n", event_id); ++ return -EINVAL; ++ } ++ ++ event = kmalloc(sizeof(*event) + length, in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); ++ if (!event) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&event->entry); ++ event->event_id = event_id; ++ event->length = length; ++ ++ if (data && length) ++ memcpy(event->data, data, length); ++ ++ spin_lock_irqsave(&ctl->event_lock, flags); ++ list_add_tail(&event->entry, &ctl->event_queue); ++ spin_unlock_irqrestore(&ctl->event_lock, flags); ++ ++ wake_up_all(&ctl->event_wq); ++ return 0; ++} ++ ++void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) ++{ ++ struct t7xx_fsm_event *event, *evt_next; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctl->event_lock, flags); ++ list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { ++ if (event->event_id == event_id) ++ fsm_del_kf_event(event); ++ } ++ spin_unlock_irqrestore(&ctl->event_lock, flags); ++} ++ ++enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) ++{ ++ if (ctl) ++ return ctl->md_state; ++ ++ return MD_STATE_INVALID; ++} ++ ++unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) ++{ ++ if (ctl) ++ return ctl->curr_state; ++ ++ return FSM_STATE_STOPPED; ++} ++ ++int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) ++{ ++ unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT; ++ ++ if (type == MD_IRQ_PORT_ENUM) { ++ return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); ++ } else if (type == MD_IRQ_CCIF_EX) { ++ ctl->exp_flg = true; ++ wake_up(&ctl->async_hk_wq); ++ cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT); ++ return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); ++ } ++ ++ return -EINVAL; ++} ++ ++void t7xx_fsm_reset(struct t7xx_modem *md) ++{ ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ ++ fsm_flush_event_cmd_qs(ctl); ++ ctl->curr_state = FSM_STATE_STOPPED; ++ ctl->exp_flg = false; ++} ++ ++int t7xx_fsm_init(struct t7xx_modem *md) ++{ ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ struct t7xx_fsm_ctl *ctl; ++ ++ ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); ++ if (!ctl) ++ return -ENOMEM; ++ ++ md->fsm_ctl = ctl; ++ ctl->md = md; ++ ctl->curr_state = FSM_STATE_INIT; ++ INIT_LIST_HEAD(&ctl->command_queue); ++ INIT_LIST_HEAD(&ctl->event_queue); ++ init_waitqueue_head(&ctl->async_hk_wq); ++ init_waitqueue_head(&ctl->event_wq); ++ INIT_LIST_HEAD(&ctl->notifier_list); ++ init_waitqueue_head(&ctl->command_wq); ++ spin_lock_init(&ctl->event_lock); ++ spin_lock_init(&ctl->command_lock); ++ ctl->exp_flg = false; ++ spin_lock_init(&ctl->notifier_lock); ++ ++ ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); ++ return PTR_ERR_OR_ZERO(ctl->fsm_thread); ++} ++ ++void t7xx_fsm_uninit(struct t7xx_modem *md) ++{ ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ ++ if (!ctl) ++ return; ++ ++ if (ctl->fsm_thread) ++ kthread_stop(ctl->fsm_thread); ++ ++ fsm_flush_event_cmd_qs(ctl); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h +@@ -0,0 +1,133 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * ++ * Contributors: ++ * Eliot Lee ++ * Ricardo Martinez ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_MONITOR_H__ ++#define __T7XX_MONITOR_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_modem_ops.h" ++ ++enum t7xx_fsm_state { ++ FSM_STATE_INIT, ++ FSM_STATE_PRE_START, ++ FSM_STATE_STARTING, ++ FSM_STATE_READY, ++ FSM_STATE_EXCEPTION, ++ FSM_STATE_STOPPING, ++ FSM_STATE_STOPPED, ++}; ++ ++enum t7xx_fsm_event_state { ++ FSM_EVENT_INVALID, ++ FSM_EVENT_MD_EX, ++ FSM_EVENT_MD_EX_REC_OK, ++ FSM_EVENT_MD_EX_PASS, ++ FSM_EVENT_MAX ++}; ++ ++enum t7xx_fsm_cmd_state { ++ FSM_CMD_INVALID, ++ FSM_CMD_START, ++ FSM_CMD_EXCEPTION, ++ FSM_CMD_PRE_STOP, ++ FSM_CMD_STOP, ++}; ++ ++enum t7xx_ex_reason { ++ EXCEPTION_HS_TIMEOUT, ++ EXCEPTION_EVENT, ++}; ++ ++enum t7xx_md_irq_type { ++ MD_IRQ_WDT, ++ MD_IRQ_CCIF_EX, ++ MD_IRQ_PORT_ENUM, ++}; ++ ++enum md_state { ++ MD_STATE_INVALID, ++ MD_STATE_WAITING_FOR_HS1, ++ MD_STATE_WAITING_FOR_HS2, ++ MD_STATE_READY, ++ MD_STATE_EXCEPTION, ++ MD_STATE_WAITING_TO_STOP, ++ MD_STATE_STOPPED, ++}; ++ ++#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION BIT(0) ++#define FSM_CMD_FLAG_FLIGHT_MODE BIT(1) ++#define FSM_CMD_FLAG_IN_INTERRUPT BIT(2) ++#define FSM_CMD_EX_REASON GENMASK(23, 16) ++ ++struct t7xx_fsm_ctl { ++ struct t7xx_modem *md; ++ enum md_state md_state; ++ unsigned int curr_state; ++ struct list_head command_queue; ++ struct list_head event_queue; ++ wait_queue_head_t command_wq; ++ wait_queue_head_t event_wq; ++ wait_queue_head_t async_hk_wq; ++ spinlock_t event_lock; /* Protects event queue */ ++ spinlock_t command_lock; /* Protects command queue */ ++ struct task_struct *fsm_thread; ++ bool exp_flg; ++ spinlock_t notifier_lock; /* Protects notifier list */ ++ struct list_head notifier_list; ++}; ++ ++struct t7xx_fsm_event { ++ struct list_head entry; ++ enum t7xx_fsm_event_state event_id; ++ unsigned int length; ++ unsigned char data[]; ++}; ++ ++struct t7xx_fsm_command { ++ struct list_head entry; ++ enum t7xx_fsm_cmd_state cmd_id; ++ unsigned int flag; ++ struct completion *done; ++ int *ret; ++}; ++ ++struct t7xx_fsm_notifier { ++ struct list_head entry; ++ int (*notifier_fn)(enum md_state state, void *data); ++ void *data; ++}; ++ ++int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, ++ unsigned int flag); ++int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, ++ unsigned char *data, unsigned int length); ++void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id); ++void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state); ++void t7xx_fsm_reset(struct t7xx_modem *md); ++int t7xx_fsm_init(struct t7xx_modem *md); ++void t7xx_fsm_uninit(struct t7xx_modem *md); ++int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type); ++enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl); ++unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl); ++void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); ++void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); ++ ++#endif /* __T7XX_MONITOR_H__ */ diff --git a/target/linux/generic/backport-5.15/621-v5.19-03-net-wwan-t7xx-Add-port-proxy-infrastructure.patch b/target/linux/generic/backport-5.15/621-v5.19-03-net-wwan-t7xx-Add-port-proxy-infrastructure.patch new file mode 100644 index 
0000000000..4de8243205 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-03-net-wwan-t7xx-Add-port-proxy-infrastructure.patch @@ -0,0 +1,811 @@ +From 48cc2f5ef846e76dc3bb1501a4014be18c644c1b Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:01 -0700 +Subject: [PATCH] net: wwan: t7xx: Add port proxy infrastructure +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Port-proxy provides a common interface to interact with different types +of ports. Ports export their configuration via `struct t7xx_port` and +operate as defined by `struct port_ops`. + +Signed-off-by: Haijun Liu +Co-developed-by: Chandrashekar Devegowda +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Ilpo Järvinen +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/Makefile | 1 + + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 3 +- + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 14 +- + drivers/net/wwan/t7xx/t7xx_port.h | 132 ++++++ + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 452 +++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_port_proxy.h | 72 ++++ + drivers/net/wwan/t7xx/t7xx_state_monitor.c | 5 + + 7 files changed, 677 insertions(+), 2 deletions(-) + create mode 100644 drivers/net/wwan/t7xx/t7xx_port.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.h + +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -10,3 +10,4 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_modem_ops.o \ + t7xx_cldma.o \ + t7xx_hif_cldma.o \ ++ t7xx_port_proxy.o \ +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -46,6 +46,7 @@ + #include "t7xx_mhccif.h" + #include "t7xx_pci.h" + #include "t7xx_pcie_mac.h" ++#include "t7xx_port_proxy.h" + #include "t7xx_reg.h" + #include "t7xx_state_monitor.h" + +@@ -55,7 +56,7 @@ + #define CHECK_Q_STOP_TIMEOUT_US 1000000 + #define CHECK_Q_STOP_STEP_US 10000 + +-#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */ ++#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header)) + + static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -34,6 +34,8 @@ + #include "t7xx_modem_ops.h" + #include "t7xx_pci.h" + #include "t7xx_pcie_mac.h" ++#include "t7xx_port.h" ++#include "t7xx_port_proxy.h" + #include "t7xx_reg.h" + #include "t7xx_state_monitor.h" + +@@ -273,6 +275,7 @@ static void t7xx_md_exception(struct t7x + if (stage == HIF_EX_CLEARQ_DONE) { + /* Give DHL time to flush data */ + msleep(PORT_RESET_DELAY_MS); ++ t7xx_port_proxy_reset(md->port_prox); + } + + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); +@@ -426,6 +429,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t + md->exp_id = 0; + t7xx_fsm_reset(md); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_port_proxy_reset(md->port_prox); + md->md_init_finish = true; + return 0; + } +@@ -462,14 +466,21 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + if (ret) + goto err_uninit_fsm; + ++ ret = t7xx_port_proxy_init(md); ++ if (ret) ++ goto err_uninit_md_cldma; ++ + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); + if (ret) /* fsm_uninit flushes cmd queue */ +- goto err_uninit_md_cldma; ++ goto err_uninit_proxy; + + 
t7xx_md_sys_sw_init(t7xx_dev); + md->md_init_finish = true; + return 0; + ++err_uninit_proxy: ++ t7xx_port_proxy_uninit(md->port_prox); ++ + err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + +@@ -492,6 +503,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t + return; + + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); ++ t7xx_port_proxy_uninit(md->port_prox); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_uninit(md); + destroy_workqueue(md->handshake_wq); +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -0,0 +1,132 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Andy Shevchenko ++ * Chandrashekar Devegowda ++ * Eliot Lee ++ */ ++ ++#ifndef __T7XX_PORT_H__ ++#define __T7XX_PORT_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_hif_cldma.h" ++#include "t7xx_pci.h" ++ ++#define PORT_CH_ID_MASK GENMASK(7, 0) ++ ++/* Channel ID and Message ID definitions. ++ * The channel number consists of peer_id(15:12) , channel_id(11:0) ++ * peer_id: ++ * 0:reserved, 1: to sAP, 2: to MD ++ */ ++enum port_ch { ++ /* to MD */ ++ PORT_CH_CONTROL_RX = 0x2000, ++ PORT_CH_CONTROL_TX = 0x2001, ++ PORT_CH_UART1_RX = 0x2006, /* META */ ++ PORT_CH_UART1_TX = 0x2008, ++ PORT_CH_UART2_RX = 0x200a, /* AT */ ++ PORT_CH_UART2_TX = 0x200c, ++ PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */ ++ PORT_CH_MD_LOG_TX = 0x202b, ++ PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */ ++ PORT_CH_LB_IT_TX = 0x203f, ++ PORT_CH_STATUS_RX = 0x2043, /* Status events */ ++ PORT_CH_MIPC_RX = 0x20ce, /* MIPC */ ++ PORT_CH_MIPC_TX = 0x20cf, ++ PORT_CH_MBIM_RX = 0x20d0, ++ PORT_CH_MBIM_TX = 0x20d1, ++ PORT_CH_DSS0_RX = 0x20d2, ++ PORT_CH_DSS0_TX = 0x20d3, ++ PORT_CH_DSS1_RX = 0x20d4, ++ PORT_CH_DSS1_TX = 0x20d5, ++ PORT_CH_DSS2_RX = 0x20d6, ++ PORT_CH_DSS2_TX = 0x20d7, ++ PORT_CH_DSS3_RX = 0x20d8, ++ PORT_CH_DSS3_TX = 0x20d9, ++ PORT_CH_DSS4_RX = 0x20da, ++ PORT_CH_DSS4_TX = 0x20db, ++ PORT_CH_DSS5_RX = 0x20dc, ++ PORT_CH_DSS5_TX = 0x20dd, ++ PORT_CH_DSS6_RX = 0x20de, ++ PORT_CH_DSS6_TX = 0x20df, ++ PORT_CH_DSS7_RX = 0x20e0, ++ PORT_CH_DSS7_TX = 0x20e1, ++}; ++ ++struct t7xx_port; ++struct port_ops { ++ int (*init)(struct t7xx_port *port); ++ int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb); ++ void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state); ++ void (*uninit)(struct t7xx_port *port); ++ int (*enable_chl)(struct t7xx_port *port); ++ int (*disable_chl)(struct t7xx_port *port); ++}; ++ ++struct t7xx_port_conf { ++ enum port_ch tx_ch; ++ enum port_ch rx_ch; ++ unsigned char txq_index; ++ unsigned char rxq_index; ++ unsigned char txq_exp_index; ++ unsigned char rxq_exp_index; ++ enum cldma_id path_id; ++ struct port_ops *ops; ++ char *name; ++ enum wwan_port_type port_type; ++}; ++ ++struct t7xx_port { ++ /* Members not initialized in definition */ ++ const struct t7xx_port_conf *port_conf; ++ struct wwan_port *wwan_port; ++ struct t7xx_pci_dev *t7xx_dev; ++ struct device *dev; ++ u16 seq_nums[2]; /* TX/RX sequence numbers */ ++ atomic_t usage_cnt; ++ struct list_head entry; ++ struct list_head queue_entry; ++ /* TX and RX flows are asymmetric since ports are multiplexed on ++ * queues. ++ * ++ * TX: data blocks are sent directly to a queue. 
Each port ++ * does not maintain a TX list; instead, they only provide ++ * a wait_queue_head for blocking writes. ++ * ++ * RX: Each port uses a RX list to hold packets, ++ * allowing the modem to dispatch RX packet as quickly as possible. ++ */ ++ struct sk_buff_head rx_skb_list; ++ spinlock_t port_update_lock; /* Protects port configuration */ ++ wait_queue_head_t rx_wq; ++ int rx_length_th; ++ bool chan_enable; ++ struct task_struct *thread; ++}; ++ ++struct sk_buff *t7xx_port_alloc_skb(int payload); ++int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); ++int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, ++ unsigned int ex_msg); ++ ++#endif /* __T7XX_PORT_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -0,0 +1,452 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Andy Shevchenko ++ * Chandrashekar Devegowda ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_hif_cldma.h" ++#include "t7xx_modem_ops.h" ++#include "t7xx_port.h" ++#include "t7xx_port_proxy.h" ++#include "t7xx_state_monitor.h" ++ ++#define Q_IDX_CTRL 0 ++#define Q_IDX_MBIM 2 ++#define Q_IDX_AT_CMD 5 ++ ++#define INVALID_SEQ_NUM GENMASK(15, 0) ++ ++#define for_each_proxy_port(i, p, proxy) \ ++ for (i = 0, (p) = &(proxy)->ports[i]; \ ++ i < (proxy)->port_count; \ ++ i++, (p) = &(proxy)->ports[i]) ++ ++static const struct t7xx_port_conf t7xx_md_port_conf[] = { ++}; ++ ++static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) ++{ ++ const struct t7xx_port_conf *port_conf; ++ struct t7xx_port *port; ++ int i; ++ ++ for_each_proxy_port(i, port, port_prox) { ++ port_conf = port->port_conf; ++ if (port_conf->rx_ch == ch || port_conf->tx_ch == ch) ++ return port; ++ } ++ ++ return NULL; ++} ++ ++static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h) ++{ ++ u32 status = le32_to_cpu(ccci_h->status); ++ u16 seq_num, next_seq_num; ++ bool assert_bit; ++ ++ seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status); ++ next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD); ++ assert_bit = status & CCCI_H_AST_BIT; ++ if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM) ++ return next_seq_num; ++ ++ if (seq_num != port->seq_nums[MTK_RX]) ++ dev_warn_ratelimited(port->dev, ++ "seq num out-of-order %u != %u (header %X, len %X)\n", ++ seq_num, port->seq_nums[MTK_RX], ++ le32_to_cpu(ccci_h->packet_header), ++ le32_to_cpu(ccci_h->packet_len)); ++ ++ return next_seq_num; ++} ++ ++void t7xx_port_proxy_reset(struct port_proxy *port_prox) ++{ ++ struct t7xx_port *port; ++ int i; ++ ++ for_each_proxy_port(i, port, port_prox) { ++ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; ++ port->seq_nums[MTK_TX] = 0; ++ } ++} ++ ++static int t7xx_port_get_queue_no(struct t7xx_port *port) ++{ ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; ++ ++ return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ? 
++ port_conf->txq_exp_index : port_conf->txq_index; ++} ++ ++static void t7xx_port_struct_init(struct t7xx_port *port) ++{ ++ INIT_LIST_HEAD(&port->entry); ++ INIT_LIST_HEAD(&port->queue_entry); ++ skb_queue_head_init(&port->rx_skb_list); ++ init_waitqueue_head(&port->rx_wq); ++ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; ++ port->seq_nums[MTK_TX] = 0; ++ atomic_set(&port->usage_cnt, 0); ++} ++ ++struct sk_buff *t7xx_port_alloc_skb(int payload) ++{ ++ struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL); ++ ++ if (skb) ++ skb_reserve(skb, sizeof(struct ccci_header)); ++ ++ return skb; ++} ++ ++/** ++ * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list. ++ * @port: port context. ++ * @skb: received skb. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed. ++ */ ++int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port->rx_wq.lock, flags); ++ if (port->rx_skb_list.qlen >= port->rx_length_th) { ++ spin_unlock_irqrestore(&port->rx_wq.lock, flags); ++ ++ return -ENOBUFS; ++ } ++ __skb_queue_tail(&port->rx_skb_list, skb); ++ spin_unlock_irqrestore(&port->rx_wq.lock, flags); ++ ++ wake_up_all(&port->rx_wq); ++ return 0; ++} ++ ++static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb) ++{ ++ enum cldma_id path_id = port->port_conf->path_id; ++ struct cldma_ctrl *md_ctrl; ++ int ret, tx_qno; ++ ++ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id]; ++ tx_qno = t7xx_port_get_queue_no(port); ++ ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb); ++ if (ret) ++ dev_err(port->dev, "Failed to send skb: %d\n", ret); ++ ++ return ret; ++} ++ ++static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb, ++ unsigned int pkt_header, unsigned int ex_msg) ++{ ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ struct ccci_header *ccci_h; ++ u32 status; ++ int ret; ++ ++ ccci_h = skb_push(skb, sizeof(*ccci_h)); ++ status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) | ++ FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT; ++ ccci_h->status = cpu_to_le32(status); ++ ccci_h->packet_header = cpu_to_le32(pkt_header); ++ ccci_h->packet_len = cpu_to_le32(skb->len); ++ ccci_h->ex_msg = cpu_to_le32(ex_msg); ++ ++ ret = t7xx_port_send_raw_skb(port, skb); ++ if (ret) ++ return ret; ++ ++ port->seq_nums[MTK_TX]++; ++ return 0; ++} ++ ++int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, ++ unsigned int ex_msg) ++{ ++ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; ++ unsigned int fsm_state; ++ ++ fsm_state = t7xx_fsm_get_ctl_state(ctl); ++ if (fsm_state != FSM_STATE_PRE_START) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ enum md_state md_state = t7xx_fsm_get_md_state(ctl); ++ ++ switch (md_state) { ++ case MD_STATE_EXCEPTION: ++ if (port_conf->tx_ch != PORT_CH_MD_LOG_TX) ++ return -EBUSY; ++ break; ++ ++ case MD_STATE_WAITING_FOR_HS1: ++ case MD_STATE_WAITING_FOR_HS2: ++ case MD_STATE_STOPPED: ++ case MD_STATE_WAITING_TO_STOP: ++ case MD_STATE_INVALID: ++ return -ENODEV; ++ ++ default: ++ break; ++ } ++ } ++ ++ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); ++} ++ ++static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox) ++{ ++ struct t7xx_port *port; ++ ++ int i, j; ++ ++ for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++) ++ 
INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]); ++ ++ for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) { ++ for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++) ++ INIT_LIST_HEAD(&port_prox->queue_ports[j][i]); ++ } ++ ++ for_each_proxy_port(i, port, port_prox) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ enum cldma_id path_id = port_conf->path_id; ++ u8 ch_id; ++ ++ ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch); ++ list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]); ++ list_add_tail(&port->queue_entry, ++ &port_prox->queue_ports[path_id][port_conf->rxq_index]); ++ } ++} ++ ++static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev, ++ struct cldma_queue *queue, u16 channel) ++{ ++ struct port_proxy *port_prox = t7xx_dev->md->port_prox; ++ struct list_head *port_list; ++ struct t7xx_port *port; ++ u8 ch_id; ++ ++ ch_id = FIELD_GET(PORT_CH_ID_MASK, channel); ++ port_list = &port_prox->rx_ch_ports[ch_id]; ++ list_for_each_entry(port, port_list, entry) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ if (queue->md_ctrl->hif_id == port_conf->path_id && ++ channel == port_conf->rx_ch) ++ return port; ++ } ++ ++ return NULL; ++} ++ ++/** ++ * t7xx_port_proxy_recv_skb() - Dispatch received skb. ++ * @queue: CLDMA queue. ++ * @skb: Socket buffer. ++ * ++ * Return: ++ ** 0 - Packet consumed. ++ ** -ERROR - Failed to process skb. ++ */ ++static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) ++{ ++ struct ccci_header *ccci_h = (struct ccci_header *)skb->data; ++ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev; ++ struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl; ++ struct device *dev = queue->md_ctrl->dev; ++ const struct t7xx_port_conf *port_conf; ++ struct t7xx_port *port; ++ u16 seq_num, channel; ++ int ret; ++ ++ if (!skb) ++ return -EINVAL; ++ ++ channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status)); ++ if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) { ++ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel); ++ goto drop_skb; ++ } ++ ++ port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel); ++ if (!port) { ++ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel); ++ goto drop_skb; ++ } ++ ++ seq_num = t7xx_port_next_rx_seq_num(port, ccci_h); ++ port_conf = port->port_conf; ++ skb_pull(skb, sizeof(*ccci_h)); ++ ++ ret = port_conf->ops->recv_skb(port, skb); ++ /* Error indicates to try again later */ ++ if (ret) { ++ skb_push(skb, sizeof(*ccci_h)); ++ return ret; ++ } ++ ++ port->seq_nums[MTK_RX] = seq_num; ++ return 0; ++ ++drop_skb: ++ dev_kfree_skb_any(skb); ++ return 0; ++} ++ ++/** ++ * t7xx_port_proxy_md_status_notify() - Notify all ports of state. ++ *@port_prox: The port_proxy pointer. ++ *@state: State. ++ * ++ * Called by t7xx_fsm. Used to dispatch modem status for all ports, ++ * which want to know MD state transition. 
++ */ ++void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state) ++{ ++ struct t7xx_port *port; ++ int i; ++ ++ for_each_proxy_port(i, port, port_prox) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ if (port_conf->ops->md_state_notify) ++ port_conf->ops->md_state_notify(port, state); ++ } ++} ++ ++static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) ++{ ++ struct port_proxy *port_prox = md->port_prox; ++ struct t7xx_port *port; ++ int i; ++ ++ for_each_proxy_port(i, port, port_prox) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ t7xx_port_struct_init(port); ++ ++ port->t7xx_dev = md->t7xx_dev; ++ port->dev = &md->t7xx_dev->pdev->dev; ++ spin_lock_init(&port->port_update_lock); ++ port->chan_enable = false; ++ ++ if (port_conf->ops->init) ++ port_conf->ops->init(port); ++ } ++ ++ t7xx_proxy_setup_ch_mapping(port_prox); ++} ++ ++static int t7xx_proxy_alloc(struct t7xx_modem *md) ++{ ++ unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ struct port_proxy *port_prox; ++ int i; ++ ++ port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count, ++ GFP_KERNEL); ++ if (!port_prox) ++ return -ENOMEM; ++ ++ md->port_prox = port_prox; ++ port_prox->dev = dev; ++ ++ for (i = 0; i < port_count; i++) ++ port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; ++ ++ port_prox->port_count = port_count; ++ t7xx_proxy_init_all_ports(md); ++ return 0; ++} ++ ++/** ++ * t7xx_port_proxy_init() - Initialize ports. ++ * @md: Modem. ++ * ++ * Create all port instances. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code from failure sub-initializations. ++ */ ++int t7xx_port_proxy_init(struct t7xx_modem *md) ++{ ++ int ret; ++ ++ ret = t7xx_proxy_alloc(md); ++ if (ret) ++ return ret; ++ ++ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); ++ return 0; ++} ++ ++void t7xx_port_proxy_uninit(struct port_proxy *port_prox) ++{ ++ struct t7xx_port *port; ++ int i; ++ ++ for_each_proxy_port(i, port, port_prox) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ if (port_conf->ops->uninit) ++ port_conf->ops->uninit(port); ++ } ++} ++ ++int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, ++ bool en_flag) ++{ ++ struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id); ++ const struct t7xx_port_conf *port_conf; ++ ++ if (!port) ++ return -EINVAL; ++ ++ port_conf = port->port_conf; ++ ++ if (en_flag) { ++ if (port_conf->ops->enable_chl) ++ port_conf->ops->enable_chl(port); ++ } else { ++ if (port_conf->ops->disable_chl) ++ port_conf->ops->disable_chl(port); ++ } ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h +@@ -0,0 +1,72 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_PORT_PROXY_H__ ++#define __T7XX_PORT_PROXY_H__ ++ ++#include ++#include ++#include ++#include ++ ++#include "t7xx_hif_cldma.h" ++#include "t7xx_modem_ops.h" ++#include "t7xx_port.h" ++ ++#define MTK_QUEUES 16 ++#define RX_QUEUE_MAXLEN 32 ++#define CTRL_QUEUE_MAXLEN 16 ++ ++struct port_proxy { ++ int port_count; ++ struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1]; ++ struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES]; ++ struct device *dev; ++ struct t7xx_port ports[]; ++}; ++ ++struct ccci_header { ++ __le32 packet_header; ++ __le32 packet_len; ++ __le32 status; ++ __le32 ex_msg; ++}; ++ ++/* Coupled with HW - indicates if there is data following the CCCI header or not */ ++#define CCCI_HEADER_NO_DATA 0xffffffff ++ ++#define CCCI_H_AST_BIT BIT(31) ++#define CCCI_H_SEQ_FLD GENMASK(30, 16) ++#define CCCI_H_CHN_FLD GENMASK(15, 0) ++ ++#define PORT_INFO_RSRVD GENMASK(31, 16) ++#define PORT_INFO_ENFLG BIT(15) ++#define PORT_INFO_CH_ID GENMASK(14, 0) ++ ++#define PORT_ENUM_VER 0 ++#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a ++#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5 ++#define PORT_ENUM_VER_MISMATCH 0x00657272 ++ ++void t7xx_port_proxy_reset(struct port_proxy *port_prox); ++void t7xx_port_proxy_uninit(struct port_proxy *port_prox); ++int t7xx_port_proxy_init(struct t7xx_modem *md); ++void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state); ++int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, ++ bool en_flag); ++ ++#endif /* __T7XX_PORT_PROXY_H__ */ +--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c +@@ -37,6 +37,7 @@ + #include "t7xx_modem_ops.h" + #include "t7xx_pci.h" + #include "t7xx_pcie_mac.h" ++#include "t7xx_port_proxy.h" + #include "t7xx_reg.h" + #include "t7xx_state_monitor.h" + +@@ -90,6 +91,9 @@ static void fsm_state_notify(struct t7xx + void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) + { + ctl->md_state = state; ++ ++ /* Update to port first, otherwise sending message on HS2 may fail */ ++ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state); + fsm_state_notify(ctl->md, state); + } + +@@ -258,6 +262,7 @@ static void t7xx_fsm_broadcast_ready_sta + ctl->md_state = MD_STATE_READY; + + fsm_state_notify(ctl->md, MD_STATE_READY); ++ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY); + } + + static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) diff --git a/target/linux/generic/backport-5.15/621-v5.19-04-net-wwan-t7xx-Add-control-port.patch b/target/linux/generic/backport-5.15/621-v5.19-04-net-wwan-t7xx-Add-control-port.patch new file mode 100644 index 0000000000..1ea81899fe --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-04-net-wwan-t7xx-Add-control-port.patch @@ -0,0 +1,759 @@ +From da45d2566a1d4e260b894ff5d96be64b21c7fa79 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:02 -0700 +Subject: [PATCH] net: wwan: t7xx: Add control port +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Control Port implements driver control messages such as modem-host +handshaking, controls port enumeration, and handles exception messages. + +The handshaking process between the driver and the modem happens during +the init sequence. 
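A rough sketch of what that exchange looks like on the wire may help when reading the hunks below: HS1 is a ctrl_msg_header carrying CTL_ID_HS1_MSG whose payload is a feature_query framed by the "ICCC" pattern, one support byte per runtime feature. The standalone, host-endian sketch below only mirrors the structures added by this patch and is not driver code; FEATURE_COUNT is not defined in these hunks, so the value 64 used here is an assumption.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTL_ID_HS1_MSG       0x0
#define MD_FEATURE_QUERY_ID  0x49434343u   /* "ICCC" */
#define FEATURE_COUNT        64            /* assumed value, sketch only */

/* Mirrors the ctrl_msg_header prepended by t7xx_port_send_ctl_skb() (host-endian here). */
struct ctrl_msg_header {
	uint32_t ctrl_msg_id;
	uint32_t ex_msg;
	uint32_t data_length;
};

/* Mirrors feature_query: one support byte per runtime feature, framed by "ICCC". */
struct feature_query {
	uint32_t head_pattern;
	uint8_t  feature_set[FEATURE_COUNT];
	uint32_t tail_pattern;
};

int main(void)
{
	struct {
		struct ctrl_msg_header hdr;
		struct feature_query query;
	} hs1;

	memset(&hs1, 0, sizeof(hs1));
	hs1.hdr.ctrl_msg_id = CTL_ID_HS1_MSG;
	hs1.hdr.data_length = sizeof(hs1.query);
	hs1.query.head_pattern = MD_FEATURE_QUERY_ID;
	hs1.query.tail_pattern = MD_FEATURE_QUERY_ID;

	printf("HS1: %zu-byte control header + %zu-byte feature query\n",
	       sizeof(hs1.hdr), sizeof(hs1.query));
	return 0;
}

The modem replies with an HS2 message in the same framing, which t7xx_core_hk_handler() below consumes as FSM_EVENT_MD_HS2 before the host answers with HS3.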
The process involves the exchange of a list of +supported runtime features to make sure that modem and host are ready +to provide proper feature lists including port enumeration. Further +features can be enabled and controlled in this handshaking process. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Ilpo Järvinen +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/Makefile | 1 + + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 214 +++++++++++++++- + drivers/net/wwan/t7xx/t7xx_modem_ops.h | 3 + + drivers/net/wwan/t7xx/t7xx_port.h | 3 + + drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 273 +++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 40 +++ + drivers/net/wwan/t7xx/t7xx_port_proxy.h | 25 ++ + drivers/net/wwan/t7xx/t7xx_state_monitor.c | 3 + + drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 + + 9 files changed, 561 insertions(+), 3 deletions(-) + create mode 100644 drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c + +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -11,3 +11,4 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_cldma.o \ + t7xx_hif_cldma.o \ + t7xx_port_proxy.o \ ++ t7xx_port_ctrl_msg.o \ +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -16,6 +16,8 @@ + */ + + #include ++#include ++#include + #include + #include + #include +@@ -26,6 +28,7 @@ + #include + #include + #include ++#include + #include + + #include "t7xx_cldma.h" +@@ -39,11 +42,24 @@ + #include "t7xx_reg.h" + #include "t7xx_state_monitor.h" + ++#define RT_ID_MD_PORT_ENUM 0 ++/* Modem feature query identification code - "ICCC" */ ++#define MD_FEATURE_QUERY_ID 0x49434343 ++ ++#define FEATURE_VER GENMASK(7, 4) ++#define FEATURE_MSK GENMASK(3, 0) ++ + #define RGU_RESET_DELAY_MS 10 + #define PORT_RESET_DELAY_MS 2000 + #define EX_HS_TIMEOUT_MS 5000 + #define EX_HS_POLL_DELAY_MS 10 + ++enum mtk_feature_support_type { ++ MTK_FEATURE_DOES_NOT_EXIST, ++ MTK_FEATURE_NOT_SUPPORTED, ++ MTK_FEATURE_MUST_BE_SUPPORTED, ++}; ++ + static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) + { + return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; +@@ -314,16 +330,205 @@ static void t7xx_md_sys_sw_init(struct t + t7xx_pcie_register_rgu_isr(t7xx_dev); + } + ++struct feature_query { ++ __le32 head_pattern; ++ u8 feature_set[FEATURE_COUNT]; ++ __le32 tail_pattern; ++}; ++ ++static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core) ++{ ++ struct feature_query *ft_query; ++ struct sk_buff *skb; ++ ++ skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query)); ++ if (!skb) ++ return; ++ ++ ft_query = skb_put(skb, sizeof(*ft_query)); ++ ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); ++ memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT); ++ ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); ++ ++ /* Send HS1 message to device */ ++ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0); ++} ++ ++static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev, ++ void *data) ++{ ++ struct feature_query *md_feature = data; ++ struct mtk_runtime_feature *rt_feature; ++ unsigned int i, rt_data_len = 0; ++ struct sk_buff *skb; ++ ++ /* Parse MD runtime data query */ ++ if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID || ++ le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) { ++ dev_err(dev, "Invalid 
feature pattern: head 0x%x, tail 0x%x\n", ++ le32_to_cpu(md_feature->head_pattern), ++ le32_to_cpu(md_feature->tail_pattern)); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < FEATURE_COUNT; i++) { ++ if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) != ++ MTK_FEATURE_MUST_BE_SUPPORTED) ++ rt_data_len += sizeof(*rt_feature); ++ } ++ ++ skb = t7xx_ctrl_alloc_skb(rt_data_len); ++ if (!skb) ++ return -ENOMEM; ++ ++ rt_feature = skb_put(skb, rt_data_len); ++ memset(rt_feature, 0, rt_data_len); ++ ++ /* Fill runtime feature */ ++ for (i = 0; i < FEATURE_COUNT; i++) { ++ u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]); ++ ++ if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED) ++ continue; ++ ++ rt_feature->feature_id = i; ++ if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST) ++ rt_feature->support_info = md_feature->feature_set[i]; ++ ++ rt_feature++; ++ } ++ ++ /* Send HS3 message to device */ ++ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0); ++ return 0; ++} ++ ++static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core, ++ struct device *dev, void *data, int data_length) ++{ ++ enum mtk_feature_support_type ft_spt_st, ft_spt_cfg; ++ struct mtk_runtime_feature *rt_feature; ++ int i, offset; ++ ++ offset = sizeof(struct feature_query); ++ for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) { ++ rt_feature = data + offset; ++ offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len); ++ ++ ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]); ++ if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED) ++ continue; ++ ++ ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info); ++ if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) ++ return -EINVAL; ++ ++ if (i == RT_ID_MD_PORT_ENUM) ++ t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); ++ } ++ ++ return 0; ++} ++ ++static int t7xx_core_reset(struct t7xx_modem *md) ++{ ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ ++ md->core_md.ready = false; ++ ++ if (!ctl) { ++ dev_err(dev, "FSM is not initialized\n"); ++ return -EINVAL; ++ } ++ ++ if (md->core_md.handshake_ongoing) { ++ int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); ++ ++ if (ret) ++ return ret; ++ } ++ ++ md->core_md.handshake_ongoing = false; ++ return 0; ++} ++ ++static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, ++ enum t7xx_fsm_event_state event_id, ++ enum t7xx_fsm_event_state err_detect) ++{ ++ struct t7xx_sys_info *core_info = &md->core_md; ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ struct t7xx_fsm_event *event, *event_next; ++ unsigned long flags; ++ int ret; ++ ++ t7xx_prepare_host_rt_data_query(core_info); ++ ++ while (!kthread_should_stop()) { ++ bool event_received = false; ++ ++ spin_lock_irqsave(&ctl->event_lock, flags); ++ list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) { ++ if (event->event_id == err_detect) { ++ list_del(&event->entry); ++ spin_unlock_irqrestore(&ctl->event_lock, flags); ++ dev_err(dev, "Core handshake error event received\n"); ++ goto err_free_event; ++ } else if (event->event_id == event_id) { ++ list_del(&event->entry); ++ event_received = true; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&ctl->event_lock, flags); ++ ++ if (event_received) ++ break; ++ ++ wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) || ++ kthread_should_stop()); ++ if (kthread_should_stop()) ++ goto err_free_event; ++ } ++ ++ if 
(ctl->exp_flg) ++ goto err_free_event; ++ ++ ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); ++ if (ret) { ++ dev_err(dev, "Host failure parsing runtime data: %d\n", ret); ++ goto err_free_event; ++ } ++ ++ if (ctl->exp_flg) ++ goto err_free_event; ++ ++ ret = t7xx_prepare_device_rt_data(core_info, dev, event->data); ++ if (ret) { ++ dev_err(dev, "Device failure parsing runtime data: %d", ret); ++ goto err_free_event; ++ } ++ ++ core_info->ready = true; ++ core_info->handshake_ongoing = false; ++ wake_up(&ctl->async_hk_wq); ++err_free_event: ++ kfree(event); ++} ++ + static void t7xx_md_hk_wq(struct work_struct *work) + { + struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + ++ /* Clear the HS2 EXIT event appended in core_reset() */ ++ t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT); + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); +- md->core_md.ready = true; +- wake_up(&ctl->async_hk_wq); ++ md->core_md.handshake_ongoing = true; ++ t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); + } + + void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) +@@ -418,6 +623,9 @@ static struct t7xx_modem *t7xx_md_alloc( + return NULL; + + INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); ++ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; ++ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= ++ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); + return md; + } + +@@ -431,7 +639,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); + t7xx_port_proxy_reset(md->port_prox); + md->md_init_finish = true; +- return 0; ++ return t7xx_core_reset(md); + } + + /** +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h +@@ -57,6 +57,9 @@ enum md_event_id { + + struct t7xx_sys_info { + bool ready; ++ bool handshake_ongoing; ++ u8 feature_set[FEATURE_COUNT]; ++ struct t7xx_port *ctl_port; + }; + + struct t7xx_modem { +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -125,8 +125,11 @@ struct t7xx_port { + }; + + struct sk_buff *t7xx_port_alloc_skb(int payload); ++struct sk_buff *t7xx_ctrl_alloc_skb(int payload); + int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); + int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg); ++int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, ++ unsigned int ex_msg); + + #endif /* __T7XX_PORT_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c +@@ -0,0 +1,273 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Ricardo Martinez ++ * Moises Veleta ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_port.h" ++#include "t7xx_port_proxy.h" ++#include "t7xx_state_monitor.h" ++ ++#define PORT_MSG_VERSION GENMASK(31, 16) ++#define PORT_MSG_PRT_CNT GENMASK(15, 0) ++ ++struct port_msg { ++ __le32 head_pattern; ++ __le32 info; ++ __le32 tail_pattern; ++ __le32 data[]; ++}; ++ ++static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg) ++{ ++ struct sk_buff *skb; ++ int ret; ++ ++ skb = t7xx_ctrl_alloc_skb(0); ++ if (!skb) ++ return -ENOMEM; ++ ++ ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg); ++ if (ret) ++ dev_kfree_skb_any(skb); ++ ++ return ret; ++} ++ ++static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl, ++ struct sk_buff *skb) ++{ ++ struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data; ++ struct device *dev = &ctl->md->t7xx_dev->pdev->dev; ++ enum md_state md_state; ++ int ret = -EINVAL; ++ ++ md_state = t7xx_fsm_get_md_state(ctl); ++ if (md_state != MD_STATE_EXCEPTION) { ++ dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n", ++ ctrl_msg_h->ex_msg, md_state); ++ return -EINVAL; ++ } ++ ++ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { ++ case CTL_ID_MD_EX: ++ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) { ++ dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg); ++ break; ++ } ++ ++ ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID); ++ if (ret) { ++ dev_err(dev, "Failed to send exception message to modem\n"); ++ break; ++ } ++ ++ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0); ++ if (ret) ++ dev_err(dev, "Failed to append Modem Exception event"); ++ ++ break; ++ ++ case CTL_ID_MD_EX_ACK: ++ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) { ++ dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg); ++ break; ++ } ++ ++ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0); ++ if (ret) ++ dev_err(dev, "Failed to append Modem Exception Received event"); ++ ++ break; ++ ++ case CTL_ID_MD_EX_PASS: ++ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0); ++ if (ret) ++ dev_err(dev, "Failed to append Modem Exception Passed event"); ++ ++ break; ++ ++ case CTL_ID_DRV_VER_ERROR: ++ dev_err(dev, "AP/MD driver version mismatch\n"); ++ } ++ ++ return ret; ++} ++ ++/** ++ * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes. ++ * @md: Modem context. ++ * @msg: Message. ++ * ++ * Used to control create/remove device node. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -EFAULT - Message check failure. 
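As a reading aid for the decoder that follows, the standalone sketch below applies the same masks this series defines (PORT_MSG_VERSION and PORT_MSG_PRT_CNT on the info word, PORT_INFO_ENFLG and PORT_INFO_CH_ID on each data[] entry) to a made-up example word. The GENMASK()/BIT() values are expanded by hand and the channel number 0x200a (PORT_CH_UART2_RX, AT) is chosen purely for illustration; this is not driver code.

#include <stdint.h>
#include <stdio.h>

/* Masks as defined by this series, with GENMASK()/BIT() expanded by hand. */
#define PORT_MSG_VERSION  0xffff0000u  /* GENMASK(31, 16) of port_msg->info  */
#define PORT_MSG_PRT_CNT  0x0000ffffu  /* GENMASK(15, 0)  of port_msg->info  */
#define PORT_INFO_ENFLG   0x00008000u  /* BIT(15) of each data[] word        */
#define PORT_INFO_CH_ID   0x00007fffu  /* GENMASK(14, 0) of each data[] word */

int main(void)
{
	/* Hypothetical enumeration: version 0, two ports, one entry enabling
	 * channel 0x200a (PORT_CH_UART2_RX, the AT channel). */
	uint32_t info = (0u << 16) | 2u;
	uint32_t entry = PORT_INFO_ENFLG | 0x200au;

	printf("port enum: version %u, %u ports\n",
	       (unsigned int)((info & PORT_MSG_VERSION) >> 16),
	       (unsigned int)(info & PORT_MSG_PRT_CNT));
	printf("entry: channel 0x%04x %s\n",
	       (unsigned int)(entry & PORT_INFO_CH_ID),
	       (entry & PORT_INFO_ENFLG) ? "enabled" : "disabled");
	return 0;
}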
++ */ ++int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg) ++{ ++ struct device *dev = &md->t7xx_dev->pdev->dev; ++ unsigned int version, port_count, i; ++ struct port_msg *port_msg = msg; ++ ++ version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info)); ++ if (version != PORT_ENUM_VER || ++ le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN || ++ le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) { ++ dev_err(dev, "Invalid port control message %x:%x:%x\n", ++ version, le32_to_cpu(port_msg->head_pattern), ++ le32_to_cpu(port_msg->tail_pattern)); ++ return -EFAULT; ++ } ++ ++ port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info)); ++ for (i = 0; i < port_count; i++) { ++ u32 port_info = le32_to_cpu(port_msg->data[i]); ++ unsigned int ch_id; ++ bool en_flag; ++ ++ ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info); ++ en_flag = port_info & PORT_INFO_ENFLG; ++ if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag)) ++ dev_dbg(dev, "Port:%x not found\n", ch_id); ++ } ++ ++ return 0; ++} ++ ++static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb) ++{ ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; ++ struct ctrl_msg_header *ctrl_msg_h; ++ int ret = 0; ++ ++ ctrl_msg_h = (struct ctrl_msg_header *)skb->data; ++ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { ++ case CTL_ID_HS2_MSG: ++ skb_pull(skb, sizeof(*ctrl_msg_h)); ++ ++ if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { ++ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, ++ le32_to_cpu(ctrl_msg_h->data_length)); ++ if (ret) ++ dev_err(port->dev, "Failed to append Handshake 2 event"); ++ } ++ ++ dev_kfree_skb_any(skb); ++ break; ++ ++ case CTL_ID_MD_EX: ++ case CTL_ID_MD_EX_ACK: ++ case CTL_ID_MD_EX_PASS: ++ case CTL_ID_DRV_VER_ERROR: ++ ret = fsm_ee_message_handler(port, ctl, skb); ++ dev_kfree_skb_any(skb); ++ break; ++ ++ case CTL_ID_PORT_ENUM: ++ skb_pull(skb, sizeof(*ctrl_msg_h)); ++ ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data); ++ if (!ret) ++ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0); ++ else ++ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, ++ PORT_ENUM_VER_MISMATCH); ++ ++ break; ++ ++ default: ++ ret = -EINVAL; ++ dev_err(port->dev, "Unknown control message ID to FSM %x\n", ++ le32_to_cpu(ctrl_msg_h->ctrl_msg_id)); ++ break; ++ } ++ ++ if (ret) ++ dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret); ++ ++ return ret; ++} ++ ++static int port_ctl_rx_thread(void *arg) ++{ ++ while (!kthread_should_stop()) { ++ struct t7xx_port *port = arg; ++ struct sk_buff *skb; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port->rx_wq.lock, flags); ++ if (skb_queue_empty(&port->rx_skb_list) && ++ wait_event_interruptible_locked_irq(port->rx_wq, ++ !skb_queue_empty(&port->rx_skb_list) || ++ kthread_should_stop())) { ++ spin_unlock_irqrestore(&port->rx_wq.lock, flags); ++ continue; ++ } ++ if (kthread_should_stop()) { ++ spin_unlock_irqrestore(&port->rx_wq.lock, flags); ++ break; ++ } ++ skb = __skb_dequeue(&port->rx_skb_list); ++ spin_unlock_irqrestore(&port->rx_wq.lock, flags); ++ ++ control_msg_handler(port, skb); ++ } ++ ++ return 0; ++} ++ ++static int port_ctl_init(struct t7xx_port *port) ++{ ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name); ++ if (IS_ERR(port->thread)) { ++ dev_err(port->dev, "Failed to 
start port control thread\n"); ++ return PTR_ERR(port->thread); ++ } ++ ++ port->rx_length_th = CTRL_QUEUE_MAXLEN; ++ return 0; ++} ++ ++static void port_ctl_uninit(struct t7xx_port *port) ++{ ++ unsigned long flags; ++ struct sk_buff *skb; ++ ++ if (port->thread) ++ kthread_stop(port->thread); ++ ++ spin_lock_irqsave(&port->rx_wq.lock, flags); ++ port->rx_length_th = 0; ++ while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL) ++ dev_kfree_skb_any(skb); ++ spin_unlock_irqrestore(&port->rx_wq.lock, flags); ++} ++ ++struct port_ops ctl_port_ops = { ++ .init = port_ctl_init, ++ .recv_skb = t7xx_port_enqueue_skb, ++ .uninit = port_ctl_uninit, ++}; +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -49,6 +49,15 @@ + i++, (p) = &(proxy)->ports[i]) + + static const struct t7xx_port_conf t7xx_md_port_conf[] = { ++ { ++ .tx_ch = PORT_CH_CONTROL_TX, ++ .rx_ch = PORT_CH_CONTROL_RX, ++ .txq_index = Q_IDX_CTRL, ++ .rxq_index = Q_IDX_CTRL, ++ .path_id = CLDMA_ID_MD, ++ .ops = &ctl_port_ops, ++ .name = "t7xx_ctrl", ++ }, + }; + + static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) +@@ -129,6 +138,16 @@ struct sk_buff *t7xx_port_alloc_skb(int + return skb; + } + ++struct sk_buff *t7xx_ctrl_alloc_skb(int payload) ++{ ++ struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header)); ++ ++ if (skb) ++ skb_reserve(skb, sizeof(struct ctrl_msg_header)); ++ ++ return skb; ++} ++ + /** + * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list. + * @port: port context. +@@ -194,6 +213,24 @@ static int t7xx_port_send_ccci_skb(struc + return 0; + } + ++int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, ++ unsigned int ex_msg) ++{ ++ struct ctrl_msg_header *ctrl_msg_h; ++ unsigned int msg_len = skb->len; ++ u32 pkt_header = 0; ++ ++ ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h)); ++ ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg); ++ ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg); ++ ctrl_msg_h->data_length = cpu_to_le32(msg_len); ++ ++ if (!msg_len) ++ pkt_header = CCCI_HEADER_NO_DATA; ++ ++ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); ++} ++ + int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg) + { +@@ -359,6 +396,9 @@ static void t7xx_proxy_init_all_ports(st + + t7xx_port_struct_init(port); + ++ if (port_conf->tx_ch == PORT_CH_CONTROL_TX) ++ md->core_md.ctl_port = port; ++ + port->t7xx_dev = md->t7xx_dev; + port->dev = &md->t7xx_dev->pdev->dev; + spin_lock_init(&port->port_update_lock); +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h +@@ -53,6 +53,27 @@ struct ccci_header { + #define CCCI_H_SEQ_FLD GENMASK(30, 16) + #define CCCI_H_CHN_FLD GENMASK(15, 0) + ++struct ctrl_msg_header { ++ __le32 ctrl_msg_id; ++ __le32 ex_msg; ++ __le32 data_length; ++}; ++ ++/* Control identification numbers for AP<->MD messages */ ++#define CTL_ID_HS1_MSG 0x0 ++#define CTL_ID_HS2_MSG 0x1 ++#define CTL_ID_HS3_MSG 0x2 ++#define CTL_ID_MD_EX 0x4 ++#define CTL_ID_DRV_VER_ERROR 0x5 ++#define CTL_ID_MD_EX_ACK 0x6 ++#define CTL_ID_MD_EX_PASS 0x8 ++#define CTL_ID_PORT_ENUM 0x9 ++ ++/* Modem exception check identification code - "EXCP" */ ++#define MD_EX_CHK_ID 0x45584350 ++/* Modem exception check acknowledge identification code - "EREC" */ ++#define MD_EX_CHK_ACK_ID 0x45524543 ++ + #define PORT_INFO_RSRVD GENMASK(31, 16) + #define 
PORT_INFO_ENFLG BIT(15) + #define PORT_INFO_CH_ID GENMASK(14, 0) +@@ -62,10 +83,14 @@ struct ccci_header { + #define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5 + #define PORT_ENUM_VER_MISMATCH 0x00657272 + ++/* Port operations mapping */ ++extern struct port_ops ctl_port_ops; ++ + void t7xx_port_proxy_reset(struct port_proxy *port_prox); + void t7xx_port_proxy_uninit(struct port_proxy *port_prox); + int t7xx_port_proxy_init(struct t7xx_modem *md); + void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state); ++int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg); + int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag); + +--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c +@@ -293,6 +293,9 @@ static int fsm_routine_starting(struct t + + if (!md->core_md.ready) { + dev_err(dev, "MD handshake timeout\n"); ++ if (md->core_md.handshake_ongoing) ++ t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); ++ + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); + return -ETIMEDOUT; + } +--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h +@@ -37,9 +37,11 @@ enum t7xx_fsm_state { + + enum t7xx_fsm_event_state { + FSM_EVENT_INVALID, ++ FSM_EVENT_MD_HS2, + FSM_EVENT_MD_EX, + FSM_EVENT_MD_EX_REC_OK, + FSM_EVENT_MD_EX_PASS, ++ FSM_EVENT_MD_HS2_EXIT, + FSM_EVENT_MAX + }; + diff --git a/target/linux/generic/backport-5.15/621-v5.19-05-net-wwan-t7xx-Add-AT-and-MBIM-WWAN-ports.patch b/target/linux/generic/backport-5.15/621-v5.19-05-net-wwan-t7xx-Add-AT-and-MBIM-WWAN-ports.patch new file mode 100644 index 0000000000..4f1aa56420 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-05-net-wwan-t7xx-Add-AT-and-MBIM-WWAN-ports.patch @@ -0,0 +1,253 @@ +From 61b7a2916a0ef91be2e9a4b0d0a5bdf9a371cbee Mon Sep 17 00:00:00 2001 +From: Chandrashekar Devegowda +Date: Fri, 6 May 2022 11:13:03 -0700 +Subject: [PATCH] net: wwan: t7xx: Add AT and MBIM WWAN ports +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Adds AT and MBIM ports to the port proxy infrastructure. +The initialization method is responsible for creating the corresponding +ports using the WWAN framework infrastructure. The implemented WWAN port +operations are start, stop, and TX. + +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Sergey Ryazanov +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/Makefile | 1 + + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 20 +++ + drivers/net/wwan/t7xx/t7xx_port_proxy.h | 1 + + drivers/net/wwan/t7xx/t7xx_port_wwan.c | 176 ++++++++++++++++++++++++ + 4 files changed, 198 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/t7xx_port_wwan.c + +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -12,3 +12,4 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_hif_cldma.o \ + t7xx_port_proxy.o \ + t7xx_port_ctrl_msg.o \ ++ t7xx_port_wwan.o \ +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -50,6 +50,26 @@ + + static const struct t7xx_port_conf t7xx_md_port_conf[] = { + { ++ .tx_ch = PORT_CH_UART2_TX, ++ .rx_ch = PORT_CH_UART2_RX, ++ .txq_index = Q_IDX_AT_CMD, ++ .rxq_index = Q_IDX_AT_CMD, ++ .txq_exp_index = 0xff, ++ .rxq_exp_index = 0xff, ++ .path_id = CLDMA_ID_MD, ++ .ops = &wwan_sub_port_ops, ++ .name = "AT", ++ .port_type = WWAN_PORT_AT, ++ }, { ++ .tx_ch = PORT_CH_MBIM_TX, ++ .rx_ch = PORT_CH_MBIM_RX, ++ .txq_index = Q_IDX_MBIM, ++ .rxq_index = Q_IDX_MBIM, ++ .path_id = CLDMA_ID_MD, ++ .ops = &wwan_sub_port_ops, ++ .name = "MBIM", ++ .port_type = WWAN_PORT_MBIM, ++ }, { + .tx_ch = PORT_CH_CONTROL_TX, + .rx_ch = PORT_CH_CONTROL_RX, + .txq_index = Q_IDX_CTRL, +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h +@@ -84,6 +84,7 @@ struct ctrl_msg_header { + #define PORT_ENUM_VER_MISMATCH 0x00657272 + + /* Port operations mapping */ ++extern struct port_ops wwan_sub_port_ops; + extern struct port_ops ctl_port_ops; + + void t7xx_port_proxy_reset(struct port_proxy *port_prox); +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c +@@ -0,0 +1,176 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Amir Hanania ++ * Chandrashekar Devegowda ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Andy Shevchenko ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_port.h" ++#include "t7xx_port_proxy.h" ++#include "t7xx_state_monitor.h" ++ ++static int t7xx_port_ctrl_start(struct wwan_port *port) ++{ ++ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); ++ ++ if (atomic_read(&port_mtk->usage_cnt)) ++ return -EBUSY; ++ ++ atomic_inc(&port_mtk->usage_cnt); ++ return 0; ++} ++ ++static void t7xx_port_ctrl_stop(struct wwan_port *port) ++{ ++ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); ++ ++ atomic_dec(&port_mtk->usage_cnt); ++} ++ ++static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) ++{ ++ struct t7xx_port *port_private = wwan_port_get_drvdata(port); ++ size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU; ++ const struct t7xx_port_conf *port_conf; ++ struct t7xx_fsm_ctl *ctl; ++ enum md_state md_state; ++ ++ len = skb->len; ++ if (!len || !port_private->chan_enable) ++ return -EINVAL; ++ ++ port_conf = port_private->port_conf; ++ ctl = port_private->t7xx_dev->md->fsm_ctl; ++ md_state = t7xx_fsm_get_md_state(ctl); ++ if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) { ++ dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n", ++ port_conf->name, md_state); ++ return -ENODEV; ++ } ++ ++ for (offset = 0; offset < len; offset += chunk_len) { ++ struct sk_buff *skb_ccci; ++ int ret; ++ ++ chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header)); ++ skb_ccci = t7xx_port_alloc_skb(chunk_len); ++ if (!skb_ccci) ++ return -ENOMEM; ++ ++ skb_put_data(skb_ccci, skb->data + offset, chunk_len); ++ ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0); ++ if (ret) { ++ dev_kfree_skb_any(skb_ccci); ++ dev_err(port_private->dev, "Write error on %s port, %d\n", ++ port_conf->name, ret); ++ return ret; ++ } ++ } ++ ++ dev_kfree_skb(skb); ++ return 0; ++} ++ ++static const struct wwan_port_ops wwan_ops = { ++ .start = t7xx_port_ctrl_start, ++ .stop = t7xx_port_ctrl_stop, ++ .tx = t7xx_port_ctrl_tx, ++}; ++ ++static int t7xx_port_wwan_init(struct t7xx_port *port) ++{ ++ port->rx_length_th = RX_QUEUE_MAXLEN; ++ return 0; ++} ++ ++static void t7xx_port_wwan_uninit(struct t7xx_port *port) ++{ ++ if (!port->wwan_port) ++ return; ++ ++ port->rx_length_th = 0; ++ wwan_remove_port(port->wwan_port); ++ port->wwan_port = NULL; ++} ++ ++static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb) ++{ ++ if (!atomic_read(&port->usage_cnt) || !port->chan_enable) { ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ dev_kfree_skb_any(skb); ++ dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n", ++ port_conf->name); ++ /* Dropping skb, caller should not access skb.*/ ++ return 0; ++ } ++ ++ wwan_port_rx(port->wwan_port, skb); ++ return 0; ++} ++ ++static int t7xx_port_wwan_enable_chl(struct t7xx_port *port) ++{ ++ spin_lock(&port->port_update_lock); ++ port->chan_enable = true; ++ spin_unlock(&port->port_update_lock); ++ ++ return 0; ++} ++ ++static int t7xx_port_wwan_disable_chl(struct t7xx_port *port) ++{ ++ spin_lock(&port->port_update_lock); ++ port->chan_enable = false; ++ spin_unlock(&port->port_update_lock); ++ ++ return 0; ++} ++ ++static void 
t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) ++{ ++ const struct t7xx_port_conf *port_conf = port->port_conf; ++ ++ if (state != MD_STATE_READY) ++ return; ++ ++ if (!port->wwan_port) { ++ port->wwan_port = wwan_create_port(port->dev, port_conf->port_type, ++ &wwan_ops, port); ++ if (IS_ERR(port->wwan_port)) ++ dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); ++ } ++} ++ ++struct port_ops wwan_sub_port_ops = { ++ .init = t7xx_port_wwan_init, ++ .recv_skb = t7xx_port_wwan_recv_skb, ++ .uninit = t7xx_port_wwan_uninit, ++ .enable_chl = t7xx_port_wwan_enable_chl, ++ .disable_chl = t7xx_port_wwan_disable_chl, ++ .md_state_notify = t7xx_port_wwan_md_state_notify, ++}; diff --git a/target/linux/generic/backport-5.15/621-v5.19-06-net-wwan-t7xx-Data-path-HW-layer.patch b/target/linux/generic/backport-5.15/621-v5.19-06-net-wwan-t7xx-Data-path-HW-layer.patch new file mode 100644 index 0000000000..bc7407e42a --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-06-net-wwan-t7xx-Data-path-HW-layer.patch @@ -0,0 +1,1716 @@ +From 33f78ab5a38a79cc5227eb59da9c98b25cfb0ff1 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:04 -0700 +Subject: [PATCH] net: wwan: t7xx: Data path HW layer +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Data Path Modem AP Interface (DPMAIF) HW layer provides HW abstraction +for the upper layer (DPMAIF HIF). It implements functions to do the HW +configuration, TX/RX control and interrupt handling. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Ilpo Järvinen +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_dpmaif.c | 1283 +++++++++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_dpmaif.h | 179 ++++ + drivers/net/wwan/t7xx/t7xx_reg.h | 213 +++++ + 3 files changed, 1675 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/t7xx_dpmaif.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_dpmaif.h + +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c +@@ -0,0 +1,1283 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Andy Shevchenko ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_dpmaif.h" ++#include "t7xx_reg.h" ++ ++#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us) ++ ++static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info) ++{ ++ struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask; ++ u32 value, ul_intr_enable, dl_intr_enable; ++ int ret; ++ ++ ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK; ++ isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; ++ iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); ++ ++ /* Set interrupt enable mask */ ++ iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); ++ iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); ++ ++ /* Check mask status */ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, ++ value, (value & ul_intr_enable) != ul_intr_enable, 0, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (ret) ++ return ret; ++ ++ dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR; ++ isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable; ++ ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN | ++ DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN; ++ isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; ++ iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); ++ ++ /* Set DL ISR PD enable mask */ ++ iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0, ++ value, (value & ul_intr_enable) != ul_intr_enable, 0, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (ret) ++ return ret; ++ ++ isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY; ++ iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); ++ iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk, ++ hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK); ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); ++ value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); ++ iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK); ++ ++ return 0; ++} ++ ++static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) ++{ ++ struct dpmaif_isr_en_mask *isr_en_msk; ++ u32 value, ul_int_que_done; ++ int ret; ++ ++ isr_en_msk = &hw_info->isr_en_mask; ++ ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; ++ isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done; ++ iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, ++ value, (value & ul_int_que_done) == ul_int_que_done, 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (ret) ++ dev_err(hw_info->dev, ++ "Could not mask the UL interrupt. 
DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", ++ value); ++} ++ ++void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) ++{ ++ struct dpmaif_isr_en_mask *isr_en_msk; ++ u32 value, ul_int_que_done; ++ int ret; ++ ++ isr_en_msk = &hw_info->isr_en_mask; ++ ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; ++ isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done; ++ iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, ++ value, (value & ul_int_que_done) != ul_int_que_done, 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (ret) ++ dev_err(hw_info->dev, ++ "Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", ++ value); ++} ++ ++void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info) ++{ ++ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR; ++ iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); ++} ++ ++void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info) ++{ ++ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR; ++ iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); ++} ++ ++static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); ++ iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++ return value; ++} ++ ++static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno) ++{ ++ u32 value, q_done; ++ int ret; ++ ++ q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; ++ iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++ ++ ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done, ++ 0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done); ++ if (ret) { ++ dev_err(hw_info->dev, ++ "Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", ++ value); ++ return -ETIMEDOUT; ++ } ++ ++ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done; ++ return 0; ++} ++ ++void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno) ++{ ++ u32 mask; ++ ++ mask = qno == DPF_RX_QNO0 ? 
DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; ++ iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); ++ hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask; ++} ++ ++void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info) ++{ ++ u32 ip_busy_sts; ++ ++ ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY); ++ iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); ++} ++ ++static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, ++ unsigned int qno) ++{ ++ if (qno == DPF_RX_QNO0) ++ iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, ++ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++ else ++ iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, ++ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++} ++ ++void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, ++ unsigned int qno) ++{ ++ if (qno == DPF_RX_QNO0) ++ iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, ++ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); ++ else ++ iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, ++ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); ++} ++ ++void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); ++} ++ ++void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); ++} ++ ++static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para, ++ enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue) ++{ ++ para->intr_types[para->intr_cnt] = intr_type; ++ para->intr_queues[para->intr_cnt] = intr_queue; ++ para->intr_cnt++; ++} ++ ++/* The para->intr_cnt counter is set to zero before this function is called. ++ * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. ++ */ ++static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info, ++ unsigned int intr_status, ++ struct dpmaif_hw_intr_st_para *para) ++{ ++ unsigned long value; ++ ++ value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status); ++ if (value) { ++ unsigned int index; ++ ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value); ++ ++ for_each_set_bit(index, &value, DPMAIF_TXQ_NUM) ++ t7xx_dpmaif_mask_ulq_intr(hw_info, index); ++ } ++ ++ value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status); ++ if (value) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value); ++ ++ value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status); ++ if (value) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value); ++ ++ value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status); ++ if (value) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value); ++ ++ value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status); ++ if (value) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value); ++ ++ /* Clear interrupt status */ ++ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); ++} ++ ++/* The para->intr_cnt counter is set to zero before this function is called. ++ * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. 
++ */ ++static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info, ++ unsigned int intr_status, ++ struct dpmaif_hw_intr_st_para *para, int qno) ++{ ++ if (qno == DPF_RX_QNO_DFT) { ++ if (intr_status & DP_DL_INT_SKB_LEN_ERR) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT); ++ ++ if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) { ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT); ++ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR; ++ iowrite32(DP_DL_INT_BATCNT_LEN_ERR, ++ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++ } ++ ++ if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) { ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT); ++ hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR; ++ iowrite32(DP_DL_INT_PITCNT_LEN_ERR, ++ hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); ++ } ++ ++ if (intr_status & DP_DL_INT_PKT_EMPTY_MSK) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT); ++ ++ if (intr_status & DP_DL_INT_FRG_EMPTY_MSK) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT); ++ ++ if (intr_status & DP_DL_INT_MTU_ERR_MSK) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT); ++ ++ if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT); ++ ++ if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) { ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno)); ++ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno); ++ } ++ ++ if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR, ++ DPF_RX_QNO_DFT); ++ ++ if (intr_status & DP_DL_INT_Q0_DONE) { ++ /* Mask RX done interrupt immediately after it occurs, do not clear ++ * the interrupt if the mask operation fails. ++ */ ++ if (!t7xx_mask_dlq_intr(hw_info, qno)) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno)); ++ else ++ intr_status &= ~DP_DL_INT_Q0_DONE; ++ } ++ } else { ++ if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) { ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno)); ++ t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno); ++ } ++ ++ if (intr_status & DP_DL_INT_Q1_DONE) { ++ if (!t7xx_mask_dlq_intr(hw_info, qno)) ++ t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno)); ++ else ++ intr_status &= ~DP_DL_INT_Q1_DONE; ++ } ++ } ++ ++ intr_status |= DP_DL_INT_BATCNT_LEN_ERR; ++ /* Clear interrupt status */ ++ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); ++} ++ ++/** ++ * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW. ++ * @hw_info: Pointer to struct hw_info. ++ * @para: Pointer to struct dpmaif_hw_intr_st_para. ++ * @qno: Queue number. ++ * ++ * Reads RX/TX interrupt status from HW and clears UL/DL status as needed. ++ * ++ * Return: Interrupt count. ++ */ ++int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, ++ struct dpmaif_hw_intr_st_para *para, int qno) ++{ ++ u32 rx_intr_status, tx_intr_status = 0; ++ u32 rx_intr_qdone, tx_intr_qdone = 0; ++ ++ rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); ++ rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0); ++ ++ /* TX interrupt status */ ++ if (qno == DPF_RX_QNO_DFT) { ++ /* All ULQ and DLQ0 interrupts use the same source no need to check ULQ interrupts ++ * when a DLQ1 interrupt has occurred. 
++ */ ++ tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); ++ tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); ++ } ++ ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ ++ if (qno == DPF_RX_QNO_DFT) { ++ /* Do not schedule bottom half again or clear UL interrupt status when we ++ * have already masked it. ++ */ ++ tx_intr_status &= ~tx_intr_qdone; ++ if (tx_intr_status) ++ t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para); ++ } ++ ++ if (rx_intr_status) { ++ if (qno == DPF_RX_QNO0) { ++ rx_intr_status &= DP_DL_Q0_STATUS_MASK; ++ if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE) ++ /* Do not schedule bottom half again or clear DL ++ * queue done interrupt status when we have already masked it. ++ */ ++ rx_intr_status &= ~DP_DL_INT_Q0_DONE; ++ } else { ++ rx_intr_status &= DP_DL_Q1_STATUS_MASK; ++ if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE) ++ rx_intr_status &= ~DP_DL_INT_Q1_DONE; ++ } ++ ++ if (rx_intr_status) ++ t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno); ++ } ++ ++ return para->intr_cnt; ++} ++ ++static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR); ++ value |= DPMAIF_MEM_CLR; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR); ++ ++ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR, ++ value, !(value & DPMAIF_MEM_CLR), 0, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++} ++ ++static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT); ++ udelay(2); ++ iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT); ++ udelay(2); ++ iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT); ++ udelay(2); ++ iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT); ++ udelay(2); ++} ++ ++static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info) ++{ ++ u32 ap_port_mode; ++ int ret; ++ ++ t7xx_dpmaif_hw_reset(hw_info); ++ ++ ret = t7xx_dpmaif_sram_init(hw_info); ++ if (ret) ++ return ret; ++ ++ ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++ ap_port_mode |= DPMAIF_PORT_MODE_PCIE; ++ iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++ iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN); ++ return 0; ++} ++ ++static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW); ++} ++ ++static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info) ++{ ++ u32 enable_bat_cache, enable_pit_burst; ++ ++ enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++ enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI; ++ iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++ ++ enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++ enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN; ++ iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++} ++ ++ /* DPMAIF DL DLQ part HW setting */ ++ ++static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2; ++ value |= DPMAIF_HASH_PRIME_DF << 4; ++ value |= DPMAIF_HPC_TOTAL_NUM << 8; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL); ++} ++ ++static void 
t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = DPMAIF_AGG_MAX_LEN_DF | DPMAIF_AGG_TBL_ENT_NUM_DF << 16; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG); ++} ++ ++static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF, ++ hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5); ++} ++ ++static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0); ++} ++ ++static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value, i; ++ ++ /* Each register holds two DLQ threshold timeout values */ ++ for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) { ++ value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF); ++ value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK, ++ DPMAIF_DLQ_TIMEOUT_THRES_DF); ++ iowrite32(value, ++ hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i); ++ } ++} ++ ++static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES); ++} ++ ++static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info) ++{ ++ t7xx_dpmaif_hw_hpc_cntl_set(hw_info); ++ t7xx_dpmaif_hw_agg_cfg_set(hw_info); ++ t7xx_dpmaif_hw_hash_bit_choose_set(hw_info); ++ t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info); ++ t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info); ++ t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info); ++} ++ ++static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en) ++{ ++ u32 value, dl_bat_init = 0; ++ int ret; ++ ++ if (frg_en) ++ dl_bat_init = DPMAIF_DL_BAT_FRG_INIT; ++ ++ dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET; ++ dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, ++ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (ret) { ++ dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); ++ return ret; ++ } ++ ++ iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, ++ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (ret) ++ dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n"); ++ ++ return ret; ++} ++ ++static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info, ++ dma_addr_t addr) ++{ ++ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0); ++ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3); ++} ++ ++static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++ value &= ~DPMAIF_BAT_SIZE_MSK; ++ value |= size & DPMAIF_BAT_SIZE_MSK; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++} ++ ++static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++ ++ if (enable) ++ value |= DPMAIF_BAT_EN_MSK; ++ else ++ value &= ~DPMAIF_BAT_EN_MSK; ++ ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++} 
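++
++/* The helpers below program the DL always-on (AO) packet-info and ready-check
++ * registers: BID max count, MTU, PIT check number, remaining minimum size,
++ * BAT/fragment buffer sizes, packet alignment and DL checksum enable.
++ */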
++ ++static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); ++ value &= ~DPMAIF_BAT_BID_MAXCNT_MSK; ++ value |= FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT); ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info) ++{ ++ iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); ++ value &= ~DPMAIF_PIT_CHK_NUM_MSK; ++ value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM); ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); ++ value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK; ++ value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK, ++ DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE); ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); ++ value &= ~DPMAIF_BAT_BUF_SZ_MSK; ++ value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK, ++ DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE); ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); ++ value &= ~DPMAIF_BAT_RSV_LEN_MSK; ++ value |= DPMAIF_HW_BAT_RSVLEN; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); ++} ++ ++static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++ value &= ~DPMAIF_PKT_ALIGN_MSK; ++ value |= DPMAIF_PKT_ALIGN_EN; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++} ++ ++static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++ value |= DPMAIF_DL_PKT_CHECKSUM_EN; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); ++ value &= ~DPMAIF_FRG_CHECK_THRES_MSK; ++ value |= DPMAIF_HW_CHK_FRG_NUM; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); ++ value &= ~DPMAIF_FRG_BUF_SZ_MSK; ++ value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK, ++ DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE); ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); ++} ++ ++static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + 
DPMAIF_AO_DL_RDY_CHK_FRG_THRES); ++ ++ if (enable) ++ value |= DPMAIF_FRG_EN_MSK; ++ else ++ value &= ~DPMAIF_FRG_EN_MSK; ++ ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); ++} ++ ++static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++ value &= ~DPMAIF_BAT_CHECK_THRES_MSK; ++ value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM); ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); ++} ++ ++static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); ++ value &= ~DPMAIF_DL_PIT_SEQ_MSK; ++ value |= DPMAIF_DL_PIT_SEQ_VALUE; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); ++} ++ ++static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info, ++ dma_addr_t addr) ++{ ++ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0); ++ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4); ++} ++ ++static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); ++ value &= ~DPMAIF_PIT_SIZE_MSK; ++ value |= size & DPMAIF_PIT_SIZE_MSK; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); ++ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2); ++ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); ++ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5); ++ iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6); ++} ++ ++static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); ++ value |= DPMAIF_DLQPIT_EN_MSK; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); ++} ++ ++static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info, ++ unsigned int pit_idx) ++{ ++ unsigned int dl_pit_init; ++ int timeout; ++ u32 value; ++ ++ dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET; ++ dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS); ++ dl_pit_init |= DPMAIF_DL_PIT_INIT_EN; ++ ++ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, ++ value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), ++ DPMAIF_CHECK_DELAY_US, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (timeout) { ++ dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n"); ++ return; ++ } ++ ++ iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT); ++ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, ++ value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), ++ DPMAIF_CHECK_DELAY_US, ++ DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (timeout) ++ dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n"); ++} ++ ++static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num, ++ struct dpmaif_dl *dl_que) ++{ ++ t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base); ++ t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt); ++ t7xx_dpmaif_dl_dlq_pit_en(hw_info); ++ t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num); ++} ++ ++static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info) ++{ ++ int i; ++ ++ for (i = 0; i < 
DPMAIF_RXQ_NUM; i++) ++ t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]); ++} ++ ++static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) ++{ ++ u32 dl_bat_init, value; ++ int timeout; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++ ++ if (enable) ++ value |= DPMAIF_BAT_EN_MSK; ++ else ++ value &= ~DPMAIF_BAT_EN_MSK; ++ ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); ++ dl_bat_init = DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT; ++ dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; ++ ++ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, ++ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (timeout) ++ dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n"); ++ ++ iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); ++ timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, ++ value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (timeout) ++ dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); ++} ++ ++static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info) ++{ ++ struct dpmaif_dl *dl_que; ++ int ret; ++ ++ t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info); ++ ++ dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */ ++ if (!dl_que->que_started) ++ return -EBUSY; ++ ++ t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info); ++ t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info); ++ t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info); ++ t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info); ++ t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info); ++ t7xx_dpmaif_dl_set_pkt_alignment(hw_info); ++ t7xx_dpmaif_dl_set_pit_seqnum(hw_info); ++ t7xx_dpmaif_dl_set_ao_mtu(hw_info); ++ t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info); ++ t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info); ++ t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info); ++ t7xx_dpmaif_dl_frg_ao_en(hw_info, true); ++ ++ t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base); ++ t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt); ++ t7xx_dpmaif_dl_bat_en(hw_info, true); ++ ++ ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true); ++ if (ret) ++ return ret; ++ ++ t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base); ++ t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt); ++ t7xx_dpmaif_dl_bat_en(hw_info, false); ++ ++ ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false); ++ if (ret) ++ return ret; ++ ++ /* Init PIT (two PIT table) */ ++ t7xx_dpmaif_config_all_dlq_hw(hw_info); ++ t7xx_dpmaif_dl_all_q_en(hw_info, true); ++ t7xx_dpmaif_dl_set_pkt_checksum(hw_info); ++ return 0; ++} ++ ++static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info, ++ unsigned int q_num, unsigned int size) ++{ ++ unsigned int value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); ++ value &= ~DPMAIF_DRB_SIZE_MSK; ++ value |= size & DPMAIF_DRB_SIZE_MSK; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); ++} ++ ++static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info, ++ unsigned int q_num, dma_addr_t addr) ++{ ++ iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num)); ++ iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num)); ++} ++ ++static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info, ++ unsigned int q_num, bool ready) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); ++ ++ 
if (ready) ++ value |= BIT(q_num); ++ else ++ value &= ~BIT(q_num); ++ ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); ++} ++ ++static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info, ++ unsigned int q_num, bool enable) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); ++ ++ if (enable) ++ value |= BIT(q_num + 8); ++ else ++ value &= ~BIT(q_num + 8); ++ ++ iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); ++} ++ ++static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info) ++{ ++ struct dpmaif_ul *ul_que; ++ int i; ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ ul_que = &hw_info->ul_que[i]; ++ if (ul_que->que_started) { ++ t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt * ++ DPMAIF_UL_DRB_SIZE_WORD); ++ t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base); ++ t7xx_dpmaif_ul_rdy_en(hw_info, i, true); ++ t7xx_dpmaif_ul_arb_en(hw_info, i, true); ++ } else { ++ t7xx_dpmaif_ul_arb_en(hw_info, i, false); ++ } ++ } ++} ++ ++static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info) ++{ ++ u32 ap_cfg; ++ int ret; ++ ++ ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); ++ ap_cfg |= DPMAIF_SRAM_SYNC; ++ iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG, ++ ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (ret) ++ return ret; ++ ++ iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET); ++ iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET); ++ return 0; ++} ++ ++static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info) ++{ ++ u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY); ++ ++ return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS); ++} ++ ++static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) ++{ ++ u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); ++ ++ if (enable) ++ ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN; ++ else ++ ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN; ++ ++ iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); ++} ++ ++static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info) ++{ ++ u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY); ++ ++ return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS); ++} ++ ++void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, ++ unsigned int drb_entry_cnt) ++{ ++ u32 ul_update, value; ++ int err; ++ ++ ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK; ++ ul_update |= DPMAIF_UL_ADD_UPDATE; ++ ++ err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), ++ value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (err) { ++ dev_err(hw_info->dev, "UL add is not ready\n"); ++ return; ++ } ++ ++ iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num)); ++ ++ err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), ++ value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (err) ++ dev_err(hw_info->dev, "Timeout updating UL add\n"); ++} ++ ++unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) ++{ ++ unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num)); ++ ++ return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / 
DPMAIF_UL_DRB_SIZE_WORD; ++} ++ ++int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, ++ unsigned int pit_remain_cnt) ++{ ++ u32 dl_update, value; ++ int ret; ++ ++ dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK; ++ dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS); ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, ++ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (ret) { ++ dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n"); ++ return ret; ++ } ++ ++ iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD); ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, ++ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ if (ret) { ++ dev_err(hw_info->dev, "Data plane modem add dlq failed\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, ++ unsigned int dlq_pit_idx) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX + ++ dlq_pit_idx * DLQ_PIT_IDX_SIZE); ++ return value & DPMAIF_DL_RD_WR_IDX_MSK; ++} ++ ++static bool t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) ++{ ++ u32 value; ++ int ret; ++ ++ ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, ++ value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, ++ DPMAIF_CHECK_TIMEOUT_US); ++ return ret; ++} ++ ++int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt) ++{ ++ unsigned int value; ++ ++ if (t7xx_dl_add_timedout(hw_info)) { ++ dev_err(hw_info->dev, "DL add BAT not ready\n"); ++ return -EBUSY; ++ } ++ ++ value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; ++ value |= DPMAIF_DL_ADD_UPDATE; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); ++ ++ if (t7xx_dl_add_timedout(hw_info)) { ++ dev_err(hw_info->dev, "DL add BAT timeout\n"); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX); ++ return value & DPMAIF_DL_RD_WR_IDX_MSK; ++} ++ ++unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX); ++ return value & DPMAIF_DL_RD_WR_IDX_MSK; ++} ++ ++int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt) ++{ ++ unsigned int value; ++ ++ if (t7xx_dl_add_timedout(hw_info)) { ++ dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n"); ++ return -EBUSY; ++ } ++ ++ value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; ++ value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE; ++ iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); ++ ++ if (t7xx_dl_add_timedout(hw_info)) { ++ dev_err(hw_info->dev, "Data plane modem add frag DLQ failed"); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) ++{ ++ u32 value; ++ ++ value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX); ++ return value & DPMAIF_DL_RD_WR_IDX_MSK; ++} ++ ++static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info, ++ struct dpmaif_hw_params *init_para) ++{ ++ struct dpmaif_dl *dl_que; ++ struct dpmaif_ul *ul_que; ++ int 
i; ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) { ++ dl_que = &hw_info->dl_que[i]; ++ dl_que->bat_base = init_para->pkt_bat_base_addr[i]; ++ dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i]; ++ dl_que->pit_base = init_para->pit_base_addr[i]; ++ dl_que->pit_size_cnt = init_para->pit_size_cnt[i]; ++ dl_que->frg_base = init_para->frg_bat_base_addr[i]; ++ dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i]; ++ dl_que->que_started = true; ++ } ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ ul_que = &hw_info->ul_que[i]; ++ ul_que->drb_base = init_para->drb_base_addr[i]; ++ ul_que->drb_size_cnt = init_para->drb_size_cnt[i]; ++ ul_que->que_started = true; ++ } ++} ++ ++/** ++ * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues. ++ * @hw_info: Pointer to struct hw_info. ++ * ++ * Disable HW UL queues. Checks busy UL queues to go to idle ++ * with an attempt count of 1000000. ++ * ++ * Return: ++ * * 0 - Success ++ * * -ETIMEDOUT - Timed out checking busy queues ++ */ ++int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info) ++{ ++ int count = 0; ++ ++ t7xx_dpmaif_ul_all_q_en(hw_info, false); ++ while (t7xx_dpmaif_ul_idle_check(hw_info)) { ++ if (++count >= DPMAIF_MAX_CHECK_COUNT) { ++ dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n", ++ ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY)); ++ return -ETIMEDOUT; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues. ++ * @hw_info: Pointer to struct hw_info. ++ * ++ * Disable HW DL queue. Checks busy UL queues to go to idle ++ * with an attempt count of 1000000. ++ * Check that HW PIT write index equals read index with the same ++ * attempt count. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ETIMEDOUT - Timed out checking busy queues. ++ */ ++int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info) ++{ ++ unsigned int wr_idx, rd_idx; ++ int count = 0; ++ ++ t7xx_dpmaif_dl_all_q_en(hw_info, false); ++ while (t7xx_dpmaif_dl_idle_check(hw_info)) { ++ if (++count >= DPMAIF_MAX_CHECK_COUNT) { ++ dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n", ++ ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY)); ++ return -ETIMEDOUT; ++ } ++ } ++ ++ /* Check middle PIT sync done */ ++ count = 0; ++ do { ++ wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX); ++ wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK; ++ rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX); ++ rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK; ++ ++ if (wr_idx == rd_idx) ++ return 0; ++ } while (++count < DPMAIF_MAX_CHECK_COUNT); ++ ++ dev_err(hw_info->dev, "Check middle PIT sync fail\n"); ++ return -ETIMEDOUT; ++} ++ ++void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info) ++{ ++ t7xx_dpmaif_ul_all_q_en(hw_info, true); ++ t7xx_dpmaif_dl_all_q_en(hw_info, true); ++} ++ ++/** ++ * t7xx_dpmaif_hw_init() - Initialize HW data path API. ++ * @hw_info: Pointer to struct hw_info. ++ * @init_param: Pointer to struct dpmaif_hw_params. ++ * ++ * Configures port mode, clock config, HW interrupt initialization, and HW queue. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code from failure sub-initializations. 
++ */ ++int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param) ++{ ++ int ret; ++ ++ ret = t7xx_dpmaif_hw_config(hw_info); ++ if (ret) { ++ dev_err(hw_info->dev, "DPMAIF HW config failed\n"); ++ return ret; ++ } ++ ++ ret = t7xx_dpmaif_init_intr(hw_info); ++ if (ret) { ++ dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n"); ++ return ret; ++ } ++ ++ t7xx_dpmaif_set_queue_property(hw_info, init_param); ++ t7xx_dpmaif_pcie_dpmaif_sign(hw_info); ++ t7xx_dpmaif_dl_performance(hw_info); ++ ++ ret = t7xx_dpmaif_config_dlq_hw(hw_info); ++ if (ret) { ++ dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n"); ++ return ret; ++ } ++ ++ t7xx_dpmaif_config_ulq_hw(hw_info); ++ ++ ret = t7xx_dpmaif_hw_init_done(hw_info); ++ if (ret) ++ dev_err(hw_info->dev, "DPMAIF HW queue init failed\n"); ++ ++ return ret; ++} ++ ++bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno) ++{ ++ u32 intr_status; ++ ++ intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); ++ intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno); ++ if (intr_status) { ++ iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); ++ return true; ++ } ++ ++ return false; ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.h +@@ -0,0 +1,179 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_DPMAIF_H__ ++#define __T7XX_DPMAIF_H__ ++ ++#include ++#include ++ ++#define DPMAIF_DL_PIT_SEQ_VALUE 251 ++#define DPMAIF_UL_DRB_SIZE_WORD 4 ++ ++#define DPMAIF_MAX_CHECK_COUNT 1000000 ++#define DPMAIF_CHECK_TIMEOUT_US 10000 ++#define DPMAIF_CHECK_INIT_TIMEOUT_US 100000 ++#define DPMAIF_CHECK_DELAY_US 10 ++ ++#define DPMAIF_RXQ_NUM 2 ++#define DPMAIF_TXQ_NUM 5 ++ ++struct dpmaif_isr_en_mask { ++ unsigned int ap_ul_l2intr_en_msk; ++ unsigned int ap_dl_l2intr_en_msk; ++ unsigned int ap_udl_ip_busy_en_msk; ++ unsigned int ap_dl_l2intr_err_en_msk; ++}; ++ ++struct dpmaif_ul { ++ bool que_started; ++ unsigned char reserved[3]; ++ dma_addr_t drb_base; ++ unsigned int drb_size_cnt; ++}; ++ ++struct dpmaif_dl { ++ bool que_started; ++ unsigned char reserved[3]; ++ dma_addr_t pit_base; ++ unsigned int pit_size_cnt; ++ dma_addr_t bat_base; ++ unsigned int bat_size_cnt; ++ dma_addr_t frg_base; ++ unsigned int frg_size_cnt; ++ unsigned int pit_seq; ++}; ++ ++struct dpmaif_hw_info { ++ struct device *dev; ++ void __iomem *pcie_base; ++ struct dpmaif_dl dl_que[DPMAIF_RXQ_NUM]; ++ struct dpmaif_ul ul_que[DPMAIF_TXQ_NUM]; ++ struct dpmaif_isr_en_mask isr_en_mask; ++}; ++ ++/* DPMAIF HW Initialization parameter structure */ ++struct dpmaif_hw_params { ++ /* UL part */ ++ dma_addr_t drb_base_addr[DPMAIF_TXQ_NUM]; ++ unsigned int drb_size_cnt[DPMAIF_TXQ_NUM]; ++ /* DL part */ ++ dma_addr_t pkt_bat_base_addr[DPMAIF_RXQ_NUM]; ++ unsigned int pkt_bat_size_cnt[DPMAIF_RXQ_NUM]; ++ dma_addr_t frg_bat_base_addr[DPMAIF_RXQ_NUM]; ++ unsigned int frg_bat_size_cnt[DPMAIF_RXQ_NUM]; ++ dma_addr_t pit_base_addr[DPMAIF_RXQ_NUM]; ++ unsigned int pit_size_cnt[DPMAIF_RXQ_NUM]; ++}; ++ ++enum dpmaif_hw_intr_type { ++ DPF_INTR_INVALID_MIN, ++ DPF_INTR_UL_DONE, ++ DPF_INTR_UL_DRB_EMPTY, ++ DPF_INTR_UL_MD_NOTREADY, ++ DPF_INTR_UL_MD_PWR_NOTREADY, ++ DPF_INTR_UL_LEN_ERR, ++ DPF_INTR_DL_DONE, ++ DPF_INTR_DL_SKB_LEN_ERR, 
++ DPF_INTR_DL_BATCNT_LEN_ERR, ++ DPF_INTR_DL_PITCNT_LEN_ERR, ++ DPF_INTR_DL_PKT_EMPTY_SET, ++ DPF_INTR_DL_FRG_EMPTY_SET, ++ DPF_INTR_DL_MTU_ERR, ++ DPF_INTR_DL_FRGCNT_LEN_ERR, ++ DPF_INTR_DL_Q0_PITCNT_LEN_ERR, ++ DPF_INTR_DL_Q1_PITCNT_LEN_ERR, ++ DPF_INTR_DL_HPC_ENT_TYPE_ERR, ++ DPF_INTR_DL_Q0_DONE, ++ DPF_INTR_DL_Q1_DONE, ++ DPF_INTR_INVALID_MAX ++}; ++ ++#define DPF_RX_QNO0 0 ++#define DPF_RX_QNO1 1 ++#define DPF_RX_QNO_DFT DPF_RX_QNO0 ++ ++struct dpmaif_hw_intr_st_para { ++ unsigned int intr_cnt; ++ enum dpmaif_hw_intr_type intr_types[DPF_INTR_INVALID_MAX - 1]; ++ unsigned int intr_queues[DPF_INTR_INVALID_MAX - 1]; ++}; ++ ++#define DPMAIF_HW_BAT_REMAIN 64 ++#define DPMAIF_HW_BAT_PKTBUF (128 * 28) ++#define DPMAIF_HW_FRG_PKTBUF 128 ++#define DPMAIF_HW_BAT_RSVLEN 64 ++#define DPMAIF_HW_PKT_BIDCNT 1 ++#define DPMAIF_HW_MTU_SIZE (3 * 1024 + 8) ++#define DPMAIF_HW_CHK_BAT_NUM 62 ++#define DPMAIF_HW_CHK_FRG_NUM 3 ++#define DPMAIF_HW_CHK_PIT_NUM (2 * DPMAIF_HW_CHK_BAT_NUM) ++ ++#define DP_UL_INT_DONE_OFFSET 0 ++#define DP_UL_INT_QDONE_MSK GENMASK(4, 0) ++#define DP_UL_INT_EMPTY_MSK GENMASK(9, 5) ++#define DP_UL_INT_MD_NOTREADY_MSK GENMASK(14, 10) ++#define DP_UL_INT_MD_PWR_NOTREADY_MSK GENMASK(19, 15) ++#define DP_UL_INT_ERR_MSK GENMASK(24, 20) ++ ++#define DP_DL_INT_QDONE_MSK BIT(0) ++#define DP_DL_INT_SKB_LEN_ERR BIT(1) ++#define DP_DL_INT_BATCNT_LEN_ERR BIT(2) ++#define DP_DL_INT_PITCNT_LEN_ERR BIT(3) ++#define DP_DL_INT_PKT_EMPTY_MSK BIT(4) ++#define DP_DL_INT_FRG_EMPTY_MSK BIT(5) ++#define DP_DL_INT_MTU_ERR_MSK BIT(6) ++#define DP_DL_INT_FRG_LEN_ERR_MSK BIT(7) ++#define DP_DL_INT_Q0_PITCNT_LEN_ERR BIT(8) ++#define DP_DL_INT_Q1_PITCNT_LEN_ERR BIT(9) ++#define DP_DL_INT_HPC_ENT_TYPE_ERR BIT(10) ++#define DP_DL_INT_Q0_DONE BIT(13) ++#define DP_DL_INT_Q1_DONE BIT(14) ++ ++#define DP_DL_Q0_STATUS_MASK (DP_DL_INT_Q0_PITCNT_LEN_ERR | DP_DL_INT_Q0_DONE) ++#define DP_DL_Q1_STATUS_MASK (DP_DL_INT_Q1_PITCNT_LEN_ERR | DP_DL_INT_Q1_DONE) ++ ++int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param); ++int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info); ++int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info); ++void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info); ++int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, ++ struct dpmaif_hw_intr_st_para *para, int qno); ++void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num); ++void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, ++ unsigned int drb_entry_cnt); ++int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt); ++int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt); ++int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, ++ unsigned int pit_remain_cnt); ++void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, ++ unsigned int qno); ++void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno); ++bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno); ++void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info); ++void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info); ++void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info); ++void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info); ++void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info); 
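++
++/* Ring read/write index accessors for the UL DRB, DL BAT, fragment BAT and
++ * DLQ PIT rings.
++ */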
++unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); ++unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); ++unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); ++unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); ++unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, ++ unsigned int dlq_pit_idx); ++ ++#endif /* __T7XX_DPMAIF_H__ */ +--- a/drivers/net/wwan/t7xx/t7xx_reg.h ++++ b/drivers/net/wwan/t7xx/t7xx_reg.h +@@ -134,4 +134,217 @@ enum t7xx_int { + CLDMA3_INT, + }; + ++/* DPMA definitions */ ++ ++#define DPMAIF_PD_BASE 0x1022d000 ++#define BASE_DPMAIF_UL DPMAIF_PD_BASE ++#define BASE_DPMAIF_DL (DPMAIF_PD_BASE + 0x100) ++#define BASE_DPMAIF_AP_MISC (DPMAIF_PD_BASE + 0x400) ++#define BASE_DPMAIF_MMW_HPC (DPMAIF_PD_BASE + 0x600) ++#define BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX (DPMAIF_PD_BASE + 0x900) ++#define BASE_DPMAIF_PD_SRAM_DL (DPMAIF_PD_BASE + 0xc00) ++#define BASE_DPMAIF_PD_SRAM_UL (DPMAIF_PD_BASE + 0xd00) ++ ++#define DPMAIF_AO_BASE 0x10014000 ++#define BASE_DPMAIF_AO_UL DPMAIF_AO_BASE ++#define BASE_DPMAIF_AO_DL (DPMAIF_AO_BASE + 0x400) ++ ++#define DPMAIF_UL_ADD_DESC (BASE_DPMAIF_UL + 0x00) ++#define DPMAIF_UL_CHK_BUSY (BASE_DPMAIF_UL + 0x88) ++#define DPMAIF_UL_RESERVE_AO_RW (BASE_DPMAIF_UL + 0xac) ++#define DPMAIF_UL_ADD_DESC_CH0 (BASE_DPMAIF_UL + 0xb0) ++ ++#define DPMAIF_DL_BAT_INIT (BASE_DPMAIF_DL + 0x00) ++#define DPMAIF_DL_BAT_ADD (BASE_DPMAIF_DL + 0x04) ++#define DPMAIF_DL_BAT_INIT_CON0 (BASE_DPMAIF_DL + 0x08) ++#define DPMAIF_DL_BAT_INIT_CON1 (BASE_DPMAIF_DL + 0x0c) ++#define DPMAIF_DL_BAT_INIT_CON2 (BASE_DPMAIF_DL + 0x10) ++#define DPMAIF_DL_BAT_INIT_CON3 (BASE_DPMAIF_DL + 0x50) ++#define DPMAIF_DL_CHK_BUSY (BASE_DPMAIF_DL + 0xb4) ++ ++#define DPMAIF_AP_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x00) ++#define DPMAIF_AP_APDL_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x50) ++#define DPMAIF_AP_IP_BUSY (BASE_DPMAIF_AP_MISC + 0x60) ++#define DPMAIF_AP_CG_EN (BASE_DPMAIF_AP_MISC + 0x68) ++#define DPMAIF_AP_OVERWRITE_CFG (BASE_DPMAIF_AP_MISC + 0x90) ++#define DPMAIF_AP_MEM_CLR (BASE_DPMAIF_AP_MISC + 0x94) ++#define DPMAIF_AP_ALL_L2TISAR0_MASK GENMASK(31, 0) ++#define DPMAIF_AP_APDL_ALL_L2TISAR0_MASK GENMASK(31, 0) ++#define DPMAIF_AP_IP_BUSY_MASK GENMASK(31, 0) ++ ++#define DPMAIF_AO_UL_INIT_SET (BASE_DPMAIF_AO_UL + 0x0) ++#define DPMAIF_AO_UL_CHNL_ARB0 (BASE_DPMAIF_AO_UL + 0x1c) ++#define DPMAIF_AO_UL_AP_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x80) ++#define DPMAIF_AO_UL_AP_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x84) ++#define DPMAIF_AO_UL_AP_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x88) ++#define DPMAIF_AO_UL_AP_L1TIMR0 (BASE_DPMAIF_AO_UL + 0x8c) ++#define DPMAIF_AO_UL_APDL_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x90) ++#define DPMAIF_AO_UL_APDL_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x94) ++#define DPMAIF_AO_UL_APDL_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x98) ++#define DPMAIF_AO_AP_DLUL_IP_BUSY_MASK (BASE_DPMAIF_AO_UL + 0x9c) ++ ++#define DPMAIF_AO_UL_CHNL0_CON0 (BASE_DPMAIF_PD_SRAM_UL + 0x10) ++#define DPMAIF_AO_UL_CHNL0_CON1 (BASE_DPMAIF_PD_SRAM_UL + 0x14) ++#define DPMAIF_AO_UL_CHNL0_CON2 (BASE_DPMAIF_PD_SRAM_UL + 0x18) ++#define DPMAIF_AO_UL_CH0_STA (BASE_DPMAIF_PD_SRAM_UL + 0x70) ++ ++#define DPMAIF_AO_DL_INIT_SET (BASE_DPMAIF_AO_DL + 0x00) ++#define DPMAIF_AO_DL_IRQ_MASK (BASE_DPMAIF_AO_DL + 0x0c) ++#define DPMAIF_AO_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_AO_DL + 0x28) ++#define DPMAIF_AO_DL_DLQPIT_TRIG_THRES (BASE_DPMAIF_AO_DL + 0x34) ++ ++#define 
DPMAIF_AO_DL_PKTINFO_CON0 (BASE_DPMAIF_PD_SRAM_DL + 0x00) ++#define DPMAIF_AO_DL_PKTINFO_CON1 (BASE_DPMAIF_PD_SRAM_DL + 0x04) ++#define DPMAIF_AO_DL_PKTINFO_CON2 (BASE_DPMAIF_PD_SRAM_DL + 0x08) ++#define DPMAIF_AO_DL_RDY_CHK_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x0c) ++#define DPMAIF_AO_DL_RDY_CHK_FRG_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x10) ++ ++#define DPMAIF_AO_DL_DLQ_AGG_CFG (BASE_DPMAIF_PD_SRAM_DL + 0x20) ++#define DPMAIF_AO_DL_DLQPIT_TIMEOUT0 (BASE_DPMAIF_PD_SRAM_DL + 0x24) ++#define DPMAIF_AO_DL_DLQPIT_TIMEOUT1 (BASE_DPMAIF_PD_SRAM_DL + 0x28) ++#define DPMAIF_AO_DL_HPC_CNTL (BASE_DPMAIF_PD_SRAM_DL + 0x38) ++#define DPMAIF_AO_DL_PIT_SEQ_END (BASE_DPMAIF_PD_SRAM_DL + 0x40) ++ ++#define DPMAIF_AO_DL_BAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xd8) ++#define DPMAIF_AO_DL_BAT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xdc) ++#define DPMAIF_AO_DL_PIT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xec) ++#define DPMAIF_AO_DL_PIT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x60) ++#define DPMAIF_AO_DL_FRGBAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x78) ++#define DPMAIF_AO_DL_DLQ_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xa4) ++ ++#define DPMAIF_HPC_INTR_MASK (BASE_DPMAIF_MMW_HPC + 0x0f4) ++#define DPMA_HPC_ALL_INT_MASK GENMASK(15, 0) ++ ++#define DPMAIF_HPC_DLQ_PATH_MODE 3 ++#define DPMAIF_HPC_ADD_MODE_DF 0 ++#define DPMAIF_HPC_TOTAL_NUM 8 ++#define DPMAIF_HPC_MAX_TOTAL_NUM 8 ++ ++#define DPMAIF_DL_DLQPIT_INIT (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x00) ++#define DPMAIF_DL_DLQPIT_ADD (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x10) ++#define DPMAIF_DL_DLQPIT_INIT_CON0 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x14) ++#define DPMAIF_DL_DLQPIT_INIT_CON1 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x18) ++#define DPMAIF_DL_DLQPIT_INIT_CON2 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x1c) ++#define DPMAIF_DL_DLQPIT_INIT_CON3 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x20) ++#define DPMAIF_DL_DLQPIT_INIT_CON4 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x24) ++#define DPMAIF_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x28) ++#define DPMAIF_DL_DLQPIT_INIT_CON6 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x2c) ++ ++#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q)) ++#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q)) ++#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q)) ++#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q)) ++#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q)) ++ ++#define DPMAIF_UL_DRB_RIDX_MSK GENMASK(31, 16) ++ ++#define DPMAIF_AP_RGU_ASSERT 0x10001150 ++#define DPMAIF_AP_RGU_DEASSERT 0x10001154 ++#define DPMAIF_AP_RST_BIT BIT(2) ++ ++#define DPMAIF_AP_AO_RGU_ASSERT 0x10001140 ++#define DPMAIF_AP_AO_RGU_DEASSERT 0x10001144 ++#define DPMAIF_AP_AO_RST_BIT BIT(6) ++ ++/* DPMAIF init/restore */ ++#define DPMAIF_UL_ADD_NOT_READY BIT(31) ++#define DPMAIF_UL_ADD_UPDATE BIT(31) ++#define DPMAIF_UL_ADD_COUNT_MASK GENMASK(15, 0) ++#define DPMAIF_UL_ALL_QUE_ARB_EN GENMASK(11, 8) ++ ++#define DPMAIF_DL_ADD_UPDATE BIT(31) ++#define DPMAIF_DL_ADD_NOT_READY BIT(31) ++#define DPMAIF_DL_FRG_ADD_UPDATE BIT(16) ++#define DPMAIF_DL_ADD_COUNT_MASK GENMASK(15, 0) ++ ++#define DPMAIF_DL_BAT_INIT_ALLSET BIT(0) ++#define DPMAIF_DL_BAT_FRG_INIT BIT(16) ++#define DPMAIF_DL_BAT_INIT_EN BIT(31) ++#define DPMAIF_DL_BAT_INIT_NOT_READY BIT(31) ++#define DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT 0 ++ ++#define DPMAIF_DL_PIT_INIT_ALLSET BIT(0) ++#define DPMAIF_DL_PIT_INIT_EN BIT(31) ++#define DPMAIF_DL_PIT_INIT_NOT_READY BIT(31) ++ ++#define DPMAIF_BAT_REMAIN_SZ_BASE 16 ++#define DPMAIF_BAT_BUFFER_SZ_BASE 128 ++#define 
DPMAIF_FRG_BUFFER_SZ_BASE 128 ++ ++#define DLQ_PIT_IDX_SIZE 0x20 ++ ++#define DPMAIF_PIT_SIZE_MSK GENMASK(17, 0) ++ ++#define DPMAIF_PIT_REM_CNT_MSK GENMASK(17, 0) ++ ++#define DPMAIF_BAT_EN_MSK BIT(16) ++#define DPMAIF_FRG_EN_MSK BIT(28) ++#define DPMAIF_BAT_SIZE_MSK GENMASK(15, 0) ++ ++#define DPMAIF_BAT_BID_MAXCNT_MSK GENMASK(31, 16) ++#define DPMAIF_BAT_REMAIN_MINSZ_MSK GENMASK(15, 8) ++#define DPMAIF_PIT_CHK_NUM_MSK GENMASK(31, 24) ++#define DPMAIF_BAT_BUF_SZ_MSK GENMASK(16, 8) ++#define DPMAIF_FRG_BUF_SZ_MSK GENMASK(16, 8) ++#define DPMAIF_BAT_RSV_LEN_MSK GENMASK(7, 0) ++#define DPMAIF_PKT_ALIGN_MSK GENMASK(23, 22) ++ ++#define DPMAIF_BAT_CHECK_THRES_MSK GENMASK(21, 16) ++#define DPMAIF_FRG_CHECK_THRES_MSK GENMASK(7, 0) ++ ++#define DPMAIF_PKT_ALIGN_EN BIT(23) ++ ++#define DPMAIF_DRB_SIZE_MSK GENMASK(15, 0) ++ ++#define DPMAIF_DL_RD_WR_IDX_MSK GENMASK(17, 0) ++ ++/* DPMAIF_UL_CHK_BUSY */ ++#define DPMAIF_UL_IDLE_STS BIT(11) ++/* DPMAIF_DL_CHK_BUSY */ ++#define DPMAIF_DL_IDLE_STS BIT(23) ++/* DPMAIF_AO_DL_RDY_CHK_THRES */ ++#define DPMAIF_DL_PKT_CHECKSUM_EN BIT(31) ++#define DPMAIF_PORT_MODE_PCIE BIT(30) ++#define DPMAIF_DL_BURST_PIT_EN BIT(13) ++/* DPMAIF_DL_BAT_INIT_CON1 */ ++#define DPMAIF_DL_BAT_CACHE_PRI BIT(22) ++/* DPMAIF_AP_MEM_CLR */ ++#define DPMAIF_MEM_CLR BIT(0) ++/* DPMAIF_AP_OVERWRITE_CFG */ ++#define DPMAIF_SRAM_SYNC BIT(0) ++/* DPMAIF_AO_UL_INIT_SET */ ++#define DPMAIF_UL_INIT_DONE BIT(0) ++/* DPMAIF_AO_DL_INIT_SET */ ++#define DPMAIF_DL_INIT_DONE BIT(0) ++/* DPMAIF_AO_DL_PIT_SEQ_END */ ++#define DPMAIF_DL_PIT_SEQ_MSK GENMASK(7, 0) ++/* DPMAIF_UL_RESERVE_AO_RW */ ++#define DPMAIF_PCIE_MODE_SET_VALUE 0x55 ++/* DPMAIF_AP_CG_EN */ ++#define DPMAIF_CG_EN 0x7f ++ ++#define DPMAIF_UDL_IP_BUSY BIT(0) ++#define DPMAIF_DL_INT_DLQ0_QDONE BIT(8) ++#define DPMAIF_DL_INT_DLQ1_QDONE BIT(9) ++#define DPMAIF_DL_INT_DLQ0_PITCNT_LEN BIT(10) ++#define DPMAIF_DL_INT_DLQ1_PITCNT_LEN BIT(11) ++#define DPMAIF_DL_INT_Q2TOQ1 BIT(24) ++#define DPMAIF_DL_INT_Q2APTOP BIT(25) ++ ++#define DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS GENMASK(15, 0) ++#define DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK GENMASK(31, 16) ++ ++/* DPMAIF DLQ HW configure */ ++#define DPMAIF_AGG_MAX_LEN_DF 65535 ++#define DPMAIF_AGG_TBL_ENT_NUM_DF 50 ++#define DPMAIF_HASH_PRIME_DF 13 ++#define DPMAIF_MID_TIMEOUT_THRES_DF 100 ++#define DPMAIF_DLQ_TIMEOUT_THRES_DF 100 ++#define DPMAIF_DLQ_PRS_THRES_DF 10 ++#define DPMAIF_DLQ_HASH_BIT_CHOOSE_DF 0 ++ ++#define DPMAIF_DLQPIT_EN_MSK BIT(20) ++#define DPMAIF_DLQPIT_CHAN_OFS 16 ++#define DPMAIF_ADD_DLQ_PIT_CHAN_OFS 20 ++ + #endif /* __T7XX_REG_H__ */ diff --git a/target/linux/generic/backport-5.15/621-v5.19-07-net-wwan-t7xx-Add-data-path-interface.patch b/target/linux/generic/backport-5.15/621-v5.19-07-net-wwan-t7xx-Add-data-path-interface.patch new file mode 100644 index 0000000000..b28d210db7 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-07-net-wwan-t7xx-Add-data-path-interface.patch @@ -0,0 +1,2834 @@ +From d642b012df70a76dd5723f2d426b40bffe83ac49 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:05 -0700 +Subject: [PATCH] net: wwan: t7xx: Add data path interface +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Data Path Modem AP Interface (DPMAIF) HIF layer provides methods +for initialization, ISR, control and event handling of TX/RX flows. + +DPMAIF TX +Exposes the 'dmpaif_tx_send_skb' function which can be used by the +network device to transmit packets. 
+The uplink data management uses a Descriptor Ring Buffer (DRB). +First DRB entry is a message type that will be followed by 1 or more +normal DRB entries. Message type DRB will hold the skb information +and each normal DRB entry holds a pointer to the skb payload. + +DPMAIF RX +The downlink buffer management uses Buffer Address Table (BAT) and +Packet Information Table (PIT) rings. +The BAT ring holds the address of skb data buffer for the HW to use, +while the PIT contains metadata about a whole network packet including +a reference to the BAT entry holding the data buffer address. +The driver reads the PIT and BAT entries written by the modem, when +reaching a threshold, the driver will reload the PIT and BAT rings. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Sergey Ryazanov +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/Makefile | 4 + + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 484 ++++++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 205 ++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 1220 ++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 116 ++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 651 +++++++++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h | 78 ++ + 7 files changed, 2758 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h + +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -13,3 +13,7 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_port_proxy.o \ + t7xx_port_ctrl_msg.o \ + t7xx_port_wwan.o \ ++ t7xx_hif_dpmaif.o \ ++ t7xx_hif_dpmaif_tx.o \ ++ t7xx_hif_dpmaif_rx.o \ ++ t7xx_dpmaif.o \ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c +@@ -0,0 +1,484 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_dpmaif.h" ++#include "t7xx_hif_dpmaif.h" ++#include "t7xx_hif_dpmaif_rx.h" ++#include "t7xx_hif_dpmaif_tx.h" ++#include "t7xx_pci.h" ++#include "t7xx_pcie_mac.h" ++#include "t7xx_state_monitor.h" ++ ++unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx) ++{ ++ buf_idx++; ++ ++ return buf_idx < buf_len ? 
buf_idx : 0; ++} ++ ++unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, ++ unsigned int wr_idx, enum dpmaif_rdwr rd_wr) ++{ ++ int pkt_cnt; ++ ++ if (rd_wr == DPMAIF_READ) ++ pkt_cnt = wr_idx - rd_idx; ++ else ++ pkt_cnt = rd_idx - wr_idx - 1; ++ ++ if (pkt_cnt < 0) ++ pkt_cnt += total_cnt; ++ ++ return (unsigned int)pkt_cnt; ++} ++ ++static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_isr_para *isr_para; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) { ++ isr_para = &dpmaif_ctrl->isr_para[i]; ++ t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ } ++} ++ ++static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_isr_para *isr_para; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) { ++ isr_para = &dpmaif_ctrl->isr_para[i]; ++ t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ } ++} ++ ++static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para) ++{ ++ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl; ++ struct dpmaif_hw_intr_st_para intr_status; ++ struct device *dev = dpmaif_ctrl->dev; ++ struct dpmaif_hw_info *hw_info; ++ int i; ++ ++ memset(&intr_status, 0, sizeof(intr_status)); ++ hw_info = &dpmaif_ctrl->hw_info; ++ ++ if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) { ++ dev_err(dev, "Failed to get HW interrupt count\n"); ++ return; ++ } ++ ++ t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ ++ for (i = 0; i < intr_status.intr_cnt; i++) { ++ switch (intr_status.intr_types[i]) { ++ case DPF_INTR_UL_DONE: ++ t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]); ++ break; ++ ++ case DPF_INTR_UL_DRB_EMPTY: ++ case DPF_INTR_UL_MD_NOTREADY: ++ case DPF_INTR_UL_MD_PWR_NOTREADY: ++ /* No need to log an error for these */ ++ break; ++ ++ case DPF_INTR_DL_BATCNT_LEN_ERR: ++ dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n"); ++ t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info); ++ break; ++ ++ case DPF_INTR_DL_PITCNT_LEN_ERR: ++ dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n"); ++ t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info); ++ break; ++ ++ case DPF_INTR_DL_Q0_PITCNT_LEN_ERR: ++ dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n"); ++ t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT); ++ break; ++ ++ case DPF_INTR_DL_Q1_PITCNT_LEN_ERR: ++ dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n"); ++ t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1); ++ break; ++ ++ case DPF_INTR_DL_DONE: ++ case DPF_INTR_DL_Q0_DONE: ++ case DPF_INTR_DL_Q1_DONE: ++ t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]); ++ break; ++ ++ default: ++ dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n", ++ intr_status.intr_types[i]); ++ } ++ } ++} ++ ++static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data) ++{ ++ struct dpmaif_isr_para *isr_para = data; ++ struct dpmaif_ctrl *dpmaif_ctrl; ++ ++ dpmaif_ctrl = isr_para->dpmaif_ctrl; ++ if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { ++ dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n"); ++ return IRQ_HANDLED; ++ } ++ ++ t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ t7xx_dpmaif_irq_cb(isr_para); ++ t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ return IRQ_HANDLED; ++} ++ ++static 
void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_isr_para *isr_para; ++ unsigned char i; ++ ++ dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT; ++ dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT; ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) { ++ isr_para = &dpmaif_ctrl->isr_para[i]; ++ isr_para->dpmaif_ctrl = dpmaif_ctrl; ++ isr_para->dlq_id = i; ++ isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i]; ++ } ++} ++ ++static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev; ++ struct dpmaif_isr_para *isr_para; ++ enum t7xx_int int_type; ++ int i; ++ ++ t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl); ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) { ++ isr_para = &dpmaif_ctrl->isr_para[i]; ++ int_type = isr_para->pcie_int; ++ t7xx_pcie_mac_clear_int(t7xx_dev, int_type); ++ ++ t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler; ++ t7xx_dev->intr_thread[int_type] = NULL; ++ t7xx_dev->callback_param[int_type] = isr_para; ++ ++ t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type); ++ t7xx_pcie_mac_set_int(t7xx_dev, int_type); ++ } ++} ++ ++static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_rx_queue *rx_q; ++ struct dpmaif_tx_queue *tx_q; ++ int ret, rx_idx, tx_idx, i; ++ ++ ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL); ++ if (ret) { ++ dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret); ++ return ret; ++ } ++ ++ ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG); ++ if (ret) { ++ dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret); ++ goto err_free_normal_bat; ++ } ++ ++ for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) { ++ rx_q = &dpmaif_ctrl->rxq[rx_idx]; ++ rx_q->index = rx_idx; ++ rx_q->dpmaif_ctrl = dpmaif_ctrl; ++ ret = t7xx_dpmaif_rxq_init(rx_q); ++ if (ret) ++ goto err_free_rxq; ++ } ++ ++ for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) { ++ tx_q = &dpmaif_ctrl->txq[tx_idx]; ++ tx_q->index = tx_idx; ++ tx_q->dpmaif_ctrl = dpmaif_ctrl; ++ ret = t7xx_dpmaif_txq_init(tx_q); ++ if (ret) ++ goto err_free_txq; ++ } ++ ++ ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl); ++ if (ret) { ++ dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n"); ++ goto err_free_txq; ++ } ++ ++ ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl); ++ if (ret) ++ goto err_thread_rel; ++ ++ return 0; ++ ++err_thread_rel: ++ t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); ++ ++err_free_txq: ++ for (i = 0; i < tx_idx; i++) { ++ tx_q = &dpmaif_ctrl->txq[i]; ++ t7xx_dpmaif_txq_free(tx_q); ++ } ++ ++err_free_rxq: ++ for (i = 0; i < rx_idx; i++) { ++ rx_q = &dpmaif_ctrl->rxq[i]; ++ t7xx_dpmaif_rxq_free(rx_q); ++ } ++ ++ t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag); ++ ++err_free_normal_bat: ++ t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req); ++ ++ return ret; ++} ++ ++static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_rx_queue *rx_q; ++ struct dpmaif_tx_queue *tx_q; ++ int i; ++ ++ t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); ++ t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl); ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ tx_q = &dpmaif_ctrl->txq[i]; ++ t7xx_dpmaif_txq_free(tx_q); ++ } ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) { ++ rx_q = &dpmaif_ctrl->rxq[i]; ++ t7xx_dpmaif_rxq_free(rx_q); ++ } ++} ++ ++static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_hw_info *hw_info = 
&dpmaif_ctrl->hw_info; ++ struct dpmaif_hw_params hw_init_para; ++ struct dpmaif_rx_queue *rxq; ++ struct dpmaif_tx_queue *txq; ++ unsigned int buf_cnt; ++ int i, ret = 0; ++ ++ if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ++ return -EFAULT; ++ ++ memset(&hw_init_para, 0, sizeof(hw_init_para)); ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) { ++ rxq = &dpmaif_ctrl->rxq[i]; ++ rxq->que_started = true; ++ rxq->index = i; ++ rxq->budget = rxq->bat_req->bat_size_cnt - 1; ++ ++ hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr; ++ hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt; ++ hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr; ++ hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt; ++ hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr; ++ hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt; ++ } ++ ++ bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt); ++ buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1; ++ ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true); ++ if (ret) { ++ dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret); ++ return ret; ++ } ++ ++ buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1; ++ ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true); ++ if (ret) { ++ dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret); ++ goto err_free_normal_bat; ++ } ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ txq = &dpmaif_ctrl->txq[i]; ++ txq->que_started = true; ++ ++ hw_init_para.drb_base_addr[i] = txq->drb_bus_addr; ++ hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt; ++ } ++ ++ ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para); ++ if (ret) { ++ dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret); ++ goto err_free_frag_bat; ++ } ++ ++ ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1); ++ if (ret) ++ goto err_free_frag_bat; ++ ++ ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1); ++ if (ret) ++ goto err_free_frag_bat; ++ ++ t7xx_dpmaif_ul_clr_all_intr(hw_info); ++ t7xx_dpmaif_dl_clr_all_intr(hw_info); ++ dpmaif_ctrl->state = DPMAIF_STATE_PWRON; ++ t7xx_dpmaif_enable_irq(dpmaif_ctrl); ++ wake_up(&dpmaif_ctrl->tx_wq); ++ return 0; ++ ++err_free_frag_bat: ++ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); ++ ++err_free_normal_bat: ++ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); ++ ++ return ret; ++} ++ ++static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ t7xx_dpmaif_tx_stop(dpmaif_ctrl); ++ t7xx_dpmaif_rx_stop(dpmaif_ctrl); ++} ++ ++static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); ++ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); ++} ++ ++static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ if (!dpmaif_ctrl->dpmaif_sw_init_done) { ++ dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n"); ++ return -EFAULT; ++ } ++ ++ if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF) ++ return -EFAULT; ++ ++ t7xx_dpmaif_disable_irq(dpmaif_ctrl); ++ dpmaif_ctrl->state = DPMAIF_STATE_PWROFF; ++ t7xx_dpmaif_stop_sw(dpmaif_ctrl); ++ t7xx_dpmaif_tx_clear(dpmaif_ctrl); ++ t7xx_dpmaif_rx_clear(dpmaif_ctrl); ++ return 0; ++} ++ ++int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state) ++{ ++ int ret = 0; ++ ++ switch (state) { ++ case MD_STATE_WAITING_FOR_HS1: ++ ret = t7xx_dpmaif_start(dpmaif_ctrl); ++ break; ++ ++ 
case MD_STATE_EXCEPTION: ++ ret = t7xx_dpmaif_stop(dpmaif_ctrl); ++ break; ++ ++ case MD_STATE_STOPPED: ++ ret = t7xx_dpmaif_stop(dpmaif_ctrl); ++ break; ++ ++ case MD_STATE_WAITING_TO_STOP: ++ t7xx_dpmaif_stop_hw(dpmaif_ctrl); ++ break; ++ ++ default: ++ break; ++ } ++ ++ return ret; ++} ++ ++/** ++ * t7xx_dpmaif_hif_init() - Initialize data path. ++ * @t7xx_dev: MTK context structure. ++ * @callbacks: Callbacks implemented by the network layer to handle RX skb and ++ * event notifications. ++ * ++ * Allocate and initialize datapath control block. ++ * Register datapath ISR, TX and RX resources. ++ * ++ * Return: ++ * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure. ++ * * NULL - In case of error. ++ */ ++struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, ++ struct dpmaif_callbacks *callbacks) ++{ ++ struct device *dev = &t7xx_dev->pdev->dev; ++ struct dpmaif_ctrl *dpmaif_ctrl; ++ int ret; ++ ++ if (!callbacks) ++ return NULL; ++ ++ dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL); ++ if (!dpmaif_ctrl) ++ return NULL; ++ ++ dpmaif_ctrl->t7xx_dev = t7xx_dev; ++ dpmaif_ctrl->callbacks = callbacks; ++ dpmaif_ctrl->dev = dev; ++ dpmaif_ctrl->dpmaif_sw_init_done = false; ++ dpmaif_ctrl->hw_info.dev = dev; ++ dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - ++ t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; ++ ++ t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); ++ t7xx_dpmaif_disable_irq(dpmaif_ctrl); ++ ++ ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); ++ if (ret) { ++ dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); ++ return NULL; ++ } ++ ++ dpmaif_ctrl->dpmaif_sw_init_done = true; ++ return dpmaif_ctrl; ++} ++ ++void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ if (dpmaif_ctrl->dpmaif_sw_init_done) { ++ t7xx_dpmaif_stop(dpmaif_ctrl); ++ t7xx_dpmaif_sw_release(dpmaif_ctrl); ++ dpmaif_ctrl->dpmaif_sw_init_done = false; ++ } ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h +@@ -0,0 +1,205 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_HIF_DPMAIF_H__ ++#define __T7XX_HIF_DPMAIF_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_dpmaif.h" ++#include "t7xx_pci.h" ++#include "t7xx_state_monitor.h" ++ ++/* SKB control buffer */ ++struct t7xx_skb_cb { ++ u8 netif_idx; ++ u8 txq_number; ++ u8 rx_pkt_type; ++}; ++ ++#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb) ++ ++enum dpmaif_rdwr { ++ DPMAIF_READ, ++ DPMAIF_WRITE, ++}; ++ ++/* Structure of DL BAT */ ++struct dpmaif_cur_rx_skb_info { ++ bool msg_pit_received; ++ struct sk_buff *cur_skb; ++ unsigned int cur_chn_idx; ++ unsigned int check_sum; ++ unsigned int pit_dp; ++ unsigned int pkt_type; ++ int err_payload; ++}; ++ ++struct dpmaif_bat { ++ unsigned int p_buffer_addr; ++ unsigned int buffer_addr_ext; ++}; ++ ++struct dpmaif_bat_skb { ++ struct sk_buff *skb; ++ dma_addr_t data_bus_addr; ++ unsigned int data_len; ++}; ++ ++struct dpmaif_bat_page { ++ struct page *page; ++ dma_addr_t data_bus_addr; ++ unsigned int offset; ++ unsigned int data_len; ++}; ++ ++enum bat_type { ++ BAT_TYPE_NORMAL, ++ BAT_TYPE_FRAG, ++}; ++ ++struct dpmaif_bat_request { ++ void *bat_base; ++ dma_addr_t bat_bus_addr; ++ unsigned int bat_size_cnt; ++ unsigned int bat_wr_idx; ++ unsigned int bat_release_rd_idx; ++ void *bat_skb; ++ unsigned int pkt_buf_sz; ++ unsigned long *bat_bitmap; ++ atomic_t refcnt; ++ spinlock_t mask_lock; /* Protects BAT mask */ ++ enum bat_type type; ++}; ++ ++struct dpmaif_rx_queue { ++ unsigned int index; ++ bool que_started; ++ unsigned int budget; ++ ++ void *pit_base; ++ dma_addr_t pit_bus_addr; ++ unsigned int pit_size_cnt; ++ ++ unsigned int pit_rd_idx; ++ unsigned int pit_wr_idx; ++ unsigned int pit_release_rd_idx; ++ ++ struct dpmaif_bat_request *bat_req; ++ struct dpmaif_bat_request *bat_frag; ++ ++ wait_queue_head_t rx_wq; ++ struct task_struct *rx_thread; ++ struct sk_buff_head skb_list; ++ unsigned int skb_list_max_len; ++ ++ struct workqueue_struct *worker; ++ struct work_struct dpmaif_rxq_work; ++ ++ atomic_t rx_processing; ++ ++ struct dpmaif_ctrl *dpmaif_ctrl; ++ unsigned int expect_pit_seq; ++ unsigned int pit_remain_release_cnt; ++ struct dpmaif_cur_rx_skb_info rx_data_info; ++}; ++ ++struct dpmaif_tx_queue { ++ unsigned int index; ++ bool que_started; ++ atomic_t tx_budget; ++ void *drb_base; ++ dma_addr_t drb_bus_addr; ++ unsigned int drb_size_cnt; ++ unsigned int drb_wr_idx; ++ unsigned int drb_rd_idx; ++ unsigned int drb_release_rd_idx; ++ void *drb_skb_base; ++ wait_queue_head_t req_wq; ++ struct workqueue_struct *worker; ++ struct work_struct dpmaif_tx_work; ++ spinlock_t tx_lock; /* Protects txq DRB */ ++ atomic_t tx_processing; ++ ++ struct dpmaif_ctrl *dpmaif_ctrl; ++ struct sk_buff_head tx_skb_head; ++}; ++ ++struct dpmaif_isr_para { ++ struct dpmaif_ctrl *dpmaif_ctrl; ++ unsigned char pcie_int; ++ unsigned char dlq_id; ++}; ++ ++enum dpmaif_state { ++ DPMAIF_STATE_MIN, ++ DPMAIF_STATE_PWROFF, ++ DPMAIF_STATE_PWRON, ++ DPMAIF_STATE_EXCEPTION, ++ DPMAIF_STATE_MAX ++}; ++ ++enum dpmaif_txq_state { ++ DMPAIF_TXQ_STATE_IRQ, ++ DMPAIF_TXQ_STATE_FULL, ++}; ++ ++struct dpmaif_callbacks { ++ void (*state_notify)(struct t7xx_pci_dev *t7xx_dev, ++ enum dpmaif_txq_state state, int txq_number); ++ void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb); ++}; ++ 
++struct dpmaif_ctrl { ++ struct device *dev; ++ struct t7xx_pci_dev *t7xx_dev; ++ enum dpmaif_state state; ++ bool dpmaif_sw_init_done; ++ struct dpmaif_hw_info hw_info; ++ struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM]; ++ struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM]; ++ ++ unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM]; ++ struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM]; ++ ++ struct dpmaif_bat_request bat_req; ++ struct dpmaif_bat_request bat_frag; ++ struct workqueue_struct *bat_release_wq; ++ struct work_struct bat_release_work; ++ ++ wait_queue_head_t tx_wq; ++ struct task_struct *tx_thread; ++ ++ struct dpmaif_callbacks *callbacks; ++}; ++ ++struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, ++ struct dpmaif_callbacks *callbacks); ++void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl); ++int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state); ++unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx); ++unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, ++ unsigned int wr_idx, enum dpmaif_rdwr); ++ ++#endif /* __T7XX_HIF_DPMAIF_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -0,0 +1,1220 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Eliot Lee ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Andy Shevchenko ++ * Chiranjeevi Rapolu ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_dpmaif.h" ++#include "t7xx_hif_dpmaif.h" ++#include "t7xx_hif_dpmaif_rx.h" ++#include "t7xx_pci.h" ++ ++#define DPMAIF_BAT_COUNT 8192 ++#define DPMAIF_FRG_COUNT 4814 ++#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2) ++ ++#define DPMAIF_BAT_CNT_THRESHOLD 30 ++#define DPMAIF_PIT_CNT_THRESHOLD 60 ++#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0) ++#define DPMAIF_NOTIFY_RELEASE_COUNT 128 ++#define DPMAIF_POLL_PIT_TIME_US 20 ++#define DPMAIF_POLL_PIT_MAX_TIME_US 2000 ++#define DPMAIF_WQ_TIME_LIMIT_MS 2 ++#define DPMAIF_CS_RESULT_PASS 0 ++ ++/* Packet type */ ++#define DES_PT_PD 0 ++#define DES_PT_MSG 1 ++/* Buffer type */ ++#define PKT_BUF_FRAG 1 ++ ++static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info) ++{ ++ u32 value; ++ ++ value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer)); ++ value <<= 13; ++ value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header)); ++ return value; ++} ++ ++static int t7xx_dpmaif_net_rx_push_thread(void *arg) ++{ ++ struct dpmaif_rx_queue *q = arg; ++ struct dpmaif_ctrl *hif_ctrl; ++ struct dpmaif_callbacks *cb; ++ ++ hif_ctrl = q->dpmaif_ctrl; ++ cb = hif_ctrl->callbacks; ++ ++ while (!kthread_should_stop()) { ++ struct sk_buff *skb; ++ unsigned long flags; ++ ++ if (skb_queue_empty(&q->skb_list)) { ++ if (wait_event_interruptible(q->rx_wq, ++ !skb_queue_empty(&q->skb_list) || ++ kthread_should_stop())) ++ continue; ++ ++ if (kthread_should_stop()) ++ break; ++ } ++ ++ spin_lock_irqsave(&q->skb_list.lock, flags); ++ skb = __skb_dequeue(&q->skb_list); ++ spin_unlock_irqrestore(&q->skb_list.lock, flags); ++ ++ if (!skb) ++ continue; ++ ++ 
cb->recv_skb(hif_ctrl->t7xx_dev, skb); ++ cond_resched(); ++ } ++ ++ return 0; ++} ++ ++static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl, ++ const unsigned int q_num, const unsigned int bat_cnt) ++{ ++ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; ++ struct dpmaif_bat_request *bat_req = rxq->bat_req; ++ unsigned int old_rl_idx, new_wr_idx, old_wr_idx; ++ ++ if (!rxq->que_started) { ++ dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); ++ return -EINVAL; ++ } ++ ++ old_rl_idx = bat_req->bat_release_rd_idx; ++ old_wr_idx = bat_req->bat_wr_idx; ++ new_wr_idx = old_wr_idx + bat_cnt; ++ ++ if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx) ++ goto err_flow; ++ ++ if (new_wr_idx >= bat_req->bat_size_cnt) { ++ new_wr_idx -= bat_req->bat_size_cnt; ++ if (new_wr_idx >= old_rl_idx) ++ goto err_flow; ++ } ++ ++ bat_req->bat_wr_idx = new_wr_idx; ++ return 0; ++ ++err_flow: ++ dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n"); ++ return -EINVAL; ++} ++ ++static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl, ++ const unsigned int size, struct dpmaif_bat_skb *cur_skb) ++{ ++ dma_addr_t data_bus_addr; ++ struct sk_buff *skb; ++ size_t data_len; ++ ++ skb = __dev_alloc_skb(size, GFP_KERNEL); ++ if (!skb) ++ return false; ++ ++ data_len = skb_data_area_size(skb); ++ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE); ++ if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { ++ dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); ++ dev_kfree_skb_any(skb); ++ return false; ++ } ++ ++ cur_skb->skb = skb; ++ cur_skb->data_bus_addr = data_bus_addr; ++ cur_skb->data_len = data_len; ++ ++ return true; ++} ++ ++static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base, ++ unsigned int index) ++{ ++ struct dpmaif_bat_skb *bat_skb = bat_skb_base + index; ++ ++ if (bat_skb->skb) { ++ dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); ++ dev_kfree_skb(bat_skb->skb); ++ bat_skb->skb = NULL; ++ } ++} ++ ++/** ++ * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring. ++ * @dpmaif_ctrl: Pointer to DPMAIF context structure. ++ * @bat_req: Pointer to BAT request structure. ++ * @q_num: Queue number. ++ * @buf_cnt: Number of buffers to allocate. ++ * @initial: Indicates if the ring is being populated for the first time. ++ * ++ * Allocate skb and store the start address of the data buffer into the BAT ring. ++ * If this is not the initial call, notify the HW about the new entries. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code. 
++ */ ++int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, ++ const struct dpmaif_bat_request *bat_req, ++ const unsigned int q_num, const unsigned int buf_cnt, ++ const bool initial) ++{ ++ unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx; ++ int ret; ++ ++ if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) ++ return -EINVAL; ++ ++ /* Check BAT buffer space */ ++ bat_max_cnt = bat_req->bat_size_cnt; ++ ++ bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx, ++ bat_req->bat_wr_idx, DPMAIF_WRITE); ++ if (buf_cnt > bat_cnt) ++ return -ENOMEM; ++ ++ bat_start_idx = bat_req->bat_wr_idx; ++ ++ for (i = 0; i < buf_cnt; i++) { ++ unsigned int cur_bat_idx = bat_start_idx + i; ++ struct dpmaif_bat_skb *cur_skb; ++ struct dpmaif_bat *cur_bat; ++ ++ if (cur_bat_idx >= bat_max_cnt) ++ cur_bat_idx -= bat_max_cnt; ++ ++ cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx; ++ if (!cur_skb->skb && ++ !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb)) ++ break; ++ ++ cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; ++ cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr); ++ cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr); ++ } ++ ++ if (!i) ++ return -ENOMEM; ++ ++ ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i); ++ if (ret) ++ goto err_unmap_skbs; ++ ++ if (!initial) { ++ unsigned int hw_wr_idx; ++ ++ ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i); ++ if (ret) ++ goto err_unmap_skbs; ++ ++ hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info, ++ DPF_RX_QNO_DFT); ++ if (hw_wr_idx != bat_req->bat_wr_idx) { ++ ret = -EFAULT; ++ dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n"); ++ goto err_unmap_skbs; ++ } ++ } ++ ++ return 0; ++ ++err_unmap_skbs: ++ while (--i > 0) ++ t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); ++ ++ return ret; ++} ++ ++static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq, ++ const unsigned int rel_entry_num) ++{ ++ struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; ++ unsigned int old_rel_idx, new_rel_idx, hw_wr_idx; ++ int ret; ++ ++ if (!rxq->que_started) ++ return 0; ++ ++ if (rel_entry_num >= rxq->pit_size_cnt) { ++ dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n"); ++ return -EINVAL; ++ } ++ ++ old_rel_idx = rxq->pit_release_rd_idx; ++ new_rel_idx = old_rel_idx + rel_entry_num; ++ hw_wr_idx = rxq->pit_wr_idx; ++ if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt) ++ new_rel_idx -= rxq->pit_size_cnt; ++ ++ ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num); ++ if (ret) { ++ dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret); ++ return ret; ++ } ++ ++ rxq->pit_release_rd_idx = new_rel_idx; ++ return 0; ++} ++ ++static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&bat_req->mask_lock, flags); ++ set_bit(idx, bat_req->bat_bitmap); ++ spin_unlock_irqrestore(&bat_req->mask_lock, flags); ++} ++ ++static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, ++ const unsigned int cur_bid) ++{ ++ struct dpmaif_bat_request *bat_frag = rxq->bat_frag; ++ struct dpmaif_bat_page *bat_page; ++ ++ if (cur_bid >= DPMAIF_FRG_COUNT) ++ return -EINVAL; ++ ++ bat_page = bat_frag->bat_skb + cur_bid; ++ if (!bat_page->page) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page 
*bat_page_base, ++ unsigned int index) ++{ ++ struct dpmaif_bat_page *bat_page = bat_page_base + index; ++ ++ if (bat_page->page) { ++ dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE); ++ put_page(bat_page->page); ++ bat_page->page = NULL; ++ } ++} ++ ++/** ++ * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring. ++ * @dpmaif_ctrl: Pointer to DPMAIF context structure. ++ * @bat_req: Pointer to BAT request structure. ++ * @buf_cnt: Number of buffers to allocate. ++ * @initial: Indicates if the ring is being populated for the first time. ++ * ++ * Fragment BAT is used when the received packet does not fit in a normal BAT entry. ++ * This function allocates a page fragment and stores the start address of the page ++ * into the Fragment BAT ring. ++ * If this is not the initial call, notify the HW about the new entries. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code. ++ */ ++int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, ++ const unsigned int buf_cnt, const bool initial) ++{ ++ unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx; ++ struct dpmaif_bat_page *bat_skb = bat_req->bat_skb; ++ int ret = 0, i; ++ ++ if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) ++ return -EINVAL; ++ ++ buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt, ++ bat_req->bat_release_rd_idx, bat_req->bat_wr_idx, ++ DPMAIF_WRITE); ++ if (buf_cnt > buf_space) { ++ dev_err(dpmaif_ctrl->dev, ++ "Requested more buffers than the space available in RX frag ring\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < buf_cnt; i++) { ++ struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx; ++ struct dpmaif_bat *cur_bat; ++ dma_addr_t data_base_addr; ++ ++ if (!cur_page->page) { ++ unsigned long offset; ++ struct page *page; ++ void *data; ++ ++ data = netdev_alloc_frag(bat_req->pkt_buf_sz); ++ if (!data) ++ break; ++ ++ page = virt_to_head_page(data); ++ offset = data - page_address(page); ++ ++ data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset, ++ bat_req->pkt_buf_sz, DMA_FROM_DEVICE); ++ if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) { ++ put_page(virt_to_head_page(data)); ++ dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n"); ++ break; ++ } ++ ++ cur_page->page = page; ++ cur_page->data_bus_addr = data_base_addr; ++ cur_page->offset = offset; ++ cur_page->data_len = bat_req->pkt_buf_sz; ++ } ++ ++ data_base_addr = cur_page->data_bus_addr; ++ cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; ++ cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr); ++ cur_bat->p_buffer_addr = lower_32_bits(data_base_addr); ++ cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx); ++ } ++ ++ bat_req->bat_wr_idx = cur_bat_idx; ++ ++ if (!initial) ++ t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i); ++ ++ if (i < buf_cnt) { ++ ret = -ENOMEM; ++ if (initial) { ++ while (--i > 0) ++ t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); ++ } ++ } ++ ++ return ret; ++} ++ ++static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, ++ const struct dpmaif_pit *pkt_info, ++ struct sk_buff *skb) ++{ ++ unsigned long long data_bus_addr, data_base_addr; ++ struct device *dev = rxq->dpmaif_ctrl->dev; ++ struct dpmaif_bat_page *page_info; ++ unsigned int data_len; ++ int data_offset; ++ ++ page_info = rxq->bat_frag->bat_skb; ++ page_info += t7xx_normal_pit_bid(pkt_info); ++ dma_unmap_page(dev, page_info->data_bus_addr, 
page_info->data_len, DMA_FROM_DEVICE); ++ ++ if (!page_info->page) ++ return -EINVAL; ++ ++ data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); ++ data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); ++ data_base_addr = page_info->data_bus_addr; ++ data_offset = data_bus_addr - data_base_addr; ++ data_offset += page_info->offset; ++ data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); ++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, ++ data_offset, data_len, page_info->data_len); ++ ++ page_info->page = NULL; ++ page_info->offset = 0; ++ page_info->data_len = 0; ++ return 0; ++} ++ ++static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq, ++ const struct dpmaif_pit *pkt_info, ++ const struct dpmaif_cur_rx_skb_info *skb_info) ++{ ++ unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); ++ int ret; ++ ++ ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid); ++ if (ret < 0) ++ return ret; ++ ++ ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb); ++ if (ret < 0) { ++ dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret); ++ return ret; ++ } ++ ++ t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid); ++ return 0; ++} ++ ++static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid) ++{ ++ struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb; ++ ++ bat_skb += cur_bid; ++ if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit) ++{ ++ return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer)); ++} ++ ++static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq, ++ const struct dpmaif_pit *pit) ++{ ++ unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq; ++ ++ if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq, ++ cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US, ++ DPMAIF_POLL_PIT_MAX_TIME_US, false, pit)) ++ return -EFAULT; ++ ++ rxq->expect_pit_seq++; ++ if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE) ++ rxq->expect_pit_seq = 0; ++ ++ return 0; ++} ++ ++static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req) ++{ ++ unsigned int zero_index; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&bat_req->mask_lock, flags); ++ ++ zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt, ++ bat_req->bat_release_rd_idx); ++ ++ if (zero_index < bat_req->bat_size_cnt) { ++ spin_unlock_irqrestore(&bat_req->mask_lock, flags); ++ return zero_index - bat_req->bat_release_rd_idx; ++ } ++ ++ /* limiting the search till bat_release_rd_idx */ ++ zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx); ++ spin_unlock_irqrestore(&bat_req->mask_lock, flags); ++ return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index; ++} ++ ++static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq, ++ const unsigned int rel_entry_num, ++ const enum bat_type buf_type) ++{ ++ struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; ++ unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i; ++ struct dpmaif_bat_request *bat; ++ unsigned long flags; ++ ++ if (!rxq->que_started || !rel_entry_num) ++ return -EINVAL; ++ ++ if (buf_type == BAT_TYPE_FRAG) { ++ bat = rxq->bat_frag; ++ hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index); ++ } else { ++ bat = rxq->bat_req; ++ hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index); ++ } ++ ++ if 
(rel_entry_num >= bat->bat_size_cnt) ++ return -EINVAL; ++ ++ old_rel_idx = bat->bat_release_rd_idx; ++ new_rel_idx = old_rel_idx + rel_entry_num; ++ ++ /* Do not need to release if the queue is empty */ ++ if (bat->bat_wr_idx == old_rel_idx) ++ return 0; ++ ++ if (hw_rd_idx >= old_rel_idx) { ++ if (new_rel_idx > hw_rd_idx) ++ return -EINVAL; ++ } ++ ++ if (new_rel_idx >= bat->bat_size_cnt) { ++ new_rel_idx -= bat->bat_size_cnt; ++ if (new_rel_idx > hw_rd_idx) ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&bat->mask_lock, flags); ++ for (i = 0; i < rel_entry_num; i++) { ++ unsigned int index = bat->bat_release_rd_idx + i; ++ ++ if (index >= bat->bat_size_cnt) ++ index -= bat->bat_size_cnt; ++ ++ clear_bit(index, bat->bat_bitmap); ++ } ++ spin_unlock_irqrestore(&bat->mask_lock, flags); ++ ++ bat->bat_release_rd_idx = new_rel_idx; ++ return rel_entry_num; ++} ++ ++static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq) ++{ ++ int ret; ++ ++ if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD) ++ return 0; ++ ++ ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt); ++ if (ret) ++ return ret; ++ ++ rxq->pit_remain_release_cnt = 0; ++ return 0; ++} ++ ++static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq) ++{ ++ unsigned int bid_cnt; ++ int ret; ++ ++ bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req); ++ if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) ++ return 0; ++ ++ ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL); ++ if (ret <= 0) { ++ dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret); ++ return ret; ++ } ++ ++ ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false); ++ if (ret < 0) ++ dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret); ++ ++ return ret; ++} ++ ++static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq) ++{ ++ unsigned int bid_cnt; ++ int ret; ++ ++ bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag); ++ if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) ++ return 0; ++ ++ ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG); ++ if (ret <= 0) { ++ dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret); ++ return ret; ++ } ++ ++ return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false); ++} ++ ++static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq, ++ const struct dpmaif_pit *msg_pit, ++ struct dpmaif_cur_rx_skb_info *skb_info) ++{ ++ int header = le32_to_cpu(msg_pit->header); ++ ++ skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header); ++ skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header); ++ skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header); ++ skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3)); ++} ++ ++static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq, ++ const struct dpmaif_pit *pkt_info, ++ struct dpmaif_cur_rx_skb_info *skb_info) ++{ ++ unsigned long long data_bus_addr, data_base_addr; ++ struct device *dev = rxq->dpmaif_ctrl->dev; ++ struct dpmaif_bat_skb *bat_skb; ++ unsigned int data_len; ++ struct sk_buff *skb; ++ int data_offset; ++ ++ bat_skb = rxq->bat_req->bat_skb; ++ bat_skb += t7xx_normal_pit_bid(pkt_info); ++ dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); ++ ++ data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); ++ data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); ++ data_base_addr = 
bat_skb->data_bus_addr; ++ data_offset = data_bus_addr - data_base_addr; ++ data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); ++ skb = bat_skb->skb; ++ skb->len = 0; ++ skb_reset_tail_pointer(skb); ++ skb_reserve(skb, data_offset); ++ ++ if (skb->tail + data_len > skb->end) { ++ dev_err(dev, "No buffer space available\n"); ++ return -ENOBUFS; ++ } ++ ++ skb_put(skb, data_len); ++ skb_info->cur_skb = skb; ++ bat_skb->skb = NULL; ++ return 0; ++} ++ ++static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq, ++ const struct dpmaif_pit *pkt_info, ++ struct dpmaif_cur_rx_skb_info *skb_info) ++{ ++ unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); ++ int ret; ++ ++ ret = t7xx_bat_cur_bid_check(rxq, cur_bid); ++ if (ret < 0) ++ return ret; ++ ++ ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info); ++ if (ret < 0) { ++ dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret); ++ return ret; ++ } ++ ++ t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid); ++ return 0; ++} ++ ++static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq) ++{ ++ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; ++ int ret; ++ ++ queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work); ++ ++ ret = t7xx_dpmaif_pit_release_and_add(rxq); ++ if (ret < 0) ++ dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret); ++ ++ return ret; ++} ++ ++static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rxq->skb_list.lock, flags); ++ if (rxq->skb_list.qlen < rxq->skb_list_max_len) ++ __skb_queue_tail(&rxq->skb_list, skb); ++ else ++ dev_kfree_skb_any(skb); ++ spin_unlock_irqrestore(&rxq->skb_list.lock, flags); ++} ++ ++static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq, ++ struct dpmaif_cur_rx_skb_info *skb_info) ++{ ++ struct sk_buff *skb = skb_info->cur_skb; ++ struct t7xx_skb_cb *skb_cb; ++ u8 netif_id; ++ ++ skb_info->cur_skb = NULL; ++ ++ if (skb_info->pit_dp) { ++ dev_kfree_skb_any(skb); ++ return; ++ } ++ ++ skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? 
CHECKSUM_UNNECESSARY : ++ CHECKSUM_NONE; ++ netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx); ++ skb_cb = T7XX_SKB_CB(skb); ++ skb_cb->netif_idx = netif_id; ++ skb_cb->rx_pkt_type = skb_info->pkt_type; ++ t7xx_dpmaif_rx_skb_enqueue(rxq, skb); ++} ++ ++static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt, ++ const unsigned long timeout) ++{ ++ unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0; ++ struct device *dev = rxq->dpmaif_ctrl->dev; ++ struct dpmaif_cur_rx_skb_info *skb_info; ++ int ret = 0; ++ ++ pit_len = rxq->pit_size_cnt; ++ skb_info = &rxq->rx_data_info; ++ cur_pit = rxq->pit_rd_idx; ++ ++ for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) { ++ struct dpmaif_pit *pkt_info; ++ u32 val; ++ ++ if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout)) ++ break; ++ ++ pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit; ++ if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) { ++ dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index); ++ return -EAGAIN; ++ } ++ ++ val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header)); ++ if (val == DES_PT_MSG) { ++ if (skb_info->msg_pit_received) ++ dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index); ++ ++ skb_info->msg_pit_received = true; ++ t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info); ++ } else { /* DES_PT_PD */ ++ val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header)); ++ if (val != PKT_BUF_FRAG) ++ ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info); ++ else if (!skb_info->cur_skb) ++ ret = -EINVAL; ++ else ++ ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info); ++ ++ if (ret < 0) { ++ skb_info->err_payload = 1; ++ dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index); ++ } ++ ++ val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header)); ++ if (!val) { ++ if (!skb_info->err_payload) { ++ t7xx_dpmaif_rx_skb(rxq, skb_info); ++ } else if (skb_info->cur_skb) { ++ dev_kfree_skb_any(skb_info->cur_skb); ++ skb_info->cur_skb = NULL; ++ } ++ ++ memset(skb_info, 0, sizeof(*skb_info)); ++ ++ recv_skb_cnt++; ++ if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) { ++ wake_up_all(&rxq->rx_wq); ++ recv_skb_cnt = 0; ++ } ++ } ++ } ++ ++ cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit); ++ rxq->pit_rd_idx = cur_pit; ++ rxq->pit_remain_release_cnt++; ++ ++ if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) { ++ ret = t7xx_dpmaifq_rx_notify_hw(rxq); ++ if (ret < 0) ++ break; ++ } ++ } ++ ++ if (recv_skb_cnt) ++ wake_up_all(&rxq->rx_wq); ++ ++ if (!ret) ++ ret = t7xx_dpmaifq_rx_notify_hw(rxq); ++ ++ if (ret) ++ return ret; ++ ++ return rx_cnt; ++} ++ ++static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq) ++{ ++ unsigned int hw_wr_idx, pit_cnt; ++ ++ if (!rxq->que_started) ++ return 0; ++ ++ hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index); ++ pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx, ++ DPMAIF_READ); ++ rxq->pit_wr_idx = hw_wr_idx; ++ return pit_cnt; ++} ++ ++static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, ++ const unsigned int q_num, const unsigned int budget) ++{ ++ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; ++ unsigned long time_limit; ++ unsigned int cnt; ++ ++ time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS); ++ ++ while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) { ++ unsigned int rd_cnt; ++ int real_cnt; ++ ++ rd_cnt = min(cnt, budget); ++ ++ real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, 
time_limit); ++ if (real_cnt < 0) ++ return real_cnt; ++ ++ if (real_cnt < cnt) ++ return -EAGAIN; ++ } ++ ++ return 0; ++} ++ ++static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq) ++{ ++ struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; ++ int ret; ++ ++ ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget); ++ if (ret < 0) { ++ /* Try one more time */ ++ queue_work(rxq->worker, &rxq->dpmaif_rxq_work); ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ } else { ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index); ++ } ++} ++ ++static void t7xx_dpmaif_rxq_work(struct work_struct *work) ++{ ++ struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); ++ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; ++ ++ atomic_set(&rxq->rx_processing, 1); ++ /* Ensure rx_processing is changed to 1 before actually begin RX flow */ ++ smp_mb(); ++ ++ if (!rxq->que_started) { ++ atomic_set(&rxq->rx_processing, 0); ++ dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); ++ return; ++ } ++ ++ t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); ++ atomic_set(&rxq->rx_processing, 0); ++} ++ ++void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) ++{ ++ struct dpmaif_rx_queue *rxq; ++ int qno; ++ ++ qno = ffs(que_mask) - 1; ++ if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { ++ dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno); ++ return; ++ } ++ ++ rxq = &dpmaif_ctrl->rxq[qno]; ++ queue_work(rxq->worker, &rxq->dpmaif_rxq_work); ++} ++ ++static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl, ++ const struct dpmaif_bat_request *bat_req) ++{ ++ if (bat_req->bat_base) ++ dma_free_coherent(dpmaif_ctrl->dev, ++ bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), ++ bat_req->bat_base, bat_req->bat_bus_addr); ++} ++ ++/** ++ * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer. ++ * @dpmaif_ctrl: Pointer to DPMAIF context structure. ++ * @bat_req: Pointer to BAT request structure. ++ * @buf_type: BAT ring type. ++ * ++ * This function allocates the BAT ring buffer shared with the HW device, also allocates ++ * a buffer used to store information about the BAT skbs for further release. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code. 
++ */ ++int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, ++ const enum bat_type buf_type) ++{ ++ int sw_buf_size; ++ ++ if (buf_type == BAT_TYPE_FRAG) { ++ sw_buf_size = sizeof(struct dpmaif_bat_page); ++ bat_req->bat_size_cnt = DPMAIF_FRG_COUNT; ++ bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF; ++ } else { ++ sw_buf_size = sizeof(struct dpmaif_bat_skb); ++ bat_req->bat_size_cnt = DPMAIF_BAT_COUNT; ++ bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF; ++ } ++ ++ bat_req->type = buf_type; ++ bat_req->bat_wr_idx = 0; ++ bat_req->bat_release_rd_idx = 0; ++ ++ bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev, ++ bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), ++ &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO); ++ if (!bat_req->bat_base) ++ return -ENOMEM; ++ ++ /* For AP SW to record skb information */ ++ bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size, ++ GFP_KERNEL); ++ if (!bat_req->bat_skb) ++ goto err_free_dma_mem; ++ ++ bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL); ++ if (!bat_req->bat_bitmap) ++ goto err_free_dma_mem; ++ ++ spin_lock_init(&bat_req->mask_lock); ++ atomic_set(&bat_req->refcnt, 0); ++ return 0; ++ ++err_free_dma_mem: ++ t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); ++ ++ return -ENOMEM; ++} ++ ++void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req) ++{ ++ if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt)) ++ return; ++ ++ bitmap_free(bat_req->bat_bitmap); ++ bat_req->bat_bitmap = NULL; ++ ++ if (bat_req->bat_skb) { ++ unsigned int i; ++ ++ for (i = 0; i < bat_req->bat_size_cnt; i++) { ++ if (bat_req->type == BAT_TYPE_FRAG) ++ t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); ++ else ++ t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); ++ } ++ } ++ ++ t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); ++} ++ ++static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq) ++{ ++ rxq->pit_size_cnt = DPMAIF_PIT_COUNT; ++ rxq->pit_rd_idx = 0; ++ rxq->pit_wr_idx = 0; ++ rxq->pit_release_rd_idx = 0; ++ rxq->expect_pit_seq = 0; ++ rxq->pit_remain_release_cnt = 0; ++ memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); ++ ++ rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev, ++ rxq->pit_size_cnt * sizeof(struct dpmaif_pit), ++ &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO); ++ if (!rxq->pit_base) ++ return -ENOMEM; ++ ++ rxq->bat_req = &rxq->dpmaif_ctrl->bat_req; ++ atomic_inc(&rxq->bat_req->refcnt); ++ ++ rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag; ++ atomic_inc(&rxq->bat_frag->refcnt); ++ return 0; ++} ++ ++static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq) ++{ ++ if (!rxq->dpmaif_ctrl) ++ return; ++ ++ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); ++ t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); ++ ++ if (rxq->pit_base) ++ dma_free_coherent(rxq->dpmaif_ctrl->dev, ++ rxq->pit_size_cnt * sizeof(struct dpmaif_pit), ++ rxq->pit_base, rxq->pit_bus_addr); ++} ++ ++int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue) ++{ ++ int ret; ++ ++ ret = t7xx_dpmaif_rx_alloc(queue); ++ if (ret < 0) { ++ dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); ++ return ret; ++ } ++ ++ INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work); ++ ++ queue->worker = alloc_workqueue("dpmaif_rx%d_worker", ++ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index); ++ if (!queue->worker) { ++ ret = -ENOMEM; ++ goto err_free_rx_buffer; ++ } ++ ++ 
init_waitqueue_head(&queue->rx_wq); ++ skb_queue_head_init(&queue->skb_list); ++ queue->skb_list_max_len = queue->bat_req->pkt_buf_sz; ++ queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread, ++ queue, "dpmaif_rx%d_push", queue->index); ++ ++ ret = PTR_ERR_OR_ZERO(queue->rx_thread); ++ if (ret) ++ goto err_free_workqueue; ++ ++ return 0; ++ ++err_free_workqueue: ++ destroy_workqueue(queue->worker); ++ ++err_free_rx_buffer: ++ t7xx_dpmaif_rx_buf_free(queue); ++ ++ return ret; ++} ++ ++void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue) ++{ ++ if (queue->worker) ++ destroy_workqueue(queue->worker); ++ ++ if (queue->rx_thread) ++ kthread_stop(queue->rx_thread); ++ ++ skb_queue_purge(&queue->skb_list); ++ t7xx_dpmaif_rx_buf_free(queue); ++} ++ ++static void t7xx_dpmaif_bat_release_work(struct work_struct *work) ++{ ++ struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work); ++ struct dpmaif_rx_queue *rxq; ++ ++ /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ ++ rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; ++ t7xx_dpmaif_bat_release_and_add(rxq); ++ t7xx_dpmaif_frag_bat_release_and_add(rxq); ++} ++ ++int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue", ++ WQ_MEM_RECLAIM, 1); ++ if (!dpmaif_ctrl->bat_release_wq) ++ return -ENOMEM; ++ ++ INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work); ++ return 0; ++} ++ ++void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ flush_work(&dpmaif_ctrl->bat_release_work); ++ ++ if (dpmaif_ctrl->bat_release_wq) { ++ destroy_workqueue(dpmaif_ctrl->bat_release_wq); ++ dpmaif_ctrl->bat_release_wq = NULL; ++ } ++} ++ ++/** ++ * t7xx_dpmaif_rx_stop() - Suspend RX flow. ++ * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl. ++ * ++ * Wait for all the RX work to finish executing and mark the RX queue as paused. 
++ */ ++void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) { ++ struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; ++ int timeout, value; ++ ++ flush_work(&rxq->dpmaif_rxq_work); ++ ++ timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, ++ !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US); ++ if (timeout) ++ dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n"); ++ ++ /* Ensure RX processing has stopped before we set rxq->que_started to false */ ++ smp_mb(); ++ rxq->que_started = false; ++ } ++} ++ ++static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq) ++{ ++ int cnt, j = 0; ++ ++ flush_work(&rxq->dpmaif_rxq_work); ++ rxq->que_started = false; ++ ++ do { ++ cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, ++ rxq->pit_wr_idx, DPMAIF_READ); ++ ++ if (++j >= DPMAIF_MAX_CHECK_COUNT) { ++ dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt); ++ break; ++ } ++ } while (cnt); ++ ++ memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit)); ++ memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat)); ++ bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt); ++ memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); ++ ++ rxq->pit_rd_idx = 0; ++ rxq->pit_wr_idx = 0; ++ rxq->pit_release_rd_idx = 0; ++ rxq->expect_pit_seq = 0; ++ rxq->pit_remain_release_cnt = 0; ++ rxq->bat_req->bat_release_rd_idx = 0; ++ rxq->bat_req->bat_wr_idx = 0; ++ rxq->bat_frag->bat_release_rd_idx = 0; ++ rxq->bat_frag->bat_wr_idx = 0; ++} ++ ++void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ int i; ++ ++ for (i = 0; i < DPMAIF_RXQ_NUM; i++) ++ t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h +@@ -0,0 +1,116 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Eliot Lee ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Moises Veleta ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_HIF_DPMA_RX_H__ ++#define __T7XX_HIF_DPMA_RX_H__ ++ ++#include ++#include ++ ++#include "t7xx_hif_dpmaif.h" ++ ++#define NETIF_MASK GENMASK(4, 0) ++ ++#define PKT_TYPE_IP4 0 ++#define PKT_TYPE_IP6 1 ++ ++/* Structure of DL PIT */ ++struct dpmaif_pit { ++ __le32 header; ++ union { ++ struct { ++ __le32 data_addr_l; ++ __le32 data_addr_h; ++ __le32 footer; ++ } pd; ++ struct { ++ __le32 params_1; ++ __le32 params_2; ++ __le32 params_3; ++ } msg; ++ }; ++}; ++ ++/* PIT header fields */ ++#define PD_PIT_DATA_LEN GENMASK(31, 16) ++#define PD_PIT_BUFFER_ID GENMASK(15, 3) ++#define PD_PIT_BUFFER_TYPE BIT(2) ++#define PD_PIT_CONT BIT(1) ++#define PD_PIT_PACKET_TYPE BIT(0) ++/* PIT footer fields */ ++#define PD_PIT_DLQ_DONE GENMASK(31, 30) ++#define PD_PIT_ULQ_DONE GENMASK(29, 24) ++#define PD_PIT_HEADER_OFFSET GENMASK(23, 19) ++#define PD_PIT_BI_F GENMASK(18, 17) ++#define PD_PIT_IG BIT(16) ++#define PD_PIT_RES GENMASK(15, 11) ++#define PD_PIT_H_BID GENMASK(10, 8) ++#define PD_PIT_PIT_SEQ GENMASK(7, 0) ++ ++#define MSG_PIT_DP BIT(31) ++#define MSG_PIT_RES GENMASK(30, 27) ++#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24) ++#define MSG_PIT_CHANNEL_ID GENMASK(23, 16) ++#define MSG_PIT_RES2 GENMASK(15, 12) ++#define MSG_PIT_HPC_IDX GENMASK(11, 8) ++#define MSG_PIT_SRC_QID GENMASK(7, 5) ++#define MSG_PIT_ERROR_BIT BIT(4) ++#define MSG_PIT_CHECKSUM GENMASK(3, 2) ++#define MSG_PIT_CONT BIT(1) ++#define MSG_PIT_PACKET_TYPE BIT(0) ++ ++#define MSG_PIT_HP_IDX GENMASK(31, 27) ++#define MSG_PIT_CMD GENMASK(26, 24) ++#define MSG_PIT_RES3 GENMASK(23, 21) ++#define MSG_PIT_FLOW GENMASK(20, 16) ++#define MSG_PIT_COUNT GENMASK(15, 0) ++ ++#define MSG_PIT_HASH GENMASK(31, 24) ++#define MSG_PIT_RES4 GENMASK(23, 18) ++#define MSG_PIT_PRO GENMASK(17, 16) ++#define MSG_PIT_VBID GENMASK(15, 3) ++#define MSG_PIT_RES5 GENMASK(2, 0) ++ ++#define MSG_PIT_DLQ_DONE GENMASK(31, 30) ++#define MSG_PIT_ULQ_DONE GENMASK(29, 24) ++#define MSG_PIT_IP BIT(23) ++#define MSG_PIT_RES6 BIT(22) ++#define MSG_PIT_MR GENMASK(21, 20) ++#define MSG_PIT_RES7 GENMASK(19, 17) ++#define MSG_PIT_IG BIT(16) ++#define MSG_PIT_RES8 GENMASK(15, 11) ++#define MSG_PIT_H_BID GENMASK(10, 8) ++#define MSG_PIT_PIT_SEQ GENMASK(7, 0) ++ ++int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue); ++void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl); ++int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl); ++int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, ++ const struct dpmaif_bat_request *bat_req, ++ const unsigned int q_num, const unsigned int buf_cnt, ++ const bool initial); ++int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, ++ const unsigned int buf_cnt, const bool first_time); ++void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl); ++void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask); ++void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue); ++void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl); ++int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, ++ const enum bat_type buf_type); ++void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, ++ struct dpmaif_bat_request *bat_req); ++ ++#endif /* __T7XX_HIF_DPMA_RX_H__ */ +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +@@ 
-0,0 +1,651 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. ++ * ++ * Authors: ++ * Amir Hanania ++ * Haijun Liu ++ * Eliot Lee ++ * Moises Veleta ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Chiranjeevi Rapolu ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_dpmaif.h" ++#include "t7xx_hif_dpmaif.h" ++#include "t7xx_hif_dpmaif_tx.h" ++#include "t7xx_pci.h" ++ ++#define DPMAIF_SKB_TX_BURST_CNT 5 ++#define DPMAIF_DRB_LIST_LEN 6144 ++ ++/* DRB dtype */ ++#define DES_DTYP_PD 0 ++#define DES_DTYP_MSG 1 ++ ++static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl, ++ unsigned int q_num) ++{ ++ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; ++ unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt; ++ unsigned long flags; ++ ++ if (!txq->que_started) ++ return 0; ++ ++ old_sw_rd_idx = txq->drb_rd_idx; ++ new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num); ++ if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) { ++ dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx); ++ return 0; ++ } ++ ++ if (old_sw_rd_idx <= new_hw_rd_idx) ++ drb_cnt = new_hw_rd_idx - old_sw_rd_idx; ++ else ++ drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx; ++ ++ spin_lock_irqsave(&txq->tx_lock, flags); ++ txq->drb_rd_idx = new_hw_rd_idx; ++ spin_unlock_irqrestore(&txq->tx_lock, flags); ++ ++ return drb_cnt; ++} ++ ++static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl, ++ unsigned int q_num, unsigned int release_cnt) ++{ ++ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; ++ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; ++ struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base; ++ struct dpmaif_drb *cur_drb, *drb_base; ++ unsigned int drb_cnt, i, cur_idx; ++ unsigned long flags; ++ ++ drb_skb_base = txq->drb_skb_base; ++ drb_base = txq->drb_base; ++ ++ spin_lock_irqsave(&txq->tx_lock, flags); ++ drb_cnt = txq->drb_size_cnt; ++ cur_idx = txq->drb_release_rd_idx; ++ spin_unlock_irqrestore(&txq->tx_lock, flags); ++ ++ for (i = 0; i < release_cnt; i++) { ++ cur_drb = drb_base + cur_idx; ++ if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) { ++ cur_drb_skb = drb_skb_base + cur_idx; ++ if (!cur_drb_skb->is_msg) ++ dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr, ++ cur_drb_skb->data_len, DMA_TO_DEVICE); ++ ++ if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) { ++ if (!cur_drb_skb->skb) { ++ dev_err(dpmaif_ctrl->dev, ++ "txq%u: DRB check fail, invalid skb\n", q_num); ++ continue; ++ } ++ ++ dev_kfree_skb_any(cur_drb_skb->skb); ++ } ++ ++ cur_drb_skb->skb = NULL; ++ } ++ ++ spin_lock_irqsave(&txq->tx_lock, flags); ++ cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx); ++ txq->drb_release_rd_idx = cur_idx; ++ spin_unlock_irqrestore(&txq->tx_lock, flags); ++ ++ if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8) ++ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index); ++ } ++ ++ if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) ++ dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num); ++ ++ return i; ++} ++ ++static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl, ++ unsigned int q_num, unsigned 
int budget) ++{ ++ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; ++ unsigned int rel_cnt, real_rel_cnt; ++ ++ /* Update read index from HW */ ++ t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num); ++ ++ rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, ++ txq->drb_rd_idx, DPMAIF_READ); ++ ++ real_rel_cnt = min_not_zero(budget, rel_cnt); ++ if (real_rel_cnt) ++ real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt); ++ ++ return real_rel_cnt < rel_cnt ? -EAGAIN : 0; ++} ++ ++static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq) ++{ ++ return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index); ++} ++ ++static void t7xx_dpmaif_tx_done(struct work_struct *work) ++{ ++ struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work); ++ struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl; ++ struct dpmaif_hw_info *hw_info; ++ int ret; ++ ++ hw_info = &dpmaif_ctrl->hw_info; ++ ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); ++ if (ret == -EAGAIN || ++ (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && ++ t7xx_dpmaif_drb_ring_not_empty(txq))) { ++ queue_work(dpmaif_ctrl->txq[txq->index].worker, ++ &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); ++ /* Give the device time to enter the low power state */ ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ } else { ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); ++ } ++} ++ ++static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, ++ unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l, ++ unsigned int channel_id) ++{ ++ struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; ++ struct dpmaif_drb *drb = drb_base + cur_idx; ++ ++ drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) | ++ FIELD_PREP(DRB_HDR_CONT, 1) | ++ FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len)); ++ ++ drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) | ++ FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) | ++ FIELD_PREP(DRB_MSG_L4_CHK, 1)); ++} ++ ++static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, ++ unsigned int cur_idx, dma_addr_t data_addr, ++ unsigned int pkt_size, bool last_one) ++{ ++ struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; ++ struct dpmaif_drb *drb = drb_base + cur_idx; ++ u32 header; ++ ++ header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size); ++ if (!last_one) ++ header |= FIELD_PREP(DRB_HDR_CONT, 1); ++ ++ drb->header = cpu_to_le32(header); ++ drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr)); ++ drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr)); ++} ++ ++static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, ++ unsigned int cur_idx, struct sk_buff *skb, bool is_msg, ++ bool is_frag, bool is_last_one, dma_addr_t bus_addr, ++ unsigned int data_len) ++{ ++ struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base; ++ struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx; ++ ++ drb_skb->skb = skb; ++ drb_skb->bus_addr = bus_addr; ++ drb_skb->data_len = data_len; ++ drb_skb->index = cur_idx; ++ drb_skb->is_msg = is_msg; ++ drb_skb->is_frag = is_frag; ++ drb_skb->is_last = is_last_one; ++} ++ ++static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb) ++{ ++ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; ++ unsigned int wr_cnt, 
send_cnt, payload_cnt; ++ unsigned int cur_idx, drb_wr_idx_backup; ++ struct skb_shared_info *shinfo; ++ struct dpmaif_tx_queue *txq; ++ struct t7xx_skb_cb *skb_cb; ++ unsigned long flags; ++ ++ skb_cb = T7XX_SKB_CB(skb); ++ txq = &dpmaif_ctrl->txq[skb_cb->txq_number]; ++ if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON) ++ return -ENODEV; ++ ++ atomic_set(&txq->tx_processing, 1); ++ /* Ensure tx_processing is changed to 1 before actually begin TX flow */ ++ smp_mb(); ++ ++ shinfo = skb_shinfo(skb); ++ if (shinfo->frag_list) ++ dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n"); ++ ++ payload_cnt = shinfo->nr_frags + 1; ++ /* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */ ++ send_cnt = payload_cnt + 1; ++ ++ spin_lock_irqsave(&txq->tx_lock, flags); ++ cur_idx = txq->drb_wr_idx; ++ drb_wr_idx_backup = cur_idx; ++ txq->drb_wr_idx += send_cnt; ++ if (txq->drb_wr_idx >= txq->drb_size_cnt) ++ txq->drb_wr_idx -= txq->drb_size_cnt; ++ t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx); ++ t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0); ++ spin_unlock_irqrestore(&txq->tx_lock, flags); ++ ++ for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) { ++ bool is_frag, is_last_one = wr_cnt == payload_cnt - 1; ++ unsigned int data_len; ++ dma_addr_t bus_addr; ++ void *data_addr; ++ ++ if (!wr_cnt) { ++ data_len = skb_headlen(skb); ++ data_addr = skb->data; ++ is_frag = false; ++ } else { ++ skb_frag_t *frag = shinfo->frags + wr_cnt - 1; ++ ++ data_len = skb_frag_size(frag); ++ data_addr = skb_frag_address(frag); ++ is_frag = true; ++ } ++ ++ bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE); ++ if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr)) ++ goto unmap_buffers; ++ ++ cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx); ++ ++ spin_lock_irqsave(&txq->tx_lock, flags); ++ t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len, ++ is_last_one); ++ t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag, ++ is_last_one, bus_addr, data_len); ++ spin_unlock_irqrestore(&txq->tx_lock, flags); ++ } ++ ++ if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2)) ++ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index); ++ ++ atomic_set(&txq->tx_processing, 0); ++ ++ return 0; ++ ++unmap_buffers: ++ while (wr_cnt--) { ++ struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base; ++ ++ cur_idx = cur_idx ? 
cur_idx - 1 : txq->drb_size_cnt - 1; ++ drb_skb += cur_idx; ++ dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr, ++ drb_skb->data_len, DMA_TO_DEVICE); ++ } ++ ++ txq->drb_wr_idx = drb_wr_idx_backup; ++ atomic_set(&txq->tx_processing, 0); ++ ++ return -ENOMEM; ++} ++ ++static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ int i; ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head)) ++ return false; ++ } ++ ++ return true; ++} ++ ++/* Currently, only the default TX queue is used */ ++static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_tx_queue *txq; ++ ++ txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE]; ++ if (!txq->que_started) ++ return NULL; ++ ++ return txq; ++} ++ ++static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq) ++{ ++ return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, ++ txq->drb_wr_idx, DPMAIF_WRITE); ++} ++ ++static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb) ++{ ++ /* Normal DRB (frags data + skb linear data) + msg DRB */ ++ return skb_shinfo(skb)->nr_frags + 2; ++} ++ ++static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq) ++{ ++ unsigned int drb_remain_cnt, i; ++ unsigned int send_drb_cnt; ++ int drb_cnt = 0; ++ int ret = 0; ++ ++ drb_remain_cnt = t7xx_txq_drb_wr_available(txq); ++ ++ for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) { ++ struct sk_buff *skb; ++ ++ skb = skb_peek(&txq->tx_skb_head); ++ if (!skb) ++ break; ++ ++ send_drb_cnt = t7xx_skb_drb_cnt(skb); ++ if (drb_remain_cnt < send_drb_cnt) { ++ drb_remain_cnt = t7xx_txq_drb_wr_available(txq); ++ continue; ++ } ++ ++ drb_remain_cnt -= send_drb_cnt; ++ ++ ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb); ++ if (ret < 0) { ++ dev_err(txq->dpmaif_ctrl->dev, ++ "Failed to add skb to device's ring: %d\n", ret); ++ break; ++ } ++ ++ drb_cnt += send_drb_cnt; ++ skb_unlink(skb, &txq->tx_skb_head); ++ } ++ ++ if (drb_cnt > 0) ++ return drb_cnt; ++ ++ return ret; ++} ++ ++static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ do { ++ struct dpmaif_tx_queue *txq; ++ int drb_send_cnt; ++ ++ txq = t7xx_select_tx_queue(dpmaif_ctrl); ++ if (!txq) ++ return; ++ ++ drb_send_cnt = t7xx_txq_burst_send_skb(txq); ++ if (drb_send_cnt <= 0) { ++ usleep_range(10, 20); ++ cond_resched(); ++ continue; ++ } ++ ++ t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, ++ drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD); ++ ++ cond_resched(); ++ } while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() && ++ (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)); ++} ++ ++static int t7xx_dpmaif_tx_hw_push_thread(void *arg) ++{ ++ struct dpmaif_ctrl *dpmaif_ctrl = arg; ++ ++ while (!kthread_should_stop()) { ++ if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || ++ dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { ++ if (wait_event_interruptible(dpmaif_ctrl->tx_wq, ++ (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && ++ dpmaif_ctrl->state == DPMAIF_STATE_PWRON) || ++ kthread_should_stop())) ++ continue; ++ ++ if (kthread_should_stop()) ++ break; ++ } ++ ++ t7xx_do_tx_hw_push(dpmaif_ctrl); ++ } ++ ++ return 0; ++} ++ ++int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ init_waitqueue_head(&dpmaif_ctrl->tx_wq); ++ dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread, ++ dpmaif_ctrl, "dpmaif_tx_hw_push"); ++ return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread); ++} ++ ++void 
t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ if (dpmaif_ctrl->tx_thread) ++ kthread_stop(dpmaif_ctrl->tx_thread); ++} ++ ++/** ++ * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue. ++ * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl. ++ * @txq_number: Queue number to xmit on. ++ * @skb: Pointer to the skb to transmit. ++ * ++ * Add the skb to the queue of the skbs to be transmit. ++ * Wake up the thread that push the skbs from the queue to the HW. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -EBUSY - Tx budget exhausted. ++ * In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full ++ * state to prevent this error condition. ++ */ ++int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, ++ struct sk_buff *skb) ++{ ++ struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number]; ++ struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; ++ struct t7xx_skb_cb *skb_cb; ++ ++ if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) { ++ cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number); ++ return -EBUSY; ++ } ++ ++ skb_cb = T7XX_SKB_CB(skb); ++ skb_cb->txq_number = txq_number; ++ skb_queue_tail(&txq->tx_skb_head, skb); ++ wake_up(&dpmaif_ctrl->tx_wq); ++ ++ return 0; ++} ++ ++void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask) ++{ ++ int i; ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ if (que_mask & BIT(i)) ++ queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work); ++ } ++} ++ ++static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq) ++{ ++ size_t brb_skb_size, brb_pd_size; ++ ++ brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb); ++ brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb); ++ ++ txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN; ++ ++ /* For HW && AP SW */ ++ txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size, ++ &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO); ++ if (!txq->drb_base) ++ return -ENOMEM; ++ ++ /* For AP SW to record the skb information */ ++ txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL); ++ if (!txq->drb_skb_base) { ++ dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size, ++ txq->drb_base, txq->drb_bus_addr); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq) ++{ ++ struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base; ++ unsigned int i; ++ ++ if (!drb_skb_base) ++ return; ++ ++ for (i = 0; i < txq->drb_size_cnt; i++) { ++ drb_skb = drb_skb_base + i; ++ if (!drb_skb->skb) ++ continue; ++ ++ if (!drb_skb->is_msg) ++ dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr, ++ drb_skb->data_len, DMA_TO_DEVICE); ++ ++ if (drb_skb->is_last) { ++ dev_kfree_skb(drb_skb->skb); ++ drb_skb->skb = NULL; ++ } ++ } ++} ++ ++static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq) ++{ ++ if (txq->drb_base) ++ dma_free_coherent(txq->dpmaif_ctrl->dev, ++ txq->drb_size_cnt * sizeof(struct dpmaif_drb), ++ txq->drb_base, txq->drb_bus_addr); ++ ++ t7xx_dpmaif_tx_free_drb_skb(txq); ++} ++ ++/** ++ * t7xx_dpmaif_txq_init() - Initialize TX queue. ++ * @txq: Pointer to struct dpmaif_tx_queue. ++ * ++ * Initialize the TX queue data structure and allocate memory for it to use. ++ * ++ * Return: ++ * * 0 - Success. ++ * * -ERROR - Error code from failure sub-initializations. 
++ */ ++int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq) ++{ ++ int ret; ++ ++ skb_queue_head_init(&txq->tx_skb_head); ++ init_waitqueue_head(&txq->req_wq); ++ atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN); ++ ++ ret = t7xx_dpmaif_tx_drb_buf_init(txq); ++ if (ret) { ++ dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret); ++ return ret; ++ } ++ ++ txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | ++ (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); ++ if (!txq->worker) ++ return -ENOMEM; ++ ++ INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done); ++ spin_lock_init(&txq->tx_lock); ++ ++ return 0; ++} ++ ++void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq) ++{ ++ if (txq->worker) ++ destroy_workqueue(txq->worker); ++ ++ skb_queue_purge(&txq->tx_skb_head); ++ t7xx_dpmaif_tx_drb_buf_rel(txq); ++} ++ ++void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ int i; ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) { ++ struct dpmaif_tx_queue *txq; ++ int count = 0; ++ ++ txq = &dpmaif_ctrl->txq[i]; ++ txq->que_started = false; ++ /* Make sure TXQ is disabled */ ++ smp_mb(); ++ ++ /* Wait for active Tx to be done */ ++ while (atomic_read(&txq->tx_processing)) { ++ if (++count >= DPMAIF_MAX_CHECK_COUNT) { ++ dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n"); ++ break; ++ } ++ } ++ } ++} ++ ++static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq) ++{ ++ txq->que_started = false; ++ ++ cancel_work_sync(&txq->dpmaif_tx_work); ++ flush_work(&txq->dpmaif_tx_work); ++ t7xx_dpmaif_tx_free_drb_skb(txq); ++ ++ txq->drb_rd_idx = 0; ++ txq->drb_wr_idx = 0; ++ txq->drb_release_rd_idx = 0; ++} ++ ++void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ int i; ++ ++ for (i = 0; i < DPMAIF_TXQ_NUM; i++) ++ t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h +@@ -0,0 +1,78 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Eliot Lee ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Moises Veleta ++ * Sreehari Kancharla ++ */ ++ ++#ifndef __T7XX_HIF_DPMA_TX_H__ ++#define __T7XX_HIF_DPMA_TX_H__ ++ ++#include ++#include ++#include ++ ++#include "t7xx_hif_dpmaif.h" ++ ++#define DPMAIF_TX_DEFAULT_QUEUE 0 ++ ++struct dpmaif_drb { ++ __le32 header; ++ union { ++ struct { ++ __le32 data_addr_l; ++ __le32 data_addr_h; ++ } pd; ++ struct { ++ __le32 msg_hdr; ++ __le32 reserved1; ++ } msg; ++ }; ++ __le32 reserved2; ++}; ++ ++/* Header fields */ ++#define DRB_HDR_DATA_LEN GENMASK(31, 16) ++#define DRB_HDR_RESERVED GENMASK(15, 3) ++#define DRB_HDR_CONT BIT(2) ++#define DRB_HDR_DTYP GENMASK(1, 0) ++ ++#define DRB_MSG_DW2_RES GENMASK(31, 30) ++#define DRB_MSG_L4_CHK BIT(29) ++#define DRB_MSG_IP_CHK BIT(28) ++#define DRB_MSG_RESERVED BIT(27) ++#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24) ++#define DRB_MSG_CHANNEL_ID GENMASK(23, 16) ++#define DRB_MSG_COUNT_L GENMASK(15, 0) ++ ++struct dpmaif_drb_skb { ++ struct sk_buff *skb; ++ dma_addr_t bus_addr; ++ unsigned int data_len; ++ u16 index:13; ++ u16 is_msg:1; ++ u16 is_frag:1; ++ u16 is_last:1; ++}; ++ ++int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, ++ struct sk_buff *skb); ++void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl); ++int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl); ++void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq); ++void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask); ++int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq); ++void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl); ++void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl); ++ ++#endif /* __T7XX_HIF_DPMA_TX_H__ */ diff --git a/target/linux/generic/backport-5.15/621-v5.19-08-net-wwan-t7xx-Add-WWAN-network-interface.patch b/target/linux/generic/backport-5.15/621-v5.19-08-net-wwan-t7xx-Add-WWAN-network-interface.patch new file mode 100644 index 0000000000..03f9f77bb4 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-08-net-wwan-t7xx-Add-WWAN-network-interface.patch @@ -0,0 +1,567 @@ +From 05d19bf500f8281f574713479b04679fa226d0a3 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:06 -0700 +Subject: [PATCH] net: wwan: t7xx: Add WWAN network interface +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Creates the Cross Core Modem Network Interface (CCMNI) which implements +the wwan_ops for registration with the WWAN framework, CCMNI also +implements the net_device_ops functions used by the network device. +Network device operations include open, close, start transmission, TX +timeout and change MTU. + +Signed-off-by: Haijun Liu +Co-developed-by: Chandrashekar Devegowda +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Loic Poulain +Reviewed-by: Ilpo Järvinen +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/Makefile | 1 + + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 11 +- + drivers/net/wwan/t7xx/t7xx_netdev.c | 423 +++++++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_netdev.h | 55 ++++ + 4 files changed, 489 insertions(+), 1 deletion(-) + create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.c + create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.h + +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -17,3 +17,4 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_hif_dpmaif_tx.o \ + t7xx_hif_dpmaif_rx.o \ + t7xx_dpmaif.o \ ++ t7xx_netdev.o +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -35,6 +35,7 @@ + #include "t7xx_hif_cldma.h" + #include "t7xx_mhccif.h" + #include "t7xx_modem_ops.h" ++#include "t7xx_netdev.h" + #include "t7xx_pci.h" + #include "t7xx_pcie_mac.h" + #include "t7xx_port.h" +@@ -670,10 +671,14 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + if (ret) + goto err_destroy_hswq; + +- ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); ++ ret = t7xx_ccmni_init(t7xx_dev); + if (ret) + goto err_uninit_fsm; + ++ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); ++ if (ret) ++ goto err_uninit_ccmni; ++ + ret = t7xx_port_proxy_init(md); + if (ret) + goto err_uninit_md_cldma; +@@ -692,6 +697,9 @@ err_uninit_proxy: + err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + ++err_uninit_ccmni: ++ t7xx_ccmni_exit(t7xx_dev); ++ + err_uninit_fsm: + t7xx_fsm_uninit(md); + +@@ -713,6 +721,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_port_proxy_uninit(md->port_prox); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_ccmni_exit(t7xx_dev); + t7xx_fsm_uninit(md); + destroy_workqueue(md->handshake_wq); + } +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -0,0 +1,423 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Chandrashekar Devegowda ++ * Haijun Liu ++ * Ricardo Martinez ++ * ++ * Contributors: ++ * Amir Hanania ++ * Andy Shevchenko ++ * Chiranjeevi Rapolu ++ * Eliot Lee ++ * Moises Veleta ++ * Sreehari Kancharla ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "t7xx_hif_dpmaif_rx.h" ++#include "t7xx_hif_dpmaif_tx.h" ++#include "t7xx_netdev.h" ++#include "t7xx_pci.h" ++#include "t7xx_port_proxy.h" ++#include "t7xx_state_monitor.h" ++ ++#define IP_MUX_SESSION_DEFAULT 0 ++ ++static int t7xx_ccmni_open(struct net_device *dev) ++{ ++ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ ++ netif_carrier_on(dev); ++ netif_tx_start_all_queues(dev); ++ atomic_inc(&ccmni->usage); ++ return 0; ++} ++ ++static int t7xx_ccmni_close(struct net_device *dev) ++{ ++ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ ++ atomic_dec(&ccmni->usage); ++ netif_carrier_off(dev); ++ netif_tx_disable(dev); ++ return 0; ++} ++ ++static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb, ++ unsigned int txq_number) ++{ ++ struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb; ++ struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb); ++ ++ skb_cb->netif_idx = ccmni->index; ++ ++ if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb)) ++ return NETDEV_TX_BUSY; ++ ++ return 0; ++} ++ ++static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ int skb_len = skb->len; ++ ++ /* If MTU is changed or there is no headroom, drop the packet */ ++ if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) { ++ dev_kfree_skb(skb); ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } ++ ++ if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE)) ++ return NETDEV_TX_BUSY; ++ ++ dev->stats.tx_packets++; ++ dev->stats.tx_bytes += skb_len; ++ ++ return NETDEV_TX_OK; ++} ++ ++static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue) ++{ ++ struct t7xx_ccmni *ccmni = netdev_priv(dev); ++ ++ dev->stats.tx_errors++; ++ ++ if (atomic_read(&ccmni->usage) > 0) ++ netif_tx_wake_all_queues(dev); ++} ++ ++static const struct net_device_ops ccmni_netdev_ops = { ++ .ndo_open = t7xx_ccmni_open, ++ .ndo_stop = t7xx_ccmni_close, ++ .ndo_start_xmit = t7xx_ccmni_start_xmit, ++ .ndo_tx_timeout = t7xx_ccmni_tx_timeout, ++}; ++ ++static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ struct t7xx_ccmni *ccmni; ++ int i; ++ ++ for (i = 0; i < ctlb->nic_dev_num; i++) { ++ ccmni = ctlb->ccmni_inst[i]; ++ if (!ccmni) ++ continue; ++ ++ if (atomic_read(&ccmni->usage) > 0) { ++ netif_tx_start_all_queues(ccmni->dev); ++ netif_carrier_on(ccmni->dev); ++ } ++ } ++} ++ ++static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ struct t7xx_ccmni *ccmni; ++ int i; ++ ++ for (i = 0; i < ctlb->nic_dev_num; i++) { ++ ccmni = ctlb->ccmni_inst[i]; ++ if (!ccmni) ++ continue; ++ ++ if (atomic_read(&ccmni->usage) > 0) ++ netif_tx_disable(ccmni->dev); ++ } ++} ++ ++static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ struct t7xx_ccmni *ccmni; ++ int i; ++ ++ for (i = 0; i < ctlb->nic_dev_num; i++) { ++ ccmni = ctlb->ccmni_inst[i]; ++ if (!ccmni) ++ continue; ++ ++ if (atomic_read(&ccmni->usage) > 0) ++ netif_carrier_off(ccmni->dev); ++ } ++} ++ ++static void t7xx_ccmni_wwan_setup(struct net_device *dev) ++{ ++ dev->hard_header_len += 
sizeof(struct ccci_header); ++ ++ dev->mtu = ETH_DATA_LEN; ++ dev->max_mtu = CCMNI_MTU_MAX; ++ BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE); ++ ++ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; ++ dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO; ++ ++ dev->flags = IFF_POINTOPOINT | IFF_NOARP; ++ ++ dev->features = NETIF_F_VLAN_CHALLENGED; ++ ++ dev->features |= NETIF_F_SG; ++ dev->hw_features |= NETIF_F_SG; ++ ++ dev->features |= NETIF_F_HW_CSUM; ++ dev->hw_features |= NETIF_F_HW_CSUM; ++ ++ dev->features |= NETIF_F_RXCSUM; ++ dev->hw_features |= NETIF_F_RXCSUM; ++ ++ dev->needs_free_netdev = true; ++ ++ dev->type = ARPHRD_NONE; ++ ++ dev->netdev_ops = &ccmni_netdev_ops; ++} ++ ++static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, ++ struct netlink_ext_ack *extack) ++{ ++ struct t7xx_ccmni_ctrl *ctlb = ctxt; ++ struct t7xx_ccmni *ccmni; ++ int ret; ++ ++ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) ++ return -EINVAL; ++ ++ ccmni = wwan_netdev_drvpriv(dev); ++ ccmni->index = if_id; ++ ccmni->ctlb = ctlb; ++ ccmni->dev = dev; ++ atomic_set(&ccmni->usage, 0); ++ ctlb->ccmni_inst[if_id] = ccmni; ++ ++ ret = register_netdevice(dev); ++ if (ret) ++ return ret; ++ ++ netif_device_attach(dev); ++ return 0; ++} ++ ++static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head) ++{ ++ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ struct t7xx_ccmni_ctrl *ctlb = ctxt; ++ u8 if_id = ccmni->index; ++ ++ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) ++ return; ++ ++ if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni)) ++ return; ++ ++ unregister_netdevice(dev); ++} ++ ++static const struct wwan_ops ccmni_wwan_ops = { ++ .priv_size = sizeof(struct t7xx_ccmni), ++ .setup = t7xx_ccmni_wwan_setup, ++ .newlink = t7xx_ccmni_wwan_newlink, ++ .dellink = t7xx_ccmni_wwan_dellink, ++}; ++ ++static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ struct device *dev = ctlb->hif_ctrl->dev; ++ int ret; ++ ++ if (ctlb->wwan_is_registered) ++ return 0; ++ ++ /* WWAN core will create a netdev for the default IP MUX channel */ ++ ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT); ++ if (ret < 0) { ++ dev_err(dev, "Unable to register WWAN ops, %d\n", ret); ++ return ret; ++ } ++ ++ ctlb->wwan_is_registered = true; ++ return 0; ++} ++ ++static int t7xx_ccmni_md_state_callback(enum md_state state, void *para) ++{ ++ struct t7xx_ccmni_ctrl *ctlb = para; ++ struct device *dev; ++ int ret = 0; ++ ++ dev = ctlb->hif_ctrl->dev; ++ ctlb->md_sta = state; ++ ++ switch (state) { ++ case MD_STATE_READY: ++ ret = t7xx_ccmni_register_wwan(ctlb); ++ if (!ret) ++ t7xx_ccmni_start(ctlb); ++ break; ++ ++ case MD_STATE_EXCEPTION: ++ case MD_STATE_STOPPED: ++ t7xx_ccmni_pre_stop(ctlb); ++ ++ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); ++ if (ret < 0) ++ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); ++ ++ t7xx_ccmni_post_stop(ctlb); ++ break; ++ ++ case MD_STATE_WAITING_FOR_HS1: ++ case MD_STATE_WAITING_TO_STOP: ++ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); ++ if (ret < 0) ++ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); ++ ++ break; ++ ++ default: ++ break; ++ } ++ ++ return ret; ++} ++ ++static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; ++ struct t7xx_fsm_notifier *md_status_notifier; ++ ++ md_status_notifier = &ctlb->md_status_notify; ++ INIT_LIST_HEAD(&md_status_notifier->entry); ++ 
md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback; ++ md_status_notifier->data = ctlb; ++ ++ t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier); ++} ++ ++static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb) ++{ ++ struct t7xx_skb_cb *skb_cb; ++ struct net_device *net_dev; ++ struct t7xx_ccmni *ccmni; ++ int pkt_type, skb_len; ++ u8 netif_id; ++ ++ skb_cb = T7XX_SKB_CB(skb); ++ netif_id = skb_cb->netif_idx; ++ ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id]; ++ if (!ccmni) { ++ dev_kfree_skb(skb); ++ return; ++ } ++ ++ net_dev = ccmni->dev; ++ skb->dev = net_dev; ++ ++ pkt_type = skb_cb->rx_pkt_type; ++ if (pkt_type == PKT_TYPE_IP6) ++ skb->protocol = htons(ETH_P_IPV6); ++ else ++ skb->protocol = htons(ETH_P_IP); ++ ++ skb_len = skb->len; ++ netif_rx(skb); ++ net_dev->stats.rx_packets++; ++ net_dev->stats.rx_bytes += skb_len; ++} ++ ++static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) ++{ ++ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; ++ struct netdev_queue *net_queue; ++ ++ if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) { ++ net_queue = netdev_get_tx_queue(ccmni->dev, qno); ++ if (netif_tx_queue_stopped(net_queue)) ++ netif_tx_wake_queue(net_queue); ++ } ++} ++ ++static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) ++{ ++ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; ++ struct netdev_queue *net_queue; ++ ++ if (atomic_read(&ccmni->usage) > 0) { ++ netdev_err(ccmni->dev, "TX queue %d is full\n", qno); ++ net_queue = netdev_get_tx_queue(ccmni->dev, qno); ++ netif_tx_stop_queue(net_queue); ++ } ++} ++ ++static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev, ++ enum dpmaif_txq_state state, int qno) ++{ ++ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; ++ ++ if (ctlb->md_sta != MD_STATE_READY) ++ return; ++ ++ if (!ctlb->ccmni_inst[0]) { ++ dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n"); ++ return; ++ } ++ ++ if (state == DMPAIF_TXQ_STATE_IRQ) ++ t7xx_ccmni_queue_tx_irq_notify(ctlb, qno); ++ else if (state == DMPAIF_TXQ_STATE_FULL) ++ t7xx_ccmni_queue_tx_full_notify(ctlb, qno); ++} ++ ++int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct device *dev = &t7xx_dev->pdev->dev; ++ struct t7xx_ccmni_ctrl *ctlb; ++ ++ ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL); ++ if (!ctlb) ++ return -ENOMEM; ++ ++ t7xx_dev->ccmni_ctlb = ctlb; ++ ctlb->t7xx_dev = t7xx_dev; ++ ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify; ++ ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb; ++ ctlb->nic_dev_num = NIC_DEV_DEFAULT; ++ ++ ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks); ++ if (!ctlb->hif_ctrl) ++ return -ENOMEM; ++ ++ init_md_status_notifier(t7xx_dev); ++ return 0; ++} ++ ++void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; ++ ++ t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify); ++ ++ if (ctlb->wwan_is_registered) { ++ wwan_unregister_ops(&t7xx_dev->pdev->dev); ++ ctlb->wwan_is_registered = false; ++ } ++ ++ t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); ++} +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.h +@@ -0,0 +1,55 @@ ++/* SPDX-License-Identifier: GPL-2.0-only ++ * ++ * Copyright (c) 2021, MediaTek Inc. ++ * Copyright (c) 2021-2022, Intel Corporation. 
++ * ++ * Authors: ++ * Haijun Liu ++ * Moises Veleta ++ * ++ * Contributors: ++ * Amir Hanania ++ * Chiranjeevi Rapolu ++ * Ricardo Martinez ++ */ ++ ++#ifndef __T7XX_NETDEV_H__ ++#define __T7XX_NETDEV_H__ ++ ++#include ++#include ++#include ++ ++#include "t7xx_hif_dpmaif.h" ++#include "t7xx_pci.h" ++#include "t7xx_state_monitor.h" ++ ++#define RXQ_NUM DPMAIF_RXQ_NUM ++#define NIC_DEV_MAX 21 ++#define NIC_DEV_DEFAULT 2 ++ ++#define CCMNI_NETDEV_WDT_TO (1 * HZ) ++#define CCMNI_MTU_MAX 3000 ++ ++struct t7xx_ccmni { ++ u8 index; ++ atomic_t usage; ++ struct net_device *dev; ++ struct t7xx_ccmni_ctrl *ctlb; ++}; ++ ++struct t7xx_ccmni_ctrl { ++ struct t7xx_pci_dev *t7xx_dev; ++ struct dpmaif_ctrl *hif_ctrl; ++ struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX]; ++ struct dpmaif_callbacks callbacks; ++ unsigned int nic_dev_num; ++ unsigned int md_sta; ++ struct t7xx_fsm_notifier md_status_notify; ++ bool wwan_is_registered; ++}; ++ ++int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev); ++ ++#endif /* __T7XX_NETDEV_H__ */ diff --git a/target/linux/generic/backport-5.15/621-v5.19-09-net-wwan-t7xx-Introduce-power-management.patch b/target/linux/generic/backport-5.15/621-v5.19-09-net-wwan-t7xx-Introduce-power-management.patch new file mode 100644 index 0000000000..2cd2b0177d --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-09-net-wwan-t7xx-Introduce-power-management.patch @@ -0,0 +1,919 @@ +From 46e8f49ed7b3063f51e28f3ea2084b3da29c1503 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:07 -0700 +Subject: [PATCH] net: wwan: t7xx: Introduce power management +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Implements suspend, resumes, freeze, thaw, poweroff, and restore +`dev_pm_ops` callbacks. + +From the host point of view, the t7xx driver is one entity. But, the +device has several modules that need to be addressed in different ways +during power management (PM) flows. +The driver uses the term 'PM entities' to refer to the 2 DPMA and +2 CLDMA HW blocks that need to be managed during PM flows. +When a dev_pm_ops function is called, the PM entities list is iterated +and the matching function is called for each entry in the list. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 123 +++++- + drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 1 + + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 90 +++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 1 + + drivers/net/wwan/t7xx/t7xx_mhccif.c | 17 + + drivers/net/wwan/t7xx/t7xx_pci.c | 421 +++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_pci.h | 46 +++ + drivers/net/wwan/t7xx/t7xx_state_monitor.c | 2 + + 8 files changed, 700 insertions(+), 1 deletion(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1076,6 +1076,120 @@ int t7xx_cldma_alloc(enum cldma_id hif_i + return 0; + } + ++static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param) ++{ ++ struct cldma_ctrl *md_ctrl = entity_param; ++ struct t7xx_cldma_hw *hw_info; ++ unsigned long flags; ++ int qno_t; ++ ++ hw_info = &md_ctrl->hw_info; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_restore(hw_info); ++ for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) { ++ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr, ++ MTK_TX); ++ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr, ++ MTK_RX); ++ } ++ t7xx_cldma_enable_irq(md_ctrl); ++ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); ++ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; ++ t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX); ++ t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param) ++{ ++ struct cldma_ctrl *md_ctrl = entity_param; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ md_ctrl->txq_active |= TXRX_STATUS_BITMASK; ++ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); ++ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ if (md_ctrl->hif_id == CLDMA_ID_MD) ++ t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK); ++ ++ return 0; ++} ++ ++static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param) ++{ ++ struct cldma_ctrl *md_ctrl = entity_param; ++ struct t7xx_cldma_hw *hw_info; ++ unsigned long flags; ++ ++ hw_info = &md_ctrl->hw_info; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX); ++ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); ++ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; ++ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); ++ t7xx_cldma_clear_ip_busy(hw_info); ++ t7xx_cldma_disable_irq(md_ctrl); ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++} ++ ++static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param) ++{ ++ struct cldma_ctrl *md_ctrl = entity_param; ++ struct t7xx_cldma_hw *hw_info; ++ unsigned long flags; ++ ++ if (md_ctrl->hif_id == CLDMA_ID_MD) ++ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); ++ ++ hw_info = &md_ctrl->hw_info; ++ ++ spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ++ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX); ++ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX); ++ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; ++ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); ++ md_ctrl->txq_started = 0; ++ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ return 0; ++} ++ ++static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl) ++{ ++ md_ctrl->pm_entity = 
kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL); ++ if (!md_ctrl->pm_entity) ++ return -ENOMEM; ++ ++ md_ctrl->pm_entity->entity_param = md_ctrl; ++ ++ if (md_ctrl->hif_id == CLDMA_ID_MD) ++ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1; ++ else ++ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2; ++ ++ md_ctrl->pm_entity->suspend = t7xx_cldma_suspend; ++ md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late; ++ md_ctrl->pm_entity->resume = t7xx_cldma_resume; ++ md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early; ++ ++ return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity); ++} ++ ++static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl) ++{ ++ if (!md_ctrl->pm_entity) ++ return -EINVAL; ++ ++ t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity); ++ kfree(md_ctrl->pm_entity); ++ md_ctrl->pm_entity = NULL; ++ return 0; ++} ++ + void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) + { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; +@@ -1126,6 +1240,7 @@ static void t7xx_cldma_destroy_wqs(struc + * t7xx_cldma_init() - Initialize CLDMA. + * @md_ctrl: CLDMA context structure. + * ++ * Allocate and initialize device power management entity. + * Initialize HIF TX/RX queue structure. + * Register CLDMA callback ISR with PCIe driver. + * +@@ -1136,12 +1251,16 @@ static void t7xx_cldma_destroy_wqs(struc + int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) + { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; +- int i; ++ int ret, i; + + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + md_ctrl->is_late_init = false; + ++ ret = t7xx_cldma_pm_init(md_ctrl); ++ if (ret) ++ return ret; ++ + spin_lock_init(&md_ctrl->cldma_lock); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { +@@ -1176,6 +1295,7 @@ int t7xx_cldma_init(struct cldma_ctrl *m + + err_workqueue: + t7xx_cldma_destroy_wqs(md_ctrl); ++ t7xx_cldma_pm_uninit(md_ctrl); + return -ENOMEM; + } + +@@ -1190,4 +1310,5 @@ void t7xx_cldma_exit(struct cldma_ctrl * + t7xx_cldma_stop(md_ctrl); + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_destroy_wqs(md_ctrl); ++ t7xx_cldma_pm_uninit(md_ctrl); + } +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +@@ -98,6 +98,7 @@ struct cldma_ctrl { + struct dma_pool *gpd_dmapool; + struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; + struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; ++ struct md_pm_entity *pm_entity; + struct t7xx_cldma_hw hw_info; + bool is_late_init; + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c +@@ -398,6 +398,90 @@ static int t7xx_dpmaif_stop(struct dpmai + return 0; + } + ++static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param) ++{ ++ struct dpmaif_ctrl *dpmaif_ctrl = param; ++ ++ t7xx_dpmaif_tx_stop(dpmaif_ctrl); ++ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); ++ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); ++ t7xx_dpmaif_disable_irq(dpmaif_ctrl); ++ t7xx_dpmaif_rx_stop(dpmaif_ctrl); ++ return 0; ++} ++ ++static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ int qno; ++ ++ for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++) ++ t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno); ++} ++ ++static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct dpmaif_rx_queue *rxq; ++ struct dpmaif_tx_queue *txq; ++ unsigned int que_cnt; ++ ++ for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) { ++ txq = &dpmaif_ctrl->txq[que_cnt]; ++ 
txq->que_started = true; ++ } ++ ++ for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) { ++ rxq = &dpmaif_ctrl->rxq[que_cnt]; ++ rxq->que_started = true; ++ } ++} ++ ++static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param) ++{ ++ struct dpmaif_ctrl *dpmaif_ctrl = param; ++ ++ if (!dpmaif_ctrl) ++ return 0; ++ ++ t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl); ++ t7xx_dpmaif_enable_irq(dpmaif_ctrl); ++ t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl); ++ t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info); ++ wake_up(&dpmaif_ctrl->tx_wq); ++ return 0; ++} ++ ++static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; ++ int ret; ++ ++ INIT_LIST_HEAD(&dpmaif_pm_entity->entity); ++ dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend; ++ dpmaif_pm_entity->suspend_late = NULL; ++ dpmaif_pm_entity->resume_early = NULL; ++ dpmaif_pm_entity->resume = &t7xx_dpmaif_resume; ++ dpmaif_pm_entity->id = PM_ENTITY_ID_DATA; ++ dpmaif_pm_entity->entity_param = dpmaif_ctrl; ++ ++ ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); ++ if (ret) ++ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); ++ ++ return ret; ++} ++ ++static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl) ++{ ++ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; ++ int ret; ++ ++ ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); ++ if (ret < 0) ++ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); ++ ++ return ret; ++} ++ + int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state) + { + int ret = 0; +@@ -461,11 +545,16 @@ struct dpmaif_ctrl *t7xx_dpmaif_hif_init + dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + ++ ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl); ++ if (ret) ++ return NULL; ++ + t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + + ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); + if (ret) { ++ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); + dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); + return NULL; + } +@@ -478,6 +567,7 @@ void t7xx_dpmaif_hif_exit(struct dpmaif_ + { + if (dpmaif_ctrl->dpmaif_sw_init_done) { + t7xx_dpmaif_stop(dpmaif_ctrl); ++ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); + t7xx_dpmaif_sw_release(dpmaif_ctrl); + dpmaif_ctrl->dpmaif_sw_init_done = false; + } +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h +@@ -174,6 +174,7 @@ struct dpmaif_callbacks { + struct dpmaif_ctrl { + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; ++ struct md_pm_entity dpmaif_pm_entity; + enum dpmaif_state state; + bool dpmaif_sw_init_done; + struct dpmaif_hw_info hw_info; +--- a/drivers/net/wwan/t7xx/t7xx_mhccif.c ++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c +@@ -24,6 +24,11 @@ + #include "t7xx_pcie_mac.h" + #include "t7xx_reg.h" + ++#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \ ++ D2H_INT_RESUME_ACK | \ ++ D2H_INT_SUSPEND_ACK_AP | \ ++ D2H_INT_RESUME_ACK_AP) ++ + static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) + { + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; +@@ -53,6 +58,18 @@ static irqreturn_t t7xx_mhccif_isr_threa + } + + t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); ++ ++ if (int_status & D2H_INT_SR_ACK) ++ complete(&t7xx_dev->pm_sr_ack); ++ ++ 
iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ ++ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); ++ if (!int_status) { ++ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); ++ iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ } ++ + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + return IRQ_HANDLED; + } +--- a/drivers/net/wwan/t7xx/t7xx_pci.c ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -18,23 +18,438 @@ + + #include + #include ++#include + #include + #include + #include + #include + #include ++#include ++#include ++#include + #include ++#include + #include ++#include ++#include + + #include "t7xx_mhccif.h" + #include "t7xx_modem_ops.h" + #include "t7xx_pci.h" + #include "t7xx_pcie_mac.h" + #include "t7xx_reg.h" ++#include "t7xx_state_monitor.h" + + #define T7XX_PCI_IREG_BASE 0 + #define T7XX_PCI_EREG_BASE 2 + ++#define PM_ACK_TIMEOUT_MS 1500 ++#define PM_RESOURCE_POLL_TIMEOUT_US 10000 ++#define PM_RESOURCE_POLL_STEP_US 100 ++ ++enum t7xx_pm_state { ++ MTK_PM_EXCEPTION, ++ MTK_PM_INIT, /* Device initialized, but handshake not completed */ ++ MTK_PM_SUSPENDED, ++ MTK_PM_RESUMED, ++}; ++ ++static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) ++{ ++ int ret, val; ++ ++ ret = read_poll_timeout(ioread32, val, ++ (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK, ++ PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true, ++ IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); ++ if (ret == -ETIMEDOUT) ++ dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); ++ ++ return ret; ++} ++ ++static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct pci_dev *pdev = t7xx_dev->pdev; ++ ++ INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); ++ mutex_init(&t7xx_dev->md_pm_entity_mtx); ++ init_completion(&t7xx_dev->pm_sr_ack); ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); ++ ++ device_init_wakeup(&pdev->dev, true); ++ dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | ++ DPM_FLAG_NO_DIRECT_COMPLETE); ++ ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ ++ return t7xx_wait_pm_config(t7xx_dev); ++} ++ ++void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) ++{ ++ /* Enable the PCIe resource lock only after MD deep sleep is done */ ++ t7xx_mhccif_mask_clr(t7xx_dev, ++ D2H_INT_SUSPEND_ACK | ++ D2H_INT_RESUME_ACK | ++ D2H_INT_SUSPEND_ACK_AP | ++ D2H_INT_RESUME_ACK_AP); ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); ++} ++ ++static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) ++{ ++ /* The device is kept in FSM re-init flow ++ * so just roll back PM setting to the init setting. 
++ */ ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ return t7xx_wait_pm_config(t7xx_dev); ++} ++ ++void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev) ++{ ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ t7xx_wait_pm_config(t7xx_dev); ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); ++} ++ ++int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) ++{ ++ struct md_pm_entity *entity; ++ ++ mutex_lock(&t7xx_dev->md_pm_entity_mtx); ++ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { ++ if (entity->id == pm_entity->id) { ++ mutex_unlock(&t7xx_dev->md_pm_entity_mtx); ++ return -EEXIST; ++ } ++ } ++ ++ list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); ++ mutex_unlock(&t7xx_dev->md_pm_entity_mtx); ++ return 0; ++} ++ ++int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) ++{ ++ struct md_pm_entity *entity, *tmp_entity; ++ ++ mutex_lock(&t7xx_dev->md_pm_entity_mtx); ++ list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { ++ if (entity->id == pm_entity->id) { ++ list_del(&pm_entity->entity); ++ mutex_unlock(&t7xx_dev->md_pm_entity_mtx); ++ return 0; ++ } ++ } ++ ++ mutex_unlock(&t7xx_dev->md_pm_entity_mtx); ++ ++ return -ENXIO; ++} ++ ++static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) ++{ ++ unsigned long wait_ret; ++ ++ reinit_completion(&t7xx_dev->pm_sr_ack); ++ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request); ++ wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, ++ msecs_to_jiffies(PM_ACK_TIMEOUT_MS)); ++ if (!wait_ret) ++ return -ETIMEDOUT; ++ ++ return 0; ++} ++ ++static int __t7xx_pci_pm_suspend(struct pci_dev *pdev) ++{ ++ enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID; ++ struct t7xx_pci_dev *t7xx_dev; ++ struct md_pm_entity *entity; ++ int ret; ++ ++ t7xx_dev = pci_get_drvdata(pdev); ++ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { ++ dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); ++ return -EFAULT; ++ } ++ ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ ret = t7xx_wait_pm_config(t7xx_dev); ++ if (ret) { ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ return ret; ++ } ++ ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); ++ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); ++ t7xx_dev->rgu_pci_irq_en = false; ++ ++ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { ++ if (!entity->suspend) ++ continue; ++ ++ ret = entity->suspend(t7xx_dev, entity->entity_param); ++ if (ret) { ++ entity_id = entity->id; ++ dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); ++ goto abort_suspend; ++ } ++ } ++ ++ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ); ++ if (ret) { ++ dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); ++ goto abort_suspend; ++ } ++ ++ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP); ++ if (ret) { ++ t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); ++ dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); ++ goto abort_suspend; ++ } ++ ++ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { ++ if (entity->suspend_late) ++ entity->suspend_late(t7xx_dev, entity->entity_param); ++ } ++ ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ return 0; ++ ++abort_suspend: ++ 
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { ++ if (entity_id == entity->id) ++ break; ++ ++ if (entity->resume) ++ entity->resume(t7xx_dev, entity->entity_param); ++ } ++ ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); ++ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); ++ return ret; ++} ++ ++static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev) ++{ ++ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); ++ ++ /* Disable interrupt first and let the IPs enable them */ ++ iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0); ++ ++ /* Device disables PCIe interrupts during resume and ++ * following function will re-enable PCIe interrupts. ++ */ ++ t7xx_pcie_mac_interrupts_en(t7xx_dev); ++ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); ++} ++ ++static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3) ++{ ++ int ret; ++ ++ ret = pcim_enable_device(t7xx_dev->pdev); ++ if (ret) ++ return ret; ++ ++ t7xx_pcie_mac_atr_init(t7xx_dev); ++ t7xx_pcie_interrupt_reinit(t7xx_dev); ++ ++ if (is_d3) { ++ t7xx_mhccif_init(t7xx_dev); ++ return t7xx_pci_pm_reinit(t7xx_dev); ++ } ++ ++ return 0; ++} ++ ++static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) ++{ ++ struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; ++ struct device *dev = &t7xx_dev->pdev->dev; ++ int ret = -EINVAL; ++ ++ switch (event) { ++ case FSM_CMD_STOP: ++ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); ++ break; ++ ++ case FSM_CMD_START: ++ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); ++ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); ++ t7xx_dev->rgu_pci_irq_en = true; ++ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); ++ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0); ++ break; ++ ++ default: ++ break; ++ } ++ ++ if (ret) ++ dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret); ++ ++ return ret; ++} ++ ++static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) ++{ ++ struct t7xx_pci_dev *t7xx_dev; ++ struct md_pm_entity *entity; ++ u32 prev_state; ++ int ret = 0; ++ ++ t7xx_dev = pci_get_drvdata(pdev); ++ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ return 0; ++ } ++ ++ t7xx_pcie_mac_interrupts_en(t7xx_dev); ++ prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE); ++ ++ if (state_check) { ++ /* For D3/L3 resume, the device could boot so quickly that the ++ * initial value of the dummy register might be overwritten. ++ * Identify new boots if the ATR source address register is not initialized. 
++ */ ++ u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) + ++ ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR); ++ if (prev_state == PM_RESUME_REG_STATE_L3 || ++ (prev_state == PM_RESUME_REG_STATE_INIT && ++ atr_reg_val == ATR_SRC_ADDR_INVALID)) { ++ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); ++ if (ret) ++ return ret; ++ ++ ret = t7xx_pcie_reinit(t7xx_dev, true); ++ if (ret) ++ return ret; ++ ++ t7xx_clear_rgu_irq(t7xx_dev); ++ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); ++ } ++ ++ if (prev_state == PM_RESUME_REG_STATE_EXP || ++ prev_state == PM_RESUME_REG_STATE_L2_EXP) { ++ if (prev_state == PM_RESUME_REG_STATE_L2_EXP) { ++ ret = t7xx_pcie_reinit(t7xx_dev, false); ++ if (ret) ++ return ret; ++ } ++ ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); ++ t7xx_dev->rgu_pci_irq_en = true; ++ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); ++ ++ t7xx_mhccif_mask_clr(t7xx_dev, ++ D2H_INT_EXCEPTION_INIT | ++ D2H_INT_EXCEPTION_INIT_DONE | ++ D2H_INT_EXCEPTION_CLEARQ_DONE | ++ D2H_INT_EXCEPTION_ALLQ_RESET | ++ D2H_INT_PORT_ENUM); ++ ++ return ret; ++ } ++ ++ if (prev_state == PM_RESUME_REG_STATE_L2) { ++ ret = t7xx_pcie_reinit(t7xx_dev, false); ++ if (ret) ++ return ret; ++ ++ } else if (prev_state != PM_RESUME_REG_STATE_L1 && ++ prev_state != PM_RESUME_REG_STATE_INIT) { ++ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); ++ if (ret) ++ return ret; ++ ++ t7xx_clear_rgu_irq(t7xx_dev); ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); ++ return 0; ++ } ++ } ++ ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ t7xx_wait_pm_config(t7xx_dev); ++ ++ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { ++ if (entity->resume_early) ++ entity->resume_early(t7xx_dev, entity->entity_param); ++ } ++ ++ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); ++ if (ret) ++ dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); ++ ++ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP); ++ if (ret) ++ dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); ++ ++ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { ++ if (entity->resume) { ++ ret = entity->resume(t7xx_dev, entity->entity_param); ++ if (ret) ++ dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", ++ entity->id, ret); ++ } ++ } ++ ++ t7xx_dev->rgu_pci_irq_en = true; ++ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); ++ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); ++ ++ return ret; ++} ++ ++static int t7xx_pci_pm_resume_noirq(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct t7xx_pci_dev *t7xx_dev; ++ ++ t7xx_dev = pci_get_drvdata(pdev); ++ t7xx_pcie_mac_interrupts_dis(t7xx_dev); ++ ++ return 0; ++} ++ ++static void t7xx_pci_shutdown(struct pci_dev *pdev) ++{ ++ __t7xx_pci_pm_suspend(pdev); ++} ++ ++static int t7xx_pci_pm_suspend(struct device *dev) ++{ ++ return __t7xx_pci_pm_suspend(to_pci_dev(dev)); ++} ++ ++static int t7xx_pci_pm_resume(struct device *dev) ++{ ++ return __t7xx_pci_pm_resume(to_pci_dev(dev), true); ++} ++ ++static int t7xx_pci_pm_thaw(struct device *dev) ++{ ++ return __t7xx_pci_pm_resume(to_pci_dev(dev), false); ++} ++ ++static const struct dev_pm_ops t7xx_pci_pm_ops = { ++ .suspend = t7xx_pci_pm_suspend, ++ .resume = t7xx_pci_pm_resume, ++ .resume_noirq = t7xx_pci_pm_resume_noirq, ++ .freeze = t7xx_pci_pm_suspend, ++ .thaw = t7xx_pci_pm_thaw, ++ .poweroff = t7xx_pci_pm_suspend, ++ .restore = t7xx_pci_pm_resume, ++ .restore_noirq = 
t7xx_pci_pm_resume_noirq, ++}; ++ + static int t7xx_request_irq(struct pci_dev *pdev) + { + struct t7xx_pci_dev *t7xx_dev; +@@ -165,6 +580,10 @@ static int t7xx_pci_probe(struct pci_dev + IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; + t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; + ++ ret = t7xx_pci_pm_init(t7xx_dev); ++ if (ret) ++ return ret; ++ + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pci_infracfg_ao_calc(t7xx_dev); + t7xx_mhccif_init(t7xx_dev); +@@ -216,6 +635,8 @@ static struct pci_driver t7xx_pci_driver + .id_table = t7xx_pci_table, + .probe = t7xx_pci_probe, + .remove = t7xx_pci_remove, ++ .driver.pm = &t7xx_pci_pm_ops, ++ .shutdown = t7xx_pci_shutdown, + }; + + module_pci_driver(t7xx_pci_driver); +--- a/drivers/net/wwan/t7xx/t7xx_pci.h ++++ b/drivers/net/wwan/t7xx/t7xx_pci.h +@@ -17,7 +17,9 @@ + #ifndef __T7XX_PCI_H__ + #define __T7XX_PCI_H__ + ++#include + #include ++#include + #include + #include + +@@ -49,6 +51,10 @@ typedef irqreturn_t (*t7xx_intr_callback + * @md: modem interface + * @ccmni_ctlb: context structure used to control the network data path + * @rgu_pci_irq_en: RGU callback ISR registered and active ++ * @md_pm_entities: list of pm entities ++ * @md_pm_entity_mtx: protects md_pm_entities list ++ * @pm_sr_ack: ack from the device when went to sleep or woke up ++ * @md_pm_state: state for resume/suspend + */ + struct t7xx_pci_dev { + t7xx_intr_callback intr_handler[EXT_INT_NUM]; +@@ -59,6 +65,46 @@ struct t7xx_pci_dev { + struct t7xx_modem *md; + struct t7xx_ccmni_ctrl *ccmni_ctlb; + bool rgu_pci_irq_en; ++ ++ /* Low Power Items */ ++ struct list_head md_pm_entities; ++ struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ ++ struct completion pm_sr_ack; ++ atomic_t md_pm_state; ++}; ++ ++enum t7xx_pm_id { ++ PM_ENTITY_ID_CTRL1, ++ PM_ENTITY_ID_CTRL2, ++ PM_ENTITY_ID_DATA, ++ PM_ENTITY_ID_INVALID + }; + ++/* struct md_pm_entity - device power management entity ++ * @entity: list of PM Entities ++ * @suspend: callback invoked before sending D3 request to device ++ * @suspend_late: callback invoked after getting D3 ACK from device ++ * @resume_early: callback invoked before sending the resume request to device ++ * @resume: callback invoked after getting resume ACK from device ++ * @id: unique PM entity identifier ++ * @entity_param: parameter passed to the registered callbacks ++ * ++ * This structure is used to indicate PM operations required by internal ++ * HW modules such as CLDMA and DPMA. 
++ */ ++struct md_pm_entity { ++ struct list_head entity; ++ int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); ++ void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); ++ void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); ++ int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); ++ enum t7xx_pm_id id; ++ void *entity_param; ++}; ++ ++int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); ++int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); ++void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); ++ + #endif /* __T7XX_PCI_H__ */ +--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c +@@ -188,6 +188,7 @@ static void fsm_routine_exception(struct + case EXCEPTION_EVENT: + dev_err(dev, "Exception event\n"); + t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); ++ t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); + t7xx_md_exception_handshake(ctl->md); + + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, +@@ -300,6 +301,7 @@ static int fsm_routine_starting(struct t + return -ETIMEDOUT; + } + ++ t7xx_pci_pm_init_late(md->t7xx_dev); + fsm_routine_ready(ctl); + return 0; + } diff --git a/target/linux/generic/backport-5.15/621-v5.19-10-net-wwan-t7xx-Runtime-PM.patch b/target/linux/generic/backport-5.15/621-v5.19-10-net-wwan-t7xx-Runtime-PM.patch new file mode 100644 index 0000000000..dbf2fb3004 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-10-net-wwan-t7xx-Runtime-PM.patch @@ -0,0 +1,277 @@ +From d10b3a695ba0227faf249537402bb72b283a36b8 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:08 -0700 +Subject: [PATCH] net: wwan: t7xx: Runtime PM +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Enables runtime power management callbacks including runtime_suspend +and runtime_resume. Autosuspend is used to prevent overhead by frequent +wake-ups. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Eliot Lee +Signed-off-by: Eliot Lee +Signed-off-by: Ricardo Martinez +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 14 ++++++++++++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 17 +++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 15 +++++++++++++++ + drivers/net/wwan/t7xx/t7xx_pci.c | 22 ++++++++++++++++++++++ + 4 files changed, 68 insertions(+) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -251,6 +252,8 @@ static void t7xx_cldma_rx_done(struct wo + t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); ++ pm_runtime_mark_last_busy(md_ctrl->dev); ++ pm_runtime_put_autosuspend(md_ctrl->dev); + } + + static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) +@@ -360,6 +363,9 @@ static void t7xx_cldma_tx_done(struct wo + t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); ++ ++ pm_runtime_mark_last_busy(md_ctrl->dev); ++ pm_runtime_put_autosuspend(md_ctrl->dev); + } + + static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, +@@ -568,6 +574,7 @@ static void t7xx_cldma_irq_work_cb(struc + if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { + if (i < CLDMA_TXQ_NUM) { ++ pm_runtime_get(md_ctrl->dev); + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); + queue_work(md_ctrl->txq[i].worker, +@@ -592,6 +599,7 @@ static void t7xx_cldma_irq_work_cb(struc + if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; + for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { ++ pm_runtime_get(md_ctrl->dev); + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); + queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); +@@ -922,6 +930,10 @@ int t7xx_cldma_send_skb(struct cldma_ctr + if (qno >= CLDMA_TXQ_NUM) + return -EINVAL; + ++ ret = pm_runtime_resume_and_get(md_ctrl->dev); ++ if (ret < 0 && ret != -EACCES) ++ return ret; ++ + queue = &md_ctrl->txq[qno]; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); +@@ -965,6 +977,8 @@ int t7xx_cldma_send_skb(struct cldma_ctr + } while (!ret); + + allow_sleep: ++ pm_runtime_mark_last_busy(md_ctrl->dev); ++ pm_runtime_put_autosuspend(md_ctrl->dev); + return ret; + } + +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -32,6 +32,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -910,6 +911,7 @@ static void t7xx_dpmaif_rxq_work(struct + { + struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; ++ int ret; + + atomic_set(&rxq->rx_processing, 1); + /* Ensure rx_processing is changed to 1 before actually begin RX flow */ +@@ -921,7 +923,14 @@ static void t7xx_dpmaif_rxq_work(struct + return; + } + ++ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); ++ if (ret < 0 && ret != -EACCES) ++ return; ++ + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); ++ ++ pm_runtime_mark_last_busy(dpmaif_ctrl->dev); ++ pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + atomic_set(&rxq->rx_processing, 0); + } + +@@ -1123,11 +1132,19 @@ static void t7xx_dpmaif_bat_release_work + { + struct dpmaif_ctrl *dpmaif_ctrl = 
container_of(work, struct dpmaif_ctrl, bat_release_work); + struct dpmaif_rx_queue *rxq; ++ int ret; ++ ++ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); ++ if (ret < 0 && ret != -EACCES) ++ return; + + /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ + rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; + t7xx_dpmaif_bat_release_and_add(rxq); + t7xx_dpmaif_frag_bat_release_and_add(rxq); ++ ++ pm_runtime_mark_last_busy(dpmaif_ctrl->dev); ++ pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } + + int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl) +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -161,6 +162,10 @@ static void t7xx_dpmaif_tx_done(struct w + struct dpmaif_hw_info *hw_info; + int ret; + ++ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); ++ if (ret < 0 && ret != -EACCES) ++ return; ++ + hw_info = &dpmaif_ctrl->hw_info; + ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); + if (ret == -EAGAIN || +@@ -174,6 +179,9 @@ static void t7xx_dpmaif_tx_done(struct w + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); + } ++ ++ pm_runtime_mark_last_busy(dpmaif_ctrl->dev); ++ pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } + + static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, +@@ -423,6 +431,7 @@ static void t7xx_do_tx_hw_push(struct dp + static int t7xx_dpmaif_tx_hw_push_thread(void *arg) + { + struct dpmaif_ctrl *dpmaif_ctrl = arg; ++ int ret; + + while (!kthread_should_stop()) { + if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || +@@ -437,7 +446,13 @@ static int t7xx_dpmaif_tx_hw_push_thread + break; + } + ++ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); ++ if (ret < 0 && ret != -EACCES) ++ return ret; ++ + t7xx_do_tx_hw_push(dpmaif_ctrl); ++ pm_runtime_mark_last_busy(dpmaif_ctrl->dev); ++ pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } + + return 0; +--- a/drivers/net/wwan/t7xx/t7xx_pci.c ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + + #include "t7xx_mhccif.h" +@@ -44,6 +45,7 @@ + #define T7XX_PCI_EREG_BASE 2 + + #define PM_ACK_TIMEOUT_MS 1500 ++#define PM_AUTOSUSPEND_MS 20000 + #define PM_RESOURCE_POLL_TIMEOUT_US 10000 + #define PM_RESOURCE_POLL_STEP_US 100 + +@@ -82,6 +84,8 @@ static int t7xx_pci_pm_init(struct t7xx_ + DPM_FLAG_NO_DIRECT_COMPLETE); + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); ++ pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); ++ pm_runtime_use_autosuspend(&pdev->dev); + + return t7xx_wait_pm_config(t7xx_dev); + } +@@ -96,6 +100,8 @@ void t7xx_pci_pm_init_late(struct t7xx_p + D2H_INT_RESUME_ACK_AP); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); ++ ++ pm_runtime_put_noidle(&t7xx_dev->pdev->dev); + } + + static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) +@@ -104,6 +110,9 @@ static int t7xx_pci_pm_reinit(struct t7x + * so just roll back PM setting to the init setting. 
+ */ + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); ++ ++ pm_runtime_get_noresume(&t7xx_dev->pdev->dev); ++ + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + return t7xx_wait_pm_config(t7xx_dev); + } +@@ -403,6 +412,7 @@ static int __t7xx_pci_pm_resume(struct p + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); ++ pm_runtime_mark_last_busy(&pdev->dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + return ret; +@@ -439,6 +449,16 @@ static int t7xx_pci_pm_thaw(struct devic + return __t7xx_pci_pm_resume(to_pci_dev(dev), false); + } + ++static int t7xx_pci_pm_runtime_suspend(struct device *dev) ++{ ++ return __t7xx_pci_pm_suspend(to_pci_dev(dev)); ++} ++ ++static int t7xx_pci_pm_runtime_resume(struct device *dev) ++{ ++ return __t7xx_pci_pm_resume(to_pci_dev(dev), true); ++} ++ + static const struct dev_pm_ops t7xx_pci_pm_ops = { + .suspend = t7xx_pci_pm_suspend, + .resume = t7xx_pci_pm_resume, +@@ -448,6 +468,8 @@ static const struct dev_pm_ops t7xx_pci_ + .poweroff = t7xx_pci_pm_suspend, + .restore = t7xx_pci_pm_resume, + .restore_noirq = t7xx_pci_pm_resume_noirq, ++ .runtime_suspend = t7xx_pci_pm_runtime_suspend, ++ .runtime_resume = t7xx_pci_pm_runtime_resume + }; + + static int t7xx_request_irq(struct pci_dev *pdev) diff --git a/target/linux/generic/backport-5.15/621-v5.19-11-net-wwan-t7xx-Device-deep-sleep-lock-unlock.patch b/target/linux/generic/backport-5.15/621-v5.19-11-net-wwan-t7xx-Device-deep-sleep-lock-unlock.patch new file mode 100644 index 0000000000..ea108d36f0 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-11-net-wwan-t7xx-Device-deep-sleep-lock-unlock.patch @@ -0,0 +1,368 @@ +From de49ea38ba11c1f0fd9e126e93b2f7eb67ed5020 Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Fri, 6 May 2022 11:13:09 -0700 +Subject: [PATCH] net: wwan: t7xx: Device deep sleep lock/unlock +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Introduce the mechanism to lock/unlock the device 'deep sleep' mode. +When the PCIe link state is L1.2 or L2, the host side still can keep +the device is in D0 state from the host side point of view. At the same +time, if the device's 'deep sleep' mode is unlocked, the device will +go to 'deep sleep' while it is still in D0 state on the host side. + +Signed-off-by: Haijun Liu +Signed-off-by: Chandrashekar Devegowda +Co-developed-by: Ricardo Martinez +Signed-off-by: Ricardo Martinez +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 12 +++ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 14 +++- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 41 +++++++--- + drivers/net/wwan/t7xx/t7xx_mhccif.c | 3 + + drivers/net/wwan/t7xx/t7xx_pci.c | 93 ++++++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_pci.h | 10 +++ + 6 files changed, 158 insertions(+), 15 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -934,6 +934,7 @@ int t7xx_cldma_send_skb(struct cldma_ctr + if (ret < 0 && ret != -EACCES) + return ret; + ++ t7xx_pci_disable_sleep(md_ctrl->t7xx_dev); + queue = &md_ctrl->txq[qno]; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); +@@ -955,6 +956,11 @@ int t7xx_cldma_send_skb(struct cldma_ctr + queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + ++ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { ++ ret = -ETIMEDOUT; ++ break; ++ } ++ + /* Protect the access to the modem for queues operations (resume/start) + * which access shared locations by all the queues. + * cldma_lock is independent of ring_lock which is per queue. +@@ -967,6 +973,11 @@ int t7xx_cldma_send_skb(struct cldma_ctr + } + spin_unlock_irqrestore(&queue->ring_lock, flags); + ++ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { ++ ret = -ETIMEDOUT; ++ break; ++ } ++ + if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); +@@ -977,6 +988,7 @@ int t7xx_cldma_send_skb(struct cldma_ctr + } while (!ret); + + allow_sleep: ++ t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); + return ret; +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -927,8 +927,11 @@ static void t7xx_dpmaif_rxq_work(struct + if (ret < 0 && ret != -EACCES) + return; + +- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); ++ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); ++ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) ++ t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + ++ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + atomic_set(&rxq->rx_processing, 0); +@@ -1138,11 +1141,16 @@ static void t7xx_dpmaif_bat_release_work + if (ret < 0 && ret != -EACCES) + return; + ++ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); ++ + /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ + rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; +- t7xx_dpmaif_bat_release_and_add(rxq); +- t7xx_dpmaif_frag_bat_release_and_add(rxq); ++ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { ++ t7xx_dpmaif_bat_release_and_add(rxq); ++ t7xx_dpmaif_frag_bat_release_and_add(rxq); ++ } + ++ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +@@ -166,20 +166,25 @@ static void t7xx_dpmaif_tx_done(struct w + if (ret < 0 && ret != -EACCES) + return; + +- hw_info = &dpmaif_ctrl->hw_info; +- ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); +- if (ret == -EAGAIN || +- (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && +- t7xx_dpmaif_drb_ring_not_empty(txq))) { +- 
queue_work(dpmaif_ctrl->txq[txq->index].worker, +- &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); +- /* Give the device time to enter the low power state */ +- t7xx_dpmaif_clr_ip_busy_sts(hw_info); +- } else { +- t7xx_dpmaif_clr_ip_busy_sts(hw_info); +- t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); ++ /* The device may be in low power state. Disable sleep if needed */ ++ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); ++ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { ++ hw_info = &dpmaif_ctrl->hw_info; ++ ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); ++ if (ret == -EAGAIN || ++ (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && ++ t7xx_dpmaif_drb_ring_not_empty(txq))) { ++ queue_work(dpmaif_ctrl->txq[txq->index].worker, ++ &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); ++ /* Give the device time to enter the low power state */ ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ } else { ++ t7xx_dpmaif_clr_ip_busy_sts(hw_info); ++ t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); ++ } + } + ++ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } +@@ -405,6 +410,8 @@ static int t7xx_txq_burst_send_skb(struc + + static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) + { ++ bool wait_disable_sleep = true; ++ + do { + struct dpmaif_tx_queue *txq; + int drb_send_cnt; +@@ -420,6 +427,14 @@ static void t7xx_do_tx_hw_push(struct dp + continue; + } + ++ /* Wait for the PCIe resource to unlock */ ++ if (wait_disable_sleep) { ++ if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) ++ return; ++ ++ wait_disable_sleep = false; ++ } ++ + t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, + drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD); + +@@ -450,7 +465,9 @@ static int t7xx_dpmaif_tx_hw_push_thread + if (ret < 0 && ret != -EACCES) + return ret; + ++ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + t7xx_do_tx_hw_push(dpmaif_ctrl); ++ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); + } +--- a/drivers/net/wwan/t7xx/t7xx_mhccif.c ++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c +@@ -59,6 +59,9 @@ static irqreturn_t t7xx_mhccif_isr_threa + + t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); + ++ if (int_status & D2H_INT_DS_LOCK_ACK) ++ complete_all(&t7xx_dev->sleep_lock_acquire); ++ + if (int_status & D2H_INT_SR_ACK) + complete(&t7xx_dev->pm_sr_ack); + +--- a/drivers/net/wwan/t7xx/t7xx_pci.c ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + + #include "t7xx_mhccif.h" + #include "t7xx_modem_ops.h" +@@ -44,6 +45,7 @@ + #define T7XX_PCI_IREG_BASE 0 + #define T7XX_PCI_EREG_BASE 2 + ++#define PM_SLEEP_DIS_TIMEOUT_MS 20 + #define PM_ACK_TIMEOUT_MS 1500 + #define PM_AUTOSUSPEND_MS 20000 + #define PM_RESOURCE_POLL_TIMEOUT_US 10000 +@@ -56,6 +58,21 @@ enum t7xx_pm_state { + MTK_PM_RESUMED, + }; + ++static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable) ++{ ++ void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL; ++ u32 value; ++ ++ value = ioread32(ctrl_reg); ++ ++ if (enable) ++ value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS; ++ else ++ value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS; ++ ++ iowrite32(value, ctrl_reg); ++} ++ + static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) + { + int ret, val; +@@ -76,6 +93,8 @@ static int t7xx_pci_pm_init(struct t7xx_ + + 
INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); + mutex_init(&t7xx_dev->md_pm_entity_mtx); ++ spin_lock_init(&t7xx_dev->md_pm_lock); ++ init_completion(&t7xx_dev->sleep_lock_acquire); + init_completion(&t7xx_dev->pm_sr_ack); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + +@@ -94,6 +113,7 @@ void t7xx_pci_pm_init_late(struct t7xx_p + { + /* Enable the PCIe resource lock only after MD deep sleep is done */ + t7xx_mhccif_mask_clr(t7xx_dev, ++ D2H_INT_DS_LOCK_ACK | + D2H_INT_SUSPEND_ACK | + D2H_INT_RESUME_ACK | + D2H_INT_SUSPEND_ACK_AP | +@@ -159,6 +179,79 @@ int t7xx_pci_pm_entity_unregister(struct + return -ENXIO; + } + ++int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev) ++{ ++ struct device *dev = &t7xx_dev->pdev->dev; ++ int ret; ++ ++ ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, ++ msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS)); ++ if (!ret) ++ dev_err_ratelimited(dev, "Resource wait complete timed out\n"); ++ ++ return ret; ++} ++ ++/** ++ * t7xx_pci_disable_sleep() - Disable deep sleep capability. ++ * @t7xx_dev: MTK device. ++ * ++ * Lock the deep sleep capability, note that the device can still go into deep sleep ++ * state while device is in D0 state, from the host's point-of-view. ++ * ++ * If device is in deep sleep state, wake up the device and disable deep sleep capability. ++ */ ++void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); ++ t7xx_dev->sleep_disable_count++; ++ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) ++ goto unlock_and_complete; ++ ++ if (t7xx_dev->sleep_disable_count == 1) { ++ u32 status; ++ ++ reinit_completion(&t7xx_dev->sleep_lock_acquire); ++ t7xx_dev_set_sleep_capability(t7xx_dev, false); ++ ++ status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); ++ if (status & T7XX_PCIE_RESOURCE_STS_MSK) ++ goto unlock_and_complete; ++ ++ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK); ++ } ++ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); ++ return; ++ ++unlock_and_complete: ++ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); ++ complete_all(&t7xx_dev->sleep_lock_acquire); ++} ++ ++/** ++ * t7xx_pci_enable_sleep() - Enable deep sleep capability. ++ * @t7xx_dev: MTK device. ++ * ++ * After enabling deep sleep, device can enter into deep sleep state. 
++ */ ++void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); ++ t7xx_dev->sleep_disable_count--; ++ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) ++ goto unlock; ++ ++ if (t7xx_dev->sleep_disable_count == 0) ++ t7xx_dev_set_sleep_capability(t7xx_dev, true); ++ ++unlock: ++ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); ++} ++ + static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) + { + unsigned long wait_ret; +--- a/drivers/net/wwan/t7xx/t7xx_pci.h ++++ b/drivers/net/wwan/t7xx/t7xx_pci.h +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + + #include "t7xx_reg.h" +@@ -55,6 +56,9 @@ typedef irqreturn_t (*t7xx_intr_callback + * @md_pm_entity_mtx: protects md_pm_entities list + * @pm_sr_ack: ack from the device when went to sleep or woke up + * @md_pm_state: state for resume/suspend ++ * @md_pm_lock: protects PCIe sleep lock ++ * @sleep_disable_count: PCIe L1.2 lock counter ++ * @sleep_lock_acquire: indicates that sleep has been disabled + */ + struct t7xx_pci_dev { + t7xx_intr_callback intr_handler[EXT_INT_NUM]; +@@ -71,6 +75,9 @@ struct t7xx_pci_dev { + struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ + struct completion pm_sr_ack; + atomic_t md_pm_state; ++ spinlock_t md_pm_lock; /* Protects PCI resource lock */ ++ unsigned int sleep_disable_count; ++ struct completion sleep_lock_acquire; + }; + + enum t7xx_pm_id { +@@ -102,6 +109,9 @@ struct md_pm_entity { + void *entity_param; + }; + ++void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev); ++void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev); ++int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev); + int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); + int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); + void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); diff --git a/target/linux/generic/backport-5.15/621-v5.19-12-net-wwan-t7xx-Fix-return-type-of-t7xx_dl_add_timedout.patch b/target/linux/generic/backport-5.15/621-v5.19-12-net-wwan-t7xx-Fix-return-type-of-t7xx_dl_add_timedout.patch new file mode 100644 index 0000000000..2b9a50d6b1 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-12-net-wwan-t7xx-Fix-return-type-of-t7xx_dl_add_timedout.patch @@ -0,0 +1,34 @@ +From b321dfafb0b99e285d14bcaae00b4f9093556eb6 Mon Sep 17 00:00:00 2001 +From: YueHaibing +Date: Fri, 13 May 2022 15:56:11 +0800 +Subject: [PATCH] net: wwan: t7xx: Fix return type of t7xx_dl_add_timedout() + +t7xx_dl_add_timedout() now return int 'ret', but the return type +is bool. Change the return type to int for furthor errcode upstream. + +Signed-off-by: YueHaibing +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_dpmaif.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_dpmaif.c ++++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c +@@ -1043,15 +1043,13 @@ unsigned int t7xx_dpmaif_dl_dlq_pit_get_ + return value & DPMAIF_DL_RD_WR_IDX_MSK; + } + +-static bool t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) ++static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) + { + u32 value; +- int ret; + +- ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, ++ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); +- return ret; + } + + int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt) diff --git a/target/linux/generic/backport-5.15/621-v5.19-13-net-wwan-t7xx-Avoid-calls-to-skb_data_area_size.patch b/target/linux/generic/backport-5.15/621-v5.19-13-net-wwan-t7xx-Avoid-calls-to-skb_data_area_size.patch new file mode 100644 index 0000000000..22663f7985 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-13-net-wwan-t7xx-Avoid-calls-to-skb_data_area_size.patch @@ -0,0 +1,75 @@ +From 262d98b1193fec68c66f3d57772b72240fc4b9da Mon Sep 17 00:00:00 2001 +From: Ricardo Martinez +Date: Fri, 13 May 2022 10:33:59 -0700 +Subject: [PATCH] net: wwan: t7xx: Avoid calls to skb_data_area_size() + +skb_data_area_size() helper was used to calculate the size of the +DMA mapped buffer passed to the HW. Instead of doing this, use the +size passed to allocate the skbs. + +Signed-off-by: Ricardo Martinez +Reviewed-by: Andy Shevchenko +Reviewed-by: Sergey Ryazanov +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 7 +++---- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 6 ++---- + 2 files changed, 5 insertions(+), 8 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -97,8 +97,7 @@ static int t7xx_cldma_alloc_and_map_skb( + if (!req->skb) + return -ENOMEM; + +- req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, +- skb_data_area_size(req->skb), DMA_FROM_DEVICE); ++ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE); + if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { + dev_kfree_skb_any(req->skb); + req->skb = NULL; +@@ -154,7 +153,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru + + if (req->mapped_buff) { + dma_unmap_single(md_ctrl->dev, req->mapped_buff, +- skb_data_area_size(skb), DMA_FROM_DEVICE); ++ queue->tr_ring->pkt_size, DMA_FROM_DEVICE); + req->mapped_buff = 0; + } + +@@ -376,7 +375,7 @@ static void t7xx_cldma_ring_free(struct + list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { + if (req_cur->mapped_buff && req_cur->skb) { + dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, +- skb_data_area_size(req_cur->skb), tx_rx); ++ ring->pkt_size, tx_rx); + req_cur->mapped_buff = 0; + } + +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -151,14 +151,12 @@ static bool t7xx_alloc_and_map_skb_info( + { + dma_addr_t data_bus_addr; + struct sk_buff *skb; +- size_t data_len; + + skb = __dev_alloc_skb(size, GFP_KERNEL); + if (!skb) + return false; + +- data_len = skb_data_area_size(skb); +- data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE); ++ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE); + if 
(dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { + dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); +@@ -167,7 +165,7 @@ static bool t7xx_alloc_and_map_skb_info( + + cur_skb->skb = skb; + cur_skb->data_bus_addr = data_bus_addr; +- cur_skb->data_len = data_len; ++ cur_skb->data_len = size; + + return true; + } diff --git a/target/linux/generic/backport-5.15/621-v5.19-14-net-wwan-t7xx-Fix-smatch-errors.patch b/target/linux/generic/backport-5.15/621-v5.19-14-net-wwan-t7xx-Fix-smatch-errors.patch new file mode 100644 index 0000000000..a51d7873fe --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-14-net-wwan-t7xx-Fix-smatch-errors.patch @@ -0,0 +1,71 @@ +From 86afd5a0e78eb9b84b158b33d85f711c5f748fd1 Mon Sep 17 00:00:00 2001 +From: Ricardo Martinez +Date: Wed, 18 May 2022 12:55:29 -0700 +Subject: [PATCH] net: wwan: t7xx: Fix smatch errors + +t7xx_request_irq() error: uninitialized symbol 'ret'. + +t7xx_core_hk_handler() error: potentially dereferencing uninitialized 'event'. +If the condition to enter the loop that waits for the handshake event +is false on the first iteration then the uninitialized 'event' will be +dereferenced, fix this by initializing 'event' to NULL. + +t7xx_port_proxy_recv_skb() warn: variable dereferenced before check 'skb'. +No need to check skb at t7xx_port_proxy_recv_skb() since we know it +is always called with a valid skb by t7xx_cldma_gpd_rx_from_q(). + +Reported-by: Dan Carpenter +Signed-off-by: Ricardo Martinez +Link: https://lore.kernel.org/r/20220518195529.126246-1-ricardo.martinez@linux.intel.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 4 ++-- + drivers/net/wwan/t7xx/t7xx_pci.c | 2 +- + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 3 --- + 3 files changed, 3 insertions(+), 6 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -458,9 +458,9 @@ static void t7xx_core_hk_handler(struct + enum t7xx_fsm_event_state event_id, + enum t7xx_fsm_event_state err_detect) + { ++ struct t7xx_fsm_event *event = NULL, *event_next; + struct t7xx_sys_info *core_info = &md->core_md; + struct device *dev = &md->t7xx_dev->pdev->dev; +- struct t7xx_fsm_event *event, *event_next; + unsigned long flags; + int ret; + +@@ -493,7 +493,7 @@ static void t7xx_core_hk_handler(struct + goto err_free_event; + } + +- if (ctl->exp_flg) ++ if (!event || ctl->exp_flg) + goto err_free_event; + + ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); +--- a/drivers/net/wwan/t7xx/t7xx_pci.c ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -568,7 +568,7 @@ static const struct dev_pm_ops t7xx_pci_ + static int t7xx_request_irq(struct pci_dev *pdev) + { + struct t7xx_pci_dev *t7xx_dev; +- int ret, i; ++ int ret = 0, i; + + t7xx_dev = pci_get_drvdata(pdev); + +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -350,9 +350,6 @@ static int t7xx_port_proxy_recv_skb(stru + u16 seq_num, channel; + int ret; + +- if (!skb) +- return -EINVAL; +- + channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status)); + if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel); diff --git a/target/linux/generic/backport-5.15/621-v5.19-15-net-wwan-t7xx-use-GFP_ATOMIC-under-spin-lock-in.patch b/target/linux/generic/backport-5.15/621-v5.19-15-net-wwan-t7xx-use-GFP_ATOMIC-under-spin-lock-in.patch new file mode 100644 index 
0000000000..4af32a739d --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v5.19-15-net-wwan-t7xx-use-GFP_ATOMIC-under-spin-lock-in.patch @@ -0,0 +1,61 @@ +From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001 +From: Yang Yingliang +Date: Thu, 19 May 2022 11:21:08 +0800 +Subject: [PATCH] net: wwan: t7xx: use GFP_ATOMIC under spin lock in + t7xx_cldma_gpd_set_next_ptr() + +Sometimes t7xx_cldma_gpd_set_next_ptr() is called under spin lock, +so add 'gfp_mask' parameter in t7xx_cldma_gpd_set_next_ptr() to pass +the flag. + +Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface") +Reported-by: Hulk Robot +Signed-off-by: Yang Yingliang +Reviewed-by: Loic Poulain +Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -91,9 +91,9 @@ static void t7xx_cldma_gpd_set_next_ptr( + } + + static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, +- size_t size) ++ size_t size, gfp_t gfp_mask) + { +- req->skb = __dev_alloc_skb(size, GFP_KERNEL); ++ req->skb = __dev_alloc_skb(size, gfp_mask); + if (!req->skb) + return -ENOMEM; + +@@ -174,7 +174,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru + spin_unlock_irqrestore(&queue->ring_lock, flags); + req = queue->rx_refill; + +- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size); ++ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL); + if (ret) + return ret; + +@@ -402,7 +402,7 @@ static struct cldma_request *t7xx_alloc_ + if (!req->gpd) + goto err_free_req; + +- val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size); ++ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL); + if (val) + goto err_free_pool; + +@@ -801,7 +801,7 @@ static int t7xx_cldma_clear_rxq(struct c + if (req->skb) + continue; + +- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size); ++ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC); + if (ret) + break; + diff --git a/target/linux/generic/backport-5.15/621-v6.1-16-net-wwan-t7xx-Fix-return-type-of-t7xx_ccmni_start_xmit.patch b/target/linux/generic/backport-5.15/621-v6.1-16-net-wwan-t7xx-Fix-return-type-of-t7xx_ccmni_start_xmit.patch new file mode 100644 index 0000000000..864ef31693 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.1-16-net-wwan-t7xx-Fix-return-type-of-t7xx_ccmni_start_xmit.patch @@ -0,0 +1,36 @@ +From 73c99e26036529e633a0f2d628ad7ddff6594668 Mon Sep 17 00:00:00 2001 +From: Nathan Huckleberry +Date: Mon, 12 Sep 2022 14:45:10 -0700 +Subject: [PATCH] net: wwan: t7xx: Fix return type of t7xx_ccmni_start_xmit + +The ndo_start_xmit field in net_device_ops is expected to be of type +netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev). + +The mismatched return type breaks forward edge kCFI since the underlying +function definition does not match the function hook definition. + +The return type of t7xx_ccmni_start_xmit should be changed from int to +netdev_tx_t. 
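
For reference only (not part of the upstream patch; names below are illustrative, the real handler is t7xx_ccmni_start_xmit in t7xx_netdev.c): a minimal sketch of the hook shape that forward-edge kCFI checks against — ndo_start_xmit must return netdev_tx_t rather than int.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch: shows the prototype expected by the
 * net_device_ops->ndo_start_xmit field; not the t7xx implementation.
 */
static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* A conforming handler returns NETDEV_TX_OK or NETDEV_TX_BUSY,
	 * never a plain int error code.
	 */
	dev_kfree_skb_any(skb);		/* toy example: simply drop the packet */
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit = demo_start_xmit,	/* type now matches the hook definition */
};

With the matching return type, the indirect call through ndo_start_xmit passes the kCFI type check instead of tripping it at runtime.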
+ +Reported-by: Dan Carpenter +Link: https://github.com/ClangBuiltLinux/linux/issues/1703 +Cc: llvm@lists.linux.dev +Signed-off-by: Nathan Huckleberry +Acked-by: Sergey Ryazanov +Link: https://lore.kernel.org/r/20220912214510.929070-1-nhuck@google.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/t7xx/t7xx_netdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -74,7 +74,7 @@ static int t7xx_ccmni_send_packet(struct + return 0; + } + +-static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) ++static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + int skb_len = skb->len; diff --git a/target/linux/generic/backport-5.15/621-v6.1-21-net-wwan-t7xx-Fix-the-ACPI-memory-leak.patch b/target/linux/generic/backport-5.15/621-v6.1-21-net-wwan-t7xx-Fix-the-ACPI-memory-leak.patch new file mode 100644 index 0000000000..4c76e77c9a --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.1-21-net-wwan-t7xx-Fix-the-ACPI-memory-leak.patch @@ -0,0 +1,28 @@ +From 08e8a949f684e1fbc4b1efd2337d72ec8f3613d9 Mon Sep 17 00:00:00 2001 +From: Hanjun Guo +Date: Tue, 22 Nov 2022 20:19:40 +0800 +Subject: [PATCH] net: wwan: t7xx: Fix the ACPI memory leak + +The ACPI buffer memory (buffer.pointer) should be freed as the +buffer is not used after acpi_evaluate_object(), free it to +prevent memory leak. + +Fixes: 13e920d93e37 ("net: wwan: t7xx: Add core components") +Signed-off-by: Hanjun Guo +Link: https://lore.kernel.org/r/1669119580-28977-1-git-send-email-guohanjun@huawei.com +Signed-off-by: Paolo Abeni +--- + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -165,6 +165,8 @@ static int t7xx_acpi_reset(struct t7xx_p + return -EFAULT; + } + ++ kfree(buffer.pointer); ++ + #endif + return 0; + } diff --git a/target/linux/generic/backport-5.15/621-v6.2-17-net-wwan-t7xx-use-union-to-group-port-type-specific-data.patch b/target/linux/generic/backport-5.15/621-v6.2-17-net-wwan-t7xx-use-union-to-group-port-type-specific-data.patch new file mode 100644 index 0000000000..075cc529fc --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.2-17-net-wwan-t7xx-use-union-to-group-port-type-specific-data.patch @@ -0,0 +1,79 @@ +From fece7a8c65d1476b901b969a07b2979e1b459e66 Mon Sep 17 00:00:00 2001 +From: M Chetan Kumar +Date: Fri, 28 Oct 2022 21:04:50 +0530 +Subject: [PATCH] net: wwan: t7xx: use union to group port type specific data + +Use union inside t7xx_port to group port type specific data members. + +Signed-off-by: M Chetan Kumar +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_port.h | 6 +++++- + drivers/net/wwan/t7xx/t7xx_port_wwan.c | 16 ++++++++-------- + 2 files changed, 13 insertions(+), 9 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -99,7 +99,6 @@ struct t7xx_port_conf { + struct t7xx_port { + /* Members not initialized in definition */ + const struct t7xx_port_conf *port_conf; +- struct wwan_port *wwan_port; + struct t7xx_pci_dev *t7xx_dev; + struct device *dev; + u16 seq_nums[2]; /* TX/RX sequence numbers */ +@@ -122,6 +121,11 @@ struct t7xx_port { + int rx_length_th; + bool chan_enable; + struct task_struct *thread; ++ union { ++ struct { ++ struct wwan_port *wwan_port; ++ } wwan; ++ }; + }; + + struct sk_buff *t7xx_port_alloc_skb(int payload); +--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c +@@ -109,12 +109,12 @@ static int t7xx_port_wwan_init(struct t7 + + static void t7xx_port_wwan_uninit(struct t7xx_port *port) + { +- if (!port->wwan_port) ++ if (!port->wwan.wwan_port) + return; + + port->rx_length_th = 0; +- wwan_remove_port(port->wwan_port); +- port->wwan_port = NULL; ++ wwan_remove_port(port->wwan.wwan_port); ++ port->wwan.wwan_port = NULL; + } + + static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb) +@@ -129,7 +129,7 @@ static int t7xx_port_wwan_recv_skb(struc + return 0; + } + +- wwan_port_rx(port->wwan_port, skb); ++ wwan_port_rx(port->wwan.wwan_port, skb); + return 0; + } + +@@ -158,10 +158,10 @@ static void t7xx_port_wwan_md_state_noti + if (state != MD_STATE_READY) + return; + +- if (!port->wwan_port) { +- port->wwan_port = wwan_create_port(port->dev, port_conf->port_type, +- &wwan_ops, port); +- if (IS_ERR(port->wwan_port)) ++ if (!port->wwan.wwan_port) { ++ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type, ++ &wwan_ops, port); ++ if (IS_ERR(port->wwan.wwan_port)) + dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); + } + } diff --git a/target/linux/generic/backport-5.15/621-v6.2-18-net-wwan-t7xx-Add-port-for-modem-logging.patch b/target/linux/generic/backport-5.15/621-v6.2-18-net-wwan-t7xx-Add-port-for-modem-logging.patch new file mode 100644 index 0000000000..b8a5c9312d --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.2-18-net-wwan-t7xx-Add-port-for-modem-logging.patch @@ -0,0 +1,237 @@ +From 3349e4a48acb0923fa98d2beac82a833a76116cb Mon Sep 17 00:00:00 2001 +From: M Chetan Kumar +Date: Fri, 28 Oct 2022 21:05:34 +0530 +Subject: [PATCH] net: wwan: t7xx: Add port for modem logging + +The Modem Logging (MDL) port provides an interface to collect modem +logs for debugging purposes. MDL is supported by the relay interface, +and the mtk_t7xx port infrastructure. MDL allows user-space apps to +control logging via mbim command and to collect logs via the relay +interface, while port infrastructure facilitates communication between +the driver and the modem. + +Signed-off-by: Moises Veleta +Signed-off-by: M Chetan Kumar +Signed-off-by: Devegowda Chandrashekar +Acked-by: Ricardo Martinez +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/Kconfig | 1 + + drivers/net/wwan/t7xx/Makefile | 3 + + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 2 + + drivers/net/wwan/t7xx/t7xx_pci.h | 3 + + drivers/net/wwan/t7xx/t7xx_port.h | 3 + + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 12 +++ + drivers/net/wwan/t7xx/t7xx_port_proxy.h | 4 + + drivers/net/wwan/t7xx/t7xx_port_trace.c | 116 ++++++++++++++++++++++++ + 8 files changed, 144 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/t7xx_port_trace.c + +--- a/drivers/net/wwan/Kconfig ++++ b/drivers/net/wwan/Kconfig +@@ -82,6 +82,7 @@ config IOSM + config MTK_T7XX + tristate "MediaTek PCIe 5G WWAN modem T7xx device" + depends on PCI ++ select RELAY if WWAN_DEBUGFS + help + Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device. + Adapts WWAN framework and provides network interface like wwan0 +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -18,3 +18,6 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_hif_dpmaif_rx.o \ + t7xx_dpmaif.o \ + t7xx_netdev.o ++ ++mtk_t7xx-$(CONFIG_WWAN_DEBUGFS) += \ ++ t7xx_port_trace.o \ +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1018,6 +1018,8 @@ static int t7xx_cldma_late_init(struct c + dev_err(md_ctrl->dev, "control TX ring init fail\n"); + goto err_free_tx_ring; + } ++ ++ md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU; + } + + for (j = 0; j < CLDMA_RXQ_NUM; j++) { +--- a/drivers/net/wwan/t7xx/t7xx_pci.h ++++ b/drivers/net/wwan/t7xx/t7xx_pci.h +@@ -78,6 +78,9 @@ struct t7xx_pci_dev { + spinlock_t md_pm_lock; /* Protects PCI resource lock */ + unsigned int sleep_disable_count; + struct completion sleep_lock_acquire; ++#ifdef CONFIG_WWAN_DEBUGFS ++ struct dentry *debugfs_dir; ++#endif + }; + + enum t7xx_pm_id { +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -125,6 +125,9 @@ struct t7xx_port { + struct { + struct wwan_port *wwan_port; + } wwan; ++ struct { ++ struct rchan *relaych; ++ } log; + }; + }; + +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -70,6 +70,18 @@ static const struct t7xx_port_conf t7xx_ + .name = "MBIM", + .port_type = WWAN_PORT_MBIM, + }, { ++#ifdef CONFIG_WWAN_DEBUGFS ++ .tx_ch = PORT_CH_MD_LOG_TX, ++ .rx_ch = PORT_CH_MD_LOG_RX, ++ .txq_index = 7, ++ .rxq_index = 7, ++ .txq_exp_index = 7, ++ .rxq_exp_index = 7, ++ .path_id = CLDMA_ID_MD, ++ .ops = &t7xx_trace_port_ops, ++ .name = "mdlog", ++ }, { ++#endif + .tx_ch = PORT_CH_CONTROL_TX, + .rx_ch = PORT_CH_CONTROL_RX, + .txq_index = Q_IDX_CTRL, +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h +@@ -87,6 +87,10 @@ struct ctrl_msg_header { + extern struct port_ops wwan_sub_port_ops; + extern struct port_ops ctl_port_ops; + ++#ifdef CONFIG_WWAN_DEBUGFS ++extern struct port_ops t7xx_trace_port_ops; ++#endif ++ + void t7xx_port_proxy_reset(struct port_proxy *port_prox); + void t7xx_port_proxy_uninit(struct port_proxy *port_prox); + int t7xx_port_proxy_init(struct t7xx_modem *md); +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c +@@ -0,0 +1,116 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2022 Intel Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "t7xx_port.h" ++#include "t7xx_port_proxy.h" ++#include "t7xx_state_monitor.h" ++ ++#define T7XX_TRC_SUB_BUFF_SIZE 131072 ++#define T7XX_TRC_N_SUB_BUFF 32 ++ ++static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename, ++ struct dentry *parent, ++ umode_t mode, ++ struct rchan_buf *buf, ++ int *is_global) ++{ ++ *is_global = 1; ++ return debugfs_create_file(filename, mode, parent, buf, ++ &relay_file_operations); ++} ++ ++static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry) ++{ ++ debugfs_remove(dentry); ++ return 0; ++} ++ ++static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf, ++ void *prev_subbuf, size_t prev_padding) ++{ ++ if (relay_buf_full(buf)) { ++ pr_err_ratelimited("Relay_buf full dropping traces"); ++ return 0; ++ } ++ ++ return 1; ++} ++ ++static struct rchan_callbacks relay_callbacks = { ++ .subbuf_start = t7xx_trace_subbuf_start_handler, ++ .create_buf_file = t7xx_trace_create_buf_file_handler, ++ .remove_buf_file = t7xx_trace_remove_buf_file_handler, ++}; ++ ++static void t7xx_trace_port_uninit(struct t7xx_port *port) ++{ ++ struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir; ++ struct rchan *relaych = port->log.relaych; ++ ++ if (!relaych) ++ return; ++ ++ relay_close(relaych); ++ debugfs_remove_recursive(debugfs_dir); ++} ++ ++static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb) ++{ ++ struct rchan *relaych = port->log.relaych; ++ ++ if (!relaych) ++ return -EINVAL; ++ ++ relay_write(relaych, skb->data, skb->len); ++ dev_kfree_skb(skb); ++ return 0; ++} ++ ++static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state) ++{ ++ struct rchan *relaych = port->log.relaych; ++ struct dentry *debugfs_wwan_dir; ++ struct dentry *debugfs_dir; ++ ++ if (state != MD_STATE_READY || relaych) ++ return; ++ ++ debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev); ++ if (IS_ERR(debugfs_wwan_dir)) ++ return; ++ ++ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir); ++ if (IS_ERR_OR_NULL(debugfs_dir)) { ++ wwan_put_debugfs_dir(debugfs_wwan_dir); ++ dev_err(port->dev, "Unable to create debugfs for trace"); ++ return; ++ } ++ ++ relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE, ++ T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL); ++ if (!relaych) ++ goto err_rm_debugfs_dir; ++ ++ wwan_put_debugfs_dir(debugfs_wwan_dir); ++ port->log.relaych = relaych; ++ port->t7xx_dev->debugfs_dir = debugfs_dir; ++ return; ++ ++err_rm_debugfs_dir: ++ debugfs_remove_recursive(debugfs_dir); ++ wwan_put_debugfs_dir(debugfs_wwan_dir); ++ dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name); ++} ++ ++struct port_ops t7xx_trace_port_ops = { ++ .recv_skb = t7xx_trace_port_recv_skb, ++ .uninit = t7xx_trace_port_uninit, ++ .md_state_notify = t7xx_port_trace_md_state_notify, ++}; diff --git a/target/linux/generic/backport-5.15/621-v6.2-19-net-wwan-t7xx-Use-needed_headroom-instead-of.patch b/target/linux/generic/backport-5.15/621-v6.2-19-net-wwan-t7xx-Use-needed_headroom-instead-of.patch new file mode 100644 index 0000000000..a464ee3143 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.2-19-net-wwan-t7xx-Use-needed_headroom-instead-of.patch @@ -0,0 +1,31 @@ +From c053d7b6bdcb45780036b32be6a950f71a78bf52 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= +Date: Thu, 3 Nov 2022 14:48:28 +0530 +Subject: [PATCH] net: wwan: t7xx: Use needed_headroom instead of 
+ hard_header_len +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +hard_header_len is used by gro_list_prepare() but on Rx, there +is no header so use needed_headroom instead. + +Signed-off-by: Ilpo Järvinen +Signed-off-by: Sreehari Kancharla +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_netdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -161,7 +161,7 @@ static void t7xx_ccmni_post_stop(struct + + static void t7xx_ccmni_wwan_setup(struct net_device *dev) + { +- dev->hard_header_len += sizeof(struct ccci_header); ++ dev->needed_headroom += sizeof(struct ccci_header); + + dev->mtu = ETH_DATA_LEN; + dev->max_mtu = CCMNI_MTU_MAX; diff --git a/target/linux/generic/backport-5.15/621-v6.2-20-net-wwan-t7xx-Add-NAPI-support.patch b/target/linux/generic/backport-5.15/621-v6.2-20-net-wwan-t7xx-Add-NAPI-support.patch new file mode 100644 index 0000000000..2310a49801 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.2-20-net-wwan-t7xx-Add-NAPI-support.patch @@ -0,0 +1,652 @@ +From 5545b7b9f294de7f95ec6a7cb1de0db52296001c Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Thu, 3 Nov 2022 14:48:29 +0530 +Subject: [PATCH] net: wwan: t7xx: Add NAPI support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Replace the work queue based RX flow with a NAPI implementation +Remove rx_thread and dpmaif_rxq_work. +Enable GRO on RX path. +Introduce dummy network device. its responsibility is + - Binds one NAPI object for each DL HW queue and acts as + the agent of all those network devices. + - Use NAPI object to poll DL packets. + - Helps to dispatch each packet to the network interface. + +Signed-off-by: Haijun Liu +Co-developed-by: Sreehari Kancharla +Signed-off-by: Sreehari Kancharla +Signed-off-by: Chandrashekar Devegowda +Acked-by: Ricardo Martinez +Acked-by: M Chetan Kumar +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 14 +- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 218 +++++++-------------- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 1 + + drivers/net/wwan/t7xx/t7xx_netdev.c | 89 ++++++++- + drivers/net/wwan/t7xx/t7xx_netdev.h | 5 + + 5 files changed, 161 insertions(+), 166 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h +@@ -20,6 +20,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -109,20 +110,14 @@ struct dpmaif_rx_queue { + struct dpmaif_bat_request *bat_req; + struct dpmaif_bat_request *bat_frag; + +- wait_queue_head_t rx_wq; +- struct task_struct *rx_thread; +- struct sk_buff_head skb_list; +- unsigned int skb_list_max_len; +- +- struct workqueue_struct *worker; +- struct work_struct dpmaif_rxq_work; +- + atomic_t rx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned int expect_pit_seq; + unsigned int pit_remain_release_cnt; + struct dpmaif_cur_rx_skb_info rx_data_info; ++ struct napi_struct napi; ++ bool sleep_lock_pending; + }; + + struct dpmaif_tx_queue { +@@ -168,7 +163,8 @@ enum dpmaif_txq_state { + struct dpmaif_callbacks { + void (*state_notify)(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int txq_number); +- void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb); ++ void (*recv_skb)(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb, ++ struct napi_struct *napi); + }; + + struct dpmaif_ctrl { +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -45,6 +45,7 @@ + #include "t7xx_dpmaif.h" + #include "t7xx_hif_dpmaif.h" + #include "t7xx_hif_dpmaif_rx.h" ++#include "t7xx_netdev.h" + #include "t7xx_pci.h" + + #define DPMAIF_BAT_COUNT 8192 +@@ -76,43 +77,6 @@ static unsigned int t7xx_normal_pit_bid( + return value; + } + +-static int t7xx_dpmaif_net_rx_push_thread(void *arg) +-{ +- struct dpmaif_rx_queue *q = arg; +- struct dpmaif_ctrl *hif_ctrl; +- struct dpmaif_callbacks *cb; +- +- hif_ctrl = q->dpmaif_ctrl; +- cb = hif_ctrl->callbacks; +- +- while (!kthread_should_stop()) { +- struct sk_buff *skb; +- unsigned long flags; +- +- if (skb_queue_empty(&q->skb_list)) { +- if (wait_event_interruptible(q->rx_wq, +- !skb_queue_empty(&q->skb_list) || +- kthread_should_stop())) +- continue; +- +- if (kthread_should_stop()) +- break; +- } +- +- spin_lock_irqsave(&q->skb_list.lock, flags); +- skb = __skb_dequeue(&q->skb_list); +- spin_unlock_irqrestore(&q->skb_list.lock, flags); +- +- if (!skb) +- continue; +- +- cb->recv_skb(hif_ctrl->t7xx_dev, skb); +- cond_resched(); +- } +- +- return 0; +-} +- + static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int bat_cnt) + { +@@ -726,21 +690,10 @@ static int t7xx_dpmaifq_rx_notify_hw(str + return ret; + } + +-static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb) +-{ +- unsigned long flags; +- +- spin_lock_irqsave(&rxq->skb_list.lock, flags); +- if (rxq->skb_list.qlen < rxq->skb_list_max_len) +- __skb_queue_tail(&rxq->skb_list, skb); +- else +- dev_kfree_skb_any(skb); +- spin_unlock_irqrestore(&rxq->skb_list.lock, flags); +-} +- + static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq, + struct dpmaif_cur_rx_skb_info *skb_info) + { ++ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + struct sk_buff *skb = skb_info->cur_skb; + struct t7xx_skb_cb *skb_cb; + u8 netif_id; +@@ -758,11 +711,11 @@ static void 
t7xx_dpmaif_rx_skb(struct dp + skb_cb = T7XX_SKB_CB(skb); + skb_cb->netif_idx = netif_id; + skb_cb->rx_pkt_type = skb_info->pkt_type; +- t7xx_dpmaif_rx_skb_enqueue(rxq, skb); ++ dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi); + } + + static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt, +- const unsigned long timeout) ++ const unsigned int budget, int *once_more) + { + unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0; + struct device *dev = rxq->dpmaif_ctrl->dev; +@@ -777,13 +730,14 @@ static int t7xx_dpmaif_rx_start(struct d + struct dpmaif_pit *pkt_info; + u32 val; + +- if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout)) ++ if (!skb_info->msg_pit_received && recv_skb_cnt >= budget) + break; + + pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit; + if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) { + dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index); +- return -EAGAIN; ++ *once_more = 1; ++ return recv_skb_cnt; + } + + val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header)); +@@ -817,12 +771,7 @@ static int t7xx_dpmaif_rx_start(struct d + } + + memset(skb_info, 0, sizeof(*skb_info)); +- + recv_skb_cnt++; +- if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) { +- wake_up_all(&rxq->rx_wq); +- recv_skb_cnt = 0; +- } + } + } + +@@ -837,16 +786,13 @@ static int t7xx_dpmaif_rx_start(struct d + } + } + +- if (recv_skb_cnt) +- wake_up_all(&rxq->rx_wq); +- + if (!ret) + ret = t7xx_dpmaifq_rx_notify_hw(rxq); + + if (ret) + return ret; + +- return rx_cnt; ++ return recv_skb_cnt; + } + + static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq) +@@ -863,53 +809,30 @@ static unsigned int t7xx_dpmaifq_poll_pi + return pit_cnt; + } + +-static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, +- const unsigned int q_num, const unsigned int budget) ++static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, ++ const unsigned int q_num, ++ const unsigned int budget, int *once_more) + { + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; +- unsigned long time_limit; + unsigned int cnt; ++ int ret = 0; + +- time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS); +- +- while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) { +- unsigned int rd_cnt; +- int real_cnt; +- +- rd_cnt = min(cnt, budget); +- +- real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit); +- if (real_cnt < 0) +- return real_cnt; +- +- if (real_cnt < cnt) +- return -EAGAIN; +- } +- +- return 0; +-} ++ cnt = t7xx_dpmaifq_poll_pit(rxq); ++ if (!cnt) ++ return ret; + +-static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq) +-{ +- struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; +- int ret; ++ ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more); ++ if (ret < 0) ++ dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret); + +- ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget); +- if (ret < 0) { +- /* Try one more time */ +- queue_work(rxq->worker, &rxq->dpmaif_rxq_work); +- t7xx_dpmaif_clr_ip_busy_sts(hw_info); +- } else { +- t7xx_dpmaif_clr_ip_busy_sts(hw_info); +- t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index); +- } ++ return ret; + } + +-static void t7xx_dpmaif_rxq_work(struct work_struct *work) ++int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget) + { +- struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); +- struct 
dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; +- int ret; ++ struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi); ++ struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev; ++ int ret, once_more = 0, work_done = 0; + + atomic_set(&rxq->rx_processing, 1); + /* Ensure rx_processing is changed to 1 before actually begin RX flow */ +@@ -917,22 +840,52 @@ static void t7xx_dpmaif_rxq_work(struct + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); +- dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); +- return; ++ dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); ++ return work_done; + } + +- ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); +- if (ret < 0 && ret != -EACCES) +- return; ++ if (!rxq->sleep_lock_pending) { ++ pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev); ++ t7xx_pci_disable_sleep(t7xx_dev); ++ } ++ ++ ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire); ++ if (!ret) { ++ napi_complete_done(napi, work_done); ++ rxq->sleep_lock_pending = true; ++ napi_reschedule(napi); ++ return work_done; ++ } ++ ++ rxq->sleep_lock_pending = false; ++ while (work_done < budget) { ++ int each_budget = budget - work_done; ++ int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index, ++ each_budget, &once_more); ++ if (rx_cnt > 0) ++ work_done += rx_cnt; ++ else ++ break; ++ } + +- t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); +- if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) +- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); ++ if (once_more) { ++ napi_gro_flush(napi, false); ++ work_done = budget; ++ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); ++ } else if (work_done < budget) { ++ napi_complete_done(napi, work_done); ++ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); ++ t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index); ++ } else { ++ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); ++ } + +- t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); +- pm_runtime_mark_last_busy(dpmaif_ctrl->dev); +- pm_runtime_put_autosuspend(dpmaif_ctrl->dev); ++ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); ++ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); ++ pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev); + atomic_set(&rxq->rx_processing, 0); ++ ++ return work_done; + } + + void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) +@@ -947,7 +900,7 @@ void t7xx_dpmaif_irq_rx_done(struct dpma + } + + rxq = &dpmaif_ctrl->rxq[qno]; +- queue_work(rxq->worker, &rxq->dpmaif_rxq_work); ++ napi_schedule(&rxq->napi); + } + + static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl, +@@ -1082,50 +1035,14 @@ int t7xx_dpmaif_rxq_init(struct dpmaif_r + int ret; + + ret = t7xx_dpmaif_rx_alloc(queue); +- if (ret < 0) { ++ if (ret < 0) + dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); +- return ret; +- } +- +- INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work); +- +- queue->worker = alloc_workqueue("dpmaif_rx%d_worker", +- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index); +- if (!queue->worker) { +- ret = -ENOMEM; +- goto err_free_rx_buffer; +- } +- +- init_waitqueue_head(&queue->rx_wq); +- skb_queue_head_init(&queue->skb_list); +- queue->skb_list_max_len = queue->bat_req->pkt_buf_sz; +- queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread, +- queue, "dpmaif_rx%d_push", queue->index); +- +- ret = PTR_ERR_OR_ZERO(queue->rx_thread); +- if (ret) +- goto 
err_free_workqueue; +- +- return 0; +- +-err_free_workqueue: +- destroy_workqueue(queue->worker); +- +-err_free_rx_buffer: +- t7xx_dpmaif_rx_buf_free(queue); + + return ret; + } + + void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue) + { +- if (queue->worker) +- destroy_workqueue(queue->worker); +- +- if (queue->rx_thread) +- kthread_stop(queue->rx_thread); +- +- skb_queue_purge(&queue->skb_list); + t7xx_dpmaif_rx_buf_free(queue); + } + +@@ -1188,8 +1105,6 @@ void t7xx_dpmaif_rx_stop(struct dpmaif_c + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; + int timeout, value; + +- flush_work(&rxq->dpmaif_rxq_work); +- + timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, + !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) +@@ -1205,7 +1120,6 @@ static void t7xx_dpmaif_stop_rxq(struct + { + int cnt, j = 0; + +- flush_work(&rxq->dpmaif_rxq_work); + rxq->que_started = false; + + do { +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h +@@ -112,5 +112,6 @@ int t7xx_dpmaif_bat_alloc(const struct d + const enum bat_type buf_type); + void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, + struct dpmaif_bat_request *bat_req); ++int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget); + + #endif /* __T7XX_HIF_DPMA_RX_H__ */ +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -29,6 +30,7 @@ + #include + #include + #include ++#include + #include + + #include "t7xx_hif_dpmaif_rx.h" +@@ -39,13 +41,47 @@ + #include "t7xx_state_monitor.h" + + #define IP_MUX_SESSION_DEFAULT 0 ++#define SBD_PACKET_TYPE_MASK GENMASK(7, 4) ++ ++static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ if (ctlb->is_napi_en) ++ return; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ napi_enable(ctlb->napi[i]); ++ napi_schedule(ctlb->napi[i]); ++ } ++ ctlb->is_napi_en = true; ++} ++ ++static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ if (!ctlb->is_napi_en) ++ return; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ napi_synchronize(ctlb->napi[i]); ++ napi_disable(ctlb->napi[i]); ++ } ++ ++ ctlb->is_napi_en = false; ++} + + static int t7xx_ccmni_open(struct net_device *dev) + { + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb; + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); ++ if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt)) ++ t7xx_ccmni_enable_napi(ccmni_ctl); ++ + atomic_inc(&ccmni->usage); + return 0; + } +@@ -53,8 +89,12 @@ static int t7xx_ccmni_open(struct net_de + static int t7xx_ccmni_close(struct net_device *dev) + { + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb; + + atomic_dec(&ccmni->usage); ++ if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt)) ++ t7xx_ccmni_disable_napi(ccmni_ctl); ++ + netif_carrier_off(dev); + netif_tx_disable(dev); + return 0; +@@ -127,6 +167,9 @@ static void t7xx_ccmni_start(struct t7xx + netif_carrier_on(ccmni->dev); + } + } ++ ++ if (atomic_read(&ctlb->napi_usr_refcnt)) ++ t7xx_ccmni_enable_napi(ctlb); + } + + static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb) +@@ -149,6 +192,9 @@ static void t7xx_ccmni_post_stop(struct + struct t7xx_ccmni *ccmni; + int i; + ++ if (atomic_read(&ctlb->napi_usr_refcnt)) ++ t7xx_ccmni_disable_napi(ctlb); ++ + for (i = 0; i < 
ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) +@@ -183,6 +229,9 @@ static void t7xx_ccmni_wwan_setup(struct + dev->features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXCSUM; + ++ dev->features |= NETIF_F_GRO; ++ dev->hw_features |= NETIF_F_GRO; ++ + dev->needs_free_netdev = true; + + dev->type = ARPHRD_NONE; +@@ -190,6 +239,34 @@ static void t7xx_ccmni_wwan_setup(struct + dev->netdev_ops = &ccmni_netdev_ops; + } + ++static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ /* one HW, but shared with multiple net devices, ++ * so add a dummy device for NAPI. ++ */ ++ init_dummy_netdev(&ctlb->dummy_dev); ++ atomic_set(&ctlb->napi_usr_refcnt, 0); ++ ctlb->is_napi_en = false; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi; ++ netif_napi_add(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll, ++ NIC_NAPI_POLL_BUDGET); ++ } ++} ++ ++static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ netif_napi_del(ctlb->napi[i]); ++ ctlb->napi[i] = NULL; ++ } ++} ++ + static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, + struct netlink_ext_ack *extack) + { +@@ -311,7 +388,8 @@ static void init_md_status_notifier(stru + t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier); + } + +-static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb) ++static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb, ++ struct napi_struct *napi) + { + struct t7xx_skb_cb *skb_cb; + struct net_device *net_dev; +@@ -321,23 +399,22 @@ static void t7xx_ccmni_recv_skb(struct t + + skb_cb = T7XX_SKB_CB(skb); + netif_id = skb_cb->netif_idx; +- ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id]; ++ ccmni = ccmni_ctlb->ccmni_inst[netif_id]; + if (!ccmni) { + dev_kfree_skb(skb); + return; + } + + net_dev = ccmni->dev; +- skb->dev = net_dev; +- + pkt_type = skb_cb->rx_pkt_type; ++ skb->dev = net_dev; + if (pkt_type == PKT_TYPE_IP6) + skb->protocol = htons(ETH_P_IPV6); + else + skb->protocol = htons(ETH_P_IP); + + skb_len = skb->len; +- netif_rx(skb); ++ napi_gro_receive(napi, skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += skb_len; + } +@@ -404,6 +481,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev + if (!ctlb->hif_ctrl) + return -ENOMEM; + ++ t7xx_init_netdev_napi(ctlb); + init_md_status_notifier(t7xx_dev); + return 0; + } +@@ -419,5 +497,6 @@ void t7xx_ccmni_exit(struct t7xx_pci_dev + ctlb->wwan_is_registered = false; + } + ++ t7xx_uninit_netdev_napi(ctlb); + t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); + } +--- a/drivers/net/wwan/t7xx/t7xx_netdev.h ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.h +@@ -30,6 +30,7 @@ + + #define CCMNI_NETDEV_WDT_TO (1 * HZ) + #define CCMNI_MTU_MAX 3000 ++#define NIC_NAPI_POLL_BUDGET 128 + + struct t7xx_ccmni { + u8 index; +@@ -47,6 +48,10 @@ struct t7xx_ccmni_ctrl { + unsigned int md_sta; + struct t7xx_fsm_notifier md_status_notify; + bool wwan_is_registered; ++ struct net_device dummy_dev; ++ struct napi_struct *napi[RXQ_NUM]; ++ atomic_t napi_usr_refcnt; ++ bool is_napi_en; + }; + + int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev); diff --git a/target/linux/generic/backport-5.15/621-v6.2-22-net-wwan-t7xx-Fix-Runtime-PM-resume-sequence.patch b/target/linux/generic/backport-5.15/621-v6.2-22-net-wwan-t7xx-Fix-Runtime-PM-resume-sequence.patch new file mode 100644 index 0000000000..247a19fe3c --- /dev/null +++ 
b/target/linux/generic/backport-5.15/621-v6.2-22-net-wwan-t7xx-Fix-Runtime-PM-resume-sequence.patch @@ -0,0 +1,154 @@ +From 364d0221f1788e5225006ba7a0026e5968431c29 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Kornel=20Dul=C4=99ba?= +Date: Thu, 26 Jan 2023 13:25:34 +0000 +Subject: [PATCH] net: wwan: t7xx: Fix Runtime PM resume sequence +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Resume device before calling napi_schedule, instead of doing in the napi +poll routine. Polling is done in softrq context. We can't call the PM +resume logic from there as it's blocking and not irq safe. +In order to make it work modify the interrupt handler to be run from irq +handler thread. + +Fixes: 5545b7b9f294 ("net: wwan: t7xx: Add NAPI support") +Signed-off-by: Kornel Dulęba +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 11 +++++++- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 29 +++++++++++++++------- + drivers/net/wwan/t7xx/t7xx_netdev.c | 16 +++++++++++- + 3 files changed, 45 insertions(+), 11 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c +@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handl + } + + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data) ++{ ++ struct dpmaif_isr_para *isr_para = data; ++ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl; ++ + t7xx_dpmaif_irq_cb(isr_para); + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + return IRQ_HANDLED; +@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_ir + t7xx_pcie_mac_clear_int(t7xx_dev, int_type); + + t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler; +- t7xx_dev->intr_thread[int_type] = NULL; ++ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread; + t7xx_dev->callback_param[int_type] = isr_para; + + t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type); +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); ++ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); + dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); + return work_done; + } + +- if (!rxq->sleep_lock_pending) { +- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev); ++ if (!rxq->sleep_lock_pending) + t7xx_pci_disable_sleep(t7xx_dev); +- } + + ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire); + if (!ret) { +@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi + napi_complete_done(napi, work_done); + t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); + t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index); ++ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); ++ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); ++ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); ++ atomic_set(&rxq->rx_processing, 0); + } else { + t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); + } + +- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); +- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); +- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev); +- atomic_set(&rxq->rx_processing, 0); +- + return work_done; + } + + void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) + { + struct dpmaif_rx_queue *rxq; +- int 
qno; ++ struct dpmaif_ctrl *ctrl; ++ int qno, ret; + + qno = ffs(que_mask) - 1; + if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { +@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpma + } + + rxq = &dpmaif_ctrl->rxq[qno]; ++ ctrl = rxq->dpmaif_ctrl; ++ /* We need to make sure that the modem has been resumed before ++ * calling napi. This can't be done inside the polling function ++ * as we could be blocked waiting for device to be resumed, ++ * which can't be done from softirq context the poll function ++ * is running in. ++ */ ++ ret = pm_runtime_resume_and_get(ctrl->dev); ++ if (ret < 0 && ret != -EACCES) { ++ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret); ++ return; ++ } + napi_schedule(&rxq->napi); + } + +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -45,12 +46,25 @@ + + static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb) + { +- int i; ++ struct dpmaif_ctrl *ctrl; ++ int i, ret; ++ ++ ctrl = ctlb->hif_ctrl; + + if (ctlb->is_napi_en) + return; + + for (i = 0; i < RXQ_NUM; i++) { ++ /* The usage count has to be bumped every time before calling ++ * napi_schedule. It will be decresed in the poll routine, ++ * right after napi_complete_done is called. ++ */ ++ ret = pm_runtime_resume_and_get(ctrl->dev); ++ if (ret < 0) { ++ dev_err(ctrl->dev, "Failed to resume device: %d\n", ++ ret); ++ return; ++ } + napi_enable(ctlb->napi[i]); + napi_schedule(ctlb->napi[i]); + } diff --git a/target/linux/generic/backport-5.15/621-v6.2-23-net-wwan-t7xx-Fix-Runtime-PM-initialization.patch b/target/linux/generic/backport-5.15/621-v6.2-23-net-wwan-t7xx-Fix-Runtime-PM-initialization.patch new file mode 100644 index 0000000000..e4443168a5 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.2-23-net-wwan-t7xx-Fix-Runtime-PM-initialization.patch @@ -0,0 +1,34 @@ +From e3d6d152a1cbdee25f2e3962009a2751b54e2297 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Kornel=20Dul=C4=99ba?= +Date: Thu, 26 Jan 2023 13:25:35 +0000 +Subject: [PATCH] net: wwan: t7xx: Fix Runtime PM initialization +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +For PCI devices the Runtime PM refcount is incremented twice: +1. During device enumeration with a call to pm_runtime_forbid. +2. Just before a driver probe logic is called. +Because of that in order to enable Runtime PM on a given device +we have to call both pm_runtime_allow and pm_runtime_put_noidle, +once it's ready to be runtime suspended. +The former was missing causing the pm refcount to never reach 0. + +Fixes: d10b3a695ba0 ("net: wwan: t7xx: Runtime PM") +Signed-off-by: Kornel Dulęba +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_pci.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/wwan/t7xx/t7xx_pci.c ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_p + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + ++ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev); ++ pm_runtime_allow(&t7xx_dev->pdev->dev); + pm_runtime_put_noidle(&t7xx_dev->pdev->dev); + } + diff --git a/target/linux/generic/backport-5.15/621-v6.3-25-net-wwan-t7xx-do-not-compile-with-Werror.patch b/target/linux/generic/backport-5.15/621-v6.3-25-net-wwan-t7xx-do-not-compile-with-Werror.patch new file mode 100644 index 0000000000..656179a6a4 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.3-25-net-wwan-t7xx-do-not-compile-with-Werror.patch @@ -0,0 +1,51 @@ +From 362f0b6678ad1377c322a7dd237ea6785efc7342 Mon Sep 17 00:00:00 2001 +From: "Jiri Slaby (SUSE)" +Date: Fri, 31 Mar 2023 08:35:15 +0200 +Subject: [PATCH] net: wwan: t7xx: do not compile with -Werror + +When playing with various compilers or their versions, some choke on +the t7xx code. For example (with gcc 13): + In file included from ./arch/s390/include/generated/asm/rwonce.h:1, + from ../include/linux/compiler.h:247, + from ../include/linux/build_bug.h:5, + from ../include/linux/bits.h:22, + from ../drivers/net/wwan/t7xx/t7xx_state_monitor.c:17: + In function 'preempt_count', + inlined from 't7xx_fsm_append_event' at ../drivers/net/wwan/t7xx/t7xx_state_monitor.c:439:43: + ../include/asm-generic/rwonce.h:44:26: error: array subscript 0 is outside array bounds of 'const volatile int[0]' [-Werror=array-bounds=] + +There is no reason for any code in the kernel to be built with -Werror +by default. Note that we have generic CONFIG_WERROR. So if anyone wants +-Werror, they can enable that. + +Signed-off-by: Jiri Slaby (SUSE) +Link: https://lore.kernel.org/all/20230330232717.1f8bf5ea@kernel.org/ +Cc: Chandrashekar Devegowda +Cc: Intel Corporation +Cc: Chiranjeevi Rapolu +Cc: Liu Haijun +Cc: M Chetan Kumar +Cc: Ricardo Martinez +Cc: Loic Poulain +Cc: Sergey Ryazanov +Cc: Johannes Berg +Cc: "David S. Miller" +Cc: Eric Dumazet +Cc: Jakub Kicinski +Cc: Paolo Abeni +Cc: netdev@vger.kernel.org +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/Makefile | 2 -- + 1 file changed, 2 deletions(-) + +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -1,7 +1,5 @@ + # SPDX-License-Identifier: GPL-2.0-only + +-ccflags-y += -Werror +- + obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o + mtk_t7xx-y:= t7xx_pci.o \ + t7xx_pcie_mac.o \ diff --git a/target/linux/generic/backport-5.15/621-v6.4-24-wwan-core-Support-slicing-in-port-TX-flow-of-WWAN-subsyst.patch b/target/linux/generic/backport-5.15/621-v6.4-24-wwan-core-Support-slicing-in-port-TX-flow-of-WWAN-subsyst.patch new file mode 100644 index 0000000000..b94bb471bb --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.4-24-wwan-core-Support-slicing-in-port-TX-flow-of-WWAN-subsyst.patch @@ -0,0 +1,300 @@ +From 36bd28c1cb0dbf48645cfe43159907fb3253b33a Mon Sep 17 00:00:00 2001 +From: haozhe chang +Date: Thu, 16 Mar 2023 17:58:20 +0800 +Subject: [PATCH] wwan: core: Support slicing in port TX flow of WWAN subsystem + +wwan_port_fops_write inputs the SKB parameter to the TX callback of +the WWAN device driver. 
However, the WWAN device (e.g., t7xx) may +have an MTU less than the size of SKB, causing the TX buffer to be +sliced and copied once more in the WWAN device driver. + +This patch implements the slicing in the WWAN subsystem and gives +the WWAN devices driver the option to slice(by frag_len) or not. By +doing so, the additional memory copy is reduced. + +Meanwhile, this patch gives WWAN devices driver the option to reserve +headroom in fragments for the device-specific metadata. + +Signed-off-by: haozhe chang +Reviewed-by: Loic Poulain +Link: https://lore.kernel.org/r/20230316095826.181904-1-haozhe.chang@mediatek.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/iosm/iosm_ipc_port.c | 3 +- + drivers/net/wwan/mhi_wwan_ctrl.c | 2 +- + drivers/net/wwan/rpmsg_wwan_ctrl.c | 2 +- + drivers/net/wwan/t7xx/t7xx_port_wwan.c | 36 ++++++++-------- + drivers/net/wwan/wwan_core.c | 58 +++++++++++++++++++------- + drivers/net/wwan/wwan_hwsim.c | 2 +- + drivers/usb/class/cdc-wdm.c | 3 +- + include/linux/wwan.h | 11 +++++ + 8 files changed, 81 insertions(+), 36 deletions(-) + +--- a/drivers/net/wwan/iosm/iosm_ipc_port.c ++++ b/drivers/net/wwan/iosm/iosm_ipc_port.c +@@ -63,7 +63,8 @@ struct iosm_cdev *ipc_port_init(struct i + ipc_port->ipc_imem = ipc_imem; + + ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type, +- &ipc_wwan_ctrl_ops, ipc_port); ++ &ipc_wwan_ctrl_ops, NULL, ++ ipc_port); + + return ipc_port; + } +--- a/drivers/net/wwan/mhi_wwan_ctrl.c ++++ b/drivers/net/wwan/mhi_wwan_ctrl.c +@@ -237,7 +237,7 @@ static int mhi_wwan_ctrl_probe(struct mh + + /* Register as a wwan port, id->driver_data contains wwan port type */ + port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data, +- &wwan_pops, mhiwwan); ++ &wwan_pops, NULL, mhiwwan); + if (IS_ERR(port)) { + kfree(mhiwwan); + return PTR_ERR(port); +--- a/drivers/net/wwan/rpmsg_wwan_ctrl.c ++++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c +@@ -129,7 +129,7 @@ static int rpmsg_wwan_ctrl_probe(struct + + /* Register as a wwan port, id.driver_data contains wwan port type */ + port = wwan_create_port(parent, rpdev->id.driver_data, +- &rpmsg_wwan_pops, rpwwan); ++ &rpmsg_wwan_pops, NULL, rpwwan); + if (IS_ERR(port)) + return PTR_ERR(port); + +--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c +@@ -54,13 +54,13 @@ static void t7xx_port_ctrl_stop(struct w + static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) + { + struct t7xx_port *port_private = wwan_port_get_drvdata(port); +- size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU; + const struct t7xx_port_conf *port_conf; ++ struct sk_buff *cur = skb, *cloned; + struct t7xx_fsm_ctl *ctl; + enum md_state md_state; ++ int cnt = 0, ret; + +- len = skb->len; +- if (!len || !port_private->chan_enable) ++ if (!port_private->chan_enable) + return -EINVAL; + + port_conf = port_private->port_conf; +@@ -72,23 +72,21 @@ static int t7xx_port_ctrl_tx(struct wwan + return -ENODEV; + } + +- for (offset = 0; offset < len; offset += chunk_len) { +- struct sk_buff *skb_ccci; +- int ret; +- +- chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header)); +- skb_ccci = t7xx_port_alloc_skb(chunk_len); +- if (!skb_ccci) +- return -ENOMEM; +- +- skb_put_data(skb_ccci, skb->data + offset, chunk_len); +- ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0); ++ while (cur) { ++ cloned = skb_clone(cur, GFP_KERNEL); ++ cloned->len = skb_headlen(cur); ++ ret = t7xx_port_send_skb(port_private, cloned, 0, 0); + if (ret) { +- 
dev_kfree_skb_any(skb_ccci); ++ dev_kfree_skb(cloned); + dev_err(port_private->dev, "Write error on %s port, %d\n", + port_conf->name, ret); +- return ret; ++ return cnt ? cnt + ret : ret; + } ++ cnt += cur->len; ++ if (cur == skb) ++ cur = skb_shinfo(skb)->frag_list; ++ else ++ cur = cur->next; + } + + dev_kfree_skb(skb); +@@ -154,13 +152,17 @@ static int t7xx_port_wwan_disable_chl(st + static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) + { + const struct t7xx_port_conf *port_conf = port->port_conf; ++ unsigned int header_len = sizeof(struct ccci_header); ++ struct wwan_port_caps caps; + + if (state != MD_STATE_READY) + return; + + if (!port->wwan.wwan_port) { ++ caps.frag_len = CLDMA_MTU - header_len; ++ caps.headroom_len = header_len; + port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type, +- &wwan_ops, port); ++ &wwan_ops, &caps, port); + if (IS_ERR(port->wwan.wwan_port)) + dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); + } +--- a/drivers/net/wwan/wwan_core.c ++++ b/drivers/net/wwan/wwan_core.c +@@ -60,6 +60,8 @@ struct wwan_device { + * @rxq: Buffer inbound queue + * @waitqueue: The waitqueue for port fops (read/write/poll) + * @data_lock: Port specific data access serialization ++ * @headroom_len: SKB reserved headroom size ++ * @frag_len: Length to fragment packet + * @at_data: AT port specific data + */ + struct wwan_port { +@@ -72,6 +74,8 @@ struct wwan_port { + struct sk_buff_head rxq; + wait_queue_head_t waitqueue; + struct mutex data_lock; /* Port specific data access serialization */ ++ size_t headroom_len; ++ size_t frag_len; + union { + struct { + struct ktermios termios; +@@ -355,6 +359,7 @@ static int __wwan_port_dev_assign_name(s + struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, ++ struct wwan_port_caps *caps, + void *drvdata) + { + struct wwan_device *wwandev; +@@ -388,6 +393,8 @@ struct wwan_port *wwan_create_port(struc + + port->type = type; + port->ops = ops; ++ port->frag_len = caps ? caps->frag_len : SIZE_MAX; ++ port->headroom_len = caps ? 
caps->headroom_len : 0; + mutex_init(&port->ops_lock); + skb_queue_head_init(&port->rxq); + init_waitqueue_head(&port->waitqueue); +@@ -631,30 +638,53 @@ static ssize_t wwan_port_fops_read(struc + static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf, + size_t count, loff_t *offp) + { ++ struct sk_buff *skb, *head = NULL, *tail = NULL; + struct wwan_port *port = filp->private_data; +- struct sk_buff *skb; ++ size_t frag_len, remain = count; + int ret; + + ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK)); + if (ret) + return ret; + +- skb = alloc_skb(count, GFP_KERNEL); +- if (!skb) +- return -ENOMEM; +- +- if (copy_from_user(skb_put(skb, count), buf, count)) { +- kfree_skb(skb); +- return -EFAULT; +- } ++ do { ++ frag_len = min(remain, port->frag_len); ++ skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL); ++ if (!skb) { ++ ret = -ENOMEM; ++ goto freeskb; ++ } ++ skb_reserve(skb, port->headroom_len); ++ ++ if (!head) { ++ head = skb; ++ } else if (!tail) { ++ skb_shinfo(head)->frag_list = skb; ++ tail = skb; ++ } else { ++ tail->next = skb; ++ tail = skb; ++ } ++ ++ if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) { ++ ret = -EFAULT; ++ goto freeskb; ++ } ++ ++ if (skb != head) { ++ head->data_len += skb->len; ++ head->len += skb->len; ++ head->truesize += skb->truesize; ++ } ++ } while (remain -= frag_len); + +- ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK)); +- if (ret) { +- kfree_skb(skb); +- return ret; +- } ++ ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK)); ++ if (!ret) ++ return count; + +- return count; ++freeskb: ++ kfree_skb(head); ++ return ret; + } + + static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait) +--- a/drivers/net/wwan/wwan_hwsim.c ++++ b/drivers/net/wwan/wwan_hwsim.c +@@ -204,7 +204,7 @@ static struct wwan_hwsim_port *wwan_hwsi + + port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT, + &wwan_hwsim_port_ops, +- port); ++ NULL, port); + if (IS_ERR(port->wwan)) { + err = PTR_ERR(port->wwan); + goto err_free_port; +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -929,7 +929,8 @@ static void wdm_wwan_init(struct wdm_dev + return; + } + +- port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, desc); ++ port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, ++ NULL, desc); + if (IS_ERR(port)) { + dev_err(&intf->dev, "%s: Unable to create WWAN port\n", + dev_name(intf->usb_dev)); +--- a/include/linux/wwan.h ++++ b/include/linux/wwan.h +@@ -61,11 +61,21 @@ struct wwan_port_ops { + poll_table *wait); + }; + ++/** struct wwan_port_caps - The WWAN port capbilities ++ * @frag_len: WWAN port TX fragments length ++ * @headroom_len: WWAN port TX fragments reserved headroom length ++ */ ++struct wwan_port_caps { ++ size_t frag_len; ++ unsigned int headroom_len; ++}; ++ + /** + * wwan_create_port - Add a new WWAN port + * @parent: Device to use as parent and shared by all WWAN ports + * @type: WWAN port type + * @ops: WWAN port operations ++ * @caps: WWAN port capabilities + * @drvdata: Pointer to caller driver data + * + * Allocate and register a new WWAN port. 
The port will be automatically exposed +@@ -83,6 +93,7 @@ struct wwan_port_ops { + struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, ++ struct wwan_port_caps *caps, + void *drvdata); + + /** diff --git a/target/linux/generic/backport-5.15/621-v6.4-26-net-wwan-t7xx-Ensure-init-is-completed-before-system-slee.patch b/target/linux/generic/backport-5.15/621-v6.4-26-net-wwan-t7xx-Ensure-init-is-completed-before-system-slee.patch new file mode 100644 index 0000000000..7785b7f746 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.4-26-net-wwan-t7xx-Ensure-init-is-completed-before-system-slee.patch @@ -0,0 +1,88 @@ +From ab87603b251134441a67385ecc9d3371be17b7a7 Mon Sep 17 00:00:00 2001 +From: Kai-Heng Feng +Date: Wed, 17 May 2023 13:24:51 +0800 +Subject: [PATCH] net: wwan: t7xx: Ensure init is completed before system sleep + +When the system attempts to sleep while mtk_t7xx is not ready, the driver +cannot put the device to sleep: +[ 12.472918] mtk_t7xx 0000:57:00.0: [PM] Exiting suspend, modem in invalid state +[ 12.472936] mtk_t7xx 0000:57:00.0: PM: pci_pm_suspend(): t7xx_pci_pm_suspend+0x0/0x20 [mtk_t7xx] returns -14 +[ 12.473678] mtk_t7xx 0000:57:00.0: PM: dpm_run_callback(): pci_pm_suspend+0x0/0x1b0 returns -14 +[ 12.473711] mtk_t7xx 0000:57:00.0: PM: failed to suspend async: error -14 +[ 12.764776] PM: Some devices failed to suspend, or early wake event detected + +Mediatek confirmed the device can take a rather long time to complete +its initialization, so wait for up to 20 seconds until init is done. + +Signed-off-by: Kai-Heng Feng +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_pci.c | 18 ++++++++++++++++++ + drivers/net/wwan/t7xx/t7xx_pci.h | 1 + + 2 files changed, 19 insertions(+) + +--- a/drivers/net/wwan/t7xx/t7xx_pci.c ++++ b/drivers/net/wwan/t7xx/t7xx_pci.c +@@ -45,6 +45,7 @@ + #define T7XX_PCI_IREG_BASE 0 + #define T7XX_PCI_EREG_BASE 2 + ++#define T7XX_INIT_TIMEOUT 20 + #define PM_SLEEP_DIS_TIMEOUT_MS 20 + #define PM_ACK_TIMEOUT_MS 1500 + #define PM_AUTOSUSPEND_MS 20000 +@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_ + spin_lock_init(&t7xx_dev->md_pm_lock); + init_completion(&t7xx_dev->sleep_lock_acquire); + init_completion(&t7xx_dev->pm_sr_ack); ++ init_completion(&t7xx_dev->init_done); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + device_init_wakeup(&pdev->dev, true); +@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_p + pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev); + pm_runtime_allow(&t7xx_dev->pdev->dev); + pm_runtime_put_noidle(&t7xx_dev->pdev->dev); ++ complete_all(&t7xx_dev->init_done); + } + + static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) +@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci + __t7xx_pci_pm_suspend(pdev); + } + ++static int t7xx_pci_pm_prepare(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct t7xx_pci_dev *t7xx_dev; ++ ++ t7xx_dev = pci_get_drvdata(pdev); ++ if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) { ++ dev_warn(dev, "Not ready for system sleep.\n"); ++ return -ETIMEDOUT; ++ } ++ ++ return 0; ++} ++ + static int t7xx_pci_pm_suspend(struct device *dev) + { + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(st + } + + static const struct dev_pm_ops t7xx_pci_pm_ops = { ++ .prepare = t7xx_pci_pm_prepare, + .suspend = t7xx_pci_pm_suspend, + .resume = t7xx_pci_pm_resume, + .resume_noirq 
= t7xx_pci_pm_resume_noirq, +--- a/drivers/net/wwan/t7xx/t7xx_pci.h ++++ b/drivers/net/wwan/t7xx/t7xx_pci.h +@@ -69,6 +69,7 @@ struct t7xx_pci_dev { + struct t7xx_modem *md; + struct t7xx_ccmni_ctrl *ccmni_ctlb; + bool rgu_pci_irq_en; ++ struct completion init_done; + + /* Low Power Items */ + struct list_head md_pm_entities; diff --git a/target/linux/generic/backport-5.15/621-v6.5-27-net-wwan-t7xx-Use-alloc_ordered_workqueue-to-create.patch b/target/linux/generic/backport-5.15/621-v6.5-27-net-wwan-t7xx-Use-alloc_ordered_workqueue-to-create.patch new file mode 100644 index 0000000000..984ea3ab56 --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.5-27-net-wwan-t7xx-Use-alloc_ordered_workqueue-to-create.patch @@ -0,0 +1,121 @@ +From 72b1fe6cc6523908bfc339d07d18cb0f3469a643 Mon Sep 17 00:00:00 2001 +From: Tejun Heo +Date: Thu, 25 May 2023 12:15:29 -1000 +Subject: [PATCH] net: wwan: t7xx: Use alloc_ordered_workqueue() to create + ordered workqueues + +BACKGROUND +========== + +When multiple work items are queued to a workqueue, their execution order +doesn't match the queueing order. They may get executed in any order and +simultaneously. When fully serialized execution - one by one in the queueing +order - is needed, an ordered workqueue should be used which can be created +with alloc_ordered_workqueue(). + +However, alloc_ordered_workqueue() was a later addition. Before it, an +ordered workqueue could be obtained by creating an UNBOUND workqueue with +@max_active==1. This originally was an implementation side-effect which was +broken by 4c16bd327c74 ("workqueue: restore WQ_UNBOUND/max_active==1 to be +ordered"). Because there were users that depended on the ordered execution, +5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered") +made workqueue allocation path to implicitly promote UNBOUND workqueues w/ +@max_active==1 to ordered workqueues. + +While this has worked okay, overloading the UNBOUND allocation interface +this way creates other issues. It's difficult to tell whether a given +workqueue actually needs to be ordered and users that legitimately want a +min concurrency level wq unexpectedly gets an ordered one instead. With +planned UNBOUND workqueue updates to improve execution locality and more +prevalence of chiplet designs which can benefit from such improvements, this +isn't a state we wanna be in forever. + +This patch series audits all callsites that create an UNBOUND workqueue w/ +@max_active==1 and converts them to alloc_ordered_workqueue() as necessary. + +WHAT TO LOOK FOR +================ + +The conversions are from + + alloc_workqueue(WQ_UNBOUND | flags, 1, args..) + +to + + alloc_ordered_workqueue(flags, args...) + +which don't cause any functional changes. If you know that fully ordered +execution is not necessary, please let me know. I'll drop the conversion and +instead add a comment noting the fact to reduce confusion while conversion +is in progress. + +If you aren't fully sure, it's completely fine to let the conversion +through. The behavior will stay exactly the same and we can always +reconsider later. + +As there are follow-up workqueue core changes, I'd really appreciate if the +patch can be routed through the workqueue tree w/ your acks. Thanks. + +Signed-off-by: Tejun Heo +Cc: Chandrashekar Devegowda +Cc: Intel Corporation +Cc: Chiranjeevi Rapolu +Cc: Liu Haijun +Cc: M Chetan Kumar +Cc: Ricardo Martinez +Cc: Loic Poulain +Cc: Sergey Ryazanov +Cc: Johannes Berg +Cc: "David S. 
Miller" +Cc: Eric Dumazet +Cc: Jakub Kicinski +Cc: Paolo Abeni +Cc: netdev@vger.kernel.org +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 +++++++------ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 5 +++-- + 2 files changed, 10 insertions(+), 8 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1293,9 +1293,9 @@ int t7xx_cldma_init(struct cldma_ctrl *m + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + md_ctrl->txq[i].worker = +- alloc_workqueue("md_hif%d_tx%d_worker", +- WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), +- 1, md_ctrl->hif_id, i); ++ alloc_ordered_workqueue("md_hif%d_tx%d_worker", ++ WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), ++ md_ctrl->hif_id, i); + if (!md_ctrl->txq[i].worker) + goto err_workqueue; + +@@ -1306,9 +1306,10 @@ int t7xx_cldma_init(struct cldma_ctrl *m + md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); + +- md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", +- WQ_UNBOUND | WQ_MEM_RECLAIM, +- 1, md_ctrl->hif_id, i); ++ md_ctrl->rxq[i].worker = ++ alloc_ordered_workqueue("md_hif%d_rx%d_worker", ++ WQ_MEM_RECLAIM, ++ md_ctrl->hif_id, i); + if (!md_ctrl->rxq[i].worker) + goto err_workqueue; + } +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +@@ -618,8 +618,9 @@ int t7xx_dpmaif_txq_init(struct dpmaif_t + return ret; + } + +- txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | +- (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); ++ txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker", ++ WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI), ++ txq->index); + if (!txq->worker) + return -ENOMEM; + diff --git a/target/linux/generic/backport-5.15/621-v6.6-28-net-wwan-t7xx-Add-AP-CLDMA.patch b/target/linux/generic/backport-5.15/621-v6.6-28-net-wwan-t7xx-Add-AP-CLDMA.patch new file mode 100644 index 0000000000..3e7d7c2aee --- /dev/null +++ b/target/linux/generic/backport-5.15/621-v6.6-28-net-wwan-t7xx-Add-AP-CLDMA.patch @@ -0,0 +1,482 @@ +From ba2274dcfda859b8a27193e68ad37bfe4da28ddc Mon Sep 17 00:00:00 2001 +From: Jose Ignacio Tornos Martinez +Date: Tue, 11 Jul 2023 08:28:13 +0200 +Subject: [PATCH] net: wwan: t7xx: Add AP CLDMA +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +At this moment with the current status, t7xx is not functional due to +problems like this after connection, if there is no activity: +[ 57.370534] mtk_t7xx 0000:72:00.0: [PM] SAP suspend error: -110 +[ 57.370581] mtk_t7xx 0000:72:00.0: can't suspend + (t7xx_pci_pm_runtime_suspend [mtk_t7xx] returned -110) +because after this, the traffic no longer works. + +The complete series 'net: wwan: t7xx: fw flashing & coredump support' +was reverted because of issues with the pci implementation. +In order to have at least the modem working, it would be enough if just +the first commit of the series is re-applied: +d20ef656f994 net: wwan: t7xx: Add AP CLDMA +With that, the Application Processor would be controlled, correctly +suspended and the commented problems would be fixed (I am testing here +like this with no related issue). + +This commit is independent of the others and not related to the +commented pci implementation for the new features: fw flashing and +coredump collection. 
+ +Use v2 patch version of d20ef656f994 as JinJian Song suggests +(https://patchwork.kernel.org/project/netdevbpf/patch/20230105154215.198828-1-m.chetan.kumar@linux.intel.com/). + +Original text from the commit that would be re-applied: + + d20ef656f994 net: wwan: t7xx: Add AP CLDMA + Author: Haijun Liu + Date: Tue Aug 16 09:53:28 2022 +0530 + + The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to + communicate with AP and Modem processors respectively. So far only + MD-CLDMA was being used, this patch enables AP-CLDMA. + + Rename small Application Processor (sAP) to AP. + + Signed-off-by: Haijun Liu + Co-developed-by: Madhusmita Sahu + Signed-off-by: Madhusmita Sahu + Signed-off-by: Moises Veleta + Signed-off-by: Devegowda Chandrashekar + Signed-off-by: M Chetan Kumar + Reviewed-by: Ilpo Järvinen + Reviewed-by: Sergey Ryazanov + Reviewed-by: Jesse Brandeburg + +Signed-off-by: Jose Ignacio Tornos Martinez +Reviewed-by: Simon Horman +Link: https://lore.kernel.org/r/20230711062817.6108-1-jtornosm@redhat.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 17 +++-- + drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 2 +- + drivers/net/wwan/t7xx/t7xx_mhccif.h | 1 + + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 76 +++++++++++++++++----- + drivers/net/wwan/t7xx/t7xx_modem_ops.h | 2 + + drivers/net/wwan/t7xx/t7xx_port.h | 6 +- + drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 8 ++- + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 18 ++++- + drivers/net/wwan/t7xx/t7xx_reg.h | 2 +- + drivers/net/wwan/t7xx/t7xx_state_monitor.c | 13 +++- + drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 + + 11 files changed, 116 insertions(+), 31 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cld + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + u32 phy_ao_base, phy_pd_base; + +- if (md_ctrl->hif_id != CLDMA_ID_MD) +- return; +- +- phy_ao_base = CLDMA1_AO_BASE; +- phy_pd_base = CLDMA1_PD_BASE; +- hw_info->phy_interrupt_id = CLDMA1_INT; + hw_info->hw_mode = MODE_BIT_64; ++ ++ if (md_ctrl->hif_id == CLDMA_ID_MD) { ++ phy_ao_base = CLDMA1_AO_BASE; ++ phy_pd_base = CLDMA1_PD_BASE; ++ hw_info->phy_interrupt_id = CLDMA1_INT; ++ } else { ++ phy_ao_base = CLDMA0_AO_BASE; ++ phy_pd_base = CLDMA0_PD_BASE; ++ hw_info->phy_interrupt_id = CLDMA0_INT; ++ } ++ + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_ao_base); + hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +@@ -34,7 +34,7 @@ + /** + * enum cldma_id - Identifiers for CLDMA HW units. + * @CLDMA_ID_MD: Modem control channel. +- * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). ++ * @CLDMA_ID_AP: Application Processor control channel. + * @CLDMA_NUM: Number of CLDMA HW units available. 
+ */ + enum cldma_id { +--- a/drivers/net/wwan/t7xx/t7xx_mhccif.h ++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h +@@ -25,6 +25,7 @@ + D2H_INT_EXCEPTION_CLEARQ_DONE | \ + D2H_INT_EXCEPTION_ALLQ_RESET | \ + D2H_INT_PORT_ENUM | \ ++ D2H_INT_ASYNC_AP_HK | \ + D2H_INT_ASYNC_MD_HK) + + void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -44,6 +44,7 @@ + #include "t7xx_state_monitor.h" + + #define RT_ID_MD_PORT_ENUM 0 ++#define RT_ID_AP_PORT_ENUM 1 + /* Modem feature query identification code - "ICCC" */ + #define MD_FEATURE_QUERY_ID 0x49434343 + +@@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7x + } + + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); ++ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage); + + if (stage == HIF_EX_INIT) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); +@@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struc + if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) + return -EINVAL; + +- if (i == RT_ID_MD_PORT_ENUM) ++ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM) + t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); + } + +@@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_m + return 0; + } + +-static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, ++static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info, ++ struct t7xx_fsm_ctl *ctl, + enum t7xx_fsm_event_state event_id, + enum t7xx_fsm_event_state err_detect) + { + struct t7xx_fsm_event *event = NULL, *event_next; +- struct t7xx_sys_info *core_info = &md->core_md; + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned long flags; + int ret; +@@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_st + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); + md->core_md.handshake_ongoing = true; +- t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); ++ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); ++} ++ ++static void t7xx_ap_hk_wq(struct work_struct *work) ++{ ++ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work); ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ ++ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). 
*/ ++ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT); ++ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); ++ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]); ++ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); ++ md->core_ap.handshake_ongoing = true; ++ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); + } + + void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) + { + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; +- void __iomem *mhccif_base; + unsigned int int_sta; + unsigned long flags; + + switch (evt_id) { + case FSM_PRE_START: +- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); ++ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK | ++ D2H_INT_ASYNC_AP_HK); + break; + + case FSM_START: +@@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_mo + ctl->exp_flg = true; + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK; + } else if (ctl->exp_flg) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; +- } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { +- queue_work(md->handshake_wq, &md->handshake_work); +- md->exp_id &= ~D2H_INT_ASYNC_MD_HK; +- mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; +- iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); +- t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK; + } else { +- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; ++ ++ if (md->exp_id & D2H_INT_ASYNC_MD_HK) { ++ queue_work(md->handshake_wq, &md->handshake_work); ++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ } ++ ++ if (md->exp_id & D2H_INT_ASYNC_AP_HK) { ++ queue_work(md->handshake_wq, &md->ap_handshake_work); ++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK; ++ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); ++ } + } + spin_unlock_irqrestore(&md->exp_lock, flags); + +@@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_mo + + case FSM_READY: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); + break; + + default: +@@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc( + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); ++ ++ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq); ++ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK; ++ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |= ++ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); ++ + return md; + } + +@@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t + md->exp_id = 0; + t7xx_fsm_reset(md); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]); + t7xx_port_proxy_reset(md->port_prox); + md->md_init_finish = true; + return t7xx_core_reset(md); +@@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + if (ret) + goto err_destroy_hswq; + ++ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev); ++ if (ret) ++ goto err_destroy_hswq; ++ + ret = t7xx_fsm_init(md); + if (ret) + goto err_destroy_hswq; +@@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + if (ret) + goto err_uninit_ccmni; + +- ret = t7xx_port_proxy_init(md); 
++ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]); + if (ret) + goto err_uninit_md_cldma; + ++ ret = t7xx_port_proxy_init(md); ++ if (ret) ++ goto err_uninit_ap_cldma; ++ + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); +- if (ret) /* fsm_uninit flushes cmd queue */ ++ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */ + goto err_uninit_proxy; + + t7xx_md_sys_sw_init(t7xx_dev); +@@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + err_uninit_proxy: + t7xx_port_proxy_uninit(md->port_prox); + ++err_uninit_ap_cldma: ++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); ++ + err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + +@@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t + + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_port_proxy_uninit(md->port_prox); ++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_ccmni_exit(t7xx_dev); + t7xx_fsm_uninit(md); +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h +@@ -66,10 +66,12 @@ struct t7xx_modem { + struct cldma_ctrl *md_ctrl[CLDMA_NUM]; + struct t7xx_pci_dev *t7xx_dev; + struct t7xx_sys_info core_md; ++ struct t7xx_sys_info core_ap; + bool md_init_finish; + bool rgu_irq_asserted; + struct workqueue_struct *handshake_wq; + struct work_struct handshake_work; ++ struct work_struct ap_handshake_work; + struct t7xx_fsm_ctl *fsm_ctl; + struct port_proxy *port_prox; + unsigned int exp_id; +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -36,9 +36,13 @@ + /* Channel ID and Message ID definitions. + * The channel number consists of peer_id(15:12) , channel_id(11:0) + * peer_id: +- * 0:reserved, 1: to sAP, 2: to MD ++ * 0:reserved, 1: to AP, 2: to MD + */ + enum port_ch { ++ /* to AP */ ++ PORT_CH_AP_CONTROL_RX = 0x1000, ++ PORT_CH_AP_CONTROL_TX = 0x1001, ++ + /* to MD */ + PORT_CH_CONTROL_RX = 0x2000, + PORT_CH_CONTROL_TX = 0x2001, +--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c +@@ -167,8 +167,12 @@ static int control_msg_handler(struct t7 + case CTL_ID_HS2_MSG: + skb_pull(skb, sizeof(*ctrl_msg_h)); + +- if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { +- ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, ++ if (port_conf->rx_ch == PORT_CH_CONTROL_RX || ++ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) { ++ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ? 
++ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2; ++ ++ ret = t7xx_fsm_append_event(ctl, event, skb->data, + le32_to_cpu(ctrl_msg_h->data_length)); + if (ret) + dev_err(port->dev, "Failed to append Handshake 2 event"); +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -48,7 +48,7 @@ + i < (proxy)->port_count; \ + i++, (p) = &(proxy)->ports[i]) + +-static const struct t7xx_port_conf t7xx_md_port_conf[] = { ++static const struct t7xx_port_conf t7xx_port_conf[] = { + { + .tx_ch = PORT_CH_UART2_TX, + .rx_ch = PORT_CH_UART2_RX, +@@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_ + .path_id = CLDMA_ID_MD, + .ops = &ctl_port_ops, + .name = "t7xx_ctrl", ++ }, { ++ .tx_ch = PORT_CH_AP_CONTROL_TX, ++ .rx_ch = PORT_CH_AP_CONTROL_RX, ++ .txq_index = Q_IDX_CTRL, ++ .rxq_index = Q_IDX_CTRL, ++ .path_id = CLDMA_ID_AP, ++ .ops = &ctl_port_ops, ++ .name = "t7xx_ap_ctrl", + }, + }; + +@@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(st + if (port_conf->tx_ch == PORT_CH_CONTROL_TX) + md->core_md.ctl_port = port; + ++ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX) ++ md->core_ap.ctl_port = port; ++ + port->t7xx_dev = md->t7xx_dev; + port->dev = &md->t7xx_dev->pdev->dev; + spin_lock_init(&port->port_update_lock); +@@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(st + + static int t7xx_proxy_alloc(struct t7xx_modem *md) + { +- unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); ++ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf); + struct device *dev = &md->t7xx_dev->pdev->dev; + struct port_proxy *port_prox; + int i; +@@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_ + port_prox->dev = dev; + + for (i = 0; i < port_count; i++) +- port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; ++ port_prox->ports[i].port_conf = &t7xx_port_conf[i]; + + port_prox->port_count = port_count; + t7xx_proxy_init_all_ports(md); +@@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_mod + if (ret) + return ret; + ++ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb); + t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); + return 0; + } +--- a/drivers/net/wwan/t7xx/t7xx_reg.h ++++ b/drivers/net/wwan/t7xx/t7xx_reg.h +@@ -56,7 +56,7 @@ + #define D2H_INT_RESUME_ACK BIT(12) + #define D2H_INT_SUSPEND_ACK_AP BIT(13) + #define D2H_INT_RESUME_ACK_AP BIT(14) +-#define D2H_INT_ASYNC_SAP_HK BIT(15) ++#define D2H_INT_ASYNC_AP_HK BIT(15) + #define D2H_INT_ASYNC_MD_HK BIT(16) + + /* Register base */ +--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c +@@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); + t7xx_md_event_notify(md, FSM_START); + +- wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, +- HZ * 60); ++ wait_event_interruptible_timeout(ctl->async_hk_wq, ++ (md->core_md.ready && md->core_ap.ready) || ++ ctl->exp_flg, HZ * 60); + dev = &md->t7xx_dev->pdev->dev; + + if (ctl->exp_flg) +@@ -299,6 +300,13 @@ static int fsm_routine_starting(struct t + + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); + return -ETIMEDOUT; ++ } else if (!md->core_ap.ready) { ++ dev_err(dev, "AP handshake timeout\n"); ++ if (md->core_ap.handshake_ongoing) ++ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0); ++ ++ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); ++ return -ETIMEDOUT; + } + + t7xx_pci_pm_init_late(md->t7xx_dev); +@@ -335,6 +343,7 @@ static 
void fsm_routine_start(struct t7x + return; + } + ++ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); + fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); + } +--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h ++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h +@@ -38,10 +38,12 @@ enum t7xx_fsm_state { + enum t7xx_fsm_event_state { + FSM_EVENT_INVALID, + FSM_EVENT_MD_HS2, ++ FSM_EVENT_AP_HS2, + FSM_EVENT_MD_EX, + FSM_EVENT_MD_EX_REC_OK, + FSM_EVENT_MD_EX_PASS, + FSM_EVENT_MD_HS2_EXIT, ++ FSM_EVENT_AP_HS2_EXIT, + FSM_EVENT_MAX + }; + diff --git a/target/linux/generic/backport-6.1/621-v6.2-01-net-wwan-t7xx-use-union-to-group-port-type-specific-data.patch b/target/linux/generic/backport-6.1/621-v6.2-01-net-wwan-t7xx-use-union-to-group-port-type-specific-data.patch new file mode 100644 index 0000000000..075cc529fc --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.2-01-net-wwan-t7xx-use-union-to-group-port-type-specific-data.patch @@ -0,0 +1,79 @@ +From fece7a8c65d1476b901b969a07b2979e1b459e66 Mon Sep 17 00:00:00 2001 +From: M Chetan Kumar +Date: Fri, 28 Oct 2022 21:04:50 +0530 +Subject: [PATCH] net: wwan: t7xx: use union to group port type specific data + +Use union inside t7xx_port to group port type specific data members. + +Signed-off-by: M Chetan Kumar +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_port.h | 6 +++++- + drivers/net/wwan/t7xx/t7xx_port_wwan.c | 16 ++++++++-------- + 2 files changed, 13 insertions(+), 9 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -99,7 +99,6 @@ struct t7xx_port_conf { + struct t7xx_port { + /* Members not initialized in definition */ + const struct t7xx_port_conf *port_conf; +- struct wwan_port *wwan_port; + struct t7xx_pci_dev *t7xx_dev; + struct device *dev; + u16 seq_nums[2]; /* TX/RX sequence numbers */ +@@ -122,6 +121,11 @@ struct t7xx_port { + int rx_length_th; + bool chan_enable; + struct task_struct *thread; ++ union { ++ struct { ++ struct wwan_port *wwan_port; ++ } wwan; ++ }; + }; + + struct sk_buff *t7xx_port_alloc_skb(int payload); +--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c +@@ -109,12 +109,12 @@ static int t7xx_port_wwan_init(struct t7 + + static void t7xx_port_wwan_uninit(struct t7xx_port *port) + { +- if (!port->wwan_port) ++ if (!port->wwan.wwan_port) + return; + + port->rx_length_th = 0; +- wwan_remove_port(port->wwan_port); +- port->wwan_port = NULL; ++ wwan_remove_port(port->wwan.wwan_port); ++ port->wwan.wwan_port = NULL; + } + + static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb) +@@ -129,7 +129,7 @@ static int t7xx_port_wwan_recv_skb(struc + return 0; + } + +- wwan_port_rx(port->wwan_port, skb); ++ wwan_port_rx(port->wwan.wwan_port, skb); + return 0; + } + +@@ -158,10 +158,10 @@ static void t7xx_port_wwan_md_state_noti + if (state != MD_STATE_READY) + return; + +- if (!port->wwan_port) { +- port->wwan_port = wwan_create_port(port->dev, port_conf->port_type, +- &wwan_ops, port); +- if (IS_ERR(port->wwan_port)) ++ if (!port->wwan.wwan_port) { ++ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type, ++ &wwan_ops, port); ++ if (IS_ERR(port->wwan.wwan_port)) + dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); + } + } diff --git a/target/linux/generic/backport-6.1/621-v6.2-02-net-wwan-t7xx-Add-port-for-modem-logging.patch 
b/target/linux/generic/backport-6.1/621-v6.2-02-net-wwan-t7xx-Add-port-for-modem-logging.patch new file mode 100644 index 0000000000..60212cd0bb --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.2-02-net-wwan-t7xx-Add-port-for-modem-logging.patch @@ -0,0 +1,237 @@ +From 3349e4a48acb0923fa98d2beac82a833a76116cb Mon Sep 17 00:00:00 2001 +From: M Chetan Kumar +Date: Fri, 28 Oct 2022 21:05:34 +0530 +Subject: [PATCH] net: wwan: t7xx: Add port for modem logging + +The Modem Logging (MDL) port provides an interface to collect modem +logs for debugging purposes. MDL is supported by the relay interface, +and the mtk_t7xx port infrastructure. MDL allows user-space apps to +control logging via mbim command and to collect logs via the relay +interface, while port infrastructure facilitates communication between +the driver and the modem. + +Signed-off-by: Moises Veleta +Signed-off-by: M Chetan Kumar +Signed-off-by: Devegowda Chandrashekar +Acked-by: Ricardo Martinez +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/Kconfig | 1 + + drivers/net/wwan/t7xx/Makefile | 3 + + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 2 + + drivers/net/wwan/t7xx/t7xx_pci.h | 3 + + drivers/net/wwan/t7xx/t7xx_port.h | 3 + + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 12 +++ + drivers/net/wwan/t7xx/t7xx_port_proxy.h | 4 + + drivers/net/wwan/t7xx/t7xx_port_trace.c | 116 ++++++++++++++++++++++++ + 8 files changed, 144 insertions(+) + create mode 100644 drivers/net/wwan/t7xx/t7xx_port_trace.c + +--- a/drivers/net/wwan/Kconfig ++++ b/drivers/net/wwan/Kconfig +@@ -108,6 +108,7 @@ config IOSM + config MTK_T7XX + tristate "MediaTek PCIe 5G WWAN modem T7xx device" + depends on PCI ++ select RELAY if WWAN_DEBUGFS + help + Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device. 
+ Adapts WWAN framework and provides network interface like wwan0 +--- a/drivers/net/wwan/t7xx/Makefile ++++ b/drivers/net/wwan/t7xx/Makefile +@@ -16,3 +16,6 @@ mtk_t7xx-y:= t7xx_pci.o \ + t7xx_hif_dpmaif_rx.o \ + t7xx_dpmaif.o \ + t7xx_netdev.o ++ ++mtk_t7xx-$(CONFIG_WWAN_DEBUGFS) += \ ++ t7xx_port_trace.o \ +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1018,6 +1018,8 @@ static int t7xx_cldma_late_init(struct c + dev_err(md_ctrl->dev, "control TX ring init fail\n"); + goto err_free_tx_ring; + } ++ ++ md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU; + } + + for (j = 0; j < CLDMA_RXQ_NUM; j++) { +--- a/drivers/net/wwan/t7xx/t7xx_pci.h ++++ b/drivers/net/wwan/t7xx/t7xx_pci.h +@@ -79,6 +79,9 @@ struct t7xx_pci_dev { + spinlock_t md_pm_lock; /* Protects PCI resource lock */ + unsigned int sleep_disable_count; + struct completion sleep_lock_acquire; ++#ifdef CONFIG_WWAN_DEBUGFS ++ struct dentry *debugfs_dir; ++#endif + }; + + enum t7xx_pm_id { +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -125,6 +125,9 @@ struct t7xx_port { + struct { + struct wwan_port *wwan_port; + } wwan; ++ struct { ++ struct rchan *relaych; ++ } log; + }; + }; + +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c +@@ -70,6 +70,18 @@ static const struct t7xx_port_conf t7xx_ + .name = "MBIM", + .port_type = WWAN_PORT_MBIM, + }, { ++#ifdef CONFIG_WWAN_DEBUGFS ++ .tx_ch = PORT_CH_MD_LOG_TX, ++ .rx_ch = PORT_CH_MD_LOG_RX, ++ .txq_index = 7, ++ .rxq_index = 7, ++ .txq_exp_index = 7, ++ .rxq_exp_index = 7, ++ .path_id = CLDMA_ID_MD, ++ .ops = &t7xx_trace_port_ops, ++ .name = "mdlog", ++ }, { ++#endif + .tx_ch = PORT_CH_CONTROL_TX, + .rx_ch = PORT_CH_CONTROL_RX, + .txq_index = Q_IDX_CTRL, +--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h ++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h +@@ -87,6 +87,10 @@ struct ctrl_msg_header { + extern struct port_ops wwan_sub_port_ops; + extern struct port_ops ctl_port_ops; + ++#ifdef CONFIG_WWAN_DEBUGFS ++extern struct port_ops t7xx_trace_port_ops; ++#endif ++ + void t7xx_port_proxy_reset(struct port_proxy *port_prox); + void t7xx_port_proxy_uninit(struct port_proxy *port_prox); + int t7xx_port_proxy_init(struct t7xx_modem *md); +--- /dev/null ++++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c +@@ -0,0 +1,116 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2022 Intel Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "t7xx_port.h" ++#include "t7xx_port_proxy.h" ++#include "t7xx_state_monitor.h" ++ ++#define T7XX_TRC_SUB_BUFF_SIZE 131072 ++#define T7XX_TRC_N_SUB_BUFF 32 ++ ++static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename, ++ struct dentry *parent, ++ umode_t mode, ++ struct rchan_buf *buf, ++ int *is_global) ++{ ++ *is_global = 1; ++ return debugfs_create_file(filename, mode, parent, buf, ++ &relay_file_operations); ++} ++ ++static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry) ++{ ++ debugfs_remove(dentry); ++ return 0; ++} ++ ++static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf, ++ void *prev_subbuf, size_t prev_padding) ++{ ++ if (relay_buf_full(buf)) { ++ pr_err_ratelimited("Relay_buf full dropping traces"); ++ return 0; ++ } ++ ++ return 1; ++} ++ ++static struct rchan_callbacks relay_callbacks = { ++ .subbuf_start = t7xx_trace_subbuf_start_handler, ++ .create_buf_file = t7xx_trace_create_buf_file_handler, ++ .remove_buf_file = t7xx_trace_remove_buf_file_handler, ++}; ++ ++static void t7xx_trace_port_uninit(struct t7xx_port *port) ++{ ++ struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir; ++ struct rchan *relaych = port->log.relaych; ++ ++ if (!relaych) ++ return; ++ ++ relay_close(relaych); ++ debugfs_remove_recursive(debugfs_dir); ++} ++ ++static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb) ++{ ++ struct rchan *relaych = port->log.relaych; ++ ++ if (!relaych) ++ return -EINVAL; ++ ++ relay_write(relaych, skb->data, skb->len); ++ dev_kfree_skb(skb); ++ return 0; ++} ++ ++static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state) ++{ ++ struct rchan *relaych = port->log.relaych; ++ struct dentry *debugfs_wwan_dir; ++ struct dentry *debugfs_dir; ++ ++ if (state != MD_STATE_READY || relaych) ++ return; ++ ++ debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev); ++ if (IS_ERR(debugfs_wwan_dir)) ++ return; ++ ++ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir); ++ if (IS_ERR_OR_NULL(debugfs_dir)) { ++ wwan_put_debugfs_dir(debugfs_wwan_dir); ++ dev_err(port->dev, "Unable to create debugfs for trace"); ++ return; ++ } ++ ++ relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE, ++ T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL); ++ if (!relaych) ++ goto err_rm_debugfs_dir; ++ ++ wwan_put_debugfs_dir(debugfs_wwan_dir); ++ port->log.relaych = relaych; ++ port->t7xx_dev->debugfs_dir = debugfs_dir; ++ return; ++ ++err_rm_debugfs_dir: ++ debugfs_remove_recursive(debugfs_dir); ++ wwan_put_debugfs_dir(debugfs_wwan_dir); ++ dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name); ++} ++ ++struct port_ops t7xx_trace_port_ops = { ++ .recv_skb = t7xx_trace_port_recv_skb, ++ .uninit = t7xx_trace_port_uninit, ++ .md_state_notify = t7xx_port_trace_md_state_notify, ++}; diff --git a/target/linux/generic/backport-6.1/621-v6.2-03-net-wwan-t7xx-Use-needed_headroom-instead-of.patch b/target/linux/generic/backport-6.1/621-v6.2-03-net-wwan-t7xx-Use-needed_headroom-instead-of.patch new file mode 100644 index 0000000000..a464ee3143 --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.2-03-net-wwan-t7xx-Use-needed_headroom-instead-of.patch @@ -0,0 +1,31 @@ +From c053d7b6bdcb45780036b32be6a950f71a78bf52 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= +Date: Thu, 3 Nov 2022 14:48:28 +0530 +Subject: [PATCH] net: wwan: t7xx: Use needed_headroom instead of + 
hard_header_len +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +hard_header_len is used by gro_list_prepare() but on Rx, there +is no header so use needed_headroom instead. + +Signed-off-by: Ilpo Järvinen +Signed-off-by: Sreehari Kancharla +Reviewed-by: Sergey Ryazanov +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_netdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -161,7 +161,7 @@ static void t7xx_ccmni_post_stop(struct + + static void t7xx_ccmni_wwan_setup(struct net_device *dev) + { +- dev->hard_header_len += sizeof(struct ccci_header); ++ dev->needed_headroom += sizeof(struct ccci_header); + + dev->mtu = ETH_DATA_LEN; + dev->max_mtu = CCMNI_MTU_MAX; diff --git a/target/linux/generic/backport-6.1/621-v6.2-04-net-wwan-t7xx-Add-NAPI-support.patch b/target/linux/generic/backport-6.1/621-v6.2-04-net-wwan-t7xx-Add-NAPI-support.patch new file mode 100644 index 0000000000..2310a49801 --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.2-04-net-wwan-t7xx-Add-NAPI-support.patch @@ -0,0 +1,652 @@ +From 5545b7b9f294de7f95ec6a7cb1de0db52296001c Mon Sep 17 00:00:00 2001 +From: Haijun Liu +Date: Thu, 3 Nov 2022 14:48:29 +0530 +Subject: [PATCH] net: wwan: t7xx: Add NAPI support +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Replace the work queue based RX flow with a NAPI implementation +Remove rx_thread and dpmaif_rxq_work. +Enable GRO on RX path. +Introduce dummy network device. its responsibility is + - Binds one NAPI object for each DL HW queue and acts as + the agent of all those network devices. + - Use NAPI object to poll DL packets. + - Helps to dispatch each packet to the network interface. + +Signed-off-by: Haijun Liu +Co-developed-by: Sreehari Kancharla +Signed-off-by: Sreehari Kancharla +Signed-off-by: Chandrashekar Devegowda +Acked-by: Ricardo Martinez +Acked-by: M Chetan Kumar +Reviewed-by: Ilpo Järvinen +Signed-off-by: David S. 
Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 14 +- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 218 +++++++-------------- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 1 + + drivers/net/wwan/t7xx/t7xx_netdev.c | 89 ++++++++- + drivers/net/wwan/t7xx/t7xx_netdev.h | 5 + + 5 files changed, 161 insertions(+), 166 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h +@@ -20,6 +20,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -109,20 +110,14 @@ struct dpmaif_rx_queue { + struct dpmaif_bat_request *bat_req; + struct dpmaif_bat_request *bat_frag; + +- wait_queue_head_t rx_wq; +- struct task_struct *rx_thread; +- struct sk_buff_head skb_list; +- unsigned int skb_list_max_len; +- +- struct workqueue_struct *worker; +- struct work_struct dpmaif_rxq_work; +- + atomic_t rx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned int expect_pit_seq; + unsigned int pit_remain_release_cnt; + struct dpmaif_cur_rx_skb_info rx_data_info; ++ struct napi_struct napi; ++ bool sleep_lock_pending; + }; + + struct dpmaif_tx_queue { +@@ -168,7 +163,8 @@ enum dpmaif_txq_state { + struct dpmaif_callbacks { + void (*state_notify)(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int txq_number); +- void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb); ++ void (*recv_skb)(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb, ++ struct napi_struct *napi); + }; + + struct dpmaif_ctrl { +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -45,6 +45,7 @@ + #include "t7xx_dpmaif.h" + #include "t7xx_hif_dpmaif.h" + #include "t7xx_hif_dpmaif_rx.h" ++#include "t7xx_netdev.h" + #include "t7xx_pci.h" + + #define DPMAIF_BAT_COUNT 8192 +@@ -76,43 +77,6 @@ static unsigned int t7xx_normal_pit_bid( + return value; + } + +-static int t7xx_dpmaif_net_rx_push_thread(void *arg) +-{ +- struct dpmaif_rx_queue *q = arg; +- struct dpmaif_ctrl *hif_ctrl; +- struct dpmaif_callbacks *cb; +- +- hif_ctrl = q->dpmaif_ctrl; +- cb = hif_ctrl->callbacks; +- +- while (!kthread_should_stop()) { +- struct sk_buff *skb; +- unsigned long flags; +- +- if (skb_queue_empty(&q->skb_list)) { +- if (wait_event_interruptible(q->rx_wq, +- !skb_queue_empty(&q->skb_list) || +- kthread_should_stop())) +- continue; +- +- if (kthread_should_stop()) +- break; +- } +- +- spin_lock_irqsave(&q->skb_list.lock, flags); +- skb = __skb_dequeue(&q->skb_list); +- spin_unlock_irqrestore(&q->skb_list.lock, flags); +- +- if (!skb) +- continue; +- +- cb->recv_skb(hif_ctrl->t7xx_dev, skb); +- cond_resched(); +- } +- +- return 0; +-} +- + static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int bat_cnt) + { +@@ -726,21 +690,10 @@ static int t7xx_dpmaifq_rx_notify_hw(str + return ret; + } + +-static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb) +-{ +- unsigned long flags; +- +- spin_lock_irqsave(&rxq->skb_list.lock, flags); +- if (rxq->skb_list.qlen < rxq->skb_list_max_len) +- __skb_queue_tail(&rxq->skb_list, skb); +- else +- dev_kfree_skb_any(skb); +- spin_unlock_irqrestore(&rxq->skb_list.lock, flags); +-} +- + static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq, + struct dpmaif_cur_rx_skb_info *skb_info) + { ++ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + struct sk_buff *skb = skb_info->cur_skb; + struct t7xx_skb_cb *skb_cb; + u8 netif_id; +@@ -758,11 +711,11 @@ static void 
t7xx_dpmaif_rx_skb(struct dp + skb_cb = T7XX_SKB_CB(skb); + skb_cb->netif_idx = netif_id; + skb_cb->rx_pkt_type = skb_info->pkt_type; +- t7xx_dpmaif_rx_skb_enqueue(rxq, skb); ++ dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi); + } + + static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt, +- const unsigned long timeout) ++ const unsigned int budget, int *once_more) + { + unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0; + struct device *dev = rxq->dpmaif_ctrl->dev; +@@ -777,13 +730,14 @@ static int t7xx_dpmaif_rx_start(struct d + struct dpmaif_pit *pkt_info; + u32 val; + +- if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout)) ++ if (!skb_info->msg_pit_received && recv_skb_cnt >= budget) + break; + + pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit; + if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) { + dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index); +- return -EAGAIN; ++ *once_more = 1; ++ return recv_skb_cnt; + } + + val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header)); +@@ -817,12 +771,7 @@ static int t7xx_dpmaif_rx_start(struct d + } + + memset(skb_info, 0, sizeof(*skb_info)); +- + recv_skb_cnt++; +- if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) { +- wake_up_all(&rxq->rx_wq); +- recv_skb_cnt = 0; +- } + } + } + +@@ -837,16 +786,13 @@ static int t7xx_dpmaif_rx_start(struct d + } + } + +- if (recv_skb_cnt) +- wake_up_all(&rxq->rx_wq); +- + if (!ret) + ret = t7xx_dpmaifq_rx_notify_hw(rxq); + + if (ret) + return ret; + +- return rx_cnt; ++ return recv_skb_cnt; + } + + static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq) +@@ -863,53 +809,30 @@ static unsigned int t7xx_dpmaifq_poll_pi + return pit_cnt; + } + +-static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, +- const unsigned int q_num, const unsigned int budget) ++static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, ++ const unsigned int q_num, ++ const unsigned int budget, int *once_more) + { + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; +- unsigned long time_limit; + unsigned int cnt; ++ int ret = 0; + +- time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS); +- +- while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) { +- unsigned int rd_cnt; +- int real_cnt; +- +- rd_cnt = min(cnt, budget); +- +- real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit); +- if (real_cnt < 0) +- return real_cnt; +- +- if (real_cnt < cnt) +- return -EAGAIN; +- } +- +- return 0; +-} ++ cnt = t7xx_dpmaifq_poll_pit(rxq); ++ if (!cnt) ++ return ret; + +-static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq) +-{ +- struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; +- int ret; ++ ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more); ++ if (ret < 0) ++ dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret); + +- ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget); +- if (ret < 0) { +- /* Try one more time */ +- queue_work(rxq->worker, &rxq->dpmaif_rxq_work); +- t7xx_dpmaif_clr_ip_busy_sts(hw_info); +- } else { +- t7xx_dpmaif_clr_ip_busy_sts(hw_info); +- t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index); +- } ++ return ret; + } + +-static void t7xx_dpmaif_rxq_work(struct work_struct *work) ++int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget) + { +- struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); +- struct 
dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; +- int ret; ++ struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi); ++ struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev; ++ int ret, once_more = 0, work_done = 0; + + atomic_set(&rxq->rx_processing, 1); + /* Ensure rx_processing is changed to 1 before actually begin RX flow */ +@@ -917,22 +840,52 @@ static void t7xx_dpmaif_rxq_work(struct + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); +- dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); +- return; ++ dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); ++ return work_done; + } + +- ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); +- if (ret < 0 && ret != -EACCES) +- return; ++ if (!rxq->sleep_lock_pending) { ++ pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev); ++ t7xx_pci_disable_sleep(t7xx_dev); ++ } ++ ++ ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire); ++ if (!ret) { ++ napi_complete_done(napi, work_done); ++ rxq->sleep_lock_pending = true; ++ napi_reschedule(napi); ++ return work_done; ++ } ++ ++ rxq->sleep_lock_pending = false; ++ while (work_done < budget) { ++ int each_budget = budget - work_done; ++ int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index, ++ each_budget, &once_more); ++ if (rx_cnt > 0) ++ work_done += rx_cnt; ++ else ++ break; ++ } + +- t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); +- if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) +- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); ++ if (once_more) { ++ napi_gro_flush(napi, false); ++ work_done = budget; ++ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); ++ } else if (work_done < budget) { ++ napi_complete_done(napi, work_done); ++ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); ++ t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index); ++ } else { ++ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); ++ } + +- t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); +- pm_runtime_mark_last_busy(dpmaif_ctrl->dev); +- pm_runtime_put_autosuspend(dpmaif_ctrl->dev); ++ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); ++ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); ++ pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev); + atomic_set(&rxq->rx_processing, 0); ++ ++ return work_done; + } + + void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) +@@ -947,7 +900,7 @@ void t7xx_dpmaif_irq_rx_done(struct dpma + } + + rxq = &dpmaif_ctrl->rxq[qno]; +- queue_work(rxq->worker, &rxq->dpmaif_rxq_work); ++ napi_schedule(&rxq->napi); + } + + static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl, +@@ -1082,50 +1035,14 @@ int t7xx_dpmaif_rxq_init(struct dpmaif_r + int ret; + + ret = t7xx_dpmaif_rx_alloc(queue); +- if (ret < 0) { ++ if (ret < 0) + dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); +- return ret; +- } +- +- INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work); +- +- queue->worker = alloc_workqueue("dpmaif_rx%d_worker", +- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index); +- if (!queue->worker) { +- ret = -ENOMEM; +- goto err_free_rx_buffer; +- } +- +- init_waitqueue_head(&queue->rx_wq); +- skb_queue_head_init(&queue->skb_list); +- queue->skb_list_max_len = queue->bat_req->pkt_buf_sz; +- queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread, +- queue, "dpmaif_rx%d_push", queue->index); +- +- ret = PTR_ERR_OR_ZERO(queue->rx_thread); +- if (ret) +- goto 
err_free_workqueue; +- +- return 0; +- +-err_free_workqueue: +- destroy_workqueue(queue->worker); +- +-err_free_rx_buffer: +- t7xx_dpmaif_rx_buf_free(queue); + + return ret; + } + + void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue) + { +- if (queue->worker) +- destroy_workqueue(queue->worker); +- +- if (queue->rx_thread) +- kthread_stop(queue->rx_thread); +- +- skb_queue_purge(&queue->skb_list); + t7xx_dpmaif_rx_buf_free(queue); + } + +@@ -1188,8 +1105,6 @@ void t7xx_dpmaif_rx_stop(struct dpmaif_c + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; + int timeout, value; + +- flush_work(&rxq->dpmaif_rxq_work); +- + timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, + !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) +@@ -1205,7 +1120,6 @@ static void t7xx_dpmaif_stop_rxq(struct + { + int cnt, j = 0; + +- flush_work(&rxq->dpmaif_rxq_work); + rxq->que_started = false; + + do { +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h +@@ -112,5 +112,6 @@ int t7xx_dpmaif_bat_alloc(const struct d + const enum bat_type buf_type); + void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, + struct dpmaif_bat_request *bat_req); ++int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget); + + #endif /* __T7XX_HIF_DPMA_RX_H__ */ +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -29,6 +30,7 @@ + #include + #include + #include ++#include + #include + + #include "t7xx_hif_dpmaif_rx.h" +@@ -39,13 +41,47 @@ + #include "t7xx_state_monitor.h" + + #define IP_MUX_SESSION_DEFAULT 0 ++#define SBD_PACKET_TYPE_MASK GENMASK(7, 4) ++ ++static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ if (ctlb->is_napi_en) ++ return; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ napi_enable(ctlb->napi[i]); ++ napi_schedule(ctlb->napi[i]); ++ } ++ ctlb->is_napi_en = true; ++} ++ ++static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ if (!ctlb->is_napi_en) ++ return; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ napi_synchronize(ctlb->napi[i]); ++ napi_disable(ctlb->napi[i]); ++ } ++ ++ ctlb->is_napi_en = false; ++} + + static int t7xx_ccmni_open(struct net_device *dev) + { + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb; + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); ++ if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt)) ++ t7xx_ccmni_enable_napi(ccmni_ctl); ++ + atomic_inc(&ccmni->usage); + return 0; + } +@@ -53,8 +89,12 @@ static int t7xx_ccmni_open(struct net_de + static int t7xx_ccmni_close(struct net_device *dev) + { + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); ++ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb; + + atomic_dec(&ccmni->usage); ++ if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt)) ++ t7xx_ccmni_disable_napi(ccmni_ctl); ++ + netif_carrier_off(dev); + netif_tx_disable(dev); + return 0; +@@ -127,6 +167,9 @@ static void t7xx_ccmni_start(struct t7xx + netif_carrier_on(ccmni->dev); + } + } ++ ++ if (atomic_read(&ctlb->napi_usr_refcnt)) ++ t7xx_ccmni_enable_napi(ctlb); + } + + static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb) +@@ -149,6 +192,9 @@ static void t7xx_ccmni_post_stop(struct + struct t7xx_ccmni *ccmni; + int i; + ++ if (atomic_read(&ctlb->napi_usr_refcnt)) ++ t7xx_ccmni_disable_napi(ctlb); ++ + for (i = 0; i < 
ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) +@@ -183,6 +229,9 @@ static void t7xx_ccmni_wwan_setup(struct + dev->features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXCSUM; + ++ dev->features |= NETIF_F_GRO; ++ dev->hw_features |= NETIF_F_GRO; ++ + dev->needs_free_netdev = true; + + dev->type = ARPHRD_NONE; +@@ -190,6 +239,34 @@ static void t7xx_ccmni_wwan_setup(struct + dev->netdev_ops = &ccmni_netdev_ops; + } + ++static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ /* one HW, but shared with multiple net devices, ++ * so add a dummy device for NAPI. ++ */ ++ init_dummy_netdev(&ctlb->dummy_dev); ++ atomic_set(&ctlb->napi_usr_refcnt, 0); ++ ctlb->is_napi_en = false; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi; ++ netif_napi_add(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll, ++ NIC_NAPI_POLL_BUDGET); ++ } ++} ++ ++static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) ++{ ++ int i; ++ ++ for (i = 0; i < RXQ_NUM; i++) { ++ netif_napi_del(ctlb->napi[i]); ++ ctlb->napi[i] = NULL; ++ } ++} ++ + static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, + struct netlink_ext_ack *extack) + { +@@ -311,7 +388,8 @@ static void init_md_status_notifier(stru + t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier); + } + +-static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb) ++static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb, ++ struct napi_struct *napi) + { + struct t7xx_skb_cb *skb_cb; + struct net_device *net_dev; +@@ -321,23 +399,22 @@ static void t7xx_ccmni_recv_skb(struct t + + skb_cb = T7XX_SKB_CB(skb); + netif_id = skb_cb->netif_idx; +- ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id]; ++ ccmni = ccmni_ctlb->ccmni_inst[netif_id]; + if (!ccmni) { + dev_kfree_skb(skb); + return; + } + + net_dev = ccmni->dev; +- skb->dev = net_dev; +- + pkt_type = skb_cb->rx_pkt_type; ++ skb->dev = net_dev; + if (pkt_type == PKT_TYPE_IP6) + skb->protocol = htons(ETH_P_IPV6); + else + skb->protocol = htons(ETH_P_IP); + + skb_len = skb->len; +- netif_rx(skb); ++ napi_gro_receive(napi, skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += skb_len; + } +@@ -404,6 +481,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev + if (!ctlb->hif_ctrl) + return -ENOMEM; + ++ t7xx_init_netdev_napi(ctlb); + init_md_status_notifier(t7xx_dev); + return 0; + } +@@ -419,5 +497,6 @@ void t7xx_ccmni_exit(struct t7xx_pci_dev + ctlb->wwan_is_registered = false; + } + ++ t7xx_uninit_netdev_napi(ctlb); + t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); + } +--- a/drivers/net/wwan/t7xx/t7xx_netdev.h ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.h +@@ -30,6 +30,7 @@ + + #define CCMNI_NETDEV_WDT_TO (1 * HZ) + #define CCMNI_MTU_MAX 3000 ++#define NIC_NAPI_POLL_BUDGET 128 + + struct t7xx_ccmni { + u8 index; +@@ -47,6 +48,10 @@ struct t7xx_ccmni_ctrl { + unsigned int md_sta; + struct t7xx_fsm_notifier md_status_notify; + bool wwan_is_registered; ++ struct net_device dummy_dev; ++ struct napi_struct *napi[RXQ_NUM]; ++ atomic_t napi_usr_refcnt; ++ bool is_napi_en; + }; + + int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev); diff --git a/target/linux/generic/backport-6.1/621-v6.2-05-net-wwan-t7xx-Fix-Runtime-PM-resume-sequence.patch b/target/linux/generic/backport-6.1/621-v6.2-05-net-wwan-t7xx-Fix-Runtime-PM-resume-sequence.patch new file mode 100644 index 0000000000..247a19fe3c --- /dev/null +++ 
b/target/linux/generic/backport-6.1/621-v6.2-05-net-wwan-t7xx-Fix-Runtime-PM-resume-sequence.patch @@ -0,0 +1,154 @@ +From 364d0221f1788e5225006ba7a0026e5968431c29 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Kornel=20Dul=C4=99ba?= +Date: Thu, 26 Jan 2023 13:25:34 +0000 +Subject: [PATCH] net: wwan: t7xx: Fix Runtime PM resume sequence +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Resume device before calling napi_schedule, instead of doing in the napi +poll routine. Polling is done in softrq context. We can't call the PM +resume logic from there as it's blocking and not irq safe. +In order to make it work modify the interrupt handler to be run from irq +handler thread. + +Fixes: 5545b7b9f294 ("net: wwan: t7xx: Add NAPI support") +Signed-off-by: Kornel Dulęba +Signed-off-by: David S. Miller +--- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 11 +++++++- + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 29 +++++++++++++++------- + drivers/net/wwan/t7xx/t7xx_netdev.c | 16 +++++++++++- + 3 files changed, 45 insertions(+), 11 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c +@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handl + } + + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); ++ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data) ++{ ++ struct dpmaif_isr_para *isr_para = data; ++ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl; ++ + t7xx_dpmaif_irq_cb(isr_para); + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + return IRQ_HANDLED; +@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_ir + t7xx_pcie_mac_clear_int(t7xx_dev, int_type); + + t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler; +- t7xx_dev->intr_thread[int_type] = NULL; ++ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread; + t7xx_dev->callback_param[int_type] = isr_para; + + t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type); +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); ++ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); + dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); + return work_done; + } + +- if (!rxq->sleep_lock_pending) { +- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev); ++ if (!rxq->sleep_lock_pending) + t7xx_pci_disable_sleep(t7xx_dev); +- } + + ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire); + if (!ret) { +@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi + napi_complete_done(napi, work_done); + t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); + t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index); ++ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); ++ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); ++ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev); ++ atomic_set(&rxq->rx_processing, 0); + } else { + t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info); + } + +- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev); +- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev); +- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev); +- atomic_set(&rxq->rx_processing, 0); +- + return work_done; + } + + void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) + { + struct dpmaif_rx_queue *rxq; +- int qno; 
++ struct dpmaif_ctrl *ctrl; ++ int qno, ret; + + qno = ffs(que_mask) - 1; + if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { +@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpma + } + + rxq = &dpmaif_ctrl->rxq[qno]; ++ ctrl = rxq->dpmaif_ctrl; ++ /* We need to make sure that the modem has been resumed before ++ * calling napi. This can't be done inside the polling function ++ * as we could be blocked waiting for device to be resumed, ++ * which can't be done from softirq context the poll function ++ * is running in. ++ */ ++ ret = pm_runtime_resume_and_get(ctrl->dev); ++ if (ret < 0 && ret != -EACCES) { ++ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret); ++ return; ++ } + napi_schedule(&rxq->napi); + } + +--- a/drivers/net/wwan/t7xx/t7xx_netdev.c ++++ b/drivers/net/wwan/t7xx/t7xx_netdev.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -45,12 +46,25 @@ + + static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb) + { +- int i; ++ struct dpmaif_ctrl *ctrl; ++ int i, ret; ++ ++ ctrl = ctlb->hif_ctrl; + + if (ctlb->is_napi_en) + return; + + for (i = 0; i < RXQ_NUM; i++) { ++ /* The usage count has to be bumped every time before calling ++ * napi_schedule. It will be decresed in the poll routine, ++ * right after napi_complete_done is called. ++ */ ++ ret = pm_runtime_resume_and_get(ctrl->dev); ++ if (ret < 0) { ++ dev_err(ctrl->dev, "Failed to resume device: %d\n", ++ ret); ++ return; ++ } + napi_enable(ctlb->napi[i]); + napi_schedule(ctlb->napi[i]); + } diff --git a/target/linux/generic/backport-6.1/621-v6.4-06-wwan-core-Support-slicing-in-port-TX-flow-of-WWAN-subsyst.patch b/target/linux/generic/backport-6.1/621-v6.4-06-wwan-core-Support-slicing-in-port-TX-flow-of-WWAN-subsyst.patch new file mode 100644 index 0000000000..3641780f8c --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.4-06-wwan-core-Support-slicing-in-port-TX-flow-of-WWAN-subsyst.patch @@ -0,0 +1,300 @@ +From 36bd28c1cb0dbf48645cfe43159907fb3253b33a Mon Sep 17 00:00:00 2001 +From: haozhe chang +Date: Thu, 16 Mar 2023 17:58:20 +0800 +Subject: [PATCH] wwan: core: Support slicing in port TX flow of WWAN subsystem + +wwan_port_fops_write inputs the SKB parameter to the TX callback of +the WWAN device driver. However, the WWAN device (e.g., t7xx) may +have an MTU less than the size of SKB, causing the TX buffer to be +sliced and copied once more in the WWAN device driver. + +This patch implements the slicing in the WWAN subsystem and gives +the WWAN devices driver the option to slice(by frag_len) or not. By +doing so, the additional memory copy is reduced. + +Meanwhile, this patch gives WWAN devices driver the option to reserve +headroom in fragments for the device-specific metadata. 
+ +Signed-off-by: haozhe chang +Reviewed-by: Loic Poulain +Link: https://lore.kernel.org/r/20230316095826.181904-1-haozhe.chang@mediatek.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/iosm/iosm_ipc_port.c | 3 +- + drivers/net/wwan/mhi_wwan_ctrl.c | 2 +- + drivers/net/wwan/rpmsg_wwan_ctrl.c | 2 +- + drivers/net/wwan/t7xx/t7xx_port_wwan.c | 36 ++++++++-------- + drivers/net/wwan/wwan_core.c | 58 +++++++++++++++++++------- + drivers/net/wwan/wwan_hwsim.c | 2 +- + drivers/usb/class/cdc-wdm.c | 3 +- + include/linux/wwan.h | 11 +++++ + 8 files changed, 81 insertions(+), 36 deletions(-) + +--- a/drivers/net/wwan/iosm/iosm_ipc_port.c ++++ b/drivers/net/wwan/iosm/iosm_ipc_port.c +@@ -63,7 +63,8 @@ struct iosm_cdev *ipc_port_init(struct i + ipc_port->ipc_imem = ipc_imem; + + ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type, +- &ipc_wwan_ctrl_ops, ipc_port); ++ &ipc_wwan_ctrl_ops, NULL, ++ ipc_port); + + return ipc_port; + } +--- a/drivers/net/wwan/mhi_wwan_ctrl.c ++++ b/drivers/net/wwan/mhi_wwan_ctrl.c +@@ -237,7 +237,7 @@ static int mhi_wwan_ctrl_probe(struct mh + + /* Register as a wwan port, id->driver_data contains wwan port type */ + port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data, +- &wwan_pops, mhiwwan); ++ &wwan_pops, NULL, mhiwwan); + if (IS_ERR(port)) { + kfree(mhiwwan); + return PTR_ERR(port); +--- a/drivers/net/wwan/rpmsg_wwan_ctrl.c ++++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c +@@ -129,7 +129,7 @@ static int rpmsg_wwan_ctrl_probe(struct + + /* Register as a wwan port, id.driver_data contains wwan port type */ + port = wwan_create_port(parent, rpdev->id.driver_data, +- &rpmsg_wwan_pops, rpwwan); ++ &rpmsg_wwan_pops, NULL, rpwwan); + if (IS_ERR(port)) + return PTR_ERR(port); + +--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c +@@ -54,13 +54,13 @@ static void t7xx_port_ctrl_stop(struct w + static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) + { + struct t7xx_port *port_private = wwan_port_get_drvdata(port); +- size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU; + const struct t7xx_port_conf *port_conf; ++ struct sk_buff *cur = skb, *cloned; + struct t7xx_fsm_ctl *ctl; + enum md_state md_state; ++ int cnt = 0, ret; + +- len = skb->len; +- if (!len || !port_private->chan_enable) ++ if (!port_private->chan_enable) + return -EINVAL; + + port_conf = port_private->port_conf; +@@ -72,23 +72,21 @@ static int t7xx_port_ctrl_tx(struct wwan + return -ENODEV; + } + +- for (offset = 0; offset < len; offset += chunk_len) { +- struct sk_buff *skb_ccci; +- int ret; +- +- chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header)); +- skb_ccci = t7xx_port_alloc_skb(chunk_len); +- if (!skb_ccci) +- return -ENOMEM; +- +- skb_put_data(skb_ccci, skb->data + offset, chunk_len); +- ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0); ++ while (cur) { ++ cloned = skb_clone(cur, GFP_KERNEL); ++ cloned->len = skb_headlen(cur); ++ ret = t7xx_port_send_skb(port_private, cloned, 0, 0); + if (ret) { +- dev_kfree_skb_any(skb_ccci); ++ dev_kfree_skb(cloned); + dev_err(port_private->dev, "Write error on %s port, %d\n", + port_conf->name, ret); +- return ret; ++ return cnt ? 
cnt + ret : ret; + } ++ cnt += cur->len; ++ if (cur == skb) ++ cur = skb_shinfo(skb)->frag_list; ++ else ++ cur = cur->next; + } + + dev_kfree_skb(skb); +@@ -154,13 +152,17 @@ static int t7xx_port_wwan_disable_chl(st + static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) + { + const struct t7xx_port_conf *port_conf = port->port_conf; ++ unsigned int header_len = sizeof(struct ccci_header); ++ struct wwan_port_caps caps; + + if (state != MD_STATE_READY) + return; + + if (!port->wwan.wwan_port) { ++ caps.frag_len = CLDMA_MTU - header_len; ++ caps.headroom_len = header_len; + port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type, +- &wwan_ops, port); ++ &wwan_ops, &caps, port); + if (IS_ERR(port->wwan.wwan_port)) + dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); + } +--- a/drivers/net/wwan/wwan_core.c ++++ b/drivers/net/wwan/wwan_core.c +@@ -67,6 +67,8 @@ struct wwan_device { + * @rxq: Buffer inbound queue + * @waitqueue: The waitqueue for port fops (read/write/poll) + * @data_lock: Port specific data access serialization ++ * @headroom_len: SKB reserved headroom size ++ * @frag_len: Length to fragment packet + * @at_data: AT port specific data + */ + struct wwan_port { +@@ -79,6 +81,8 @@ struct wwan_port { + struct sk_buff_head rxq; + wait_queue_head_t waitqueue; + struct mutex data_lock; /* Port specific data access serialization */ ++ size_t headroom_len; ++ size_t frag_len; + union { + struct { + struct ktermios termios; +@@ -422,6 +426,7 @@ static int __wwan_port_dev_assign_name(s + struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, ++ struct wwan_port_caps *caps, + void *drvdata) + { + struct wwan_device *wwandev; +@@ -455,6 +460,8 @@ struct wwan_port *wwan_create_port(struc + + port->type = type; + port->ops = ops; ++ port->frag_len = caps ? caps->frag_len : SIZE_MAX; ++ port->headroom_len = caps ? 
caps->headroom_len : 0; + mutex_init(&port->ops_lock); + skb_queue_head_init(&port->rxq); + init_waitqueue_head(&port->waitqueue); +@@ -698,30 +705,53 @@ static ssize_t wwan_port_fops_read(struc + static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf, + size_t count, loff_t *offp) + { ++ struct sk_buff *skb, *head = NULL, *tail = NULL; + struct wwan_port *port = filp->private_data; +- struct sk_buff *skb; ++ size_t frag_len, remain = count; + int ret; + + ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK)); + if (ret) + return ret; + +- skb = alloc_skb(count, GFP_KERNEL); +- if (!skb) +- return -ENOMEM; +- +- if (copy_from_user(skb_put(skb, count), buf, count)) { +- kfree_skb(skb); +- return -EFAULT; +- } ++ do { ++ frag_len = min(remain, port->frag_len); ++ skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL); ++ if (!skb) { ++ ret = -ENOMEM; ++ goto freeskb; ++ } ++ skb_reserve(skb, port->headroom_len); ++ ++ if (!head) { ++ head = skb; ++ } else if (!tail) { ++ skb_shinfo(head)->frag_list = skb; ++ tail = skb; ++ } else { ++ tail->next = skb; ++ tail = skb; ++ } ++ ++ if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) { ++ ret = -EFAULT; ++ goto freeskb; ++ } ++ ++ if (skb != head) { ++ head->data_len += skb->len; ++ head->len += skb->len; ++ head->truesize += skb->truesize; ++ } ++ } while (remain -= frag_len); + +- ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK)); +- if (ret) { +- kfree_skb(skb); +- return ret; +- } ++ ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK)); ++ if (!ret) ++ return count; + +- return count; ++freeskb: ++ kfree_skb(head); ++ return ret; + } + + static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait) +--- a/drivers/net/wwan/wwan_hwsim.c ++++ b/drivers/net/wwan/wwan_hwsim.c +@@ -205,7 +205,7 @@ static struct wwan_hwsim_port *wwan_hwsi + + port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT, + &wwan_hwsim_port_ops, +- port); ++ NULL, port); + if (IS_ERR(port->wwan)) { + err = PTR_ERR(port->wwan); + goto err_free_port; +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -929,7 +929,8 @@ static void wdm_wwan_init(struct wdm_dev + return; + } + +- port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, desc); ++ port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, ++ NULL, desc); + if (IS_ERR(port)) { + dev_err(&intf->dev, "%s: Unable to create WWAN port\n", + dev_name(intf->usb_dev)); +--- a/include/linux/wwan.h ++++ b/include/linux/wwan.h +@@ -62,11 +62,21 @@ struct wwan_port_ops { + poll_table *wait); + }; + ++/** struct wwan_port_caps - The WWAN port capbilities ++ * @frag_len: WWAN port TX fragments length ++ * @headroom_len: WWAN port TX fragments reserved headroom length ++ */ ++struct wwan_port_caps { ++ size_t frag_len; ++ unsigned int headroom_len; ++}; ++ + /** + * wwan_create_port - Add a new WWAN port + * @parent: Device to use as parent and shared by all WWAN ports + * @type: WWAN port type + * @ops: WWAN port operations ++ * @caps: WWAN port capabilities + * @drvdata: Pointer to caller driver data + * + * Allocate and register a new WWAN port. 
The port will be automatically exposed +@@ -84,6 +94,7 @@ struct wwan_port_ops { + struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, ++ struct wwan_port_caps *caps, + void *drvdata); + + /** diff --git a/target/linux/generic/backport-6.1/621-v6.5-07-net-wwan-t7xx-Use-alloc_ordered_workqueue-to-create.patch b/target/linux/generic/backport-6.1/621-v6.5-07-net-wwan-t7xx-Use-alloc_ordered_workqueue-to-create.patch new file mode 100644 index 0000000000..984ea3ab56 --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.5-07-net-wwan-t7xx-Use-alloc_ordered_workqueue-to-create.patch @@ -0,0 +1,121 @@ +From 72b1fe6cc6523908bfc339d07d18cb0f3469a643 Mon Sep 17 00:00:00 2001 +From: Tejun Heo +Date: Thu, 25 May 2023 12:15:29 -1000 +Subject: [PATCH] net: wwan: t7xx: Use alloc_ordered_workqueue() to create + ordered workqueues + +BACKGROUND +========== + +When multiple work items are queued to a workqueue, their execution order +doesn't match the queueing order. They may get executed in any order and +simultaneously. When fully serialized execution - one by one in the queueing +order - is needed, an ordered workqueue should be used which can be created +with alloc_ordered_workqueue(). + +However, alloc_ordered_workqueue() was a later addition. Before it, an +ordered workqueue could be obtained by creating an UNBOUND workqueue with +@max_active==1. This originally was an implementation side-effect which was +broken by 4c16bd327c74 ("workqueue: restore WQ_UNBOUND/max_active==1 to be +ordered"). Because there were users that depended on the ordered execution, +5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered") +made workqueue allocation path to implicitly promote UNBOUND workqueues w/ +@max_active==1 to ordered workqueues. + +While this has worked okay, overloading the UNBOUND allocation interface +this way creates other issues. It's difficult to tell whether a given +workqueue actually needs to be ordered and users that legitimately want a +min concurrency level wq unexpectedly gets an ordered one instead. With +planned UNBOUND workqueue updates to improve execution locality and more +prevalence of chiplet designs which can benefit from such improvements, this +isn't a state we wanna be in forever. + +This patch series audits all callsites that create an UNBOUND workqueue w/ +@max_active==1 and converts them to alloc_ordered_workqueue() as necessary. + +WHAT TO LOOK FOR +================ + +The conversions are from + + alloc_workqueue(WQ_UNBOUND | flags, 1, args..) + +to + + alloc_ordered_workqueue(flags, args...) + +which don't cause any functional changes. If you know that fully ordered +execution is not necessary, please let me know. I'll drop the conversion and +instead add a comment noting the fact to reduce confusion while conversion +is in progress. + +If you aren't fully sure, it's completely fine to let the conversion +through. The behavior will stay exactly the same and we can always +reconsider later. + +As there are follow-up workqueue core changes, I'd really appreciate if the +patch can be routed through the workqueue tree w/ your acks. Thanks. + +Signed-off-by: Tejun Heo +Cc: Chandrashekar Devegowda +Cc: Intel Corporation +Cc: Chiranjeevi Rapolu +Cc: Liu Haijun +Cc: M Chetan Kumar +Cc: Ricardo Martinez +Cc: Loic Poulain +Cc: Sergey Ryazanov +Cc: Johannes Berg +Cc: "David S. 
Miller" +Cc: Eric Dumazet +Cc: Jakub Kicinski +Cc: Paolo Abeni +Cc: netdev@vger.kernel.org +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 +++++++------ + drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 5 +++-- + 2 files changed, 10 insertions(+), 8 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1293,9 +1293,9 @@ int t7xx_cldma_init(struct cldma_ctrl *m + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + md_ctrl->txq[i].worker = +- alloc_workqueue("md_hif%d_tx%d_worker", +- WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), +- 1, md_ctrl->hif_id, i); ++ alloc_ordered_workqueue("md_hif%d_tx%d_worker", ++ WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), ++ md_ctrl->hif_id, i); + if (!md_ctrl->txq[i].worker) + goto err_workqueue; + +@@ -1306,9 +1306,10 @@ int t7xx_cldma_init(struct cldma_ctrl *m + md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); + +- md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", +- WQ_UNBOUND | WQ_MEM_RECLAIM, +- 1, md_ctrl->hif_id, i); ++ md_ctrl->rxq[i].worker = ++ alloc_ordered_workqueue("md_hif%d_rx%d_worker", ++ WQ_MEM_RECLAIM, ++ md_ctrl->hif_id, i); + if (!md_ctrl->rxq[i].worker) + goto err_workqueue; + } +--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +@@ -618,8 +618,9 @@ int t7xx_dpmaif_txq_init(struct dpmaif_t + return ret; + } + +- txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | +- (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); ++ txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker", ++ WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI), ++ txq->index); + if (!txq->worker) + return -ENOMEM; + diff --git a/target/linux/generic/backport-6.1/621-v6.6-08-net-wwan-t7xx-Add-AP-CLDMA.patch b/target/linux/generic/backport-6.1/621-v6.6-08-net-wwan-t7xx-Add-AP-CLDMA.patch new file mode 100644 index 0000000000..3e7d7c2aee --- /dev/null +++ b/target/linux/generic/backport-6.1/621-v6.6-08-net-wwan-t7xx-Add-AP-CLDMA.patch @@ -0,0 +1,482 @@ +From ba2274dcfda859b8a27193e68ad37bfe4da28ddc Mon Sep 17 00:00:00 2001 +From: Jose Ignacio Tornos Martinez +Date: Tue, 11 Jul 2023 08:28:13 +0200 +Subject: [PATCH] net: wwan: t7xx: Add AP CLDMA +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +At this moment with the current status, t7xx is not functional due to +problems like this after connection, if there is no activity: +[ 57.370534] mtk_t7xx 0000:72:00.0: [PM] SAP suspend error: -110 +[ 57.370581] mtk_t7xx 0000:72:00.0: can't suspend + (t7xx_pci_pm_runtime_suspend [mtk_t7xx] returned -110) +because after this, the traffic no longer works. + +The complete series 'net: wwan: t7xx: fw flashing & coredump support' +was reverted because of issues with the pci implementation. +In order to have at least the modem working, it would be enough if just +the first commit of the series is re-applied: +d20ef656f994 net: wwan: t7xx: Add AP CLDMA +With that, the Application Processor would be controlled, correctly +suspended and the commented problems would be fixed (I am testing here +like this with no related issue). + +This commit is independent of the others and not related to the +commented pci implementation for the new features: fw flashing and +coredump collection. 
+ +Use v2 patch version of d20ef656f994 as JinJian Song suggests +(https://patchwork.kernel.org/project/netdevbpf/patch/20230105154215.198828-1-m.chetan.kumar@linux.intel.com/). + +Original text from the commit that would be re-applied: + + d20ef656f994 net: wwan: t7xx: Add AP CLDMA + Author: Haijun Liu + Date: Tue Aug 16 09:53:28 2022 +0530 + + The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to + communicate with AP and Modem processors respectively. So far only + MD-CLDMA was being used, this patch enables AP-CLDMA. + + Rename small Application Processor (sAP) to AP. + + Signed-off-by: Haijun Liu + Co-developed-by: Madhusmita Sahu + Signed-off-by: Madhusmita Sahu + Signed-off-by: Moises Veleta + Signed-off-by: Devegowda Chandrashekar + Signed-off-by: M Chetan Kumar + Reviewed-by: Ilpo Järvinen + Reviewed-by: Sergey Ryazanov + Reviewed-by: Jesse Brandeburg + +Signed-off-by: Jose Ignacio Tornos Martinez +Reviewed-by: Simon Horman +Link: https://lore.kernel.org/r/20230711062817.6108-1-jtornosm@redhat.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 17 +++-- + drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 2 +- + drivers/net/wwan/t7xx/t7xx_mhccif.h | 1 + + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 76 +++++++++++++++++----- + drivers/net/wwan/t7xx/t7xx_modem_ops.h | 2 + + drivers/net/wwan/t7xx/t7xx_port.h | 6 +- + drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 8 ++- + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 18 ++++- + drivers/net/wwan/t7xx/t7xx_reg.h | 2 +- + drivers/net/wwan/t7xx/t7xx_state_monitor.c | 13 +++- + drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 + + 11 files changed, 116 insertions(+), 31 deletions(-) + +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +@@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cld + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + u32 phy_ao_base, phy_pd_base; + +- if (md_ctrl->hif_id != CLDMA_ID_MD) +- return; +- +- phy_ao_base = CLDMA1_AO_BASE; +- phy_pd_base = CLDMA1_PD_BASE; +- hw_info->phy_interrupt_id = CLDMA1_INT; + hw_info->hw_mode = MODE_BIT_64; ++ ++ if (md_ctrl->hif_id == CLDMA_ID_MD) { ++ phy_ao_base = CLDMA1_AO_BASE; ++ phy_pd_base = CLDMA1_PD_BASE; ++ hw_info->phy_interrupt_id = CLDMA1_INT; ++ } else { ++ phy_ao_base = CLDMA0_AO_BASE; ++ phy_pd_base = CLDMA0_PD_BASE; ++ hw_info->phy_interrupt_id = CLDMA0_INT; ++ } ++ + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_ao_base); + hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, +--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h ++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +@@ -34,7 +34,7 @@ + /** + * enum cldma_id - Identifiers for CLDMA HW units. + * @CLDMA_ID_MD: Modem control channel. +- * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). ++ * @CLDMA_ID_AP: Application Processor control channel. + * @CLDMA_NUM: Number of CLDMA HW units available. 
+ */ + enum cldma_id { +--- a/drivers/net/wwan/t7xx/t7xx_mhccif.h ++++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h +@@ -25,6 +25,7 @@ + D2H_INT_EXCEPTION_CLEARQ_DONE | \ + D2H_INT_EXCEPTION_ALLQ_RESET | \ + D2H_INT_PORT_ENUM | \ ++ D2H_INT_ASYNC_AP_HK | \ + D2H_INT_ASYNC_MD_HK) + + void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c +@@ -44,6 +44,7 @@ + #include "t7xx_state_monitor.h" + + #define RT_ID_MD_PORT_ENUM 0 ++#define RT_ID_AP_PORT_ENUM 1 + /* Modem feature query identification code - "ICCC" */ + #define MD_FEATURE_QUERY_ID 0x49434343 + +@@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7x + } + + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); ++ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage); + + if (stage == HIF_EX_INIT) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); +@@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struc + if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) + return -EINVAL; + +- if (i == RT_ID_MD_PORT_ENUM) ++ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM) + t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); + } + +@@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_m + return 0; + } + +-static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, ++static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info, ++ struct t7xx_fsm_ctl *ctl, + enum t7xx_fsm_event_state event_id, + enum t7xx_fsm_event_state err_detect) + { + struct t7xx_fsm_event *event = NULL, *event_next; +- struct t7xx_sys_info *core_info = &md->core_md; + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned long flags; + int ret; +@@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_st + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); + md->core_md.handshake_ongoing = true; +- t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); ++ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); ++} ++ ++static void t7xx_ap_hk_wq(struct work_struct *work) ++{ ++ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work); ++ struct t7xx_fsm_ctl *ctl = md->fsm_ctl; ++ ++ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). 
*/ ++ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT); ++ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); ++ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]); ++ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); ++ md->core_ap.handshake_ongoing = true; ++ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); + } + + void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) + { + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; +- void __iomem *mhccif_base; + unsigned int int_sta; + unsigned long flags; + + switch (evt_id) { + case FSM_PRE_START: +- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); ++ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK | ++ D2H_INT_ASYNC_AP_HK); + break; + + case FSM_START: +@@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_mo + ctl->exp_flg = true; + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK; + } else if (ctl->exp_flg) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; +- } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { +- queue_work(md->handshake_wq, &md->handshake_work); +- md->exp_id &= ~D2H_INT_ASYNC_MD_HK; +- mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; +- iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); +- t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK; + } else { +- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; ++ ++ if (md->exp_id & D2H_INT_ASYNC_MD_HK) { ++ queue_work(md->handshake_wq, &md->handshake_work); ++ md->exp_id &= ~D2H_INT_ASYNC_MD_HK; ++ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ } ++ ++ if (md->exp_id & D2H_INT_ASYNC_AP_HK) { ++ queue_work(md->handshake_wq, &md->ap_handshake_work); ++ md->exp_id &= ~D2H_INT_ASYNC_AP_HK; ++ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); ++ } + } + spin_unlock_irqrestore(&md->exp_lock, flags); + +@@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_mo + + case FSM_READY: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); ++ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); + break; + + default: +@@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc( + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); ++ ++ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq); ++ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK; ++ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |= ++ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); ++ + return md; + } + +@@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t + md->exp_id = 0; + t7xx_fsm_reset(md); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); ++ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]); + t7xx_port_proxy_reset(md->port_prox); + md->md_init_finish = true; + return t7xx_core_reset(md); +@@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + if (ret) + goto err_destroy_hswq; + ++ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev); ++ if (ret) ++ goto err_destroy_hswq; ++ + ret = t7xx_fsm_init(md); + if (ret) + goto err_destroy_hswq; +@@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + if (ret) + goto err_uninit_ccmni; + +- ret = t7xx_port_proxy_init(md); 
++ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]); + if (ret) + goto err_uninit_md_cldma; + ++ ret = t7xx_port_proxy_init(md); ++ if (ret) ++ goto err_uninit_ap_cldma; ++ + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); +- if (ret) /* fsm_uninit flushes cmd queue */ ++ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */ + goto err_uninit_proxy; + + t7xx_md_sys_sw_init(t7xx_dev); +@@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7 + err_uninit_proxy: + t7xx_port_proxy_uninit(md->port_prox); + ++err_uninit_ap_cldma: ++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); ++ + err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + +@@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t + + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_port_proxy_uninit(md->port_prox); ++ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_ccmni_exit(t7xx_dev); + t7xx_fsm_uninit(md); +--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h ++++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h +@@ -66,10 +66,12 @@ struct t7xx_modem { + struct cldma_ctrl *md_ctrl[CLDMA_NUM]; + struct t7xx_pci_dev *t7xx_dev; + struct t7xx_sys_info core_md; ++ struct t7xx_sys_info core_ap; + bool md_init_finish; + bool rgu_irq_asserted; + struct workqueue_struct *handshake_wq; + struct work_struct handshake_work; ++ struct work_struct ap_handshake_work; + struct t7xx_fsm_ctl *fsm_ctl; + struct port_proxy *port_prox; + unsigned int exp_id; +--- a/drivers/net/wwan/t7xx/t7xx_port.h ++++ b/drivers/net/wwan/t7xx/t7xx_port.h +@@ -36,9 +36,13 @@ + /* Channel ID and Message ID definitions. + * The channel number consists of peer_id(15:12) , channel_id(11:0) + * peer_id: +- * 0:reserved, 1: to sAP, 2: to MD ++ * 0:reserved, 1: to AP, 2: to MD + */ + enum port_ch { ++ /* to AP */ ++ PORT_CH_AP_CONTROL_RX = 0x1000, ++ PORT_CH_AP_CONTROL_TX = 0x1001, ++ + /* to MD */ + PORT_CH_CONTROL_RX = 0x2000, + PORT_CH_CONTROL_TX = 0x2001, +--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c ++++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c +@@ -167,8 +167,12 @@ static int control_msg_handler(struct t7 + case CTL_ID_HS2_MSG: + skb_pull(skb, sizeof(*ctrl_msg_h)); + +- if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { +- ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, ++ if (port_conf->rx_ch == PORT_CH_CONTROL_RX || ++ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) { ++ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ? 
++ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
++
++ ret = t7xx_fsm_append_event(ctl, event, skb->data,
+ le32_to_cpu(ctrl_msg_h->data_length));
+ if (ret)
+ dev_err(port->dev, "Failed to append Handshake 2 event");
+--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
++++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+@@ -48,7 +48,7 @@
+ i < (proxy)->port_count; \
+ i++, (p) = &(proxy)->ports[i])
+
+-static const struct t7xx_port_conf t7xx_md_port_conf[] = {
++static const struct t7xx_port_conf t7xx_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UART2_TX,
+ .rx_ch = PORT_CH_UART2_RX,
+@@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_
+ .path_id = CLDMA_ID_MD,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ctrl",
++ }, {
++ .tx_ch = PORT_CH_AP_CONTROL_TX,
++ .rx_ch = PORT_CH_AP_CONTROL_RX,
++ .txq_index = Q_IDX_CTRL,
++ .rxq_index = Q_IDX_CTRL,
++ .path_id = CLDMA_ID_AP,
++ .ops = &ctl_port_ops,
++ .name = "t7xx_ap_ctrl",
+ },
+ };
+
+@@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(st
+ if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
+ md->core_md.ctl_port = port;
+
++ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
++ md->core_ap.ctl_port = port;
++
+ port->t7xx_dev = md->t7xx_dev;
+ port->dev = &md->t7xx_dev->pdev->dev;
+ spin_lock_init(&port->port_update_lock);
+@@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(st
+
+ static int t7xx_proxy_alloc(struct t7xx_modem *md)
+ {
+- unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
++ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct port_proxy *port_prox;
+ int i;
+@@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_
+ port_prox->dev = dev;
+
+ for (i = 0; i < port_count; i++)
+- port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
++ port_prox->ports[i].port_conf = &t7xx_port_conf[i];
+
+ port_prox->port_count = port_count;
+ t7xx_proxy_init_all_ports(md);
+@@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_mod
+ if (ret)
+ return ret;
+
++ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
+ return 0;
+ }
+--- a/drivers/net/wwan/t7xx/t7xx_reg.h
++++ b/drivers/net/wwan/t7xx/t7xx_reg.h
+@@ -56,7 +56,7 @@
+ #define D2H_INT_RESUME_ACK BIT(12)
+ #define D2H_INT_SUSPEND_ACK_AP BIT(13)
+ #define D2H_INT_RESUME_ACK_AP BIT(14)
+-#define D2H_INT_ASYNC_SAP_HK BIT(15)
++#define D2H_INT_ASYNC_AP_HK BIT(15)
+ #define D2H_INT_ASYNC_MD_HK BIT(16)
+
+ /* Register base */
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+@@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t
+ t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
+ t7xx_md_event_notify(md, FSM_START);
+
+- wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
+- HZ * 60);
++ wait_event_interruptible_timeout(ctl->async_hk_wq,
++ (md->core_md.ready && md->core_ap.ready) ||
++ ctl->exp_flg, HZ * 60);
+ dev = &md->t7xx_dev->pdev->dev;
+
+ if (ctl->exp_flg)
+@@ -299,6 +300,13 @@ static int fsm_routine_starting(struct t
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
++ } else if (!md->core_ap.ready) {
++ dev_err(dev, "AP handshake timeout\n");
++ if (md->core_ap.handshake_ongoing)
++ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
++
++ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
++ return -ETIMEDOUT;
+ }
+
+ t7xx_pci_pm_init_late(md->t7xx_dev);
+@@ -335,6 +343,7 @@ static void fsm_routine_start(struct t7x
+ return;
+ }
+
++ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+ }
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+@@ -38,10 +38,12 @@ enum t7xx_fsm_state {
+ enum t7xx_fsm_event_state {
+ FSM_EVENT_INVALID,
+ FSM_EVENT_MD_HS2,
++ FSM_EVENT_AP_HS2,
+ FSM_EVENT_MD_EX,
+ FSM_EVENT_MD_EX_REC_OK,
+ FSM_EVENT_MD_EX_PASS,
+ FSM_EVENT_MD_HS2_EXIT,
++ FSM_EVENT_AP_HS2_EXIT,
+ FSM_EVENT_MAX
+ };
+