kernel: backport MTK T7XX WWAN driver support

fbafc3e621/Documentation/networking/device_drivers/wwan/t7xx.rst

Signed-off-by: Tianling Shen <cnsztl@immortalwrt.org>
Author: Tianling Shen
Date: 2023-12-27 12:42:52 +08:00
Parent: 3f5491001b
Commit: 6ff8a2e28f
38 changed files with 17278 additions and 0 deletions


@@ -1755,6 +1755,24 @@ endef
$(eval $(call KernelPackage,mhi-wwan-mbim))
define KernelPackage/mtk-t7xx
SUBMENU:=$(NETWORK_DEVICES_MENU)
TITLE:=MediaTek PCIe 5G WWAN modem T7xx device
DEPENDS:=@PCI_SUPPORT +kmod-wwan
KCONFIG:=CONFIG_MTK_T7XX
FILES:=$(LINUX_DIR)/drivers/net/wwan/t7xx/mtk_t7xx.ko
AUTOLOAD:=$(call AutoProbe,mtk_t7xx)
endef
define KernelPackage/mtk-t7xx/description
Enables support for the MediaTek PCIe-based 5G WWAN modem (T7xx series) device.
Adapts the WWAN framework and provides a network interface such as wwan0
and tty interfaces such as wwan0at0 (AT protocol), wwan0mbim0
(MBIM protocol), etc.
endef
$(eval $(call KernelPackage,mtk-t7xx))
define KernelPackage/atlantic
SUBMENU:=$(NETWORK_DEVICES_MENU)
TITLE:=Aquantia AQtion 10Gbps Ethernet NIC


@@ -0,0 +1,59 @@
From 2fbdf45d7d26361a0c3ec8833fd96edf0f5812da Mon Sep 17 00:00:00 2001
From: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Date: Fri, 6 May 2022 11:12:57 -0700
Subject: [PATCH] list: Add list_next_entry_circular() and
list_prev_entry_circular()
Add macros to get the next or previous entry, wrapping around if
needed. For example, calling list_next_entry_circular() on the last
element returns the first element in the list.
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/linux/list.h | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -562,6 +562,19 @@ static inline void list_splice_tail_init
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if pos is the last element (returns the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_next_entry_circular(pos, head, member) \
+ (list_is_last(&(pos)->member, head) ? \
+ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
+
+/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -570,6 +583,19 @@ static inline void list_splice_tail_init
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
+ * list_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if pos is the first element (returns the last element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_prev_entry_circular(pos, head, member) \
+ (list_is_first(&(pos)->member, head) ? \
+ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.

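As a quick illustration (not part of the patch), the following sketch walks a ring of buffers with the new helper; struct ring_buf and ring_advance() are hypothetical names, and the list must be non-empty, as the macros require:

#include <linux/list.h>

/* Hypothetical element type used only for this sketch. */
struct ring_buf {
	struct list_head entry;
	void *data;
};

/* Advance the cursor one element, wrapping from the last element back
 * to the first instead of landing on the list head itself.
 */
static struct ring_buf *ring_advance(struct ring_buf *cur,
				     struct list_head *ring)
{
	return list_next_entry_circular(cur, ring, entry);
}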

@@ -0,0 +1,811 @@
From 48cc2f5ef846e76dc3bb1501a4014be18c644c1b Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Fri, 6 May 2022 11:13:01 -0700
Subject: [PATCH] net: wwan: t7xx: Add port proxy infrastructure
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Port-proxy provides a common interface to interact with different types
of ports. Ports export their configuration via `struct t7xx_port` and
operate as defined by `struct port_ops`.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/Makefile | 1 +
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 3 +-
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 14 +-
drivers/net/wwan/t7xx/t7xx_port.h | 132 ++++++
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 452 +++++++++++++++++++++
drivers/net/wwan/t7xx/t7xx_port_proxy.h | 72 ++++
drivers/net/wwan/t7xx/t7xx_state_monitor.c | 5 +
7 files changed, 677 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/wwan/t7xx/t7xx_port.h
create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.c
create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.h
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -10,3 +10,4 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_modem_ops.o \
t7xx_cldma.o \
t7xx_hif_cldma.o \
+ t7xx_port_proxy.o \
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -46,6 +46,7 @@
#include "t7xx_mhccif.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
+#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
@@ -55,7 +56,7 @@
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000
-#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -34,6 +34,8 @@
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
@@ -273,6 +275,7 @@ static void t7xx_md_exception(struct t7x
if (stage == HIF_EX_CLEARQ_DONE) {
/* Give DHL time to flush data */
msleep(PORT_RESET_DELAY_MS);
+ t7xx_port_proxy_reset(md->port_prox);
}
t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
@@ -426,6 +429,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t
md->exp_id = 0;
t7xx_fsm_reset(md);
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_port_proxy_reset(md->port_prox);
md->md_init_finish = true;
return 0;
}
@@ -462,14 +466,21 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
if (ret)
goto err_uninit_fsm;
+ ret = t7xx_port_proxy_init(md);
+ if (ret)
+ goto err_uninit_md_cldma;
+
ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
if (ret) /* fsm_uninit flushes cmd queue */
- goto err_uninit_md_cldma;
+ goto err_uninit_proxy;
t7xx_md_sys_sw_init(t7xx_dev);
md->md_init_finish = true;
return 0;
+err_uninit_proxy:
+ t7xx_port_proxy_uninit(md->port_prox);
+
err_uninit_md_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
@@ -492,6 +503,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t
return;
t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_uninit(md);
destroy_workqueue(md->handshake_wq);
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ */
+
+#ifndef __T7XX_PORT_H__
+#define __T7XX_PORT_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/wwan.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_pci.h"
+
+#define PORT_CH_ID_MASK GENMASK(7, 0)
+
+/* Channel ID and Message ID definitions.
+ * The channel number consists of peer_id (15:12), channel_id (11:0)
+ * peer_id:
+ * 0: reserved, 1: to sAP, 2: to MD
+ */
+enum port_ch {
+ /* to MD */
+ PORT_CH_CONTROL_RX = 0x2000,
+ PORT_CH_CONTROL_TX = 0x2001,
+ PORT_CH_UART1_RX = 0x2006, /* META */
+ PORT_CH_UART1_TX = 0x2008,
+ PORT_CH_UART2_RX = 0x200a, /* AT */
+ PORT_CH_UART2_TX = 0x200c,
+ PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */
+ PORT_CH_MD_LOG_TX = 0x202b,
+ PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */
+ PORT_CH_LB_IT_TX = 0x203f,
+ PORT_CH_STATUS_RX = 0x2043, /* Status events */
+ PORT_CH_MIPC_RX = 0x20ce, /* MIPC */
+ PORT_CH_MIPC_TX = 0x20cf,
+ PORT_CH_MBIM_RX = 0x20d0,
+ PORT_CH_MBIM_TX = 0x20d1,
+ PORT_CH_DSS0_RX = 0x20d2,
+ PORT_CH_DSS0_TX = 0x20d3,
+ PORT_CH_DSS1_RX = 0x20d4,
+ PORT_CH_DSS1_TX = 0x20d5,
+ PORT_CH_DSS2_RX = 0x20d6,
+ PORT_CH_DSS2_TX = 0x20d7,
+ PORT_CH_DSS3_RX = 0x20d8,
+ PORT_CH_DSS3_TX = 0x20d9,
+ PORT_CH_DSS4_RX = 0x20da,
+ PORT_CH_DSS4_TX = 0x20db,
+ PORT_CH_DSS5_RX = 0x20dc,
+ PORT_CH_DSS5_TX = 0x20dd,
+ PORT_CH_DSS6_RX = 0x20de,
+ PORT_CH_DSS6_TX = 0x20df,
+ PORT_CH_DSS7_RX = 0x20e0,
+ PORT_CH_DSS7_TX = 0x20e1,
+};
+
+struct t7xx_port;
+struct port_ops {
+ int (*init)(struct t7xx_port *port);
+ int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb);
+ void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state);
+ void (*uninit)(struct t7xx_port *port);
+ int (*enable_chl)(struct t7xx_port *port);
+ int (*disable_chl)(struct t7xx_port *port);
+};
+
+struct t7xx_port_conf {
+ enum port_ch tx_ch;
+ enum port_ch rx_ch;
+ unsigned char txq_index;
+ unsigned char rxq_index;
+ unsigned char txq_exp_index;
+ unsigned char rxq_exp_index;
+ enum cldma_id path_id;
+ struct port_ops *ops;
+ char *name;
+ enum wwan_port_type port_type;
+};
+
+struct t7xx_port {
+ /* Members not initialized in definition */
+ const struct t7xx_port_conf *port_conf;
+ struct wwan_port *wwan_port;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct device *dev;
+ u16 seq_nums[2]; /* TX/RX sequence numbers */
+ atomic_t usage_cnt;
+ struct list_head entry;
+ struct list_head queue_entry;
+ /* TX and RX flows are asymmetric since ports are multiplexed on
+ * queues.
+ *
+ * TX: data blocks are sent directly to a queue. A port does not
+ * maintain a TX list; it only provides a wait_queue_head for
+ * blocking writes.
+ *
+ * RX: each port uses an RX list to hold packets, allowing the
+ * modem to dispatch RX packets as quickly as possible.
+ */
+ struct sk_buff_head rx_skb_list;
+ spinlock_t port_update_lock; /* Protects port configuration */
+ wait_queue_head_t rx_wq;
+ int rx_length_th;
+ bool chan_enable;
+ struct task_struct *thread;
+};
+
+struct sk_buff *t7xx_port_alloc_skb(int payload);
+int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
+ unsigned int ex_msg);
+
+#endif /* __T7XX_PORT_H__ */
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/wwan.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define Q_IDX_CTRL 0
+#define Q_IDX_MBIM 2
+#define Q_IDX_AT_CMD 5
+
+#define INVALID_SEQ_NUM GENMASK(15, 0)
+
+#define for_each_proxy_port(i, p, proxy) \
+ for (i = 0, (p) = &(proxy)->ports[i]; \
+ i < (proxy)->port_count; \
+ i++, (p) = &(proxy)->ports[i])
+
+static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+};
+
+static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
+{
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ port_conf = port->port_conf;
+ if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
+ return port;
+ }
+
+ return NULL;
+}
+
+static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
+{
+ u32 status = le32_to_cpu(ccci_h->status);
+ u16 seq_num, next_seq_num;
+ bool assert_bit;
+
+ seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
+ next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
+ assert_bit = status & CCCI_H_AST_BIT;
+ if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
+ return next_seq_num;
+
+ if (seq_num != port->seq_nums[MTK_RX])
+ dev_warn_ratelimited(port->dev,
+ "seq num out-of-order %u != %u (header %X, len %X)\n",
+ seq_num, port->seq_nums[MTK_RX],
+ le32_to_cpu(ccci_h->packet_header),
+ le32_to_cpu(ccci_h->packet_len));
+
+ return next_seq_num;
+}
+
+void t7xx_port_proxy_reset(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
+ port->seq_nums[MTK_TX] = 0;
+ }
+}
+
+static int t7xx_port_get_queue_no(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+
+ return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
+ port_conf->txq_exp_index : port_conf->txq_index;
+}
+
+static void t7xx_port_struct_init(struct t7xx_port *port)
+{
+ INIT_LIST_HEAD(&port->entry);
+ INIT_LIST_HEAD(&port->queue_entry);
+ skb_queue_head_init(&port->rx_skb_list);
+ init_waitqueue_head(&port->rx_wq);
+ port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
+ port->seq_nums[MTK_TX] = 0;
+ atomic_set(&port->usage_cnt, 0);
+}
+
+struct sk_buff *t7xx_port_alloc_skb(int payload)
+{
+ struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);
+
+ if (skb)
+ skb_reserve(skb, sizeof(struct ccci_header));
+
+ return skb;
+}
+
+/**
+ * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
+ * @port: port context.
+ * @skb: received skb.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed.
+ */
+int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ if (port->rx_skb_list.qlen >= port->rx_length_th) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ return -ENOBUFS;
+ }
+ __skb_queue_tail(&port->rx_skb_list, skb);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ wake_up_all(&port->rx_wq);
+ return 0;
+}
+
+static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ struct cldma_ctrl *md_ctrl;
+ int ret, tx_qno;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ tx_qno = t7xx_port_get_queue_no(port);
+ ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
+ if (ret)
+ dev_err(port->dev, "Failed to send skb: %d\n", ret);
+
+ return ret;
+}
+
+static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
+ unsigned int pkt_header, unsigned int ex_msg)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct ccci_header *ccci_h;
+ u32 status;
+ int ret;
+
+ ccci_h = skb_push(skb, sizeof(*ccci_h));
+ status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
+ FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
+ ccci_h->status = cpu_to_le32(status);
+ ccci_h->packet_header = cpu_to_le32(pkt_header);
+ ccci_h->packet_len = cpu_to_le32(skb->len);
+ ccci_h->ex_msg = cpu_to_le32(ex_msg);
+
+ ret = t7xx_port_send_raw_skb(port, skb);
+ if (ret)
+ return ret;
+
+ port->seq_nums[MTK_TX]++;
+ return 0;
+}
+
+int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
+ unsigned int ex_msg)
+{
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+ unsigned int fsm_state;
+
+ fsm_state = t7xx_fsm_get_ctl_state(ctl);
+ if (fsm_state != FSM_STATE_PRE_START) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ enum md_state md_state = t7xx_fsm_get_md_state(ctl);
+
+ switch (md_state) {
+ case MD_STATE_EXCEPTION:
+ if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
+ return -EBUSY;
+ break;
+
+ case MD_STATE_WAITING_FOR_HS1:
+ case MD_STATE_WAITING_FOR_HS2:
+ case MD_STATE_STOPPED:
+ case MD_STATE_WAITING_TO_STOP:
+ case MD_STATE_INVALID:
+ return -ENODEV;
+
+ default:
+ break;
+ }
+ }
+
+ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
+}
+
+static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
+ INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);
+
+ for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
+ for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
+ INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
+ }
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ enum cldma_id path_id = port_conf->path_id;
+ u8 ch_id;
+
+ ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
+ list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
+ list_add_tail(&port->queue_entry,
+ &port_prox->queue_ports[path_id][port_conf->rxq_index]);
+ }
+}
+
+static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
+ struct cldma_queue *queue, u16 channel)
+{
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ struct list_head *port_list;
+ struct t7xx_port *port;
+ u8 ch_id;
+
+ ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
+ port_list = &port_prox->rx_ch_ports[ch_id];
+ list_for_each_entry(port, port_list, entry) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (queue->md_ctrl->hif_id == port_conf->path_id &&
+ channel == port_conf->rx_ch)
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * t7xx_port_proxy_recv_skb() - Dispatch received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ * * 0 - Packet consumed.
+ * * -ERROR - Failed to process skb.
+ */
+static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
+ struct device *dev = queue->md_ctrl->dev;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ u16 seq_num, channel;
+ int ret;
+
+ if (!skb)
+ return -EINVAL;
+
+ channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
+ if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
+ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
+ goto drop_skb;
+ }
+
+ port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
+ if (!port) {
+ dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
+ goto drop_skb;
+ }
+
+ seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
+ port_conf = port->port_conf;
+ skb_pull(skb, sizeof(*ccci_h));
+
+ ret = port_conf->ops->recv_skb(port, skb);
+ /* Error indicates to try again later */
+ if (ret) {
+ skb_push(skb, sizeof(*ccci_h));
+ return ret;
+ }
+
+ port->seq_nums[MTK_RX] = seq_num;
+ return 0;
+
+drop_skb:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+/**
+ * t7xx_port_proxy_md_status_notify() - Notify all ports of state.
+ * @port_prox: The port_proxy pointer.
+ * @state: The new modem state.
+ *
+ * Called by t7xx_fsm. Used to dispatch the modem state to all ports
+ * that want to know about MD state transitions.
+ */
+void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->ops->md_state_notify)
+ port_conf->ops->md_state_notify(port, state);
+ }
+}
+
+static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ t7xx_port_struct_init(port);
+
+ port->t7xx_dev = md->t7xx_dev;
+ port->dev = &md->t7xx_dev->pdev->dev;
+ spin_lock_init(&port->port_update_lock);
+ port->chan_enable = false;
+
+ if (port_conf->ops->init)
+ port_conf->ops->init(port);
+ }
+
+ t7xx_proxy_setup_ch_mapping(port_prox);
+}
+
+static int t7xx_proxy_alloc(struct t7xx_modem *md)
+{
+ unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct port_proxy *port_prox;
+ int i;
+
+ port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ GFP_KERNEL);
+ if (!port_prox)
+ return -ENOMEM;
+
+ md->port_prox = port_prox;
+ port_prox->dev = dev;
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
+
+ port_prox->port_count = port_count;
+ t7xx_proxy_init_all_ports(md);
+ return 0;
+}
+
+/**
+ * t7xx_port_proxy_init() - Initialize ports.
+ * @md: Modem.
+ *
+ * Create all port instances.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -ERROR - Error code from failure sub-initializations.
+ */
+int t7xx_port_proxy_init(struct t7xx_modem *md)
+{
+ int ret;
+
+ ret = t7xx_proxy_alloc(md);
+ if (ret)
+ return ret;
+
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
+ return 0;
+}
+
+void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
+{
+ struct t7xx_port *port;
+ int i;
+
+ for_each_proxy_port(i, port, port_prox) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->ops->uninit)
+ port_conf->ops->uninit(port);
+ }
+}
+
+int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
+ bool en_flag)
+{
+ struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
+ const struct t7xx_port_conf *port_conf;
+
+ if (!port)
+ return -EINVAL;
+
+ port_conf = port->port_conf;
+
+ if (en_flag) {
+ if (port_conf->ops->enable_chl)
+ port_conf->ops->enable_chl(port);
+ } else {
+ if (port_conf->ops->disable_chl)
+ port_conf->ops->disable_chl(port);
+ }
+
+ return 0;
+}
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#ifndef __T7XX_PORT_PROXY_H__
+#define __T7XX_PORT_PROXY_H__
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_cldma.h"
+#include "t7xx_modem_ops.h"
+#include "t7xx_port.h"
+
+#define MTK_QUEUES 16
+#define RX_QUEUE_MAXLEN 32
+#define CTRL_QUEUE_MAXLEN 16
+
+struct port_proxy {
+ int port_count;
+ struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
+ struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
+ struct device *dev;
+ struct t7xx_port ports[];
+};
+
+struct ccci_header {
+ __le32 packet_header;
+ __le32 packet_len;
+ __le32 status;
+ __le32 ex_msg;
+};
+
+/* Coupled with HW - indicates if there is data following the CCCI header or not */
+#define CCCI_HEADER_NO_DATA 0xffffffff
+
+#define CCCI_H_AST_BIT BIT(31)
+#define CCCI_H_SEQ_FLD GENMASK(30, 16)
+#define CCCI_H_CHN_FLD GENMASK(15, 0)
+
+#define PORT_INFO_RSRVD GENMASK(31, 16)
+#define PORT_INFO_ENFLG BIT(15)
+#define PORT_INFO_CH_ID GENMASK(14, 0)
+
+#define PORT_ENUM_VER 0
+#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a
+#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5
+#define PORT_ENUM_VER_MISMATCH 0x00657272
+
+void t7xx_port_proxy_reset(struct port_proxy *port_prox);
+void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
+int t7xx_port_proxy_init(struct t7xx_modem *md);
+void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state);
+int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
+ bool en_flag);
+
+#endif /* __T7XX_PORT_PROXY_H__ */
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -37,6 +37,7 @@
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
+#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
@@ -90,6 +91,9 @@ static void fsm_state_notify(struct t7xx
void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
ctl->md_state = state;
+
+ /* Update the ports first, otherwise sending a message during HS2 may fail */
+ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
fsm_state_notify(ctl->md, state);
}
@@ -258,6 +262,7 @@ static void t7xx_fsm_broadcast_ready_sta
ctl->md_state = MD_STATE_READY;
fsm_state_notify(ctl->md, MD_STATE_READY);
+ t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}
static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)

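To make the port contract concrete, here is a hypothetical minimal port built on this infrastructure (a sketch only; the loopback channels exist in enum port_ch, but this port, its queue indexes, and its RX threshold are illustrative assumptions):

static int example_port_init(struct t7xx_port *port)
{
	/* Accept up to 16 skbs on the RX list before
	 * t7xx_port_enqueue_skb() reports -ENOBUFS.
	 */
	port->rx_length_th = 16;
	return 0;
}

static int example_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
{
	/* A non-zero return tells the proxy to retry later without
	 * consuming the skb.
	 */
	return t7xx_port_enqueue_skb(port, skb);
}

static struct port_ops example_port_ops = {
	.init = example_port_init,
	.recv_skb = example_port_recv_skb,
};

static const struct t7xx_port_conf example_port_conf = {
	.tx_ch = PORT_CH_LB_IT_TX,
	.rx_ch = PORT_CH_LB_IT_RX,
	.txq_index = 0,
	.rxq_index = 0,
	.path_id = CLDMA_ID_MD,
	.ops = &example_port_ops,
	.name = "loopback",
};

Adding such an entry to t7xx_md_port_conf[] is all the registration the proxy needs; t7xx_proxy_init_all_ports() and t7xx_proxy_setup_ch_mapping() pick it up automatically.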

@@ -0,0 +1,759 @@
From da45d2566a1d4e260b894ff5d96be64b21c7fa79 Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Fri, 6 May 2022 11:13:02 -0700
Subject: [PATCH] net: wwan: t7xx: Add control port
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The control port implements driver control messages such as modem-host
handshaking, controls port enumeration, and handles exception messages.
The handshaking process between the driver and the modem happens during
the init sequence. The process involves exchanging a list of supported
runtime features to make sure that the modem and the host are ready to
provide proper feature lists, including port enumeration. Further
features can be enabled and controlled through this handshaking process.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/Makefile | 1 +
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 214 +++++++++++++++-
drivers/net/wwan/t7xx/t7xx_modem_ops.h | 3 +
drivers/net/wwan/t7xx/t7xx_port.h | 3 +
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 273 +++++++++++++++++++++
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 40 +++
drivers/net/wwan/t7xx/t7xx_port_proxy.h | 25 ++
drivers/net/wwan/t7xx/t7xx_state_monitor.c | 3 +
drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 +
9 files changed, 561 insertions(+), 3 deletions(-)
create mode 100644 drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -11,3 +11,4 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_cldma.o \
t7xx_hif_cldma.o \
t7xx_port_proxy.o \
+ t7xx_port_ctrl_msg.o \
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -16,6 +16,8 @@
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -26,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_cldma.h"
@@ -39,11 +42,24 @@
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
+#define RT_ID_MD_PORT_ENUM 0
+/* Modem feature query identification code - "ICCC" */
+#define MD_FEATURE_QUERY_ID 0x49434343
+
+#define FEATURE_VER GENMASK(7, 4)
+#define FEATURE_MSK GENMASK(3, 0)
+
#define RGU_RESET_DELAY_MS 10
#define PORT_RESET_DELAY_MS 2000
#define EX_HS_TIMEOUT_MS 5000
#define EX_HS_POLL_DELAY_MS 10
+enum mtk_feature_support_type {
+ MTK_FEATURE_DOES_NOT_EXIST,
+ MTK_FEATURE_NOT_SUPPORTED,
+ MTK_FEATURE_MUST_BE_SUPPORTED,
+};
+
static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
@@ -314,16 +330,205 @@ static void t7xx_md_sys_sw_init(struct t
t7xx_pcie_register_rgu_isr(t7xx_dev);
}
+struct feature_query {
+ __le32 head_pattern;
+ u8 feature_set[FEATURE_COUNT];
+ __le32 tail_pattern;
+};
+
+static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
+{
+ struct feature_query *ft_query;
+ struct sk_buff *skb;
+
+ skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
+ if (!skb)
+ return;
+
+ ft_query = skb_put(skb, sizeof(*ft_query));
+ ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
+ memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
+ ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
+
+ /* Send HS1 message to device */
+ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
+}
+
+static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
+ void *data)
+{
+ struct feature_query *md_feature = data;
+ struct mtk_runtime_feature *rt_feature;
+ unsigned int i, rt_data_len = 0;
+ struct sk_buff *skb;
+
+ /* Parse MD runtime data query */
+ if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
+ le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
+ dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
+ le32_to_cpu(md_feature->head_pattern),
+ le32_to_cpu(md_feature->tail_pattern));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < FEATURE_COUNT; i++) {
+ if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
+ MTK_FEATURE_MUST_BE_SUPPORTED)
+ rt_data_len += sizeof(*rt_feature);
+ }
+
+ skb = t7xx_ctrl_alloc_skb(rt_data_len);
+ if (!skb)
+ return -ENOMEM;
+
+ rt_feature = skb_put(skb, rt_data_len);
+ memset(rt_feature, 0, rt_data_len);
+
+ /* Fill runtime feature */
+ for (i = 0; i < FEATURE_COUNT; i++) {
+ u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);
+
+ if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
+ continue;
+
+ rt_feature->feature_id = i;
+ if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
+ rt_feature->support_info = md_feature->feature_set[i];
+
+ rt_feature++;
+ }
+
+ /* Send HS3 message to device */
+ t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
+ return 0;
+}
+
+static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
+ struct device *dev, void *data, int data_length)
+{
+ enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
+ struct mtk_runtime_feature *rt_feature;
+ int i, offset;
+
+ offset = sizeof(struct feature_query);
+ for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
+ rt_feature = data + offset;
+ offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);
+
+ ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
+ if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
+ continue;
+
+ ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
+ if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
+ return -EINVAL;
+
+ if (i == RT_ID_MD_PORT_ENUM)
+ t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
+ }
+
+ return 0;
+}
+
+static int t7xx_core_reset(struct t7xx_modem *md)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ md->core_md.ready = false;
+
+ if (!ctl) {
+ dev_err(dev, "FSM is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (md->core_md.handshake_ongoing) {
+ int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ md->core_md.handshake_ongoing = false;
+ return 0;
+}
+
+static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
+ enum t7xx_fsm_event_state event_id,
+ enum t7xx_fsm_event_state err_detect)
+{
+ struct t7xx_sys_info *core_info = &md->core_md;
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ struct t7xx_fsm_event *event, *event_next;
+ unsigned long flags;
+ int ret;
+
+ t7xx_prepare_host_rt_data_query(core_info);
+
+ while (!kthread_should_stop()) {
+ bool event_received = false;
+
+ spin_lock_irqsave(&ctl->event_lock, flags);
+ list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
+ if (event->event_id == err_detect) {
+ list_del(&event->entry);
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+ dev_err(dev, "Core handshake error event received\n");
+ goto err_free_event;
+ } else if (event->event_id == event_id) {
+ list_del(&event->entry);
+ event_received = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctl->event_lock, flags);
+
+ if (event_received)
+ break;
+
+ wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ goto err_free_event;
+ }
+
+ if (ctl->exp_flg)
+ goto err_free_event;
+
+ ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
+ if (ret) {
+ dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
+ goto err_free_event;
+ }
+
+ if (ctl->exp_flg)
+ goto err_free_event;
+
+ ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
+ if (ret) {
+ dev_err(dev, "Device failure parsing runtime data: %d", ret);
+ goto err_free_event;
+ }
+
+ core_info->ready = true;
+ core_info->handshake_ongoing = false;
+ wake_up(&ctl->async_hk_wq);
+err_free_event:
+ kfree(event);
+}
+
static void t7xx_md_hk_wq(struct work_struct *work)
{
struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+ /* Clear the HS2 EXIT event appended in core_reset() */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
- md->core_md.ready = true;
- wake_up(&ctl->async_hk_wq);
+ md->core_md.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
@@ -418,6 +623,9 @@ static struct t7xx_modem *t7xx_md_alloc(
return NULL;
INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
return md;
}
@@ -431,7 +639,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
t7xx_port_proxy_reset(md->port_prox);
md->md_init_finish = true;
- return 0;
+ return t7xx_core_reset(md);
}
/**
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -57,6 +57,9 @@ enum md_event_id {
struct t7xx_sys_info {
bool ready;
+ bool handshake_ongoing;
+ u8 feature_set[FEATURE_COUNT];
+ struct t7xx_port *ctl_port;
};
struct t7xx_modem {
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -125,8 +125,11 @@ struct t7xx_port {
};
struct sk_buff *t7xx_port_alloc_skb(int payload);
+struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg);
+int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+ unsigned int ex_msg);
#endif /* __T7XX_PORT_H__ */
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define PORT_MSG_VERSION GENMASK(31, 16)
+#define PORT_MSG_PRT_CNT GENMASK(15, 0)
+
+struct port_msg {
+ __le32 head_pattern;
+ __le32 info;
+ __le32 tail_pattern;
+ __le32 data[];
+};
+
+static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = t7xx_ctrl_alloc_skb(0);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg);
+ if (ret)
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl,
+ struct sk_buff *skb)
+{
+ struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
+ struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
+ enum md_state md_state;
+ int ret = -EINVAL;
+
+ md_state = t7xx_fsm_get_md_state(ctl);
+ if (md_state != MD_STATE_EXCEPTION) {
+ dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n",
+ ctrl_msg_h->ex_msg, md_state);
+ return -EINVAL;
+ }
+
+ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
+ case CTL_ID_MD_EX:
+ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) {
+ dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg);
+ break;
+ }
+
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID);
+ if (ret) {
+ dev_err(dev, "Failed to send exception message to modem\n");
+ break;
+ }
+
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0);
+ if (ret)
+ dev_err(dev, "Failed to append Modem Exception event");
+
+ break;
+
+ case CTL_ID_MD_EX_ACK:
+ if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) {
+ dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg);
+ break;
+ }
+
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0);
+ if (ret)
+ dev_err(dev, "Failed to append Modem Exception Received event");
+
+ break;
+
+ case CTL_ID_MD_EX_PASS:
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0);
+ if (ret)
+ dev_err(dev, "Failed to append Modem Exception Passed event");
+
+ break;
+
+ case CTL_ID_DRV_VER_ERROR:
+ dev_err(dev, "AP/MD driver version mismatch\n");
+ }
+
+ return ret;
+}
+
+/**
+ * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes.
+ * @md: Modem context.
+ * @msg: Message.
+ *
+ * Used to control create/remove device node.
+ *
+ * Return:
+ * * 0 - Success.
+ * * -EFAULT - Message check failure.
+ */
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
+{
+ struct device *dev = &md->t7xx_dev->pdev->dev;
+ unsigned int version, port_count, i;
+ struct port_msg *port_msg = msg;
+
+ version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info));
+ if (version != PORT_ENUM_VER ||
+ le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN ||
+ le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) {
+ dev_err(dev, "Invalid port control message %x:%x:%x\n",
+ version, le32_to_cpu(port_msg->head_pattern),
+ le32_to_cpu(port_msg->tail_pattern));
+ return -EFAULT;
+ }
+
+ port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info));
+ for (i = 0; i < port_count; i++) {
+ u32 port_info = le32_to_cpu(port_msg->data[i]);
+ unsigned int ch_id;
+ bool en_flag;
+
+ ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info);
+ en_flag = port_info & PORT_INFO_ENFLG;
+ if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag))
+ dev_dbg(dev, "Port:%x not found\n", ch_id);
+ }
+
+ return 0;
+}
+
+static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
+ struct ctrl_msg_header *ctrl_msg_h;
+ int ret = 0;
+
+ ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
+ switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
+ case CTL_ID_HS2_MSG:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+
+ if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
+ ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
+ le32_to_cpu(ctrl_msg_h->data_length));
+ if (ret)
+ dev_err(port->dev, "Failed to append Handshake 2 event");
+ }
+
+ dev_kfree_skb_any(skb);
+ break;
+
+ case CTL_ID_MD_EX:
+ case CTL_ID_MD_EX_ACK:
+ case CTL_ID_MD_EX_PASS:
+ case CTL_ID_DRV_VER_ERROR:
+ ret = fsm_ee_message_handler(port, ctl, skb);
+ dev_kfree_skb_any(skb);
+ break;
+
+ case CTL_ID_PORT_ENUM:
+ skb_pull(skb, sizeof(*ctrl_msg_h));
+ ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data);
+ if (!ret)
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0);
+ else
+ ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM,
+ PORT_ENUM_VER_MISMATCH);
+
+ break;
+
+ default:
+ ret = -EINVAL;
+ dev_err(port->dev, "Unknown control message ID to FSM %x\n",
+ le32_to_cpu(ctrl_msg_h->ctrl_msg_id));
+ break;
+ }
+
+ if (ret)
+ dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret);
+
+ return ret;
+}
+
+static int port_ctl_rx_thread(void *arg)
+{
+ while (!kthread_should_stop()) {
+ struct t7xx_port *port = arg;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ if (skb_queue_empty(&port->rx_skb_list) &&
+ wait_event_interruptible_locked_irq(port->rx_wq,
+ !skb_queue_empty(&port->rx_skb_list) ||
+ kthread_should_stop())) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+ continue;
+ }
+ if (kthread_should_stop()) {
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+ break;
+ }
+ skb = __skb_dequeue(&port->rx_skb_list);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+
+ control_msg_handler(port, skb);
+ }
+
+ return 0;
+}
+
+static int port_ctl_init(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name);
+ if (IS_ERR(port->thread)) {
+ dev_err(port->dev, "Failed to start port control thread\n");
+ return PTR_ERR(port->thread);
+ }
+
+ port->rx_length_th = CTRL_QUEUE_MAXLEN;
+ return 0;
+}
+
+static void port_ctl_uninit(struct t7xx_port *port)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ if (port->thread)
+ kthread_stop(port->thread);
+
+ spin_lock_irqsave(&port->rx_wq.lock, flags);
+ port->rx_length_th = 0;
+ while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL)
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&port->rx_wq.lock, flags);
+}
+
+struct port_ops ctl_port_ops = {
+ .init = port_ctl_init,
+ .recv_skb = t7xx_port_enqueue_skb,
+ .uninit = port_ctl_uninit,
+};
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -49,6 +49,15 @@
i++, (p) = &(proxy)->ports[i])
static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_CONTROL_TX,
+ .rx_ch = PORT_CH_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_MD,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ctrl",
+ },
};
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
@@ -129,6 +138,16 @@ struct sk_buff *t7xx_port_alloc_skb(int
return skb;
}
+struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
+{
+ struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));
+
+ if (skb)
+ skb_reserve(skb, sizeof(struct ctrl_msg_header));
+
+ return skb;
+}
+
/**
* t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
* @port: port context.
@@ -194,6 +213,24 @@ static int t7xx_port_send_ccci_skb(struc
return 0;
}
+int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
+ unsigned int ex_msg)
+{
+ struct ctrl_msg_header *ctrl_msg_h;
+ unsigned int msg_len = skb->len;
+ u32 pkt_header = 0;
+
+ ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
+ ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
+ ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
+ ctrl_msg_h->data_length = cpu_to_le32(msg_len);
+
+ if (!msg_len)
+ pkt_header = CCCI_HEADER_NO_DATA;
+
+ return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
+}
+
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg)
{
@@ -359,6 +396,9 @@ static void t7xx_proxy_init_all_ports(st
t7xx_port_struct_init(port);
+ if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
+ md->core_md.ctl_port = port;
+
port->t7xx_dev = md->t7xx_dev;
port->dev = &md->t7xx_dev->pdev->dev;
spin_lock_init(&port->port_update_lock);
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -53,6 +53,27 @@ struct ccci_header {
#define CCCI_H_SEQ_FLD GENMASK(30, 16)
#define CCCI_H_CHN_FLD GENMASK(15, 0)
+struct ctrl_msg_header {
+ __le32 ctrl_msg_id;
+ __le32 ex_msg;
+ __le32 data_length;
+};
+
+/* Control identification numbers for AP<->MD messages */
+#define CTL_ID_HS1_MSG 0x0
+#define CTL_ID_HS2_MSG 0x1
+#define CTL_ID_HS3_MSG 0x2
+#define CTL_ID_MD_EX 0x4
+#define CTL_ID_DRV_VER_ERROR 0x5
+#define CTL_ID_MD_EX_ACK 0x6
+#define CTL_ID_MD_EX_PASS 0x8
+#define CTL_ID_PORT_ENUM 0x9
+
+/* Modem exception check identification code - "EXCP" */
+#define MD_EX_CHK_ID 0x45584350
+/* Modem exception check acknowledge identification code - "EREC" */
+#define MD_EX_CHK_ACK_ID 0x45524543
+
#define PORT_INFO_RSRVD GENMASK(31, 16)
#define PORT_INFO_ENFLG BIT(15)
#define PORT_INFO_CH_ID GENMASK(14, 0)
@@ -62,10 +83,14 @@ struct ccci_header {
#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5
#define PORT_ENUM_VER_MISMATCH 0x00657272
+/* Port operations mapping */
+extern struct port_ops ctl_port_ops;
+
void t7xx_port_proxy_reset(struct port_proxy *port_prox);
void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
int t7xx_port_proxy_init(struct t7xx_modem *md);
void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state);
+int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag);
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -293,6 +293,9 @@ static int fsm_routine_starting(struct t
if (!md->core_md.ready) {
dev_err(dev, "MD handshake timeout\n");
+ if (md->core_md.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
+
fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
return -ETIMEDOUT;
}
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -37,9 +37,11 @@ enum t7xx_fsm_state {
enum t7xx_fsm_event_state {
FSM_EVENT_INVALID,
+ FSM_EVENT_MD_HS2,
FSM_EVENT_MD_EX,
FSM_EVENT_MD_EX_REC_OK,
FSM_EVENT_MD_EX_PASS,
+ FSM_EVENT_MD_HS2_EXIT,
FSM_EVENT_MAX
};

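For reference, a control message travels as a ctrl_msg_header embedded in the CCCI payload, and the minimal sender below mirrors port_ctl_send_msg_to_md() from the patch (a sketch; example_send_hs1() is a made-up name):

/* Wire layout, with both headers prepended by the send helpers:
 *
 *   | ccci_header | ctrl_msg_header | optional payload ... |
 *
 * An empty payload is advertised via CCCI_HEADER_NO_DATA in the CCCI
 * packet header.
 */
static int example_send_hs1(struct t7xx_port *ctl_port)
{
	struct sk_buff *skb;
	int ret;

	/* t7xx_ctrl_alloc_skb() reserves headroom for both headers, so
	 * the skb_push() calls in the send path cannot fail.
	 */
	skb = t7xx_ctrl_alloc_skb(0);
	if (!skb)
		return -ENOMEM;

	ret = t7xx_port_send_ctl_skb(ctl_port, skb, CTL_ID_HS1_MSG, 0);
	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}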

@@ -0,0 +1,253 @@
From 61b7a2916a0ef91be2e9a4b0d0a5bdf9a371cbee Mon Sep 17 00:00:00 2001
From: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Date: Fri, 6 May 2022 11:13:03 -0700
Subject: [PATCH] net: wwan: t7xx: Add AT and MBIM WWAN ports
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Adds AT and MBIM ports to the port proxy infrastructure.
The initialization method is responsible for creating the corresponding
ports using the WWAN framework infrastructure. The implemented WWAN port
operations are start, stop, and TX.
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/Makefile | 1 +
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 20 +++
drivers/net/wwan/t7xx/t7xx_port_proxy.h | 1 +
drivers/net/wwan/t7xx/t7xx_port_wwan.c | 176 ++++++++++++++++++++++++
4 files changed, 198 insertions(+)
create mode 100644 drivers/net/wwan/t7xx/t7xx_port_wwan.c
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -12,3 +12,4 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_hif_cldma.o \
t7xx_port_proxy.o \
t7xx_port_ctrl_msg.o \
+ t7xx_port_wwan.o \
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -50,6 +50,26 @@
static const struct t7xx_port_conf t7xx_md_port_conf[] = {
{
+ .tx_ch = PORT_CH_UART2_TX,
+ .rx_ch = PORT_CH_UART2_RX,
+ .txq_index = Q_IDX_AT_CMD,
+ .rxq_index = Q_IDX_AT_CMD,
+ .txq_exp_index = 0xff,
+ .rxq_exp_index = 0xff,
+ .path_id = CLDMA_ID_MD,
+ .ops = &wwan_sub_port_ops,
+ .name = "AT",
+ .port_type = WWAN_PORT_AT,
+ }, {
+ .tx_ch = PORT_CH_MBIM_TX,
+ .rx_ch = PORT_CH_MBIM_RX,
+ .txq_index = Q_IDX_MBIM,
+ .rxq_index = Q_IDX_MBIM,
+ .path_id = CLDMA_ID_MD,
+ .ops = &wwan_sub_port_ops,
+ .name = "MBIM",
+ .port_type = WWAN_PORT_MBIM,
+ }, {
.tx_ch = PORT_CH_CONTROL_TX,
.rx_ch = PORT_CH_CONTROL_RX,
.txq_index = Q_IDX_CTRL,
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -84,6 +84,7 @@ struct ctrl_msg_header {
#define PORT_ENUM_VER_MISMATCH 0x00657272
/* Port operations mapping */
+extern struct port_ops wwan_sub_port_ops;
extern struct port_ops ctl_port_ops;
void t7xx_port_proxy_reset(struct port_proxy *port_prox);
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+static int t7xx_port_ctrl_start(struct wwan_port *port)
+{
+ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
+
+ if (atomic_read(&port_mtk->usage_cnt))
+ return -EBUSY;
+
+ atomic_inc(&port_mtk->usage_cnt);
+ return 0;
+}
+
+static void t7xx_port_ctrl_stop(struct wwan_port *port)
+{
+ struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
+
+ atomic_dec(&port_mtk->usage_cnt);
+}
+
+static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_fsm_ctl *ctl;
+ enum md_state md_state;
+
+ len = skb->len;
+ if (!len || !port_private->chan_enable)
+ return -EINVAL;
+
+ port_conf = port_private->port_conf;
+ ctl = port_private->t7xx_dev->md->fsm_ctl;
+ md_state = t7xx_fsm_get_md_state(ctl);
+ if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
+ dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ port_conf->name, md_state);
+ return -ENODEV;
+ }
+
+ for (offset = 0; offset < len; offset += chunk_len) {
+ struct sk_buff *skb_ccci;
+ int ret;
+
+ chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
+ skb_ccci = t7xx_port_alloc_skb(chunk_len);
+ if (!skb_ccci)
+ return -ENOMEM;
+
+ skb_put_data(skb_ccci, skb->data + offset, chunk_len);
+ ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
+ if (ret) {
+ dev_kfree_skb_any(skb_ccci);
+ dev_err(port_private->dev, "Write error on %s port, %d\n",
+ port_conf->name, ret);
+ return ret;
+ }
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static const struct wwan_port_ops wwan_ops = {
+ .start = t7xx_port_ctrl_start,
+ .stop = t7xx_port_ctrl_stop,
+ .tx = t7xx_port_ctrl_tx,
+};
+
+static int t7xx_port_wwan_init(struct t7xx_port *port)
+{
+ port->rx_length_th = RX_QUEUE_MAXLEN;
+ return 0;
+}
+
+static void t7xx_port_wwan_uninit(struct t7xx_port *port)
+{
+ if (!port->wwan_port)
+ return;
+
+ port->rx_length_th = 0;
+ wwan_remove_port(port->wwan_port);
+ port->wwan_port = NULL;
+}
+
+static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ if (!atomic_read(&port->usage_cnt) || !port->chan_enable) {
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ dev_kfree_skb_any(skb);
+ dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n",
+ port_conf->name);
+ /* Dropping the skb; the caller should not access it. */
+ return 0;
+ }
+
+ wwan_port_rx(port->wwan_port, skb);
+ return 0;
+}
+
+static int t7xx_port_wwan_enable_chl(struct t7xx_port *port)
+{
+ spin_lock(&port->port_update_lock);
+ port->chan_enable = true;
+ spin_unlock(&port->port_update_lock);
+
+ return 0;
+}
+
+static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
+{
+ spin_lock(&port->port_update_lock);
+ port->chan_enable = false;
+ spin_unlock(&port->port_update_lock);
+
+ return 0;
+}
+
+static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (state != MD_STATE_READY)
+ return;
+
+ if (!port->wwan_port) {
+ port->wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, port);
+ if (IS_ERR(port->wwan_port))
+ dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
+ }
+}
+
+struct port_ops wwan_sub_port_ops = {
+ .init = t7xx_port_wwan_init,
+ .recv_skb = t7xx_port_wwan_recv_skb,
+ .uninit = t7xx_port_wwan_uninit,
+ .enable_chl = t7xx_port_wwan_enable_chl,
+ .disable_chl = t7xx_port_wwan_disable_chl,
+ .md_state_notify = t7xx_port_wwan_md_state_notify,
+};

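The chunking done in t7xx_port_ctrl_tx() above generalizes to any oversized buffer: reserve room for the CCCI header in every fragment and send the fragments one by one. A standalone sketch of the same logic (example_tx_fragmented() is a hypothetical helper, not part of the driver):

static int example_tx_fragmented(struct t7xx_port *port, const u8 *buf,
				 size_t len)
{
	size_t max_payload = CLDMA_MTU - sizeof(struct ccci_header);
	size_t offset, chunk;

	for (offset = 0; offset < len; offset += chunk) {
		struct sk_buff *skb;
		int ret;

		/* Every fragment must fit in one CCCI packet, header
		 * included.
		 */
		chunk = min(len - offset, max_payload);
		skb = t7xx_port_alloc_skb(chunk);
		if (!skb)
			return -ENOMEM;

		skb_put_data(skb, buf + offset, chunk);
		ret = t7xx_port_send_skb(port, skb, 0, 0);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}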

@@ -0,0 +1,567 @@
From 05d19bf500f8281f574713479b04679fa226d0a3 Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Fri, 6 May 2022 11:13:06 -0700
Subject: [PATCH] net: wwan: t7xx: Add WWAN network interface
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Creates the Cross Core Modem Network Interface (CCMNI), which implements
wwan_ops for registration with the WWAN framework. CCMNI also
implements the net_device_ops functions used by the network device.
Network device operations include open, close, start transmission, TX
timeout, and change MTU.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/Makefile | 1 +
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 11 +-
drivers/net/wwan/t7xx/t7xx_netdev.c | 423 +++++++++++++++++++++++++
drivers/net/wwan/t7xx/t7xx_netdev.h | 55 ++++
4 files changed, 489 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.c
create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.h
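
As a rough sketch of the control-block pattern the RX path below relies on:
DPMAIF stores the target interface index in skb->cb, and
t7xx_ccmni_recv_skb() reads it back to pick the net_device. All demo_ names
here are illustrative, not driver API.

#include <linux/skbuff.h>

/* Hypothetical cb layout mirroring how T7XX_SKB_CB() is used. */
struct demo_skb_cb {
        u8 netif_idx;           /* selects ccmni_inst[netif_idx] on RX */
        u8 rx_pkt_type;         /* PKT_TYPE_IP4/IP6 style discriminator */
};

#define DEMO_SKB_CB(skb)        ((struct demo_skb_cb *)(skb)->cb)

static void demo_tag_rx_skb(struct sk_buff *skb, u8 idx, u8 type)
{
        DEMO_SKB_CB(skb)->netif_idx = idx;
        DEMO_SKB_CB(skb)->rx_pkt_type = type;
}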
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -17,3 +17,4 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_hif_dpmaif_tx.o \
t7xx_hif_dpmaif_rx.o \
t7xx_dpmaif.o \
+ t7xx_netdev.o
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -35,6 +35,7 @@
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
+#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
@@ -670,10 +671,14 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
if (ret)
goto err_destroy_hswq;
- ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
+ ret = t7xx_ccmni_init(t7xx_dev);
if (ret)
goto err_uninit_fsm;
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
+ if (ret)
+ goto err_uninit_ccmni;
+
ret = t7xx_port_proxy_init(md);
if (ret)
goto err_uninit_md_cldma;
@@ -692,6 +697,9 @@ err_uninit_proxy:
err_uninit_md_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+err_uninit_ccmni:
+ t7xx_ccmni_exit(t7xx_dev);
+
err_uninit_fsm:
t7xx_fsm_uninit(md);
@@ -713,6 +721,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t
t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_ccmni_exit(t7xx_dev);
t7xx_fsm_uninit(md);
destroy_workqueue(md->handshake_wq);
}
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Eliot Lee <eliot.lee@intel.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/wwan.h>
+#include <net/pkt_sched.h>
+
+#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_hif_dpmaif_tx.h"
+#include "t7xx_netdev.h"
+#include "t7xx_pci.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define IP_MUX_SESSION_DEFAULT 0
+
+static int t7xx_ccmni_open(struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+ atomic_inc(&ccmni->usage);
+ return 0;
+}
+
+static int t7xx_ccmni_close(struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+
+ atomic_dec(&ccmni->usage);
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ return 0;
+}
+
+static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
+ unsigned int txq_number)
+{
+ struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
+ struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);
+
+ skb_cb->netif_idx = ccmni->index;
+
+ if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
+ return NETDEV_TX_BUSY;
+
+ return 0;
+}
+
+static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ int skb_len = skb->len;
+
+ /* If MTU is changed or there is no headroom, drop the packet */
+ if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
+ return NETDEV_TX_BUSY;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+}
+
+static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+
+ dev->stats.tx_errors++;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_tx_wake_all_queues(dev);
+}
+
+static const struct net_device_ops ccmni_netdev_ops = {
+ .ndo_open = t7xx_ccmni_open,
+ .ndo_stop = t7xx_ccmni_close,
+ .ndo_start_xmit = t7xx_ccmni_start_xmit,
+ .ndo_tx_timeout = t7xx_ccmni_tx_timeout,
+};
+
+static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0) {
+ netif_tx_start_all_queues(ccmni->dev);
+ netif_carrier_on(ccmni->dev);
+ }
+ }
+}
+
+static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_tx_disable(ccmni->dev);
+ }
+}
+
+static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct t7xx_ccmni *ccmni;
+ int i;
+
+ for (i = 0; i < ctlb->nic_dev_num; i++) {
+ ccmni = ctlb->ccmni_inst[i];
+ if (!ccmni)
+ continue;
+
+ if (atomic_read(&ccmni->usage) > 0)
+ netif_carrier_off(ccmni->dev);
+ }
+}
+
+static void t7xx_ccmni_wwan_setup(struct net_device *dev)
+{
+ dev->hard_header_len += sizeof(struct ccci_header);
+
+ dev->mtu = ETH_DATA_LEN;
+ dev->max_mtu = CCMNI_MTU_MAX;
+ BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);
+
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
+
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ dev->features = NETIF_F_VLAN_CHALLENGED;
+
+ dev->features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_SG;
+
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_HW_CSUM;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM;
+
+ dev->needs_free_netdev = true;
+
+ dev->type = ARPHRD_NONE;
+
+ dev->netdev_ops = &ccmni_netdev_ops;
+}
+
+static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
+ struct netlink_ext_ack *extack)
+{
+ struct t7xx_ccmni_ctrl *ctlb = ctxt;
+ struct t7xx_ccmni *ccmni;
+ int ret;
+
+ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
+ return -EINVAL;
+
+ ccmni = wwan_netdev_drvpriv(dev);
+ ccmni->index = if_id;
+ ccmni->ctlb = ctlb;
+ ccmni->dev = dev;
+ atomic_set(&ccmni->usage, 0);
+ ctlb->ccmni_inst[if_id] = ccmni;
+
+ ret = register_netdevice(dev);
+ if (ret)
+ return ret;
+
+ netif_device_attach(dev);
+ return 0;
+}
+
+static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
+{
+ struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ctlb = ctxt;
+ u8 if_id = ccmni->index;
+
+ if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
+ return;
+
+ if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
+ return;
+
+ unregister_netdevice(dev);
+}
+
+static const struct wwan_ops ccmni_wwan_ops = {
+ .priv_size = sizeof(struct t7xx_ccmni),
+ .setup = t7xx_ccmni_wwan_setup,
+ .newlink = t7xx_ccmni_wwan_newlink,
+ .dellink = t7xx_ccmni_wwan_dellink,
+};
+
+static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
+{
+ struct device *dev = ctlb->hif_ctrl->dev;
+ int ret;
+
+ if (ctlb->wwan_is_registered)
+ return 0;
+
+ /* WWAN core will create a netdev for the default IP MUX channel */
+ ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
+ if (ret < 0) {
+ dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
+ return ret;
+ }
+
+ ctlb->wwan_is_registered = true;
+ return 0;
+}
+
+static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
+{
+ struct t7xx_ccmni_ctrl *ctlb = para;
+ struct device *dev;
+ int ret = 0;
+
+ dev = ctlb->hif_ctrl->dev;
+ ctlb->md_sta = state;
+
+ switch (state) {
+ case MD_STATE_READY:
+ ret = t7xx_ccmni_register_wwan(ctlb);
+ if (!ret)
+ t7xx_ccmni_start(ctlb);
+ break;
+
+ case MD_STATE_EXCEPTION:
+ case MD_STATE_STOPPED:
+ t7xx_ccmni_pre_stop(ctlb);
+
+ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
+ if (ret < 0)
+ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
+
+ t7xx_ccmni_post_stop(ctlb);
+ break;
+
+ case MD_STATE_WAITING_FOR_HS1:
+ case MD_STATE_WAITING_TO_STOP:
+ ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
+ if (ret < 0)
+ dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+ struct t7xx_fsm_notifier *md_status_notifier;
+
+ md_status_notifier = &ctlb->md_status_notify;
+ INIT_LIST_HEAD(&md_status_notifier->entry);
+ md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
+ md_status_notifier->data = ctlb;
+
+ t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
+}
+
+static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
+{
+ struct t7xx_skb_cb *skb_cb;
+ struct net_device *net_dev;
+ struct t7xx_ccmni *ccmni;
+ int pkt_type, skb_len;
+ u8 netif_id;
+
+ skb_cb = T7XX_SKB_CB(skb);
+ netif_id = skb_cb->netif_idx;
+ ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
+ if (!ccmni) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ net_dev = ccmni->dev;
+ skb->dev = net_dev;
+
+ pkt_type = skb_cb->rx_pkt_type;
+ if (pkt_type == PKT_TYPE_IP6)
+ skb->protocol = htons(ETH_P_IPV6);
+ else
+ skb->protocol = htons(ETH_P_IP);
+
+ skb_len = skb->len;
+ netif_rx(skb);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += skb_len;
+}
+
+static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+{
+ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct netdev_queue *net_queue;
+
+ if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
+ net_queue = netdev_get_tx_queue(ccmni->dev, qno);
+ if (netif_tx_queue_stopped(net_queue))
+ netif_tx_wake_queue(net_queue);
+ }
+}
+
+static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
+{
+ struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct netdev_queue *net_queue;
+
+ if (atomic_read(&ccmni->usage) > 0) {
+ netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
+ net_queue = netdev_get_tx_queue(ccmni->dev, qno);
+ netif_tx_stop_queue(net_queue);
+ }
+}
+
+static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
+ enum dpmaif_txq_state state, int qno)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+
+ if (ctlb->md_sta != MD_STATE_READY)
+ return;
+
+ if (!ctlb->ccmni_inst[0]) {
+ dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
+ return;
+ }
+
+ if (state == DMPAIF_TXQ_STATE_IRQ)
+ t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
+ else if (state == DMPAIF_TXQ_STATE_FULL)
+ t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
+}
+
+int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ struct t7xx_ccmni_ctrl *ctlb;
+
+ ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
+ if (!ctlb)
+ return -ENOMEM;
+
+ t7xx_dev->ccmni_ctlb = ctlb;
+ ctlb->t7xx_dev = t7xx_dev;
+ ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
+ ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
+ ctlb->nic_dev_num = NIC_DEV_DEFAULT;
+
+ ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
+ if (!ctlb->hif_ctrl)
+ return -ENOMEM;
+
+ init_md_status_notifier(t7xx_dev);
+ return 0;
+}
+
+void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
+
+ t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);
+
+ if (ctlb->wwan_is_registered) {
+ wwan_unregister_ops(&t7xx_dev->pdev->dev);
+ ctlb->wwan_is_registered = false;
+ }
+
+ t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
+}
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021, MediaTek Inc.
+ * Copyright (c) 2021-2022, Intel Corporation.
+ *
+ * Authors:
+ * Haijun Liu <haijun.liu@mediatek.com>
+ * Moises Veleta <moises.veleta@intel.com>
+ *
+ * Contributors:
+ * Amir Hanania <amir.hanania@intel.com>
+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
+ */
+
+#ifndef __T7XX_NETDEV_H__
+#define __T7XX_NETDEV_H__
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "t7xx_hif_dpmaif.h"
+#include "t7xx_pci.h"
+#include "t7xx_state_monitor.h"
+
+#define RXQ_NUM DPMAIF_RXQ_NUM
+#define NIC_DEV_MAX 21
+#define NIC_DEV_DEFAULT 2
+
+#define CCMNI_NETDEV_WDT_TO (1 * HZ)
+#define CCMNI_MTU_MAX 3000
+
+struct t7xx_ccmni {
+ u8 index;
+ atomic_t usage;
+ struct net_device *dev;
+ struct t7xx_ccmni_ctrl *ctlb;
+};
+
+struct t7xx_ccmni_ctrl {
+ struct t7xx_pci_dev *t7xx_dev;
+ struct dpmaif_ctrl *hif_ctrl;
+ struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX];
+ struct dpmaif_callbacks callbacks;
+ unsigned int nic_dev_num;
+ unsigned int md_sta;
+ struct t7xx_fsm_notifier md_status_notify;
+ bool wwan_is_registered;
+};
+
+int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev);
+
+#endif /* __T7XX_NETDEV_H__ */

View File

@@ -0,0 +1,919 @@
From 46e8f49ed7b3063f51e28f3ea2084b3da29c1503 Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Fri, 6 May 2022 11:13:07 -0700
Subject: [PATCH] net: wwan: t7xx: Introduce power management
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Implements the suspend, resume, freeze, thaw, poweroff, and restore
`dev_pm_ops` callbacks.
From the host point of view, the t7xx driver is one entity. But the
device has several modules that need to be addressed in different ways
during power management (PM) flows.
The driver uses the term 'PM entities' to refer to the 2 DPMA and
2 CLDMA HW blocks that need to be managed during PM flows.
When a dev_pm_ops function is called, the PM entities list is iterated
and the matching function is called for each entry in the list (see the
sketch after the file list below).
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 123 +++++-
drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 1 +
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 90 +++++
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 1 +
drivers/net/wwan/t7xx/t7xx_mhccif.c | 17 +
drivers/net/wwan/t7xx/t7xx_pci.c | 421 +++++++++++++++++++++
drivers/net/wwan/t7xx/t7xx_pci.h | 46 +++
drivers/net/wwan/t7xx/t7xx_state_monitor.c | 2 +
8 files changed, 700 insertions(+), 1 deletion(-)
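
A condensed sketch of the iteration described above, simplified from
__t7xx_pci_pm_suspend() in the diff below; the real code also handles
register writes, MHCCIF suspend requests, and rollback on error.

/* Simplified: walk the registered PM entities and suspend each one. */
static int demo_suspend_entities(struct t7xx_pci_dev *t7xx_dev)
{
        struct md_pm_entity *entity;
        int ret;

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (!entity->suspend)
                        continue;

                ret = entity->suspend(t7xx_dev, entity->entity_param);
                if (ret)
                        return ret;     /* real code resumes what it already suspended */
        }

        return 0;
}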
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1076,6 +1076,120 @@ int t7xx_cldma_alloc(enum cldma_id hif_i
return 0;
}
+static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+ int qno_t;
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_restore(hw_info);
+ for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
+ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
+ MTK_TX);
+ t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
+ MTK_RX);
+ }
+ t7xx_cldma_enable_irq(md_ctrl);
+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ unsigned long flags;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
+
+ return 0;
+}
+
+static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
+ t7xx_cldma_clear_ip_busy(hw_info);
+ t7xx_cldma_disable_irq(md_ctrl);
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+}
+
+static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
+{
+ struct cldma_ctrl *md_ctrl = entity_param;
+ struct t7xx_cldma_hw *hw_info;
+ unsigned long flags;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
+
+ hw_info = &md_ctrl->hw_info;
+
+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
+ md_ctrl->txq_started = 0;
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ return 0;
+}
+
+static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
+{
+ md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
+ if (!md_ctrl->pm_entity)
+ return -ENOMEM;
+
+ md_ctrl->pm_entity->entity_param = md_ctrl;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD)
+ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
+ else
+ md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
+
+ md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
+ md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
+ md_ctrl->pm_entity->resume = t7xx_cldma_resume;
+ md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
+
+ return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
+}
+
+static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
+{
+ if (!md_ctrl->pm_entity)
+ return -EINVAL;
+
+ t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
+ kfree(md_ctrl->pm_entity);
+ md_ctrl->pm_entity = NULL;
+ return 0;
+}
+
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
@@ -1126,6 +1240,7 @@ static void t7xx_cldma_destroy_wqs(struc
* t7xx_cldma_init() - Initialize CLDMA.
* @md_ctrl: CLDMA context structure.
*
+ * Allocate and initialize device power management entity.
* Initialize HIF TX/RX queue structure.
* Register CLDMA callback ISR with PCIe driver.
*
@@ -1136,12 +1251,16 @@ static void t7xx_cldma_destroy_wqs(struc
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
- int i;
+ int ret, i;
md_ctrl->txq_active = 0;
md_ctrl->rxq_active = 0;
md_ctrl->is_late_init = false;
+ ret = t7xx_cldma_pm_init(md_ctrl);
+ if (ret)
+ return ret;
+
spin_lock_init(&md_ctrl->cldma_lock);
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
@@ -1176,6 +1295,7 @@ int t7xx_cldma_init(struct cldma_ctrl *m
err_workqueue:
t7xx_cldma_destroy_wqs(md_ctrl);
+ t7xx_cldma_pm_uninit(md_ctrl);
return -ENOMEM;
}
@@ -1190,4 +1310,5 @@ void t7xx_cldma_exit(struct cldma_ctrl *
t7xx_cldma_stop(md_ctrl);
t7xx_cldma_late_release(md_ctrl);
t7xx_cldma_destroy_wqs(md_ctrl);
+ t7xx_cldma_pm_uninit(md_ctrl);
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -98,6 +98,7 @@ struct cldma_ctrl {
struct dma_pool *gpd_dmapool;
struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
+ struct md_pm_entity *pm_entity;
struct t7xx_cldma_hw hw_info;
bool is_late_init;
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -398,6 +398,90 @@ static int t7xx_dpmaif_stop(struct dpmai
return 0;
}
+static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+ t7xx_dpmaif_tx_stop(dpmaif_ctrl);
+ t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_disable_irq(dpmaif_ctrl);
+ t7xx_dpmaif_rx_stop(dpmaif_ctrl);
+ return 0;
+}
+
+static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ int qno;
+
+ for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
+ t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
+}
+
+static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct dpmaif_rx_queue *rxq;
+ struct dpmaif_tx_queue *txq;
+ unsigned int que_cnt;
+
+ for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
+ txq = &dpmaif_ctrl->txq[que_cnt];
+ txq->que_started = true;
+ }
+
+ for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
+ rxq = &dpmaif_ctrl->rxq[que_cnt];
+ rxq->que_started = true;
+ }
+}
+
+static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
+{
+ struct dpmaif_ctrl *dpmaif_ctrl = param;
+
+ if (!dpmaif_ctrl)
+ return 0;
+
+ t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
+ t7xx_dpmaif_enable_irq(dpmaif_ctrl);
+ t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
+ t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
+ wake_up(&dpmaif_ctrl->tx_wq);
+ return 0;
+}
+
+static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+ int ret;
+
+ INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
+ dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
+ dpmaif_pm_entity->suspend_late = NULL;
+ dpmaif_pm_entity->resume_early = NULL;
+ dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
+ dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
+ dpmaif_pm_entity->entity_param = dpmaif_ctrl;
+
+ ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+ if (ret)
+ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+ return ret;
+}
+
+static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
+{
+ struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
+ int ret;
+
+ ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
+
+ return ret;
+}
+
int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
{
int ret = 0;
@@ -461,11 +545,16 @@ struct dpmaif_ctrl *t7xx_dpmaif_hif_init
dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
+ ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
+ if (ret)
+ return NULL;
+
t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
if (ret) {
+ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
return NULL;
}
@@ -478,6 +567,7 @@ void t7xx_dpmaif_hif_exit(struct dpmaif_
{
if (dpmaif_ctrl->dpmaif_sw_init_done) {
t7xx_dpmaif_stop(dpmaif_ctrl);
+ t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
t7xx_dpmaif_sw_release(dpmaif_ctrl);
dpmaif_ctrl->dpmaif_sw_init_done = false;
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
@@ -174,6 +174,7 @@ struct dpmaif_callbacks {
struct dpmaif_ctrl {
struct device *dev;
struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity dpmaif_pm_entity;
enum dpmaif_state state;
bool dpmaif_sw_init_done;
struct dpmaif_hw_info hw_info;
--- a/drivers/net/wwan/t7xx/t7xx_mhccif.c
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c
@@ -24,6 +24,11 @@
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
+#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \
+ D2H_INT_RESUME_ACK | \
+ D2H_INT_SUSPEND_ACK_AP | \
+ D2H_INT_RESUME_ACK_AP)
+
static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask)
{
void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
@@ -53,6 +58,18 @@ static irqreturn_t t7xx_mhccif_isr_threa
}
t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
+
+ if (int_status & D2H_INT_SR_ACK)
+ complete(&t7xx_dev->pm_sr_ack);
+
+ iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+
+ int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
+ if (!int_status) {
+ val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
+ iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ }
+
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
return IRQ_HANDLED;
}
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -18,23 +18,438 @@
#include <linux/atomic.h>
#include <linux/bits.h>
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_wakeup.h>
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
+#include "t7xx_state_monitor.h"
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
+#define PM_ACK_TIMEOUT_MS 1500
+#define PM_RESOURCE_POLL_TIMEOUT_US 10000
+#define PM_RESOURCE_POLL_STEP_US 100
+
+enum t7xx_pm_state {
+ MTK_PM_EXCEPTION,
+ MTK_PM_INIT, /* Device initialized, but handshake not completed */
+ MTK_PM_SUSPENDED,
+ MTK_PM_RESUMED,
+};
+
+static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
+{
+ int ret, val;
+
+ ret = read_poll_timeout(ioread32, val,
+ (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
+ PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
+ IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (ret == -ETIMEDOUT)
+ dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");
+
+ return ret;
+}
+
+static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct pci_dev *pdev = t7xx_dev->pdev;
+
+ INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
+ mutex_init(&t7xx_dev->md_pm_entity_mtx);
+ init_completion(&t7xx_dev->pm_sr_ack);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+
+ device_init_wakeup(&pdev->dev, true);
+ dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
+ DPM_FLAG_NO_DIRECT_COMPLETE);
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+
+ return t7xx_wait_pm_config(t7xx_dev);
+}
+
+void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* Enable the PCIe resource lock only after MD deep sleep is done */
+ t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_SUSPEND_ACK |
+ D2H_INT_RESUME_ACK |
+ D2H_INT_SUSPEND_ACK_AP |
+ D2H_INT_RESUME_ACK_AP);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+}
+
+static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
+{
+ /* The device is kept in FSM re-init flow
+ * so just roll back PM setting to the init setting.
+ */
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ return t7xx_wait_pm_config(t7xx_dev);
+}
+
+void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
+{
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ t7xx_wait_pm_config(t7xx_dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
+}
+
+int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
+{
+ struct md_pm_entity *entity;
+
+ mutex_lock(&t7xx_dev->md_pm_entity_mtx);
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->id == pm_entity->id) {
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return -EEXIST;
+ }
+ }
+
+ list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return 0;
+}
+
+int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
+{
+ struct md_pm_entity *entity, *tmp_entity;
+
+ mutex_lock(&t7xx_dev->md_pm_entity_mtx);
+ list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->id == pm_entity->id) {
+ list_del(&pm_entity->entity);
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
+
+ return -ENXIO;
+}
+
+static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
+{
+ unsigned long wait_ret;
+
+ reinit_completion(&t7xx_dev->pm_sr_ack);
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
+ wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
+ msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
+ if (!wait_ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
+{
+ enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity *entity;
+ int ret;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
+ return -EFAULT;
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ ret = t7xx_wait_pm_config(t7xx_dev);
+ if (ret) {
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return ret;
+ }
+
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_dev->rgu_pci_irq_en = false;
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (!entity->suspend)
+ continue;
+
+ ret = entity->suspend(t7xx_dev, entity->entity_param);
+ if (ret) {
+ entity_id = entity->id;
+ dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
+ goto abort_suspend;
+ }
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
+ if (ret) {
+ dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
+ goto abort_suspend;
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
+ if (ret) {
+ t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
+ dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
+ goto abort_suspend;
+ }
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->suspend_late)
+ entity->suspend_late(t7xx_dev, entity->entity_param);
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return 0;
+
+abort_suspend:
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity_id == entity->id)
+ break;
+
+ if (entity->resume)
+ entity->resume(t7xx_dev, entity->entity_param);
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ return ret;
+}
+
+static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
+{
+ t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
+
+ /* Disable interrupt first and let the IPs enable them */
+ iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);
+
+ /* Device disables PCIe interrupts during resume and
+ * following function will re-enable PCIe interrupts.
+ */
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+ t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
+}
+
+static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
+{
+ int ret;
+
+ ret = pcim_enable_device(t7xx_dev->pdev);
+ if (ret)
+ return ret;
+
+ t7xx_pcie_mac_atr_init(t7xx_dev);
+ t7xx_pcie_interrupt_reinit(t7xx_dev);
+
+ if (is_d3) {
+ t7xx_mhccif_init(t7xx_dev);
+ return t7xx_pci_pm_reinit(t7xx_dev);
+ }
+
+ return 0;
+}
+
+static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
+{
+ struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret = -EINVAL;
+
+ switch (event) {
+ case FSM_CMD_STOP:
+ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ break;
+
+ case FSM_CMD_START:
+ t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
+ t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret)
+ dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);
+
+ return ret;
+}
+
+static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct md_pm_entity *entity;
+ u32 prev_state;
+ int ret = 0;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ return 0;
+ }
+
+ t7xx_pcie_mac_interrupts_en(t7xx_dev);
+ prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);
+
+ if (state_check) {
+ /* For D3/L3 resume, the device could boot so quickly that the
+ * initial value of the dummy register might be overwritten.
+ * Identify new boots if the ATR source address register is not initialized.
+ */
+ u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
+ ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
+ if (prev_state == PM_RESUME_REG_STATE_L3 ||
+ (prev_state == PM_RESUME_REG_STATE_INIT &&
+ atr_reg_val == ATR_SRC_ADDR_INVALID)) {
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ ret = t7xx_pcie_reinit(t7xx_dev, true);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+ }
+
+ if (prev_state == PM_RESUME_REG_STATE_EXP ||
+ prev_state == PM_RESUME_REG_STATE_L2_EXP) {
+ if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
+ ret = t7xx_pcie_reinit(t7xx_dev, false);
+ if (ret)
+ return ret;
+ }
+
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+
+ t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_EXCEPTION_INIT |
+ D2H_INT_EXCEPTION_INIT_DONE |
+ D2H_INT_EXCEPTION_CLEARQ_DONE |
+ D2H_INT_EXCEPTION_ALLQ_RESET |
+ D2H_INT_PORT_ENUM);
+
+ return ret;
+ }
+
+ if (prev_state == PM_RESUME_REG_STATE_L2) {
+ ret = t7xx_pcie_reinit(t7xx_dev, false);
+ if (ret)
+ return ret;
+
+ } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
+ prev_state != PM_RESUME_REG_STATE_INIT) {
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
+ return 0;
+ }
+ }
+
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ t7xx_wait_pm_config(t7xx_dev);
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->resume_early)
+ entity->resume_early(t7xx_dev, entity->entity_param);
+ }
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);
+
+ ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);
+
+ list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
+ if (entity->resume) {
+ ret = entity->resume(t7xx_dev, entity->entity_param);
+ if (ret)
+ dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
+ entity->id, ret);
+ }
+ }
+
+ t7xx_dev->rgu_pci_irq_en = true;
+ t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
+ iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+
+ return ret;
+}
+
+static int t7xx_pci_pm_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct t7xx_pci_dev *t7xx_dev;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+
+ return 0;
+}
+
+static void t7xx_pci_shutdown(struct pci_dev *pdev)
+{
+ __t7xx_pci_pm_suspend(pdev);
+}
+
+static int t7xx_pci_pm_suspend(struct device *dev)
+{
+ return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int t7xx_pci_pm_resume(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
+}
+
+static int t7xx_pci_pm_thaw(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
+}
+
+static const struct dev_pm_ops t7xx_pci_pm_ops = {
+ .suspend = t7xx_pci_pm_suspend,
+ .resume = t7xx_pci_pm_resume,
+ .resume_noirq = t7xx_pci_pm_resume_noirq,
+ .freeze = t7xx_pci_pm_suspend,
+ .thaw = t7xx_pci_pm_thaw,
+ .poweroff = t7xx_pci_pm_suspend,
+ .restore = t7xx_pci_pm_resume,
+ .restore_noirq = t7xx_pci_pm_resume_noirq,
+};
+
static int t7xx_request_irq(struct pci_dev *pdev)
{
struct t7xx_pci_dev *t7xx_dev;
@@ -165,6 +580,10 @@ static int t7xx_pci_probe(struct pci_dev
IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
+ ret = t7xx_pci_pm_init(t7xx_dev);
+ if (ret)
+ return ret;
+
t7xx_pcie_mac_atr_init(t7xx_dev);
t7xx_pci_infracfg_ao_calc(t7xx_dev);
t7xx_mhccif_init(t7xx_dev);
@@ -216,6 +635,8 @@ static struct pci_driver t7xx_pci_driver
.id_table = t7xx_pci_table,
.probe = t7xx_pci_probe,
.remove = t7xx_pci_remove,
+ .driver.pm = &t7xx_pci_pm_ops,
+ .shutdown = t7xx_pci_shutdown,
};
module_pci_driver(t7xx_pci_driver);
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -17,7 +17,9 @@
#ifndef __T7XX_PCI_H__
#define __T7XX_PCI_H__
+#include <linux/completion.h>
#include <linux/irqreturn.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/types.h>
@@ -49,6 +51,10 @@ typedef irqreturn_t (*t7xx_intr_callback
* @md: modem interface
* @ccmni_ctlb: context structure used to control the network data path
* @rgu_pci_irq_en: RGU callback ISR registered and active
+ * @md_pm_entities: list of pm entities
+ * @md_pm_entity_mtx: protects md_pm_entities list
+ * @pm_sr_ack: ack from the device when it goes to sleep or wakes up
+ * @md_pm_state: state for resume/suspend
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -59,6 +65,46 @@ struct t7xx_pci_dev {
struct t7xx_modem *md;
struct t7xx_ccmni_ctrl *ccmni_ctlb;
bool rgu_pci_irq_en;
+
+ /* Low Power Items */
+ struct list_head md_pm_entities;
+ struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */
+ struct completion pm_sr_ack;
+ atomic_t md_pm_state;
+};
+
+enum t7xx_pm_id {
+ PM_ENTITY_ID_CTRL1,
+ PM_ENTITY_ID_CTRL2,
+ PM_ENTITY_ID_DATA,
+ PM_ENTITY_ID_INVALID
};
+/* struct md_pm_entity - device power management entity
+ * @entity: list of PM Entities
+ * @suspend: callback invoked before sending D3 request to device
+ * @suspend_late: callback invoked after getting D3 ACK from device
+ * @resume_early: callback invoked before sending the resume request to device
+ * @resume: callback invoked after getting resume ACK from device
+ * @id: unique PM entity identifier
+ * @entity_param: parameter passed to the registered callbacks
+ *
+ * This structure is used to indicate PM operations required by internal
+ * HW modules such as CLDMA and DPMAIF.
+ */
+struct md_pm_entity {
+ struct list_head entity;
+ int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
+ enum t7xx_pm_id id;
+ void *entity_param;
+};
+
+int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
+int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
+void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
+
#endif /* __T7XX_PCI_H__ */
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -188,6 +188,7 @@ static void fsm_routine_exception(struct
case EXCEPTION_EVENT:
dev_err(dev, "Exception event\n");
t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
+ t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
t7xx_md_exception_handshake(ctl->md);
fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
@@ -300,6 +301,7 @@ static int fsm_routine_starting(struct t
return -ETIMEDOUT;
}
+ t7xx_pci_pm_init_late(md->t7xx_dev);
fsm_routine_ready(ctl);
return 0;
}

View File

@@ -0,0 +1,277 @@
From d10b3a695ba0227faf249537402bb72b283a36b8 Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Fri, 6 May 2022 11:13:08 -0700
Subject: [PATCH] net: wwan: t7xx: Runtime PM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Enables the runtime power management callbacks, including
runtime_suspend and runtime_resume. Autosuspend is used to avoid the
overhead of frequent wake-ups (a condensed sketch of the idiom follows
the file list below).
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Eliot Lee <eliot.lee@intel.com>
Signed-off-by: Eliot Lee <eliot.lee@intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 14 ++++++++++++++
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 17 +++++++++++++++++
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 15 +++++++++++++++
drivers/net/wwan/t7xx/t7xx_pci.c | 22 ++++++++++++++++++++++
4 files changed, 68 insertions(+)
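
The hunks below repeat the standard runtime PM autosuspend idiom; a
condensed sketch, with dev standing for the struct device the driver holds:

#include <linux/pm_runtime.h>

static void demo_do_hw_work(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);   /* wake device, take usage ref */
        if (ret < 0 && ret != -EACCES)
                return;

        /* ... access the hardware here ... */

        pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev);        /* drop ref; suspend after the delay */
}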
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -251,6 +252,8 @@ static void t7xx_cldma_rx_done(struct wo
t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
}
static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
@@ -360,6 +363,9 @@ static void t7xx_cldma_tx_done(struct wo
t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
}
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
}
static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
@@ -568,6 +574,7 @@ static void t7xx_cldma_irq_work_cb(struc
if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
if (i < CLDMA_TXQ_NUM) {
+ pm_runtime_get(md_ctrl->dev);
t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
queue_work(md_ctrl->txq[i].worker,
@@ -592,6 +599,7 @@ static void t7xx_cldma_irq_work_cb(struc
if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
+ pm_runtime_get(md_ctrl->dev);
t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
@@ -922,6 +930,10 @@ int t7xx_cldma_send_skb(struct cldma_ctr
if (qno >= CLDMA_TXQ_NUM)
return -EINVAL;
+ ret = pm_runtime_resume_and_get(md_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
queue = &md_ctrl->txq[qno];
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
@@ -965,6 +977,8 @@ int t7xx_cldma_send_skb(struct cldma_ctr
} while (!ret);
allow_sleep:
+ pm_runtime_mark_last_busy(md_ctrl->dev);
+ pm_runtime_put_autosuspend(md_ctrl->dev);
return ret;
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -32,6 +32,7 @@
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -910,6 +911,7 @@ static void t7xx_dpmaif_rxq_work(struct
{
struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
+ int ret;
atomic_set(&rxq->rx_processing, 1);
/* Ensure rx_processing is changed to 1 before actually begin RX flow */
@@ -921,7 +923,14 @@ static void t7xx_dpmaif_rxq_work(struct
return;
}
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
}
@@ -1123,11 +1132,19 @@ static void t7xx_dpmaif_bat_release_work
{
struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
struct dpmaif_rx_queue *rxq;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
/* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */
rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
t7xx_dpmaif_bat_release_and_add(rxq);
t7xx_dpmaif_frag_bat_release_and_add(rxq);
+
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -28,6 +28,7 @@
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -161,6 +162,10 @@ static void t7xx_dpmaif_tx_done(struct w
struct dpmaif_hw_info *hw_info;
int ret;
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return;
+
hw_info = &dpmaif_ctrl->hw_info;
ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
if (ret == -EAGAIN ||
@@ -174,6 +179,9 @@ static void t7xx_dpmaif_tx_done(struct w
t7xx_dpmaif_clr_ip_busy_sts(hw_info);
t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
}
+
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
@@ -423,6 +431,7 @@ static void t7xx_do_tx_hw_push(struct dp
static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
struct dpmaif_ctrl *dpmaif_ctrl = arg;
+ int ret;
while (!kthread_should_stop()) {
if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
@@ -437,7 +446,13 @@ static int t7xx_dpmaif_tx_hw_push_thread
break;
}
+ ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
t7xx_do_tx_hw_push(dpmaif_ctrl);
+ pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
return 0;
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -31,6 +31,7 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include "t7xx_mhccif.h"
@@ -44,6 +45,7 @@
#define T7XX_PCI_EREG_BASE 2
#define PM_ACK_TIMEOUT_MS 1500
+#define PM_AUTOSUSPEND_MS 20000
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
#define PM_RESOURCE_POLL_STEP_US 100
@@ -82,6 +84,8 @@ static int t7xx_pci_pm_init(struct t7xx_
DPM_FLAG_NO_DIRECT_COMPLETE);
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
return t7xx_wait_pm_config(t7xx_dev);
}
@@ -96,6 +100,8 @@ void t7xx_pci_pm_init_late(struct t7xx_p
D2H_INT_RESUME_ACK_AP);
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+
+ pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}
static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
@@ -104,6 +110,9 @@ static int t7xx_pci_pm_reinit(struct t7x
* so just roll back PM setting to the init setting.
*/
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+
+ pm_runtime_get_noresume(&t7xx_dev->pdev->dev);
+
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
return t7xx_wait_pm_config(t7xx_dev);
}
@@ -403,6 +412,7 @@ static int __t7xx_pci_pm_resume(struct p
t7xx_dev->rgu_pci_irq_en = true;
t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ pm_runtime_mark_last_busy(&pdev->dev);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
return ret;
@@ -439,6 +449,16 @@ static int t7xx_pci_pm_thaw(struct devic
return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}
+static int t7xx_pci_pm_runtime_suspend(struct device *dev)
+{
+ return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int t7xx_pci_pm_runtime_resume(struct device *dev)
+{
+ return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
+}
+
static const struct dev_pm_ops t7xx_pci_pm_ops = {
.suspend = t7xx_pci_pm_suspend,
.resume = t7xx_pci_pm_resume,
@@ -448,6 +468,8 @@ static const struct dev_pm_ops t7xx_pci_
.poweroff = t7xx_pci_pm_suspend,
.restore = t7xx_pci_pm_resume,
.restore_noirq = t7xx_pci_pm_resume_noirq,
+ .runtime_suspend = t7xx_pci_pm_runtime_suspend,
+ .runtime_resume = t7xx_pci_pm_runtime_resume
};
static int t7xx_request_irq(struct pci_dev *pdev)

View File

@@ -0,0 +1,368 @@
From de49ea38ba11c1f0fd9e126e93b2f7eb67ed5020 Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Fri, 6 May 2022 11:13:09 -0700
Subject: [PATCH] net: wwan: t7xx: Device deep sleep lock/unlock
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Introduce the mechanism to lock/unlock the device's 'deep sleep' mode.
When the PCIe link state is L1.2 or L2, the host can still consider the
device to be in D0 state from its own point of view. At the same time,
if the device's 'deep sleep' mode is unlocked, the device will enter
'deep sleep' even though it is still in D0 state on the host side (see
the usage sketch after the file list below).
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 12 +++
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 14 +++-
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 41 +++++++---
drivers/net/wwan/t7xx/t7xx_mhccif.c | 3 +
drivers/net/wwan/t7xx/t7xx_pci.c | 93 ++++++++++++++++++++++
drivers/net/wwan/t7xx/t7xx_pci.h | 10 +++
6 files changed, 158 insertions(+), 15 deletions(-)
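
The callers changed below all follow the same bracketing pattern; a
condensed sketch:

/* Condensed usage of the deep-sleep lock introduced by this patch. */
static void demo_locked_hw_access(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_pci_disable_sleep(t7xx_dev);       /* request the lock (H2D_CH_DS_LOCK) */

        /* Proceed only after the device ACKs with D2H_INT_DS_LOCK_ACK. */
        if (t7xx_pci_sleep_disable_complete(t7xx_dev)) {
                /* ... safe to touch CLDMA/DPMAIF hardware here ... */
        }

        t7xx_pci_enable_sleep(t7xx_dev);        /* drop the lock reference */
}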
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -934,6 +934,7 @@ int t7xx_cldma_send_skb(struct cldma_ctr
if (ret < 0 && ret != -EACCES)
return ret;
+ t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
queue = &md_ctrl->txq[qno];
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
@@ -955,6 +956,11 @@ int t7xx_cldma_send_skb(struct cldma_ctr
queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
/* Protect the access to the modem for queues operations (resume/start)
* which access shared locations by all the queues.
* cldma_lock is independent of ring_lock which is per queue.
@@ -967,6 +973,11 @@ int t7xx_cldma_send_skb(struct cldma_ctr
}
spin_unlock_irqrestore(&queue->ring_lock, flags);
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
@@ -977,6 +988,7 @@ int t7xx_cldma_send_skb(struct cldma_ctr
} while (!ret);
allow_sleep:
+ t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(md_ctrl->dev);
pm_runtime_put_autosuspend(md_ctrl->dev);
return ret;
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -927,8 +927,11 @@ static void t7xx_dpmaif_rxq_work(struct
if (ret < 0 && ret != -EACCES)
return;
- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
@@ -1138,11 +1141,16 @@ static void t7xx_dpmaif_bat_release_work
if (ret < 0 && ret != -EACCES)
return;
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+
/* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */
rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
- t7xx_dpmaif_bat_release_and_add(rxq);
- t7xx_dpmaif_frag_bat_release_and_add(rxq);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ t7xx_dpmaif_bat_release_and_add(rxq);
+ t7xx_dpmaif_frag_bat_release_and_add(rxq);
+ }
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -166,20 +166,25 @@ static void t7xx_dpmaif_tx_done(struct w
if (ret < 0 && ret != -EACCES)
return;
- hw_info = &dpmaif_ctrl->hw_info;
- ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
- if (ret == -EAGAIN ||
- (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
- t7xx_dpmaif_drb_ring_not_empty(txq))) {
- queue_work(dpmaif_ctrl->txq[txq->index].worker,
- &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
- /* Give the device time to enter the low power state */
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- } else {
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
+ /* The device may be in low power state. Disable sleep if needed */
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ hw_info = &dpmaif_ctrl->hw_info;
+ ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
+ if (ret == -EAGAIN ||
+ (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
+ t7xx_dpmaif_drb_ring_not_empty(txq))) {
+ queue_work(dpmaif_ctrl->txq[txq->index].worker,
+ &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
+ /* Give the device time to enter the low power state */
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
+ }
}
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
@@ -405,6 +410,8 @@ static int t7xx_txq_burst_send_skb(struc
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
+ bool wait_disable_sleep = true;
+
do {
struct dpmaif_tx_queue *txq;
int drb_send_cnt;
@@ -420,6 +427,14 @@ static void t7xx_do_tx_hw_push(struct dp
continue;
}
+ /* Wait for the PCIe resource to unlock */
+ if (wait_disable_sleep) {
+ if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ return;
+
+ wait_disable_sleep = false;
+ }
+
t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
@@ -450,7 +465,9 @@ static int t7xx_dpmaif_tx_hw_push_thread
if (ret < 0 && ret != -EACCES)
return ret;
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
t7xx_do_tx_hw_push(dpmaif_ctrl);
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
--- a/drivers/net/wwan/t7xx/t7xx_mhccif.c
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c
@@ -59,6 +59,9 @@ static irqreturn_t t7xx_mhccif_isr_threa
t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
+ if (int_status & D2H_INT_DS_LOCK_ACK)
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+
if (int_status & D2H_INT_SR_ACK)
complete(&t7xx_dev->pm_sr_ack);
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -33,6 +33,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
@@ -44,6 +45,7 @@
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
+#define PM_SLEEP_DIS_TIMEOUT_MS 20
#define PM_ACK_TIMEOUT_MS 1500
#define PM_AUTOSUSPEND_MS 20000
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
@@ -56,6 +58,21 @@ enum t7xx_pm_state {
MTK_PM_RESUMED,
};
+static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+ void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
+ u32 value;
+
+ value = ioread32(ctrl_reg);
+
+ if (enable)
+ value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+ else
+ value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+
+ iowrite32(value, ctrl_reg);
+}
+
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
int ret, val;
@@ -76,6 +93,8 @@ static int t7xx_pci_pm_init(struct t7xx_
INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
mutex_init(&t7xx_dev->md_pm_entity_mtx);
+ spin_lock_init(&t7xx_dev->md_pm_lock);
+ init_completion(&t7xx_dev->sleep_lock_acquire);
init_completion(&t7xx_dev->pm_sr_ack);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
@@ -94,6 +113,7 @@ void t7xx_pci_pm_init_late(struct t7xx_p
{
/* Enable the PCIe resource lock only after MD deep sleep is done */
t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_DS_LOCK_ACK |
D2H_INT_SUSPEND_ACK |
D2H_INT_RESUME_ACK |
D2H_INT_SUSPEND_ACK_AP |
@@ -159,6 +179,79 @@ int t7xx_pci_pm_entity_unregister(struct
return -ENXIO;
}
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+
+ ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
+ msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
+ if (!ret)
+ dev_err_ratelimited(dev, "Resource wait complete timed out\n");
+
+ return ret;
+}
+
+/**
+ * t7xx_pci_disable_sleep() - Disable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * Lock the deep sleep capability. Note that the device can still go into a deep
+ * sleep state while it is in the D0 state, from the host's point of view.
+ *
+ * If the device is in a deep sleep state, wake it up and disable the deep sleep capability.
+ */
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count++;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock_and_complete;
+
+ if (t7xx_dev->sleep_disable_count == 1) {
+ u32 status;
+
+ reinit_completion(&t7xx_dev->sleep_lock_acquire);
+ t7xx_dev_set_sleep_capability(t7xx_dev, false);
+
+ status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (status & T7XX_PCIE_RESOURCE_STS_MSK)
+ goto unlock_and_complete;
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
+ }
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ return;
+
+unlock_and_complete:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+}
+
+/**
+ * t7xx_pci_enable_sleep() - Enable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * After enabling deep sleep, the device can enter a deep sleep state.
+ */
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count--;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock;
+
+ if (t7xx_dev->sleep_disable_count == 0)
+ t7xx_dev_set_sleep_capability(t7xx_dev, true);
+
+unlock:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+}
+
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
unsigned long wait_ret;
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -21,6 +21,7 @@
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include "t7xx_reg.h"
@@ -55,6 +56,9 @@ typedef irqreturn_t (*t7xx_intr_callback
* @md_pm_entity_mtx: protects md_pm_entities list
* @pm_sr_ack: ack from the device when went to sleep or woke up
* @md_pm_state: state for resume/suspend
+ * @md_pm_lock: protects PCIe sleep lock
+ * @sleep_disable_count: PCIe L1.2 lock counter
+ * @sleep_lock_acquire: indicates that sleep has been disabled
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -71,6 +75,9 @@ struct t7xx_pci_dev {
struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */
struct completion pm_sr_ack;
atomic_t md_pm_state;
+ spinlock_t md_pm_lock; /* Protects PCI resource lock */
+ unsigned int sleep_disable_count;
+ struct completion sleep_lock_acquire;
};
enum t7xx_pm_id {
@@ -102,6 +109,9 @@ struct md_pm_entity {
void *entity_param;
};
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
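
For reference, the three helpers declared above establish a simple bracket pattern in the driver's I/O paths. A minimal sketch of a caller, assuming only the functions added by this patch (do_hw_work() is a placeholder):

/* Deep-sleep lock bracket as used by the DPMAIF TX/RX paths.
 * t7xx_pci_disable_sleep() bumps sleep_disable_count and, for the first
 * holder, requests the lock from the device; the completion helper then
 * waits up to PM_SLEEP_DIS_TIMEOUT_MS for D2H_INT_DS_LOCK_ACK.
 */
static void example_io_path(struct dpmaif_ctrl *dpmaif_ctrl)
{
        t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);

        if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
                do_hw_work(dpmaif_ctrl);        /* device cannot deep sleep here */

        t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
}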


@@ -0,0 +1,34 @@
From b321dfafb0b99e285d14bcaae00b4f9093556eb6 Mon Sep 17 00:00:00 2001
From: YueHaibing <yuehaibing@huawei.com>
Date: Fri, 13 May 2022 15:56:11 +0800
Subject: [PATCH] net: wwan: t7xx: Fix return type of t7xx_dl_add_timedout()
t7xx_dl_add_timedout() now returns the int 'ret', but its declared
return type is bool. Change the return type to int so the error code
can be propagated further upstream.
Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_dpmaif.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
@@ -1043,15 +1043,13 @@ unsigned int t7xx_dpmaif_dl_dlq_pit_get_
return value & DPMAIF_DL_RD_WR_IDX_MSK;
}
-static bool t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
+static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
{
u32 value;
- int ret;
- ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
+ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
- return ret;
}
int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt)
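
With the int return type, callers can propagate the poll-timeout error instead of collapsing it to a bool. A hedged sketch of the calling pattern (the caller name is illustrative):

/* Illustrative caller: the -ETIMEDOUT produced by
 * ioread32_poll_timeout_atomic() can now travel up the call chain.
 */
static int example_dl_add(struct dpmaif_hw_info *hw_info)
{
        int ret = t7xx_dl_add_timedout(hw_info);

        if (ret)
                return ret;     /* typically -ETIMEDOUT */

        /* ... it is now safe to write DPMAIF_DL_BAT_ADD ... */
        return 0;
}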


@@ -0,0 +1,75 @@
From 262d98b1193fec68c66f3d57772b72240fc4b9da Mon Sep 17 00:00:00 2001
From: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Date: Fri, 13 May 2022 10:33:59 -0700
Subject: [PATCH] net: wwan: t7xx: Avoid calls to skb_data_area_size()
The skb_data_area_size() helper was used to calculate the size of the
DMA-mapped buffer passed to the HW. Instead of doing this, use the
size that was passed to allocate the skbs.
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 7 +++----
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 6 ++----
2 files changed, 5 insertions(+), 8 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -97,8 +97,7 @@ static int t7xx_cldma_alloc_and_map_skb(
if (!req->skb)
return -ENOMEM;
- req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data,
- skb_data_area_size(req->skb), DMA_FROM_DEVICE);
+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
dev_kfree_skb_any(req->skb);
req->skb = NULL;
@@ -154,7 +153,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru
if (req->mapped_buff) {
dma_unmap_single(md_ctrl->dev, req->mapped_buff,
- skb_data_area_size(skb), DMA_FROM_DEVICE);
+ queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
req->mapped_buff = 0;
}
@@ -376,7 +375,7 @@ static void t7xx_cldma_ring_free(struct
list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
if (req_cur->mapped_buff && req_cur->skb) {
dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
- skb_data_area_size(req_cur->skb), tx_rx);
+ ring->pkt_size, tx_rx);
req_cur->mapped_buff = 0;
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -151,14 +151,12 @@ static bool t7xx_alloc_and_map_skb_info(
{
dma_addr_t data_bus_addr;
struct sk_buff *skb;
- size_t data_len;
skb = __dev_alloc_skb(size, GFP_KERNEL);
if (!skb)
return false;
- data_len = skb_data_area_size(skb);
- data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE);
+ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
dev_kfree_skb_any(skb);
@@ -167,7 +165,7 @@ static bool t7xx_alloc_and_map_skb_info(
cur_skb->skb = skb;
cur_skb->data_bus_addr = data_bus_addr;
- cur_skb->data_len = data_len;
+ cur_skb->data_len = size;
return true;
}
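
The fix preserves one invariant: the length passed to dma_map_single() must equal the length later passed to dma_unmap_single(), and the allocation size is the only value both sides reliably share. A minimal sketch under that assumption (names follow the CLDMA code above):

/* Allocate and map with the same 'size'; the ring records pkt_size so
 * the eventual unmap uses an identical length.
 */
static int example_alloc_map(struct device *dev, struct cldma_request *req,
                             size_t size)
{
        req->skb = __dev_alloc_skb(size, GFP_KERNEL);
        if (!req->skb)
                return -ENOMEM;

        req->mapped_buff = dma_map_single(dev, req->skb->data, size,
                                          DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, req->mapped_buff)) {
                dev_kfree_skb_any(req->skb);
                req->skb = NULL;
                return -ENOMEM;
        }
        return 0;
}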


@@ -0,0 +1,71 @@
From 86afd5a0e78eb9b84b158b33d85f711c5f748fd1 Mon Sep 17 00:00:00 2001
From: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Date: Wed, 18 May 2022 12:55:29 -0700
Subject: [PATCH] net: wwan: t7xx: Fix smatch errors
t7xx_request_irq() error: uninitialized symbol 'ret'.
t7xx_core_hk_handler() error: potentially dereferencing uninitialized 'event'.
If the condition to enter the loop that waits for the handshake event
is false on the first iteration, the uninitialized 'event' will be
dereferenced; fix this by initializing 'event' to NULL.
t7xx_port_proxy_recv_skb() warn: variable dereferenced before check 'skb'.
No need to check skb at t7xx_port_proxy_recv_skb() since we know it
is always called with a valid skb by t7xx_cldma_gpd_rx_from_q().
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Link: https://lore.kernel.org/r/20220518195529.126246-1-ricardo.martinez@linux.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 4 ++--
drivers/net/wwan/t7xx/t7xx_pci.c | 2 +-
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 3 ---
3 files changed, 3 insertions(+), 6 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -458,9 +458,9 @@ static void t7xx_core_hk_handler(struct
enum t7xx_fsm_event_state event_id,
enum t7xx_fsm_event_state err_detect)
{
+ struct t7xx_fsm_event *event = NULL, *event_next;
struct t7xx_sys_info *core_info = &md->core_md;
struct device *dev = &md->t7xx_dev->pdev->dev;
- struct t7xx_fsm_event *event, *event_next;
unsigned long flags;
int ret;
@@ -493,7 +493,7 @@ static void t7xx_core_hk_handler(struct
goto err_free_event;
}
- if (ctl->exp_flg)
+ if (!event || ctl->exp_flg)
goto err_free_event;
ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -568,7 +568,7 @@ static const struct dev_pm_ops t7xx_pci_
static int t7xx_request_irq(struct pci_dev *pdev)
{
struct t7xx_pci_dev *t7xx_dev;
- int ret, i;
+ int ret = 0, i;
t7xx_dev = pci_get_drvdata(pdev);
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -350,9 +350,6 @@ static int t7xx_port_proxy_recv_skb(stru
u16 seq_num, channel;
int ret;
- if (!skb)
- return -EINVAL;
-
channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);


@@ -0,0 +1,61 @@
From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001
From: Yang Yingliang <yangyingliang@huawei.com>
Date: Thu, 19 May 2022 11:21:08 +0800
Subject: [PATCH] net: wwan: t7xx: use GFP_ATOMIC under spin lock in
t7xx_cldma_gpd_set_next_ptr()
Sometimes t7xx_cldma_gpd_set_next_ptr() is called under a spin lock,
so add a 'gfp_mask' parameter to t7xx_cldma_alloc_and_map_skb() to
pass the appropriate flag.
Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -91,9 +91,9 @@ static void t7xx_cldma_gpd_set_next_ptr(
}
static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
- size_t size)
+ size_t size, gfp_t gfp_mask)
{
- req->skb = __dev_alloc_skb(size, GFP_KERNEL);
+ req->skb = __dev_alloc_skb(size, gfp_mask);
if (!req->skb)
return -ENOMEM;
@@ -174,7 +174,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru
spin_unlock_irqrestore(&queue->ring_lock, flags);
req = queue->rx_refill;
- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
if (ret)
return ret;
@@ -402,7 +402,7 @@ static struct cldma_request *t7xx_alloc_
if (!req->gpd)
goto err_free_req;
- val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
if (val)
goto err_free_pool;
@@ -801,7 +801,7 @@ static int t7xx_cldma_clear_rxq(struct c
if (req->skb)
continue;
- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
if (ret)
break;
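
The underlying rule: GFP_KERNEL allocations may sleep, which is illegal while holding a spinlock with interrupts disabled, so the gfp_t parameter lets each call site declare its own context. A hedged sketch of the two contexts (refill logic reduced to the allocation call; names follow the CLDMA code above):

/* Process context: a blocking allocation is allowed. */
static int example_refill_process_ctx(struct cldma_ctrl *md_ctrl,
                                      struct cldma_queue *rxq,
                                      struct cldma_request *req)
{
        return t7xx_cldma_alloc_and_map_skb(md_ctrl, req,
                                            rxq->tr_ring->pkt_size, GFP_KERNEL);
}

/* Under the ring lock: the allocation must not sleep. */
static int example_refill_atomic_ctx(struct cldma_ctrl *md_ctrl,
                                     struct cldma_queue *rxq,
                                     struct cldma_request *req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rxq->ring_lock, flags);
        ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req,
                                           rxq->tr_ring->pkt_size, GFP_ATOMIC);
        spin_unlock_irqrestore(&rxq->ring_lock, flags);
        return ret;
}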


@@ -0,0 +1,36 @@
From 73c99e26036529e633a0f2d628ad7ddff6594668 Mon Sep 17 00:00:00 2001
From: Nathan Huckleberry <nhuck@google.com>
Date: Mon, 12 Sep 2022 14:45:10 -0700
Subject: [PATCH] net: wwan: t7xx: Fix return type of t7xx_ccmni_start_xmit
The ndo_start_xmit field in net_device_ops is expected to be of type
netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev).
The mismatched return type breaks forward edge kCFI since the underlying
function definition does not match the function hook definition.
The return type of t7xx_ccmni_start_xmit should be changed from int to
netdev_tx_t.
Reported-by: Dan Carpenter <error27@gmail.com>
Link: https://github.com/ClangBuiltLinux/linux/issues/1703
Cc: llvm@lists.linux.dev
Signed-off-by: Nathan Huckleberry <nhuck@google.com>
Acked-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Link: https://lore.kernel.org/r/20220912214510.929070-1-nhuck@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/wwan/t7xx/t7xx_netdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -74,7 +74,7 @@ static int t7xx_ccmni_send_packet(struct
return 0;
}
-static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
int skb_len = skb->len;
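
kCFI compares the complete function type at the indirect call site, so the hook must match the net_device_ops field exactly. A minimal sketch of the expected shape (example_queue_full() is a placeholder, body reduced to the two return values):

/* ndo_start_xmit must return netdev_tx_t; returning int makes the
 * indirect call through net_device_ops trap on kCFI-enabled kernels.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        if (example_queue_full(dev))
                return NETDEV_TX_BUSY;  /* core will requeue the skb */

        /* ... hand the skb to the hardware ... */
        return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_start_xmit = example_start_xmit,
};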


@@ -0,0 +1,28 @@
From 08e8a949f684e1fbc4b1efd2337d72ec8f3613d9 Mon Sep 17 00:00:00 2001
From: Hanjun Guo <guohanjun@huawei.com>
Date: Tue, 22 Nov 2022 20:19:40 +0800
Subject: [PATCH] net: wwan: t7xx: Fix the ACPI memory leak
The ACPI buffer memory (buffer.pointer) should be freed, as the
buffer is not used after acpi_evaluate_object(); free it to
prevent a memory leak.
Fixes: 13e920d93e37 ("net: wwan: t7xx: Add core components")
Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
Link: https://lore.kernel.org/r/1669119580-28977-1-git-send-email-guohanjun@huawei.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 2 ++
1 file changed, 2 insertions(+)
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -165,6 +165,8 @@ static int t7xx_acpi_reset(struct t7xx_p
return -EFAULT;
}
+ kfree(buffer.pointer);
+
#endif
return 0;
}
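
Background for the fix: with ACPI_ALLOCATE_BUFFER, ACPICA allocates the result buffer itself, so the caller owns buffer.pointer on success. A hedged sketch of the leak-free pattern:

/* With ACPI_ALLOCATE_BUFFER the result is allocated by ACPICA and must
 * be freed by the caller on every success path; nothing is allocated
 * when the evaluation fails.
 */
static int example_acpi_eval(acpi_handle handle)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;

        status = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return -EFAULT;

        kfree(buffer.pointer);  /* result not needed; free it */
        return 0;
}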


@@ -0,0 +1,79 @@
From fece7a8c65d1476b901b969a07b2979e1b459e66 Mon Sep 17 00:00:00 2001
From: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Date: Fri, 28 Oct 2022 21:04:50 +0530
Subject: [PATCH] net: wwan: t7xx: use union to group port type specific data
Use union inside t7xx_port to group port type specific data members.
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_port.h | 6 +++++-
drivers/net/wwan/t7xx/t7xx_port_wwan.c | 16 ++++++++--------
2 files changed, 13 insertions(+), 9 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -99,7 +99,6 @@ struct t7xx_port_conf {
struct t7xx_port {
/* Members not initialized in definition */
const struct t7xx_port_conf *port_conf;
- struct wwan_port *wwan_port;
struct t7xx_pci_dev *t7xx_dev;
struct device *dev;
u16 seq_nums[2]; /* TX/RX sequence numbers */
@@ -122,6 +121,11 @@ struct t7xx_port {
int rx_length_th;
bool chan_enable;
struct task_struct *thread;
+ union {
+ struct {
+ struct wwan_port *wwan_port;
+ } wwan;
+ };
};
struct sk_buff *t7xx_port_alloc_skb(int payload);
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -109,12 +109,12 @@ static int t7xx_port_wwan_init(struct t7
static void t7xx_port_wwan_uninit(struct t7xx_port *port)
{
- if (!port->wwan_port)
+ if (!port->wwan.wwan_port)
return;
port->rx_length_th = 0;
- wwan_remove_port(port->wwan_port);
- port->wwan_port = NULL;
+ wwan_remove_port(port->wwan.wwan_port);
+ port->wwan.wwan_port = NULL;
}
static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
@@ -129,7 +129,7 @@ static int t7xx_port_wwan_recv_skb(struc
return 0;
}
- wwan_port_rx(port->wwan_port, skb);
+ wwan_port_rx(port->wwan.wwan_port, skb);
return 0;
}
@@ -158,10 +158,10 @@ static void t7xx_port_wwan_md_state_noti
if (state != MD_STATE_READY)
return;
- if (!port->wwan_port) {
- port->wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, port);
- if (IS_ERR(port->wwan_port))
+ if (!port->wwan.wwan_port) {
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, port);
+ if (IS_ERR(port->wwan.wwan_port))
dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
}
}


@@ -0,0 +1,237 @@
From 3349e4a48acb0923fa98d2beac82a833a76116cb Mon Sep 17 00:00:00 2001
From: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Date: Fri, 28 Oct 2022 21:05:34 +0530
Subject: [PATCH] net: wwan: t7xx: Add port for modem logging
The Modem Logging (MDL) port provides an interface to collect modem
logs for debugging purposes. MDL is supported by the relay interface
and the mtk_t7xx port infrastructure. MDL allows user-space apps to
control logging via an MBIM command and to collect logs via the relay
interface, while the port infrastructure facilitates communication
between the driver and the modem.
Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Acked-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/Kconfig | 1 +
drivers/net/wwan/t7xx/Makefile | 3 +
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 2 +
drivers/net/wwan/t7xx/t7xx_pci.h | 3 +
drivers/net/wwan/t7xx/t7xx_port.h | 3 +
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 12 +++
drivers/net/wwan/t7xx/t7xx_port_proxy.h | 4 +
drivers/net/wwan/t7xx/t7xx_port_trace.c | 116 ++++++++++++++++++++++++
8 files changed, 144 insertions(+)
create mode 100644 drivers/net/wwan/t7xx/t7xx_port_trace.c
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -82,6 +82,7 @@ config IOSM
config MTK_T7XX
tristate "MediaTek PCIe 5G WWAN modem T7xx device"
depends on PCI
+ select RELAY if WWAN_DEBUGFS
help
Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device.
Adapts WWAN framework and provides network interface like wwan0
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -18,3 +18,6 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_hif_dpmaif_rx.o \
t7xx_dpmaif.o \
t7xx_netdev.o
+
+mtk_t7xx-$(CONFIG_WWAN_DEBUGFS) += \
+ t7xx_port_trace.o \
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1018,6 +1018,8 @@ static int t7xx_cldma_late_init(struct c
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
+
+ md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -78,6 +78,9 @@ struct t7xx_pci_dev {
spinlock_t md_pm_lock; /* Protects PCI resource lock */
unsigned int sleep_disable_count;
struct completion sleep_lock_acquire;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
};
enum t7xx_pm_id {
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -125,6 +125,9 @@ struct t7xx_port {
struct {
struct wwan_port *wwan_port;
} wwan;
+ struct {
+ struct rchan *relaych;
+ } log;
};
};
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -70,6 +70,18 @@ static const struct t7xx_port_conf t7xx_
.name = "MBIM",
.port_type = WWAN_PORT_MBIM,
}, {
+#ifdef CONFIG_WWAN_DEBUGFS
+ .tx_ch = PORT_CH_MD_LOG_TX,
+ .rx_ch = PORT_CH_MD_LOG_RX,
+ .txq_index = 7,
+ .rxq_index = 7,
+ .txq_exp_index = 7,
+ .rxq_exp_index = 7,
+ .path_id = CLDMA_ID_MD,
+ .ops = &t7xx_trace_port_ops,
+ .name = "mdlog",
+ }, {
+#endif
.tx_ch = PORT_CH_CONTROL_TX,
.rx_ch = PORT_CH_CONTROL_RX,
.txq_index = Q_IDX_CTRL,
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -87,6 +87,10 @@ struct ctrl_msg_header {
extern struct port_ops wwan_sub_port_ops;
extern struct port_ops ctl_port_ops;
+#ifdef CONFIG_WWAN_DEBUGFS
+extern struct port_ops t7xx_trace_port_ops;
+#endif
+
void t7xx_port_proxy_reset(struct port_proxy *port_prox);
void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
int t7xx_port_proxy_init(struct t7xx_modem *md);
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/skbuff.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define T7XX_TRC_SUB_BUFF_SIZE 131072
+#define T7XX_TRC_N_SUB_BUFF 32
+
+static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+ pr_err_ratelimited("Relay_buf full dropping traces");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = t7xx_trace_subbuf_start_handler,
+ .create_buf_file = t7xx_trace_create_buf_file_handler,
+ .remove_buf_file = t7xx_trace_remove_buf_file_handler,
+};
+
+static void t7xx_trace_port_uninit(struct t7xx_port *port)
+{
+ struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir;
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return;
+
+ relay_close(relaych);
+ debugfs_remove_recursive(debugfs_dir);
+}
+
+static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return -EINVAL;
+
+ relay_write(relaych, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ struct rchan *relaych = port->log.relaych;
+ struct dentry *debugfs_wwan_dir;
+ struct dentry *debugfs_dir;
+
+ if (state != MD_STATE_READY || relaych)
+ return;
+
+ debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev);
+ if (IS_ERR(debugfs_wwan_dir))
+ return;
+
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir);
+ if (IS_ERR_OR_NULL(debugfs_dir)) {
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create debugfs for trace");
+ return;
+ }
+
+ relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE,
+ T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL);
+ if (!relaych)
+ goto err_rm_debugfs_dir;
+
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ port->log.relaych = relaych;
+ port->t7xx_dev->debugfs_dir = debugfs_dir;
+ return;
+
+err_rm_debugfs_dir:
+ debugfs_remove_recursive(debugfs_dir);
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name);
+}
+
+struct port_ops t7xx_trace_port_ops = {
+ .recv_skb = t7xx_trace_port_recv_skb,
+ .uninit = t7xx_trace_port_uninit,
+ .md_state_notify = t7xx_port_trace_md_state_notify,
+};


@@ -0,0 +1,31 @@
From c053d7b6bdcb45780036b32be6a950f71a78bf52 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= <ilpo.jarvinen@linux.intel.com>
Date: Thu, 3 Nov 2022 14:48:28 +0530
Subject: [PATCH] net: wwan: t7xx: Use needed_headroom instead of
hard_header_len
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
hard_header_len is used by gro_list_prepare(), but on RX there is no
header, so use needed_headroom instead.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: Sreehari Kancharla <sreehari.kancharla@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_netdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -161,7 +161,7 @@ static void t7xx_ccmni_post_stop(struct
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
- dev->hard_header_len += sizeof(struct ccci_header);
+ dev->needed_headroom += sizeof(struct ccci_header);
dev->mtu = ETH_DATA_LEN;
dev->max_mtu = CCMNI_MTU_MAX;


@@ -0,0 +1,652 @@
From 5545b7b9f294de7f95ec6a7cb1de0db52296001c Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Thu, 3 Nov 2022 14:48:29 +0530
Subject: [PATCH] net: wwan: t7xx: Add NAPI support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Replace the work queue based RX flow with a NAPI implementation.
Remove rx_thread and dpmaif_rxq_work.
Enable GRO on the RX path.
Introduce a dummy network device. Its responsibilities are to:
- bind one NAPI object to each DL HW queue, acting as the agent of
all those network devices;
- use the NAPI objects to poll DL packets;
- help dispatch each packet to its network interface.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Sreehari Kancharla <sreehari.kancharla@linux.intel.com>
Signed-off-by: Sreehari Kancharla <sreehari.kancharla@linux.intel.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Acked-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Acked-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 14 +-
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 218 +++++++--------------
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 1 +
drivers/net/wwan/t7xx/t7xx_netdev.c | 89 ++++++++-
drivers/net/wwan/t7xx/t7xx_netdev.h | 5 +
5 files changed, 161 insertions(+), 166 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
@@ -20,6 +20,7 @@
#include <linux/bitmap.h>
#include <linux/mm_types.h>
+#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -109,20 +110,14 @@ struct dpmaif_rx_queue {
struct dpmaif_bat_request *bat_req;
struct dpmaif_bat_request *bat_frag;
- wait_queue_head_t rx_wq;
- struct task_struct *rx_thread;
- struct sk_buff_head skb_list;
- unsigned int skb_list_max_len;
-
- struct workqueue_struct *worker;
- struct work_struct dpmaif_rxq_work;
-
atomic_t rx_processing;
struct dpmaif_ctrl *dpmaif_ctrl;
unsigned int expect_pit_seq;
unsigned int pit_remain_release_cnt;
struct dpmaif_cur_rx_skb_info rx_data_info;
+ struct napi_struct napi;
+ bool sleep_lock_pending;
};
struct dpmaif_tx_queue {
@@ -168,7 +163,8 @@ enum dpmaif_txq_state {
struct dpmaif_callbacks {
void (*state_notify)(struct t7xx_pci_dev *t7xx_dev,
enum dpmaif_txq_state state, int txq_number);
- void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb);
+ void (*recv_skb)(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi);
};
struct dpmaif_ctrl {
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -45,6 +45,7 @@
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#define DPMAIF_BAT_COUNT 8192
@@ -76,43 +77,6 @@ static unsigned int t7xx_normal_pit_bid(
return value;
}
-static int t7xx_dpmaif_net_rx_push_thread(void *arg)
-{
- struct dpmaif_rx_queue *q = arg;
- struct dpmaif_ctrl *hif_ctrl;
- struct dpmaif_callbacks *cb;
-
- hif_ctrl = q->dpmaif_ctrl;
- cb = hif_ctrl->callbacks;
-
- while (!kthread_should_stop()) {
- struct sk_buff *skb;
- unsigned long flags;
-
- if (skb_queue_empty(&q->skb_list)) {
- if (wait_event_interruptible(q->rx_wq,
- !skb_queue_empty(&q->skb_list) ||
- kthread_should_stop()))
- continue;
-
- if (kthread_should_stop())
- break;
- }
-
- spin_lock_irqsave(&q->skb_list.lock, flags);
- skb = __skb_dequeue(&q->skb_list);
- spin_unlock_irqrestore(&q->skb_list.lock, flags);
-
- if (!skb)
- continue;
-
- cb->recv_skb(hif_ctrl->t7xx_dev, skb);
- cond_resched();
- }
-
- return 0;
-}
-
static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
const unsigned int q_num, const unsigned int bat_cnt)
{
@@ -726,21 +690,10 @@ static int t7xx_dpmaifq_rx_notify_hw(str
return ret;
}
-static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->skb_list.lock, flags);
- if (rxq->skb_list.qlen < rxq->skb_list_max_len)
- __skb_queue_tail(&rxq->skb_list, skb);
- else
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
-}
-
static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
struct dpmaif_cur_rx_skb_info *skb_info)
{
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
struct sk_buff *skb = skb_info->cur_skb;
struct t7xx_skb_cb *skb_cb;
u8 netif_id;
@@ -758,11 +711,11 @@ static void t7xx_dpmaif_rx_skb(struct dp
skb_cb = T7XX_SKB_CB(skb);
skb_cb->netif_idx = netif_id;
skb_cb->rx_pkt_type = skb_info->pkt_type;
- t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
+ dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
}
static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
- const unsigned long timeout)
+ const unsigned int budget, int *once_more)
{
unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
struct device *dev = rxq->dpmaif_ctrl->dev;
@@ -777,13 +730,14 @@ static int t7xx_dpmaif_rx_start(struct d
struct dpmaif_pit *pkt_info;
u32 val;
- if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
+ if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
break;
pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
- return -EAGAIN;
+ *once_more = 1;
+ return recv_skb_cnt;
}
val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
@@ -817,12 +771,7 @@ static int t7xx_dpmaif_rx_start(struct d
}
memset(skb_info, 0, sizeof(*skb_info));
-
recv_skb_cnt++;
- if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
- wake_up_all(&rxq->rx_wq);
- recv_skb_cnt = 0;
- }
}
}
@@ -837,16 +786,13 @@ static int t7xx_dpmaif_rx_start(struct d
}
}
- if (recv_skb_cnt)
- wake_up_all(&rxq->rx_wq);
-
if (!ret)
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
if (ret)
return ret;
- return rx_cnt;
+ return recv_skb_cnt;
}
static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
@@ -863,53 +809,30 @@ static unsigned int t7xx_dpmaifq_poll_pi
return pit_cnt;
}
-static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
- const unsigned int q_num, const unsigned int budget)
+static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num,
+ const unsigned int budget, int *once_more)
{
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
- unsigned long time_limit;
unsigned int cnt;
+ int ret = 0;
- time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);
-
- while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
- unsigned int rd_cnt;
- int real_cnt;
-
- rd_cnt = min(cnt, budget);
-
- real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
- if (real_cnt < 0)
- return real_cnt;
-
- if (real_cnt < cnt)
- return -EAGAIN;
- }
-
- return 0;
-}
+ cnt = t7xx_dpmaifq_poll_pit(rxq);
+ if (!cnt)
+ return ret;
-static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
-{
- struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
- int ret;
+ ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
- ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
- if (ret < 0) {
- /* Try one more time */
- queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- } else {
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
- }
+ return ret;
}
-static void t7xx_dpmaif_rxq_work(struct work_struct *work)
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
{
- struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
- struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
- int ret;
+ struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
+ struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
+ int ret, once_more = 0, work_done = 0;
atomic_set(&rxq->rx_processing, 1);
/* Ensure rx_processing is changed to 1 before actually begin RX flow */
@@ -917,22 +840,52 @@ static void t7xx_dpmaif_rxq_work(struct
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
- dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
- return;
+ dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
+ return work_done;
}
- ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
- if (ret < 0 && ret != -EACCES)
- return;
+ if (!rxq->sleep_lock_pending) {
+ pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ t7xx_pci_disable_sleep(t7xx_dev);
+ }
+
+ ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
+ if (!ret) {
+ napi_complete_done(napi, work_done);
+ rxq->sleep_lock_pending = true;
+ napi_reschedule(napi);
+ return work_done;
+ }
+
+ rxq->sleep_lock_pending = false;
+ while (work_done < budget) {
+ int each_budget = budget - work_done;
+ int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
+ each_budget, &once_more);
+ if (rx_cnt > 0)
+ work_done += rx_cnt;
+ else
+ break;
+ }
- t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
- if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ if (once_more) {
+ napi_gro_flush(napi, false);
+ work_done = budget;
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ } else if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ }
- t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
- pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
+
+ return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
@@ -947,7 +900,7 @@ void t7xx_dpmaif_irq_rx_done(struct dpma
}
rxq = &dpmaif_ctrl->rxq[qno];
- queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
+ napi_schedule(&rxq->napi);
}
static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
@@ -1082,50 +1035,14 @@ int t7xx_dpmaif_rxq_init(struct dpmaif_r
int ret;
ret = t7xx_dpmaif_rx_alloc(queue);
- if (ret < 0) {
+ if (ret < 0)
dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
- return ret;
- }
-
- INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);
-
- queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
- if (!queue->worker) {
- ret = -ENOMEM;
- goto err_free_rx_buffer;
- }
-
- init_waitqueue_head(&queue->rx_wq);
- skb_queue_head_init(&queue->skb_list);
- queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
- queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
- queue, "dpmaif_rx%d_push", queue->index);
-
- ret = PTR_ERR_OR_ZERO(queue->rx_thread);
- if (ret)
- goto err_free_workqueue;
-
- return 0;
-
-err_free_workqueue:
- destroy_workqueue(queue->worker);
-
-err_free_rx_buffer:
- t7xx_dpmaif_rx_buf_free(queue);
return ret;
}
void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
- if (queue->worker)
- destroy_workqueue(queue->worker);
-
- if (queue->rx_thread)
- kthread_stop(queue->rx_thread);
-
- skb_queue_purge(&queue->skb_list);
t7xx_dpmaif_rx_buf_free(queue);
}
@@ -1188,8 +1105,6 @@ void t7xx_dpmaif_rx_stop(struct dpmaif_c
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
int timeout, value;
- flush_work(&rxq->dpmaif_rxq_work);
-
timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
!value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
if (timeout)
@@ -1205,7 +1120,6 @@ static void t7xx_dpmaif_stop_rxq(struct
{
int cnt, j = 0;
- flush_work(&rxq->dpmaif_rxq_work);
rxq->que_started = false;
do {
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
@@ -112,5 +112,6 @@ int t7xx_dpmaif_bat_alloc(const struct d
const enum bat_type buf_type);
void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl,
struct dpmaif_bat_request *bat_req);
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget);
#endif /* __T7XX_HIF_DPMA_RX_H__ */
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -22,6 +22,7 @@
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
@@ -29,6 +30,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
+#include <net/ipv6.h>
#include <net/pkt_sched.h>
#include "t7xx_hif_dpmaif_rx.h"
@@ -39,13 +41,47 @@
#include "t7xx_state_monitor.h"
#define IP_MUX_SESSION_DEFAULT 0
+#define SBD_PACKET_TYPE_MASK GENMASK(7, 4)
+
+static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_enable(ctlb->napi[i]);
+ napi_schedule(ctlb->napi[i]);
+ }
+ ctlb->is_napi_en = true;
+}
+
+static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (!ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_synchronize(ctlb->napi[i]);
+ napi_disable(ctlb->napi[i]);
+ }
+
+ ctlb->is_napi_en = false;
+}
static int t7xx_ccmni_open(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ccmni_ctl);
+
atomic_inc(&ccmni->usage);
return 0;
}
@@ -53,8 +89,12 @@ static int t7xx_ccmni_open(struct net_de
static int t7xx_ccmni_close(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
atomic_dec(&ccmni->usage);
+ if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ccmni_ctl);
+
netif_carrier_off(dev);
netif_tx_disable(dev);
return 0;
@@ -127,6 +167,9 @@ static void t7xx_ccmni_start(struct t7xx
netif_carrier_on(ccmni->dev);
}
}
+
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ctlb);
}
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
@@ -149,6 +192,9 @@ static void t7xx_ccmni_post_stop(struct
struct t7xx_ccmni *ccmni;
int i;
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ctlb);
+
for (i = 0; i < ctlb->nic_dev_num; i++) {
ccmni = ctlb->ccmni_inst[i];
if (!ccmni)
@@ -183,6 +229,9 @@ static void t7xx_ccmni_wwan_setup(struct
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features |= NETIF_F_GRO;
+
dev->needs_free_netdev = true;
dev->type = ARPHRD_NONE;
@@ -190,6 +239,34 @@ static void t7xx_ccmni_wwan_setup(struct
dev->netdev_ops = &ccmni_netdev_ops;
}
+static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ /* one HW, but shared with multiple net devices,
+ * so add a dummy device for NAPI.
+ */
+ init_dummy_netdev(&ctlb->dummy_dev);
+ atomic_set(&ctlb->napi_usr_refcnt, 0);
+ ctlb->is_napi_en = false;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
+ netif_napi_add(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
+ NIC_NAPI_POLL_BUDGET);
+ }
+}
+
+static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ netif_napi_del(ctlb->napi[i]);
+ ctlb->napi[i] = NULL;
+ }
+}
+
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
struct netlink_ext_ack *extack)
{
@@ -311,7 +388,8 @@ static void init_md_status_notifier(stru
t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}
-static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
+static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct t7xx_skb_cb *skb_cb;
struct net_device *net_dev;
@@ -321,23 +399,22 @@ static void t7xx_ccmni_recv_skb(struct t
skb_cb = T7XX_SKB_CB(skb);
netif_id = skb_cb->netif_idx;
- ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
+ ccmni = ccmni_ctlb->ccmni_inst[netif_id];
if (!ccmni) {
dev_kfree_skb(skb);
return;
}
net_dev = ccmni->dev;
- skb->dev = net_dev;
-
pkt_type = skb_cb->rx_pkt_type;
+ skb->dev = net_dev;
if (pkt_type == PKT_TYPE_IP6)
skb->protocol = htons(ETH_P_IPV6);
else
skb->protocol = htons(ETH_P_IP);
skb_len = skb->len;
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb_len;
}
@@ -404,6 +481,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev
if (!ctlb->hif_ctrl)
return -ENOMEM;
+ t7xx_init_netdev_napi(ctlb);
init_md_status_notifier(t7xx_dev);
return 0;
}
@@ -419,5 +497,6 @@ void t7xx_ccmni_exit(struct t7xx_pci_dev
ctlb->wwan_is_registered = false;
}
+ t7xx_uninit_netdev_napi(ctlb);
t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}
--- a/drivers/net/wwan/t7xx/t7xx_netdev.h
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -30,6 +30,7 @@
#define CCMNI_NETDEV_WDT_TO (1 * HZ)
#define CCMNI_MTU_MAX 3000
+#define NIC_NAPI_POLL_BUDGET 128
struct t7xx_ccmni {
u8 index;
@@ -47,6 +48,10 @@ struct t7xx_ccmni_ctrl {
unsigned int md_sta;
struct t7xx_fsm_notifier md_status_notify;
bool wwan_is_registered;
+ struct net_device dummy_dev;
+ struct napi_struct *napi[RXQ_NUM];
+ atomic_t napi_usr_refcnt;
+ bool is_napi_en;
};
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev);
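
The poll function above follows the standard NAPI contract: consume at most budget packets, call napi_complete_done() and re-enable the device interrupt only when the ring drains below budget, and return the work done. A minimal sketch with the T7xx sleep-lock handling stripped out (process_one_rx() is a placeholder):

/* Core NAPI shape implemented by t7xx_dpmaif_napi_rx_poll(). */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
        struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
        int work_done = 0;

        while (work_done < budget && process_one_rx(rxq))
                work_done++;

        if (work_done < budget) {
                /* Ring drained: stop polling, re-arm the RX interrupt */
                napi_complete_done(napi, work_done);
                t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info,
                                               rxq->index);
        }
        /* At budget: leave the interrupt masked; NAPI will poll again */
        return work_done;
}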


@@ -0,0 +1,154 @@
From 364d0221f1788e5225006ba7a0026e5968431c29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kornel=20Dul=C4=99ba?= <mindal@semihalf.com>
Date: Thu, 26 Jan 2023 13:25:34 +0000
Subject: [PATCH] net: wwan: t7xx: Fix Runtime PM resume sequence
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Resume the device before calling napi_schedule, instead of doing it in
the NAPI poll routine. Polling is done in softirq context, and we can't
call the PM resume logic from there as it's blocking and not IRQ safe.
To make this work, modify the interrupt handler to run from an IRQ
handler thread.
Fixes: 5545b7b9f294 ("net: wwan: t7xx: Add NAPI support")
Signed-off-by: Kornel Dulęba <mindal@semihalf.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 11 +++++++-
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 29 +++++++++++++++-------
drivers/net/wwan/t7xx/t7xx_netdev.c | 16 +++++++++++-
3 files changed, 45 insertions(+), 11 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handl
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_ir
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
- t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
- if (!rxq->sleep_lock_pending) {
- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
- }
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
- atomic_set(&rxq->rx_processing, 0);
-
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
- int qno;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpma
}
rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+ /* We need to make sure that the modem has been resumed before
+ * calling napi. This can't be done inside the polling function
+ * as we could be blocked waiting for device to be resumed,
+ * calling napi. This can't be done inside the polling function,
+ * as we could be blocked waiting for the device to be resumed,
+ * which can't be done from the softirq context the poll function
+ * is running in.
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
napi_schedule(&rxq->napi);
}
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
@@ -45,12 +46,25 @@
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
- int i;
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+ * napi_schedule. It will be decreased in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}
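
The fix leans on the threaded-IRQ model: the hard handler runs in atomic context and only quiesces the interrupt before returning IRQ_WAKE_THREAD, while the thread function may block, which is what allows pm_runtime_resume_and_get() to run ahead of napi_schedule(). A hedged sketch of that split (the t7xx driver actually routes these through its own intr_handler/intr_thread tables rather than calling request_threaded_irq() directly):

/* Hard handler: atomic context, no blocking calls. */
static irqreturn_t example_hard_isr(int irq, void *data)
{
        /* mask/ack the interrupt source only */
        return IRQ_WAKE_THREAD;
}

/* Thread handler: may sleep, e.g. while resuming the device. */
static irqreturn_t example_thread_isr(int irq, void *data)
{
        /* pm_runtime_resume_and_get(), then napi_schedule() */
        return IRQ_HANDLED;
}

static int example_setup_irq(struct pci_dev *pdev)
{
        return request_threaded_irq(pci_irq_vector(pdev, 0),
                                    example_hard_isr, example_thread_isr,
                                    0, "t7xx-example", pdev);
}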


@@ -0,0 +1,34 @@
From e3d6d152a1cbdee25f2e3962009a2751b54e2297 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kornel=20Dul=C4=99ba?= <mindal@semihalf.com>
Date: Thu, 26 Jan 2023 13:25:35 +0000
Subject: [PATCH] net: wwan: t7xx: Fix Runtime PM initialization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
For PCI devices the Runtime PM refcount is incremented twice:
1. During device enumeration with a call to pm_runtime_forbid.
2. Just before a driver probe logic is called.
Because of that in order to enable Runtime PM on a given device
we have to call both pm_runtime_allow and pm_runtime_put_noidle,
once it's ready to be runtime suspended.
The former was missing causing the pm refcount to never reach 0.
Fixes: d10b3a695ba0 ("net: wwan: t7xx: Runtime PM")
Signed-off-by: Kornel Dulęba <mindal@semihalf.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_pci.c | 2 ++
1 file changed, 2 insertions(+)
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_p
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}
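
In other words, the PCI core hands the driver a device whose runtime PM usage count is effectively held twice, and both references must be released before autosuspend can ever trigger. A minimal sketch of the complete enable sequence, as the fixed function now performs it:

/* Undo both references the PCI core took: pm_runtime_allow() reverses
 * the pm_runtime_forbid() from enumeration, and pm_runtime_put_noidle()
 * drops the reference taken just before probe.
 */
static void example_pm_enable(struct pci_dev *pdev)
{
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_allow(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
}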


@@ -0,0 +1,51 @@
From 362f0b6678ad1377c322a7dd237ea6785efc7342 Mon Sep 17 00:00:00 2001
From: "Jiri Slaby (SUSE)" <jirislaby@kernel.org>
Date: Fri, 31 Mar 2023 08:35:15 +0200
Subject: [PATCH] net: wwan: t7xx: do not compile with -Werror
When playing with various compilers or their versions, some choke on
the t7xx code. For example (with gcc 13):
In file included from ./arch/s390/include/generated/asm/rwonce.h:1,
from ../include/linux/compiler.h:247,
from ../include/linux/build_bug.h:5,
from ../include/linux/bits.h:22,
from ../drivers/net/wwan/t7xx/t7xx_state_monitor.c:17:
In function 'preempt_count',
inlined from 't7xx_fsm_append_event' at ../drivers/net/wwan/t7xx/t7xx_state_monitor.c:439:43:
../include/asm-generic/rwonce.h:44:26: error: array subscript 0 is outside array bounds of 'const volatile int[0]' [-Werror=array-bounds=]
There is no reason for any code in the kernel to be built with -Werror
by default. Note that we have generic CONFIG_WERROR. So if anyone wants
-Werror, they can enable that.
Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org>
Link: https://lore.kernel.org/all/20230330232717.1f8bf5ea@kernel.org/
Cc: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Cc: Intel Corporation <linuxwwan@intel.com>
Cc: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
Cc: Liu Haijun <haijun.liu@mediatek.com>
Cc: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Cc: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Cc: Loic Poulain <loic.poulain@linaro.org>
Cc: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/Makefile | 2 --
1 file changed, 2 deletions(-)
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y += -Werror
-
obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
mtk_t7xx-y:= t7xx_pci.o \
t7xx_pcie_mac.o \


@@ -0,0 +1,300 @@
From 36bd28c1cb0dbf48645cfe43159907fb3253b33a Mon Sep 17 00:00:00 2001
From: haozhe chang <haozhe.chang@mediatek.com>
Date: Thu, 16 Mar 2023 17:58:20 +0800
Subject: [PATCH] wwan: core: Support slicing in port TX flow of WWAN subsystem
wwan_port_fops_write passes the SKB parameter to the TX callback of
the WWAN device driver. However, the WWAN device (e.g., t7xx) may
have an MTU less than the size of the SKB, causing the TX buffer to be
sliced and copied once more in the WWAN device driver.
This patch implements the slicing in the WWAN subsystem and gives
the WWAN device driver the option to slice (by frag_len) or not. By
doing so, the additional memory copy is reduced.
Meanwhile, this patch gives the WWAN device driver the option to
reserve headroom in fragments for device-specific metadata.
Signed-off-by: haozhe chang <haozhe.chang@mediatek.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Link: https://lore.kernel.org/r/20230316095826.181904-1-haozhe.chang@mediatek.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/wwan/iosm/iosm_ipc_port.c | 3 +-
drivers/net/wwan/mhi_wwan_ctrl.c | 2 +-
drivers/net/wwan/rpmsg_wwan_ctrl.c | 2 +-
drivers/net/wwan/t7xx/t7xx_port_wwan.c | 36 ++++++++--------
drivers/net/wwan/wwan_core.c | 58 +++++++++++++++++++-------
drivers/net/wwan/wwan_hwsim.c | 2 +-
drivers/usb/class/cdc-wdm.c | 3 +-
include/linux/wwan.h | 11 +++++
8 files changed, 81 insertions(+), 36 deletions(-)
--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -63,7 +63,8 @@ struct iosm_cdev *ipc_port_init(struct i
ipc_port->ipc_imem = ipc_imem;
ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type,
- &ipc_wwan_ctrl_ops, ipc_port);
+ &ipc_wwan_ctrl_ops, NULL,
+ ipc_port);
return ipc_port;
}
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -237,7 +237,7 @@ static int mhi_wwan_ctrl_probe(struct mh
/* Register as a wwan port, id->driver_data contains wwan port type */
port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
- &wwan_pops, mhiwwan);
+ &wwan_pops, NULL, mhiwwan);
if (IS_ERR(port)) {
kfree(mhiwwan);
return PTR_ERR(port);
--- a/drivers/net/wwan/rpmsg_wwan_ctrl.c
+++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c
@@ -129,7 +129,7 @@ static int rpmsg_wwan_ctrl_probe(struct
/* Register as a wwan port, id.driver_data contains wwan port type */
port = wwan_create_port(parent, rpdev->id.driver_data,
- &rpmsg_wwan_pops, rpwwan);
+ &rpmsg_wwan_pops, NULL, rpwwan);
if (IS_ERR(port))
return PTR_ERR(port);
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -54,13 +54,13 @@ static void t7xx_port_ctrl_stop(struct w
static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct t7xx_port *port_private = wwan_port_get_drvdata(port);
- size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
const struct t7xx_port_conf *port_conf;
+ struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
+ int cnt = 0, ret;
- len = skb->len;
- if (!len || !port_private->chan_enable)
+ if (!port_private->chan_enable)
return -EINVAL;
port_conf = port_private->port_conf;
@@ -72,23 +72,21 @@ static int t7xx_port_ctrl_tx(struct wwan
return -ENODEV;
}
- for (offset = 0; offset < len; offset += chunk_len) {
- struct sk_buff *skb_ccci;
- int ret;
-
- chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
- skb_ccci = t7xx_port_alloc_skb(chunk_len);
- if (!skb_ccci)
- return -ENOMEM;
-
- skb_put_data(skb_ccci, skb->data + offset, chunk_len);
- ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
+ while (cur) {
+ cloned = skb_clone(cur, GFP_KERNEL);
+ cloned->len = skb_headlen(cur);
+ ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
if (ret) {
- dev_kfree_skb_any(skb_ccci);
+ dev_kfree_skb(cloned);
dev_err(port_private->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
- return ret;
+ return cnt ? cnt + ret : ret;
}
+ cnt += cur->len;
+ if (cur == skb)
+ cur = skb_shinfo(skb)->frag_list;
+ else
+ cur = cur->next;
}
dev_kfree_skb(skb);
@@ -154,13 +152,17 @@ static int t7xx_port_wwan_disable_chl(st
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header);
+ struct wwan_port_caps caps;
if (state != MD_STATE_READY)
return;
if (!port->wwan.wwan_port) {
+ caps.frag_len = CLDMA_MTU - header_len;
+ caps.headroom_len = header_len;
port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, port);
+ &wwan_ops, &caps, port);
if (IS_ERR(port->wwan.wwan_port))
dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
}
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -60,6 +60,8 @@ struct wwan_device {
* @rxq: Buffer inbound queue
* @waitqueue: The waitqueue for port fops (read/write/poll)
* @data_lock: Port specific data access serialization
+ * @headroom_len: SKB reserved headroom size
+ * @frag_len: Length to fragment packet
* @at_data: AT port specific data
*/
struct wwan_port {
@@ -72,6 +74,8 @@ struct wwan_port {
struct sk_buff_head rxq;
wait_queue_head_t waitqueue;
struct mutex data_lock; /* Port specific data access serialization */
+ size_t headroom_len;
+ size_t frag_len;
union {
struct {
struct ktermios termios;
@@ -355,6 +359,7 @@ static int __wwan_port_dev_assign_name(s
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
+ struct wwan_port_caps *caps,
void *drvdata)
{
struct wwan_device *wwandev;
@@ -388,6 +393,8 @@ struct wwan_port *wwan_create_port(struc
port->type = type;
port->ops = ops;
+ port->frag_len = caps ? caps->frag_len : SIZE_MAX;
+ port->headroom_len = caps ? caps->headroom_len : 0;
mutex_init(&port->ops_lock);
skb_queue_head_init(&port->rxq);
init_waitqueue_head(&port->waitqueue);
@@ -631,30 +638,53 @@ static ssize_t wwan_port_fops_read(struc
static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offp)
{
+ struct sk_buff *skb, *head = NULL, *tail = NULL;
struct wwan_port *port = filp->private_data;
- struct sk_buff *skb;
+ size_t frag_len, remain = count;
int ret;
ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
if (ret)
return ret;
- skb = alloc_skb(count, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- if (copy_from_user(skb_put(skb, count), buf, count)) {
- kfree_skb(skb);
- return -EFAULT;
- }
+ do {
+ frag_len = min(remain, port->frag_len);
+ skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto freeskb;
+ }
+ skb_reserve(skb, port->headroom_len);
+
+ if (!head) {
+ head = skb;
+ } else if (!tail) {
+ skb_shinfo(head)->frag_list = skb;
+ tail = skb;
+ } else {
+ tail->next = skb;
+ tail = skb;
+ }
+
+ if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) {
+ ret = -EFAULT;
+ goto freeskb;
+ }
+
+ if (skb != head) {
+ head->data_len += skb->len;
+ head->len += skb->len;
+ head->truesize += skb->truesize;
+ }
+ } while (remain -= frag_len);
- ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
- if (ret) {
- kfree_skb(skb);
- return ret;
- }
+ ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK));
+ if (!ret)
+ return count;
- return count;
+freeskb:
+ kfree_skb(head);
+ return ret;
}
static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -204,7 +204,7 @@ static struct wwan_hwsim_port *wwan_hwsi
port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
&wwan_hwsim_port_ops,
- port);
+ NULL, port);
if (IS_ERR(port->wwan)) {
err = PTR_ERR(port->wwan);
goto err_free_port;
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -929,7 +929,8 @@ static void wdm_wwan_init(struct wdm_dev
return;
}
- port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, desc);
+ port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops,
+ NULL, desc);
if (IS_ERR(port)) {
dev_err(&intf->dev, "%s: Unable to create WWAN port\n",
dev_name(intf->usb_dev));
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -61,11 +61,21 @@ struct wwan_port_ops {
poll_table *wait);
};
+/** struct wwan_port_caps - The WWAN port capbilities
+ * @frag_len: WWAN port TX fragments length
+ * @headroom_len: WWAN port TX fragments reserved headroom length
+ */
+struct wwan_port_caps {
+ size_t frag_len;
+ unsigned int headroom_len;
+};
+
/**
* wwan_create_port - Add a new WWAN port
* @parent: Device to use as parent and shared by all WWAN ports
* @type: WWAN port type
* @ops: WWAN port operations
+ * @caps: WWAN port capabilities
* @drvdata: Pointer to caller driver data
*
* Allocate and register a new WWAN port. The port will be automatically exposed
@@ -83,6 +93,7 @@ struct wwan_port_ops {
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
+ struct wwan_port_caps *caps,
void *drvdata);
/**


@@ -0,0 +1,88 @@
From ab87603b251134441a67385ecc9d3371be17b7a7 Mon Sep 17 00:00:00 2001
From: Kai-Heng Feng <kai.heng.feng@canonical.com>
Date: Wed, 17 May 2023 13:24:51 +0800
Subject: [PATCH] net: wwan: t7xx: Ensure init is completed before system sleep
When the system attempts to sleep while mtk_t7xx is not ready, the driver
cannot put the device to sleep:
[ 12.472918] mtk_t7xx 0000:57:00.0: [PM] Exiting suspend, modem in invalid state
[ 12.472936] mtk_t7xx 0000:57:00.0: PM: pci_pm_suspend(): t7xx_pci_pm_suspend+0x0/0x20 [mtk_t7xx] returns -14
[ 12.473678] mtk_t7xx 0000:57:00.0: PM: dpm_run_callback(): pci_pm_suspend+0x0/0x1b0 returns -14
[ 12.473711] mtk_t7xx 0000:57:00.0: PM: failed to suspend async: error -14
[ 12.764776] PM: Some devices failed to suspend, or early wake event detected
MediaTek confirmed the device can take a rather long time to complete
its initialization, so wait for up to 20 seconds until init is done.
Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
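The pattern applied below, in isolation: late init signals a completion,
and the PM .prepare callback blocks system sleep until it fires or times
out. A hedged sketch with hypothetical names (only the completion API and
the 20-second timeout mirror the patch):

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/jiffies.h>

struct example_dev {
        struct completion init_done;    /* completed by late init */
};

static int example_pm_prepare(struct device *dev)
{
        struct example_dev *edev = dev_get_drvdata(dev);

        /* Refuse to enter system sleep until init has finished,
         * giving up after 20 seconds.
         */
        if (!wait_for_completion_timeout(&edev->init_done, 20 * HZ))
                return -ETIMEDOUT;

        return 0;
}

/* Late init calls complete_all(&edev->init_done) exactly once. */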
---
drivers/net/wwan/t7xx/t7xx_pci.c | 18 ++++++++++++++++++
drivers/net/wwan/t7xx/t7xx_pci.h | 1 +
2 files changed, 19 insertions(+)
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -45,6 +45,7 @@
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
+#define T7XX_INIT_TIMEOUT 20
#define PM_SLEEP_DIS_TIMEOUT_MS 20
#define PM_ACK_TIMEOUT_MS 1500
#define PM_AUTOSUSPEND_MS 20000
@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_
spin_lock_init(&t7xx_dev->md_pm_lock);
init_completion(&t7xx_dev->sleep_lock_acquire);
init_completion(&t7xx_dev->pm_sr_ack);
+ init_completion(&t7xx_dev->init_done);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
device_init_wakeup(&pdev->dev, true);
@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_p
pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+ complete_all(&t7xx_dev->init_done);
}
static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci
__t7xx_pci_pm_suspend(pdev);
}
+static int t7xx_pci_pm_prepare(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct t7xx_pci_dev *t7xx_dev;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
+ dev_warn(dev, "Not ready for system sleep.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int t7xx_pci_pm_suspend(struct device *dev)
{
return __t7xx_pci_pm_suspend(to_pci_dev(dev));
@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(st
}
static const struct dev_pm_ops t7xx_pci_pm_ops = {
+ .prepare = t7xx_pci_pm_prepare,
.suspend = t7xx_pci_pm_suspend,
.resume = t7xx_pci_pm_resume,
.resume_noirq = t7xx_pci_pm_resume_noirq,
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -69,6 +69,7 @@ struct t7xx_pci_dev {
struct t7xx_modem *md;
struct t7xx_ccmni_ctrl *ccmni_ctlb;
bool rgu_pci_irq_en;
+ struct completion init_done;
/* Low Power Items */
struct list_head md_pm_entities;


@@ -0,0 +1,121 @@
From 72b1fe6cc6523908bfc339d07d18cb0f3469a643 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 25 May 2023 12:15:29 -1000
Subject: [PATCH] net: wwan: t7xx: Use alloc_ordered_workqueue() to create
ordered workqueues
BACKGROUND
==========
When multiple work items are queued to a workqueue, their execution order
doesn't match the queueing order. They may get executed in any order and
simultaneously. When fully serialized execution - one by one in the queueing
order - is needed, an ordered workqueue should be used which can be created
with alloc_ordered_workqueue().
However, alloc_ordered_workqueue() was a later addition. Before it, an
ordered workqueue could be obtained by creating an UNBOUND workqueue with
@max_active==1. This originally was an implementation side-effect which was
broken by 4c16bd327c74 ("workqueue: implement NUMA affinity for unbound
workqueues"). Because there were users that depended on the ordered execution,
5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered")
made the workqueue allocation path implicitly promote UNBOUND workqueues w/
@max_active==1 to ordered workqueues.
While this has worked okay, overloading the UNBOUND allocation interface
this way creates other issues. It's difficult to tell whether a given
workqueue actually needs to be ordered, and users that legitimately want a
min concurrency level wq unexpectedly get an ordered one instead. With
planned UNBOUND workqueue updates to improve execution locality and more
prevalence of chiplet designs which can benefit from such improvements, this
isn't a state we wanna be in forever.
This patch series audits all callsites that create an UNBOUND workqueue w/
@max_active==1 and converts them to alloc_ordered_workqueue() as necessary.
WHAT TO LOOK FOR
================
The conversions are from
alloc_workqueue(WQ_UNBOUND | flags, 1, args..)
to
alloc_ordered_workqueue(flags, args...)
which don't cause any functional changes. If you know that fully ordered
execution is not necessary, please let me know. I'll drop the conversion and
instead add a comment noting the fact to reduce confusion while conversion
is in progress.
If you aren't fully sure, it's completely fine to let the conversion
through. The behavior will stay exactly the same and we can always
reconsider later.
As there are follow-up workqueue core changes, I'd really appreciate if the
patch can be routed through the workqueue tree w/ your acks. Thanks.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Cc: Intel Corporation <linuxwwan@intel.com>
Cc: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
Cc: Liu Haijun <haijun.liu@mediatek.com>
Cc: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Cc: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Cc: Loic Poulain <loic.poulain@linaro.org>
Cc: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: netdev@vger.kernel.org
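Applied to a driver like this one, the conversion reduces to the sketch
below; the queue name and index argument are illustrative:

#include <linux/workqueue.h>

static struct workqueue_struct *example_make_tx_wq(int idx)
{
        /* Before: alloc_workqueue("ex_tx%d_worker",
         *                         WQ_UNBOUND | WQ_MEM_RECLAIM, 1, idx);
         * relied on the implicit UNBOUND/max_active==1 ordering promotion.
         * After, the ordering requirement is stated explicitly:
         */
        return alloc_ordered_workqueue("ex_tx%d_worker", WQ_MEM_RECLAIM, idx);
}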
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 +++++++------
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 5 +++--
2 files changed, 10 insertions(+), 8 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1293,9 +1293,9 @@ int t7xx_cldma_init(struct cldma_ctrl *m
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
md_ctrl->txq[i].worker =
- alloc_workqueue("md_hif%d_tx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
- 1, md_ctrl->hif_id, i);
+ alloc_ordered_workqueue("md_hif%d_tx%d_worker",
+ WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
+ md_ctrl->hif_id, i);
if (!md_ctrl->txq[i].worker)
goto err_workqueue;
@@ -1306,9 +1306,10 @@ int t7xx_cldma_init(struct cldma_ctrl *m
md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
- md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
- 1, md_ctrl->hif_id, i);
+ md_ctrl->rxq[i].worker =
+ alloc_ordered_workqueue("md_hif%d_rx%d_worker",
+ WQ_MEM_RECLAIM,
+ md_ctrl->hif_id, i);
if (!md_ctrl->rxq[i].worker)
goto err_workqueue;
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -618,8 +618,9 @@ int t7xx_dpmaif_txq_init(struct dpmaif_t
return ret;
}
- txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
- (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
+ txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
+ WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
+ txq->index);
if (!txq->worker)
return -ENOMEM;


@@ -0,0 +1,482 @@
From ba2274dcfda859b8a27193e68ad37bfe4da28ddc Mon Sep 17 00:00:00 2001
From: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Date: Tue, 11 Jul 2023 08:28:13 +0200
Subject: [PATCH] net: wwan: t7xx: Add AP CLDMA
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
At this moment, with the current status, t7xx is not functional due to
problems like this after a connection, if there is no activity:
[ 57.370534] mtk_t7xx 0000:72:00.0: [PM] SAP suspend error: -110
[ 57.370581] mtk_t7xx 0000:72:00.0: can't suspend
(t7xx_pci_pm_runtime_suspend [mtk_t7xx] returned -110)
because after this, the traffic no longer works.
The complete series 'net: wwan: t7xx: fw flashing & coredump support'
was reverted because of issues with the pci implementation.
In order to have at least the modem working, it would be enough if just
the first commit of the series is re-applied:
d20ef656f994 net: wwan: t7xx: Add AP CLDMA
With that, the Application Processor would be controlled and correctly
suspended, and the problems described above would be fixed (I am testing
here like this with no related issue).
This commit is independent of the others and not related to the
aforementioned PCI implementation for the new features: fw flashing and
coredump collection.
Use v2 patch version of d20ef656f994 as JinJian Song suggests
(https://patchwork.kernel.org/project/netdevbpf/patch/20230105154215.198828-1-m.chetan.kumar@linux.intel.com/).
Original text from the commit that would be re-applied:
d20ef656f994 net: wwan: t7xx: Add AP CLDMA
Author: Haijun Liu <haijun.liu@mediatek.com>
Date: Tue Aug 16 09:53:28 2022 +0530
The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to
communicate with the AP and Modem processors respectively. So far only
MD-CLDMA was being used; this patch enables AP-CLDMA.
Rename small Application Processor (sAP) to AP.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Link: https://lore.kernel.org/r/20230711062817.6108-1-jtornosm@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
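One structural detail in the diff below: initializing a second CLDMA unit
extends the probe error path, which unwinds in reverse order via the usual
goto pattern. A self-contained sketch, with stub helpers standing in for
the real t7xx_cldma_*/t7xx_port_proxy_* calls (all example_* names are
hypothetical):

enum example_cldma_id { EX_CLDMA_ID_AP, EX_CLDMA_ID_MD, EX_CLDMA_NUM };

struct example_md {
        void *ctrl[EX_CLDMA_NUM];
};

/* Stubs standing in for the real init/exit helpers. */
static int example_cldma_init(void *ctrl) { return 0; }
static void example_cldma_exit(void *ctrl) { }
static int example_proxy_init(struct example_md *md) { return 0; }

static int example_init_both_cldma(struct example_md *md)
{
        int ret;

        ret = example_cldma_init(md->ctrl[EX_CLDMA_ID_MD]);
        if (ret)
                return ret;

        ret = example_cldma_init(md->ctrl[EX_CLDMA_ID_AP]);
        if (ret)
                goto err_exit_md;

        ret = example_proxy_init(md);
        if (ret)
                goto err_exit_ap;

        return 0;

err_exit_ap:
        example_cldma_exit(md->ctrl[EX_CLDMA_ID_AP]);   /* reverse order */
err_exit_md:
        example_cldma_exit(md->ctrl[EX_CLDMA_ID_MD]);
        return ret;
}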
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 17 +++--
drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 2 +-
drivers/net/wwan/t7xx/t7xx_mhccif.h | 1 +
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 76 +++++++++++++++++-----
drivers/net/wwan/t7xx/t7xx_modem_ops.h | 2 +
drivers/net/wwan/t7xx/t7xx_port.h | 6 +-
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 8 ++-
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 18 ++++-
drivers/net/wwan/t7xx/t7xx_reg.h | 2 +-
drivers/net/wwan/t7xx/t7xx_state_monitor.c | 13 +++-
drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 +
11 files changed, 116 insertions(+), 31 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cld
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
u32 phy_ao_base, phy_pd_base;
- if (md_ctrl->hif_id != CLDMA_ID_MD)
- return;
-
- phy_ao_base = CLDMA1_AO_BASE;
- phy_pd_base = CLDMA1_PD_BASE;
- hw_info->phy_interrupt_id = CLDMA1_INT;
hw_info->hw_mode = MODE_BIT_64;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD) {
+ phy_ao_base = CLDMA1_AO_BASE;
+ phy_pd_base = CLDMA1_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA1_INT;
+ } else {
+ phy_ao_base = CLDMA0_AO_BASE;
+ phy_pd_base = CLDMA0_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA0_INT;
+ }
+
hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -34,7 +34,7 @@
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
- * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
+ * @CLDMA_ID_AP: Application Processor control channel.
* @CLDMA_NUM: Number of CLDMA HW units available.
*/
enum cldma_id {
--- a/drivers/net/wwan/t7xx/t7xx_mhccif.h
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h
@@ -25,6 +25,7 @@
D2H_INT_EXCEPTION_CLEARQ_DONE | \
D2H_INT_EXCEPTION_ALLQ_RESET | \
D2H_INT_PORT_ENUM | \
+ D2H_INT_ASYNC_AP_HK | \
D2H_INT_ASYNC_MD_HK)
void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -44,6 +44,7 @@
#include "t7xx_state_monitor.h"
#define RT_ID_MD_PORT_ENUM 0
+#define RT_ID_AP_PORT_ENUM 1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID 0x49434343
@@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7x
}
t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
if (stage == HIF_EX_INIT)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
@@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struc
if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
return -EINVAL;
- if (i == RT_ID_MD_PORT_ENUM)
+ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
}
@@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_m
return 0;
}
-static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
+static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
+ struct t7xx_fsm_ctl *ctl,
enum t7xx_fsm_event_state event_id,
enum t7xx_fsm_event_state err_detect)
{
struct t7xx_fsm_event *event = NULL, *event_next;
- struct t7xx_sys_info *core_info = &md->core_md;
struct device *dev = &md->t7xx_dev->pdev->dev;
unsigned long flags;
int ret;
@@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_st
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
- t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+}
+
+static void t7xx_ap_hk_wq(struct work_struct *work)
+{
+ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
+ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
+ md->core_ap.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
- void __iomem *mhccif_base;
unsigned int int_sta;
unsigned long flags;
switch (evt_id) {
case FSM_PRE_START:
- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
+ D2H_INT_ASYNC_AP_HK);
break;
case FSM_START:
@@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_mo
ctl->exp_flg = true;
md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else if (ctl->exp_flg) {
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
- } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
- queue_work(md->handshake_wq, &md->handshake_work);
- md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
- mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
- iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
- t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else {
- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
+
+ if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
+ queue_work(md->handshake_wq, &md->handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ }
+
+ if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
+ queue_work(md->handshake_wq, &md->ap_handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
+ }
}
spin_unlock_irqrestore(&md->exp_lock, flags);
@@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_mo
case FSM_READY:
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
break;
default:
@@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc(
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
+ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
return md;
}
@@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t
md->exp_id = 0;
t7xx_fsm_reset(md);
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
t7xx_port_proxy_reset(md->port_prox);
md->md_init_finish = true;
return t7xx_core_reset(md);
@@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
if (ret)
goto err_destroy_hswq;
+ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
+ if (ret)
+ goto err_destroy_hswq;
+
ret = t7xx_fsm_init(md);
if (ret)
goto err_destroy_hswq;
@@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
if (ret)
goto err_uninit_ccmni;
- ret = t7xx_port_proxy_init(md);
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
if (ret)
goto err_uninit_md_cldma;
+ ret = t7xx_port_proxy_init(md);
+ if (ret)
+ goto err_uninit_ap_cldma;
+
ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
- if (ret) /* fsm_uninit flushes cmd queue */
+ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
goto err_uninit_proxy;
t7xx_md_sys_sw_init(t7xx_dev);
@@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
err_uninit_proxy:
t7xx_port_proxy_uninit(md->port_prox);
+err_uninit_ap_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
+
err_uninit_md_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
@@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t
t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
t7xx_ccmni_exit(t7xx_dev);
t7xx_fsm_uninit(md);
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -66,10 +66,12 @@ struct t7xx_modem {
struct cldma_ctrl *md_ctrl[CLDMA_NUM];
struct t7xx_pci_dev *t7xx_dev;
struct t7xx_sys_info core_md;
+ struct t7xx_sys_info core_ap;
bool md_init_finish;
bool rgu_irq_asserted;
struct workqueue_struct *handshake_wq;
struct work_struct handshake_work;
+ struct work_struct ap_handshake_work;
struct t7xx_fsm_ctl *fsm_ctl;
struct port_proxy *port_prox;
unsigned int exp_id;
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -36,9 +36,13 @@
/* Channel ID and Message ID definitions.
* The channel number consists of peer_id(15:12) , channel_id(11:0)
* peer_id:
- * 0:reserved, 1: to sAP, 2: to MD
+ * 0:reserved, 1: to AP, 2: to MD
*/
enum port_ch {
+ /* to AP */
+ PORT_CH_AP_CONTROL_RX = 0x1000,
+ PORT_CH_AP_CONTROL_TX = 0x1001,
+
/* to MD */
PORT_CH_CONTROL_RX = 0x2000,
PORT_CH_CONTROL_TX = 0x2001,
--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -167,8 +167,12 @@ static int control_msg_handler(struct t7
case CTL_ID_HS2_MSG:
skb_pull(skb, sizeof(*ctrl_msg_h));
- if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
- ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
+ if (port_conf->rx_ch == PORT_CH_CONTROL_RX ||
+ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) {
+ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ?
+ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
+
+ ret = t7xx_fsm_append_event(ctl, event, skb->data,
le32_to_cpu(ctrl_msg_h->data_length));
if (ret)
dev_err(port->dev, "Failed to append Handshake 2 event");
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,7 +48,7 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
-static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
.rx_ch = PORT_CH_UART2_RX,
@@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_
.path_id = CLDMA_ID_MD,
.ops = &ctl_port_ops,
.name = "t7xx_ctrl",
+ }, {
+ .tx_ch = PORT_CH_AP_CONTROL_TX,
+ .rx_ch = PORT_CH_AP_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_AP,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ap_ctrl",
},
};
@@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(st
if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
md->core_md.ctl_port = port;
+ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
+ md->core_ap.ctl_port = port;
+
port->t7xx_dev = md->t7xx_dev;
port->dev = &md->t7xx_dev->pdev->dev;
spin_lock_init(&port->port_update_lock);
@@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(st
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
+ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
int i;
@@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_
port_prox->dev = dev;
for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
+ port_prox->ports[i].port_conf = &t7xx_port_conf[i];
port_prox->port_count = port_count;
t7xx_proxy_init_all_ports(md);
@@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_mod
if (ret)
return ret;
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -56,7 +56,7 @@
#define D2H_INT_RESUME_ACK BIT(12)
#define D2H_INT_SUSPEND_ACK_AP BIT(13)
#define D2H_INT_RESUME_ACK_AP BIT(14)
-#define D2H_INT_ASYNC_SAP_HK BIT(15)
+#define D2H_INT_ASYNC_AP_HK BIT(15)
#define D2H_INT_ASYNC_MD_HK BIT(16)
/* Register base */
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
t7xx_md_event_notify(md, FSM_START);
- wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
- HZ * 60);
+ wait_event_interruptible_timeout(ctl->async_hk_wq,
+ (md->core_md.ready && md->core_ap.ready) ||
+ ctl->exp_flg, HZ * 60);
dev = &md->t7xx_dev->pdev->dev;
if (ctl->exp_flg)
@@ -299,6 +300,13 @@ static int fsm_routine_starting(struct t
fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
return -ETIMEDOUT;
+ } else if (!md->core_ap.ready) {
+ dev_err(dev, "AP handshake timeout\n");
+ if (md->core_ap.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
}
t7xx_pci_pm_init_late(md->t7xx_dev);
@@ -335,6 +343,7 @@ static void fsm_routine_start(struct t7x
return;
}
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -38,10 +38,12 @@ enum t7xx_fsm_state {
enum t7xx_fsm_event_state {
FSM_EVENT_INVALID,
FSM_EVENT_MD_HS2,
+ FSM_EVENT_AP_HS2,
FSM_EVENT_MD_EX,
FSM_EVENT_MD_EX_REC_OK,
FSM_EVENT_MD_EX_PASS,
FSM_EVENT_MD_HS2_EXIT,
+ FSM_EVENT_AP_HS2_EXIT,
FSM_EVENT_MAX
};


@@ -0,0 +1,79 @@
From fece7a8c65d1476b901b969a07b2979e1b459e66 Mon Sep 17 00:00:00 2001
From: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Date: Fri, 28 Oct 2022 21:04:50 +0530
Subject: [PATCH] net: wwan: t7xx: use union to group port type specific data
Use a union inside t7xx_port to group port-type-specific data members.
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_port.h | 6 +++++-
drivers/net/wwan/t7xx/t7xx_port_wwan.c | 16 ++++++++--------
2 files changed, 13 insertions(+), 9 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -99,7 +99,6 @@ struct t7xx_port_conf {
struct t7xx_port {
/* Members not initialized in definition */
const struct t7xx_port_conf *port_conf;
- struct wwan_port *wwan_port;
struct t7xx_pci_dev *t7xx_dev;
struct device *dev;
u16 seq_nums[2]; /* TX/RX sequence numbers */
@@ -122,6 +121,11 @@ struct t7xx_port {
int rx_length_th;
bool chan_enable;
struct task_struct *thread;
+ union {
+ struct {
+ struct wwan_port *wwan_port;
+ } wwan;
+ };
};
struct sk_buff *t7xx_port_alloc_skb(int payload);
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -109,12 +109,12 @@ static int t7xx_port_wwan_init(struct t7
static void t7xx_port_wwan_uninit(struct t7xx_port *port)
{
- if (!port->wwan_port)
+ if (!port->wwan.wwan_port)
return;
port->rx_length_th = 0;
- wwan_remove_port(port->wwan_port);
- port->wwan_port = NULL;
+ wwan_remove_port(port->wwan.wwan_port);
+ port->wwan.wwan_port = NULL;
}
static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
@@ -129,7 +129,7 @@ static int t7xx_port_wwan_recv_skb(struc
return 0;
}
- wwan_port_rx(port->wwan_port, skb);
+ wwan_port_rx(port->wwan.wwan_port, skb);
return 0;
}
@@ -158,10 +158,10 @@ static void t7xx_port_wwan_md_state_noti
if (state != MD_STATE_READY)
return;
- if (!port->wwan_port) {
- port->wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, port);
- if (IS_ERR(port->wwan_port))
+ if (!port->wwan.wwan_port) {
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, port);
+ if (IS_ERR(port->wwan.wwan_port))
dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
}
}


@@ -0,0 +1,237 @@
From 3349e4a48acb0923fa98d2beac82a833a76116cb Mon Sep 17 00:00:00 2001
From: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Date: Fri, 28 Oct 2022 21:05:34 +0530
Subject: [PATCH] net: wwan: t7xx: Add port for modem logging
The Modem Logging (MDL) port provides an interface to collect modem
logs for debugging purposes. MDL is supported by the relay interface
and the mtk_t7xx port infrastructure. MDL allows user-space apps to
control logging via an MBIM command and to collect logs via the relay
interface, while the port infrastructure facilitates communication between
the driver and the modem.
Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Acked-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
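On the collection side, user space simply reads the relay buffer file the
driver exposes under debugfs. A minimal reader might look like the sketch
below; the path is an assumption inferred from the relay_open()/debugfs
calls in the diff and may differ on a real system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path: wwan debugfs dir + KBUILD_MODNAME + relay file. */
        const char *path = "/sys/kernel/debug/wwan/wwan0/mtk_t7xx/relay_ch0";
        char buf[4096];
        ssize_t n;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);      /* dump log stream */

        close(fd);
        return 0;
}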
---
drivers/net/wwan/Kconfig | 1 +
drivers/net/wwan/t7xx/Makefile | 3 +
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 2 +
drivers/net/wwan/t7xx/t7xx_pci.h | 3 +
drivers/net/wwan/t7xx/t7xx_port.h | 3 +
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 12 +++
drivers/net/wwan/t7xx/t7xx_port_proxy.h | 4 +
drivers/net/wwan/t7xx/t7xx_port_trace.c | 116 ++++++++++++++++++++++++
8 files changed, 144 insertions(+)
create mode 100644 drivers/net/wwan/t7xx/t7xx_port_trace.c
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -108,6 +108,7 @@ config IOSM
config MTK_T7XX
tristate "MediaTek PCIe 5G WWAN modem T7xx device"
depends on PCI
+ select RELAY if WWAN_DEBUGFS
help
Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device.
Adapts WWAN framework and provides network interface like wwan0
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -16,3 +16,6 @@ mtk_t7xx-y:= t7xx_pci.o \
t7xx_hif_dpmaif_rx.o \
t7xx_dpmaif.o \
t7xx_netdev.o
+
+mtk_t7xx-$(CONFIG_WWAN_DEBUGFS) += \
+ t7xx_port_trace.o \
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1018,6 +1018,8 @@ static int t7xx_cldma_late_init(struct c
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
+
+ md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -79,6 +79,9 @@ struct t7xx_pci_dev {
spinlock_t md_pm_lock; /* Protects PCI resource lock */
unsigned int sleep_disable_count;
struct completion sleep_lock_acquire;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
};
enum t7xx_pm_id {
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -125,6 +125,9 @@ struct t7xx_port {
struct {
struct wwan_port *wwan_port;
} wwan;
+ struct {
+ struct rchan *relaych;
+ } log;
};
};
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -70,6 +70,18 @@ static const struct t7xx_port_conf t7xx_
.name = "MBIM",
.port_type = WWAN_PORT_MBIM,
}, {
+#ifdef CONFIG_WWAN_DEBUGFS
+ .tx_ch = PORT_CH_MD_LOG_TX,
+ .rx_ch = PORT_CH_MD_LOG_RX,
+ .txq_index = 7,
+ .rxq_index = 7,
+ .txq_exp_index = 7,
+ .rxq_exp_index = 7,
+ .path_id = CLDMA_ID_MD,
+ .ops = &t7xx_trace_port_ops,
+ .name = "mdlog",
+ }, {
+#endif
.tx_ch = PORT_CH_CONTROL_TX,
.rx_ch = PORT_CH_CONTROL_RX,
.txq_index = Q_IDX_CTRL,
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -87,6 +87,10 @@ struct ctrl_msg_header {
extern struct port_ops wwan_sub_port_ops;
extern struct port_ops ctl_port_ops;
+#ifdef CONFIG_WWAN_DEBUGFS
+extern struct port_ops t7xx_trace_port_ops;
+#endif
+
void t7xx_port_proxy_reset(struct port_proxy *port_prox);
void t7xx_port_proxy_uninit(struct port_proxy *port_prox);
int t7xx_port_proxy_init(struct t7xx_modem *md);
--- /dev/null
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/skbuff.h>
+#include <linux/wwan.h>
+
+#include "t7xx_port.h"
+#include "t7xx_port_proxy.h"
+#include "t7xx_state_monitor.h"
+
+#define T7XX_TRC_SUB_BUFF_SIZE 131072
+#define T7XX_TRC_N_SUB_BUFF 32
+
+static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+ pr_err_ratelimited("Relay_buf full dropping traces");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = t7xx_trace_subbuf_start_handler,
+ .create_buf_file = t7xx_trace_create_buf_file_handler,
+ .remove_buf_file = t7xx_trace_remove_buf_file_handler,
+};
+
+static void t7xx_trace_port_uninit(struct t7xx_port *port)
+{
+ struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir;
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return;
+
+ relay_close(relaych);
+ debugfs_remove_recursive(debugfs_dir);
+}
+
+static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct rchan *relaych = port->log.relaych;
+
+ if (!relaych)
+ return -EINVAL;
+
+ relay_write(relaych, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state)
+{
+ struct rchan *relaych = port->log.relaych;
+ struct dentry *debugfs_wwan_dir;
+ struct dentry *debugfs_dir;
+
+ if (state != MD_STATE_READY || relaych)
+ return;
+
+ debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev);
+ if (IS_ERR(debugfs_wwan_dir))
+ return;
+
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir);
+ if (IS_ERR_OR_NULL(debugfs_dir)) {
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create debugfs for trace");
+ return;
+ }
+
+ relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE,
+ T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL);
+ if (!relaych)
+ goto err_rm_debugfs_dir;
+
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ port->log.relaych = relaych;
+ port->t7xx_dev->debugfs_dir = debugfs_dir;
+ return;
+
+err_rm_debugfs_dir:
+ debugfs_remove_recursive(debugfs_dir);
+ wwan_put_debugfs_dir(debugfs_wwan_dir);
+ dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name);
+}
+
+struct port_ops t7xx_trace_port_ops = {
+ .recv_skb = t7xx_trace_port_recv_skb,
+ .uninit = t7xx_trace_port_uninit,
+ .md_state_notify = t7xx_port_trace_md_state_notify,
+};


@@ -0,0 +1,31 @@
From c053d7b6bdcb45780036b32be6a950f71a78bf52 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= <ilpo.jarvinen@linux.intel.com>
Date: Thu, 3 Nov 2022 14:48:28 +0530
Subject: [PATCH] net: wwan: t7xx: Use needed_headroom instead of
hard_header_len
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
hard_header_len is used by gro_list_prepare(), but on RX there
is no header, so use needed_headroom instead.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: Sreehari Kancharla <sreehari.kancharla@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_netdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -161,7 +161,7 @@ static void t7xx_ccmni_post_stop(struct
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
- dev->hard_header_len += sizeof(struct ccci_header);
+ dev->needed_headroom += sizeof(struct ccci_header);
dev->mtu = ETH_DATA_LEN;
dev->max_mtu = CCMNI_MTU_MAX;


@@ -0,0 +1,652 @@
From 5545b7b9f294de7f95ec6a7cb1de0db52296001c Mon Sep 17 00:00:00 2001
From: Haijun Liu <haijun.liu@mediatek.com>
Date: Thu, 3 Nov 2022 14:48:29 +0530
Subject: [PATCH] net: wwan: t7xx: Add NAPI support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Replace the work-queue-based RX flow with a NAPI implementation.
Remove rx_thread and dpmaif_rxq_work.
Enable GRO on the RX path.
Introduce a dummy network device. Its responsibilities are:
- Binds one NAPI object for each DL HW queue and acts as
the agent of all those network devices.
- Uses the NAPI objects to poll DL packets.
- Helps to dispatch each packet to the network interface.
(A minimal poll skeleton is sketched after this message.)
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Sreehari Kancharla <sreehari.kancharla@linux.intel.com>
Signed-off-by: Sreehari Kancharla <sreehari.kancharla@linux.intel.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Acked-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Acked-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
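The shape of the conversion — a dummy netdev hosting one NAPI context per
RX queue, and a poll routine that completes and re-arms the IRQ when under
budget — is roughly as follows. This is a simplified sketch with
hypothetical example_* helpers; error and PM handling are omitted:

#include <linux/netdevice.h>

struct example_ctrl {
        struct net_device dummy_dev;    /* anchor for NAPI only */
        struct napi_struct napi;
};

/* Hypothetical helpers standing in for the real queue handling. */
static int example_rx_collect(struct napi_struct *napi, int budget);
static void example_unmask_rx_irq(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_rx_collect(napi, budget);

        if (work_done < budget) {
                /* Under budget: all pending work done, re-arm the IRQ. */
                napi_complete_done(napi, work_done);
                example_unmask_rx_irq(napi);
        }
        return work_done;
}

static void example_napi_setup(struct example_ctrl *ctlb)
{
        init_dummy_netdev(&ctlb->dummy_dev);
        /* Four-argument netif_napi_add() with an explicit budget,
         * matching the kernel this series targets.
         */
        netif_napi_add(&ctlb->dummy_dev, &ctlb->napi, example_napi_poll,
                       NAPI_POLL_WEIGHT);
        napi_enable(&ctlb->napi);
}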
---
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 14 +-
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 218 +++++++--------------
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 1 +
drivers/net/wwan/t7xx/t7xx_netdev.c | 89 ++++++++-
drivers/net/wwan/t7xx/t7xx_netdev.h | 5 +
5 files changed, 161 insertions(+), 166 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
@@ -20,6 +20,7 @@
#include <linux/bitmap.h>
#include <linux/mm_types.h>
+#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -109,20 +110,14 @@ struct dpmaif_rx_queue {
struct dpmaif_bat_request *bat_req;
struct dpmaif_bat_request *bat_frag;
- wait_queue_head_t rx_wq;
- struct task_struct *rx_thread;
- struct sk_buff_head skb_list;
- unsigned int skb_list_max_len;
-
- struct workqueue_struct *worker;
- struct work_struct dpmaif_rxq_work;
-
atomic_t rx_processing;
struct dpmaif_ctrl *dpmaif_ctrl;
unsigned int expect_pit_seq;
unsigned int pit_remain_release_cnt;
struct dpmaif_cur_rx_skb_info rx_data_info;
+ struct napi_struct napi;
+ bool sleep_lock_pending;
};
struct dpmaif_tx_queue {
@@ -168,7 +163,8 @@ enum dpmaif_txq_state {
struct dpmaif_callbacks {
void (*state_notify)(struct t7xx_pci_dev *t7xx_dev,
enum dpmaif_txq_state state, int txq_number);
- void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb);
+ void (*recv_skb)(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi);
};
struct dpmaif_ctrl {
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -45,6 +45,7 @@
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
+#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#define DPMAIF_BAT_COUNT 8192
@@ -76,43 +77,6 @@ static unsigned int t7xx_normal_pit_bid(
return value;
}
-static int t7xx_dpmaif_net_rx_push_thread(void *arg)
-{
- struct dpmaif_rx_queue *q = arg;
- struct dpmaif_ctrl *hif_ctrl;
- struct dpmaif_callbacks *cb;
-
- hif_ctrl = q->dpmaif_ctrl;
- cb = hif_ctrl->callbacks;
-
- while (!kthread_should_stop()) {
- struct sk_buff *skb;
- unsigned long flags;
-
- if (skb_queue_empty(&q->skb_list)) {
- if (wait_event_interruptible(q->rx_wq,
- !skb_queue_empty(&q->skb_list) ||
- kthread_should_stop()))
- continue;
-
- if (kthread_should_stop())
- break;
- }
-
- spin_lock_irqsave(&q->skb_list.lock, flags);
- skb = __skb_dequeue(&q->skb_list);
- spin_unlock_irqrestore(&q->skb_list.lock, flags);
-
- if (!skb)
- continue;
-
- cb->recv_skb(hif_ctrl->t7xx_dev, skb);
- cond_resched();
- }
-
- return 0;
-}
-
static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
const unsigned int q_num, const unsigned int bat_cnt)
{
@@ -726,21 +690,10 @@ static int t7xx_dpmaifq_rx_notify_hw(str
return ret;
}
-static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->skb_list.lock, flags);
- if (rxq->skb_list.qlen < rxq->skb_list_max_len)
- __skb_queue_tail(&rxq->skb_list, skb);
- else
- dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
-}
-
static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
struct dpmaif_cur_rx_skb_info *skb_info)
{
+ struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
struct sk_buff *skb = skb_info->cur_skb;
struct t7xx_skb_cb *skb_cb;
u8 netif_id;
@@ -758,11 +711,11 @@ static void t7xx_dpmaif_rx_skb(struct dp
skb_cb = T7XX_SKB_CB(skb);
skb_cb->netif_idx = netif_id;
skb_cb->rx_pkt_type = skb_info->pkt_type;
- t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
+ dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
}
static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
- const unsigned long timeout)
+ const unsigned int budget, int *once_more)
{
unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
struct device *dev = rxq->dpmaif_ctrl->dev;
@@ -777,13 +730,14 @@ static int t7xx_dpmaif_rx_start(struct d
struct dpmaif_pit *pkt_info;
u32 val;
- if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
+ if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
break;
pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
- return -EAGAIN;
+ *once_more = 1;
+ return recv_skb_cnt;
}
val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
@@ -817,12 +771,7 @@ static int t7xx_dpmaif_rx_start(struct d
}
memset(skb_info, 0, sizeof(*skb_info));
-
recv_skb_cnt++;
- if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
- wake_up_all(&rxq->rx_wq);
- recv_skb_cnt = 0;
- }
}
}
@@ -837,16 +786,13 @@ static int t7xx_dpmaif_rx_start(struct d
}
}
- if (recv_skb_cnt)
- wake_up_all(&rxq->rx_wq);
-
if (!ret)
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
if (ret)
return ret;
- return rx_cnt;
+ return recv_skb_cnt;
}
static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
@@ -863,53 +809,30 @@ static unsigned int t7xx_dpmaifq_poll_pi
return pit_cnt;
}
-static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
- const unsigned int q_num, const unsigned int budget)
+static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
+ const unsigned int q_num,
+ const unsigned int budget, int *once_more)
{
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
- unsigned long time_limit;
unsigned int cnt;
+ int ret = 0;
- time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);
-
- while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
- unsigned int rd_cnt;
- int real_cnt;
-
- rd_cnt = min(cnt, budget);
-
- real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
- if (real_cnt < 0)
- return real_cnt;
-
- if (real_cnt < cnt)
- return -EAGAIN;
- }
-
- return 0;
-}
+ cnt = t7xx_dpmaifq_poll_pit(rxq);
+ if (!cnt)
+ return ret;
-static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
-{
- struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
- int ret;
+ ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
+ if (ret < 0)
+ dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
- ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
- if (ret < 0) {
- /* Try one more time */
- queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- } else {
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
- }
+ return ret;
}
-static void t7xx_dpmaif_rxq_work(struct work_struct *work)
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
{
- struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
- struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
- int ret;
+ struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
+ struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
+ int ret, once_more = 0, work_done = 0;
atomic_set(&rxq->rx_processing, 1);
/* Ensure rx_processing is changed to 1 before actually begin RX flow */
@@ -917,22 +840,52 @@ static void t7xx_dpmaif_rxq_work(struct
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
- dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
- return;
+ dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
+ return work_done;
}
- ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
- if (ret < 0 && ret != -EACCES)
- return;
+ if (!rxq->sleep_lock_pending) {
+ pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ t7xx_pci_disable_sleep(t7xx_dev);
+ }
+
+ ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
+ if (!ret) {
+ napi_complete_done(napi, work_done);
+ rxq->sleep_lock_pending = true;
+ napi_reschedule(napi);
+ return work_done;
+ }
+
+ rxq->sleep_lock_pending = false;
+ while (work_done < budget) {
+ int each_budget = budget - work_done;
+ int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
+ each_budget, &once_more);
+ if (rx_cnt > 0)
+ work_done += rx_cnt;
+ else
+ break;
+ }
- t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
- if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ if (once_more) {
+ napi_gro_flush(napi, false);
+ work_done = budget;
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ } else if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
+ }
- t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
- pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
+
+ return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
@@ -947,7 +900,7 @@ void t7xx_dpmaif_irq_rx_done(struct dpma
}
rxq = &dpmaif_ctrl->rxq[qno];
- queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
+ napi_schedule(&rxq->napi);
}
static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
@@ -1082,50 +1035,14 @@ int t7xx_dpmaif_rxq_init(struct dpmaif_r
int ret;
ret = t7xx_dpmaif_rx_alloc(queue);
- if (ret < 0) {
+ if (ret < 0)
dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
- return ret;
- }
-
- INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);
-
- queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
- if (!queue->worker) {
- ret = -ENOMEM;
- goto err_free_rx_buffer;
- }
-
- init_waitqueue_head(&queue->rx_wq);
- skb_queue_head_init(&queue->skb_list);
- queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
- queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
- queue, "dpmaif_rx%d_push", queue->index);
-
- ret = PTR_ERR_OR_ZERO(queue->rx_thread);
- if (ret)
- goto err_free_workqueue;
-
- return 0;
-
-err_free_workqueue:
- destroy_workqueue(queue->worker);
-
-err_free_rx_buffer:
- t7xx_dpmaif_rx_buf_free(queue);
return ret;
}
void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
- if (queue->worker)
- destroy_workqueue(queue->worker);
-
- if (queue->rx_thread)
- kthread_stop(queue->rx_thread);
-
- skb_queue_purge(&queue->skb_list);
t7xx_dpmaif_rx_buf_free(queue);
}
@@ -1188,8 +1105,6 @@ void t7xx_dpmaif_rx_stop(struct dpmaif_c
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
int timeout, value;
- flush_work(&rxq->dpmaif_rxq_work);
-
timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
!value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
if (timeout)
@@ -1205,7 +1120,6 @@ static void t7xx_dpmaif_stop_rxq(struct
{
int cnt, j = 0;
- flush_work(&rxq->dpmaif_rxq_work);
rxq->que_started = false;
do {
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h
@@ -112,5 +112,6 @@ int t7xx_dpmaif_bat_alloc(const struct d
const enum bat_type buf_type);
void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl,
struct dpmaif_bat_request *bat_req);
+int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget);
#endif /* __T7XX_HIF_DPMA_RX_H__ */
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -22,6 +22,7 @@
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
@@ -29,6 +30,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
+#include <net/ipv6.h>
#include <net/pkt_sched.h>
#include "t7xx_hif_dpmaif_rx.h"
@@ -39,13 +41,47 @@
#include "t7xx_state_monitor.h"
#define IP_MUX_SESSION_DEFAULT 0
+#define SBD_PACKET_TYPE_MASK GENMASK(7, 4)
+
+static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_enable(ctlb->napi[i]);
+ napi_schedule(ctlb->napi[i]);
+ }
+ ctlb->is_napi_en = true;
+}
+
+static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ if (!ctlb->is_napi_en)
+ return;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ napi_synchronize(ctlb->napi[i]);
+ napi_disable(ctlb->napi[i]);
+ }
+
+ ctlb->is_napi_en = false;
+}
static int t7xx_ccmni_open(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ccmni_ctl);
+
atomic_inc(&ccmni->usage);
return 0;
}
@@ -53,8 +89,12 @@ static int t7xx_ccmni_open(struct net_de
static int t7xx_ccmni_close(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
+ struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
atomic_dec(&ccmni->usage);
+ if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ccmni_ctl);
+
netif_carrier_off(dev);
netif_tx_disable(dev);
return 0;
@@ -127,6 +167,9 @@ static void t7xx_ccmni_start(struct t7xx
netif_carrier_on(ccmni->dev);
}
}
+
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_enable_napi(ctlb);
}
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
@@ -149,6 +192,9 @@ static void t7xx_ccmni_post_stop(struct
struct t7xx_ccmni *ccmni;
int i;
+ if (atomic_read(&ctlb->napi_usr_refcnt))
+ t7xx_ccmni_disable_napi(ctlb);
+
for (i = 0; i < ctlb->nic_dev_num; i++) {
ccmni = ctlb->ccmni_inst[i];
if (!ccmni)
@@ -183,6 +229,9 @@ static void t7xx_ccmni_wwan_setup(struct
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features |= NETIF_F_GRO;
+
dev->needs_free_netdev = true;
dev->type = ARPHRD_NONE;
@@ -190,6 +239,34 @@ static void t7xx_ccmni_wwan_setup(struct
dev->netdev_ops = &ccmni_netdev_ops;
}
+static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ /* one HW, but shared with multiple net devices,
+ * so add a dummy device for NAPI.
+ */
+ init_dummy_netdev(&ctlb->dummy_dev);
+ atomic_set(&ctlb->napi_usr_refcnt, 0);
+ ctlb->is_napi_en = false;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
+ netif_napi_add(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
+ NIC_NAPI_POLL_BUDGET);
+ }
+}
+
+static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+{
+ int i;
+
+ for (i = 0; i < RXQ_NUM; i++) {
+ netif_napi_del(ctlb->napi[i]);
+ ctlb->napi[i] = NULL;
+ }
+}
+
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
struct netlink_ext_ack *extack)
{
@@ -311,7 +388,8 @@ static void init_md_status_notifier(stru
t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}
-static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
+static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct t7xx_skb_cb *skb_cb;
struct net_device *net_dev;
@@ -321,23 +399,22 @@ static void t7xx_ccmni_recv_skb(struct t
skb_cb = T7XX_SKB_CB(skb);
netif_id = skb_cb->netif_idx;
- ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
+ ccmni = ccmni_ctlb->ccmni_inst[netif_id];
if (!ccmni) {
dev_kfree_skb(skb);
return;
}
net_dev = ccmni->dev;
- skb->dev = net_dev;
-
pkt_type = skb_cb->rx_pkt_type;
+ skb->dev = net_dev;
if (pkt_type == PKT_TYPE_IP6)
skb->protocol = htons(ETH_P_IPV6);
else
skb->protocol = htons(ETH_P_IP);
skb_len = skb->len;
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb_len;
}
@@ -404,6 +481,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev
if (!ctlb->hif_ctrl)
return -ENOMEM;
+ t7xx_init_netdev_napi(ctlb);
init_md_status_notifier(t7xx_dev);
return 0;
}
@@ -419,5 +497,6 @@ void t7xx_ccmni_exit(struct t7xx_pci_dev
ctlb->wwan_is_registered = false;
}
+ t7xx_uninit_netdev_napi(ctlb);
t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}
--- a/drivers/net/wwan/t7xx/t7xx_netdev.h
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -30,6 +30,7 @@
#define CCMNI_NETDEV_WDT_TO (1 * HZ)
#define CCMNI_MTU_MAX 3000
+#define NIC_NAPI_POLL_BUDGET 128
struct t7xx_ccmni {
u8 index;
@@ -47,6 +48,10 @@ struct t7xx_ccmni_ctrl {
unsigned int md_sta;
struct t7xx_fsm_notifier md_status_notify;
bool wwan_is_registered;
+ struct net_device dummy_dev;
+ struct napi_struct *napi[RXQ_NUM];
+ atomic_t napi_usr_refcnt;
+ bool is_napi_en;
};
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev);


@@ -0,0 +1,154 @@
From 364d0221f1788e5225006ba7a0026e5968431c29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kornel=20Dul=C4=99ba?= <mindal@semihalf.com>
Date: Thu, 26 Jan 2023 13:25:34 +0000
Subject: [PATCH] net: wwan: t7xx: Fix Runtime PM resume sequence
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Resume the device before calling napi_schedule, instead of doing it in the
napi poll routine. Polling is done in softirq context, and we can't call
the PM resume logic from there as it is blocking and not irq safe.
In order to make this work, modify the interrupt handler to be run from an
irq handler thread.
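A minimal sketch of that split (handler names here are illustrative, not the
driver's): the hard handler only acks the interrupt and defers, while the
threaded handler is allowed to block, so it can resume the device before
scheduling NAPI:

    /* Hard IRQ context: must not sleep. Ack the interrupt and defer. */
    static irqreturn_t my_isr(int irq, void *data)
    {
            /* mask and clear the PCIe interrupt here ... */
            return IRQ_WAKE_THREAD;  /* wake my_isr_thread() */
    }

    /* IRQ thread context: may sleep, so runtime PM resume is safe here. */
    static irqreturn_t my_isr_thread(int irq, void *data)
    {
            /* pm_runtime_resume_and_get(dev), then napi_schedule() ... */
            return IRQ_HANDLED;
    }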
Fixes: 5545b7b9f294 ("net: wwan: t7xx: Add NAPI support")
Signed-off-by: Kornel Dulęba <mindal@semihalf.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 11 +++++++-
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 29 +++++++++++++++-------
drivers/net/wwan/t7xx/t7xx_netdev.c | 16 +++++++++++-
3 files changed, 45 insertions(+), 11 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handl
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_ir
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
- t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
- if (!rxq->sleep_lock_pending) {
- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
- }
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
- atomic_set(&rxq->rx_processing, 0);
-
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
- int qno;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpma
}
rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+ /* We need to make sure that the modem has been resumed before
+ * calling napi. This can't be done inside the polling function
+ * as we could be blocked waiting for device to be resumed,
+ * which can't be done from softirq context the poll function
+ * is running in.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
napi_schedule(&rxq->napi);
}
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
@@ -45,12 +46,25 @@
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
- int i;
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+ * napi_schedule. It will be decreased in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}


@@ -0,0 +1,300 @@
From 36bd28c1cb0dbf48645cfe43159907fb3253b33a Mon Sep 17 00:00:00 2001
From: haozhe chang <haozhe.chang@mediatek.com>
Date: Thu, 16 Mar 2023 17:58:20 +0800
Subject: [PATCH] wwan: core: Support slicing in port TX flow of WWAN subsystem
wwan_port_fops_write passes the SKB parameter to the TX callback of
the WWAN device driver. However, the WWAN device (e.g., t7xx) may
have an MTU smaller than the SKB, causing the TX buffer to be
sliced and copied once more in the WWAN device driver.
This patch implements the slicing in the WWAN subsystem and gives
the WWAN device driver the option to slice (by frag_len) or not. By
doing so, the additional memory copy is reduced.
Meanwhile, this patch gives the WWAN device driver the option to reserve
headroom in fragments for device-specific metadata.
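A minimal sketch of the driver side (MY_TXQ_MTU, MY_HDR_LEN, my_port_ops and
send_one_fragment are illustrative, not from a real driver): the port declares
its fragment limits once at creation time, and the core then hands it a head
skb whose frag_list members already respect them:

    struct wwan_port_caps caps = {
            .frag_len     = MY_TXQ_MTU - MY_HDR_LEN, /* max payload per fragment */
            .headroom_len = MY_HDR_LEN,              /* reserved for device metadata */
    };

    port = wwan_create_port(dev, WWAN_PORT_AT, &my_port_ops, &caps, drvdata);

    /* In the TX callback, send the head skb, then walk its frag_list chain: */
    for (cur = skb; cur; cur = cur == skb ? skb_shinfo(skb)->frag_list : cur->next)
            send_one_fragment(cur);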
Signed-off-by: haozhe chang <haozhe.chang@mediatek.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Link: https://lore.kernel.org/r/20230316095826.181904-1-haozhe.chang@mediatek.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/wwan/iosm/iosm_ipc_port.c | 3 +-
drivers/net/wwan/mhi_wwan_ctrl.c | 2 +-
drivers/net/wwan/rpmsg_wwan_ctrl.c | 2 +-
drivers/net/wwan/t7xx/t7xx_port_wwan.c | 36 ++++++++--------
drivers/net/wwan/wwan_core.c | 58 +++++++++++++++++++-------
drivers/net/wwan/wwan_hwsim.c | 2 +-
drivers/usb/class/cdc-wdm.c | 3 +-
include/linux/wwan.h | 11 +++++
8 files changed, 81 insertions(+), 36 deletions(-)
--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -63,7 +63,8 @@ struct iosm_cdev *ipc_port_init(struct i
ipc_port->ipc_imem = ipc_imem;
ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type,
- &ipc_wwan_ctrl_ops, ipc_port);
+ &ipc_wwan_ctrl_ops, NULL,
+ ipc_port);
return ipc_port;
}
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -237,7 +237,7 @@ static int mhi_wwan_ctrl_probe(struct mh
/* Register as a wwan port, id->driver_data contains wwan port type */
port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
- &wwan_pops, mhiwwan);
+ &wwan_pops, NULL, mhiwwan);
if (IS_ERR(port)) {
kfree(mhiwwan);
return PTR_ERR(port);
--- a/drivers/net/wwan/rpmsg_wwan_ctrl.c
+++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c
@@ -129,7 +129,7 @@ static int rpmsg_wwan_ctrl_probe(struct
/* Register as a wwan port, id.driver_data contains wwan port type */
port = wwan_create_port(parent, rpdev->id.driver_data,
- &rpmsg_wwan_pops, rpwwan);
+ &rpmsg_wwan_pops, NULL, rpwwan);
if (IS_ERR(port))
return PTR_ERR(port);
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -54,13 +54,13 @@ static void t7xx_port_ctrl_stop(struct w
static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct t7xx_port *port_private = wwan_port_get_drvdata(port);
- size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
const struct t7xx_port_conf *port_conf;
+ struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
+ int cnt = 0, ret;
- len = skb->len;
- if (!len || !port_private->chan_enable)
+ if (!port_private->chan_enable)
return -EINVAL;
port_conf = port_private->port_conf;
@@ -72,23 +72,21 @@ static int t7xx_port_ctrl_tx(struct wwan
return -ENODEV;
}
- for (offset = 0; offset < len; offset += chunk_len) {
- struct sk_buff *skb_ccci;
- int ret;
-
- chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
- skb_ccci = t7xx_port_alloc_skb(chunk_len);
- if (!skb_ccci)
- return -ENOMEM;
-
- skb_put_data(skb_ccci, skb->data + offset, chunk_len);
- ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
+ while (cur) {
+ cloned = skb_clone(cur, GFP_KERNEL);
+ cloned->len = skb_headlen(cur);
+ ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
if (ret) {
- dev_kfree_skb_any(skb_ccci);
+ dev_kfree_skb(cloned);
dev_err(port_private->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
- return ret;
+ return cnt ? cnt + ret : ret;
}
+ cnt += cur->len;
+ if (cur == skb)
+ cur = skb_shinfo(skb)->frag_list;
+ else
+ cur = cur->next;
}
dev_kfree_skb(skb);
@@ -154,13 +152,17 @@ static int t7xx_port_wwan_disable_chl(st
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header);
+ struct wwan_port_caps caps;
if (state != MD_STATE_READY)
return;
if (!port->wwan.wwan_port) {
+ caps.frag_len = CLDMA_MTU - header_len;
+ caps.headroom_len = header_len;
port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, port);
+ &wwan_ops, &caps, port);
if (IS_ERR(port->wwan.wwan_port))
dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
}
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -67,6 +67,8 @@ struct wwan_device {
* @rxq: Buffer inbound queue
* @waitqueue: The waitqueue for port fops (read/write/poll)
* @data_lock: Port specific data access serialization
+ * @headroom_len: SKB reserved headroom size
+ * @frag_len: Length to fragment packet
* @at_data: AT port specific data
*/
struct wwan_port {
@@ -79,6 +81,8 @@ struct wwan_port {
struct sk_buff_head rxq;
wait_queue_head_t waitqueue;
struct mutex data_lock; /* Port specific data access serialization */
+ size_t headroom_len;
+ size_t frag_len;
union {
struct {
struct ktermios termios;
@@ -422,6 +426,7 @@ static int __wwan_port_dev_assign_name(s
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
+ struct wwan_port_caps *caps,
void *drvdata)
{
struct wwan_device *wwandev;
@@ -455,6 +460,8 @@ struct wwan_port *wwan_create_port(struc
port->type = type;
port->ops = ops;
+ port->frag_len = caps ? caps->frag_len : SIZE_MAX;
+ port->headroom_len = caps ? caps->headroom_len : 0;
mutex_init(&port->ops_lock);
skb_queue_head_init(&port->rxq);
init_waitqueue_head(&port->waitqueue);
@@ -698,30 +705,53 @@ static ssize_t wwan_port_fops_read(struc
static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offp)
{
+ struct sk_buff *skb, *head = NULL, *tail = NULL;
struct wwan_port *port = filp->private_data;
- struct sk_buff *skb;
+ size_t frag_len, remain = count;
int ret;
ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
if (ret)
return ret;
- skb = alloc_skb(count, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- if (copy_from_user(skb_put(skb, count), buf, count)) {
- kfree_skb(skb);
- return -EFAULT;
- }
+ do {
+ frag_len = min(remain, port->frag_len);
+ skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto freeskb;
+ }
+ skb_reserve(skb, port->headroom_len);
+
+ if (!head) {
+ head = skb;
+ } else if (!tail) {
+ skb_shinfo(head)->frag_list = skb;
+ tail = skb;
+ } else {
+ tail->next = skb;
+ tail = skb;
+ }
+
+ if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) {
+ ret = -EFAULT;
+ goto freeskb;
+ }
+
+ if (skb != head) {
+ head->data_len += skb->len;
+ head->len += skb->len;
+ head->truesize += skb->truesize;
+ }
+ } while (remain -= frag_len);
- ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
- if (ret) {
- kfree_skb(skb);
- return ret;
- }
+ ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK));
+ if (!ret)
+ return count;
- return count;
+freeskb:
+ kfree_skb(head);
+ return ret;
}
static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -205,7 +205,7 @@ static struct wwan_hwsim_port *wwan_hwsi
port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
&wwan_hwsim_port_ops,
- port);
+ NULL, port);
if (IS_ERR(port->wwan)) {
err = PTR_ERR(port->wwan);
goto err_free_port;
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -929,7 +929,8 @@ static void wdm_wwan_init(struct wdm_dev
return;
}
- port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops, desc);
+ port = wwan_create_port(&intf->dev, desc->wwanp_type, &wdm_wwan_port_ops,
+ NULL, desc);
if (IS_ERR(port)) {
dev_err(&intf->dev, "%s: Unable to create WWAN port\n",
dev_name(intf->usb_dev));
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -62,11 +62,21 @@ struct wwan_port_ops {
poll_table *wait);
};
+/** struct wwan_port_caps - The WWAN port capabilities
+ * @frag_len: WWAN port TX fragments length
+ * @headroom_len: WWAN port TX fragments reserved headroom length
+ */
+struct wwan_port_caps {
+ size_t frag_len;
+ unsigned int headroom_len;
+};
+
/**
* wwan_create_port - Add a new WWAN port
* @parent: Device to use as parent and shared by all WWAN ports
* @type: WWAN port type
* @ops: WWAN port operations
+ * @caps: WWAN port capabilities
* @drvdata: Pointer to caller driver data
*
* Allocate and register a new WWAN port. The port will be automatically exposed
@@ -84,6 +94,7 @@ struct wwan_port_ops {
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
+ struct wwan_port_caps *caps,
void *drvdata);
/**


@@ -0,0 +1,121 @@
From 72b1fe6cc6523908bfc339d07d18cb0f3469a643 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 25 May 2023 12:15:29 -1000
Subject: [PATCH] net: wwan: t7xx: Use alloc_ordered_workqueue() to create
ordered workqueues
BACKGROUND
==========
When multiple work items are queued to a workqueue, their execution order
doesn't match the queueing order. They may get executed in any order and
simultaneously. When fully serialized execution - one by one in the queueing
order - is needed, an ordered workqueue should be used which can be created
with alloc_ordered_workqueue().
However, alloc_ordered_workqueue() was a later addition. Before it, an
ordered workqueue could be obtained by creating an UNBOUND workqueue with
@max_active==1. This originally was an implementation side-effect which was
broken by 4c16bd327c74 ("workqueue: restore WQ_UNBOUND/max_active==1 to be
ordered"). Because there were users that depended on the ordered execution,
5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered")
made workqueue allocation path to implicitly promote UNBOUND workqueues w/
@max_active==1 to ordered workqueues.
While this has worked okay, overloading the UNBOUND allocation interface
this way creates other issues. It's difficult to tell whether a given
workqueue actually needs to be ordered and users that legitimately want a
min concurrency level wq unexpectedly gets an ordered one instead. With
planned UNBOUND workqueue updates to improve execution locality and more
prevalence of chiplet designs which can benefit from such improvements, this
isn't a state we wanna be in forever.
This patch series audits all callsites that create an UNBOUND workqueue w/
@max_active==1 and converts them to alloc_ordered_workqueue() as necessary.
WHAT TO LOOK FOR
================
The conversions are from
    alloc_workqueue(WQ_UNBOUND | flags, 1, args..)
to
    alloc_ordered_workqueue(flags, args...)
which don't cause any functional changes. If you know that fully ordered
execution is not necessary, please let me know. I'll drop the conversion and
instead add a comment noting the fact to reduce confusion while conversion
is in progress.
If you aren't fully sure, it's completely fine to let the conversion
through. The behavior will stay exactly the same and we can always
reconsider later.
As there are follow-up workqueue core changes, I'd really appreciate if the
patch can be routed through the workqueue tree w/ your acks. Thanks.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Cc: Intel Corporation <linuxwwan@intel.com>
Cc: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
Cc: Liu Haijun <haijun.liu@mediatek.com>
Cc: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Cc: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Cc: Loic Poulain <loic.poulain@linaro.org>
Cc: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: netdev@vger.kernel.org
---
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 +++++++------
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 5 +++--
2 files changed, 10 insertions(+), 8 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1293,9 +1293,9 @@ int t7xx_cldma_init(struct cldma_ctrl *m
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
md_ctrl->txq[i].worker =
- alloc_workqueue("md_hif%d_tx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
- 1, md_ctrl->hif_id, i);
+ alloc_ordered_workqueue("md_hif%d_tx%d_worker",
+ WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
+ md_ctrl->hif_id, i);
if (!md_ctrl->txq[i].worker)
goto err_workqueue;
@@ -1306,9 +1306,10 @@ int t7xx_cldma_init(struct cldma_ctrl *m
md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
- md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
- 1, md_ctrl->hif_id, i);
+ md_ctrl->rxq[i].worker =
+ alloc_ordered_workqueue("md_hif%d_rx%d_worker",
+ WQ_MEM_RECLAIM,
+ md_ctrl->hif_id, i);
if (!md_ctrl->rxq[i].worker)
goto err_workqueue;
}
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -618,8 +618,9 @@ int t7xx_dpmaif_txq_init(struct dpmaif_t
return ret;
}
- txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
- (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
+ txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
+ WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
+ txq->index);
if (!txq->worker)
return -ENOMEM;


@@ -0,0 +1,482 @@
From ba2274dcfda859b8a27193e68ad37bfe4da28ddc Mon Sep 17 00:00:00 2001
From: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Date: Tue, 11 Jul 2023 08:28:13 +0200
Subject: [PATCH] net: wwan: t7xx: Add AP CLDMA
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
At this moment, with the current status, t7xx is not functional due to
problems like the following after connection, if there is no activity:
[ 57.370534] mtk_t7xx 0000:72:00.0: [PM] SAP suspend error: -110
[ 57.370581] mtk_t7xx 0000:72:00.0: can't suspend
(t7xx_pci_pm_runtime_suspend [mtk_t7xx] returned -110)
because after this, the traffic no longer works.
The complete series 'net: wwan: t7xx: fw flashing & coredump support'
was reverted because of issues with the pci implementation.
In order to have at least the modem working, it would be enough if just
the first commit of the series were re-applied:
d20ef656f994 net: wwan: t7xx: Add AP CLDMA
With that, the Application Processor would be controlled and correctly
suspended, and the problems described above would be fixed (I am testing
here like this with no related issues).
This commit is independent of the others and not related to the pci
implementation mentioned above for the new features: fw flashing and
coredump collection.
Use v2 patch version of d20ef656f994 as JinJian Song suggests
(https://patchwork.kernel.org/project/netdevbpf/patch/20230105154215.198828-1-m.chetan.kumar@linux.intel.com/).
Original text from the commit that would be re-applied:
d20ef656f994 net: wwan: t7xx: Add AP CLDMA
Author: Haijun Liu <haijun.liu@mediatek.com>
Date: Tue Aug 16 09:53:28 2022 +0530
The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to
communicate with AP and Modem processors respectively. So far only
MD-CLDMA was being used, this patch enables AP-CLDMA.
Rename small Application Processor (sAP) to AP.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Link: https://lore.kernel.org/r/20230711062817.6108-1-jtornosm@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
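As a sketch of the practical effect (the driver spells the calls out rather
than looping): every CLDMA lifecycle step now runs once per HW unit, indexed
by enum cldma_id:

    /* Both units now share the same alloc/init/reset/exit sequence. */
    enum cldma_id { CLDMA_ID_MD, CLDMA_ID_AP, CLDMA_NUM };

    for (i = 0; i < CLDMA_NUM; i++)
            t7xx_cldma_reset(md->md_ctrl[i]);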
drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 17 +++--
drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 2 +-
drivers/net/wwan/t7xx/t7xx_mhccif.h | 1 +
drivers/net/wwan/t7xx/t7xx_modem_ops.c | 76 +++++++++++++++++-----
drivers/net/wwan/t7xx/t7xx_modem_ops.h | 2 +
drivers/net/wwan/t7xx/t7xx_port.h | 6 +-
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 8 ++-
drivers/net/wwan/t7xx/t7xx_port_proxy.c | 18 ++++-
drivers/net/wwan/t7xx/t7xx_reg.h | 2 +-
drivers/net/wwan/t7xx/t7xx_state_monitor.c | 13 +++-
drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 +
11 files changed, 116 insertions(+), 31 deletions(-)
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cld
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
u32 phy_ao_base, phy_pd_base;
- if (md_ctrl->hif_id != CLDMA_ID_MD)
- return;
-
- phy_ao_base = CLDMA1_AO_BASE;
- phy_pd_base = CLDMA1_PD_BASE;
- hw_info->phy_interrupt_id = CLDMA1_INT;
hw_info->hw_mode = MODE_BIT_64;
+
+ if (md_ctrl->hif_id == CLDMA_ID_MD) {
+ phy_ao_base = CLDMA1_AO_BASE;
+ phy_pd_base = CLDMA1_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA1_INT;
+ } else {
+ phy_ao_base = CLDMA0_AO_BASE;
+ phy_pd_base = CLDMA0_PD_BASE;
+ hw_info->phy_interrupt_id = CLDMA0_INT;
+ }
+
hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -34,7 +34,7 @@
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
- * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
+ * @CLDMA_ID_AP: Application Processor control channel.
* @CLDMA_NUM: Number of CLDMA HW units available.
*/
enum cldma_id {
--- a/drivers/net/wwan/t7xx/t7xx_mhccif.h
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h
@@ -25,6 +25,7 @@
D2H_INT_EXCEPTION_CLEARQ_DONE | \
D2H_INT_EXCEPTION_ALLQ_RESET | \
D2H_INT_PORT_ENUM | \
+ D2H_INT_ASYNC_AP_HK | \
D2H_INT_ASYNC_MD_HK)
void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -44,6 +44,7 @@
#include "t7xx_state_monitor.h"
#define RT_ID_MD_PORT_ENUM 0
+#define RT_ID_AP_PORT_ENUM 1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID 0x49434343
@@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7x
}
t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
+ t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
if (stage == HIF_EX_INIT)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
@@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struc
if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
return -EINVAL;
- if (i == RT_ID_MD_PORT_ENUM)
+ if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
}
@@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_m
return 0;
}
-static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
+static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
+ struct t7xx_fsm_ctl *ctl,
enum t7xx_fsm_event_state event_id,
enum t7xx_fsm_event_state err_detect)
{
struct t7xx_fsm_event *event = NULL, *event_next;
- struct t7xx_sys_info *core_info = &md->core_md;
struct device *dev = &md->t7xx_dev->pdev->dev;
unsigned long flags;
int ret;
@@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_st
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
- t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+ t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
+}
+
+static void t7xx_ap_hk_wq(struct work_struct *work)
+{
+ struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
+ struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
+
+ /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
+ t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
+ t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
+ md->core_ap.handshake_ongoing = true;
+ t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
- void __iomem *mhccif_base;
unsigned int int_sta;
unsigned long flags;
switch (evt_id) {
case FSM_PRE_START:
- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
+ D2H_INT_ASYNC_AP_HK);
break;
case FSM_START:
@@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_mo
ctl->exp_flg = true;
md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else if (ctl->exp_flg) {
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
- } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
- queue_work(md->handshake_wq, &md->handshake_work);
- md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
- mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
- iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
- t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else {
- t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
+
+ if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
+ queue_work(md->handshake_wq, &md->handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
+ iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ }
+
+ if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
+ queue_work(md->handshake_wq, &md->ap_handshake_work);
+ md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
+ iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
+ }
}
spin_unlock_irqrestore(&md->exp_lock, flags);
@@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_mo
case FSM_READY:
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
+ t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
break;
default:
@@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc(
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
+ INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
+ md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
+ FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
+
return md;
}
@@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t
md->exp_id = 0;
t7xx_fsm_reset(md);
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
t7xx_port_proxy_reset(md->port_prox);
md->md_init_finish = true;
return t7xx_core_reset(md);
@@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
if (ret)
goto err_destroy_hswq;
+ ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
+ if (ret)
+ goto err_destroy_hswq;
+
ret = t7xx_fsm_init(md);
if (ret)
goto err_destroy_hswq;
@@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
if (ret)
goto err_uninit_ccmni;
- ret = t7xx_port_proxy_init(md);
+ ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
if (ret)
goto err_uninit_md_cldma;
+ ret = t7xx_port_proxy_init(md);
+ if (ret)
+ goto err_uninit_ap_cldma;
+
ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
- if (ret) /* fsm_uninit flushes cmd queue */
+ if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
goto err_uninit_proxy;
t7xx_md_sys_sw_init(t7xx_dev);
@@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7
err_uninit_proxy:
t7xx_port_proxy_uninit(md->port_prox);
+err_uninit_ap_cldma:
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
+
err_uninit_md_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
@@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t
t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
+ t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
t7xx_ccmni_exit(t7xx_dev);
t7xx_fsm_uninit(md);
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -66,10 +66,12 @@ struct t7xx_modem {
struct cldma_ctrl *md_ctrl[CLDMA_NUM];
struct t7xx_pci_dev *t7xx_dev;
struct t7xx_sys_info core_md;
+ struct t7xx_sys_info core_ap;
bool md_init_finish;
bool rgu_irq_asserted;
struct workqueue_struct *handshake_wq;
struct work_struct handshake_work;
+ struct work_struct ap_handshake_work;
struct t7xx_fsm_ctl *fsm_ctl;
struct port_proxy *port_prox;
unsigned int exp_id;
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -36,9 +36,13 @@
/* Channel ID and Message ID definitions.
* The channel number consists of peer_id(15:12) , channel_id(11:0)
* peer_id:
- * 0:reserved, 1: to sAP, 2: to MD
+ * 0:reserved, 1: to AP, 2: to MD
*/
enum port_ch {
+ /* to AP */
+ PORT_CH_AP_CONTROL_RX = 0x1000,
+ PORT_CH_AP_CONTROL_TX = 0x1001,
+
/* to MD */
PORT_CH_CONTROL_RX = 0x2000,
PORT_CH_CONTROL_TX = 0x2001,
--- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
@@ -167,8 +167,12 @@ static int control_msg_handler(struct t7
case CTL_ID_HS2_MSG:
skb_pull(skb, sizeof(*ctrl_msg_h));
- if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
- ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
+ if (port_conf->rx_ch == PORT_CH_CONTROL_RX ||
+ port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) {
+ int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ?
+ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
+
+ ret = t7xx_fsm_append_event(ctl, event, skb->data,
le32_to_cpu(ctrl_msg_h->data_length));
if (ret)
dev_err(port->dev, "Failed to append Handshake 2 event");
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,7 +48,7 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
-static const struct t7xx_port_conf t7xx_md_port_conf[] = {
+static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
.rx_ch = PORT_CH_UART2_RX,
@@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_
.path_id = CLDMA_ID_MD,
.ops = &ctl_port_ops,
.name = "t7xx_ctrl",
+ }, {
+ .tx_ch = PORT_CH_AP_CONTROL_TX,
+ .rx_ch = PORT_CH_AP_CONTROL_RX,
+ .txq_index = Q_IDX_CTRL,
+ .rxq_index = Q_IDX_CTRL,
+ .path_id = CLDMA_ID_AP,
+ .ops = &ctl_port_ops,
+ .name = "t7xx_ap_ctrl",
},
};
@@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(st
if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
md->core_md.ctl_port = port;
+ if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
+ md->core_ap.ctl_port = port;
+
port->t7xx_dev = md->t7xx_dev;
port->dev = &md->t7xx_dev->pdev->dev;
spin_lock_init(&port->port_update_lock);
@@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(st
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
+ unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
int i;
@@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_
port_prox->dev = dev;
for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];
+ port_prox->ports[i].port_conf = &t7xx_port_conf[i];
port_prox->port_count = port_count;
t7xx_proxy_init_all_ports(md);
@@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_mod
if (ret)
return ret;
+ t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -56,7 +56,7 @@
#define D2H_INT_RESUME_ACK BIT(12)
#define D2H_INT_SUSPEND_ACK_AP BIT(13)
#define D2H_INT_RESUME_ACK_AP BIT(14)
-#define D2H_INT_ASYNC_SAP_HK BIT(15)
+#define D2H_INT_ASYNC_AP_HK BIT(15)
#define D2H_INT_ASYNC_MD_HK BIT(16)
/* Register base */
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
t7xx_md_event_notify(md, FSM_START);
- wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
- HZ * 60);
+ wait_event_interruptible_timeout(ctl->async_hk_wq,
+ (md->core_md.ready && md->core_ap.ready) ||
+ ctl->exp_flg, HZ * 60);
dev = &md->t7xx_dev->pdev->dev;
if (ctl->exp_flg)
@@ -299,6 +300,13 @@ static int fsm_routine_starting(struct t
fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
return -ETIMEDOUT;
+ } else if (!md->core_ap.ready) {
+ dev_err(dev, "AP handshake timeout\n");
+ if (md->core_ap.handshake_ongoing)
+ t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
+
+ fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
+ return -ETIMEDOUT;
}
t7xx_pci_pm_init_late(md->t7xx_dev);
@@ -335,6 +343,7 @@ static void fsm_routine_start(struct t7x
return;
}
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -38,10 +38,12 @@ enum t7xx_fsm_state {
enum t7xx_fsm_event_state {
FSM_EVENT_INVALID,
FSM_EVENT_MD_HS2,
+ FSM_EVENT_AP_HS2,
FSM_EVENT_MD_EX,
FSM_EVENT_MD_EX_REC_OK,
FSM_EVENT_MD_EX_PASS,
FSM_EVENT_MD_HS2_EXIT,
+ FSM_EVENT_AP_HS2_EXIT,
FSM_EVENT_MAX
};