From b2a91105b359f4c458572f2c3b0e643e68bbf46f Mon Sep 17 00:00:00 2001
From: ling <1042585959@qq.com>
Date: Mon, 22 May 2023 18:20:11 +0800
Subject: [PATCH] Update PCIe driver
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 quectel_MHI/src/ReleaseNote.txt              | 103 +++
 quectel_MHI/src/controllers/mhi_qti.c        | 167 ++++-
 quectel_MHI/src/controllers/mhi_qti.h        |   2 +-
 quectel_MHI/src/core/mhi.h                   |  74 ++-
 quectel_MHI/src/core/mhi_boot.c              | 169 ++---
 quectel_MHI/src/core/mhi_dtr.c               |  10 +
 quectel_MHI/src/core/mhi_init.c              | 595 ++++++++++++++++-
 quectel_MHI/src/core/mhi_internal.h          |  17 +-
 quectel_MHI/src/core/mhi_main.c              | 349 +++++++---
 quectel_MHI/src/core/mhi_pm.c                |  64 +-
 quectel_MHI/src/devices/mhi_netdev_quectel.c | 659 +++++++++++++++----
 quectel_MHI/src/devices/mhi_uci.c            | 134 ++--
 quectel_MHI/src/devices/rmnet_nss.c          |   2 +-
 13 files changed, 1905 insertions(+), 440 deletions(-)
 create mode 100644 quectel_MHI/src/ReleaseNote.txt

diff --git a/quectel_MHI/src/ReleaseNote.txt b/quectel_MHI/src/ReleaseNote.txt
new file mode 100644
index 0000000..f70e45c
--- /dev/null
+++ b/quectel_MHI/src/ReleaseNote.txt
@@ -0,0 +1,103 @@
+Release Notes
+
+[V1.3.4]
+Date: 12/8/2022
+enhancement:
+  1. only allow enabling autosuspend when the module is in MHI_EE_AMSS
+  2. show PCIe link speed and width at driver probe
+  3. check PCIe link status at driver probe by reading the PCIe VID and PID;
+     if the PCIe link is down, return -EIO
+  4. support RM520 (1eac:1004)
+  5. support QMAP command packets
+fix:
+  1. fix the TX queue being wrongly stopped during uplink TPUT tests
+  2. fix the module failing to boot up after QFirehose (very low probability)
+  3. mhi uci: add a mutex lock for concurrent reads/writes
+
+[V1.3.3]
+Date: 30/6/2022
+enhancement:
+  1. remove an unnecessary kmalloc during qfirehose
+  2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
+  3. set ring size of event 0 to 256 (from 1024), required by x6x
+  4. support the PCIe local network card mhi_swip0 (chan 46/47), disabled by default
+  5. port the IPQ5018 mhi rate control code from spf11.5
+  6. set the PCIe rmnet download max QMAP packet size to 15KB (same as the IPQ MHI driver)
+  7. support setting a different MAC address for each rmnet net card
+  8. when mhi netdev fails to malloc, use delayed work instead of plain work
+  9. optimize code for the case where the modem is still in MHI_EE_PTHRU at driver load
+fix:
+  1. fix unsynchronized rp/wp access when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
+  2. set the DMA mask at driver probe; some SoCs like rpi_4 need it
+
+[V1.3.2]
+Date: 12/16/2021
+enhancement:
+  1. support Linux kernel V5.14
+  2. mhi_netdev_quectel.c: do not print logs in softirq context
+
+[V1.3.1]
+Date: 9/26/2021
+enhancement:
+fix:
+
+[V1.3.0.19]
+Date: 9/18/2021
+enhancement:
+  1. support sdx62 (17cb:0308)
+  2. support IPQ5018's NSS
+  3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c;
+     pcie_mhi.ko must then be loaded after rmnet_nss.ko
+  4. allow the BHI IRQ to be non-zero (for ipq5018)
+fix:
+
+[V1.3.0.18]
+Date: 4/14/2021
+enhancement:
+  1. support MBIM multiple calls, usage:
+     # insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
+     # quectel-mbim-proxy -d /dev/mhi_MBIM &
+     # quectel-CM -n X
+fix:
+
+[V1.3.0.17]
+Date: 3/11/2021
+enhancement:
+fix:
+  1. fix very high CPU load during TPUT tests when there is only one MSI interrupt
+  2. fix an error on the latest X24 modem
+
+[V1.3.0.16]
+Date: 11/18/2020
+enhancement:
+fix:
+  1. increase ring size to 32; for an in-bound chan, if one ring is full,
+     the modem will not generate MSI interrupts for any chan
+
+[V1.3.0.15]
+Date: 10/30/2020
+enhancement:
+  1. support multiple modems, named /dev/mhi_X
+fix:
+  1. fix a compile error on kernel v5.8
+
+[V1.3.0.14]
+Date: 10/9/2020
+enhancement:
+  1. support EM120 & EM160
+fix:
+  1. fix a compile error on kernel v5.6
+  2. support runtime suspend
+
+[V1.3.0.13]
+Date: 9/7/2020
+enhancement:
+  1. support EM120 & EM160
+fix:
+  1. fix an error on X55 + PCIe 2.0 (e.g. IPQ4019)
+  2. support runtime suspend
+
+[V1.3.0.12]
+Date: 7/7/2020
+enhancement:
+  1. support creating only one net card (enabled by macro MHI_NETDEV_ONE_CARD_MODE)
+fix:
\ No newline at end of file
diff --git a/quectel_MHI/src/controllers/mhi_qti.c b/quectel_MHI/src/controllers/mhi_qti.c
index ff4b90b..735df9e 100644
--- a/quectel_MHI/src/controllers/mhi_qti.c
+++ b/quectel_MHI/src/controllers/mhi_qti.c
@@ -74,6 +74,10 @@ static void pci_free_irq_vectors(struct pci_dev *dev)
 
 static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
 {
+#if 0//defined(CONFIG_PINCTRL_IPQ5018)
+    struct pcie_port *pp = dev->bus->sysdata;
+    pp->msi[nr]; //MSIs may not be continuous
+#endif
     return dev->irq + nr;
 }
 #endif
@@ -171,8 +175,28 @@ static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
 
     pci_set_master(pci_dev);
 
+#if 1 //some SoCs, like rpi_4b, need the following code
+    ret = -EIO;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+    if ((ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64))))
+        ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+#else
+    if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+        ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
+    } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+        ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
+    }
+#endif
+    if (ret) {
+        MHI_ERR("Error dma mask\n");
+    }
+#endif
+
     mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn);
     len = pci_resource_len(pci_dev, mhi_dev->resn);
+#ifndef ioremap_nocache //4bdc0d676a643140bdf17dbf7eafedee3d496a3c
+#define ioremap_nocache ioremap
+#endif
     mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len);
     if (!mhi_cntrl->regs) {
         MHI_ERR("Error ioremap region\n");
@@ -280,6 +304,7 @@ error_enable_device:
     return ret;
 }
 
+#ifdef CONFIG_PM
 static int mhi_runtime_suspend(struct device *dev)
 {
     int ret = 0;
@@ -296,6 +321,12 @@ static int mhi_runtime_suspend(struct device *dev)
         return 0;
     }
 
+    if (mhi_cntrl->ee != MHI_EE_AMSS) {
+        MHI_LOG("Not AMSS, return busy\n");
+        mutex_unlock(&mhi_cntrl->pm_mutex);
+        return -EBUSY;
+    }
+
     ret = mhi_pm_suspend(mhi_cntrl);
     if (ret) {
         MHI_LOG("Abort due to ret:%d\n", ret);
@@ -315,9 +346,14 @@ exit_runtime_suspend:
 
 static int mhi_runtime_idle(struct device *dev)
 {
-    //struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+    struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 
-    //MHI_LOG("Entered returning -EBUSY\n");
+    if ((mhi_cntrl->dev_state == MHI_STATE_M0 || mhi_cntrl->dev_state == MHI_STATE_M3)
+        && mhi_cntrl->ee == MHI_EE_AMSS) {
+        return 0;
+    }
+    MHI_LOG("Entered returning -EBUSY, mhi_state:%s exec_env:%s\n",
+        TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
 
     /*
      * RPM framework during runtime resume always calls
@@ -374,8 +410,8 @@ static int mhi_system_resume(struct device *dev)
     if (ret) {
         MHI_ERR("Failed to resume link\n");
     } else {
-        pm_runtime_set_active(dev);
-        pm_runtime_enable(dev);
+        //pm_runtime_set_active(dev);
+        //pm_runtime_enable(dev);
     }
 
     return ret;
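/*
 * Editorial sketch (not part of the patch): the two hunks above lean on the
 * runtime-PM contract that returning 0 from .runtime_idle lets the core
 * proceed towards .runtime_suspend, while -EBUSY keeps the device active.
 * All example_* names are hypothetical.
 */
static int example_runtime_resume(struct device *dev)
{
    return 0; /* re-initialize hardware state here */
}

static int example_runtime_idle(struct device *dev)
{
    /* allow autosuspend only when the hardware is in a safe state */
    return example_device_is_idle(dev) ? 0 : -EBUSY;
}

static int example_runtime_suspend(struct device *dev)
{
    if (example_device_has_pending_io(dev))
        return -EBUSY; /* the core retries after the next mark_last_busy */
    return example_device_quiesce(dev);
}

static const struct dev_pm_ops example_pm_ops = {
    SET_RUNTIME_PM_OPS(example_runtime_suspend,
                       example_runtime_resume,
                       example_runtime_idle)
};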
@@ -388,6 +424,11 @@ int mhi_system_suspend(struct device *dev)
 
     MHI_LOG("Entered\n");
 
+    if (atomic_read(&mhi_cntrl->pending_pkts)) {
+        MHI_LOG("Abort due to pending_pkts:%d\n", atomic_read(&mhi_cntrl->pending_pkts));
+        return -EBUSY;
+    }
+
     /* if rpm status still active then force suspend */
     if (!pm_runtime_status_suspended(dev)) {
         ret = mhi_runtime_suspend(dev);
@@ -397,12 +438,13 @@
         }
     }
 
-    pm_runtime_set_suspended(dev);
-    pm_runtime_disable(dev);
+    //pm_runtime_set_suspended(dev);
+    //pm_runtime_disable(dev);
 
     MHI_LOG("Exit\n");
     return 0;
 }
+#endif
 
 /* checks if link is down */
 static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
@@ -535,7 +577,16 @@ static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
     struct mhi_dev *mhi_dev = priv;
     struct device *dev = &mhi_dev->pci_dev->dev;
 
-    pm_runtime_put_noidle(dev);
+    pm_runtime_mark_last_busy(dev);
+    pm_runtime_put(dev);
+}
+
+static void mhi_runtime_mark_last_busy(struct mhi_controller *mhi_cntrl, void *priv)
+{
+    struct mhi_dev *mhi_dev = priv;
+    struct device *dev = &mhi_dev->pci_dev->dev;
+
+    pm_runtime_mark_last_busy(dev);
 }
 
 static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
@@ -545,10 +596,16 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
     struct mhi_dev *mhi_dev = priv;
     struct device *dev = &mhi_dev->pci_dev->dev;
 
-    if (reason == MHI_CB_IDLE) {
-        MHI_LOG("Schedule runtime suspend 1\n");
-        pm_runtime_mark_last_busy(dev);
-        pm_request_autosuspend(dev);
+    switch (reason) {
+    case MHI_CB_FATAL_ERROR:
+    case MHI_CB_SYS_ERROR:
+        pm_runtime_forbid(dev);
+        break;
+    case MHI_CB_EE_MISSION_MODE:
+        //pm_runtime_allow(dev);
+        break;
+    default:
+        break;
     }
 }
 
@@ -633,7 +690,9 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
 
     mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
 
+    mhi_cntrl->dev = &pci_dev->dev;
     mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
+    mhi_cntrl->vendor = pci_dev->vendor;
     mhi_cntrl->dev_id = pci_dev->device;
     mhi_cntrl->bus = pci_dev->bus->number;
     mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
@@ -691,6 +750,7 @@
     mhi_cntrl->status_cb = mhi_status_cb;
     mhi_cntrl->runtime_get = mhi_runtime_get;
     mhi_cntrl->runtime_put = mhi_runtime_put;
+    mhi_cntrl->runtime_mark_last_busy = mhi_runtime_mark_last_busy;
     mhi_cntrl->link_status = mhi_link_status;
 
     mhi_cntrl->lpm_disable = mhi_lpm_disable;
@@ -725,6 +785,66 @@ error_register:
     return ERR_PTR(-EINVAL);
 }
 
+static bool mhi_pci_is_alive(struct pci_dev *pdev)
+{
+    u16 vendor = 0;
+
+    if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+        return false;
+
+    if (vendor == (u16) ~0 || vendor == 0)
+        return false;
+
+    return true;
+}
+
+static void mhi_pci_show_link(struct mhi_controller *mhi_cntrl, struct pci_dev *pci_dev)
+{
+    int pcie_cap_reg;
+    u16 stat;
+    u32 caps;
+    const char *speed;
+
+    pcie_cap_reg = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
+
+    if (!pcie_cap_reg)
+        return;
+
+    pci_read_config_word(pci_dev,
+                         pcie_cap_reg + PCI_EXP_LNKSTA,
+                         &stat);
+    pci_read_config_dword(pci_dev,
+                          pcie_cap_reg + PCI_EXP_LNKCAP,
+                          &caps);
+
+    switch (caps & PCI_EXP_LNKCAP_SLS) {
+    case PCI_EXP_LNKCAP_SLS_2_5GB: speed = "2.5"; break;
+    case PCI_EXP_LNKCAP_SLS_5_0GB: speed = "5"; break;
+    case 3: speed = "8"; break;
+    case 4: speed = "16"; break;
+    case 5: speed = "32"; break;
+    case 6: speed = "64"; break;
+    default: speed = "0"; break;
+    }
+
+    MHI_LOG("LnkCap: Speed %sGT/s, Width x%d\n", speed,
+        (caps & PCI_EXP_LNKCAP_MLW) >> 4);
+
+    switch (stat & PCI_EXP_LNKSTA_CLS) {
+    case PCI_EXP_LNKSTA_CLS_2_5GB: speed = "2.5"; break;
+    case PCI_EXP_LNKSTA_CLS_5_0GB: speed = "5"; break;
+    case 3: speed = "8"; break;
+    case 4: speed = "16"; break;
+    case 5: speed = "32"; break;
+    case 6: speed = "64"; break;
+    default: speed = "0"; break;
+    }
+
+    MHI_LOG("LnkSta: Speed %sGT/s, Width x%d\n", speed,
+        (stat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+
+}
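/*
 * Editorial sketch: on kernels that provide the pcie_capability_*()
 * accessors, the same LnkCap/LnkSta decode can be done without locating
 * the express capability by hand. Hypothetical helper, not in the patch.
 */
static void example_show_link(struct pci_dev *pdev)
{
    u16 lnksta;
    u32 lnkcap;

    if (pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap))
        return;
    if (pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta))
        return;

    /* speed codes: 1=2.5, 2=5, 3=8, 4=16, 5=32, 6=64 GT/s */
    dev_info(&pdev->dev, "LnkCap: speed code %u, width x%u\n",
             lnkcap & PCI_EXP_LNKCAP_SLS,
             (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
    dev_info(&pdev->dev, "LnkSta: speed code %u, width x%u\n",
             lnksta & PCI_EXP_LNKSTA_CLS,
             (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}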
 
 int mhi_pci_probe(struct pci_dev *pci_dev,
                   const struct pci_device_id *device_id)
 {
@@ -739,6 +859,18 @@
     pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
         __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
 
+    if (!mhi_pci_is_alive(pci_dev)) {
+        /*
+        root@OpenWrt:~# hexdump /sys/bus/pci/devices/0000:01:00.0/config
+        0000000 ffff ffff ffff ffff ffff ffff ffff ffff
+        *
+        0001000
+        */
+        pr_err("mhi_pci is not alive! pcie link is down\n");
+        pr_err("double check by 'hexdump /sys/bus/pci/devices/%s/config'\n", dev_name(&pci_dev->dev));
+        return -EIO;
+    }
+
     /* see if we already registered */
     mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
     if (!mhi_cntrl)
@@ -767,7 +899,8 @@
     }
 
     pm_runtime_mark_last_busy(&pci_dev->dev);
-    pm_runtime_allow(&pci_dev->dev);
+
+    mhi_pci_show_link(mhi_cntrl, pci_dev);
 
     MHI_LOG("Return successful\n");
 
@@ -838,9 +971,13 @@ static const struct dev_pm_ops pm_ops = {
 
 static struct pci_device_id mhi_pcie_device_id[] = {
     {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)},
-    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)},
-    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
-    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)},
+    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX20
+    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, //SDX24
+    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
+    {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308)}, //SDX62
+    {PCI_DEVICE(0x1eac, 0x1001)}, //EM120
+    {PCI_DEVICE(0x1eac, 0x1002)}, //EM160
+    {PCI_DEVICE(0x1eac, 0x1004)}, //RM520
     {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
     {0},
 };
diff --git a/quectel_MHI/src/controllers/mhi_qti.h b/quectel_MHI/src/controllers/mhi_qti.h
index 34e6e4a..7ac021a 100644
--- a/quectel_MHI/src/controllers/mhi_qti.h
+++ b/quectel_MHI/src/controllers/mhi_qti.h
@@ -15,7 +15,7 @@
 #define MHI_PCIE_DEBUG_ID (0xffff)
 
 /* runtime suspend timer */
-#define MHI_RPM_SUSPEND_TMR_MS (250)
+#define MHI_RPM_SUSPEND_TMR_MS (2000)
 #define MHI_PCI_BAR_NUM (0)
 
 struct mhi_dev {
diff --git a/quectel_MHI/src/core/mhi.h b/quectel_MHI/src/core/mhi.h
index d68d313..d61150e 100644
--- a/quectel_MHI/src/core/mhi.h
+++ b/quectel_MHI/src/core/mhi.h
@@ -4,7 +4,9 @@
 #ifndef _MHI_H_
 #define _MHI_H_
 
-#define PCIE_MHI_DRIVER_VERSION "V1.3.0.13"
+#define PCIE_MHI_DRIVER_VERSION "V1.3.4"
+#define ENABLE_MHI_MON
+//#define ENABLE_IP_SW0
 
 #include
 
 typedef enum
@@ -90,11 +92,13 @@ typedef enum
 {
     SW_EVT_RING = 0,
     PRIMARY_EVENT_RING = SW_EVT_RING,
-    HW_0_OUT_EVT_RING = 1,
-    IPA_OUT_EVENT_RING = HW_0_OUT_EVT_RING,
-    HW_0_IN_EVT_RING = 2,
-    IPA_IN_EVENT_RING = HW_0_IN_EVT_RING,
-    ADPL_EVT_RING = 3,
+#ifdef ENABLE_IP_SW0
+    SW_0_OUT_EVT_RING,
+    SW_0_IN_EVT_RING,
+#endif
+    IPA_OUT_EVENT_RING,
+    IPA_IN_EVENT_RING,
+    ADPL_EVT_RING,
     MAX_EVT_RING_IDX
 }MHI_EVT_RING_IDX;
 
@@ -111,7 +115,15 @@
 #define NUM_MHI_IPA_IN_RING_ELEMENTS 512
 #define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 //donot use ul agg, so increase
 #define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
-#define NUM_MHI_CHAN_RING_ELEMENTS 8
+#define NUM_MHI_SW_IP_RING_ELEMENTS 512
+
+/*
+ * If the interrupt moderation time is set to 1ms and more than
+ * NUM_MHI_CHAN_RING_ELEMENTS transfers are sent to the modem within 1ms
+ * (e.g. during a firehose upgrade), the modem will not trigger an IRQ
+ * for those transfers.
+ */
+#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
 #define MHI_EVT_CMD_QUEUE_SIZE 160
 #define MHI_EVT_STATE_QUEUE_SIZE 128
 #define MHI_EVT_XFER_QUEUE_SIZE 1024
@@ -141,6 +153,7 @@ struct mhi_buf_info;
  * @MHI_CB_LPM_ENTER: MHI host entered low power mode
  * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
  * @MHI_CB_EE_RDDM: MHI device entered RDDM execution enviornment
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
  * @MHI_CB_SYS_ERROR: MHI device enter error state (may recover)
  * @MHI_CB_FATAL_ERROR: MHI device entered fatal error
  */
@@ -150,6 +163,7 @@ enum MHI_CB {
     MHI_CB_LPM_ENTER,
     MHI_CB_LPM_EXIT,
     MHI_CB_EE_RDDM,
+    MHI_CB_EE_MISSION_MODE,
     MHI_CB_SYS_ERROR,
     MHI_CB_FATAL_ERROR,
 };
@@ -322,10 +336,13 @@ struct mhi_controller {
     void __iomem *wake_db;
 
     /* device topology */
+    u32 vendor;
     u32 dev_id;
     u32 domain;
     u32 bus;
     u32 slot;
+    u32 cntrl_idx;
+    struct device *cntrl_dev;
 
     /* addressing window */
     dma_addr_t iova_start;
@@ -356,6 +373,7 @@
     u32 sw_ev_rings;
     u32 msi_required;
     u32 msi_allocated;
+    u32 msi_irq_base;
     int *irq; /* interrupt table */
     struct mhi_event *mhi_event;
 
@@ -377,6 +395,7 @@
     bool wake_set;
     atomic_t dev_wake;
     atomic_t alloc_size;
+    atomic_t pending_pkts;
     struct list_head transition_list;
     spinlock_t transition_lock;
     spinlock_t wlock;
@@ -388,6 +407,7 @@
     struct work_struct st_worker;
     struct work_struct fw_worker;
     struct work_struct syserr_worker;
+    struct delayed_work ready_worker;
     wait_queue_head_t state_event;
 
     /* shadow functions */
@@ -398,6 +418,7 @@
     void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
     int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
     void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
+    void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv);
     u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
     int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
     int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
@@ -431,8 +452,34 @@
     struct dentry *parent;
 
     struct miscdevice miscdev;
+
+#ifdef ENABLE_MHI_MON
+    spinlock_t lock;
+
+    /* Ref */
+    int nreaders;                   /* Under mon_lock AND mbus->lock */
+    struct list_head r_list;        /* Chain of readers (usually one) */
+    struct kref ref;                /* Under mon_lock */
+
+    /* Stats */
+    unsigned int cnt_events;
+    unsigned int cnt_text_lost;
+#endif
 };
 
+#ifdef ENABLE_MHI_MON
+struct mhi_tre;
+struct mon_reader {
+    struct list_head r_link;
+    struct mhi_controller *m_bus;
+    void *r_data; /* Use container_of instead? */
+
+    void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
+    void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
+    void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
+};
+#endif
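/*
 * Editorial sketch: a minimal MHI-mon consumer for the struct mon_reader
 * interface above. It assumes a registration helper equivalent to the
 * mon_reader_add() introduced later in this patch; names are illustrative.
 */
static void example_submit(void *data, u32 chan, dma_addr_t wp,
                           struct mhi_tre *tre, void *buf, size_t len)
{
    pr_debug("chan %u: submit %zu bytes, wp %pad\n", chan, len, &wp);
}

static void example_complete(void *data, u32 chan, dma_addr_t wp,
                             struct mhi_tre *tre)
{
    pr_debug("chan %u: completion, wp %pad\n", chan, &wp);
}

static struct mon_reader example_reader = {
    .rnf_submit   = example_submit,
    .rnf_receive  = example_submit,  /* same printer for RX in this sketch */
    .rnf_complete = example_complete,
};
/* usage (hypothetical): mon_reader_add(mhi_cntrl, &example_reader); */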
+
 /**
  * struct mhi_device - mhi device structure associated bind to channel
  * @dev: Device associated with the channels
@@ -444,6 +491,7 @@
  */
 struct mhi_device {
     struct device dev;
+    u32 vendor;
     u32 dev_id;
     u32 domain;
     u32 bus;
@@ -797,7 +845,7 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
 
 #define MHI_VERB(fmt, ...) do { \
     if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
-        pr_debug("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+        pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
 } while (0)
 
 #else
@@ -808,25 +856,23 @@
 
 #define MHI_LOG(fmt, ...) do { \
     if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
-        pr_info("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+        pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
     else if (!mhi_cntrl->klog_slient) \
-        printk(KERN_DEBUG "[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+        printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
 } while (0)
 
 #define MHI_ERR(fmt, ...) do { \
     if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
-        pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+        pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
 } while (0)
 
 #define MHI_CRITICAL(fmt, ...) do { \
     if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
-        pr_alert("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
+        pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
 } while (0)
 
 int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
 void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
-int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl);
-void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl);
 
 #ifndef MHI_NAME_SIZE
 #define MHI_NAME_SIZE 32
diff --git a/quectel_MHI/src/core/mhi_boot.c b/quectel_MHI/src/core/mhi_boot.c
index d02aa05..ebd7c99 100644
--- a/quectel_MHI/src/core/mhi_boot.c
+++ b/quectel_MHI/src/core/mhi_boot.c
@@ -19,9 +19,6 @@
 #include "mhi.h"
 #include "mhi_internal.h"
 
-#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1
-#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2
-
 /* Software defines */
 /* BHI Version */
 #define BHI_MAJOR_VERSION 0x1
@@ -60,31 +57,30 @@ typedef struct _bhi_info_type
     ULONG bhi_rsvd5;
 }BHI_INFO_TYPE, *PBHI_INFO_TYPE;
 
-static void PrintBhiInfo(BHI_INFO_TYPE *bhi_info)
+static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
 {
     ULONG index;
+    char str[128];
 
-    printk("BHI Device Info...\n");
-    printk("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
-    printk("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
-    printk("BHI Status = 0x%X\n", bhi_info->bhi_status);
-    printk("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
-    printk("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
-    printk("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
-    printk("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
-    printk("BHI MSM HW-Id = ");
+    MHI_LOG("BHI Device Info...\n");
+    MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
+    MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
+    MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
+    MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
+    MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
+    MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
+    MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
     for (index = 0; index < 6; index++)
     {
-        printk("0x%X ", bhi_info->bhi_msmhwid[index]);
+        snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
     }
-    printk("\n");
+    MHI_LOG("BHI MSM HW-Id = %s\n", str);
 
-    printk("BHI OEM PK Hash = \n");
     for (index = 0; index < 24; index++)
     {
-        printk("0x%X ", bhi_info->bhi_oempkhash[index]);
+        snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
     }
-    printk("\n");
+    MHI_LOG("BHI OEM PK Hash = %s\n", str);
 }
 
 static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
@@ -130,11 +126,11 @@ static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
         bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
     }
     bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
-    PrintBhiInfo(bhi_info);
+    PrintBhiInfo(mhi_cntrl, bhi_info);
 
     /* Check the Execution Environment */
     if (!IsPBLExecEnv(bhi_info->bhi_ee)) {
-        printk("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
+        MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
     }
 
     /* Return the number of bytes read */
@@ -204,8 +200,11 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
                   lower_32_bits(mhi_buf->dma_addr));
 
     mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+    sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#else
     sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
+#endif
     if (unlikely(!sequence_id))
         sequence_id = 1;
 
@@ -316,8 +315,11 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
                   lower_32_bits(mhi_buf->dma_addr));
 
     mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+    sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#else
     sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#endif
     mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
                         BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
                         sequence_id);
@@ -368,8 +370,12 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
                   lower_32_bits(mhi_buf->dma_addr));
 
     mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+    mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+#else
     mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+#endif
+
     mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
                         BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
                         mhi_cntrl->sequence_id);
@@ -433,7 +439,8 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
                   lower_32_bits(dma_addr));
     mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
     mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
-    mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+    mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
+    mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
     mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb);
     read_unlock_bh(pm_lock);
@@ -720,7 +727,7 @@ error_alloc_fw_table:
     release_firmware(firmware);
 }
 
-int BhiWrite(struct mhi_controller *mhi_cntrl, void *buf, size_t size)
+int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size)
 {
     int ret;
     dma_addr_t dma_addr;
@@ -753,12 +760,18 @@
     }
 
     dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
-    if (!buf) {
+    if (!dma_buf) {
         MHI_ERR("Could not allocate memory for image\n");
         return -ENOMEM;
     }
 
-    memcpy(dma_buf, buf, size);
+    ret = copy_from_user(dma_buf, ubuf, size);
+    if (ret) {
+        MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
+        mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
+        return ret;
+    }
+
     ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
     mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
 
@@ -804,94 +817,40 @@ error_state:
     return ret;
 }
 
-static int mhi_cntrl_open(struct inode *inode, struct file *f)
-{
-    return 0;
-}
-
-static int mhi_cntrl_release(struct inode *inode, struct file *f)
-{
-    return 0;
-}
-
-static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
+long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
 {
     long ret = -EINVAL;
-    void *ubuf = (void *)__arg;
-    struct miscdevice *c = (struct miscdevice *)f->private_data;
-    struct mhi_controller *mhi_cntrl = container_of(c, struct mhi_controller, miscdev);
+    BHI_INFO_TYPE bhi_info;
 
-    switch (cmd) {
-    case IOCTL_BHI_GETDEVINFO:
-    {
-        BHI_INFO_TYPE bhi_info;
-        ret = BhiRead(mhi_cntrl, &bhi_info);
-        if (ret) {
-            MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
-            return ret;
-        }
+    ret = BhiRead(mhi_cntrl, &bhi_info);
+    if (ret) {
+        MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
+        return ret;
+    }
 
-        ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
-        if (ret) {
-            MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
-        }
-    }
-    break;
-
-    case IOCTL_BHI_WRITEIMAGE:
-    {
-        void *buf;
-        size_t size;
-
-        ret = copy_from_user(&size, ubuf, sizeof(size));
-        if (ret) {
-            MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
-            return ret;
-        }
-
-        buf = kmalloc(size, GFP_KERNEL);
-        if (buf == NULL) {
-            return -ENOMEM;
-        }
-
-        ret = copy_from_user(buf, ubuf+sizeof(size), size);
-        if (ret) {
-            MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %ld\n", ret);
-            kfree(buf);
-            return ret;
-        }
-
-        ret = BhiWrite(mhi_cntrl, buf, size);
-        if (ret) {
-            MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
-        }
-        kfree(buf);
-    }
-    break;
-
-    default:
-        break;
+    ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
+    if (ret) {
+        MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
     }
 
     return ret;
 }
 
-static const struct file_operations mhi_cntrl_fops = {
-    .unlocked_ioctl = mhi_cntrl_ioctl,
-    .open = mhi_cntrl_open,
-    .release = mhi_cntrl_release,
-};
-
-int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl)
+long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
 {
-    mhi_cntrl->miscdev.minor = MISC_DYNAMIC_MINOR;
-    mhi_cntrl->miscdev.name = "mhi_BHI";
-    mhi_cntrl->miscdev.fops = &mhi_cntrl_fops;
+    long ret = -EINVAL;
+    size_t size;
 
-    return misc_register(&mhi_cntrl->miscdev);
-}
+    ret = copy_from_user(&size, ubuf, sizeof(size));
+    if (ret) {
+        MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
+        return ret;
+    }
 
-void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl)
-{
-    misc_deregister(&mhi_cntrl->miscdev);
+    ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
+    if (ret) {
+        MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
+    }
+
+    return ret;
 }
diff --git a/quectel_MHI/src/core/mhi_dtr.c b/quectel_MHI/src/core/mhi_dtr.c
index 398ea31..7ce44b3 100644
--- a/quectel_MHI/src/core/mhi_dtr.c
+++ b/quectel_MHI/src/core/mhi_dtr.c
@@ -68,6 +68,16 @@ static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
     if (tiocm & TIOCM_RTS)
         dtr_msg->msg |= CTRL_MSG_RTS;
 
+/*
+* 'minicom -D /dev/mhi_DUN' sends RTS:1 on open and RTS:0 on exit.
+* RTS:0 prevents the modem from outputting AT responses.
+* But 'busybox microcom' does not send any RTS to the modem.
+* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
+* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
+*/
+    dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
+        !!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
+
     reinit_completion(&dtr_chan->completion);
     ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
                              sizeof(*dtr_msg), MHI_EOT);
diff --git a/quectel_MHI/src/core/mhi_init.c b/quectel_MHI/src/core/mhi_init.c
index e0cdf98..4d21d39 100644
--- a/quectel_MHI/src/core/mhi_init.c
+++ b/quectel_MHI/src/core/mhi_init.c
@@ -12,9 +12,35 @@
 #include
 #include
 #include
+#include
+#include
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,11,0 ))
+#include
+#else
+#include
+#endif
 #include "mhi.h"
 #include "mhi_internal.h"
 
+struct mhi_controller_map {
+    u32 dev_id;
+    u32 domain;
+    u32 bus;
+    u32 slot;
+};
+
+#define MAX_MHI_CONTROLLER 16
+struct mhi_controller_map mhi_controller_minors[MAX_MHI_CONTROLLER];
+
+#define MHI_CNTRL_DRIVER_NAME "mhi_cntrl_q"
+struct mhi_cntrl_drv {
+    struct list_head head;
+    struct mutex lock;
+    struct class *class;
+    int major;
+};
+static struct mhi_cntrl_drv mhi_cntrl_drv;
+
 const char * const mhi_ee_str[MHI_EE_MAX] = {
     [MHI_EE_PBL] = "PBL",
     [MHI_EE_SBL] = "SBL",
@@ -109,7 +135,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
 
     if (mhi_cntrl->msi_allocated == 1) {
-        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+        free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl);
         return;
     }
 
@@ -120,7 +146,7 @@
         free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
     }
 
-    free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+    free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl);
 }
 
 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
@@ -134,8 +160,8 @@
             mhi_event->msi = 0;
         }
 
-        ret = request_irq(mhi_cntrl->irq[0],
-                          mhi_one_msi_handlr, IRQF_SHARED, "mhi", mhi_cntrl);
+        ret = request_threaded_irq(mhi_cntrl->irq[0], NULL,
+                          mhi_one_msi_handlr, IRQF_ONESHOT, "mhi", mhi_cntrl);
         if (ret) {
             MHI_ERR("Error requesting irq:%d, ret=%d\n", mhi_cntrl->irq[0], ret);
         }
@@ -143,7 +169,7 @@
     }
 
     /* for BHI INTVEC msi */
-    ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handlr,
+    ret = request_threaded_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_intvec_handlr,
                                mhi_intvec_threaded_handlr, IRQF_ONESHOT,
                                "mhi", mhi_cntrl);
     if (ret)
@@ -268,11 +294,374 @@ static const struct file_operations debugfs_chan_ops = {
 DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL,
                         mhi_debugfs_trigger_reset, "%llu\n");
 
+#ifdef ENABLE_MHI_MON
+struct mon_event_text {
+    struct list_head e_link;
+    int type;                   /* submit, complete, etc. */
+    unsigned int tstamp;
+    u32 chan;
+    dma_addr_t wp;
+    struct mhi_tre mhi_tre;
+    u8 data[32];
+    size_t len;
+};
+
+#define EVENT_MAX  (16*PAGE_SIZE / sizeof(struct mon_event_text))
+#define PRINTF_DFL 250
+#define SLAB_NAME_SZ 30
+
+struct mon_reader_text {
+    struct kmem_cache *e_slab;
+    int nevents;
+    struct list_head e_list;
+    struct mon_reader r;        /* In C, parent class can be placed anywhere */
+
+    wait_queue_head_t wait;
+    int printf_size;
+    char *printf_buf;
+    int left_size;
+    int left_pos;
+    struct mutex printf_lock;
+
+    char slab_name[SLAB_NAME_SZ];
+};
+
+struct mon_text_ptr {
+    int cnt, limit;
+    char *pbuf;
+};
+
+static DEFINE_MUTEX(mon_lock);
+
+static inline unsigned int mon_get_timestamp(void)
+{
+    struct timespec64 now;
+    unsigned int stamp;
+
+    ktime_get_ts64(&now);
+    stamp = now.tv_sec & 0xFFF;  /* 2^32 = 4294967296. Limit to 4096s. */
+    stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC;
+    return stamp;
+}
+
+static void mon_text_event(struct mon_reader_text *rp,
+    u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len,
+    char ev_type)
+{
+    struct mon_event_text *ep;
+
+    if (rp->nevents >= EVENT_MAX ||
+        (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
+        rp->r.m_bus->cnt_text_lost++;
+        return;
+    }
+
+    ep->type = ev_type;
+    ep->tstamp = mon_get_timestamp();
+    ep->chan = chan;
+    ep->wp = wp;
+    ep->mhi_tre = *mhi_tre;
+    if (len > sizeof(ep->data))
+        len = sizeof(ep->data);
+    memcpy(ep->data, buf, len);
+    ep->len = len;
+    rp->nevents++;
+    list_add_tail(&ep->e_link, &rp->e_list);
+    wake_up(&rp->wait);
+}
+
+static void mon_text_submit(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
+{
+    struct mon_reader_text *rp = data;
+    mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'W');
+}
+
+static void mon_text_receive(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
+{
+    struct mon_reader_text *rp = data;
+    mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'R');
+}
+
+static void mon_text_complete(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre)
+{
+    struct mon_reader_text *rp = data;
+    mon_text_event(rp, chan, wp, mhi_tre, NULL, 0, 'E');
+}
+
+void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&mbus->lock, flags);
+    mbus->nreaders++;
+    list_add_tail(&r->r_link, &mbus->r_list);
+    spin_unlock_irqrestore(&mbus->lock, flags);
+
+    kref_get(&mbus->ref);
+}
+
+static void mon_bus_drop(struct kref *r)
+{
+    struct mhi_controller *mbus = container_of(r, struct mhi_controller, ref);
+    kfree(mbus);
+}
+
+static void mon_reader_del(struct mhi_controller *mbus, struct mon_reader *r)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&mbus->lock, flags);
+    list_del(&r->r_link);
+    --mbus->nreaders;
+    spin_unlock_irqrestore(&mbus->lock, flags);
+
+    kref_put(&mbus->ref, mon_bus_drop);
+}
+
+static void mon_text_ctor(void *mem)
+{
+    /*
+     * Nothing to initialize. No, really!
+     * So, we fill it with garbage to emulate a reused object.
+     */
+    memset(mem, 0xe5, sizeof(struct mon_event_text));
+}
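/*
 * Editorial sketch: kmem_cache_create() with a constructor, as used by the
 * mon reader below. The ctor runs when a slab page is populated, not on
 * every kmem_cache_alloc(), which is why mon_text_ctor() above only poisons
 * the object instead of initializing it. Illustrative names only.
 */
static struct kmem_cache *example_cache;

static void example_ctor(void *mem)
{
    memset(mem, 0, sizeof(struct mon_event_text));
}

static int example_cache_init(void)
{
    example_cache = kmem_cache_create("example_events",
                                      sizeof(struct mon_event_text),
                                      sizeof(long), 0, example_ctor);
    return example_cache ? 0 : -ENOMEM;
}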
+
+static int mon_text_open(struct inode *inode, struct file *file)
+{
+    struct mhi_controller *mbus;
+    struct mon_reader_text *rp;
+    int rc;
+
+    mutex_lock(&mon_lock);
+    mbus = inode->i_private;
+
+    rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL);
+    if (rp == NULL) {
+        rc = -ENOMEM;
+        goto err_alloc;
+    }
+    INIT_LIST_HEAD(&rp->e_list);
+    init_waitqueue_head(&rp->wait);
+    mutex_init(&rp->printf_lock);
+
+    rp->printf_size = PRINTF_DFL;
+    rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL);
+    if (rp->printf_buf == NULL) {
+        rc = -ENOMEM;
+        goto err_alloc_pr;
+    }
+
+    rp->r.m_bus = mbus;
+    rp->r.r_data = rp;
+    rp->r.rnf_submit = mon_text_submit;
+    rp->r.rnf_receive = mon_text_receive;
+    rp->r.rnf_complete = mon_text_complete;
+
+    snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
+    rp->e_slab = kmem_cache_create(rp->slab_name,
+        sizeof(struct mon_event_text), sizeof(long), 0,
+        mon_text_ctor);
+    if (rp->e_slab == NULL) {
+        rc = -ENOMEM;
+        goto err_slab;
+    }
+
+    mon_reader_add(mbus, &rp->r);
+
+    file->private_data = rp;
+    mutex_unlock(&mon_lock);
+    return 0;
+
+// err_busy:
+//    kmem_cache_destroy(rp->e_slab);
+err_slab:
+    kfree(rp->printf_buf);
+err_alloc_pr:
+    kfree(rp);
+err_alloc:
+    mutex_unlock(&mon_lock);
+    return rc;
+}
+
+static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp,
+    struct mhi_controller *mbus)
+{
+    struct list_head *p;
+    unsigned long flags;
+
+    spin_lock_irqsave(&mbus->lock, flags);
+    if (list_empty(&rp->e_list)) {
+        spin_unlock_irqrestore(&mbus->lock, flags);
+        return NULL;
+    }
+    p = rp->e_list.next;
+    list_del(p);
+    --rp->nevents;
+    spin_unlock_irqrestore(&mbus->lock, flags);
+    return list_entry(p, struct mon_event_text, e_link);
+}
+
+static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
+    struct file *file)
+{
+    struct mhi_controller *mbus = rp->r.m_bus;
+    DECLARE_WAITQUEUE(waita, current);
+    struct mon_event_text *ep;
+
+    add_wait_queue(&rp->wait, &waita);
+    set_current_state(TASK_INTERRUPTIBLE);
+    while ((ep = mon_text_fetch(rp, mbus)) == NULL) {
+        if (file->f_flags & O_NONBLOCK) {
+            set_current_state(TASK_RUNNING);
+            remove_wait_queue(&rp->wait, &waita);
+            return ERR_PTR(-EWOULDBLOCK);
+        }
+        /*
+         * We do not count nwaiters, because ->release is supposed
+         * to be called when all openers are gone only.
+         */
+        schedule();
+        if (signal_pending(current)) {
+            remove_wait_queue(&rp->wait, &waita);
+            return ERR_PTR(-EINTR);
+        }
+        set_current_state(TASK_INTERRUPTIBLE);
+    }
+    set_current_state(TASK_RUNNING);
+    remove_wait_queue(&rp->wait, &waita);
+    return ep;
+}
+
+static ssize_t mon_text_read_u(struct file *file, char __user *buf,
+    size_t nbytes, loff_t *ppos)
+{
+    struct mon_reader_text *rp = file->private_data;
+    struct mon_event_text *ep;
+    struct mon_text_ptr ptr;
+
+    if (rp->left_size) {
+        int cnt = rp->left_size;
+
+        if (cnt > nbytes)
+            cnt = nbytes;
+        if (copy_to_user(buf, rp->printf_buf + rp->left_pos, cnt))
+            return -EFAULT;
+        rp->left_pos += cnt;
+        rp->left_size -= cnt;
+        return cnt;
+    }
+
+    if (IS_ERR(ep = mon_text_read_wait(rp, file)))
+        return PTR_ERR(ep);
+    mutex_lock(&rp->printf_lock);
+    ptr.cnt = 0;
+    ptr.pbuf = rp->printf_buf;
+    ptr.limit = rp->printf_size;
+
+    ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+        "%u %c %03d WP:%llx TRE: %llx %08x %08x",
+        ep->tstamp, ep->type, ep->chan, ep->wp,
+        ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]);
+
+    if (ep->len) {
+        struct mon_text_ptr *p = &ptr;
+        size_t i = 0;
+
+        for (i = 0; i < ep->len; i++) {
+            if (i % 4 == 0) {
+                p->cnt += snprintf(p->pbuf + p->cnt,
+                                   p->limit - p->cnt,
+                                   " ");
+            }
+            p->cnt += snprintf(p->pbuf + p->cnt,
+                               p->limit - p->cnt,
+                               "%02x", ep->data[i]);
+        }
+
+    }
+
+    ptr.cnt += snprintf(ptr.pbuf +ptr.cnt, ptr.limit - ptr.cnt, "\n");
+
+    if (ptr.cnt > nbytes) {
+        rp->left_pos = nbytes;
+        rp->left_size = ptr.cnt - nbytes;
+        ptr.cnt = nbytes;
+    }
+
+    if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
+        ptr.cnt = -EFAULT;
+    mutex_unlock(&rp->printf_lock);
+    kmem_cache_free(rp->e_slab, ep);
+    return ptr.cnt;
+}
+
+static int mon_text_release(struct inode *inode, struct file *file)
+{
+    struct mon_reader_text *rp = file->private_data;
+    struct mhi_controller *mbus;
+    /* unsigned long flags; */
+    struct list_head *p;
+    struct mon_event_text *ep;
+
+    mutex_lock(&mon_lock);
+    mbus = inode->i_private;
+
+    if (mbus->nreaders <= 0) {
+        mutex_unlock(&mon_lock);
+        return 0;
+    }
+    mon_reader_del(mbus, &rp->r);
+
+    /*
+     * In theory, e_list is protected by mbus->lock. However,
+     * after mon_reader_del has finished, the following is the case:
+     *  - we are not on reader list anymore, so new events won't be added;
+     *  - whole mbus may be dropped if it was orphaned.
+     * So, we better not touch mbus.
+     */
+    /* spin_lock_irqsave(&mbus->lock, flags); */
+    while (!list_empty(&rp->e_list)) {
+        p = rp->e_list.next;
+        ep = list_entry(p, struct mon_event_text, e_link);
+        list_del(p);
+        --rp->nevents;
+        kmem_cache_free(rp->e_slab, ep);
+    }
+    /* spin_unlock_irqrestore(&mbus->lock, flags); */
+
+    kmem_cache_destroy(rp->e_slab);
+    kfree(rp->printf_buf);
+    kfree(rp);
+
+    mutex_unlock(&mon_lock);
+    return 0;
+}
+
+
+static const struct file_operations mon_fops_text_u = {
+    .owner = THIS_MODULE,
+    .open = mon_text_open,
+    .llseek = no_llseek,
+    .read = mon_text_read_u,
+    .release = mon_text_release,
+};
+#endif
+
 void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
 {
     struct dentry *dentry;
     char node[32];
 
+#ifdef ENABLE_MHI_MON
+    struct mhi_controller *mbus = mhi_cntrl;
+
+    mbus->nreaders = 0;
+    kref_init(&mbus->ref);
+    spin_lock_init(&mbus->lock);
+    INIT_LIST_HEAD(&mbus->r_list);
+#endif
+
     if (!mhi_cntrl->parent)
         snprintf(node, sizeof(node), "mhi_%04x_%02u:%02u.%02u",
                  mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
@@ -294,6 +683,10 @@ void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
                         &debugfs_chan_ops);
     debugfs_create_file("reset", 0444, dentry, mhi_cntrl,
                         &debugfs_trigger_reset_fops);
+#ifdef ENABLE_MHI_MON
+    debugfs_create_file("mhimon", 0444, dentry, mhi_cntrl,
+                        &mon_fops_text_u);
+#endif
     mhi_cntrl->dentry = dentry;
 }
 
@@ -316,6 +709,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
 
     atomic_set(&mhi_cntrl->dev_wake, 0);
     atomic_set(&mhi_cntrl->alloc_size, 0);
+    atomic_set(&mhi_cntrl->pending_pkts, 0);
 
     mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
     if (!mhi_ctxt)
@@ -707,9 +1101,11 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 
     /* setup wake db */
     mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+#if 0 //'EM120RGLAPR02A07M4G_11' treats this as chan 127's interrupt and reports a completion event over the cmd ring, but the cmd ring is not set up yet
     mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
     mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
     mhi_cntrl->wake_set = false;
+#endif
 
     /* setup channel db addresses */
     mhi_chan = mhi_cntrl->mhi_chan;
@@ -788,6 +1184,16 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
         tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_out_chan_ring[mhi_chan->ring];
         tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_out_chan_ring[mhi_chan->ring]);
     }
+#ifdef ENABLE_IP_SW0
+    else if (MHI_CLIENT_IP_SW_0_IN == mhi_chan->chan) {
+        tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_in_chan_ring[mhi_chan->ring];
+        tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_in_chan_ring[mhi_chan->ring]);
+    }
+    else if (MHI_CLIENT_IP_SW_0_OUT == mhi_chan->chan) {
+        tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_out_chan_ring[mhi_chan->ring];
+        tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_out_chan_ring[mhi_chan->ring]);
+    }
+#endif
     else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) {
         tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring];
         tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]);
@@ -1191,6 +1597,7 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
     if (!mhi_cntrl->mhi_event)
         return -ENOMEM;
 
+    mhi_cntrl->msi_irq_base = 0;
     /* populate ev ring */
     mhi_event = mhi_cntrl->mhi_event;
     i = 0;
@@ -1199,6 +1606,8 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
         mhi_event->er_index = i;
 
         mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements
+        if (i == PRIMARY_EVENT_RING || i == ADPL_EVT_RING)
+            mhi_event->ring.elements = 256; //256 is enough; 1024 sometimes makes the driver fail to open a channel (x6x fails to malloc)
 
         mhi_event->intmod = 1; //Interrupt moderation time in ms
 
@@ -1209,12 +1618,23 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
         if (i == IPA_IN_EVENT_RING)
             mhi_event->intmod = 5;
 
-        mhi_event->msi = 1 + i; //MSI associated with this event ring
+#ifdef ENABLE_IP_SW0
+        if (i == SW_0_IN_EVT_RING)
+            mhi_event->intmod = 5;
+#endif
+
+        mhi_event->msi = 1 + i + mhi_cntrl->msi_irq_base; //MSI associated with this event ring
 
         if (i == IPA_OUT_EVENT_RING)
             mhi_event->chan = MHI_CLIENT_IP_HW_0_OUT; //Dedicated channel number, if it's a dedicated event ring
         else if (i == IPA_IN_EVENT_RING)
             mhi_event->chan = MHI_CLIENT_IP_HW_0_IN; //Dedicated channel number, if it's a dedicated event ring
+#ifdef ENABLE_IP_SW0
+        else if (i == SW_0_OUT_EVT_RING)
+            mhi_event->chan = MHI_CLIENT_IP_SW_0_OUT;
+        else if (i == SW_0_IN_EVT_RING)
+            mhi_event->chan = MHI_CLIENT_IP_SW_0_IN;
+#endif
         else
             mhi_event->chan = 0;
 
@@ -1235,6 +1655,10 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 
         if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING)
             mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
+#ifdef ENABLE_IP_SW0
+        else if (i == SW_0_OUT_EVT_RING || i == SW_0_IN_EVT_RING)
+            mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
+#endif
         else
             mhi_event->data_type = MHI_ER_CTRL_ELEMENT_TYPE;
 
@@ -1261,14 +1685,14 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
             mhi_cntrl->sw_ev_rings++;
 
         mhi_event->cl_manage = false;
-        if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN)
+        if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN || mhi_event->chan == MHI_CLIENT_IP_SW_0_IN)
             mhi_event->cl_manage = true;
         mhi_event->offload_ev = false;
         mhi_event++;
     }
 
     /* we need msi for each event ring + additional one for BHI */
-    mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1;
+    mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1 + mhi_cntrl->msi_irq_base;
 
     return 0;
 }
@@ -1321,10 +1745,10 @@ static struct chan_cfg_t chan_cfg[] = {
 //"Qualcomm EDL "
     {"EDL", MHI_CLIENT_EDL_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
     {"EDL", MHI_CLIENT_EDL_IN, NUM_MHI_CHAN_RING_ELEMENTS},
-#if 0 //AG15
+#ifdef ENABLE_IP_SW0
 //"Qualcomm PCIe LOCAL Adapter"
-    {"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
-    {"IP_SW0", MHI_CLIENT_IP_SW_0_IN, NUM_MHI_CHAN_RING_ELEMENTS},
+    {"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_SW_IP_RING_ELEMENTS},
+    {"IP_SW0", MHI_CLIENT_IP_SW_0_IN, NUM_MHI_SW_IP_RING_ELEMENTS},
 #endif
 //"Qualcomm PCIe WWAN Adapter"
     {"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS},
@@ -1381,7 +1805,8 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
          */
         mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
 
-        if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN) {
+        if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN
+            || chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN) {
             mhi_chan->ring = 0;
         }
         else {
@@ -1393,6 +1818,12 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
             mhi_chan->er_index = IPA_OUT_EVENT_RING;
         else if (chan == MHI_CLIENT_IP_HW_0_IN)
             mhi_chan->er_index = IPA_IN_EVENT_RING;
+#ifdef ENABLE_IP_SW0
+        else if (chan == MHI_CLIENT_IP_SW_0_OUT)
+            mhi_chan->er_index = SW_0_OUT_EVT_RING;
+        else if (chan == MHI_CLIENT_IP_SW_0_IN)
+            mhi_chan->er_index = SW_0_IN_EVT_RING;
+#endif
         else
             mhi_chan->er_index = PRIMARY_EVENT_RING;
 
@@ -1549,6 +1980,30 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
     //if (!mhi_cntrl->of_node)
     //    return -EINVAL;
 
+    for (i = 0; i < MAX_MHI_CONTROLLER; i++) {
+        if (mhi_controller_minors[i].dev_id == mhi_cntrl->dev_id
+            && mhi_controller_minors[i].domain == mhi_cntrl->domain
+            && mhi_controller_minors[i].bus == mhi_cntrl->bus
+            && mhi_controller_minors[i].slot == mhi_cntrl->slot) {
+            mhi_cntrl->cntrl_idx = i;
+            break;
+        }
+        else if (mhi_controller_minors[i].dev_id == 0
+            && mhi_controller_minors[i].domain == 0
+            && mhi_controller_minors[i].bus == 0
+            && mhi_controller_minors[i].slot == 0) {
+            mhi_controller_minors[i].dev_id = mhi_cntrl->dev_id;
+            mhi_controller_minors[i].domain = mhi_cntrl->domain;
+            mhi_controller_minors[i].bus = mhi_cntrl->bus;
+            mhi_controller_minors[i].slot = mhi_cntrl->slot;
+            mhi_cntrl->cntrl_idx = i;
+            break;
+        }
+    }
+
+    if (i == MAX_MHI_CONTROLLER)
+        return -EINVAL;
+
     if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
         return -EINVAL;
 
@@ -1574,6 +2029,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
     INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
     INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
     INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+    INIT_DELAYED_WORK(&mhi_cntrl->ready_worker, mhi_pm_ready_worker);
     init_waitqueue_head(&mhi_cntrl->state_event);
 
     mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -1621,10 +2077,20 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
     mhi_dev->mhi_cntrl = mhi_cntrl;
     dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id,
                  mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
+
     ret = device_add(&mhi_dev->dev);
     if (ret)
         goto error_add_dev;
 
+    if (mhi_cntrl->cntrl_idx)
+        mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev,
+                                             MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL,
+                                             "mhi_BHI%d", mhi_cntrl->cntrl_idx);
+    else
+        mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev,
+                                             MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL,
+                                             "mhi_BHI");
+
     mhi_cntrl->mhi_dev = mhi_dev;
 
     mhi_cntrl->parent = mhi_bus.dentry;
@@ -1660,6 +2126,8 @@ void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
     kfree(mhi_cntrl->mhi_chan);
     kfree(mhi_cntrl->mhi_tsync);
 
+    if (mhi_cntrl->cntrl_dev)
+        device_destroy(mhi_cntrl_drv.class, MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx));
     device_del(&mhi_dev->dev);
     put_device(&mhi_dev->dev);
 
@@ -1991,6 +2459,7 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
     dev->release = mhi_release_device;
     dev->parent = mhi_cntrl->dev;
     mhi_dev->mhi_cntrl = mhi_cntrl;
+    mhi_dev->vendor = mhi_cntrl->vendor;
     mhi_dev->dev_id = mhi_cntrl->dev_id;
     mhi_dev->domain = mhi_cntrl->domain;
     mhi_dev->bus = mhi_cntrl->bus;
@@ -2001,6 +2470,102 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
     return mhi_dev;
 }
 
+static int mhi_cntrl_open(struct inode *inode, struct file *f)
+{
+    int ret = -ENODEV;
+    struct mhi_controller *mhi_cntrl;
+
+    mutex_lock(&mhi_bus.lock);
+    list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) {
+        if (MINOR(inode->i_rdev) == mhi_cntrl->cntrl_idx) {
+            ret = 0;
+            f->private_data = mhi_cntrl;
+            break;
+        }
+    }
+    mutex_unlock(&mhi_bus.lock);
+
+    return ret;
+}
+
+static int mhi_cntrl_release(struct inode *inode, struct file *f)
+{
+    f->private_data = NULL;
+    return 0;
+}
+
+#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1
+#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2
+long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *to);
+long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *from);
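/*
 * Editorial note: '0x8BE0 + 1' expands unparenthesized. It is harmless in
 * 'case' labels and '==' comparisons as used here, but would misbehave
 * inside other operators (e.g. 'IOCTL_BHI_GETDEVINFO << 1'). A safer,
 * purely illustrative form (disabled, since changing the values would
 * change the ABI seen by existing userspace tools):
 */
#if 0 /* sketch only */
#define EXAMPLE_BHI_GETDEVINFO (0x8BE0 + 1)
#define EXAMPLE_BHI_WRITEIMAGE (0x8BE0 + 2)
#endif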
+
+static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
+{
+    long ret = -ENODEV;
+    struct mhi_controller *mhi_cntrl;
+
+    mutex_lock(&mhi_bus.lock);
+    list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) {
+        if (mhi_cntrl == (struct mhi_controller *)f->private_data) {
+            ret = 0;
+            break;
+        }
+    }
+    mutex_unlock(&mhi_bus.lock);
+
+    if (ret)
+        return ret;
+
+    switch (cmd) {
+    case IOCTL_BHI_GETDEVINFO:
+        ret = bhi_get_dev_info(mhi_cntrl, (void __user *)__arg);
+        break;
+
+    case IOCTL_BHI_WRITEIMAGE:
+        ret = bhi_write_image(mhi_cntrl, (void __user *)__arg);
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+static const struct file_operations mhi_cntrl_fops = {
+    .unlocked_ioctl = mhi_cntrl_ioctl,
+    .open = mhi_cntrl_open,
+    .release = mhi_cntrl_release,
+};
+
+static int __init mhi_cntrl_init(void)
+{
+    int ret;
+
+    ret = register_chrdev(0, MHI_CNTRL_DRIVER_NAME, &mhi_cntrl_fops);
+    if (ret < 0)
+        return ret;
+
+    mhi_cntrl_drv.major = ret;
+    mhi_cntrl_drv.class = class_create(THIS_MODULE, MHI_CNTRL_DRIVER_NAME);
+    if (IS_ERR(mhi_cntrl_drv.class)) {
+        unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME);
+        return -ENODEV;
+    }
+
+    mutex_init(&mhi_cntrl_drv.lock);
+    INIT_LIST_HEAD(&mhi_cntrl_drv.head);
+
+    return 0;
+}
+
+void mhi_cntrl_exit(void)
+{
+    class_destroy(mhi_cntrl_drv.class);
+    unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME);
+}
+
 extern int mhi_dtr_init(void);
 extern void mhi_dtr_exit(void);
 extern int mhi_device_netdev_init(struct dentry *parent);
@@ -2048,6 +2613,11 @@ static int __init mhi_init(void)
         pr_err("Error mhi_device_uci_init ret:%d\n", ret);
     }
 
+    ret = mhi_cntrl_init();
+    if (ret) {
+        pr_err("Error mhi_cntrl_init ret:%d\n", ret);
+    }
+
     ret = mhi_controller_qcom_init();
     if (ret) {
         pr_err("Error mhi_controller_qcom_init ret:%d\n", ret);
@@ -2059,6 +2629,7 @@ static int __init mhi_init(void)
 static void mhi_exit(void)
 {
     mhi_controller_qcom_exit();
+    mhi_cntrl_exit();
     mhi_device_uci_exit();
     mhi_device_netdev_exit();
     mhi_dtr_exit();
diff --git a/quectel_MHI/src/core/mhi_internal.h b/quectel_MHI/src/core/mhi_internal.h
index 12f5361..31bd617 100644
--- a/quectel_MHI/src/core/mhi_internal.h
+++ b/quectel_MHI/src/core/mhi_internal.h
@@ -854,12 +854,16 @@ struct state_transition {
 
 /* Control Segment */
 struct mhi_ctrl_seg {
-    struct __packed mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
-    struct __packed mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
-    struct __packed mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
-    struct __packed mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
-    struct __packed mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
-    struct __packed mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __aligned(CMD_EL_PER_RING*16);
+    struct mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+    struct mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
+#ifdef ENABLE_IP_SW0
+    struct mhi_tre sw_in_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+    struct mhi_tre sw_out_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
+#endif
+    struct mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
+    struct mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __packed __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
+    struct mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __packed __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
+    struct mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __packed __aligned(CMD_EL_PER_RING*16);
 
     struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128);
     struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128);
@@ -1031,6 +1035,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 void mhi_pm_st_worker(struct work_struct *work);
 void mhi_fw_load_worker(struct work_struct *work);
 void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_pm_ready_worker(struct work_struct *work);
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
 void mhi_ctrl_ev_task(unsigned long data);
 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
diff --git a/quectel_MHI/src/core/mhi_main.c b/quectel_MHI/src/core/mhi_main.c
index f978965..335b945 100644
--- a/quectel_MHI/src/core/mhi_main.c
+++ b/quectel_MHI/src/core/mhi_main.c
@@ -68,6 +68,8 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
                              MISC_CAP_MASK, MISC_CAP_SHIFT, offset);
     if (ret)
         return ret;
+    if (*offset >= 0x1000)
+        return -ENXIO;
     do {
         ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
                                  CAP_CAPID_MASK, CAP_CAPID_SHIFT,
@@ -173,6 +175,13 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
     mhi_write_db(mhi_cntrl, ring->db_addr, db);
 }
 
+//#define DEBUG_CHAN100_DB
+#ifdef DEBUG_CHAN100_DB
+static atomic_t chan100_seq = ATOMIC_INIT(0);
+#define CHAN100_SIZE 0x1000
+static unsigned int chan100_t[CHAN100_SIZE];
+#endif
+
 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan)
 {
@@ -180,6 +189,11 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
     dma_addr_t db;
 
     db = ring->iommu_base + (ring->wp - ring->base);
+    /*
+     * Writes to the new ring element must be visible to the hardware
+     * before letting h/w know there is new element to fetch.
+     */
+    dma_wmb();
     *ring->ctxt_wp = db;
     mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
                                 ring->db_addr, db);
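/*
 * Editorial sketch of the ordering rule behind the dma_wmb() added above:
 * fill the descriptor in coherent memory first, barrier, then publish the
 * write pointer and ring the doorbell. The hi/lo doorbell register layout
 * below is an assumption for illustration, not taken from this driver.
 */
static void example_publish(u64 *ctxt_wp, u64 new_wp, void __iomem *db)
{
    /* TRE contents were written to coherent DMA memory before this point */
    dma_wmb();                  /* order TRE writes before the WP publish */
    *ctxt_wp = new_wp;          /* write pointer the device will fetch */
    writel(upper_32_bits(new_wp), db + 4); /* assumed hi/lo layout */
    writel(lower_32_bits(new_wp), db);     /* low write triggers the fetch */
}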
+ */ + dma_wmb(); *ring->ctxt_wp = db; mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db); @@ -223,9 +237,11 @@ int mhi_queue_nop(struct mhi_device *mhi_dev, static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { - ring->wp += ring->el_size; - if (ring->wp >= (ring->base + ring->len)) - ring->wp = ring->base; + void *wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) + wp = ring->base; + ring->wp = wp; /* smp update */ smp_wmb(); } @@ -233,9 +249,11 @@ static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { - ring->rp += ring->el_size; - if (ring->rp >= (ring->base + ring->len)) - ring->rp = ring->base; + void *rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; /* smp update */ smp_wmb(); } @@ -281,23 +299,24 @@ dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr) static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { - dma_addr_t ctxt_wp; + void *rp, *wp; /* update the WP */ - ring->wp += ring->el_size; - ctxt_wp = *ring->ctxt_wp + ring->el_size; - - if (ring->wp >= (ring->base + ring->len)) { - ring->wp = ring->base; - ctxt_wp = ring->iommu_base; + wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) { + wp = ring->base; } + ring->wp = wp; - *ring->ctxt_wp = ctxt_wp; + *ring->ctxt_wp = ring->iommu_base + (ring->wp - ring->base); /* update the RP */ - ring->rp += ring->el_size; - if (ring->rp >= (ring->base + ring->len)) - ring->rp = ring->base; + rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; /* visible to other cores */ smp_wmb(); @@ -359,6 +378,53 @@ void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, buf_info->p_addr); } +#ifdef ENABLE_MHI_MON +static void mon_bus_submit(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_submit(r->r_data, chan, wp, mhi_tre, buf, len); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_bus_receive(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_receive(r->r_data, chan, wp, mhi_tre, buf, len); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_bus_complete(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_complete(r->r_data, chan, wp, mhi_tre); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} +#endif + int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, void *buf, @@ -420,29 +486,40 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, mhi_tre->dword[0] = 
MHI_TRE_DATA_DWORD0(buf_info->len); mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0 : buf_info->len); + } +#endif + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, mhi_tre->dword[0], mhi_tre->dword[1]); + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); /* increment WP */ mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { - read_lock_bh(&mhi_chan->lock); - mhi_ring_chan_db(mhi_cntrl, mhi_chan); - read_unlock_bh(&mhi_chan->lock); +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (0<<30); } +#endif + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_bh(&mhi_chan->lock); if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) mhi_cntrl->wake_put(mhi_cntrl, true); - if (mhi_chan->dir == DMA_TO_DEVICE) { - unsigned used_elements = get_used_ring_elements(tre_ring->rp, tre_ring->wp, tre_ring->elements); - if (used_elements > mhi_chan->used_elements) - mhi_chan->used_elements = used_elements; - } - read_unlock_bh(&mhi_cntrl->pm_lock); return 0; @@ -524,19 +601,31 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); } +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0: buf_info->len); + } +#endif + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, mhi_tre->dword[0], mhi_tre->dword[1]); + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); /* increment WP */ mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { - read_lock_bh(&mhi_chan->lock); mhi_ring_chan_db(mhi_cntrl, mhi_chan); - read_unlock_bh(&mhi_chan->lock); } + read_unlock_bh(&mhi_chan->lock); if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) mhi_cntrl->wake_put(mhi_cntrl, true); @@ -583,6 +672,12 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 
0 : buf_info->len); + } +#endif MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, mhi_tre->dword[0], mhi_tre->dword[1]); @@ -645,6 +740,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, mhi_cntrl->wake_get(mhi_cntrl, false); } + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { unsigned long flags; @@ -929,7 +1029,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) mhi_dev->chan_name); mhi_dealloc_device(mhi_cntrl, mhi_dev); } - sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + ret = sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); } } @@ -1036,11 +1136,6 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, if (dev_rp >= (tre_ring->base + tre_ring->len)) dev_rp = tre_ring->base; - if (mhi_chan->dir == DMA_FROM_DEVICE) { - u32 used_elements = get_used_ring_elements(tre_ring->rp, dev_rp, tre_ring->elements); - if (used_elements > mhi_chan->used_elements) - mhi_chan->used_elements = used_elements; - } mhi_chan->used_events[ev_code]++; result.dir = mhi_chan->dir; @@ -1058,6 +1153,24 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, result.buf_addr = buf_info->cb_buf; result.bytes_xferd = xfer_len; +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + void *buf = NULL; + size_t len = 0; + + if (mhi_chan->queue_xfer == mhi_queue_skb) { + struct sk_buff *skb = result.buf_addr; + buf = skb->data; + len = result.bytes_xferd; + } + else if (CHAN_INBOUND(mhi_chan->chan)) { + buf = result.buf_addr; + len = result.bytes_xferd; + } + mon_bus_receive(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, local_rp), local_rp, buf, len); + } +#endif mhi_del_ring_element(mhi_cntrl, buf_ring); mhi_del_ring_element(mhi_cntrl, tre_ring); local_rp = tre_ring->rp; @@ -1065,6 +1178,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, /* notify client */ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + if (mhi_chan->dir == DMA_TO_DEVICE) { read_lock_bh(&mhi_cntrl->pm_lock); mhi_cntrl->wake_put(mhi_cntrl, false); @@ -1098,6 +1216,12 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan); mhi_chan->db_cfg.db_mode = true; read_lock_irqsave(&mhi_cntrl->pm_lock, flags); +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->rp)&0xffff) | (0xf0000); + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (1<<30); + } +#endif if (tre_ring->wp != tre_ring->rp && MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { mhi_ring_chan_db(mhi_cntrl, mhi_chan); @@ -1247,9 +1371,25 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, while (dev_rp != local_rp) { enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); +//#define QL_READ_EVENT_WA //from Quectel Windows driver +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + if (local_rp->ptr == 0 && local_rp->dword[0] == 0 && local_rp->dword[1] == 0) { + // event content no sync to memory, just break and wait next event. 
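+				// (editor's note) i.e. the event's DMA write has not yet
+				// become visible to the CPU, so treat an all-zero TRE as
+				// "not ready" and re-read on the next interrupt; the WA
+				// assumes the device never produces a legitimately
+				// all-zero event element.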
+ MHI_ERR("Zero Event!\n"); + break; + } + } +#endif + mhi_dump_tre(mhi_cntrl, local_rp); MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif switch (type) { case MHI_PKT_TYPE_STATE_CHANGE_EVENT: @@ -1350,12 +1490,23 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, break; } +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + local_rp->ptr = 0; + local_rp->dword[0] = local_rp->dword[1] = 0; + } +#endif + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp; dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); count++; } + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; + } + read_lock_bh(&mhi_cntrl->pm_lock); if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) mhi_ring_er_db(mhi_event); @@ -1376,11 +1527,9 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; int count = 0; u32 chan; - struct mhi_chan *mhi_chan; - int used_elements = 0; - void *chan_local_rp = NULL, *evt_local_rp = NULL; - if (mhi_event->mhi_chan) - chan_local_rp = mhi_event->mhi_chan->tre_ring.rp; + struct mhi_chan *mhi_chan = NULL; + u32 chan_count = 0; + void *chan_local_rp = NULL; if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { MHI_ERR("No EV access, PM_STATE:%s\n", @@ -1390,7 +1539,6 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); local_rp = ev_ring->rp; - evt_local_rp = local_rp; while (dev_rp != local_rp && event_quota > 0) { enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); @@ -1401,7 +1549,13 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, chan = MHI_TRE_GET_EV_CHID(local_rp); mhi_chan = &mhi_cntrl->mhi_chan[chan]; + chan_local_rp = mhi_chan->tre_ring.rp; +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); event_quota--; @@ -1410,21 +1564,20 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, event_quota--; } + chan_count += get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements); mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp; - dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (local_rp == dev_rp || event_quota == 0) { + if (chan_count > mhi_chan->used_elements) + mhi_chan->used_elements = chan_count; + chan_count = 0; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + } count++; } - used_elements = get_used_ring_elements(evt_local_rp, dev_rp, ev_ring->elements); - if (used_elements > mhi_event->used_elements) - mhi_event->used_elements = used_elements; - - mhi_chan = mhi_event->mhi_chan; - if (chan_local_rp && mhi_chan && mhi_chan->dir == DMA_FROM_DEVICE) { - used_elements = get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements); - if (used_elements > mhi_chan->used_elements) - mhi_chan->used_elements = used_elements; + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; } read_lock_bh(&mhi_cntrl->pm_lock); @@ -1522,6 +1675,7 @@ void mhi_ev_task(unsigned long data) MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index); 
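+	/* (editor's note) runtime_mark_last_busy() below refreshes the
+	 * runtime-PM autosuspend timer on every event-ring pass, so the
+	 * link is not autosuspended in the middle of a traffic burst;
+	 * this assumes the controller glue maps the hook onto
+	 * pm_runtime_mark_last_busy(). */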
+ mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data); /* process all pending events */ spin_lock_bh(&mhi_event->lock); mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); @@ -1546,6 +1700,7 @@ void mhi_ctrl_ev_task(unsigned long data) if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) return; + mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data); /* process ctrl events events */ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); @@ -1608,8 +1763,9 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev) if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { state = mhi_get_mhi_state(mhi_cntrl); ee = mhi_get_exec_env(mhi_cntrl); - MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee), - TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); + if (mhi_cntrl->msi_allocated >= 5 ||(mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); } if (state == MHI_STATE_SYS_ERR) { @@ -1633,14 +1789,11 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev) else schedule_work(&mhi_cntrl->syserr_worker); } + if (mhi_cntrl->msi_allocated >= 5||(mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); - MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee), - TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); - - if (state == MHI_STATE_READY && ee == MHI_EE_AMSS && mhi_cntrl->ee == MHI_EE_PTHRU) { - mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY); - } - else if (pm_state == MHI_PM_POR) { + if (pm_state == MHI_PM_POR) { wake_up_all(&mhi_cntrl->state_event); } @@ -1730,7 +1883,11 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl, break; } - +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, 128, mhi_to_physical(ring, cmd_tre), cmd_tre, NULL, 0); + } +#endif MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr, cmd_tre->dword[0], cmd_tre->dword[1]); @@ -1791,7 +1948,6 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, mhi_cntrl->wake_get(mhi_cntrl, false); read_unlock_bh(&mhi_cntrl->pm_lock); mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); - mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); if (ret) { @@ -1808,6 +1964,8 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, goto error_send_cmd; } + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + write_lock_irq(&mhi_chan->lock); mhi_chan->ch_state = MHI_CH_STATE_ENABLED; write_unlock_irq(&mhi_chan->lock); @@ -1858,6 +2016,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, return 0; error_send_cmd: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); read_lock_bh(&mhi_cntrl->pm_lock); mhi_cntrl->wake_put(mhi_cntrl, false); read_unlock_bh(&mhi_cntrl->pm_lock); @@ -1931,6 +2090,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, while (tre_ring->rp != tre_ring->wp) { struct mhi_buf_info *buf_info = buf_ring->rp; + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); 
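+			/* (editor's note) balances the runtime_get() taken in the
+			 * mhi_queue_* paths for every uplink buffer flushed here */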
+ } + if (mhi_chan->dir == DMA_TO_DEVICE) mhi_cntrl->wake_put(mhi_cntrl, false); if (!buf_info->pre_mapped) @@ -2028,7 +2192,6 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, read_unlock_bh(&mhi_cntrl->pm_lock); mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); - mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); if (ret) { MHI_ERR("Failed to send reset chan cmd\n"); @@ -2042,6 +2205,7 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, MHI_ERR("Failed to receive cmd completion, still resetting\n"); error_completion: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); read_lock_bh(&mhi_cntrl->pm_lock); mhi_cntrl->wake_put(mhi_cntrl, false); read_unlock_bh(&mhi_cntrl->pm_lock); @@ -2059,6 +2223,7 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d) { struct mhi_controller *mhi_cntrl = m->private; int reg = 0; + int ret; u32 val[4]; seq_printf(m, @@ -2078,20 +2243,20 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d) seq_printf(m, "dump mhi reg addr:%p\n", mhi_cntrl->regs); for (reg = 0; reg < 0x100; reg+=16) { val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; - mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]); - mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]); - mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]); - mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]); seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); } seq_printf(m, "dump bhi reg addr:%p\n", mhi_cntrl->bhi); for (reg = 0; reg < 0x100; reg+=16) { val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; - mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]); - mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]); - mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]); - mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]); seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); } @@ -2120,28 +2285,28 @@ int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d) i, er_ctxt->intmodc, er_ctxt->intmodt, er_ctxt->rbase, er_ctxt->rlen); seq_printf(m, - " rp:0x%llx wp:0x%llx local_rp:0x%llx db:0x%llx\n", + " rp:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", er_ctxt->rp, er_ctxt->wp, (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), (unsigned long long)mhi_event->db_cfg.db_val); seq_printf(m, "used:%u\n", mhi_event->used_elements); - } - } +#ifdef DEBUG_CHAN100_DB + if (mhi_event->mhi_chan && mhi_event->chan == 100) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t j; -#if 0 - { - struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[PRIMARY_EVENT_RING]; - struct mhi_ring *ring = &mhi_event->ring; - struct mhi_tre *tre = (struct mhi_tre *)ring->base; - size_t i; - for (i = 0; i < ring->elements; i++, tre++) { - seq_printf(m, - "%llx, %08x, %08x\n", - tre->ptr, tre->dword[0], tre->dword[1]); + 
for (j = 0; j < ring->elements; j++, tre++) { + seq_printf(m, + "%08x: %llx, %08x, %08x\n", + (unsigned int)(j*sizeof(struct mhi_tre)), + tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif } } -#endif return 0; } @@ -2172,9 +2337,9 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) chan_ctxt->pollcfg, chan_ctxt->chtype, chan_ctxt->erindex); seq_printf(m, - " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + " base:0x%llx len:0x%llx rp:%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", chan_ctxt->rbase, chan_ctxt->rlen, - chan_ctxt->wp, + chan_ctxt->rp, chan_ctxt->wp, (unsigned long long)mhi_to_physical(ring, ring->rp), (unsigned long long)mhi_to_physical(ring, ring->wp), (unsigned long long)mhi_chan->db_cfg.db_val); @@ -2182,6 +2347,16 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) mhi_chan->used_events[MHI_EV_CC_EOB], mhi_chan->used_events[MHI_EV_CC_EOT], mhi_chan->used_events[MHI_EV_CC_OOB],mhi_chan->used_events[MHI_EV_CC_DB_MODE]); +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + unsigned int n = 0; + seq_printf(m, "chan100_seq = %04x\n", atomic_read(&chan100_seq)%CHAN100_SIZE); + for (n = 0; n < CHAN100_SIZE; n++) { + seq_printf(m, "%04x: %08x\n", n, chan100_t[n]); + } + } +#endif + #if 0 if (ring->base && /*(i&1) &&*/ (i < MHI_CLIENT_IP_HW_0_OUT)) { struct mhi_tre *tre = (struct mhi_tre *)ring->base; diff --git a/quectel_MHI/src/core/mhi_pm.c b/quectel_MHI/src/core/mhi_pm.c index ee77055..fd4bd96 100644 --- a/quectel_MHI/src/core/mhi_pm.c +++ b/quectel_MHI/src/core/mhi_pm.c @@ -157,6 +157,15 @@ void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) { unsigned long flags; +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and do not need to deassert + if (mhi_cntrl->dev_state == MHI_STATE_M2) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } + return; +#endif + /* if set, regardless of count set the bit if not set */ if (unlikely(force)) { spin_lock_irqsave(&mhi_cntrl->wlock, flags); @@ -188,6 +197,10 @@ void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override) { unsigned long flags; +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and do not need to deassert + return; +#endif + #if 1 //Add by Quectel if (atomic_read(&mhi_cntrl->dev_wake) == 0) return; @@ -423,6 +436,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) struct mhi_event *mhi_event; MHI_LOG("Processing Mission Mode Transition\n"); + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_EE_MISSION_MODE); /* force MHI to be in M0 state before continuing */ ret = __mhi_device_get_sync(mhi_cntrl); @@ -553,12 +567,13 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, /* Set the numbers of Event Rings supported */ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); /* * device cleares INTVEC as part of RESET processing, * re-program it */ - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); } MHI_LOG("Waiting for all pending event ring processing to complete\n"); @@ -580,6 +595,7 @@ static void 
mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, MHI_LOG("Waiting for all pending threads to complete\n"); wake_up_all(&mhi_cntrl->state_event); + flush_delayed_work(&mhi_cntrl->ready_worker); flush_work(&mhi_cntrl->st_worker); flush_work(&mhi_cntrl->fw_worker); @@ -713,6 +729,27 @@ void mhi_pm_sys_err_worker(struct work_struct *work) mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); } +void mhi_pm_ready_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + ready_worker.work); + enum mhi_ee ee = MHI_EE_MAX; + + if (mhi_cntrl->dev_state != MHI_STATE_RESET) + return; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (ee == MHI_EE_PTHRU) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else if (ee == MHI_EE_AMSS || ee == MHI_EE_SBL) + mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY); +} + void mhi_pm_st_worker(struct work_struct *work) { struct state_transition *itr, *tmp; @@ -794,7 +831,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) #if 1 //GLUE.SDX55_LE.1.0-00098-NOOP_TEST-1\common\hostdrivers\win\MhiHost MhiInitNewDev() /* Check device Channels support */ - mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, ®Val); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, ®Val); #if 0 val = MHI_READ_REG_FIELD(regVal, MHICFG, NCH); MHI_LOG("Device CHs: %d\n", val); @@ -819,6 +856,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) mutex_lock(&mhi_cntrl->pm_mutex); mhi_cntrl->pm_state = MHI_PM_DISABLE; + mhi_cntrl->dev_state = MHI_STATE_RESET; if (!mhi_cntrl->pre_init) { /* setup device context */ @@ -858,7 +896,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) mhi_cntrl->bhie = mhi_cntrl->regs + val; } - mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); mhi_cntrl->pm_state = MHI_PM_POR; mhi_cntrl->ee = MHI_EE_MAX; current_ee = mhi_get_exec_env(mhi_cntrl); @@ -868,19 +906,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); - if (current_ee == MHI_EE_PTHRU) { - for (val = 0; val < 30; val++) { - msleep(1); - current_ee = mhi_get_exec_env(mhi_cntrl); - if (current_ee != MHI_EE_PTHRU) { - MHI_LOG("dev_state:%s ee:%s\n", - TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), - TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); - break; - } - } - } - /* confirm device is in valid exec env */ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { //MHI_ERR("Not a valid ee for power on\n"); @@ -895,10 +920,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) //if (next_state == MHI_ST_TRANSITION_PBL) // schedule_work(&mhi_cntrl->fw_worker); - mhi_queue_state_transition(mhi_cntrl, next_state); + if (next_state == MHI_ST_TRANSITION_PBL) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else + mhi_queue_state_transition(mhi_cntrl, next_state); mhi_init_debugfs(mhi_cntrl); - mhi_cntrl_register_miscdev(mhi_cntrl); mutex_unlock(&mhi_cntrl->pm_mutex); @@ -940,7 +967,6 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) } mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); - mhi_cntrl_deregister_miscdev(mhi_cntrl); mhi_deinit_debugfs(mhi_cntrl); if (!mhi_cntrl->pre_init) { 
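
Editor's note on the PTHRU handling above: the old 30 x msleep(1) poll in
mhi_async_power_up() is replaced by the self-rescheduling ready_worker,
which re-checks the exec env every 10 ms until the device leaves
MHI_EE_PTHRU. For the worker to run, the controller init path (in
mhi_init.c, not shown in this excerpt) presumably sets it up roughly as:

	INIT_DELAYED_WORK(&mhi_cntrl->ready_worker, mhi_pm_ready_worker);

and teardown flushes it via the flush_delayed_work() call added to
mhi_pm_disable_transition() above.
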
diff --git a/quectel_MHI/src/devices/mhi_netdev_quectel.c b/quectel_MHI/src/devices/mhi_netdev_quectel.c index 0864d7d..5d7e6e8 100644 --- a/quectel_MHI/src/devices/mhi_netdev_quectel.c +++ b/quectel_MHI/src/devices/mhi_netdev_quectel.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -46,21 +47,26 @@ #define ARPHRD_RAWIP ARPHRD_NONE #endif -#ifdef CONFIG_PINCTRL_IPQ807x -#define CONFIG_QCA_NSS_DRV +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,2,0 )) +static bool netdev_is_rx_handler_busy(struct net_device *dev) +{ + ASSERT_RTNL(); + return dev && rtnl_dereference(dev->rx_handler); +} #endif -#if 1//def CONFIG_QCA_NSS_DRV -#define _RMNET_NSS_H_ -#define _RMENT_NSS_H_ struct rmnet_nss_cb { - int (*nss_create)(struct net_device *dev); - int (*nss_free)(struct net_device *dev); - int (*nss_tx)(struct sk_buff *skb); + int (*nss_create)(struct net_device *dev); + int (*nss_free)(struct net_device *dev); + int (*nss_tx)(struct sk_buff *skb); }; -static struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; -#ifdef CONFIG_QCA_NSS_DRV -static uint qca_nss_enabled; +static struct rmnet_nss_cb __read_mostly *nss_cb = NULL; +#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) +#ifdef CONFIG_RMNET_DATA +#define CONFIG_QCA_NSS_DRV +/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ +/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */ +extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; #endif #endif @@ -92,6 +98,22 @@ enum rmnet_map_v5_header_type { RMNET_MAP_HEADER_TYPE_ENUM_LENGTH }; +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + /* Main QMAP header */ struct rmnet_map_header { u8 pad_len:6; @@ -110,6 +132,24 @@ struct rmnet_map_v5_csum_header { __be16 reserved; } __aligned(1); +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u8 reserved2; + u8 ip_family:2; + u8 reserved:6; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + struct mhi_mbim_hdr { struct usb_cdc_ncm_nth16 nth16; struct usb_cdc_ncm_ndp16 ndp16; @@ -239,6 +279,13 @@ typedef struct { u32 rx_max; } MHI_MBIM_CTX; +enum mhi_net_type { + MHI_NET_UNKNOW, + MHI_NET_RMNET, + MHI_NET_MBIM, + MHI_NET_ETHER +}; + //#define TS_DEBUG struct mhi_netdev { int alias; @@ -247,7 +294,7 @@ struct mhi_netdev { bool enabled; rwlock_t pm_lock; /* state change lock */ int (*rx_queue)(struct mhi_netdev *mhi_netdev, gfp_t gfp_t); - struct work_struct alloc_work; + struct delayed_work alloc_work; int wake; struct sk_buff_head tx_allocated; @@ -267,6 +314,7 @@ struct mhi_netdev { const char *interface_name; struct napi_struct napi; struct net_device *ndev; + enum mhi_net_type net_type; struct sk_buff *frag_skb; bool recycle_buf; @@ -283,6 +331,7 @@ struct mhi_netdev { u32 qmap_version; // 5 ~ QMAP V1, 9 ~ QMAP V5 u32 qmap_size; u32 link_state; + u32 flow_control; u32 dl_minimum_padding; #ifdef QUECTEL_BRIDGE_MODE @@ -292,6 +341,16 @@ struct mhi_netdev { #endif uint use_rmnet_usb; RMNET_INFO 
rmnet_info; + +#if defined(CONFIG_PINCTRL_IPQ5018) + u64 first_jiffy; + u64 bytes_received_1; + u64 bytes_received_2; + u32 cntfrq_per_msec; + bool mhi_rate_control; +#endif + + u32 rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH]; }; struct mhi_netdev_priv { @@ -322,6 +381,7 @@ struct qmap_priv { uint bridge_ipv4; unsigned char bridge_mac[ETH_ALEN]; #endif + uint use_qca_nss; }; static struct mhi_netdev *ndev_to_mhi(struct net_device *ndev) { @@ -389,8 +449,11 @@ static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint br __skb_pull(reply, skb_network_offset(reply)); reply->ip_summed = CHECKSUM_UNNECESSARY; reply->pkt_type = PACKET_HOST; - +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0) netif_rx_ni(reply); +#else + netif_rx(reply); +#endif } return 1; } @@ -468,18 +531,21 @@ static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *att static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = simple_strtoul(buf, NULL, 0); -#ifdef CONFIG_QCA_NSS_DRV - if (qca_nss_enabled) + if (ndev->type != ARPHRD_ETHER) { + if (bridge_mode) + netdev_err(ndev, "netdevice is not ARPHRD_ETHER\n"); return count; -#endif + } + if (is_qmap_netdev(ndev)) { struct qmap_priv *priv = netdev_priv(ndev); - priv->bridge_mode = simple_strtoul(buf, NULL, 0); + priv->bridge_mode = bridge_mode; } else { struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); - mhi_netdev->bridge_mode = simple_strtoul(buf, NULL, 0); + mhi_netdev->bridge_mode = bridge_mode; } return count; @@ -546,6 +612,9 @@ static struct sk_buff * add_mbim_hdr(struct sk_buff *skb, u8 mux_id) { u16 tci = mux_id - QUECTEL_QMAP_MUX_ID; unsigned int skb_len = skb->len; + if (qmap_mode > 1) + tci += 1; //rmnet_mhi0.X map to session X + if (skb_headroom(skb) < sizeof(struct mhi_mbim_hdr)) { printk("skb_headroom small! 
headroom is %u, need %zd\n", skb_headroom(skb), sizeof(struct mhi_mbim_hdr));
 		return NULL;
@@ -655,6 +724,117 @@ static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
 	return skb;
 }
 
+static void rmnet_map_send_ack(struct mhi_netdev *pQmapDev,
+			unsigned char type,
+			struct rmnet_map_header *map_header)
+{
+	struct rmnet_map_control_command *cmd;
+	struct sk_buff *skb;
+	size_t skb_len = sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_control_command);
+
+	skb = alloc_skb(skb_len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb_put(skb, skb_len);
+	memcpy(skb->data, map_header, skb_len);
+	cmd = (struct rmnet_map_control_command *)(skb->data + sizeof(struct rmnet_map_header));
+	cmd->cmd_type = type & 0x03;
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = pQmapDev->ndev;
+	dev_queue_xmit(skb);
+}
+
+static int rmnet_data_vnd_do_flow_control(struct net_device *dev,
+			uint32_t map_flow_id,
+			uint16_t v4_seq,
+			uint16_t v6_seq,
+			int enable)
+{
+	//TODO
+	return 0;
+}
+
+static uint8_t rmnet_map_do_flow_control(struct mhi_netdev *pQmapDev,
+			struct rmnet_map_header *map_header,
+			int enable) {
+	struct net_device *ndev = pQmapDev->ndev;
+	struct rmnet_map_control_command *cmd;
+	struct net_device *vnd;
+	uint8_t mux_id;
+	uint16_t ip_family;
+	uint16_t fc_seq;
+	uint32_t qos_id;
+	int r;
+
+	cmd = (struct rmnet_map_control_command *)(map_header + 1);
+
+	mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
+	if (mux_id >= pQmapDev->qmap_mode) {
+		netdev_info(ndev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	}
+
+	vnd = pQmapDev->mpQmapNetDev[mux_id];
+	if (vnd == NULL) {
+		netdev_info(ndev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	}
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	if (enable)
+		pQmapDev->flow_control |= (1 << mux_id);
+	else
+		pQmapDev->flow_control &= ~(1 << mux_id);
+	/* Ignore the ip family and pass the sequence number for both v4 and v6
+	 * sequence.
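+	 * (editor's note: hence the call below passes fc_seq as both the
+	 *  v4 and v6 sequence numbers.)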
+	 * User space does not support creating dedicated flows for
+	 * the 2 protocols */
+	r = rmnet_data_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
+	netdev_dbg(vnd, "qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
+		qos_id, ip_family & 3, fc_seq, enable);
+
+	return RMNET_MAP_COMMAND_ACK;
+}
+
+static void rmnet_data_map_command(struct mhi_netdev *pQmapDev,
+			struct rmnet_map_header *map_header) {
+	struct net_device *ndev = pQmapDev->ndev;
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	cmd = (struct rmnet_map_control_command *)(map_header + 1);
+	command_name = cmd->command_name;
+
+	if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+		pQmapDev->rmnet_map_command_stats[command_name]++;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_map_do_flow_control(pQmapDev, map_header, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_map_do_flow_control(pQmapDev, map_header, 0);
+		break;
+
+	default:
+		pQmapDev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+		netdev_info(ndev, "Unsupported MAP command: %d", command_name);
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		break;
+	}
+
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_map_send_ack(pQmapDev, rc, map_header);
+
+	return;
+}
+
 #ifndef MHI_NETDEV_ONE_CARD_MODE
 static void rmnet_vnd_upate_rx_stats(struct net_device *net,
 		unsigned rx_packets, unsigned rx_bytes) {
@@ -663,8 +843,13 @@ static void rmnet_vnd_upate_rx_stats(struct net_device *net,
 	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
 
 	u64_stats_update_begin(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
 	stats64->rx_packets += rx_packets;
 	stats64->rx_bytes += rx_bytes;
+#else
+	u64_stats_add(&stats64->rx_packets, rx_packets);
+	u64_stats_add(&stats64->rx_bytes,rx_bytes);
+#endif
 	u64_stats_update_end(&stats64->syncp);
 #else
 	priv->self_dev->stats.rx_packets += rx_packets;
@@ -679,8 +864,13 @@ static void rmnet_vnd_upate_tx_stats(struct net_device *net,
 	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
 
 	u64_stats_update_begin(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
 	stats64->tx_packets += tx_packets;
 	stats64->tx_bytes += tx_bytes;
+#else
+	u64_stats_add(&stats64->tx_packets, tx_packets);
+	u64_stats_add(&stats64->tx_bytes,tx_bytes);
+#endif
 	u64_stats_update_end(&stats64->syncp);
 #else
 	net->stats.rx_packets += tx_packets;
@@ -694,12 +884,10 @@ static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net,
 	struct qmap_priv *dev = netdev_priv(net);
 	unsigned int start;
 	int cpu;
-	struct rmnet_nss_cb *nss_cb;
 
 	netdev_stats_to_stats64(stats, &net->stats);
 
-	nss_cb = rcu_dereference(rmnet_nss_callbacks);
-	if (nss_cb) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats
+	if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats
 		stats->rx_packets = 0;
 		stats->rx_bytes = 0;
 	}
@@ -713,10 +901,17 @@ static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net,
 		do {
 			start = u64_stats_fetch_begin_irq(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
 			rx_packets = stats64->rx_packets;
 			rx_bytes = stats64->rx_bytes;
 			tx_packets = stats64->tx_packets;
 			tx_bytes = stats64->tx_bytes;
+#else
			rx_packets = u64_stats_read(&stats64->rx_packets);
+			rx_bytes = u64_stats_read(&stats64->rx_bytes);
+			tx_packets = u64_stats_read(&stats64->tx_packets);
+			tx_bytes = u64_stats_read(&stats64->tx_bytes);
+#endif
 		} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
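		/* (editor's sketch of the seqcount pattern used above: the
		 * reader retries if a writer raced, so the 64-bit counters
		 * read tear-free even on 32-bit SMP kernels.
		 *   writer:  u64_stats_update_begin(&s->syncp);
		 *            u64_stats_add(&s->rx_bytes, n);
		 *            u64_stats_update_end(&s->syncp);
		 *   reader:  do { start = u64_stats_fetch_begin_irq(&s->syncp);
		 *                 bytes = u64_stats_read(&s->rx_bytes);
		 *            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
		 */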
stats->rx_packets += rx_packets; @@ -794,7 +989,7 @@ static int rmnet_vnd_open(struct net_device *dev) static int rmnet_vnd_stop(struct net_device *pNet) { - netif_carrier_off(pNet); + netif_carrier_off(pNet); return 0; } @@ -803,6 +998,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, { int err; struct qmap_priv *priv = netdev_priv(pNet); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(priv->real_dev); int skb_len = skb->len; if (netif_queue_stopped(priv->real_dev)) { @@ -816,8 +1012,8 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, #ifdef QUECTEL_BRIDGE_MODE if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) { - dev_kfree_skb_any (skb); - return NETDEV_TX_OK; + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; } #endif @@ -827,7 +1023,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, } } //printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); - if (mhi_mbim_enabled) { + if (mhi_netdev->net_type == MHI_NET_MBIM) { if (add_mbim_hdr(skb, priv->mux_id) == NULL) { dev_kfree_skb_any (skb); return NETDEV_TX_OK; @@ -878,7 +1074,7 @@ static void rmnet_vnd_rawip_setup(struct net_device *rmnet_dev) /* Raw IP mode */ rmnet_dev->header_ops = NULL; /* No header */ //for Qualcomm's NSS, must set type as ARPHRD_RAWIP, or NSS performace is very bad. - rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->type = ARPHRD_RAWIP; // do not support moify mac, for dev_set_mac_address() need ARPHRD_ETHER rmnet_dev->hard_header_len = 0; //for Qualcomm's SFE, do not add IFF_POINTOPOINT to type, or SFE donot work. rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); @@ -892,12 +1088,13 @@ static const struct net_device_ops rmnet_vnd_ops = { .ndo_get_stats64 = rmnet_vnd_get_stats64, #endif .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, }; -static rx_handler_result_t rmnet_vnd_rx_handler(struct sk_buff **pskb) +static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; - struct rmnet_nss_cb *nss_cb; if (!skb) return RX_HANDLER_CONSUMED; @@ -914,7 +1111,6 @@ static rx_handler_result_t rmnet_vnd_rx_handler(struct sk_buff **pskb) return RX_HANDLER_PASS; } - nss_cb = rcu_dereference(rmnet_nss_callbacks); if (nss_cb) { nss_cb->nss_tx(skb); return RX_HANDLER_CONSUMED; @@ -1008,12 +1204,12 @@ static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in) goto error; } - if (tci != 0) { + if ((qmap_mode == 1 && tci != 0) || (qmap_mode > 1 && tci > qmap_mode)) { MSG_ERR("unsupported tci %d by now\n", tci); goto error; } - qmap_net = pQmapDev->mpQmapNetDev[0]; + qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 
0 : tci - 1]; dpe16 = ndp16->dpe16; @@ -1033,15 +1229,32 @@ static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in) qmap_skb = netdev_alloc_skb(qmap_net, skb_len); if (!qmap_skb) { - MSG_ERR("skb_clone fail\n"); + mhi_netdev->stats.alloc_failed++; + //MSG_ERR("skb_clone fail\n"); //do not print in softirq goto error; } switch (skb_in->data[offset] & 0xf0) { case 0x40: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[offset]); + if (ip4h->protocol == IPPROTO_ICMP) { + qmap_skb->cb[0] = 1; + } + } +#endif qmap_skb->protocol = htons(ETH_P_IP); break; case 0x60: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[offset]); + if (ip6h->nexthdr == NEXTHDR_ICMP) { + qmap_skb->cb[0] = 1; + } + } +#endif qmap_skb->protocol = htons(ETH_P_IPV6); break; default: @@ -1107,6 +1320,7 @@ static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in) int skb_len; __be16 protocol; int mux_id; + int skip_nss = 0; if (map_header->next_hdr) { ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); @@ -1134,15 +1348,31 @@ static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in) } if (map_header->cd_bit) { - netdev_info(ndev, "skip qmap command packet\n"); + rmnet_data_map_command(pQmapDev, map_header); goto skip_pkt; } switch (skb_in->data[hdr_size] & 0xf0) { case 0x40: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]); + if (ip4h->protocol == IPPROTO_ICMP) { + skip_nss = 1; + } + } +#endif protocol = htons(ETH_P_IP); break; case 0x60: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[hdr_size]); + if (ip6h->nexthdr == NEXTHDR_ICMP) { + skip_nss = 1; + } + } +#endif protocol = htons(ETH_P_IPV6); break; default: @@ -1172,8 +1402,9 @@ static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in) } if (qmap_skb == NULL) { - netdev_info(ndev, "fail to alloc skb, pkt_len = %d\n", skb_len); - goto error_pkt;; + pQmapDev->stats.alloc_failed++; + //netdev_info(ndev, "fail to alloc skb, pkt_len = %d\n", skb_len); //do not print in softirq + goto error_pkt; } skb_reset_transport_header(qmap_skb); @@ -1182,6 +1413,9 @@ static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in) skb_set_mac_header(qmap_skb, 0); qmap_skb->protocol = protocol; + if(skip_nss) + qmap_skb->cb[0] = 1; + if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD && ul_header->csum_valid_required) { #if 0 //TODO @@ -1220,7 +1454,7 @@ error_pkt: static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; - void *dev; + struct mhi_netdev *mhi_netdev; if (!skb) goto done; @@ -1234,22 +1468,21 @@ static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) WARN_ON(1); return RX_HANDLER_PASS; } - /* when open hyfi function, run cm will make system crash */ + /* when open hyfi function, run cm will make system crash */ //dev = rcu_dereference(skb->dev->rx_handler_data); - dev = (void *)ndev_to_mhi(skb->dev); + mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(skb->dev); - if (dev == NULL) { + if (mhi_netdev == NULL) { WARN_ON(1); return RX_HANDLER_PASS; } - if (mhi_mbim_enabled) - rmnet_mbim_rx_handler(dev, skb); + if (mhi_netdev->net_type == MHI_NET_MBIM) + rmnet_mbim_rx_handler(mhi_netdev, skb); else - rmnet_qmi_rx_handler(dev, skb); + rmnet_qmi_rx_handler(mhi_netdev, skb); if (!skb_cloned(skb)) { - struct mhi_netdev *mhi_netdev = (struct 
mhi_netdev *)dev; if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) { skb->data = skb->head; skb_reset_tail_pointer(skb); @@ -1271,9 +1504,8 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev struct net_device *qmap_net; struct qmap_priv *priv; int err; - struct rmnet_nss_cb *nss_cb; - int rawip_for_nss = 1; - + int use_qca_nss = !!nss_cb; + u8 maddr[ETH_ALEN]; qmap_net = alloc_etherdev(sizeof(*priv)); if (!qmap_net) return NULL; @@ -1287,7 +1519,10 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev priv->qmap_version = pQmapDev->qmap_version; priv->mux_id = mux_id; sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1); - memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN); + ether_addr_copy(maddr, real_dev->dev_addr); + maddr[5]= offset_id + 1; + ether_addr_copy((u8*)qmap_net->dev_addr, maddr); + //eth_random_addr(qmap_net->dev_addr); #if defined(MHI_NETDEV_STATUS64) priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!priv->stats64) @@ -1298,7 +1533,7 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev priv->bridge_mode = !!(pQmapDev->bridge_mode & BIT(offset_id)); qmap_net->sysfs_groups[0] = &pcie_mhi_qmap_sysfs_attr_group; if (priv->bridge_mode) - rawip_for_nss = 0; + use_qca_nss = 0; #endif priv->agg_skb = NULL; @@ -1308,17 +1543,18 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev INIT_WORK(&priv->agg_wq, rmnet_vnd_tx_agg_work); ktime_get_ts64(&priv->agg_time); spin_lock_init(&priv->agg_lock); + priv->use_qca_nss = 0; qmap_net->ethtool_ops = &rmnet_vnd_ethtool_ops; qmap_net->netdev_ops = &rmnet_vnd_ops; qmap_net->flags |= IFF_NOARP; qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); - nss_cb = rcu_dereference(rmnet_nss_callbacks); - if (nss_cb && rawip_for_nss) { + if (nss_cb && use_qca_nss) { rmnet_vnd_rawip_setup(qmap_net); } - else if (mhi_mbim_enabled) { + + if (pQmapDev->net_type == MHI_NET_MBIM) { qmap_net->needed_headroom = sizeof(struct mhi_mbim_hdr); } @@ -1333,18 +1569,17 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev netif_device_attach (qmap_net); netif_carrier_off(qmap_net); - nss_cb = rcu_dereference(rmnet_nss_callbacks); - if (nss_cb && rawip_for_nss) { + if (nss_cb && use_qca_nss) { int rc = nss_cb->nss_create(qmap_net); WARN_ON(rc); if (rc) { - RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); /* Log, but don't fail the device creation */ netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc); } else { + priv->use_qca_nss = 1; netdev_info(qmap_net, "NSS context created\n"); rtnl_lock(); - netdev_rx_handler_register(qmap_net, rmnet_vnd_rx_handler, NULL); + netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL); rtnl_unlock(); } } @@ -1357,10 +1592,8 @@ out_free_newdev: } static void rmnet_vnd_unregister_device(struct net_device *qmap_net) { - struct rmnet_nss_cb *nss_cb; struct qmap_priv *priv; unsigned long flags; - int rawip_for_nss = 1; pr_info("%s(%s)\n", __func__, qmap_net->name); netif_carrier_off(qmap_net); @@ -1377,15 +1610,9 @@ static void rmnet_vnd_unregister_device(struct net_device *qmap_net) { } spin_unlock_irqrestore(&priv->agg_lock, flags); -#ifdef QUECTEL_BRIDGE_MODE - if (priv->bridge_mode) - rawip_for_nss = 0; -#endif - nss_cb = rcu_dereference(rmnet_nss_callbacks); - if (nss_cb && rawip_for_nss) { + if (nss_cb && priv->use_qca_nss) { rtnl_lock(); - if (netdev_is_rx_handler_busy(qmap_net)) - netdev_rx_handler_unregister(qmap_net); + 
netdev_rx_handler_unregister(qmap_net);
 		rtnl_unlock();
 		nss_cb->nss_free(qmap_net);
 	}
@@ -1499,8 +1726,14 @@ static void mhi_netdev_upate_rx_stats(struct mhi_netdev *mhi_netdev,
 	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
 
 	u64_stats_update_begin(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
 	stats64->rx_packets += rx_packets;
 	stats64->rx_bytes += rx_bytes;
+#else
+	u64_stats_add(&stats64->rx_packets, rx_packets);
+	u64_stats_add(&stats64->rx_bytes,rx_bytes);
+#endif
+
 	u64_stats_update_end(&stats64->syncp);
 #else
 	mhi_netdev->ndev->stats.rx_packets += rx_packets;
@@ -1514,8 +1747,14 @@ static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev,
 	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
 
 	u64_stats_update_begin(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
 	stats64->tx_packets += tx_packets;
 	stats64->tx_bytes += tx_bytes;
+#else
+	u64_stats_add(&stats64->tx_packets, tx_packets);
+	u64_stats_add(&stats64->tx_bytes,tx_bytes);
+#endif
+
 	u64_stats_update_end(&stats64->syncp);
 #else
 	mhi_netdev->ndev->stats.tx_packets += tx_packets;
@@ -1523,6 +1762,26 @@ static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev,
 #endif
 }
 
+static __be16 mhi_netdev_ip_type_trans(u8 data)
+{
+	__be16 protocol = 0;
+
+	/* determine L3 protocol */
+	switch (data & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		protocol = htons(ETH_P_MAP);
+		break;
+	}
+
+	return protocol;
+}
+
 static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
 {
 	u32 cur_mru = mhi_netdev->mru;
@@ -1536,7 +1795,7 @@ static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
 	for (i = 0; i < no_tre; i++) {
 		skb = skb_dequeue(&mhi_netdev->rx_allocated);
 		if (!skb) {
-			skb = alloc_skb(32+cur_mru, gfp_t);
+			skb = alloc_skb(/*32+*/cur_mru, gfp_t);
 			if (skb)
 				mhi_netdev->stats.rx_allocated++;
 		}
@@ -1555,7 +1814,7 @@ static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
 		skb_priv->size = cur_mru;
 		skb_priv->bind_netdev = mhi_netdev;
 		skb->dev = mhi_netdev->ndev;
-		skb_reserve(skb, 32); //for ethernet header
+		//skb_reserve(skb, 32); //for ethernet header
 		spin_lock_bh(&mhi_netdev->rx_lock);
 		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
@@ -1585,7 +1844,7 @@ error_queue:
 static void mhi_netdev_alloc_work(struct work_struct *work)
 {
 	struct mhi_netdev *mhi_netdev = container_of(work, struct mhi_netdev,
-						     alloc_work);
+						     alloc_work.work);
 	/* sleep about 1 sec and retry, that should be enough time
 	 * for system to reclaim freed memory back.
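	 * (editor's note: with the conversion to delayed_work in this patch,
	 * -ENOMEM no longer msleep()s inside the worker; the retry is simply
	 * rescheduled ~20 ms out via schedule_delayed_work(), per the next
	 * hunk.)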
*/ @@ -1598,6 +1857,8 @@ static void mhi_netdev_alloc_work(struct work_struct *work) ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); /* sleep and try again */ if (ret == -ENOMEM) { + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + return; msleep(sleep_ms); retry--; } @@ -1638,12 +1899,13 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget) return 0; } + if (mhi_netdev->net_type == MHI_NET_MBIM || mhi_netdev->net_type == MHI_NET_RMNET) { while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) { #ifdef MHI_NETDEV_ONE_CARD_MODE int recly_skb = 0; mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len); - if (mhi_mbim_enabled) + if (mhi_netdev->net_type == MHI_NET_MBIM) rmnet_mbim_rx_handler(mhi_netdev, skb); else rmnet_qmi_rx_handler(mhi_netdev, skb); @@ -1666,14 +1928,25 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget) netif_receive_skb(skb); #endif } + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) { + mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(skb->data[0]); + netif_receive_skb(skb); + } + } /* queue new buffers */ - ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC); - if (ret == -ENOMEM) { - MSG_LOG("out of tre, queuing bg worker\n"); - mhi_netdev->stats.alloc_failed++; - schedule_work(&mhi_netdev->alloc_work); - } + if (!delayed_work_pending(&mhi_netdev->alloc_work)) { + ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC); + if (ret == -ENOMEM) { + //MSG_LOG("out of tre, queuing bg worker\n"); //do not print in softirq + mhi_netdev->stats.alloc_failed++; + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + } + } /* complete work if # of packet processed less than allocated budget */ if (rx_work < budget) @@ -1715,7 +1988,7 @@ static int mhi_netdev_change_mtu(struct net_device *ndev, int new_mtu) return 0; } -static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; @@ -1738,24 +2011,25 @@ static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef QUECTEL_BRIDGE_MODE if (mhi_netdev->bridge_mode && bridge_mode_tx_fixup(dev, skb, mhi_netdev->bridge_ipv4, mhi_netdev->bridge_mac) == NULL) { - dev_kfree_skb_any (skb); - return NETDEV_TX_OK; + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; } #endif - if (skb_pull(skb, ETH_HLEN) == NULL) { + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && (skb_pull(skb, ETH_HLEN) == NULL)) { dev_kfree_skb_any (skb); return NETDEV_TX_OK; } } - if (mhi_mbim_enabled) { + if (mhi_netdev->net_type == MHI_NET_MBIM) { if (add_mbim_hdr(skb, QUECTEL_QMAP_MUX_ID) == NULL) { dev_kfree_skb_any (skb); return NETDEV_TX_OK; } } - else { + else if (mhi_netdev->net_type == MHI_NET_RMNET) { if (mhi_netdev->qmap_version == 5) { add_qhdr(skb, QUECTEL_QMAP_MUX_ID); } @@ -1768,35 +2042,40 @@ static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - if (skb->protocol != htons(ETH_P_MAP)) { + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && skb->protocol != htons(ETH_P_MAP)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } #endif - if (mhi_mbim_enabled) { + if (mhi_netdev->net_type == MHI_NET_MBIM) { struct mhi_mbim_hdr *mhdr = 
(struct mhi_mbim_hdr *)skb->data; mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->mbim_ctx.tx_seq++); } - res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, - MHI_EOT); - if (res) { - int i = 0; + if (unlikely(mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) < 16)) { + u32 i = 0; for (i = 0; i < mhi_netdev->qmap_mode; i++) { struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[i]; if (qmap_net) { netif_stop_queue(qmap_net); } } - MSG_VERB("Failed to queue with reason:%d\n", res); + netif_stop_queue(dev); - res = NETDEV_TX_BUSY; + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (unlikely(res)) { + dev_kfree_skb_any(skb); + dev->stats.tx_errors++; } MSG_VERB("Exited\n"); - return res; + return NETDEV_TX_OK; } #if defined(MHI_NETDEV_STATUS64) @@ -1818,10 +2097,17 @@ static struct rtnl_link_stats64 * _mhi_netdev_get_stats64(struct net_device *nde do { start = u64_stats_fetch_begin_irq(&stats64->syncp); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0) rx_packets = stats64->rx_packets; rx_bytes = stats64->rx_bytes; tx_packets = stats64->tx_packets; tx_bytes = stats64->tx_bytes; +#else + rx_packets = u64_stats_read(&stats64->rx_packets); + rx_bytes = u64_stats_read(&stats64->rx_bytes); + tx_packets = u64_stats_read(&stats64->tx_packets); + tx_bytes = u64_stats_read(&stats64->tx_bytes); +#endif } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); stats->rx_packets += rx_packets; @@ -1892,6 +2178,15 @@ static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return rc; } +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,14,0 )) //b9067f5dc4a07c8e24e01a1b277c6722d91be39e +#define use_ndo_siocdevprivate +#endif +#ifdef use_ndo_siocdevprivate +static int qmap_ndo_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { + return qmap_ndo_do_ioctl(dev, ifr, cmd); +} +#endif + static const struct net_device_ops mhi_netdev_ops_ip = { .ndo_open = mhi_netdev_open, .ndo_start_xmit = mhi_netdev_xmit, @@ -1903,6 +2198,9 @@ static const struct net_device_ops mhi_netdev_ops_ip = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = qmap_ndo_do_ioctl, +#ifdef use_ndo_siocdevprivate + .ndo_siocdevprivate = qmap_ndo_siocdevprivate, +#endif }; static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info) @@ -1973,8 +2271,8 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) mhi_netdev->alias); #endif - snprintf(ifname, sizeof(ifname), "%s%%d", - mhi_netdev->interface_name); + snprintf(ifname, sizeof(ifname), "%s%d", + mhi_netdev->interface_name, mhi_netdev->mhi_dev->mhi_cntrl->cntrl_idx); rtnl_lock(); #ifdef NET_NAME_PREDICTABLE @@ -2001,14 +2299,21 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); mhi_netdev_priv->mhi_netdev = mhi_netdev; + if (mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) { #ifdef QUECTEL_BRIDGE_MODE mhi_netdev->bridge_mode = bridge_mode; #endif mhi_netdev->ndev->sysfs_groups[0] = &pcie_mhi_sysfs_attr_group; + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->ndev->mtu = mhi_netdev->mru; + } rtnl_unlock(); - - netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, - mhi_netdev_poll, poll_weight); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) + netif_napi_add_weight(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight); +#else + netif_napi_add(mhi_netdev->ndev, 
&mhi_netdev->napi, mhi_netdev_poll, poll_weight); +#endif ret = register_netdev(mhi_netdev->ndev); if (ret) { MSG_ERR("Network device registration failed\n"); @@ -2026,7 +2331,7 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); if (ret) - schedule_work(&mhi_netdev->alloc_work); + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); napi_enable(&mhi_netdev->napi); @@ -2064,7 +2369,8 @@ static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, if (likely(mhi_result->transaction_status == 0)) { mhi_netdev_upate_tx_stats(mhi_netdev, entry->packets, entry->length); - if (netif_queue_stopped(ndev) && mhi_netdev->enabled) { + if (netif_queue_stopped(ndev) && mhi_netdev->enabled + && mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 32) { int i = 0; netif_wake_queue(ndev); @@ -2091,12 +2397,12 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, struct sk_buff *skb = mhi_result->buf_addr; struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); - if (skb_priv->bind_netdev != mhi_netdev) { + if (unlikely(skb_priv->bind_netdev != mhi_netdev)) { MSG_ERR("%s error!\n", __func__); return; } - if (mhi_result->transaction_status) { + if (unlikely(mhi_result->transaction_status)) { if (mhi_result->transaction_status != -ENOTCONN) MSG_ERR("%s transaction_status = %d!\n", __func__, mhi_result->transaction_status); skb_priv->bind_netdev = NULL; @@ -2104,6 +2410,65 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, return; } +#if defined(CONFIG_PINCTRL_IPQ5018) + if (likely(mhi_netdev->mhi_rate_control)) { + u32 time_interval = 0; + u32 time_difference = 0; + u32 cntfrq; + u64 second_jiffy; + u64 bytes_received_2; + struct net_device *ndev = mhi_netdev->ndev; + + if (mhi_netdev->first_jiffy) { + second_jiffy = arch_counter_get_cntvct(); + bytes_received_2 = mhi_netdev->bytes_received_2; + if ((second_jiffy > mhi_netdev->first_jiffy) && + (bytes_received_2 > mhi_netdev->bytes_received_1)) { + + time_difference = (second_jiffy - mhi_netdev->first_jiffy); + time_interval = (time_difference / mhi_netdev->cntfrq_per_msec); + + /* 1.8Gbps is 225,000,000bytes per second */ + /* We wills sample at 100ms interval */ + /* For 1ms 225000 bytes */ + /* For 100ms 22,500,000 bytes */ + /* For 10ms 2,250,000 bytes */ + + /* 1.7Gbps is 212,500,000bytes per second */ + /* We wills sample at 100ms interval */ + /* For 1ms 212500 bytes */ + /* For 100ms 21,250,000 bytes */ + /* For 10ms 2,125,000 bytes */ + + /* 1.6Gbps is 200,000,000bytes per second */ + /* We wills sample at 100ms interval */ + /* For 1ms 200,000 bytes */ + /* For 100ms 20,000,000 bytes */ + /* For 10ms 2,000,000 bytes */ + + if (time_interval < 100) { + if ((bytes_received_2 - mhi_netdev->bytes_received_1) > 22500000) { + ndev->stats.rx_dropped ++; + dev_kfree_skb(skb); + return; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + mhi_netdev->first_jiffy = arch_counter_get_cntvct(); + cntfrq = arch_timer_get_cntfrq(); + mhi_netdev->cntfrq_per_msec = cntfrq / 1000; + } + mhi_netdev->bytes_received_2 += mhi_result->bytes_xferd; + } +#endif + #if 0 { static size_t bytes_xferd = 0; @@ -2162,8 +2527,21 @@ static int mhi_netdev_init_debugfs_states_show(struct seq_file *m, void *d) 
 		   mhi_netdev->rx_allocated.qlen);
 
 	seq_printf(m,
-		   "netif_queue_stopped:%d\n",
-		   netif_queue_stopped(mhi_netdev->ndev));
+		   "netif_queue_stopped:%d, link_state:0x%x, flow_control:0x%x\n",
+		   netif_queue_stopped(mhi_netdev->ndev), mhi_netdev->link_state, mhi_netdev->flow_control);
+
+	seq_printf(m,
+		   "rmnet_map_command_stats: %u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+		   mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_NONE],
+		   mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_DISABLE],
+		   mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_ENABLE],
+		   mhi_netdev->rmnet_map_command_stats[3],
+		   mhi_netdev->rmnet_map_command_stats[4],
+		   mhi_netdev->rmnet_map_command_stats[5],
+		   mhi_netdev->rmnet_map_command_stats[6],
+		   mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_START],
+		   mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_END],
+		   mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]);
 
 #ifdef TS_DEBUG
 	seq_printf(m,
@@ -2225,7 +2603,6 @@ static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
 	char node_name[32];
 	int i;
 	const umode_t mode = 0600;
-	struct dentry *file;
 	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
 	struct dentry *dentry = mhi_netdev_debugfs_dentry;
@@ -2278,22 +2655,18 @@ static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
 	if (IS_ERR_OR_NULL(mhi_netdev->dentry))
 		return;
 
-	file = debugfs_create_u32("msg_lvl", mode, mhi_netdev->dentry,
+	debugfs_create_u32("msg_lvl", mode, mhi_netdev->dentry,
 			   (u32 *)&mhi_netdev->msg_lvl);
-	if (IS_ERR_OR_NULL(file))
-		return;
 
 	/* Add debug stats table */
 	for (i = 0; debugfs_table[i].name; i++) {
-		file = debugfs_create_u32(debugfs_table[i].name, mode,
+		debugfs_create_u32(debugfs_table[i].name, mode,
 				   mhi_netdev->dentry,
 				   debugfs_table[i].ptr);
-		if (IS_ERR_OR_NULL(file))
-			return;
 	}
 
 	debugfs_create_file("reset", mode, mhi_netdev->dentry, mhi_netdev,
-			    &mhi_netdev_debugfs_trigger_reset_fops);
+		&mhi_netdev_debugfs_trigger_reset_fops);
 	debugfs_create_file("states", 0444, mhi_netdev->dentry, mhi_netdev,
 			    &mhi_netdev_debugfs_state_ops);
 }
@@ -2361,7 +2734,7 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
 	free_percpu(mhi_netdev->stats64);
 #endif
 	free_netdev(mhi_netdev->ndev);
-	flush_work(&mhi_netdev->alloc_work);
+	flush_delayed_work(&mhi_netdev->alloc_work);
 
 	if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
 		debugfs_remove_recursive(mhi_netdev->dentry);
@@ -2378,16 +2751,32 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 	if (!mhi_netdev)
 		return -ENOMEM;
 
+	if (!strcmp(id->chan, "IP_HW0")) {
+		if (mhi_mbim_enabled)
+			mhi_netdev->net_type = MHI_NET_MBIM;
+		else
+			mhi_netdev->net_type = MHI_NET_RMNET;
+	}
+	else if (!strcmp(id->chan, "IP_SW0")) {
+		mhi_netdev->net_type = MHI_NET_ETHER;
+	}
+	else {
+		return -EINVAL;
+	}
+
 	mhi_netdev->alias = 0;
 	mhi_netdev->mhi_dev = mhi_dev;
 	mhi_device_set_devdata(mhi_dev, mhi_netdev);
 
-	mhi_netdev->mru = 0x4000;
-	if (mhi_mbim_enabled) {
+	mhi_netdev->mru = 15360; // /etc/data/qnicorn_config.xml dataformat_agg_dl_size 15*1024
+	if (mhi_netdev->net_type == MHI_NET_MBIM) {
 		mhi_netdev->mru = ncmNTBParams.dwNtbInMaxSize;
 		mhi_netdev->mbim_ctx.rx_max = mhi_netdev->mru;
 	}
+	else if (mhi_netdev->net_type == MHI_NET_ETHER) {
+		mhi_netdev->mru = 8*1024;
+	}
 	mhi_netdev->qmap_size = mhi_netdev->mru;
 
 #if defined(MHI_NETDEV_STATUS64)
@@ -2399,23 +2788,31 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 	if (!strcmp(id->chan, "IP_HW0"))
 		mhi_netdev->interface_name = "rmnet_mhi";
 	else if (!strcmp(id->chan, "IP_SW0"))
-		mhi_netdev->interface_name = "pcie_swip";
+		mhi_netdev->interface_name = "mhi_swip";
 	else
 		mhi_netdev->interface_name = id->chan;
 
 	mhi_netdev->qmap_mode = qmap_mode;
 	mhi_netdev->qmap_version = 5;
 	mhi_netdev->use_rmnet_usb = 1;
-	if (mhi_dev->dev_id == 0x0306) {
+	if ((mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0306)
+		|| (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0308)
+		|| (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x1004)
+	) {
 		mhi_netdev->qmap_version = 9;
 	}
+	if (mhi_netdev->net_type == MHI_NET_ETHER) {
+		mhi_netdev->qmap_mode = 1;
+		mhi_netdev->qmap_version = 0;
+		mhi_netdev->use_rmnet_usb = 0;
+	}
 	rmnet_info_set(mhi_netdev, &mhi_netdev->rmnet_info);
 
 	mhi_netdev->rx_queue = mhi_netdev_alloc_skb;
 
 	spin_lock_init(&mhi_netdev->rx_lock);
 	rwlock_init(&mhi_netdev->pm_lock);
-	INIT_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work);
+	INIT_DELAYED_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work);
 	skb_queue_head_init(&mhi_netdev->qmap_chain);
 	skb_queue_head_init(&mhi_netdev->skb_chain);
 	skb_queue_head_init(&mhi_netdev->tx_allocated);
@@ -2432,7 +2829,11 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 
 	mhi_netdev_create_debugfs(mhi_netdev);
 
-	if (mhi_netdev->use_rmnet_usb) {
+	if (mhi_netdev->net_type == MHI_NET_ETHER) {
+		mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev;
+		netif_carrier_on(mhi_netdev->ndev);
+	}
+	else if (mhi_netdev->use_rmnet_usb) {
#ifdef MHI_NETDEV_ONE_CARD_MODE
 		mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev;
 		strcpy(mhi_netdev->rmnet_info.ifname[0], mhi_netdev->mpQmapNetDev[0]->name);
@@ -2457,6 +2858,10 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
 #endif
 	}
 
+#if defined(CONFIG_PINCTRL_IPQ5018)
+	mhi_netdev->mhi_rate_control = 1;
+#endif
+
 	return 0;
 }
@@ -2480,23 +2885,13 @@ static struct mhi_driver mhi_netdev_driver = {
 	}
 };
 
-#ifdef CONFIG_QCA_NSS_DRV
-static uint qca_nss_enabled = 1;
-module_param( qca_nss_enabled, uint, S_IRUGO);
-
-/*
-  EXTRA_CFLAGS="-I$(STAGING_DIR)/usr/include/qca-nss-drv $(EXTRA_CFLAGS)"
-  qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c
-*/
-#include "rmnet_nss.c"
-#endif
-
 int __init mhi_device_netdev_init(struct dentry *parent)
 {
-	RCU_INIT_POINTER(rmnet_nss_callbacks, NULL);
 #ifdef CONFIG_QCA_NSS_DRV
-	if (qca_nss_enabled)
-		rmnet_nss_init();
+	nss_cb = rcu_dereference(rmnet_nss_callbacks);
+	if (!nss_cb) {
+		printk(KERN_ERR "mhi_device_netdev_init: this driver must be loaded after '/etc/modules.d/42-rmnet-nss'\n");
+	}
 #endif
 
 	mhi_netdev_create_debugfs_dir(parent);
@@ -2506,10 +2901,6 @@ int __init mhi_device_netdev_init(struct dentry *parent)
 
 void mhi_device_netdev_exit(void)
 {
-#ifdef CONFIG_QCA_NSS_DRV
-	rmnet_nss_exit();
-#endif
-
 #ifdef CONFIG_DEBUG_FS
 	debugfs_remove_recursive(mhi_netdev_debugfs_dentry);
 #endif
diff --git a/quectel_MHI/src/devices/mhi_uci.c b/quectel_MHI/src/devices/mhi_uci.c
index 12e122b..dde90b2 100644
--- a/quectel_MHI/src/devices/mhi_uci.c
+++ b/quectel_MHI/src/devices/mhi_uci.c
@@ -8,6 +8,8 @@
 #include
 #include
 #include
+#include
+
 #if 1
 static inline void *ipc_log_context_create(int max_num_pages,
 	const char *modname, uint16_t user_version)
@@ -53,12 +55,13 @@ struct uci_dev {
 	struct mhi_device *mhi_dev;
 	const char *chan;
 	struct mutex mutex; /* sync open and close */
+	struct mutex r_mutex;
+	struct mutex w_mutex;
 	struct uci_chan ul_chan;
 	struct uci_chan dl_chan;
 	size_t mtu;
 	int ref_count;
 	bool enabled;
-	void *ipc_log;
 	unsigned rx_error;
 	unsigned nr_trb;
 	unsigned nr_trbs;
@@ -75,50 +78,22 @@ struct mhi_uci_drv {
 	dev_t dev_t;
 };
 
-enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
-
-#ifdef CONFIG_MHI_DEBUG
-
-#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE)
-#define MHI_UCI_IPC_LOG_PAGES (25)
-
-#else
-
-#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR)
-#define MHI_UCI_IPC_LOG_PAGES (1)
-
-#endif
-
-#ifdef CONFIG_MHI_DEBUG
+static uint uci_msg_lvl = MHI_MSG_LVL_ERROR;
+module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
 
 #define MSG_VERB(fmt, ...) do { \
-		if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
+		if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \
 			pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
-		if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_VERBOSE)) \
-			ipc_log_string(uci_dev->ipc_log, "[D][%s] " fmt, \
-				       __func__, ##__VA_ARGS__); \
 	} while (0)
 
-#else
-
-#define MSG_VERB(fmt, ...)
-
-#endif
-
 #define MSG_LOG(fmt, ...) do { \
-		if (msg_lvl <= MHI_MSG_LVL_INFO) \
+		if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \
 			pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
-		if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_INFO)) \
-			ipc_log_string(uci_dev->ipc_log, "[I][%s] " fmt, \
-				       __func__, ##__VA_ARGS__); \
 	} while (0)
 
 #define MSG_ERR(fmt, ...) do { \
-		if (msg_lvl <= MHI_MSG_LVL_ERROR) \
+		if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \
 			pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
-		if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_ERROR)) \
-			ipc_log_string(uci_dev->ipc_log, "[E][%s] " fmt, \
-				       __func__, ##__VA_ARGS__); \
 	} while (0)
 
 #define MAX_UCI_DEVICES (64)
@@ -185,7 +160,20 @@ static int mhi_queue_inbound(struct uci_dev *uci_dev)
 
 	return ret;
 }
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+#ifdef TCGETS2
+static int kernel_termios_to_user_termios_1(struct termios __user *u,
+					    struct ktermios *k)
+{
+	return copy_to_user(u, k, sizeof(struct termios));
+}
+static int user_termios_to_kernel_termios_1(struct ktermios *k,
+					    struct termios __user *u)
+{
+	return copy_from_user(k, u, sizeof(struct termios));
+}
+#endif
+#endif
 static long mhi_uci_ioctl(struct file *file,
 			  unsigned int cmd,
 			  unsigned long arg)
@@ -520,7 +508,7 @@ static ssize_t mhi_uci_read(struct file *file,
 			ret = -ERESTARTSYS;
 
 		if (ret) {
-			MSG_ERR("Failed to recycle element, ret=%d\n", ret);
+			MSG_ERR("Failed to recycle element for chan:%d, ret=%d\n", mhi_dev->ul_chan_id, ret);
 #if 0
 			kfree(uci_buf->data);
 #endif
@@ -540,6 +528,42 @@ read_error:
 	return ret;
 }
 
+static ssize_t mhi_uci_write_mutex(struct file *file,
+				   const char __user *buf,
+				   size_t count,
+				   loff_t *offp)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	int ret;
+
+	ret = mutex_lock_interruptible(&uci_dev->w_mutex); /* concurrent writes */
+	if (ret < 0)
+		return -ERESTARTSYS;
+
+	ret = mhi_uci_write(file, buf, count, offp);
+	mutex_unlock(&uci_dev->w_mutex);
+
+	return ret;
+}
+
+static ssize_t mhi_uci_read_mutex(struct file *file,
+				  char __user *buf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	int ret;
+
+	ret = mutex_lock_interruptible(&uci_dev->r_mutex); /* concurrent reads */
+	if (ret < 0)
+		return -ERESTARTSYS;
+
+	ret = mhi_uci_read(file, buf, count, ppos);
+	mutex_unlock(&uci_dev->r_mutex);
+
+	return ret;
+}
+
 static int mhi_uci_open(struct inode *inode, struct file *filp)
 {
 	struct uci_dev *uci_dev = NULL, *tmp_dev;
@@ -617,8 +641,8 @@ error_exit:
 static const struct file_operations mhidev_fops = {
 	.open = mhi_uci_open,
 	.release = mhi_uci_release,
-	.read = mhi_uci_read,
-	.write = mhi_uci_write,
+	.read = mhi_uci_read_mutex,
+	.write = mhi_uci_write_mutex,
 	.poll = mhi_uci_poll,
 	.unlocked_ioctl = mhi_uci_ioctl,
 };
@@ -681,6 +705,8 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
 		return -ENOMEM;
 
 	mutex_init(&uci_dev->mutex);
+	mutex_init(&uci_dev->r_mutex);
+	mutex_init(&uci_dev->w_mutex);
 	uci_dev->mhi_dev = mhi_dev;
 
 	minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
@@ -694,10 +720,16 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
 	uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
 #if 1
-	uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
-				     uci_dev->devt, uci_dev,
-				     DEVICE_NAME "_%s",
-				     mhi_dev->chan_name);
+	if (mhi_dev->mhi_cntrl->cntrl_idx)
+		uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
+					     uci_dev->devt, uci_dev,
+					     DEVICE_NAME "_%s%d",
+					     mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx);
+	else
+		uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
+					     uci_dev->devt, uci_dev,
+					     DEVICE_NAME "_%s",
+					     mhi_dev->chan_name);
 #else
 	uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
 				     uci_dev->devt, uci_dev,
@@ -706,14 +738,13 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
 				     mhi_dev->bus, mhi_dev->slot, "_pipe_",
 				     mhi_dev->ul_chan_id);
 #endif
+	set_bit(minor, uci_minors);
 
 	/* create debugging buffer */
 	snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
 		 mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
 		 mhi_dev->ul_chan_id);
-	uci_dev->ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
-						  node_name, 0);
 
 	for (dir = 0; dir < 2; dir++) {
 		struct uci_chan *uci_chan = (dir) ?
@@ -800,9 +831,20 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
 		buf->data = mhi_result->buf_addr;
 #endif
 		buf->len = mhi_result->bytes_xferd;
-		if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN || mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN)
+		if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN
+			|| mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN
+			|| mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN)
 		{
-			struct uci_buf *tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
+			struct uci_buf *tmp_buf = NULL;
+			int skip_buf = 0;
+
+#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
+			if (uci_dev->ref_count == 1)
+				skip_buf++;
+#endif
+			if (!skip_buf)
+				tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
+
 			if (tmp_buf) {
 				tmp_buf->page = NULL;
 				tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf);
diff --git a/quectel_MHI/src/devices/rmnet_nss.c b/quectel_MHI/src/devices/rmnet_nss.c
index deda11c..e6e8414 100644
--- a/quectel_MHI/src/devices/rmnet_nss.c
+++ b/quectel_MHI/src/devices/rmnet_nss.c
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
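
Note on the TX flow control introduced above in mhi_netdev_xmit()/mhi_netdev_xfer_ul_cb(): the queue is now stopped *before* the TX ring is exhausted (fewer than 16 free descriptors) and woken only once completions have freed more than 32, and mhi_queue_transfer() failures drop the packet instead of returning NETDEV_TX_BUSY. The following is an illustrative sketch only, not driver code; free_tx_slots() and ring_queue() are hypothetical stand-ins for mhi_get_no_free_descriptors() and mhi_queue_transfer():

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical stand-ins for the MHI-specific calls: */
    extern unsigned int free_tx_slots(struct net_device *ndev); /* cf. mhi_get_no_free_descriptors() */
    extern int ring_queue(struct net_device *ndev, struct sk_buff *skb); /* cf. mhi_queue_transfer() */

    #define TX_STOP_WMARK 16 /* stop the queue below this many free slots */
    #define TX_WAKE_WMARK 32 /* wake it only above this, to avoid thrashing */

    static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
        /* Stop early: the completion interrupt is the only wake source,
         * so never let the ring run completely dry. */
        if (unlikely(free_tx_slots(ndev) < TX_STOP_WMARK))
            netif_stop_queue(ndev);

        if (unlikely(ring_queue(ndev, skb))) {
            /* Drop and count instead of returning NETDEV_TX_BUSY,
             * so the stack does not requeue the skb forever. */
            dev_kfree_skb_any(skb);
            ndev->stats.tx_errors++;
        }
        return NETDEV_TX_OK;
    }

    /* In the TX-completion path (cf. mhi_netdev_xfer_ul_cb()): */
    static void sketch_tx_complete(struct net_device *ndev)
    {
        if (netif_queue_stopped(ndev) && free_tx_slots(ndev) > TX_WAKE_WMARK)
            netif_wake_queue(ndev);
    }

The gap between the two watermarks (16 vs 32) gives the stop/wake logic hysteresis, which prevents the queue from ping-ponging between stopped and running under sustained uplink load.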
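The constants in the CONFIG_PINCTRL_IPQ5018 downlink rate limiter also follow from simple arithmetic: 1.8 Gbit/s is 225,000,000 bytes/s, so a 100 ms window may carry at most 22,500,000 bytes, and elapsed milliseconds are derived from the ARM generic counter as ticks / (cntfrq / 1000). A self-contained sketch of that window check, under those assumptions (dl_over_budget is an illustrative name, not a driver symbol):

    #include <linux/types.h>

    /* Sketch of the DL budget check: drop traffic if, within a 100 ms
     * window measured in ARM generic-timer ticks, more than 22,500,000
     * bytes (1.8 Gbit/s) have already been received. cntfrq is the
     * timer frequency in Hz, so cntfrq / 1000 is ticks per millisecond. */
    static bool dl_over_budget(u64 now_ticks, u64 start_ticks, u32 cntfrq,
                               u64 bytes_now, u64 bytes_start)
    {
        u32 elapsed_ms = (u32)((now_ticks - start_ticks) / (cntfrq / 1000));

        return elapsed_ms < 100 && (bytes_now - bytes_start) > 22500000;
    }

Once the window expires, the driver restarts it from the current tick count and byte total, which is what the first_jiffy/bytes_received_1 resets in the patch implement.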