Update pcie

This commit is contained in:
ling 2023-05-22 18:20:11 +08:00
parent 657317a011
commit b2a91105b3
13 changed files with 1905 additions and 440 deletions

View File

@ -0,0 +1,103 @@
Release Notes
[V1.3.4]
Date: 12/8/2022
enhancement:
1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
2. show PCIe link speed and width at driver probe
3. check PCIe link status at driver probe by reading the PCIe VID and PID;
if the PCIe link is down, return -EIO
4. support RM520 (1eac:1004)
5. support qmap command packet
fix:
1. fix TX queue being wrongly stopped during uplink TPUT tests
2. fix module failing to boot up after QFirehose (very low probability)
3. mhi uci: add a mutex lock for concurrent reads/writes
[V1.3.3]
Date: 30/6/2022
enhancement:
1. remove an unnecessary kmalloc during QFirehose
2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
3. set the ring size of event ring 0 to 256 (from 1024), required by x6x
4. support the PCIe local network card mhi_swip0 (chan 46/47), disabled by default
5. port the IPQ5018 MHI rate control code from SPF11.5
6. set the PCIe rmnet download max QMAP packet size to 15KB (same as the IPQ MHI driver)
7. support setting a different MAC address for each rmnet net card
8. when the mhi netdev fails to malloc, use delayed work instead of work
9. optimize code for the case where the modem is still in MHI_EE_PTHRU when the driver loads
fix:
1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
2. set the DMA mask at driver probe; some SoCs, such as the rpi_4, need it
[V1.3.2]
Date: 12/16/2021
enhancement:
1. support Linux Kernel V5.14
2. mhi_netdev_quectel.c: do not print logs in softirq context
[V1.3.1]
Date: 9/26/2021
enhancement:
fix:
[V1.3.0.19]
Date: 9/18/2021
enhancement:
1. support sdx62 (17cb:0308)
2. support IPQ5018's NSS
3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of our own rmnet_nss.c;
pcie_mhi.ko must then be loaded after rmnet_nss.ko
4. allow the BHI IRQ to be non-zero (for ipq5018)
fix:
[V1.3.0.18]
Date: 4/14/2021
enhancement:
1. support multiple MBIM calls, usage:
# insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
# quectel-mbim-proxy -d /dev/mhi_MBIM &
# quectel-CM -n X
fix:
[V1.3.0.17]
Date: 3/11/2021
enhancement:
fix:
1. fix very high CPU load during TPUT tests when there is only one MSI interrupt
2. fix error on the latest X24 modem
[V1.3.0.16]
Date: 11/18/2020
enhancement:
fix:
1. increase ring size to 32; for in-bound channels, if one ring is full, the modem will not generate an MSI interrupt for any channel
[V1.3.0.15]
Date: 10/30/2020
enhancement:
1. support multiple modems, named /dev/mhi_<chan_name>X
fix:
1. fix compile error on kernel v5.8
[V1.3.0.14]
Date: 10/9/2020
enhancement:
1. support EM120 & EM160
fix:
1. fix compile error on kernel v5.6
2. support runtime suspend
[V1.3.0.13]
Date: 9/7/2020
enhancement:
1. support EM120 & EM160
fix:
1. fix error on X55 + PCIe 2.0 (e.g. IPQ4019)
2. support runtime suspend
[V1.3.0.12]
Date: 7/7/2020
enhancement:
1. support creating only one net card (enabled by the macro MHI_NETDEV_ONE_CARD_MODE)
fix:

View File

@ -74,6 +74,10 @@ static void pci_free_irq_vectors(struct pci_dev *dev)
static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
#if 0//defined(CONFIG_PINCTRL_IPQ5018)
struct pcie_port *pp = dev->bus->sysdata;
return pp->msi[nr]; /* MSI vectors may not be contiguous */
#endif
return dev->irq + nr;
}
#endif
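/* With this fallback, pci_irq_vector(pdev, n) simply returns the base
 * IRQ plus n on pre-4.8 kernels. As the #if 0 block above notes, MSI
 * vectors are not guaranteed to be contiguous on every SoC (e.g.
 * IPQ5018), so this is only an approximation there. */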
@ -171,8 +175,28 @@ static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
pci_set_master(pci_dev);
#if 1 /* some SoCs, such as the rpi_4b, need the DMA mask set explicitly */
ret = -EIO;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
if((ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64))))
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
#else
if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
}
#endif
if (ret) {
MHI_ERR("Error setting DMA mask\n");
}
#endif
mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn);
len = pci_resource_len(pci_dev, mhi_dev->resn);
#ifndef ioremap_nocache /* removed by kernel commit 4bdc0d676a643140bdf17dbf7eafedee3d496a3c */
#define ioremap_nocache ioremap
#endif
mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len);
if (!mhi_cntrl->regs) {
MHI_ERR("Error ioremap region\n");
@ -280,6 +304,7 @@ error_enable_device:
return ret;
}
#ifdef CONFIG_PM
static int mhi_runtime_suspend(struct device *dev)
{
int ret = 0;
@ -296,6 +321,12 @@ static int mhi_runtime_suspend(struct device *dev)
return 0;
}
if (mhi_cntrl->ee != MHI_EE_AMSS) {
MHI_LOG("Not AMSS, return busy\n");
mutex_unlock(&mhi_cntrl->pm_mutex);
return -EBUSY;
}
ret = mhi_pm_suspend(mhi_cntrl);
if (ret) {
MHI_LOG("Abort due to ret:%d\n", ret);
@ -315,9 +346,14 @@ exit_runtime_suspend:
static int mhi_runtime_idle(struct device *dev)
{
//struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
//MHI_LOG("Entered returning -EBUSY\n");
if ((mhi_cntrl->dev_state == MHI_STATE_M0 || mhi_cntrl->dev_state == MHI_STATE_M3)
&& mhi_cntrl->ee == MHI_EE_AMSS) {
return 0;
}
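	/* Anything other than M0/M3 in AMSS reports busy below, so runtime
	 * PM keeps the device active (see release note: "only allow
	 * autosuspend to be enabled when the module is in MHI_EE_AMSS"). */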
MHI_LOG("Entered returning -EBUSY, mhi_state:%s exec_env:%s\n",
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
/*
* RPM framework during runtime resume always calls
@ -374,8 +410,8 @@ static int mhi_system_resume(struct device *dev)
if (ret) {
MHI_ERR("Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
//pm_runtime_set_active(dev);
//pm_runtime_enable(dev);
}
return ret;
@ -388,6 +424,11 @@ int mhi_system_suspend(struct device *dev)
MHI_LOG("Entered\n");
if (atomic_read(&mhi_cntrl->pending_pkts)) {
MHI_LOG("Abort due to pending_pkts:%d\n", atomic_read(&mhi_cntrl->pending_pkts));
return -EBUSY;
}
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev)) {
ret = mhi_runtime_suspend(dev);
@ -397,12 +438,13 @@ int mhi_system_suspend(struct device *dev)
}
}
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
//pm_runtime_set_suspended(dev);
//pm_runtime_disable(dev);
MHI_LOG("Exit\n");
return 0;
}
#endif
/* checks if link is down */
static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
@ -535,7 +577,16 @@ static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_put_noidle(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put(dev);
}
static void mhi_runtime_mark_last_busy(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_mark_last_busy(dev);
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
@ -545,10 +596,16 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
if (reason == MHI_CB_IDLE) {
MHI_LOG("Schedule runtime suspend 1\n");
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
switch (reason) {
case MHI_CB_FATAL_ERROR:
case MHI_CB_SYS_ERROR:
pm_runtime_forbid(dev);
break;
case MHI_CB_EE_MISSION_MODE:
//pm_runtime_allow(dev);
break;
default:
break;
}
}
@ -633,7 +690,9 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &pci_dev->dev;
mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
mhi_cntrl->vendor = pci_dev->vendor;
mhi_cntrl->dev_id = pci_dev->device;
mhi_cntrl->bus = pci_dev->bus->number;
mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
@ -691,6 +750,7 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->runtime_mark_last_busy = mhi_runtime_mark_last_busy;
mhi_cntrl->link_status = mhi_link_status;
mhi_cntrl->lpm_disable = mhi_lpm_disable;
@ -725,6 +785,66 @@ error_register:
return ERR_PTR(-EINVAL);
}
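/* Probe the endpoint by reading its vendor ID from config space;
 * all-ones (or zero) means the device is not responding, i.e. the
 * PCIe link is down. */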
static bool mhi_pci_is_alive(struct pci_dev *pdev)
{
u16 vendor = 0;
if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
return false;
return true;
}
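/* Decode PCI_EXP_LNKCAP/PCI_EXP_LNKSTA and log the supported vs.
 * negotiated link speed and width at probe time. */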
static void mhi_pci_show_link(struct mhi_controller *mhi_cntrl, struct pci_dev *pci_dev)
{
int pcie_cap_reg;
u16 stat;
u32 caps;
const char *speed;
pcie_cap_reg = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
if (!pcie_cap_reg)
return;
pci_read_config_word(pci_dev,
pcie_cap_reg + PCI_EXP_LNKSTA,
&stat);
pci_read_config_dword(pci_dev,
pcie_cap_reg + PCI_EXP_LNKCAP,
&caps);
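	/* The raw case values 3..6 below stand for 8/16/32/64 GT/s; the
	 * corresponding PCI_EXP_LNKCAP_SLS_* macros are missing from older
	 * kernel headers, which is presumably why numbers are used. */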
switch (caps & PCI_EXP_LNKCAP_SLS) {
case PCI_EXP_LNKCAP_SLS_2_5GB: speed = "2.5"; break;
case PCI_EXP_LNKCAP_SLS_5_0GB: speed = "5"; break;
case 3: speed = "8"; break;
case 4: speed = "16"; break;
case 5: speed = "32"; break;
case 6: speed = "64"; break;
default: speed = "0"; break;
}
MHI_LOG("LnkCap: Speed %sGT/s, Width x%d\n", speed,
(caps & PCI_EXP_LNKCAP_MLW) >> 4);
switch (stat & PCI_EXP_LNKSTA_CLS) {
case PCI_EXP_LNKSTA_CLS_2_5GB: speed = "2.5"; break;
case PCI_EXP_LNKSTA_CLS_5_0GB: speed = "5"; break;
case 3: speed = "8"; break;
case 4: speed = "16"; break;
case 5: speed = "32"; break;
case 6: speed = "64"; break;
default: speed = "0"; break;
}
MHI_LOG("LnkSta: Speed %sGT/s, Width x%d\n", speed,
(stat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id)
{
@ -739,6 +859,18 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
if (!mhi_pci_is_alive(pci_dev)) {
/*
root@OpenWrt:~# hexdump /sys/bus/pci/devices/0000:01:00.0/config
0000000 ffff ffff ffff ffff ffff ffff ffff ffff
*
0001000
*/
pr_err("mhi_pci is not alive! pcie link is down\n");
pr_err("double check by 'hexdump /sys/bus/pci/devices/%s/config'\n", dev_name(&pci_dev->dev));
return -EIO;
}
/* see if we already registered */
mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
if (!mhi_cntrl)
@ -767,7 +899,8 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
}
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_allow(&pci_dev->dev);
mhi_pci_show_link(mhi_cntrl, pci_dev);
MHI_LOG("Return successful\n");
@ -838,9 +971,13 @@ static const struct dev_pm_ops pm_ops = {
static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX20
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, //SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308)}, //SDX62
{PCI_DEVICE(0x1eac, 0x1001)}, //EM120
{PCI_DEVICE(0x1eac, 0x1002)}, //EM160
{PCI_DEVICE(0x1eac, 0x1004)}, //RM520
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};

View File

@ -15,7 +15,7 @@
#define MHI_PCIE_DEBUG_ID (0xffff)
/* runtime suspend timer */
#define MHI_RPM_SUSPEND_TMR_MS (250)
#define MHI_RPM_SUSPEND_TMR_MS (2000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {

View File

@ -4,7 +4,9 @@
#ifndef _MHI_H_
#define _MHI_H_
#define PCIE_MHI_DRIVER_VERSION "V1.3.0.13"
#define PCIE_MHI_DRIVER_VERSION "V1.3.4"
#define ENABLE_MHI_MON
//#define ENABLE_IP_SW0
#include <linux/miscdevice.h>
typedef enum
@ -90,11 +92,13 @@ typedef enum
{
SW_EVT_RING = 0,
PRIMARY_EVENT_RING = SW_EVT_RING,
HW_0_OUT_EVT_RING = 1,
IPA_OUT_EVENT_RING = HW_0_OUT_EVT_RING,
HW_0_IN_EVT_RING = 2,
IPA_IN_EVENT_RING = HW_0_IN_EVT_RING,
ADPL_EVT_RING = 3,
#ifdef ENABLE_IP_SW0
SW_0_OUT_EVT_RING,
SW_0_IN_EVT_RING,
#endif
IPA_OUT_EVENT_RING,
IPA_IN_EVENT_RING,
ADPL_EVT_RING,
MAX_EVT_RING_IDX
}MHI_EVT_RING_IDX;
@ -111,7 +115,15 @@ typedef enum
#define NUM_MHI_IPA_IN_RING_ELEMENTS 512
#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 //do not use UL aggregation, so increase
#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
#define NUM_MHI_CHAN_RING_ELEMENTS 8
#define NUM_MHI_SW_IP_RING_ELEMENTS 512
/*
 * If the interrupt moderation time is set to 1 ms and more than
 * NUM_MHI_CHAN_RING_ELEMENTS transfers are sent to the modem within
 * that 1 ms (e.g. during a firehose upgrade), the modem will not
 * trigger an IRQ for those transfers.
 */
#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
#define MHI_EVT_CMD_QUEUE_SIZE 160
#define MHI_EVT_STATE_QUEUE_SIZE 128
#define MHI_EVT_XFER_QUEUE_SIZE 1024
@ -141,6 +153,7 @@ struct mhi_buf_info;
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
* @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
* @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
* @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
* @MHI_CB_FATAL_ERROR: MHI device entered fatal error
*/
@ -150,6 +163,7 @@ enum MHI_CB {
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
};
@ -322,10 +336,13 @@ struct mhi_controller {
void __iomem *wake_db;
/* device topology */
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
u32 cntrl_idx;
struct device *cntrl_dev;
/* addressing window */
dma_addr_t iova_start;
@ -356,6 +373,7 @@ struct mhi_controller {
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
u32 msi_irq_base;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
@ -377,6 +395,7 @@ struct mhi_controller {
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
atomic_t pending_pkts;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
@ -388,6 +407,7 @@ struct mhi_controller {
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
struct delayed_work ready_worker;
wait_queue_head_t state_event;
/* shadow functions */
@ -398,6 +418,7 @@ struct mhi_controller {
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv);
u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
@ -431,8 +452,34 @@ struct mhi_controller {
struct dentry *parent;
struct miscdevice miscdev;
#ifdef ENABLE_MHI_MON
spinlock_t lock;
/* Ref */
int nreaders; /* Under mon_lock AND mbus->lock */
struct list_head r_list; /* Chain of readers (usually one) */
struct kref ref; /* Under mon_lock */
/* Stats */
unsigned int cnt_events;
unsigned int cnt_text_lost;
#endif
};
#ifdef ENABLE_MHI_MON
struct mhi_tre;
struct mon_reader {
struct list_head r_link;
struct mhi_controller *m_bus;
void *r_data; /* Use container_of instead? */
void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
};
#endif
/**
* struct mhi_device - mhi device structure associated bind to channel
* @dev: Device associated with the channels
@ -444,6 +491,7 @@ struct mhi_controller {
*/
struct mhi_device {
struct device dev;
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
@ -797,7 +845,7 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_debug("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#else
@ -808,25 +856,23 @@ void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_info("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
else if (!mhi_cntrl->klog_slient) \
printk(KERN_DEBUG "[I][%s] " fmt, __func__, ##__VA_ARGS__);\
printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_alert("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl);
void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl);
#ifndef MHI_NAME_SIZE
#define MHI_NAME_SIZE 32

View File

@ -19,9 +19,6 @@
#include "mhi.h"
#include "mhi_internal.h"
#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1
#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2
/* Software defines */
/* BHI Version */
#define BHI_MAJOR_VERSION 0x1
@ -60,31 +57,30 @@ typedef struct _bhi_info_type
ULONG bhi_rsvd5;
}BHI_INFO_TYPE, *PBHI_INFO_TYPE;
static void PrintBhiInfo(BHI_INFO_TYPE *bhi_info)
static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
char str[128];
printk("BHI Device Info...\n");
printk("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
printk("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
printk("BHI Status = 0x%X\n", bhi_info->bhi_status);
printk("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
printk("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
printk("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
printk("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
printk("BHI MSM HW-Id = ");
MHI_LOG("BHI Device Info...\n");
MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
for (index = 0; index < 6; index++)
{
printk("0x%X ", bhi_info->bhi_msmhwid[index]);
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
}
printk("\n");
MHI_LOG("BHI MSM HW-Id = %s\n", str);
printk("BHI OEM PK Hash = \n");
for (index = 0; index < 24; index++)
{
printk("0x%X ", bhi_info->bhi_oempkhash[index]);
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
}
printk("\n");
MHI_LOG("BHI OEM PK Hash = %s\n", str);
}
static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
@ -130,11 +126,11 @@ static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
}
bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
PrintBhiInfo(bhi_info);
PrintBhiInfo(mhi_cntrl, bhi_info);
/* Check the Execution Environment */
if (!IsPBLExecEnv(bhi_info->bhi_ee))
{
printk("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
}
/* Return the number of bytes read */
@ -204,8 +200,11 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
if (unlikely(!sequence_id))
sequence_id = 1;
@ -316,8 +315,11 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
@ -368,8 +370,12 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#else
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);
@ -433,7 +439,8 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb);
read_unlock_bh(pm_lock);
@ -720,7 +727,7 @@ error_alloc_fw_table:
release_firmware(firmware);
}
int BhiWrite(struct mhi_controller *mhi_cntrl, void *buf, size_t size)
int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size)
{
int ret;
dma_addr_t dma_addr;
@ -753,12 +760,18 @@ int BhiWrite(struct mhi_controller *mhi_cntrl, void *buf, size_t size)
}
dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!buf) {
if (!dma_buf) {
MHI_ERR("Could not allocate memory for image\n");
return -ENOMEM;
}
memcpy(dma_buf, buf, size);
ret = copy_from_user(dma_buf, ubuf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
return ret;
}
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
@ -804,94 +817,40 @@ error_state:
return ret;
}
static int mhi_cntrl_open(struct inode *inode, struct file *f)
{
return 0;
}
static int mhi_cntrl_release(struct inode *inode, struct file *f)
{
return 0;
}
static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
void *ubuf = (void *)__arg;
struct miscdevice *c = (struct miscdevice *)f->private_data;
struct mhi_controller *mhi_cntrl = container_of(c, struct mhi_controller, miscdev);
BHI_INFO_TYPE bhi_info;
switch (cmd) {
case IOCTL_BHI_GETDEVINFO:
{
BHI_INFO_TYPE bhi_info;
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
}
}
break;
case IOCTL_BHI_WRITEIMAGE:
{
void *buf;
size_t size;
ret = copy_from_user(&size, ubuf, sizeof(size));
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
return ret;
}
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL) {
return -ENOMEM;
}
ret = copy_from_user(buf, ubuf+sizeof(size), size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %ld\n", ret);
kfree(buf);
return ret;
}
ret = BhiWrite(mhi_cntrl, buf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
kfree(buf);
}
break;
default:
break;
ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
}
return ret;
}
static const struct file_operations mhi_cntrl_fops = {
.unlocked_ioctl = mhi_cntrl_ioctl,
.open = mhi_cntrl_open,
.release = mhi_cntrl_release,
};
int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl)
long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
mhi_cntrl->miscdev.minor = MISC_DYNAMIC_MINOR;
mhi_cntrl->miscdev.name = "mhi_BHI";
mhi_cntrl->miscdev.fops = &mhi_cntrl_fops;
long ret = -EINVAL;
size_t size;
return misc_register(&mhi_cntrl->miscdev);
}
ret = copy_from_user(&size, ubuf, sizeof(size));
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
return ret;
}
void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl)
{
misc_deregister(&mhi_cntrl->miscdev);
ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
return ret;
}

View File

@ -68,6 +68,16 @@ static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
if (tiocm & TIOCM_RTS)
dtr_msg->msg |= CTRL_MSG_RTS;
/*
 * 'minicom -D /dev/mhi_DUN' sends RTS:1 on open and RTS:0 on exit.
 * RTS:0 prevents the modem from outputting AT responses.
 * 'busybox microcom', however, does not send any RTS to the modem.
 * [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
 * [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
 */
dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
!!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
reinit_completion(&dtr_chan->completion);
ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
sizeof(*dtr_msg), MHI_EOT);

View File

@ -12,9 +12,35 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,11,0 ))
#include <linux/sched/signal.h>
#else
#include <linux/signal.h>
#endif
#include "mhi.h"
#include "mhi_internal.h"
struct mhi_controller_map {
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
};
#define MAX_MHI_CONTROLLER 16
struct mhi_controller_map mhi_controller_minors[MAX_MHI_CONTROLLER];
#define MHI_CNTRL_DRIVER_NAME "mhi_cntrl_q"
struct mhi_cntrl_drv {
struct list_head head;
struct mutex lock;
struct class *class;
int major;
};
static struct mhi_cntrl_drv mhi_cntrl_drv;
const char * const mhi_ee_str[MHI_EE_MAX] = {
[MHI_EE_PBL] = "PBL",
[MHI_EE_SBL] = "SBL",
@ -109,7 +135,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
if (mhi_cntrl->msi_allocated == 1) {
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl);
return;
}
@ -120,7 +146,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
}
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl);
}
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
@ -134,8 +160,8 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
mhi_event->msi = 0;
}
ret = request_irq(mhi_cntrl->irq[0],
mhi_one_msi_handlr, IRQF_SHARED, "mhi", mhi_cntrl);
ret = request_threaded_irq(mhi_cntrl->irq[0], NULL,
mhi_one_msi_handlr, IRQF_ONESHOT, "mhi", mhi_cntrl);
if (ret) {
MHI_ERR("Error requesting irq:%d, ret=%d\n", mhi_cntrl->irq[0], ret);
}
@ -143,7 +169,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
}
/* for BHI INTVEC msi */
ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handlr,
ret = request_threaded_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_intvec_handlr,
mhi_intvec_threaded_handlr, IRQF_ONESHOT,
"mhi", mhi_cntrl);
if (ret)
@ -268,11 +294,374 @@ static const struct file_operations debugfs_chan_ops = {
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL,
mhi_debugfs_trigger_reset, "%llu\n");
#ifdef ENABLE_MHI_MON
struct mon_event_text {
struct list_head e_link;
int type; /* submit, complete, etc. */
unsigned int tstamp;
u32 chan;
dma_addr_t wp;
struct mhi_tre mhi_tre;
u8 data[32];
size_t len;
};
#define EVENT_MAX (16*PAGE_SIZE / sizeof(struct mon_event_text))
#define PRINTF_DFL 250
#define SLAB_NAME_SZ 30
struct mon_reader_text {
struct kmem_cache *e_slab;
int nevents;
struct list_head e_list;
struct mon_reader r; /* In C, parent class can be placed anywhere */
wait_queue_head_t wait;
int printf_size;
char *printf_buf;
int left_size;
int left_pos;
struct mutex printf_lock;
char slab_name[SLAB_NAME_SZ];
};
struct mon_text_ptr {
int cnt, limit;
char *pbuf;
};
static DEFINE_MUTEX(mon_lock);
static inline unsigned int mon_get_timestamp(void)
{
struct timespec64 now;
unsigned int stamp;
ktime_get_ts64(&now);
stamp = now.tv_sec & 0xFFF; /* 2^32 us ~= 4295 s, so keep only 4096 s worth of seconds so the microsecond stamp below fits in 32 bits */
stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC;
return stamp;
}
static void mon_text_event(struct mon_reader_text *rp,
u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len,
char ev_type)
{
struct mon_event_text *ep;
if (rp->nevents >= EVENT_MAX ||
(ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
rp->r.m_bus->cnt_text_lost++;
return;
}
ep->type = ev_type;
ep->tstamp = mon_get_timestamp();
ep->chan = chan;
ep->wp = wp;
ep->mhi_tre = *mhi_tre;
if (len > sizeof(ep->data))
len = sizeof(ep->data);
memcpy(ep->data, buf, len);
ep->len = len;
rp->nevents++;
list_add_tail(&ep->e_link, &rp->e_list);
wake_up(&rp->wait);
}
static void mon_text_submit(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
{
struct mon_reader_text *rp = data;
mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'W');
}
static void mon_text_receive(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
{
struct mon_reader_text *rp = data;
mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'R');
}
static void mon_text_complete(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre)
{
struct mon_reader_text *rp = data;
mon_text_event(rp, chan, wp, mhi_tre, NULL, 0, 'E');
}
void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r)
{
unsigned long flags;
spin_lock_irqsave(&mbus->lock, flags);
mbus->nreaders++;
list_add_tail(&r->r_link, &mbus->r_list);
spin_unlock_irqrestore(&mbus->lock, flags);
kref_get(&mbus->ref);
}
static void mon_bus_drop(struct kref *r)
{
struct mhi_controller *mbus = container_of(r, struct mhi_controller, ref);
kfree(mbus);
}
static void mon_reader_del(struct mhi_controller *mbus, struct mon_reader *r)
{
unsigned long flags;
spin_lock_irqsave(&mbus->lock, flags);
list_del(&r->r_link);
--mbus->nreaders;
spin_unlock_irqrestore(&mbus->lock, flags);
kref_put(&mbus->ref, mon_bus_drop);
}
static void mon_text_ctor(void *mem)
{
/*
* Nothing to initialize. No, really!
* So, we fill it with garbage to emulate a reused object.
*/
memset(mem, 0xe5, sizeof(struct mon_event_text));
}
static int mon_text_open(struct inode *inode, struct file *file)
{
struct mhi_controller *mbus;
struct mon_reader_text *rp;
int rc;
mutex_lock(&mon_lock);
mbus = inode->i_private;
rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL);
if (rp == NULL) {
rc = -ENOMEM;
goto err_alloc;
}
INIT_LIST_HEAD(&rp->e_list);
init_waitqueue_head(&rp->wait);
mutex_init(&rp->printf_lock);
rp->printf_size = PRINTF_DFL;
rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL);
if (rp->printf_buf == NULL) {
rc = -ENOMEM;
goto err_alloc_pr;
}
rp->r.m_bus = mbus;
rp->r.r_data = rp;
rp->r.rnf_submit = mon_text_submit;
rp->r.rnf_receive = mon_text_receive;
rp->r.rnf_complete = mon_text_complete;
snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
rp->e_slab = kmem_cache_create(rp->slab_name,
sizeof(struct mon_event_text), sizeof(long), 0,
mon_text_ctor);
if (rp->e_slab == NULL) {
rc = -ENOMEM;
goto err_slab;
}
mon_reader_add(mbus, &rp->r);
file->private_data = rp;
mutex_unlock(&mon_lock);
return 0;
// err_busy:
// kmem_cache_destroy(rp->e_slab);
err_slab:
kfree(rp->printf_buf);
err_alloc_pr:
kfree(rp);
err_alloc:
mutex_unlock(&mon_lock);
return rc;
}
static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp,
struct mhi_controller *mbus)
{
struct list_head *p;
unsigned long flags;
spin_lock_irqsave(&mbus->lock, flags);
if (list_empty(&rp->e_list)) {
spin_unlock_irqrestore(&mbus->lock, flags);
return NULL;
}
p = rp->e_list.next;
list_del(p);
--rp->nevents;
spin_unlock_irqrestore(&mbus->lock, flags);
return list_entry(p, struct mon_event_text, e_link);
}
static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
struct file *file)
{
struct mhi_controller *mbus = rp->r.m_bus;
DECLARE_WAITQUEUE(waita, current);
struct mon_event_text *ep;
add_wait_queue(&rp->wait, &waita);
set_current_state(TASK_INTERRUPTIBLE);
while ((ep = mon_text_fetch(rp, mbus)) == NULL) {
if (file->f_flags & O_NONBLOCK) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->wait, &waita);
return ERR_PTR(-EWOULDBLOCK);
}
/*
* We do not count nwaiters, because ->release is supposed
* to be called when all openers are gone only.
*/
schedule();
if (signal_pending(current)) {
remove_wait_queue(&rp->wait, &waita);
return ERR_PTR(-EINTR);
}
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&rp->wait, &waita);
return ep;
}
static ssize_t mon_text_read_u(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
if (rp->left_size) {
int cnt = rp->left_size;
if (cnt > nbytes)
cnt = nbytes;
if (copy_to_user(buf, rp->printf_buf + rp->left_pos, cnt))
return -EFAULT;
rp->left_pos += cnt;
rp->left_size -= cnt;
return cnt;
}
if (IS_ERR(ep = mon_text_read_wait(rp, file)))
return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
ptr.cnt = 0;
ptr.pbuf = rp->printf_buf;
ptr.limit = rp->printf_size;
ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
"%u %c %03d WP:%llx TRE: %llx %08x %08x",
ep->tstamp, ep->type, ep->chan, ep->wp,
ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]);
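	/* Produces lines like (illustrative values only):
	 *   1234567 W 100 WP:e3800100 TRE: 00000000e38e0000 00000010 00000201 45000039 0a0a0a01
	 * i.e. timestamp, event type (W/R/E), channel, ring WP, the three
	 * TRE words, then up to 32 payload bytes grouped 4 per block. */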
if (ep->len) {
struct mon_text_ptr *p = &ptr;
size_t i = 0;
for (i = 0; i < ep->len; i++) {
if (i % 4 == 0) {
p->cnt += snprintf(p->pbuf + p->cnt,
p->limit - p->cnt,
" ");
}
p->cnt += snprintf(p->pbuf + p->cnt,
p->limit - p->cnt,
"%02x", ep->data[i]);
}
}
ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, "\n");
if (ptr.cnt > nbytes) {
rp->left_pos = nbytes;
rp->left_size = ptr.cnt - nbytes;
ptr.cnt = nbytes;
}
if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
ptr.cnt = -EFAULT;
mutex_unlock(&rp->printf_lock);
kmem_cache_free(rp->e_slab, ep);
return ptr.cnt;
}
static int mon_text_release(struct inode *inode, struct file *file)
{
struct mon_reader_text *rp = file->private_data;
struct mhi_controller *mbus;
/* unsigned long flags; */
struct list_head *p;
struct mon_event_text *ep;
mutex_lock(&mon_lock);
mbus = inode->i_private;
if (mbus->nreaders <= 0) {
mutex_unlock(&mon_lock);
return 0;
}
mon_reader_del(mbus, &rp->r);
/*
* In theory, e_list is protected by mbus->lock. However,
* after mon_reader_del has finished, the following is the case:
* - we are not on reader list anymore, so new events won't be added;
* - whole mbus may be dropped if it was orphaned.
* So, we better not touch mbus.
*/
/* spin_lock_irqsave(&mbus->lock, flags); */
while (!list_empty(&rp->e_list)) {
p = rp->e_list.next;
ep = list_entry(p, struct mon_event_text, e_link);
list_del(p);
--rp->nevents;
kmem_cache_free(rp->e_slab, ep);
}
/* spin_unlock_irqrestore(&mbus->lock, flags); */
kmem_cache_destroy(rp->e_slab);
kfree(rp->printf_buf);
kfree(rp);
mutex_unlock(&mon_lock);
return 0;
}
static const struct file_operations mon_fops_text_u = {
.owner = THIS_MODULE,
.open = mon_text_open,
.llseek = no_llseek,
.read = mon_text_read_u,
.release = mon_text_release,
};
#endif
void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
{
struct dentry *dentry;
char node[32];
#ifdef ENABLE_MHI_MON
struct mhi_controller *mbus = mhi_cntrl;
mbus->nreaders = 0;
kref_init(&mbus->ref);
spin_lock_init(&mbus->lock);
INIT_LIST_HEAD(&mbus->r_list);
#endif
if (!mhi_cntrl->parent)
snprintf(node, sizeof(node), "mhi_%04x_%02u:%02u.%02u",
mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
@ -294,6 +683,10 @@ void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
&debugfs_chan_ops);
debugfs_create_file("reset", 0444, dentry, mhi_cntrl,
&debugfs_trigger_reset_fops);
#ifdef ENABLE_MHI_MON
debugfs_create_file("mhimon", 0444, dentry, mhi_cntrl,
&mon_fops_text_u);
#endif
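	/* Per the release notes, the monitor is read with e.g.:
	 *   cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
	 */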
mhi_cntrl->dentry = dentry;
}
@ -316,6 +709,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
atomic_set(&mhi_cntrl->dev_wake, 0);
atomic_set(&mhi_cntrl->alloc_size, 0);
atomic_set(&mhi_cntrl->pending_pkts, 0);
mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
if (!mhi_ctxt)
@ -707,9 +1101,11 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
/* setup wake db */
mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
#if 0 /* firmware 'EM120RGLAPR02A07M4G_11' treats this as channel 127's interrupt and reports a completion event over the cmd ring, but the cmd ring is not set up yet */
mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
mhi_cntrl->wake_set = false;
#endif
/* setup channel db addresses */
mhi_chan = mhi_cntrl->mhi_chan;
@ -788,6 +1184,16 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_out_chan_ring[mhi_chan->ring];
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_out_chan_ring[mhi_chan->ring]);
}
#ifdef ENABLE_IP_SW0
else if (MHI_CLIENT_IP_SW_0_IN == mhi_chan->chan) {
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_in_chan_ring[mhi_chan->ring];
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_in_chan_ring[mhi_chan->ring]);
}
else if (MHI_CLIENT_IP_SW_0_OUT == mhi_chan->chan) {
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_out_chan_ring[mhi_chan->ring];
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_out_chan_ring[mhi_chan->ring]);
}
#endif
else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) {
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring];
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]);
@ -1191,6 +1597,7 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
mhi_cntrl->msi_irq_base = 0;
/* populate ev ring */
mhi_event = mhi_cntrl->mhi_event;
i = 0;
@ -1199,6 +1606,8 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
mhi_event->er_index = i;
mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements
if (i == PRIMARY_EVENT_RING || i == ADPL_EVT_RING)
mhi_event->ring.elements = 256; //256 is enough; 1024 sometimes makes the driver fail to open a channel (the x6x fails to malloc)
mhi_event->intmod = 1; //Interrupt moderation time in ms
@ -1209,12 +1618,23 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
if (i == IPA_IN_EVENT_RING)
mhi_event->intmod = 5;
mhi_event->msi = 1 + i; //MSI associated with this event ring
#ifdef ENABLE_IP_SW0
if (i == SW_0_IN_EVT_RING)
mhi_event->intmod = 5;
#endif
mhi_event->msi = 1 + i + mhi_cntrl->msi_irq_base; //MSI associated with this event ring
if (i == IPA_OUT_EVENT_RING)
mhi_event->chan = MHI_CLIENT_IP_HW_0_OUT; //Dedicated channel number, if it's a dedicated event ring
else if (i == IPA_IN_EVENT_RING)
mhi_event->chan = MHI_CLIENT_IP_HW_0_IN; //Dedicated channel number, if it's a dedicated event ring
#ifdef ENABLE_IP_SW0
else if (i == SW_0_OUT_EVT_RING)
mhi_event->chan = MHI_CLIENT_IP_SW_0_OUT;
else if (i == SW_0_IN_EVT_RING)
mhi_event->chan = MHI_CLIENT_IP_SW_0_IN;
#endif
else
mhi_event->chan = 0;
@ -1235,6 +1655,10 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING)
mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
#ifdef ENABLE_IP_SW0
else if (i == SW_0_OUT_EVT_RING || i == SW_0_IN_EVT_RING)
mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
#endif
else
mhi_event->data_type = MHI_ER_CTRL_ELEMENT_TYPE;
@ -1261,14 +1685,14 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
mhi_cntrl->sw_ev_rings++;
mhi_event->cl_manage = false;
if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN)
if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN || mhi_event->chan == MHI_CLIENT_IP_SW_0_IN)
mhi_event->cl_manage = true;
mhi_event->offload_ev = false;
mhi_event++;
}
/* we need msi for each event ring + additional one for BHI */
mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1;
mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1 + mhi_cntrl->msi_irq_base;
return 0;
}
@ -1321,10 +1745,10 @@ static struct chan_cfg_t chan_cfg[] = {
//"Qualcomm EDL "
{"EDL", MHI_CLIENT_EDL_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
{"EDL", MHI_CLIENT_EDL_IN, NUM_MHI_CHAN_RING_ELEMENTS},
#if 0 //AG15
#ifdef ENABLE_IP_SW0
//"Qualcomm PCIe LOCAL Adapter"
{"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
{"IP_SW0", MHI_CLIENT_IP_SW_0_IN, NUM_MHI_CHAN_RING_ELEMENTS},
{"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_SW_IP_RING_ELEMENTS},
{"IP_SW0", MHI_CLIENT_IP_SW_0_IN, NUM_MHI_SW_IP_RING_ELEMENTS},
#endif
//"Qualcomm PCIe WWAN Adapter"
{"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS},
@ -1381,7 +1805,8 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
*/
mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN) {
if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN
|| chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN) {
mhi_chan->ring = 0;
}
else {
@ -1393,6 +1818,12 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
mhi_chan->er_index = IPA_OUT_EVENT_RING;
else if (chan == MHI_CLIENT_IP_HW_0_IN)
mhi_chan->er_index = IPA_IN_EVENT_RING;
#ifdef ENABLE_IP_SW0
else if (chan == MHI_CLIENT_IP_SW_0_OUT)
mhi_chan->er_index = SW_0_OUT_EVT_RING;
else if (chan == MHI_CLIENT_IP_SW_0_IN)
mhi_chan->er_index = SW_0_IN_EVT_RING;
#endif
else
mhi_chan->er_index = PRIMARY_EVENT_RING;
@ -1549,6 +1980,30 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
//if (!mhi_cntrl->of_node)
// return -EINVAL;
for (i = 0; i < MAX_MHI_CONTROLLER; i++) {
if (mhi_controller_minors[i].dev_id == mhi_cntrl->dev_id
&& mhi_controller_minors[i].domain == mhi_cntrl->domain
&& mhi_controller_minors[i].bus == mhi_cntrl->bus
&& mhi_controller_minors[i].slot == mhi_cntrl->slot) {
mhi_cntrl->cntrl_idx = i;
break;
}
else if (mhi_controller_minors[i].dev_id == 0
&& mhi_controller_minors[i].domain == 0
&& mhi_controller_minors[i].bus == 0
&& mhi_controller_minors[i].slot == 0) {
mhi_controller_minors[i].dev_id = mhi_cntrl->dev_id;
mhi_controller_minors[i].domain = mhi_cntrl->domain;
mhi_controller_minors[i].bus = mhi_cntrl->bus;
mhi_controller_minors[i].slot = mhi_cntrl->slot;
mhi_cntrl->cntrl_idx = i;
break;
}
}
if (i == MAX_MHI_CONTROLLER)
return -EINVAL;
if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
return -EINVAL;
@ -1574,6 +2029,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
INIT_DELAYED_WORK(&mhi_cntrl->ready_worker, mhi_pm_ready_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cmd = mhi_cntrl->mhi_cmd;
@ -1621,10 +2077,20 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
mhi_dev->mhi_cntrl = mhi_cntrl;
dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id,
mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
ret = device_add(&mhi_dev->dev);
if (ret)
goto error_add_dev;
if (mhi_cntrl->cntrl_idx)
mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev,
MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL,
"mhi_BHI%d", mhi_cntrl->cntrl_idx);
else
mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev,
MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL,
"mhi_BHI");
mhi_cntrl->mhi_dev = mhi_dev;
mhi_cntrl->parent = mhi_bus.dentry;
@ -1660,6 +2126,8 @@ void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
kfree(mhi_cntrl->mhi_chan);
kfree(mhi_cntrl->mhi_tsync);
if (mhi_cntrl->cntrl_dev)
device_destroy(mhi_cntrl_drv.class, MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx));
device_del(&mhi_dev->dev);
put_device(&mhi_dev->dev);
@ -1991,6 +2459,7 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
dev->release = mhi_release_device;
dev->parent = mhi_cntrl->dev;
mhi_dev->mhi_cntrl = mhi_cntrl;
mhi_dev->vendor = mhi_cntrl->vendor;
mhi_dev->dev_id = mhi_cntrl->dev_id;
mhi_dev->domain = mhi_cntrl->domain;
mhi_dev->bus = mhi_cntrl->bus;
@ -2001,6 +2470,102 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
return mhi_dev;
}
static int mhi_cntrl_open(struct inode *inode, struct file *f)
{
int ret = -ENODEV;
struct mhi_controller *mhi_cntrl;
mutex_lock(&mhi_bus.lock);
list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) {
if (MINOR(inode->i_rdev) == mhi_cntrl->cntrl_idx) {
ret = 0;
f->private_data = mhi_cntrl;
break;
}
}
mutex_unlock(&mhi_bus.lock);
return ret;
}
static int mhi_cntrl_release(struct inode *inode, struct file *f)
{
f->private_data = NULL;
return 0;
}
#define IOCTL_BHI_GETDEVINFO (0x8BE0 + 1)
#define IOCTL_BHI_WRITEIMAGE (0x8BE0 + 2)
long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *to);
long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *from);
static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
{
long ret = -ENODEV;
struct mhi_controller *mhi_cntrl;
mutex_lock(&mhi_bus.lock);
list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) {
if (mhi_cntrl == (struct mhi_controller *)f->private_data) {
ret = 0;
break;
}
}
mutex_unlock(&mhi_bus.lock);
if (ret)
return ret;
switch (cmd) {
case IOCTL_BHI_GETDEVINFO:
ret = bhi_get_dev_info(mhi_cntrl, (void __user *)__arg);
break;
case IOCTL_BHI_WRITEIMAGE:
ret = bhi_write_image(mhi_cntrl, (void __user *)__arg);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static const struct file_operations mhi_cntrl_fops = {
.unlocked_ioctl = mhi_cntrl_ioctl,
.open = mhi_cntrl_open,
.release = mhi_cntrl_release,
};
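/*
 * Minimal user-space sketch of the /dev/mhi_BHI interface (assumption:
 * BHI_INFO_TYPE is laid out exactly as in the kernel definition):
 *
 *   int fd = open("/dev/mhi_BHI", O_RDWR);
 *   BHI_INFO_TYPE info;
 *   if (fd >= 0 && ioctl(fd, IOCTL_BHI_GETDEVINFO, &info) == 0)
 *       printf("BHI EE = 0x%X\n", info.bhi_ee);
 */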
static int __init mhi_cntrl_init(void)
{
int ret;
ret = register_chrdev(0, MHI_CNTRL_DRIVER_NAME, &mhi_cntrl_fops);
if (ret < 0)
return ret;
mhi_cntrl_drv.major = ret;
mhi_cntrl_drv.class = class_create(THIS_MODULE, MHI_CNTRL_DRIVER_NAME);
if (IS_ERR(mhi_cntrl_drv.class)) {
unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME);
return -ENODEV;
}
mutex_init(&mhi_cntrl_drv.lock);
INIT_LIST_HEAD(&mhi_cntrl_drv.head);
return 0;
}
void mhi_cntrl_exit(void)
{
class_destroy(mhi_cntrl_drv.class);
unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME);
}
extern int mhi_dtr_init(void);
extern void mhi_dtr_exit(void);
extern int mhi_device_netdev_init(struct dentry *parent);
@ -2048,6 +2613,11 @@ static int __init mhi_init(void)
pr_err("Error mhi_device_uci_init ret:%d\n", ret);
}
ret = mhi_cntrl_init();
if (ret) {
pr_err("Error mhi_cntrl_init ret:%d\n", ret);
}
ret = mhi_controller_qcom_init();
if (ret) {
pr_err("Error mhi_controller_qcom_init ret:%d\n", ret);
@ -2059,6 +2629,7 @@ static int __init mhi_init(void)
static void mhi_exit(void)
{
mhi_controller_qcom_exit();
mhi_cntrl_exit();
mhi_device_uci_exit();
mhi_device_netdev_exit();
mhi_dtr_exit();

View File

@ -854,12 +854,16 @@ struct state_transition {
/* Control Segment */
struct mhi_ctrl_seg
{
struct __packed mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
struct __packed mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
struct __packed mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
struct __packed mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
struct __packed mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
struct __packed mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __aligned(CMD_EL_PER_RING*16);
struct mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
struct mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
#ifdef ENABLE_IP_SW0
struct mhi_tre sw_in_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
struct mhi_tre sw_out_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
#endif
struct mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
struct mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __packed __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
struct mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __packed __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
struct mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __packed __aligned(CMD_EL_PER_RING*16);
struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128);
struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128);
@ -1031,6 +1035,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
void mhi_pm_st_worker(struct work_struct *work);
void mhi_fw_load_worker(struct work_struct *work);
void mhi_pm_sys_err_worker(struct work_struct *work);
void mhi_pm_ready_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);

View File

@ -68,6 +68,8 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
MISC_CAP_MASK, MISC_CAP_SHIFT, offset);
if (ret)
return ret;
if (*offset >= 0x1000)
return -ENXIO;
do {
ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
CAP_CAPID_MASK, CAP_CAPID_SHIFT,
@ -173,6 +175,13 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
mhi_write_db(mhi_cntrl, ring->db_addr, db);
}
//#define DEBUG_CHAN100_DB
#ifdef DEBUG_CHAN100_DB
static atomic_t chan100_seq = ATOMIC_INIT(0);
#define CHAN100_SIZE 0x1000
static unsigned int chan100_t[CHAN100_SIZE];
#endif
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
{
@ -180,6 +189,11 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
dma_addr_t db;
db = ring->iommu_base + (ring->wp - ring->base);
/*
* Writes to the new ring element must be visible to the hardware
* before letting h/w know there is new element to fetch.
*/
dma_wmb();
*ring->ctxt_wp = db;
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr,
db);
@ -223,9 +237,11 @@ int mhi_queue_nop(struct mhi_device *mhi_dev,
static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
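	/* Advance WP via a local variable so that a reader on another CPU
	 * never observes an out-of-range intermediate pointer (release
	 * note: "fix unsynchronized access to rp/wp when mhi_queue_xxx and
	 * mhi_process_xxx_ring run on different CPUs"). */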
ring->wp += ring->el_size;
if (ring->wp >= (ring->base + ring->len))
ring->wp = ring->base;
void *wp = ring->wp;
wp += ring->el_size;
if (wp >= (ring->base + ring->len))
wp = ring->base;
ring->wp = wp;
/* smp update */
smp_wmb();
}
@ -233,9 +249,11 @@ static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
ring->rp += ring->el_size;
if (ring->rp >= (ring->base + ring->len))
ring->rp = ring->base;
void *rp = ring->rp;
rp += ring->el_size;
if (rp >= (ring->base + ring->len))
rp = ring->base;
ring->rp = rp;
/* smp update */
smp_wmb();
}
@ -281,23 +299,24 @@ dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr)
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
{
dma_addr_t ctxt_wp;
void *rp, *wp;
/* update the WP */
ring->wp += ring->el_size;
ctxt_wp = *ring->ctxt_wp + ring->el_size;
if (ring->wp >= (ring->base + ring->len)) {
ring->wp = ring->base;
ctxt_wp = ring->iommu_base;
wp = ring->wp;
wp += ring->el_size;
if (wp >= (ring->base + ring->len)) {
wp = ring->base;
}
ring->wp = wp;
*ring->ctxt_wp = ctxt_wp;
*ring->ctxt_wp = ring->iommu_base + (ring->wp - ring->base);
/* update the RP */
ring->rp += ring->el_size;
if (ring->rp >= (ring->base + ring->len))
ring->rp = ring->base;
rp = ring->rp;
rp += ring->el_size;
if (rp >= (ring->base + ring->len))
rp = ring->base;
ring->rp = rp;
/* visible to other cores */
smp_wmb();
@ -359,6 +378,53 @@ void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
buf_info->p_addr);
}
#ifdef ENABLE_MHI_MON
static void mon_bus_submit(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_submit(r->r_data, chan, wp, mhi_tre, buf, len);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
static void mon_bus_receive(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_receive(r->r_data, chan, wp, mhi_tre, buf, len);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
static void mon_bus_complete(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_complete(r->r_data, chan, wp, mhi_tre);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
#endif
int mhi_queue_skb(struct mhi_device *mhi_dev,
struct mhi_chan *mhi_chan,
void *buf,
@ -420,29 +486,40 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
mon_bus_submit(mhi_cntrl, mhi_chan->chan,
mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0 : buf_info->len);
}
#endif
MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
(u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
mhi_tre->dword[0], mhi_tre->dword[1]);
if (mhi_chan->dir == DMA_TO_DEVICE) {
if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1)
mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
}
read_lock_bh(&mhi_chan->lock);
/* increment WP */
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
#ifdef DEBUG_CHAN100_DB
if (mhi_chan->chan == 100) {
chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (0<<30);
}
#endif
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
}
read_unlock_bh(&mhi_chan->lock);
if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake)
mhi_cntrl->wake_put(mhi_cntrl, true);
if (mhi_chan->dir == DMA_TO_DEVICE) {
unsigned used_elements = get_used_ring_elements(tre_ring->rp, tre_ring->wp, tre_ring->elements);
if (used_elements > mhi_chan->used_elements)
mhi_chan->used_elements = used_elements;
}
read_unlock_bh(&mhi_cntrl->pm_lock);
return 0;
@ -524,19 +601,31 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
}
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
mon_bus_submit(mhi_cntrl, mhi_chan->chan,
mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0: buf_info->len);
}
#endif
MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
(u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
mhi_tre->dword[0], mhi_tre->dword[1]);
if (mhi_chan->dir == DMA_TO_DEVICE) {
if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1)
mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
}
read_lock_bh(&mhi_chan->lock);
/* increment WP */
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
}
read_unlock_bh(&mhi_chan->lock);
if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake)
mhi_cntrl->wake_put(mhi_cntrl, true);
@ -583,6 +672,12 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl,
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
mon_bus_submit(mhi_cntrl, mhi_chan->chan,
mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0 : buf_info->len);
}
#endif
MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
(u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
mhi_tre->dword[0], mhi_tre->dword[1]);
@ -645,6 +740,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev,
mhi_cntrl->wake_get(mhi_cntrl, false);
}
if (mhi_chan->dir == DMA_TO_DEVICE) {
if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1)
mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
}
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
unsigned long flags;
@ -929,7 +1029,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
mhi_dev->chan_name);
mhi_dealloc_device(mhi_cntrl, mhi_dev);
}
sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group);
ret = sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group);
}
}
@ -1036,11 +1136,6 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
if (dev_rp >= (tre_ring->base + tre_ring->len))
dev_rp = tre_ring->base;
if (mhi_chan->dir == DMA_FROM_DEVICE) {
u32 used_elements = get_used_ring_elements(tre_ring->rp, dev_rp, tre_ring->elements);
if (used_elements > mhi_chan->used_elements)
mhi_chan->used_elements = used_elements;
}
mhi_chan->used_events[ev_code]++;
result.dir = mhi_chan->dir;
@ -1058,6 +1153,24 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
result.buf_addr = buf_info->cb_buf;
result.bytes_xferd = xfer_len;
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
void *buf = NULL;
size_t len = 0;
if (mhi_chan->queue_xfer == mhi_queue_skb) {
struct sk_buff *skb = result.buf_addr;
buf = skb->data;
len = result.bytes_xferd;
}
else if (CHAN_INBOUND(mhi_chan->chan)) {
buf = result.buf_addr;
len = result.bytes_xferd;
}
mon_bus_receive(mhi_cntrl, mhi_chan->chan,
mhi_to_physical(tre_ring, local_rp), local_rp, buf, len);
}
#endif
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
@ -1065,6 +1178,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
/* notify client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
if (mhi_chan->dir == DMA_TO_DEVICE) {
if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0)
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
}
if (mhi_chan->dir == DMA_TO_DEVICE) {
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_put(mhi_cntrl, false);
@ -1098,6 +1216,12 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan);
mhi_chan->db_cfg.db_mode = true;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
#ifdef DEBUG_CHAN100_DB
if (mhi_chan->chan == 100) {
chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->rp)&0xffff) | (0xf0000);
chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (1<<30);
}
#endif
if (tre_ring->wp != tre_ring->rp &&
MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) {
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
@ -1247,9 +1371,25 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp) {
enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
//#define QL_READ_EVENT_WA //from Quectel Windows driver
#ifdef QL_READ_EVENT_WA
if (mhi_event->er_index == 0) {
if (local_rp->ptr == 0 && local_rp->dword[0] == 0 && local_rp->dword[1] == 0) {
// event contents not yet synced to memory; break and wait for the next event.
MHI_ERR("Zero Event!\n");
break;
}
}
#endif
mhi_dump_tre(mhi_cntrl, local_rp);
MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp);
}
#endif
switch (type) {
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
@ -1350,12 +1490,23 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
break;
}
#ifdef QL_READ_EVENT_WA
if (mhi_event->er_index == 0) {
local_rp->ptr = 0;
local_rp->dword[0] = local_rp->dword[1] = 0;
}
#endif
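/*
 * Pairs with the zero-check at the top of the loop: each consumed element
 * is wiped so that a slot whose completion has not yet reached memory reads
 * back as all zeroes on the next pass, and processing stops until the
 * device's write becomes visible.
 */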
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
count++;
}
if (count > mhi_event->used_elements) {
mhi_event->used_elements = count;
}
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
mhi_ring_er_db(mhi_event);
@ -1376,11 +1527,9 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
int count = 0;
u32 chan;
struct mhi_chan *mhi_chan;
int used_elements = 0;
void *chan_local_rp = NULL, *evt_local_rp = NULL;
if (mhi_event->mhi_chan)
chan_local_rp = mhi_event->mhi_chan->tre_ring.rp;
struct mhi_chan *mhi_chan = NULL;
u32 chan_count = 0;
void *chan_local_rp = NULL;
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
MHI_ERR("No EV access, PM_STATE:%s\n",
@ -1390,7 +1539,6 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
local_rp = ev_ring->rp;
evt_local_rp = local_rp;
while (dev_rp != local_rp && event_quota > 0) {
enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
@ -1401,7 +1549,13 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
chan = MHI_TRE_GET_EV_CHID(local_rp);
mhi_chan = &mhi_cntrl->mhi_chan[chan];
chan_local_rp = mhi_chan->tre_ring.rp;
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp);
}
#endif
if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
event_quota--;
@ -1410,21 +1564,20 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
event_quota--;
}
chan_count += get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements);
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
if (local_rp == dev_rp || event_quota == 0) {
if (chan_count > mhi_chan->used_elements)
mhi_chan->used_elements = chan_count;
chan_count = 0;
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
}
count++;
}
used_elements = get_used_ring_elements(evt_local_rp, dev_rp, ev_ring->elements);
if (used_elements > mhi_event->used_elements)
mhi_event->used_elements = used_elements;
mhi_chan = mhi_event->mhi_chan;
if (chan_local_rp && mhi_chan && mhi_chan->dir == DMA_FROM_DEVICE) {
used_elements = get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements);
if (used_elements > mhi_chan->used_elements)
mhi_chan->used_elements = used_elements;
if (count > mhi_event->used_elements) {
mhi_event->used_elements = count;
}
read_lock_bh(&mhi_cntrl->pm_lock);
@ -1522,6 +1675,7 @@ void mhi_ev_task(unsigned long data)
MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data);
/* process all pending events */
spin_lock_bh(&mhi_event->lock);
mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
@ -1546,6 +1700,7 @@ void mhi_ctrl_ev_task(unsigned long data)
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
return;
mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data);
/* process ctrl events */
ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
@ -1608,8 +1763,9 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_get_exec_env(mhi_cntrl);
MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee),
TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state));
if (mhi_cntrl->msi_allocated >= 5 || (mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee)))
MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee),
TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state));
}
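/*
 * Log gating (inferred from the condition): with a single shared MSI this
 * handler fires for every event, so the state is only logged when it has
 * actually changed; with five or more vectors the handler fires rarely
 * enough that every invocation is worth logging.
 */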
if (state == MHI_STATE_SYS_ERR) {
@ -1633,14 +1789,11 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
else
schedule_work(&mhi_cntrl->syserr_worker);
}
if (mhi_cntrl->msi_allocated >= 5 || (mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee)))
MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee),
TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee));
MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee),
TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee));
if (state == MHI_STATE_READY && ee == MHI_EE_AMSS && mhi_cntrl->ee == MHI_EE_PTHRU) {
mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY);
}
else if (pm_state == MHI_PM_POR) {
if (pm_state == MHI_PM_POR) {
wake_up_all(&mhi_cntrl->state_event);
}
@ -1730,7 +1883,11 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
break;
}
#ifdef ENABLE_MHI_MON
if (mhi_cntrl->nreaders) {
mon_bus_submit(mhi_cntrl, 128, mhi_to_physical(ring, cmd_tre), cmd_tre, NULL, 0);
}
#endif
MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n",
(u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr,
cmd_tre->dword[0], cmd_tre->dword[1]);
@ -1791,7 +1948,6 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
mhi_cntrl->wake_get(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
if (ret) {
@ -1808,6 +1964,8 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
goto error_send_cmd;
}
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
write_lock_irq(&mhi_chan->lock);
mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
write_unlock_irq(&mhi_chan->lock);
@ -1858,6 +2016,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
return 0;
error_send_cmd:
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
@ -1931,6 +2090,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
while (tre_ring->rp != tre_ring->wp) {
struct mhi_buf_info *buf_info = buf_ring->rp;
if (mhi_chan->dir == DMA_TO_DEVICE) {
if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0)
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
}
if (mhi_chan->dir == DMA_TO_DEVICE)
mhi_cntrl->wake_put(mhi_cntrl, false);
if (!buf_info->pre_mapped)
@ -2028,7 +2192,6 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
if (ret) {
MHI_ERR("Failed to send reset chan cmd\n");
@ -2042,6 +2205,7 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
MHI_ERR("Failed to receive cmd completion, still resetting\n");
error_completion:
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
@ -2059,6 +2223,7 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
{
struct mhi_controller *mhi_cntrl = m->private;
int reg = 0;
int ret;
u32 val[4];
seq_printf(m,
@ -2078,20 +2243,20 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
seq_printf(m, "dump mhi reg addr:%p\n", mhi_cntrl->regs);
for (reg = 0; reg < 0x100; reg+=16) {
val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF;
mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]);
mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]);
mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]);
mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]);
seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]);
}
seq_printf(m, "dump bhi reg addr:%p\n", mhi_cntrl->bhi);
for (reg = 0; reg < 0x100; reg+=16) {
val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF;
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]);
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]);
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]);
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]);
seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]);
}
@ -2120,28 +2285,28 @@ int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d)
i, er_ctxt->intmodc, er_ctxt->intmodt,
er_ctxt->rbase, er_ctxt->rlen);
seq_printf(m,
" rp:0x%llx wp:0x%llx local_rp:0x%llx db:0x%llx\n",
" rp:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n",
er_ctxt->rp, er_ctxt->wp,
(unsigned long long)mhi_to_physical(ring, ring->rp),
(unsigned long long)mhi_to_physical(ring, ring->wp),
(unsigned long long)mhi_event->db_cfg.db_val);
seq_printf(m, "used:%u\n", mhi_event->used_elements);
}
}
#ifdef DEBUG_CHAN100_DB
if (mhi_event->mhi_chan && mhi_event->chan == 100) {
struct mhi_tre *tre = (struct mhi_tre *)ring->base;
size_t j;
#if 0
{
struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[PRIMARY_EVENT_RING];
struct mhi_ring *ring = &mhi_event->ring;
struct mhi_tre *tre = (struct mhi_tre *)ring->base;
size_t i;
for (i = 0; i < ring->elements; i++, tre++) {
seq_printf(m,
"%llx, %08x, %08x\n",
tre->ptr, tre->dword[0], tre->dword[1]);
for (j = 0; j < ring->elements; j++, tre++) {
seq_printf(m,
"%08x: %llx, %08x, %08x\n",
(unsigned int)(j*sizeof(struct mhi_tre)),
tre->ptr, tre->dword[0], tre->dword[1]);
}
}
#endif
}
}
#endif
return 0;
}
@ -2172,9 +2337,9 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d)
chan_ctxt->pollcfg, chan_ctxt->chtype,
chan_ctxt->erindex);
seq_printf(m,
" base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n",
" base:0x%llx len:0x%llx rp:%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n",
chan_ctxt->rbase, chan_ctxt->rlen,
chan_ctxt->wp,
chan_ctxt->rp, chan_ctxt->wp,
(unsigned long long)mhi_to_physical(ring, ring->rp),
(unsigned long long)mhi_to_physical(ring, ring->wp),
(unsigned long long)mhi_chan->db_cfg.db_val);
@ -2182,6 +2347,16 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d)
mhi_chan->used_events[MHI_EV_CC_EOB], mhi_chan->used_events[MHI_EV_CC_EOT],
mhi_chan->used_events[MHI_EV_CC_OOB],mhi_chan->used_events[MHI_EV_CC_DB_MODE]);
#ifdef DEBUG_CHAN100_DB
if (mhi_chan->chan == 100) {
unsigned int n = 0;
seq_printf(m, "chan100_seq = %04x\n", atomic_read(&chan100_seq)%CHAN100_SIZE);
for (n = 0; n < CHAN100_SIZE; n++) {
seq_printf(m, "%04x: %08x\n", n, chan100_t[n]);
}
}
#endif
#if 0
if (ring->base && /*(i&1) &&*/ (i < MHI_CLIENT_IP_HW_0_OUT)) {
struct mhi_tre *tre = (struct mhi_tre *)ring->base;

View File

@ -157,6 +157,15 @@ void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
unsigned long flags;
#if 1 //carl.yin 20200907, per the Windows driver: device wake only needs asserting in M2 state, and never needs deasserting
if (mhi_cntrl->dev_state == MHI_STATE_M2) {
spin_lock_irqsave(&mhi_cntrl->wlock, flags);
mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
return;
#endif
/* if set, regardless of count set the bit if not set */
if (unlikely(force)) {
spin_lock_irqsave(&mhi_cntrl->wlock, flags);
@ -188,6 +197,10 @@ void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
{
unsigned long flags;
#if 1 //carl.yin 20200907, per the Windows driver: device wake only needs asserting in M2 state, and never needs deasserting
return;
#endif
#if 1 //Add by Quectel
if (atomic_read(&mhi_cntrl->dev_wake) == 0)
return;
@ -423,6 +436,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
struct mhi_event *mhi_event;
MHI_LOG("Processing Mission Mode Transition\n");
mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_EE_MISSION_MODE);
/* force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
@ -553,12 +567,13 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
/* Set the numbers of Event Rings supported */
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
/*
* device clears INTVEC as part of RESET processing,
* re-program it
*/
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
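/*
 * BHI_INTVEC selects the MSI vector the device raises for BHI interrupts;
 * it now follows msi_irq_base instead of a hard-coded 0 (presumably for
 * platforms where the usable vectors do not start at 0).
 */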
}
MHI_LOG("Waiting for all pending event ring processing to complete\n");
@ -580,6 +595,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
MHI_LOG("Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
flush_delayed_work(&mhi_cntrl->ready_worker);
flush_work(&mhi_cntrl->st_worker);
flush_work(&mhi_cntrl->fw_worker);
@ -713,6 +729,27 @@ void mhi_pm_sys_err_worker(struct work_struct *work)
mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
}
void mhi_pm_ready_worker(struct work_struct *work)
{
struct mhi_controller *mhi_cntrl = container_of(work,
struct mhi_controller,
ready_worker.work);
enum mhi_ee ee = MHI_EE_MAX;
if (mhi_cntrl->dev_state != MHI_STATE_RESET)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (ee == MHI_EE_PTHRU)
schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10));
else if (ee == MHI_EE_AMSS || ee == MHI_EE_SBL)
mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY);
}
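/*
 * As seen from the callers: when power-up finds the device still in
 * PBL/PTHRU, mhi_pm_ready_worker() above re-reads the exec env every 10ms
 * instead of busy-waiting in mhi_async_power_up(), and queues the READY
 * transition once SBL or AMSS is reached.
 */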
void mhi_pm_st_worker(struct work_struct *work)
{
struct state_transition *itr, *tmp;
@ -794,7 +831,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
#if 1 //GLUE.SDX55_LE.1.0-00098-NOOP_TEST-1\common\hostdrivers\win\MhiHost MhiInitNewDev()
/* Check device Channels support */
mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, &regVal);
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, &regVal);
#if 0
val = MHI_READ_REG_FIELD(regVal, MHICFG, NCH);
MHI_LOG("Device CHs: %d\n", val);
@ -819,6 +856,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
mhi_cntrl->dev_state = MHI_STATE_RESET;
if (!mhi_cntrl->pre_init) {
/* setup device context */
@ -858,7 +896,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->bhie = mhi_cntrl->regs + val;
}
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
mhi_cntrl->pm_state = MHI_PM_POR;
mhi_cntrl->ee = MHI_EE_MAX;
current_ee = mhi_get_exec_env(mhi_cntrl);
@ -868,19 +906,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)),
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
if (current_ee == MHI_EE_PTHRU) {
for (val = 0; val < 30; val++) {
msleep(1);
current_ee = mhi_get_exec_env(mhi_cntrl);
if (current_ee != MHI_EE_PTHRU) {
MHI_LOG("dev_state:%s ee:%s\n",
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)),
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
break;
}
}
}
/* confirm device is in valid exec env */
if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
//MHI_ERR("Not a valid ee for power on\n");
@ -895,10 +920,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
//if (next_state == MHI_ST_TRANSITION_PBL)
// schedule_work(&mhi_cntrl->fw_worker);
mhi_queue_state_transition(mhi_cntrl, next_state);
if (next_state == MHI_ST_TRANSITION_PBL)
schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10));
else
mhi_queue_state_transition(mhi_cntrl, next_state);
mhi_init_debugfs(mhi_cntrl);
mhi_cntrl_register_miscdev(mhi_cntrl);
mutex_unlock(&mhi_cntrl->pm_mutex);
@ -940,7 +967,6 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
}
mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
mhi_cntrl_deregister_miscdev(mhi_cntrl);
mhi_deinit_debugfs(mhi_cntrl);
if (!mhi_cntrl->pre_init) {

File diff suppressed because it is too large Load Diff

View File

@ -8,6 +8,8 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/version.h>
#if 1
static inline void *ipc_log_context_create(int max_num_pages,
const char *modname, uint16_t user_version)
@ -53,12 +55,13 @@ struct uci_dev {
struct mhi_device *mhi_dev;
const char *chan;
struct mutex mutex; /* sync open and close */
struct mutex r_mutex;
struct mutex w_mutex;
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
int ref_count;
bool enabled;
void *ipc_log;
unsigned rx_error;
unsigned nr_trb;
unsigned nr_trbs;
@ -75,50 +78,22 @@ struct mhi_uci_drv {
dev_t dev_t;
};
enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
#ifdef CONFIG_MHI_DEBUG
#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE)
#define MHI_UCI_IPC_LOG_PAGES (25)
#else
#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR)
#define MHI_UCI_IPC_LOG_PAGES (1)
#endif
#ifdef CONFIG_MHI_DEBUG
static uint uci_msg_lvl = MHI_MSG_LVL_ERROR;
module_param(uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
#define MSG_VERB(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_VERBOSE)) \
ipc_log_string(uci_dev->ipc_log, "[D][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#else
#define MSG_VERB(fmt, ...)
#endif
#define MSG_LOG(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_INFO) \
if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_INFO)) \
ipc_log_string(uci_dev->ipc_log, "[I][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_ERROR) \
if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_ERROR)) \
ipc_log_string(uci_dev->ipc_log, "[E][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MAX_UCI_DEVICES (64)
@ -185,7 +160,20 @@ static int mhi_queue_inbound(struct uci_dev *uci_dev)
return ret;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#ifdef TCGETS2
static int kernel_termios_to_user_termios_1(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
static int user_termios_to_kernel_termios_1(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
#endif
#endif
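/*
 * Note: from v6.1 the asm-generic termios conversion helpers are no longer
 * visible to modules, so minimal replacements mirroring the old
 * copy_{to,from}_user based definitions are supplied above (assumption
 * drawn from the version guard).
 */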
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
@ -520,7 +508,7 @@ static ssize_t mhi_uci_read(struct file *file,
ret = -ERESTARTSYS;
if (ret) {
MSG_ERR("Failed to recycle element, ret=%d\n", ret);
MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret);
#if 0
kfree(uci_buf->data);
#endif
@ -540,6 +528,42 @@ read_error:
return ret;
}
static ssize_t mhi_uci_write_mutex(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->w_mutex); /* concurrent writes */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_write(file, buf, count, offp);
mutex_unlock(&uci_dev->w_mutex);
return ret;
}
static ssize_t mhi_uci_read_mutex(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->r_mutex); /* concurrent reads */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_read(file, buf, count, ppos);
mutex_unlock(&uci_dev->r_mutex);
return ret;
}
static int mhi_uci_open(struct inode *inode, struct file *filp)
{
struct uci_dev *uci_dev = NULL, *tmp_dev;
@ -617,8 +641,8 @@ error_exit:
static const struct file_operations mhidev_fops = {
.open = mhi_uci_open,
.release = mhi_uci_release,
.read = mhi_uci_read,
.write = mhi_uci_write,
.read = mhi_uci_read_mutex,
.write = mhi_uci_write_mutex,
.poll = mhi_uci_poll,
.unlocked_ioctl = mhi_uci_ioctl,
};
@ -681,6 +705,8 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
return -ENOMEM;
mutex_init(&uci_dev->mutex);
mutex_init(&uci_dev->r_mutex);
mutex_init(&uci_dev->w_mutex);
uci_dev->mhi_dev = mhi_dev;
minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
@ -694,10 +720,16 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
#if 1
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
if (mhi_dev->mhi_cntrl->cntrl_idx)
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s%d",
mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx);
else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
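/* On multi-modem systems cntrl_idx suffixes the node name so each
 * controller gets its own /dev/mhi_<chan_name><idx> entry. */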
#else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
@ -706,14 +738,13 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
mhi_dev->bus, mhi_dev->slot, "_pipe_",
mhi_dev->ul_chan_id);
#endif
set_bit(minor, uci_minors);
/* create debugging buffer */
snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
mhi_dev->ul_chan_id);
uci_dev->ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
node_name, 0);
for (dir = 0; dir < 2; dir++) {
struct uci_chan *uci_chan = (dir) ?
@ -800,9 +831,20 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
buf->data = mhi_result->buf_addr;
#endif
buf->len = mhi_result->bytes_xferd;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN || mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN)
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN)
{
struct uci_buf *tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
struct uci_buf *tmp_buf = NULL;
int skip_buf = 0;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count == 1)
skip_buf++;
#endif
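/*
 * With QUEC_MHI_UCI_ALWAYS_OPEN the device always holds one reference, so
 * ref_count == 1 means no real reader is attached and the extra copy is
 * skipped (presumably to avoid buffering data nobody will consume).
 */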
if (!skip_buf)
tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
if (tmp_buf) {
tmp_buf->page = NULL;
tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf);

View File

@ -18,7 +18,7 @@
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <nss_api_if.h>
#include <qca-nss-drv/nss_api_if.h>
#include <linux/rmnet_nss.h>