Feat: add quectel_MHI v1.3.8 driver support

sfwtw 2025-03-05 17:43:47 +08:00
parent 4d1c9e4fdd
commit fcef51b6c2
31 changed files with 20436 additions and 0 deletions

driver/quectel_MHI/Makefile Executable file

@ -0,0 +1,46 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=pcie_mhi
PKG_VERSION:=1.3.8
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/pcie_mhi
SUBMENU:=WWAN Support
TITLE:=Kernel pcie driver for MHI device
FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko
AUTOLOAD:=$(call AutoLoad,90,pcie_mhi)
endef
define KernelPackage/pcie_mhi/description
Kernel module that registers a custom pciemhi platform device.
endef
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \
$(MAKE_OPTS) \
modules
endef
$(eval $(call KernelPackage,pcie_mhi))
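
With the package Makefile above in place, the module builds like any other OpenWrt kernel package. A minimal sketch, assuming the driver directory has been copied (or symlinked) into package/ of an OpenWrt build tree; exact paths depend on your tree layout:

# select the module under: Kernel modules -> WWAN Support -> kmod-pcie_mhi
make menuconfig
# build only this package (directory name assumed to be quectel_MHI)
make package/quectel_MHI/compile V=s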


@ -0,0 +1,34 @@
#ccflags-y += -g
obj-m += pcie_mhi.o
pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o
pcie_mhi-objs += devices/mhi_uci.o
ifeq (1,1)
pcie_mhi-objs += devices/mhi_netdev_quectel.o
else
pcie_mhi-objs += devices/mhi_netdev.o
pcie_mhi-objs += devices/rmnet_handler.o
endif
PWD := $(shell pwd)
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif
pcie_mhi: clean
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
#cp pcie_mhi.ko /tftpboot/
clean:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
find . -name "*.o.ur-safe" | xargs rm -f
install: pcie_mhi
sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
sudo depmod
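
The module Makefile above also supports standalone out-of-tree builds; KDIR, ARCH and CROSS_COMPILE can be overridden on the command line. A hedged example (the toolchain prefix and kernel build directory are placeholders):

# native build against the running kernel's headers
make
# cross build, e.g. for an arm64 target
make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- KDIR=/path/to/target/linux
# install into the running system (native builds only)
sudo make install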


@ -0,0 +1,36 @@
1. Port the pcie_mhi driver as follows:
$ git diff drivers/Makefile
diff --git a/drivers/Makefile b/drivers/Makefile
index 77fbc52..e45837e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
+obj-y += pcie_mhi/
$ tree drivers/pcie_mhi/ -L 1
drivers/pcie_mhi/
controllers
core
devices
Makefile
2. Check that the RG500 attaches to the pcie_mhi driver successfully:
root@OpenWrt:/# lspci
00:00.0 Class 0604: 17cb:0302
01:00.0 Class ff00: 17cb:0306
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
3. For usage, see the following logs (a quick bring-up sketch follows the list):
log/QXDM_OVER_PCIE.txt
log/AT_OVER_PCIE.txt
log/MBIM_OVER_PCIE.txt
log/QMI_OVER_PCIE.txt
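
As a quick orientation before reading those logs, a typical bring-up after the driver has probed looks roughly like the sketch below. The quectel-CM invocation is taken from the release notes and is an assumption here, not part of this commit; <apn> is a placeholder.

# load the driver (skip if built in-tree as in step 1)
insmod pcie_mhi.ko
# confirm the MHI character devices appeared
ls /dev/mhi_*
# start a data call with Quectel's connection manager (see log/QMI_OVER_PCIE.txt)
quectel-CM -s <apn> &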


@ -0,0 +1,103 @@
Release Notes
[V1.3.4]
Date: 12/8/2022
enhancement:
1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
2. show PCIe link speed and width at driver probe
3. check PCIe link status by reading the PCIe VID and PID at driver probe;
if the PCIe link is down, return -EIO
4. support RM520 (1eac:1004)
5. support qmap command packet
fix:
1. fix TX queue being wrongly stopped during uplink TPUT testing
2. fix module occasionally failing to boot up after QFirehose
3. add a mutex lock in mhi_uci for concurrent reads/writes
[V1.3.3]
Date: 30/6/2022
enhancement:
1. remove an unnecessary kmalloc during QFirehose
2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
3. set ring size of event 0 to 256 (from 1024), required by x6x
4. support PCIE local network card mhi_swip0 (chan 46/47), default disabled
5. port IPQ5018 MHI rate control code from SPF11.5
6. set PCIe rmnet download max qmap packet size to 15KB (same as the IPQ MHI driver)
7. support setting a different MAC address for the rmnet net card
8. when mhi netdev fails to malloc, use delayed work instead of work
9. optimize handling of the case where the modem is still in MHI_EE_PTHRU when the driver loads
fix:
1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
2. set the DMA mask at driver probe; some SoCs such as rpi_4 need it
[V1.3.2]
Date: 12/16/2021
enhancement:
1. support Linux Kernel V5.14
2. mhi_netdev_quectel.c: do not print logs in softirq context
[V1.3.1]
Date: 9/26/2021
enhancement:
fix:
[V1.3.0.19]
Date: 9/18/2021
enhancement:
1. support sdx62 (17cb:0308)
2. support IPQ5018's NSS
3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c;
pcie_mhi.ko must then be loaded after rmnet_nss.ko
4. allow the BHI IRQ to be non-zero (for ipq5018)
fix:
[V1.3.0.18]
Date: 4/14/2021
enhancement:
1. support MBIM multiple calls, usage:
# insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
# quectel-mbim-proxy -d /dev/mhi_MBIM &
# quectel-CM -n X
fix:
[V1.3.0.17]
Date: 3/11/2021
enhancement:
fix:
1. fix very high CPU load during TPUT testing when only one MSI interrupt is available
2. fix error on latest X24 modem
[V1.3.0.16]
Date: 11/18/2020
enhancement:
fix:
1. increase ring size to 32; for in-bound channels, if one ring is full, the modem will not generate MSI interrupts for any channel
[V1.3.0.15]
Date: 10/30/2020
enhancement:
1. support multiple modems, named /dev/mhi_<chan_name>X
fix:
1. fix compile error on kernel v5.8
[V1.3.0.14]
Date: 10/9/2020
enhancement:
1. support EM120 & EM160
fix:
1. fix compile error on kernel v5.6
2. support runtime suspend
[V1.3.0.13]
Date: 9/7/2020
enhancement:
1. support EM120 & EM160
fix:
1. fix error on X55 + PCIe 2.0 (e.g. IPQ4019)
2. support runtime suspend
[V1.3.0.12]
Date: 7/7/2020
enhancement:
1. support creating only one net card (enabled by the macro MHI_NETDEV_ONE_CARD_MODE)
fix:


@ -0,0 +1,13 @@
menu "MHI controllers"
config MHI_QTI
tristate "MHI QTI"
depends on MHI_BUS
help
If you say yes to this option, MHI bus support for QTI modem chipsets
will be enabled. QTI PCIe based modems use MHI as the communication
protocol. The MHI control driver is the bus master for such modems. As the
bus master driver, it oversees power management operations such as
suspend, resume, and powering the device on and off.
endmenu
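
Note that this port builds everything unconditionally from the package Makefile, so the Kconfig entry above matters mainly when the driver is wired into a kernel tree that keeps the original Kconfig plumbing. In that case the option could be enabled roughly like this (a sketch; it assumes an MHI_BUS symbol is provided elsewhere in that tree):

# from the kernel source tree, after the pcie_mhi sources have been copied in
scripts/config --enable MHI_BUS --module MHI_QTI
make olddefconfig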


@ -0,0 +1 @@
obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o


@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/async.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msm-bus.h>
#include <linux/msm_pcie.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "../core/mhi.h"
#include "mhi_qti.h"
struct arch_info {
struct mhi_dev *mhi_dev;
struct msm_bus_scale_pdata *msm_bus_pdata;
u32 bus_client;
struct pci_saved_state *pcie_state;
struct pci_saved_state *ref_pcie_state;
struct dma_iommu_mapping *mapping;
};
struct mhi_bl_info {
struct mhi_device *mhi_device;
async_cookie_t cookie;
void *ipc_log;
};
/* ipc log markings */
#define DLOG "Dev->Host: "
#define HLOG "Host: "
#ifdef CONFIG_MHI_DEBUG
#define MHI_IPC_LOG_PAGES (100)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
#else
#define MHI_IPC_LOG_PAGES (10)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
#endif
static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
MHI_LOG("Setting bus request to index %d\n", index);
if (arch_info->bus_client)
return msm_bus_scale_client_update_request(
arch_info->bus_client,
index);
/* default return success */
return 0;
}
static void mhi_bl_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
char *buf = mhi_result->buf_addr;
/* force a null at last character */
buf[mhi_result->bytes_xferd - 1] = 0;
ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf);
}
static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
}
static void mhi_bl_remove(struct mhi_device *mhi_dev)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n");
/* wait for boot monitor to exit */
async_synchronize_cookie(mhi_bl_info->cookie + 1);
}
static void mhi_bl_boot_monitor(void *data, async_cookie_t cookie)
{
struct mhi_bl_info *mhi_bl_info = data;
struct mhi_device *mhi_device = mhi_bl_info->mhi_device;
struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
/* 15 sec timeout for booting device */
const u32 timeout = msecs_to_jiffies(15000);
/* wait for device to enter boot stage */
wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
|| mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
timeout);
if (mhi_cntrl->ee == MHI_EE_AMSS) {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device successfully booted to mission mode\n");
mhi_unprepare_from_transfer(mhi_device);
} else {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device failed to boot to mission mode, ee = %s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
}
}
static int mhi_bl_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
char node_name[32];
struct mhi_bl_info *mhi_bl_info;
mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info),
GFP_KERNEL);
if (!mhi_bl_info)
return -ENOMEM;
snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node_name, 0);
if (!mhi_bl_info->ipc_log)
return -EINVAL;
mhi_bl_info->mhi_device = mhi_dev;
mhi_device_set_devdata(mhi_dev, mhi_bl_info);
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Entered SBL, Session ID:0x%x\n",
mhi_dev->mhi_cntrl->session_id);
/* start a thread to monitor entering mission mode */
mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info);
return 0;
}
static const struct mhi_device_id mhi_bl_match_table[] = {
{ .chan = "BL" },
{},
};
static struct mhi_driver mhi_bl_driver = {
.id_table = mhi_bl_match_table,
.remove = mhi_bl_remove,
.probe = mhi_bl_probe,
.ul_xfer_cb = mhi_bl_dummy_cb,
.dl_xfer_cb = mhi_bl_dl_cb,
.driver = {
.name = "MHI_BL",
.owner = THIS_MODULE,
},
};
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
char node[32];
if (!arch_info) {
arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
sizeof(*arch_info), GFP_KERNEL);
if (!arch_info)
return -ENOMEM;
mhi_dev->arch_info = arch_info;
snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u",
mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
mhi_cntrl->slot);
mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node, 0);
mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
/* save reference state for pcie config space */
arch_info->ref_pcie_state = pci_store_saved_state(
mhi_dev->pci_dev);
mhi_driver_register(&mhi_bl_driver);
}
return mhi_arch_set_bus_request(mhi_cntrl, 1);
}
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
mhi_arch_set_bus_request(mhi_cntrl, 0);
}
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
if (graceful) {
pci_clear_master(pci_dev);
ret = pci_save_state(mhi_dev->pci_dev);
if (ret) {
MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
return ret;
}
arch_info->pcie_state = pci_store_saved_state(pci_dev);
pci_disable_device(pci_dev);
}
/*
* We will always attempt to put link into D3hot, however
* link down may have happened due to a fatal error, so
* ignoring the return code
*/
pci_set_power_state(pci_dev, PCI_D3hot);
/* release the resources */
mhi_arch_set_bus_request(mhi_cntrl, 0);
MHI_LOG("Exited\n");
return 0;
}
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
/* request resources and establish link training */
ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
if (ret)
MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
ret = pci_set_power_state(pci_dev, PCI_D0);
if (ret) {
MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Failed to enable device, ret:%d\n", ret);
return ret;
}
ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
if (ret)
MHI_LOG("Failed to load saved cfg state\n");
pci_restore_state(pci_dev);
pci_set_master(pci_dev);
MHI_LOG("Exited\n");
return 0;
}


@ -0,0 +1,715 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "../core/mhi.h"
#include "mhi_qcom.h"
#if 1
#ifndef PCI_IRQ_MSI
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 ))
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
int nvec = maxvec;
int rc;
if (maxvec < minvec)
return -ERANGE;
do {
rc = pci_enable_msi_block(dev, nvec);
if (rc < 0) {
return rc;
} else if (rc > 0) {
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
} while (rc);
return nvec;
}
#endif
static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
return pci_enable_msi_range(dev, min_vecs, max_vecs);
}
static void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msi(dev);
}
static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
return dev->irq + nr;
}
#endif
#endif
static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(0x2C7C, 0x0512)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};
MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);
static struct pci_driver mhi_pcie_driver;
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
pci_free_irq_vectors(pci_dev);
iounmap(mhi_cntrl->regs);
mhi_cntrl->regs = NULL;
pci_clear_master(pci_dev);
pci_release_region(pci_dev, mhi_dev->resn);
pci_disable_device(pci_dev);
}
static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
resource_size_t start, len;
int i;
mhi_dev->resn = MHI_PCI_BAR_NUM;
ret = pci_assign_resource(pci_dev, mhi_dev->resn);
if (ret) {
MHI_ERR("Error assign pci resources, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Error enabling device, ret:%d\n", ret);
goto error_enable_device;
}
ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
if (ret) {
MHI_ERR("Error pci_request_region, ret:%d\n", ret);
goto error_request_region;
}
pci_set_master(pci_dev);
start = pci_resource_start(pci_dev, mhi_dev->resn);
len = pci_resource_len(pci_dev, mhi_dev->resn);
mhi_cntrl->regs = ioremap_nocache(start, len);
MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs);
if (!mhi_cntrl->regs) {
MHI_ERR("Error ioremap region\n");
goto error_ioremap;
}
ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI);
if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
if (ret == -ENOSPC) {
/* imx_3.14.52_1.1.0_ga
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index f06e8f0..6a9614f 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
if (msgvec > 5)
msgvec = 0;
+#if 1 //Add by Quectel 20190419
+ if (msgvec > 0 && pdev->vendor == 0x17cb) {
+ dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec);
+ msgvec = 0;
+ }
+#endif
+
irq = assign_irq((1 << msgvec), desc, &pos);
if (irq < 0)
return irq;
*/
}
//imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device
if (ret != 1) {
MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
goto error_req_msi;
}
}
mhi_cntrl->msi_allocated = ret;
MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq);
for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
if (mhi_cntrl->irq[i] < 0) {
ret = mhi_cntrl->irq[i];
goto error_get_irq_vec;
}
}
#if 0
/* configure runtime pm */
pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
pm_runtime_dont_use_autosuspend(&pci_dev->dev);
pm_suspend_ignore_children(&pci_dev->dev, true);
/*
* pci framework will increment usage count (twice) before
* calling local device driver probe function.
* 1st pci.c pci_pm_init() calls pm_runtime_forbid
* 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
* The framework expects the pci device driver to call
* pm_runtime_put_noidle to decrement the usage count after a
* successful probe and to call pm_runtime_allow to enable
* runtime suspend.
*/
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_put_noidle(&pci_dev->dev);
#endif
return 0;
error_get_irq_vec:
pci_free_irq_vectors(pci_dev);
error_req_msi:
iounmap(mhi_cntrl->regs);
error_ioremap:
pci_clear_master(pci_dev);
error_request_region:
pci_disable_device(pci_dev);
error_enable_device:
pci_release_region(pci_dev, mhi_dev->resn);
return ret;
}
#ifdef CONFIG_PM
static int mhi_runtime_idle(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered returning -EBUSY\n");
/*
* RPM framework during runtime resume always calls
* rpm_idle to see if device ready to suspend.
* If the dev.power usage_count is 0, the rpm fw will call
* rpm_idle cb to see if device is ready to suspend.
* if cb return 0, or cb not defined the framework will
* assume device driver is ready to suspend;
* therefore, fw will schedule runtime suspend.
* In MHI power management, MHI host shall go to
* runtime suspend only after entering MHI State M2, even if
* usage count is 0. Return -EBUSY to disable automatic suspend.
*/
return -EBUSY;
}
static int mhi_runtime_suspend(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
ret = mhi_pm_suspend(mhi_cntrl);
if (ret) {
MHI_LOG("Abort due to ret:%d\n", ret);
goto exit_runtime_suspend;
}
ret = mhi_arch_link_off(mhi_cntrl, true);
if (ret)
MHI_ERR("Failed to Turn off link ret:%d\n", ret);
exit_runtime_suspend:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with ret:%d\n", ret);
return ret;
}
static int mhi_runtime_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
if (!mhi_dev->powered_on) {
MHI_LOG("Not fully powered, return success\n");
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
}
/* turn on link */
ret = mhi_arch_link_on(mhi_cntrl);
if (ret)
goto rpm_resume_exit;
/* enter M0 state */
ret = mhi_pm_resume(mhi_cntrl);
rpm_resume_exit:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with :%d\n", ret);
return ret;
}
static int mhi_system_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
ret = mhi_runtime_resume(dev);
if (ret) {
MHI_ERR("Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return ret;
}
int mhi_system_suspend(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev))
return mhi_runtime_suspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
MHI_LOG("Exit\n");
return 0;
}
#endif
/* checks if link is down */
static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
u16 dev_id;
int ret;
/* try reading device id; if the dev id doesn't match, the link is down */
ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
}
static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
return pm_runtime_get(dev);
}
static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_put_noidle(dev);
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
void *priv,
enum MHI_CB reason)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
if (reason == MHI_CB_IDLE) {
MHI_LOG("Schedule runtime suspend 1\n");
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
}
}
int mhi_debugfs_trigger_m0(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Exit\n");
pm_runtime_get(&mhi_dev->pci_dev->dev);
pm_runtime_put(&mhi_dev->pci_dev->dev);
return 0;
}
int mhi_debugfs_trigger_m3(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Entry\n");
pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
pm_request_autosuspend(&mhi_dev->pci_dev->dev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
mhi_debugfs_trigger_m0, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
mhi_debugfs_trigger_m3, "%llu\n");
static int mhi_init_debugfs_trigger_go(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
MHI_LOG("Trigger power up sequence\n");
mhi_async_power_up(mhi_cntrl);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL,
mhi_init_debugfs_trigger_go, "%llu\n");
int mhi_init_debugfs_debug_show(struct seq_file *m, void *d)
{
seq_puts(m, "Enable debug mode to debug external soc\n");
seq_puts(m,
"Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n");
seq_puts(m, "No spaces between parameters\n");
seq_puts(m, "\t1. devid : 0 or pci device id to register\n");
seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n");
seq_puts(m, "\t3. domain: Rootcomplex\n");
seq_puts(m, "\t4. smmu_cfg: smmu configuration mask:\n");
seq_puts(m, "\t\t- BIT0: ATTACH\n");
seq_puts(m, "\t\t- BIT1: S1 BYPASS\n");
seq_puts(m, "\t\t-BIT2: FAST_MAP\n");
seq_puts(m, "\t\t-BIT3: ATOMIC\n");
seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n");
seq_puts(m, "\t\t-BIT5: GEOMETRY\n");
seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n");
seq_puts(m, "Examples inputs: '0x307,10000'\n");
seq_puts(m, "\techo '0,10000,1'\n");
seq_puts(m, "\techo '0x307,10000,0,0x3d'\n");
seq_puts(m, "firmware image name will be changed to debug.mbn\n");
return 0;
}
static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file)
{
return single_open(file, mhi_init_debugfs_debug_show, NULL);
}
static ssize_t mhi_init_debugfs_debug_write(struct file *fp,
const char __user *ubuf,
size_t count,
loff_t *pos)
{
char *buf = kmalloc(count + 1, GFP_KERNEL);
/* #,devid,timeout,domain,smmu-cfg */
int args[5] = {0};
static char const *dbg_fw = "debug.mbn";
int ret;
struct mhi_controller *mhi_cntrl = fp->f_inode->i_private;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_device_id *id;
if (!buf)
return -ENOMEM;
ret = copy_from_user(buf, ubuf, count);
if (ret)
goto error_read;
buf[count] = 0;
get_options(buf, ARRAY_SIZE(args), args);
kfree(buf);
/* override default parameters */
mhi_cntrl->fw_image = dbg_fw;
mhi_cntrl->edl_image = dbg_fw;
if (args[0] >= 2 && args[2])
mhi_cntrl->timeout_ms = args[2];
if (args[0] >= 3 && args[3])
mhi_cntrl->domain = args[3];
if (args[0] >= 4 && args[4])
mhi_dev->smmu_cfg = args[4];
/* If it's a new device id register it */
if (args[0] && args[1]) {
/* find the debug_id and overwrite it */
for (id = mhi_pcie_device_id; id->vendor; id++)
if (id->device == MHI_PCIE_DEBUG_ID) {
id->device = args[1];
pci_unregister_driver(&mhi_pcie_driver);
ret = pci_register_driver(&mhi_pcie_driver);
}
}
mhi_dev->debug_mode = true;
debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl,
&mhi_init_debugfs_trigger_go_fops);
pr_info(
"%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n",
__func__, ret, args[1], mhi_dev->smmu_cfg,
mhi_cntrl->timeout_ms);
return count;
error_read:
kfree(buf);
return ret;
}
static const struct file_operations debugfs_debug_ops = {
.open = mhi_init_debugfs_debug_open,
.release = single_release,
.read = seq_read,
.write = mhi_init_debugfs_debug_write,
};
static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl;
struct mhi_dev *mhi_dev;
u64 addr_win[2];
int ret;
mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
if (!mhi_cntrl) {
pr_err("mhi_alloc_controller fail\n");
return NULL;
}
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev_id = pci_dev->device;
mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
mhi_cntrl->bus = pci_dev->bus->number;
mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
mhi_dev->smmu_cfg = 0;
#if 0 //def CONFIG_HAVE_MEMBLOCK
addr_win[0] = memblock_start_of_DRAM();
addr_win[1] = memblock_end_of_DRAM();
#else
#define MHI_MEM_BASE_DEFAULT 0x000000000
#define MHI_MEM_SIZE_DEFAULT 0x2000000000
addr_win[0] = MHI_MEM_BASE_DEFAULT;
addr_win[1] = MHI_MEM_SIZE_DEFAULT;
if (sizeof(dma_addr_t) == 4) {
addr_win[1] = 0xFFFFFFFF;
}
#endif
mhi_cntrl->iova_start = addr_win[0];
mhi_cntrl->iova_stop = addr_win[1];
mhi_dev->pci_dev = pci_dev;
mhi_cntrl->pci_dev = pci_dev;
/* setup power management apis */
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->link_status = mhi_link_status;
ret = mhi_arch_platform_init(mhi_dev);
if (ret)
goto error_probe;
ret = mhi_register_mhi_controller(mhi_cntrl);
if (ret)
goto error_register;
if (mhi_cntrl->parent)
debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent,
mhi_cntrl, &debugfs_debug_ops);
return mhi_cntrl;
error_register:
mhi_arch_platform_deinit(mhi_dev);
error_probe:
mhi_free_controller(mhi_cntrl);
return NULL;
}
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id)
{
struct mhi_controller *mhi_cntrl = NULL;
u32 domain = pci_domain_nr(pci_dev->bus);
u32 bus = pci_dev->bus->number;
u32 slot = PCI_SLOT(pci_dev->devfn);
struct mhi_dev *mhi_dev;
int ret;
pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
mhi_cntrl = mhi_platform_probe(pci_dev);
if (!mhi_cntrl) {
pr_err("mhi_platform_probe fail\n");
return -EPROBE_DEFER;
}
mhi_cntrl->dev_id = pci_dev->device;
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_dev->pci_dev = pci_dev;
mhi_dev->powered_on = true;
ret = mhi_arch_pcie_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret);
return ret;
}
ret = mhi_arch_iommu_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret);
goto error_iommu_init;
}
ret = mhi_init_pci_dev(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret);
goto error_init_pci;
}
/* start power up sequence if not in debug mode */
if (!mhi_dev->debug_mode) {
ret = mhi_async_power_up(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret);
goto error_power_up;
}
}
#if 0
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_allow(&pci_dev->dev);
pm_runtime_disable(&pci_dev->dev);
#endif
if (mhi_cntrl->dentry) {
debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m0_fops);
debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m3_fops);
}
dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
MHI_LOG("Return successful\n");
return 0;
error_power_up:
mhi_deinit_pci_dev(mhi_cntrl);
error_init_pci:
mhi_arch_iommu_deinit(mhi_cntrl);
error_iommu_init:
mhi_arch_pcie_deinit(mhi_cntrl);
return ret;
}
static void mhi_pci_remove(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev);
if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) {
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("%s\n", dev_name(&pci_dev->dev));
if (!mhi_dev->debug_mode) {
mhi_power_down(mhi_cntrl, 1);
}
mhi_deinit_pci_dev(mhi_cntrl);
mhi_arch_iommu_deinit(mhi_cntrl);
mhi_arch_pcie_deinit(mhi_cntrl);
mhi_unregister_mhi_controller(mhi_cntrl);
}
}
static const struct dev_pm_ops pm_ops = {
SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
mhi_runtime_resume,
mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
};
static struct pci_driver mhi_pcie_driver = {
.name = "mhi",
.id_table = mhi_pcie_device_id,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
.driver = {
.pm = &pm_ops
}
};
int __init mhi_controller_qcom_init(void)
{
return pci_register_driver(&mhi_pcie_driver);
};
void mhi_controller_qcom_exit(void)
{
pr_info("%s enter\n", __func__);
pci_unregister_driver(&mhi_pcie_driver);
pr_info("%s exit\n", __func__);
}


@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

File diff suppressed because it is too large


@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#ifndef _MHI_QTI_
#define _MHI_QTI_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
/* runtime suspend timer */
#define MHI_RPM_SUSPEND_TMR_MS (2000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
dma_addr_t iova_start;
dma_addr_t iova_stop;
bool lpm_disabled;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
void mhi_pci_device_removed(struct pci_dev *pci_dev);
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful);
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl);
#endif /* _MHI_QTI_ */


@ -0,0 +1 @@
obj-$(CONFIG_MHI_BUS) += mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o


@ -0,0 +1,908 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#ifndef _MHI_H_
#define _MHI_H_
#define PCIE_MHI_DRIVER_VERSION "V1.3.8"
#define ENABLE_MHI_MON
//#define ENABLE_IP_SW0
// #define ENABLE_ADPL
// #define ENABLE_QDSS
#include <linux/miscdevice.h>
typedef enum
{
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_QMI_2_OUT = 16,
MHI_CLIENT_QMI_2_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_IPCR_OUT = 20,
MHI_CLIENT_IPCR_IN = 21,
MHI_CLIENT_TEST_FW_OUT = 22,
MHI_CLIENT_TEST_FW_IN = 23,
MHI_CLIENT_RESERVED_0 = 24,
MHI_CLIENT_BOOT_LOG_IN = 25,
MHI_CLIENT_DCI_OUT = 26,
MHI_CLIENT_DCI_IN = 27,
MHI_CLIENT_QBI_OUT = 28,
MHI_CLIENT_QBI_IN = 29,
MHI_CLIENT_RESERVED_1_LOWER = 30,
MHI_CLIENT_RESERVED_1_UPPER = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_EDL_OUT = 34,
MHI_CLIENT_EDL_IN = 35,
MHI_CLIENT_ADB_FB_OUT = 36,
MHI_CLIENT_ADB_FB_IN = 37,
MHI_CLIENT_RESERVED_2_LOWER = 38,
MHI_CLIENT_RESERVED_2_UPPER = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_0_OUT = 46,
MHI_CLIENT_IP_SW_0_IN = 47,
MHI_CLIENT_IP_SW_1_OUT = 48,
MHI_CLIENT_IP_SW_1_IN = 49,
MHI_CLIENT_RESERVED_3_LOWER = 50,
MHI_CLIENT_RESERVED_3_UPPER = 59,
MHI_CLIENT_TEST_0_OUT = 60,
MHI_CLIENT_TEST_0_IN = 61,
MHI_CLIENT_TEST_1_OUT = 62,
MHI_CLIENT_TEST_1_IN = 63,
MHI_CLIENT_TEST_2_OUT = 64,
MHI_CLIENT_TEST_2_IN = 65,
MHI_CLIENT_TEST_3_OUT = 66,
MHI_CLIENT_TEST_3_IN = 67,
MHI_CLIENT_RESERVED_4_LOWER = 68,
MHI_CLIENT_RESERVED_4_UPPER = 91,
MHI_CLIENT_OEM_0_OUT = 92,
MHI_CLIENT_OEM_0_IN = 93,
MHI_CLIENT_OEM_1_OUT = 94,
MHI_CLIENT_OEM_1_IN = 95,
MHI_CLIENT_OEM_2_OUT = 96,
MHI_CLIENT_OEM_2_IN = 97,
MHI_CLIENT_OEM_3_OUT = 98,
MHI_CLIENT_OEM_3_IN = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_ADPL = 102,
MHI_CLIENT_IP_HW_QDSS = 103,
// MHI_CLIENT_RESERVED_5_LOWER = 103,
MHI_CLIENT_RESERVED_5_UPPER = 127,
MHI_MAX_CHANNELS = 128
}MHI_CLIENT_CHANNEL_TYPE;
/* Event Ring Index */
typedef enum
{
SW_EVT_RING = 0,
PRIMARY_EVENT_RING = SW_EVT_RING,
#ifdef ENABLE_IP_SW0
SW_0_OUT_EVT_RING,
SW_0_IN_EVT_RING,
#endif
IPA_OUT_EVENT_RING,
IPA_IN_EVENT_RING,
#ifdef ENABLE_ADPL
ADPL_EVT_RING,
#endif
#ifdef ENABLE_QDSS
QDSS_EVT_RING,
#endif
MAX_EVT_RING_IDX
}MHI_EVT_RING_IDX;
#define MHI_VERSION 0x01000000
#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */
#define MHI_MSI_INDEX 1
#define MAX_NUM_MHI_DEVICES 1
#define NUM_MHI_XFER_RINGS 128
#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX
#define NUM_MHI_HW_EVT_RINGS 4
#define NUM_MHI_XFER_RING_ELEMENTS 16
#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) //must be *2; a full event ring will make the x55 dump
#define NUM_MHI_IPA_IN_RING_ELEMENTS 512
#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 //do not use UL aggregation, so increased
#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
#define NUM_MHI_SW_IP_RING_ELEMENTS 512
#ifdef ENABLE_ADPL
#define NUM_MHI_ADPL_RING_ELEMENTS 256
#endif
#ifdef ENABLE_QDSS
#define NUM_MHI_QDSS_RING_ELEMENTS 256
#endif
/*
* If the interrupt moderation time is set to 1 ms and more than
* NUM_MHI_CHAN_RING_ELEMENTS transfers are sent to the modem within 1 ms
* (e.g. a firehose upgrade), the modem will not trigger an IRQ for those
* transfers.
*/
#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
#define MHI_EVT_CMD_QUEUE_SIZE 160
#define MHI_EVT_STATE_QUEUE_SIZE 128
#define MHI_EVT_XFER_QUEUE_SIZE 1024
#define CHAN_INBOUND(_x) ((_x)%2)
#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
((_x) == MHI_CLIENT_SAHARA_IN) || \
((_x) == MHI_CLIENT_BOOT_LOG_IN))
#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
((_x) == MHI_CLIENT_EDL_IN))
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_timesync;
struct mhi_buf_info;
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
* @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
* @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
* @MHI_CB_SYS_ERROR: MHI device enter error state (may recover)
* @MHI_CB_FATAL_ERROR: MHI device entered fatal error
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
};
/**
* enum MHI_DEBUG_LEVEL - various debugging levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
MHI_MSG_LVL_INFO,
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
};
/*
GSI_XFER_FLAG_BEI: Block event interrupt
1: Event generated by this ring element must not assert an interrupt to the host
0: Event generated by this ring element must assert an interrupt to the host
GSI_XFER_FLAG_EOT: Interrupt on end of transfer
1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT.
0: If an EOT condition is encountered for this ring element, a completion event is not generated by the device, unless IEOB is 1
GSI_XFER_FLAG_EOB: Interrupt on end of block
1: Device notifies host after processing this ring element by sending a completion event
0: Completion event is not required after processing this ring element
GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD
*/
/**
* enum MHI_FLAGS - Transfer flags
* @MHI_EOB: End of buffer for bulk transfer
* @MHI_EOT: End of transfer
* @MHI_CHAIN: Linked transfer
*/
enum MHI_FLAGS {
MHI_EOB,
MHI_EOT,
MHI_CHAIN,
};
/**
* enum mhi_device_type - Device types
* @MHI_XFER_TYPE: Handles data transfer
* @MHI_TIMESYNC_TYPE: Use for timesync feature
* @MHI_CONTROLLER_TYPE: Control device
*/
enum mhi_device_type {
MHI_XFER_TYPE,
MHI_TIMESYNC_TYPE,
MHI_CONTROLLER_TYPE,
};
/**
* enum mhi_ee - device current execution enviornment
* @MHI_EE_PBL - device in PBL
* @MHI_EE_SBL - device in SBL
* @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
* @MHI_EE_RDDM - device in ram dump collection mode
* @MHI_EE_WFW - device in WLAN firmware mode
* @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
* @MHI_EE_EDL - device in emergency download mode
*/
enum mhi_ee {
MHI_EE_PBL = 0x0,
MHI_EE_SBL = 0x1,
MHI_EE_AMSS = 0x2,
MHI_EE_RDDM = 0x3,
MHI_EE_WFW = 0x4,
MHI_EE_PTHRU = 0x5,
MHI_EE_EDL = 0x6,
MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */
MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
MHI_EE_MAX,
};
/**
* enum mhi_dev_state - device current MHI state
*/
enum mhi_dev_state {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
MHI_STATE_M0 = 0x2,
MHI_STATE_M1 = 0x3,
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
MHI_STATE_SYS_ERR = 0xFF,
MHI_STATE_MAX,
};
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
/**
* struct image_info - firmware and rddm table
* @mhi_buf - Contain device firmware and rddm table
* @entries - # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
struct bhi_vec_entry *bhi_vec;
u32 entries;
};
/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
* @regs: Points to base of MHI MMIO register space
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
* @dev_id: PCIe device id of the external device
* @domain: PCIe domain the device connected to
* @bus: PCIe bus the device assigned to
* @slot: PCIe slot for the modem
* @iova_start: IOMMU starting address for data
* @iova_stop: IOMMU stop address for data
* @fw_image: Firmware image name for normal booting
* @edl_image: Firmware image name for emergency download mode
* @fbc_download: MHI host needs to do complete image transfer
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size
* @seg_len: BHIe vector size
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
* @max_chan: Maximum number of channels controller support
* @mhi_chan: Points to channel configuration table
* @lpm_chans: List of channels that require LPM notifications
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
* @msi_required: Number of msi required to operate
* @msi_allocated: Number of msi allocated by bus master
* @irq: base irq # to request
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @timeout_ms: Timeout in ms for state transitions
* @pm_state: Power management state
* @ee: MHI device execution environment
* @dev_state: MHI STATE
* @status_cb: CB function to notify various power states to bus master
* @link_status: Query link status in case of abnormal value read from device
* @runtime_get: Async runtime resume function
* @runtime_put: Release votes
* @time_get: Return host time in us
* @lpm_disable: Request controller to disable link level low power modes
* @lpm_enable: Controller may enable link level low power modes again
* @priv_data: Points to bus master's private data
*/
struct mhi_controller {
struct list_head node;
struct mhi_device *mhi_dev;
/* device node for iommu ops */
struct device *dev;
struct device_node *of_node;
/* mmio base */
phys_addr_t base_addr;
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
/* device topology */
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
u32 cntrl_idx;
struct device *cntrl_dev;
/* addressing window */
dma_addr_t iova_start;
dma_addr_t iova_stop;
/* fw images */
const char *fw_image;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
bool fbc_download;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
u32 session_id;
u32 sequence_id;
struct image_info *fbc_image;
struct image_info *rddm_image;
/* physical channel config data */
u32 max_chan;
struct mhi_chan *mhi_chan;
struct list_head lpm_chans; /* these chan require lpm notification */
/* physical event config data */
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
u32 msi_irq_base;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
/* cmd rings */
struct mhi_cmd *mhi_cmd;
/* mhi context (shared with device) */
struct mhi_ctxt *mhi_ctxt;
u32 timeout_ms;
/* caller should grab pm_mutex for suspend/resume operations */
struct mutex pm_mutex;
bool pre_init;
rwlock_t pm_lock;
u32 pm_state;
enum mhi_ee ee;
enum mhi_dev_state dev_state;
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
atomic_t pending_pkts;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
/* debug counters */
u32 M0, M2, M3;
/* worker for different state transitions */
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
struct delayed_work ready_worker;
wait_queue_head_t state_event;
/* shadow functions */
void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv,
enum MHI_CB reason);
int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv);
void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv);
u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*map_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
void (*unmap_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
/* bounce buffer settings */
bool bounce_buf;
size_t buffer_len;
/* supports time sync feature */
struct mhi_timesync *mhi_tsync;
struct mhi_device *tsync_dev;
/* kernel log level */
enum MHI_DEBUG_LEVEL klog_lvl;
int klog_slient;
/* private log level controller driver to set */
enum MHI_DEBUG_LEVEL log_lvl;
/* controller specific data */
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
struct miscdevice miscdev;
#ifdef ENABLE_MHI_MON
spinlock_t lock;
/* Ref */
int nreaders; /* Under mon_lock AND mbus->lock */
struct list_head r_list; /* Chain of readers (usually one) */
struct kref ref; /* Under mon_lock */
/* Stats */
unsigned int cnt_events;
unsigned int cnt_text_lost;
#endif
};
#ifdef ENABLE_MHI_MON
struct mhi_tre;
struct mon_reader {
struct list_head r_link;
struct mhi_controller *m_bus;
void *r_data; /* Use container_of instead? */
void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
};
#endif
/**
* struct mhi_device - mhi device structure associated bind to channel
* @dev: Device associated with the channels
* @mtu: Maximum # of bytes controller support
* @ul_chan_id: MHI channel id for UL transfer
* @dl_chan_id: MHI channel id for DL transfer
* @tiocm: Device current terminal settings
* @priv: Driver private data
*/
struct mhi_device {
struct device dev;
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
size_t mtu;
int ul_chan_id;
int dl_chan_id;
int ul_event_id;
int dl_event_id;
u32 tiocm;
const struct mhi_device_id *id;
const char *chan_name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
atomic_t dev_wake;
enum mhi_device_type dev_type;
void *priv_data;
int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t size, enum MHI_FLAGS flags);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
};
/**
* struct mhi_result - Completed buffer information
* @buf_addr: Address of data buffer
* @dir: Channel direction
* @bytes_xferd: # of bytes transferred
* @transaction_status: Status of the last transfer
*/
struct mhi_result {
void *buf_addr;
enum dma_data_direction dir;
size_t bytes_xferd;
int transaction_status;
};
/**
* struct mhi_buf - Describes the buffer
* @page: buffer as a page
* @buf: cpu address for the buffer
* @phys_addr: physical address of the buffer
* @dma_addr: iommu address for the buffer
* @skb: skb of ip packet
* @len: # of bytes
* @name: Buffer label, for offload channel configurations name must be:
* ECA - Event context array data
* CCA - Channel context array data
*/
struct mhi_buf {
struct list_head node;
struct page *page;
void *buf;
phys_addr_t phys_addr;
dma_addr_t dma_addr;
struct sk_buff *skb;
size_t len;
const char *name; /* ECA, CCA */
};
/**
* struct mhi_driver - mhi driver information
* @id_table: NULL terminated channel ID names
* @ul_xfer_cb: UL data transfer callback
* @dl_xfer_cb: DL data transfer callback
* @status_cb: Asynchronous status callback
*/
struct mhi_driver {
const struct mhi_device_id *id_table;
int (*probe)(struct mhi_device *mhi_dev,
const struct mhi_device_id *id);
void (*remove)(struct mhi_device *mhi_dev);
void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
struct device_driver driver;
};
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
void *priv)
{
mhi_dev->priv_data = priv;
}
static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
return mhi_dev->priv_data;
}
/**
* mhi_queue_transfer - Queue a buffer to hardware
* All transfers are asynchronous
* @mhi_dev: Device associated with the channels
* @dir: Data direction
* @buf: Data buffer (skb for hardware channels)
* @len: Size in bytes
* @mflags: Interrupt flags for the device
*/
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
void *buf,
size_t len,
enum MHI_FLAGS mflags)
{
if (dir == DMA_TO_DEVICE)
return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
mflags);
else
return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
mflags);
}
static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
return mhi_cntrl->priv_data;
}
static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
/**
* mhi_driver_register - Register driver with MHI framework
* @mhi_drv: mhi_driver structure
*/
int mhi_driver_register(struct mhi_driver *mhi_drv);
/**
* mhi_driver_unregister - Unregister a driver for mhi_devices
* @mhi_drv: mhi_driver structure
*/
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
/**
* mhi_device_configure - configure ECA or CCA context
* For offload channels that client manage, call this
* function to configure channel context or event context
* array associated with the channel
* @mhi_div: Device associated with the channels
* @dir: Direction of the channel
* @mhi_buf: Configuration data
* @elements: # of configuration elements
*/
int mhi_device_configure(struct mhi_device *mhi_div,
enum dma_data_direction dir,
struct mhi_buf *mhi_buf,
int elements);
/**
* mhi_device_get - disable all low power modes
* Only disables lpm, does not immediately exit low power mode
* if controller already in a low power mode
* @mhi_dev: Device associated with the channels
*/
void mhi_device_get(struct mhi_device *mhi_dev);
/**
* mhi_device_get_sync - disable all low power modes
* Synchronously disable all low power, exit low power mode if
* controller already in a low power state
* @mhi_dev: Device associated with the channels
*/
int mhi_device_get_sync(struct mhi_device *mhi_dev);
/**
* mhi_device_put - re-enable low power modes
* @mhi_dev: Device associated with the channels
*/
void mhi_device_put(struct mhi_device *mhi_dev);
/**
* mhi_prepare_for_transfer - setup channel for data transfer
* Moves both UL and DL channel from RESET to START state
* @mhi_dev: Device associated with the channels
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
* mhi_unprepare_from_transfer -unprepare the channels
* Moves both UL and DL channels to RESET state
* @mhi_dev: Device associated with the channels
*/
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_get_no_free_descriptors - Get transfer ring length
* Get # of TD available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
/**
* mhi_poll - poll for any available data to consume
* This is only applicable for DL direction
* @mhi_dev: Device associated with the channels
* @budget: Number of descriptors to service before returning
*/
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
/**
* mhi_ioctl - user space IOCTL support for MHI channels
* Native support for setting TIOCM
* @mhi_dev: Device associated with the channels
* @cmd: IOCTL cmd
* @arg: Optional parameter, ioctl cmd specific
*/
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
/**
* mhi_alloc_controller - Allocate mhi_controller structure
* Allocate controller structure and additional data for controller
* private data. You may get the private data pointer by calling
* mhi_controller_get_devdata
* @size: # of additional bytes to allocate
*/
struct mhi_controller *mhi_alloc_controller(size_t size);
/**
* of_register_mhi_controller - Register MHI controller
* Registers MHI controller with MHI bus framework. DT must be supported
* @mhi_cntrl: MHI controller to register
*/
int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
/**
* mhi_bdf_to_controller - Look up a registered controller
* Search for controller based on device identification
* @domain: RC domain of the device
* @bus: Bus device connected to
* @slot: Slot device assigned to
* @dev_id: Device Identification
*/
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
u32 dev_id);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up
* This is optional; call this before power up if the controller does not
* want the bus framework to automatically free any allocated memory during
* the shutdown process.
* @mhi_cntrl: MHI controller
*/
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_async_power_up - Starts MHI power up sequence
* @mhi_cntrl: MHI controller
*/
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_power_down - Start MHI power down sequence
* @mhi_cntrl: MHI controller
* @graceful: link is still accessible, so do a graceful shutdown; otherwise
* we will shut down the host without putting the device into RESET state
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
* mhi_unprepare_after_power_down - free any memory allocated for power up
* @mhi_cntrl: MHI controller
*/
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_suspend - Move MHI into a suspended state
* Transition to MHI state M3 state from M0||M1||M2 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_resume - Resume MHI from suspended state
* Transition to MHI state M0 state from M3 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
* @in_panic: If we are trying to capture the image while in kernel panic
*/
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force external device into rddm mode
* to collect device ramdump. This is useful if the host driver asserts
* and we need to see the device state as well.
* @mhi_cntrl: MHI controller
*/
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
/**
* mhi_get_remote_time_sync - Get external soc time relative to local soc time
* using MMIO method.
* @mhi_dev: Device associated with the channels
* @t_host: Pointer to output local soc time
* @t_dev: Pointer to output remote soc time
*/
int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
u64 *t_host,
u64 *t_dev);
/**
* mhi_get_mhi_state - Return MHI state of device
* @mhi_cntrl: MHI controller
*/
enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
* mhi_set_mhi_state - Set device state
* @mhi_cntrl: MHI controller
* @state: state to set
*/
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
enum mhi_dev_state state);
/**
* mhi_is_active - helper function to determine if MHI in active state
* @mhi_dev: client device
*/
static inline bool mhi_is_active(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
mhi_cntrl->dev_state <= MHI_STATE_M3);
}
/**
* mhi_debug_reg_dump - dump MHI registers for debug purpose
* @mhi_cntrl: MHI controller
*/
void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
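/*
 * Rough usage sketch (illustrative only, not part of this header): a
 * hypothetical client driver bound to an MHI channel pair would typically
 * bring the channels up, keep the DL ring fed, and tear down as below.
 * my_probe(), my_buf and my_len are assumed names, not driver symbols.
 *
 *	static int my_probe(struct mhi_device *mhi_dev,
 *			    const struct mhi_device_id *id)
 *	{
 *		int ret = mhi_prepare_for_transfer(mhi_dev);
 *		if (ret)
 *			return ret;
 *		// queue as many DL buffers as the transfer ring allows
 *		while (mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE) > 0)
 *			mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
 *					   my_buf, my_len, MHI_EOT);
 *		return 0;
 *	}
 *
 * Completions arrive through the client's ul_xfer_cb/dl_xfer_cb callbacks,
 * and mhi_unprepare_from_transfer() resets both channels on remove.
 */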
#ifdef CONFIG_MHI_DEBUG
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#else
#define MHI_VERB(fmt, ...)
#endif
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
else if (!mhi_cntrl->klog_slient) \
printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
#ifndef MHI_NAME_SIZE
#define MHI_NAME_SIZE 32
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
* @driver_data: driver data
*/
struct mhi_device_id {
const char chan[MHI_NAME_SIZE];
unsigned long driver_data;
};
#endif
#endif /* _MHI_H_ */

View File

@ -0,0 +1,860 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "mhi.h"
#include "mhi_internal.h"
/* Software defines */
/* BHI Version */
#define BHI_MAJOR_VERSION 0x1
#define BHI_MINOR_VERSION 0x1
#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */
#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */
#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) )
typedef u32 ULONG;
typedef struct _bhi_info_type
{
ULONG bhi_ver_minor;
ULONG bhi_ver_major;
ULONG bhi_image_address_low;
ULONG bhi_image_address_high;
ULONG bhi_image_size;
ULONG bhi_rsvd1;
ULONG bhi_imgtxdb;
ULONG bhi_rsvd2;
ULONG bhi_msivec;
ULONG bhi_rsvd3;
ULONG bhi_ee;
ULONG bhi_status;
ULONG bhi_errorcode;
ULONG bhi_errdbg1;
ULONG bhi_errdbg2;
ULONG bhi_errdbg3;
ULONG bhi_sernum;
ULONG bhi_sblantirollbackver;
ULONG bhi_numsegs;
ULONG bhi_msmhwid[6];
ULONG bhi_oempkhash[48];
ULONG bhi_rsvd5;
}BHI_INFO_TYPE, *PBHI_INFO_TYPE;
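/*
 * The structure above is a raw snapshot of the BHI register block; BhiRead()
 * below fills it field by field via bhi_read_reg() so the values can be
 * dumped for debugging before firmware download.
 */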
static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
char str[128];
MHI_LOG("BHI Device Info...\n");
MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
for (index = 0; index < 6; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
}
MHI_LOG("BHI MSM HW-Id = %s\n", str);
for (index = 0; index < 24; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
}
MHI_LOG("BHI OEM PK Hash = %s\n", str);
}
static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
{
u32 out = 0;
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out);
return (ret) ? 0 : out;
}
static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
/* bhi_ver */
bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR);
bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU);
bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
for (index = 0; index < MSMHWID_NUMDWORDS; index++)
{
bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
}
for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
{
bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
}
bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
PrintBhiInfo(mhi_cntrl, bhi_info);
/* Check the Execution Environment */
if (!IsPBLExecEnv(bhi_info->bhi_ee))
{
MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
}
/* Always return success; the snapshot above is best effort */
return 0;
}
/* setup rddm vector table for rddm transfer */
static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
int i = 0;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
MHI_VERB("Setting vector:%pad size:%zu\n",
&mhi_buf->dma_addr, mhi_buf->len);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = mhi_buf->len;
}
}
/* collect rddm during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
struct mhi_buf *mhi_buf;
u32 sequence_id;
u32 rx_status;
enum mhi_ee ee;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
const u32 delayus = 2000;
u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
const u32 rddm_timeout_us = 200000;
int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
void __iomem *base = mhi_cntrl->bhie;
MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
/*
* This should only be executing during a kernel panic, we expect all
* other cores to shutdown while we're collecting rddm buffer. After
* returning from this function, we expect device to reset.
*
* Normally, we would read/write pm_state only after grabbing pm_lock;
* since we're in a panic, we skip it. Also there is no guarantee this
* state change will take effect, since we're setting it without grabbing
* pm_lock; it's best effort.
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
/* update should take the effect immediately */
smp_wmb();
/* setup the RX vector table */
mhi_rddm_prepare(mhi_cntrl, rddm_image);
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
MHI_LOG("Starting BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
if (unlikely(!sequence_id))
sequence_id = 1;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
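/*
 * The RX vector address/size are programmed and the doorbell has been rung
 * with a fresh sequence id; forcing SYS_ERR below pushes the device into
 * RDDM so it writes the ramdump into the buffers described by the vector
 * table, and completion is then polled through RXVECSTATUS.
 */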
MHI_LOG("Trigger device into RDDM mode\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
MHI_LOG("Waiting for device to enter RDDM\n");
while (rddm_retry--) {
ee = mhi_get_exec_env(mhi_cntrl);
if (ee == MHI_EE_RDDM)
break;
udelay(delayus);
}
if (rddm_retry <= 0) {
/* This is a hardware reset, will force device to enter rddm */
MHI_LOG(
"Did not enter RDDM triggering host req. reset to force rddm\n");
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ);
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
MHI_LOG("Waiting for image download completion, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status);
if (ret)
return -EIO;
if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
MHI_LOG("RDDM successfully collected\n");
return 0;
}
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
MHI_ERR("Did not complete RDDM transfer\n");
MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
return -EIO;
}
/* download ramdump image from device */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
struct mhi_buf *mhi_buf;
int ret;
u32 rx_status;
u32 sequence_id;
if (!rddm_image)
return -ENOMEM;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_RDDM ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
return -EIO;
}
mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
/* vector table is the last entry */
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, sequence_id);
MHI_LOG("Waiting for image download completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_download_rddm_img);
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming\n");
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#else
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, mhi_cntrl->sequence_id);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_TXVECSTATUS_OFFS,
BHIE_TXVECSTATUS_STATUS_BMSK,
BHIE_TXVECSTATUS_STATUS_SHFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
dma_addr_t dma_addr,
size_t size)
{
u32 tx_status, val;
u32 ImgTxDb = 0x1;
int i, ret;
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct {
char *name;
u32 offset;
} error_reg[] = {
{ "ERROR_CODE", BHI_ERRCODE },
{ "ERROR_DBG1", BHI_ERRDBG1 },
{ "ERROR_DBG2", BHI_ERRDBG2 },
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
MHI_LOG("Starting BHI programming\n");
/* program start sbl download via bhi protocol */
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
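/*
 * BHI (single image) programming sequence: clear BHI_STATUS, program the
 * image DMA address and size, configure the event ring counts and MSI
 * vector, then ring BHI_IMGTXDB. The device reports the result back in
 * BHI_STATUS, which is polled below.
 */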
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb);
read_unlock_bh(pm_lock);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
BHI_STATUS_MASK, BHI_STATUS_SHIFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
goto invalid_pm_state;
if (tx_status == BHI_STATUS_ERROR) {
MHI_ERR("Image transfer failed\n");
read_lock_bh(pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
for (i = 0; error_reg[i].name; i++) {
ret = mhi_read_reg(mhi_cntrl, base,
error_reg[i].offset, &val);
if (ret)
break;
MHI_ERR("reg:%s value:0x%x\n",
error_reg[i].name, val);
}
}
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
invalid_pm_state:
return -EIO;
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
for (i = 0; i < image_info->entries; i++, mhi_buf++)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
kfree(image_info->mhi_buf);
kfree(image_info);
}
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
{
size_t seg_size = mhi_cntrl->seg_len;
/* require an additional entry for the vector table */
int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
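/*
 * Layout: segments[0..segments-2] each hold up to seg_len bytes of image
 * payload; the final segment holds the bhi_vec_entry table pointing at all
 * payload segments, which is why one extra entry is reserved above.
 */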
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
alloc_size, seg_size, segments);
img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
if (!img_info)
return -ENOMEM;
/* allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
GFP_KERNEL);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
/* allocate and populate vector table */
mhi_buf = img_info->mhi_buf;
for (i = 0; i < segments; i++, mhi_buf++) {
size_t vec_size = seg_size;
/* last entry is for vector table */
if (i == segments - 1)
vec_size = sizeof(struct bhi_vec_entry) * i;
mhi_buf->len = vec_size;
mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
&mhi_buf->dma_addr, GFP_KERNEL);
if (!mhi_buf->buf)
goto error_alloc_segment;
MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
(unsigned long long)mhi_buf->dma_addr,
mhi_buf->len);
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
img_info->entries = segments;
*image_info = img_info;
MHI_LOG("Successfully allocated bhi vec table\n");
return 0;
error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
error_alloc_mhi_buf:
kfree(img_info);
return -ENOMEM;
}
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
const struct firmware *firmware,
struct image_info *img_info)
{
size_t remainder = firmware->size;
size_t to_cpy;
const u8 *buf = firmware->data;
int i = 0;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
while (remainder) {
MHI_ASSERT(i >= img_info->entries, "malformed vector table");
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = to_cpy;
MHI_VERB("Setting Vector:0x%llx size: %llu\n",
bhi_vec->dma_addr, bhi_vec->size);
buf += to_cpy;
remainder -= to_cpy;
i++;
bhi_vec++;
mhi_buf++;
}
}
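/*
 * Full boot chain download flow: wait for the device to reach PBL/EDL, push
 * the SBL image over BHI, then (when fbc_download is set) allocate the BHIe
 * vector table, copy the full image into it, wait for the device to reach
 * SBL, and finally hand the AMSS image over via BHIe.
 */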
void mhi_fw_load_worker(struct work_struct *work)
{
int ret;
struct mhi_controller *mhi_cntrl;
const char *fw_name;
const struct firmware *firmware;
struct image_info *image_info;
void *buf;
dma_addr_t dma_addr;
size_t size;
mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_PBL(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state\n");
return;
}
MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
/* if device in pthru, we do not have to load firmware */
if (mhi_cntrl->ee == MHI_EE_PTHRU)
return;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
return;
}
ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
if (ret) {
MHI_ERR("Error loading firmware, ret:%d\n", ret);
return;
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
/* the sbl size provided is maximum size, not necessarily image size */
if (size > firmware->size)
size = firmware->size;
buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!buf) {
MHI_ERR("Could not allocate memory for image\n");
release_firmware(firmware);
return;
}
/* load sbl image */
memcpy(buf, firmware->data, size);
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
/* error or in edl, we're done */
if (ret || mhi_cntrl->ee == MHI_EE_EDL) {
release_firmware(firmware);
return;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
* if we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
firmware->size);
if (ret) {
MHI_ERR("Error alloc size of %zu\n", firmware->size);
goto error_alloc_fw_table;
}
MHI_LOG("Copying firmware image into vector table\n");
/* load the firmware into BHIE vec table */
mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
}
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
if (!mhi_cntrl->fbc_download) {
release_firmware(firmware);
return;
}
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_read;
}
/* wait for SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_SBL ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter BHIE\n");
goto error_read;
}
/* start full firmware image download */
image_info = mhi_cntrl->fbc_image;
ret = mhi_fw_load_amss(mhi_cntrl,
/* last entry is vec table */
&image_info->mhi_buf[image_info->entries - 1]);
MHI_LOG("amss fw_load, ret:%d\n", ret);
release_firmware(firmware);
return;
error_read:
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
error_alloc_fw_table:
release_firmware(firmware);
}
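/*
 * BhiWrite() is the EDL/flash path: a user-space tool pushes a flash
 * programmer image through IOCTL_BHI_WRITEIMAGE while the device is in EDL;
 * on success the device transitions into the Flash Programmer Environment
 * (MHI_EE_FP). bhi_write_image() below drives this from the ioctl handler.
 */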
int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size)
{
int ret;
dma_addr_t dma_addr;
void *dma_buf;
MHI_LOG("Device current EE:%s, M:%s, PM:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
#if 0
if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) {
mhi_cntrl->ee = MHI_EE_EDL;
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms + 500));
}
#endif
#if 0
if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid BHI state\n");
return -EINVAL;
}
#endif
if (mhi_cntrl->ee != MHI_EE_EDL) {
MHI_ERR("MHI is not in EDL state\n");
return -EINVAL;
}
dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!dma_buf) {
MHI_ERR("Could not allocate memory for image\n");
return -ENOMEM;
}
ret = copy_from_user(dma_buf, ubuf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
return ret;
}
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
if (ret) {
MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
goto error_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_state;
}
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
/* wait for the device to enter the Flash Programmer Environment */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_FP ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter Flash Programmer Environment\n");
goto error_state;
}
MHI_LOG("MHI enter Flash Programmer Environment\n");
return 0;
error_state:
MHI_LOG("Device current EE:%s, M:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
return ret;
}
long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
BHI_INFO_TYPE bhi_info;
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
}
return ret;
}
long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
size_t size;
ret = copy_from_user(&size, ubuf, sizeof(size));
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
return ret;
}
if (size == 0) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE invalid image size\n");
return -EINVAL;
}
ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
return ret;
}

View File

@ -0,0 +1,274 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/wait.h>
#include "mhi.h"
#include "mhi_internal.h"
struct __packed dtr_ctrl_msg {
u32 preamble;
u32 msg_id;
u32 dest_id;
u32 size;
u32 msg;
};
#define CTRL_MAGIC (0x4C525443)
#define CTRL_MSG_DTR BIT(0)
#define CTRL_MSG_RTS BIT(1)
#define CTRL_MSG_DCD BIT(0)
#define CTRL_MSG_DSR BIT(1)
#define CTRL_MSG_RI BIT(3)
#define CTRL_HOST_STATE (0x10)
#define CTRL_DEVICE_STATE (0x11)
#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
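/*
 * Serial status protocol on the IP_CTRL channel: the host sends
 * CTRL_HOST_STATE messages carrying DTR/RTS for a given channel id, and the
 * device answers with CTRL_DEVICE_STATE messages carrying DCD/DSR/RI, which
 * the DL callback below latches into the matching mhi_dev->tiocm.
 */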
static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
struct mhi_device *mhi_dev,
u32 tiocm)
{
struct dtr_ctrl_msg *dtr_msg = NULL;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
u32 cur_tiocm;
int ret = 0;
cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
tiocm &= (TIOCM_DTR | TIOCM_RTS);
/* state did not change */
if (cur_tiocm == tiocm)
return 0;
mutex_lock(&dtr_chan->mutex);
dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
if (!dtr_msg) {
ret = -ENOMEM;
goto tiocm_exit;
}
dtr_msg->preamble = CTRL_MAGIC;
dtr_msg->msg_id = CTRL_HOST_STATE;
dtr_msg->dest_id = mhi_dev->ul_chan_id;
dtr_msg->size = sizeof(u32);
if (tiocm & TIOCM_DTR)
dtr_msg->msg |= CTRL_MSG_DTR;
if (tiocm & TIOCM_RTS)
dtr_msg->msg |= CTRL_MSG_RTS;
/*
* 'minicom -D /dev/mhi_DUN' will send RTS:1 when open, and RTS:0 when exit.
* RTS:0 will prevent the modem from outputting AT responses.
* But 'busybox microcom' does not send any RTS to the modem.
* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
*/
dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
!!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
reinit_completion(&dtr_chan->completion);
ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
sizeof(*dtr_msg), MHI_EOT);
if (ret)
goto tiocm_exit;
ret = wait_for_completion_timeout(&dtr_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret) {
MHI_ERR("Failed to receive transfer callback\n");
ret = -EIO;
goto tiocm_exit;
}
ret = 0;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
mhi_dev->tiocm |= tiocm;
spin_unlock_irq(res_lock);
tiocm_exit:
kfree(dtr_msg);
mutex_unlock(&dtr_chan->mutex);
return ret;
}
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
/* ioctl not supported by this controller */
if (!mhi_cntrl->dtr_dev)
return -EIO;
switch (cmd) {
case TIOCMGET:
return mhi_dev->tiocm;
case TIOCMSET:
{
u32 tiocm;
ret = get_user(tiocm, (u32 *)arg);
if (ret)
return ret;
return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
}
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(mhi_ioctl);
static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = mhi_dev->mtu;
void *buf;
int ret = -EIO, i;
for (i = 0; i < nr_trbs; i++) {
buf = kmalloc(mtu, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
kfree(buf);
return ret;
}
}
return ret;
}
static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr;
u32 chan;
spinlock_t *res_lock;
if (mhi_result->transaction_status == -ENOTCONN) {
kfree(mhi_result->buf_addr);
return;
}
if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) {
MHI_ERR("Unexpected length %zu received\n",
mhi_result->bytes_xferd);
return;
}
MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n",
dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id,
dtr_msg->msg);
chan = CTRL_GET_CHID(dtr_msg);
if (chan >= mhi_cntrl->max_chan)
goto auto_queue;
mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev;
if (!mhi_dev)
goto auto_queue;
res_lock = &mhi_dev->dev.devres_lock;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
if (dtr_msg->msg & CTRL_MSG_DCD)
mhi_dev->tiocm |= TIOCM_CD;
if (dtr_msg->msg & CTRL_MSG_DSR)
mhi_dev->tiocm |= TIOCM_DSR;
if (dtr_msg->msg & CTRL_MSG_RI)
mhi_dev->tiocm |= TIOCM_RI;
spin_unlock_irq(res_lock);
auto_queue:
mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr,
mhi_cntrl->dtr_dev->mtu, MHI_EOT);
}
static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
if (!mhi_result->transaction_status)
complete(&dtr_chan->completion);
}
static void mhi_dtr_remove(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_cntrl->dtr_dev = NULL;
}
static int mhi_dtr_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
MHI_LOG("Enter for DTR control channel\n");
mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
ret = mhi_prepare_for_transfer(mhi_dev);
if (!ret)
mhi_cntrl->dtr_dev = mhi_dev;
if (!ret)
ret = mhi_dtr_queue_inbound(mhi_cntrl);
MHI_LOG("Exit with ret:%d\n", ret);
return ret;
}
static const struct mhi_device_id mhi_dtr_table[] = {
{ .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) },
{},
};
static struct mhi_driver mhi_dtr_driver = {
.id_table = mhi_dtr_table,
.remove = mhi_dtr_remove,
.probe = mhi_dtr_probe,
.ul_xfer_cb = mhi_dtr_ul_xfer_cb,
.dl_xfer_cb = mhi_dtr_dl_xfer_cb,
.driver = {
.name = "MHI_DTR",
.owner = THIS_MODULE,
}
};
int __init mhi_dtr_init(void)
{
return mhi_driver_register(&mhi_dtr_driver);
}
void mhi_dtr_exit(void) {
mhi_driver_unregister(&mhi_dtr_driver);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,362 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including the
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion event buffers */
#define MAX_TR_EVENTS 50
/* maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element cmd_transfer;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#endif /* __SDX20_MHI_H */

View File

@ -0,0 +1,426 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including the
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion event buffers */
#define MAX_TR_EVENTS 50
/* maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element tre;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#if 0
/* SW channel client list */
enum mhi_client_channel {
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_IP_CTRL_0_OUT = 16,
MHI_CLIENT_IP_CTRL_0_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_DCI_OUT = 20,
MHI_CLIENT_DCI_IN = 21,
MHI_CLIENT_IP_CTRL_3_OUT = 22,
MHI_CLIENT_IP_CTRL_3_IN = 23,
MHI_CLIENT_IP_CTRL_4_OUT = 24,
MHI_CLIENT_IP_CTRL_4_IN = 25,
MHI_CLIENT_IP_CTRL_5_OUT = 26,
MHI_CLIENT_IP_CTRL_5_IN = 27,
MHI_CLIENT_IP_CTRL_6_OUT = 28,
MHI_CLIENT_IP_CTRL_6_IN = 29,
MHI_CLIENT_IP_CTRL_7_OUT = 30,
MHI_CLIENT_IP_CTRL_7_IN = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_IP_SW_0_OUT = 34,
MHI_CLIENT_IP_SW_0_IN = 35,
MHI_CLIENT_IP_SW_1_OUT = 36,
MHI_CLIENT_IP_SW_1_IN = 37,
MHI_CLIENT_IP_SW_2_OUT = 38,
MHI_CLIENT_IP_SW_2_IN = 39,
MHI_CLIENT_IP_SW_3_OUT = 40,
MHI_CLIENT_IP_SW_3_IN = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_4_OUT = 46,
MHI_CLIENT_IP_SW_4_IN = 47,
MHI_MAX_SOFTWARE_CHANNELS = 48,
MHI_CLIENT_TEST_OUT = 60,
MHI_CLIENT_TEST_IN = 61,
MHI_CLIENT_RESERVED_1_LOWER = 62,
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_RESERVED_2_LOWER = 102,
MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102,
};
#endif
#endif /* __SDX20_MHI_H */

View File

@ -0,0 +1,33 @@
menu "MHI device support"
config MHI_NETDEV
tristate "MHI NETDEV"
depends on MHI_BUS
help
MHI based net device driver for transferring IP traffic
between host and modem. By enabling this driver, clients
can transfer data using a standard network interface.
Over-the-air traffic goes through the mhi netdev interface.
config MHI_UCI
tristate "MHI UCI"
depends on MHI_BUS
help
MHI based uci driver is for transferring data between host and
modem using standard file operations from user space. Open, read,
write, ioctl, and close operations are supported by this driver.
Please check mhi_uci_match_table for all supported channels that
are exposed to userspace.
config MHI_SATELLITE
tristate "MHI SATELLITE"
depends on MHI_BUS
help
MHI proxy satellite device driver enables NON-HLOS MHI satellite
drivers to communicate with device over PCIe link without host
involvement. Host facilitates propagation of events from device
to NON-HLOS MHI satellite drivers, channel states, and power
management over IPC communication. It helps in HLOS power
savings.
endmenu
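# A minimal config fragment for an in-tree build might look like the
# following (a sketch; it assumes MHI_BUS is provided by the platform):
#   CONFIG_MHI_BUS=y
#   CONFIG_MHI_NETDEV=m
#   CONFIG_MHI_UCI=m
#   CONFIG_MHI_SATELLITE=n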

View File

@ -0,0 +1,3 @@
obj-$(CONFIG_MHI_NETDEV) += mhi_netdev.o
obj-$(CONFIG_MHI_UCI) += mhi_uci.o
obj-$(CONFIG_MHI_SATELLITE) += mhi_satellite.o

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,981 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/version.h>
#if 1
static inline void *ipc_log_context_create(int max_num_pages,
const char *modname, uint16_t user_version)
{ return NULL; }
static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
{ return -EINVAL; }
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/tty.h>
#include "../core/mhi.h"
#define DEVICE_NAME "mhi"
#define MHI_UCI_DRIVER_NAME "mhi_uci_q"
struct uci_chan {
wait_queue_head_t wq;
spinlock_t lock;
struct list_head pending; /* user space waiting to read */
struct uci_buf *cur_buf; /* current buffer user space reading */
size_t rx_size;
};
struct uci_buf {
struct page *page;
void *data;
size_t len;
unsigned nr_trb;
struct list_head node;
};
struct uci_dev {
struct list_head node;
dev_t devt;
struct device *dev;
struct mhi_device *mhi_dev;
const char *chan;
struct mutex mutex; /* sync open and close */
struct mutex r_mutex;
struct mutex w_mutex;
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
int ref_count;
bool enabled;
unsigned rx_error;
unsigned nr_trb;
unsigned nr_trbs;
struct uci_buf *uci_buf;
struct ktermios termios;
size_t bytes_xferd;
};
struct mhi_uci_drv {
struct list_head head;
struct mutex lock;
struct class *class;
int major;
dev_t dev_t;
};
static int uci_msg_lvl = MHI_MSG_LVL_ERROR;
module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
#define MSG_VERB(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_LOG(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MAX_UCI_DEVICES (64)
#define QUEC_MHI_UCI_ALWAYS_OPEN //for now, sdx20 cannot handle the "start-reset-start" operation, so the simple solution is to keep the channel in the start state
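/*
 * Assumption behind the version guard below: on v6.1+ kernels the termios
 * user/kernel conversion helpers used by the TCGETS/TCSETS cases in
 * mhi_uci_ioctl() no longer appear to be usable from this module, so weak
 * fallbacks are provided here; the TCGETS2 split mirrors the tty layer.
 */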
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
#ifdef TCGETS2
__weak int user_termios_to_kernel_termios(struct ktermios *k,
struct termios2 __user *u)
{
return copy_from_user(k, u, sizeof(struct termios2));
}
__weak int kernel_termios_to_user_termios(struct termios2 __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios2));
}
__weak int user_termios_to_kernel_termios_1(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
__weak int kernel_termios_to_user_termios_1(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
#else
__weak int user_termios_to_kernel_termios(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
__weak int kernel_termios_to_user_termios(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
#endif /* TCGETS2 */
#endif
static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
static struct mhi_uci_drv mhi_uci_drv;
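/*
 * Inbound buffer management: on first open, mhi_queue_inbound() allocates a
 * fixed ring of nr_trbs page-backed buffers sized to the channel MTU and
 * queues them on the DL channel; the buffers are intended to be reused
 * (re-queued after user space consumes them) and are only freed once the
 * last reader closes the node in mhi_uci_release().
 */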
static int mhi_queue_inbound(struct uci_dev *uci_dev)
{
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = uci_dev->mtu;
void *buf;
struct uci_buf *uci_buf;
int ret = -EIO, i;
if (uci_dev->uci_buf == NULL) {
uci_dev->nr_trb = 0;
uci_dev->nr_trbs = (nr_trbs + 1);
uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL);
if (!uci_dev->uci_buf)
return -ENOMEM;
uci_buf = uci_dev->uci_buf;
for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) {
uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu));
if (!uci_buf->page)
return -ENOMEM;
uci_buf->data = page_address(uci_buf->page);
uci_buf->len = 0;
uci_buf->nr_trb = i;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) {
//MSG_ERR("[%d] = %p\n", i, uci_buf->data);
}
}
}
for (i = 0; i < nr_trbs; i++) {
#if 0
buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
uci_buf = buf + mtu;
uci_buf->data = buf;
#else
uci_buf = &uci_dev->uci_buf[i];
buf = uci_buf->data;
#endif
MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
#if 0
kfree(buf);
#endif
MSG_ERR("Failed to queue buffer %d\n", i);
return ret;
}
}
return ret;
}
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
long ret = -ERESTARTSYS;
mutex_lock(&uci_dev->mutex);
if (uci_dev->enabled)
ret = mhi_ioctl(mhi_dev, cmd, arg);
if (uci_dev->enabled) {
switch (cmd) {
case TCGETS:
#ifndef TCGETS2
ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios);
#else
ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios);
#endif
break;
case TCSETSF:
case TCSETS:
#ifndef TCGETS2
ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg);
#else
ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg);
#endif
break;
case TCFLSH:
ret = 0;
break;
default:
break;
}
}
mutex_unlock(&uci_dev->mutex);
return ret;
}
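/*
 * release: drop one reference; when the last user closes the node, stop the
 * transfer channels and free the page-backed RX buffers. If the MHI device
 * is already gone (!enabled), the uci_dev itself is freed here as well.
 */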
static int mhi_uci_release(struct inode *inode, struct file *file)
{
struct uci_dev *uci_dev = file->private_data;
mutex_lock(&uci_dev->mutex);
uci_dev->ref_count--;
if (!uci_dev->ref_count) {
struct uci_chan *uci_chan;
MSG_LOG("Last client left, closing node\n");
if (uci_dev->enabled)
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
/* clean inbound channel */
uci_chan = &uci_dev->dl_chan;
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
}
uci_chan->cur_buf = NULL;
if (!uci_dev->enabled) {
MSG_LOG("Node is deleted, freeing dev node\n");
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
return 0;
}
}
MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
mutex_unlock(&uci_dev->mutex);
return 0;
}
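/*
 * poll: report POLLIN when a completed DL buffer is pending and POLLOUT when
 * the UL ring has free descriptors; single-channel devices (ADPL/QDSS) have
 * no UL channel, so only the DL half applies to them.
 */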
static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan;
unsigned int mask = 0;
poll_wait(file, &uci_dev->dl_chan.wq, wait);
// ADPL and QDSS do not need to poll for write. xingduo.du 2023-02-16
// poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->dl_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask = POLLERR;
} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
MSG_VERB("Client can read from node\n");
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&uci_chan->lock);
// ADPL and QDSS are single-channel devices, so ul_chan is not initialized. xingduo.du 2023-02-27
if (mhi_dev->ul_chan) {
poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->ul_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask |= POLLERR;
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
MSG_VERB("Client can write to node\n");
mask |= POLLOUT | POLLWRNORM;
}
if (!uci_dev->enabled)
mask |= POLLHUP;
if (uci_dev->rx_error)
mask |= POLLERR;
spin_unlock_bh(&uci_chan->lock);
}
MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
return mask;
}
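/*
 * write: split the user buffer into MTU-sized chunks, copy each chunk into a
 * kmalloc'd bounce buffer and queue it on the UL channel (blocking for free
 * descriptors unless O_NONBLOCK). Intermediate chunks are sent with
 * MHI_CHAIN, the last one (or a nearly full ring) with MHI_EOT; the bounce
 * buffer is freed in mhi_ul_xfer_cb() on completion.
 */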
static ssize_t mhi_uci_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->ul_chan;
size_t bytes_xfered = 0;
int ret, nr_avail;
if (!buf || !count || uci_dev->rx_error)
return -EINVAL;
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
MSG_VERB("Enter: to xfer:%zu bytes\n", count);
while (count) {
size_t xfer_size;
void *kbuf;
enum MHI_FLAGS flags;
spin_unlock_bh(&uci_chan->lock);
nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE);
if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
/* wait for free descriptors */
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled) ||
(nr_avail = mhi_get_no_free_descriptors(mhi_dev,
DMA_TO_DEVICE)) > 0);
if (ret == -ERESTARTSYS || !uci_dev->enabled) {
MSG_LOG("Exit signal caught for node or not enabled\n");
return -ERESTARTSYS;
}
xfer_size = min_t(size_t, count, uci_dev->mtu);
kbuf = kmalloc(xfer_size, GFP_KERNEL);
if (!kbuf) {
MSG_ERR("Failed to allocate memory %zu\n", xfer_size);
return -ENOMEM;
}
ret = copy_from_user(kbuf, buf, xfer_size);
if (unlikely(ret)) {
kfree(kbuf);
return -EFAULT;
}
spin_lock_bh(&uci_chan->lock);
/* if ring is full after this force EOT */
if (nr_avail > 1 && (count - xfer_size))
flags = MHI_CHAIN;
else
flags = MHI_EOT;
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
xfer_size, flags);
else
ret = -ERESTARTSYS;
if (ret) {
kfree(kbuf);
goto sys_interrupt;
}
bytes_xfered += xfer_size;
count -= xfer_size;
buf += xfer_size;
}
spin_unlock_bh(&uci_chan->lock);
MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered);
return bytes_xfered;
sys_interrupt:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
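/*
 * read: wait for a completed DL buffer, copy up to 'count' bytes to user
 * space, and once a buffer is fully consumed re-queue the previous ring slot
 * (or free the temporary copy made by the DL callback) before returning.
 */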
static ssize_t mhi_uci_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->dl_chan;
struct uci_buf *uci_buf;
char *ptr;
size_t to_copy;
int ret = 0;
if (!buf || uci_dev->rx_error)
return -EINVAL;
MSG_VERB("Client provided buf len:%zu\n", count);
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
/* No data available to read, wait */
if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
MSG_VERB("No data available to read waiting\n");
spin_unlock_bh(&uci_chan->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled ||
!list_empty(&uci_chan->pending)));
if (ret == -ERESTARTSYS) {
MSG_LOG("Exit signal caught for node\n");
return -ERESTARTSYS;
}
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
MSG_LOG("node is disabled\n");
ret = -ERESTARTSYS;
goto read_error;
}
}
/* new read, get the next descriptor from the list */
if (!uci_chan->cur_buf) {
uci_buf = list_first_entry_or_null(&uci_chan->pending,
struct uci_buf, node);
if (unlikely(!uci_buf)) {
ret = -EIO;
goto read_error;
}
if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON2) {
dump_stack();
ret = -EIO;
MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n",
mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb);
goto read_error;
}
list_del(&uci_buf->node);
uci_chan->cur_buf = uci_buf;
uci_chan->rx_size = uci_buf->len;
MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size);
}
uci_buf = uci_chan->cur_buf;
spin_unlock_bh(&uci_chan->lock);
/* Copy the buffer to user space */
to_copy = min_t(size_t, count, uci_chan->rx_size);
ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
ret = copy_to_user(buf, ptr, to_copy);
if (ret)
return -EFAULT;
MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size);
uci_chan->rx_size -= to_copy;
/* we finished with this buffer, queue it back to hardware */
if (!uci_chan->rx_size) {
spin_lock_bh(&uci_chan->lock);
uci_chan->cur_buf = NULL;
if (uci_dev->enabled)
#if 1 //recycle the previous slot so the buffer addresses in the ring do not change
{
if (uci_buf->page) {
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
uci_buf->data, uci_dev->mtu,
MHI_EOT);
} else {
kfree(uci_buf);
ret = 0;
}
}
#endif
else
ret = -ERESTARTSYS;
if (ret) {
MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret);
#if 0
kfree(uci_buf->data);
#endif
goto read_error;
}
spin_unlock_bh(&uci_chan->lock);
}
MSG_VERB("Returning %zu bytes\n", to_copy);
return to_copy;
read_error:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
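/* Thin wrappers that serialize concurrent writers and readers with the per-direction mutexes. */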
static ssize_t mhi_uci_write_mutex(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->w_mutex); /* serialize concurrent writes */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_write(file, buf, count, offp);
mutex_unlock(&uci_dev->w_mutex);
return ret;
}
static ssize_t mhi_uci_read_mutex(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->r_mutex); /* serialize concurrent reads */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_read(file, buf, count, ppos);
mutex_unlock(&uci_dev->r_mutex);
return ret;
}
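/*
 * open: look the uci_dev up by dev_t; the first open starts the MHI channels
 * and pre-queues the inbound buffers. With QUEC_MHI_UCI_ALWAYS_OPEN an extra
 * reference is taken so the channels stay started after the last close.
 */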
static int mhi_uci_open(struct inode *inode, struct file *filp)
{
struct uci_dev *uci_dev = NULL, *tmp_dev;
int ret = -EIO;
struct uci_chan *dl_chan;
mutex_lock(&mhi_uci_drv.lock);
list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) {
if (tmp_dev->devt == inode->i_rdev) {
uci_dev = tmp_dev;
break;
}
}
/* could not find a minor node */
if (!uci_dev)
goto error_exit;
mutex_lock(&uci_dev->mutex);
if (!uci_dev->enabled) {
MSG_ERR("Node exist, but not in active state!\n");
goto error_open_chan;
}
uci_dev->ref_count++;
MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
if (uci_dev->ref_count == 1) {
MSG_LOG("Starting channel\n");
ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
if (ret) {
MSG_ERR("Error starting transfer channels\n");
uci_dev->ref_count--;
goto error_open_chan;
}
ret = mhi_queue_inbound(uci_dev);
if (ret)
goto error_rx_queue;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
uci_dev->ref_count++;
#endif
}
filp->private_data = uci_dev;
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
return 0;
error_rx_queue:
dl_chan = &uci_dev->dl_chan;
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
}
error_open_chan:
mutex_unlock(&uci_dev->mutex);
error_exit:
mutex_unlock(&mhi_uci_drv.lock);
return ret;
}
static const struct file_operations mhidev_fops = {
.open = mhi_uci_open,
.release = mhi_uci_release,
.read = mhi_uci_read_mutex,
.write = mhi_uci_write_mutex,
.poll = mhi_uci_poll,
.unlocked_ioctl = mhi_uci_ioctl,
};
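/*
 * remove: mark the node disabled under both channel locks, wake up any
 * blocked readers/writers, destroy the char device and free the uci_dev once
 * the last file handle has been released.
 */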
static void mhi_uci_remove(struct mhi_device *mhi_dev)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
MSG_LOG("Enter\n");
mutex_lock(&mhi_uci_drv.lock);
mutex_lock(&uci_dev->mutex);
/* disable the node */
spin_lock_irq(&uci_dev->dl_chan.lock);
spin_lock_irq(&uci_dev->ul_chan.lock);
uci_dev->enabled = false;
spin_unlock_irq(&uci_dev->ul_chan.lock);
spin_unlock_irq(&uci_dev->dl_chan.lock);
wake_up(&uci_dev->dl_chan.wq);
wake_up(&uci_dev->ul_chan.wq);
/* delete the node to prevent new opens */
device_destroy(mhi_uci_drv.class, uci_dev->devt);
uci_dev->dev = NULL;
list_del(&uci_dev->node);
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count > 0)
uci_dev->ref_count--;
#endif
/* safe to free memory only if all file nodes are closed */
if (!uci_dev->ref_count) {
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
mutex_unlock(&mhi_uci_drv.lock);
return;
}
MSG_LOG("Exit\n");
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
}
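/*
 * probe: allocate a uci_dev, grab a free minor, create the /dev/mhi_<chan>
 * node (suffixed with the controller index when more than one modem is
 * present), initialize both channel wait queues and clamp the MTU to the
 * match table's driver_data.
 */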
static int mhi_uci_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct uci_dev *uci_dev;
int minor;
char node_name[32];
int dir;
uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
if (!uci_dev)
return -ENOMEM;
mutex_init(&uci_dev->mutex);
mutex_init(&uci_dev->r_mutex);
mutex_init(&uci_dev->w_mutex);
uci_dev->mhi_dev = mhi_dev;
minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
if (minor >= MAX_UCI_DEVICES) {
kfree(uci_dev);
return -ENOSPC;
}
mutex_lock(&uci_dev->mutex);
mutex_lock(&mhi_uci_drv.lock);
uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
#if 1
if (mhi_dev->mhi_cntrl->cntrl_idx)
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s%d",
mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx);
else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
#else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
mhi_dev->dev_id, mhi_dev->domain,
mhi_dev->bus, mhi_dev->slot, "_pipe_",
mhi_dev->ul_chan_id);
#endif
set_bit(minor, uci_minors);
/* create debugging buffer */
snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
mhi_dev->ul_chan_id);
for (dir = 0; dir < 2; dir++) {
struct uci_chan *uci_chan = (dir) ?
&uci_dev->ul_chan : &uci_dev->dl_chan;
spin_lock_init(&uci_chan->lock);
init_waitqueue_head(&uci_chan->wq);
INIT_LIST_HEAD(&uci_chan->pending);
}
uci_dev->termios = tty_std_termios;
uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
mhi_device_set_devdata(mhi_dev, uci_dev);
uci_dev->enabled = true;
list_add(&uci_dev->node, &mhi_uci_drv.head);
mutex_unlock(&mhi_uci_drv.lock);
mutex_unlock(&uci_dev->mutex);
MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
return 0;
}
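/*
 * UL completion callback: free the bounce buffer queued by write() and wake
 * up writers waiting for free descriptors.
 */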
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->ul_chan;
MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
kfree(mhi_result->buf_addr);
if (!mhi_result->transaction_status)
wake_up(&uci_chan->wq);
}
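/*
 * DL completion callback: completions are expected in ring order, so nr_trb
 * is used to cross-check the completed address. For DUN/QMI/MBIM the payload
 * is copied into a temporary buffer and the ring slot is re-queued right
 * away; other channels hand the ring buffer itself to the reader via the
 * pending list.
 */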
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->dl_chan;
unsigned long flags;
struct uci_buf *buf;
unsigned nr_trb = uci_dev->nr_trb;
if (uci_dev->uci_buf == NULL) {
MSG_ERR("uci_buf = NULL");
return;
}
buf = &uci_dev->uci_buf[nr_trb];
if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr)
{
uci_dev->rx_error++;
MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n",
mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr);
return;
}
uci_dev->nr_trb++;
if (uci_dev->nr_trb == uci_dev->nr_trbs)
uci_dev->nr_trb = 0;
if (mhi_result->transaction_status == -ENOTCONN) {
return;
}
if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0)
{
MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
return;
}
if (mhi_result->bytes_xferd > uci_dev->bytes_xferd)
{
uci_dev->bytes_xferd = mhi_result->bytes_xferd;
//MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
// mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
}
MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
spin_lock_irqsave(&uci_chan->lock, flags);
#if 0
buf = mhi_result->buf_addr + uci_dev->mtu;
buf->data = mhi_result->buf_addr;
#endif
buf->len = mhi_result->bytes_xferd;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN)
{
struct uci_buf *tmp_buf = NULL;
int skip_buf = 0;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count == 1)
skip_buf++;
#endif
if (!skip_buf)
tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
if (tmp_buf) {
tmp_buf->page = NULL;
tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf);
tmp_buf->len = buf->len;
memcpy(tmp_buf->data, buf->data, buf->len);
}
if (buf) {
struct uci_buf *uci_buf = buf;
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT);
}
buf = tmp_buf;
}
if (buf)
list_add_tail(&buf->node, &uci_chan->pending);
spin_unlock_irqrestore(&uci_chan->lock, flags);
#ifdef CONFIG_PM_SLEEP
if (mhi_dev->dev.power.wakeup)
__pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
#endif
wake_up(&uci_chan->wq);
}
// fix: SDX6X module cannot read the qdb file. xingduo.du 2023-01-18
#define DIAG_MAX_PCIE_PKT_SZ 8192 //value defined by the module
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_uci_match_table[] = {
{ .chan = "LOOPBACK", .driver_data = 0x1000 },
{ .chan = "SAHARA", .driver_data = 0x4000 },
{ .chan = "EDL", .driver_data = 0x4000 },
{ .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ },
{ .chan = "MBIM", .driver_data = 0x1000 },
{ .chan = "QMI0", .driver_data = 0x1000 },
{ .chan = "QMI1", .driver_data = 0x1000 },
{ .chan = "DUN", .driver_data = 0x1000 },
#ifdef ENABLE_ADPL
{ .chan = "ADPL", .driver_data = 0x1000 },
#endif
#ifdef ENABLE_QDSS
{ .chan = "QDSS", .driver_data = 0x1000 },
#endif
{},
};
static struct mhi_driver mhi_uci_driver = {
.id_table = mhi_uci_match_table,
.remove = mhi_uci_remove,
.probe = mhi_uci_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
.driver = {
.name = MHI_UCI_DRIVER_NAME,
.owner = THIS_MODULE,
},
};
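/*
 * Module init/exit: register the char device region and device class, then
 * register the MHI client driver; tear everything down in reverse order on
 * failure or exit.
 */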
int mhi_device_uci_init(void)
{
int ret;
ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
if (ret < 0)
return ret;
mhi_uci_drv.major = ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
mhi_uci_drv.class = class_create(MHI_UCI_DRIVER_NAME);
#else
mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
#endif
if (IS_ERR(mhi_uci_drv.class)) {
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
return -ENODEV;
}
mutex_init(&mhi_uci_drv.lock);
INIT_LIST_HEAD(&mhi_uci_drv.head);
ret = mhi_driver_register(&mhi_uci_driver);
if (ret) {
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
return ret;
}
void mhi_device_uci_exit(void)
{
mhi_driver_unregister(&mhi_uci_driver);
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}

View File

@ -0,0 +1,31 @@
root@imx6qsabresd:~# busybox microcom /dev/mhi_DUN
[ 384.652992] [I][mhi_uci_open] Node open, ref counts 1
[ 384.658144] [I][mhi_uci_open] Starting channel
[ 384.662612] [I][__mhi_prepare_channel] Entered: preparing channel:32
[ 384.680397] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 384.685890] [I][__mhi_prepare_channel] Chan:32 successfully moved to start state
[ 384.693312] [I][__mhi_prepare_channel] Entered: preparing channel:33
[ 384.708692] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 384.714324] [I][__mhi_prepare_channel] Chan:33 successfully moved to start state
RDY
+CFUN: 1
+CPIN: READY
+QUSIM: 1
+QIND: SMS DONE
+QIND: PB DONE
ati
Quectel
EM20
Revision: EM20GR01A01M4G
OK
at+cpin?
+CPIN: READY
OK

View File

@ -0,0 +1,145 @@
root@OpenWrt:~# insmod pcie_mhi.ko mhi_mbim_enabled=1
root@OpenWrt:~# dmesg | grep mhi
[ 65.587160] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 65.597089] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
[ 65.602250] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit]
[ 65.611690] mhi_q 0000:01:00.0: enabling device (0140 -> 0142)
[ 65.619307] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63
[ 65.619327] [I][mhi_power_up] dev_state:RESET
[ 65.619331] [I][mhi_async_power_up] Requested to power on
[ 65.619449] [I][mhi_alloc_coherent] size = 114688, dma_handle = 6fca0000
[ 65.619462] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221e000
[ 65.619731] [I][mhi_async_power_up] dev_state:RESET ee:AMSS
[ 65.619747] [I][mhi_pm_st_worker] Transition to state:READY
[ 65.619760] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 65.619764] [I][mhi_ready_state_transition] Waiting to enter READY state
[ 65.619885] [I][mhi_async_power_up] Power on setup success
[ 65.619897] [I][mhi_pci_probe] Return successful
[ 65.665114] [I][mhi_ready_state_transition] Device in READY State
[ 65.665125] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR
[ 65.665131] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE
[ 65.665133] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR
[ 65.665137] [I][mhi_init_mmio] Initializing MMIO
[ 65.665142] [I][mhi_init_mmio] CHDBOFF:0x300
[ 65.665151] [I][mhi_init_mmio] ERDBOFF:0x700
[ 65.665156] [I][mhi_init_mmio] Programming all MMIO values.
[ 65.786283] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2
[ 65.786289] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0
[ 65.786295] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR
[ 65.786300] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0
[ 65.789734] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2
[ 65.789739] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS
[ 65.789756] [I][mhi_pm_st_worker] Transition to state:MISSION MODE
[ 65.789767] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 65.789771] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition
[ 65.789787] [I][mhi_init_timesync] No timesync capability found
[ 65.789791] [I][mhi_pm_mission_mode_transition] Adding new devices
[ 65.790570] [I][mhi_dtr_probe] Enter for DTR control channel
[ 65.790577] [I][__mhi_prepare_channel] Entered: preparing channel:18
[ 65.797036] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.797051] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state
[ 65.797055] [I][__mhi_prepare_channel] Entered: preparing channel:19
[ 65.802457] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.802469] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state
[ 65.802485] [I][mhi_dtr_probe] Exit with ret:0
[ 65.802748] [I][mhi_netdev_enable_iface] Prepare the channels for transfer
[ 65.802772] [I][__mhi_prepare_channel] Entered: preparing channel:100
[ 65.825279] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.825293] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state
[ 65.825297] [I][__mhi_prepare_channel] Entered: preparing channel:101
[ 65.835565] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.835578] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state
[ 65.839141] [I][mhi_netdev_enable_iface] Exited.
[ 65.839875] rmnet_vnd_register_device(rmnet_mhi0.1)=0
[ 65.843278] net rmnet_mhi0 rmnet_mhi0.1: NSS context created
[ 65.861808] [I][mhi_pm_mission_mode_transition] Exit with ret:0
[ 68.625595] [I][__mhi_prepare_channel] Entered: preparing channel:12
[ 68.634610] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 68.634622] [I][__mhi_prepare_channel] Chan:12 successfully moved to start state
[ 68.634625] [I][__mhi_prepare_channel] Entered: preparing channel:13
[ 68.644978] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 68.644987] [I][__mhi_prepare_channel] Chan:13 successfully moved to start state
[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1
[ 69.177035] [I][mhi_netdev_open] Opened net dev interface
[ 71.655431] [I][mhi_netdev_open] Opened net dev interface
root@OpenWrt:~# ./quectel-CM &
[04-02_04:14:12:134] Quectel_QConnectManager_Linux_V1.6.0.5
[04-02_04:14:12:134] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002
[04-02_04:14:12:135] network interface '' or qmidev '' is not exist
[04-02_04:14:12:135] netcard driver = pcie_mhi, driver version = V1.3.0.6
[04-02_04:14:12:135] Modem works in MBIM mode
[04-02_04:14:12:135] apn (null), user (null), passwd (null), auth 0
[04-02_04:14:12:135] IP Proto MBIMContextIPTypeIPv4
[04-02_04:14:12:154] mbim_read_thread is created
sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory
[04-02_04:14:12:156] system(echo 0 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256
[04-02_04:14:12:185] system(ip address flush dev rmnet_mhi0)=0
[04-02_04:14:12:187] system(ip link set dev rmnet_mhi0 down)=0
[04-02_04:14:12:188] mbim_open_device()
[04-02_04:14:12:605] mbim_device_caps_query()
[04-02_04:14:12:610] DeviceId: 869710030002905
[04-02_04:14:12:610] HardwareInfo: 0
[04-02_04:14:12:610] mbim_set_radio_state( 1 )
[04-02_04:14:12:613] HwRadioState: 1, SwRadioState: 1
[04-02_04:14:12:613] mbim_subscriber_status_query()
[04-02_04:14:12:620] SubscriberReadyState NotInitialized -> Initialized
[04-02_04:14:12:620] mbim_register_state_query()
[04-02_04:14:12:625] RegisterState Unknown -> Home
[04-02_04:14:12:625] mbim_packet_service_query()
[04-02_04:14:12:629] PacketServiceState Unknown -> Attached
[04-02_04:14:12:629] mbim_query_connect(sessionID=0)
[04-02_04:14:12:633] ActivationState Unknown -> Deactivated
[04-02_04:14:12:633] mbim_set_connect(onoff=1, sessionID=0)
[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1
[04-02_04:14:12:680] ActivationState Deactivated -> Activated
[ 69.177035] [I][mhi_netdev_open] Opened net dev interface
[04-02_04:14:12:680] mbim_ip_config(sessionID=0)
[04-02_04:14:12:683] < SessionId = 0
[04-02_04:14:12:683] < IPv4ConfigurationAvailable = 0xf
[04-02_04:14:12:683] < IPv6ConfigurationAvailable = 0x0
[04-02_04:14:12:683] < IPv4AddressCount = 0x1
[04-02_04:14:12:683] < IPv4AddressOffset = 0x3c
[04-02_04:14:12:683] < IPv6AddressCount = 0x0
[04-02_04:14:12:683] < IPv6AddressOffset = 0x0
[04-02_04:14:12:683] < IPv4 = 10.129.59.93/30
[04-02_04:14:12:683] < gw = 10.129.59.94
[04-02_04:14:12:683] < dns1 = 211.138.180.2
[04-02_04:14:12:683] < dns2 = 211.138.180.3
[04-02_04:14:12:683] < ipv4 mtu = 1500
sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory
[04-02_04:14:12:684] system(echo 1 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256
[04-02_04:14:12:689] system(ip link set dev rmnet_mhi0 up)=0
[04-02_04:14:12:692] system(ip -4 address flush dev rmnet_mhi0)=0
[04-02_04:14:12:694] system(ip -4 address add 10.129.59.93/30 dev rmnet_mhi0)=0
[04-02_04:14:12:697] system(ip -4 route add default via 10.129.59.94 dev rmnet_mhi0)=0
[04-02_04:14:12:699] system(ip -4 link set dev rmnet_mhi0 mtu 1500)=0
root@OpenWrt:~# ifconfig rmnet_mhi0
rmnet_mhi0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:99379 errors:0 dropped:0 overruns:0 frame:0
TX packets:176569 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1528181052 (1.4 GiB) TX bytes:62467192 (59.5 MiB)
root@OpenWrt:~# ifconfig rmnet_mhi0.1
rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00
inet addr:10.129.59.93 Mask:255.255.255.252
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:1089360 errors:0 dropped:0 overruns:0 frame:0
TX packets:176581 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1521449058 (1.4 GiB) TX bytes:57525792 (54.8 MiB)
# adjust CPU load balancing
root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
2
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
4
root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog
2000

View File

@ -0,0 +1,134 @@
disable ccflags-y += -DCONFIG_MHI_NETDEV_MBIM in pcie_mhi/Makefile
root@OpenWrt:~# insmod pcie_mhi.ko
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
[ 138.497564] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit]
[ 138.506952] mhi_q 0000:01:00.0: enabling device (0140 -> 0142)
[ 138.514562] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63
[ 138.514581] [I][mhi_power_up] dev_state:RESET
[ 138.514587] [I][mhi_async_power_up] Requested to power on
[ 138.514728] [I][mhi_alloc_coherent] size = 114688, dma_handle = 72160000
[ 138.514734] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221f000
[ 138.515030] [I][mhi_async_power_up] dev_state:RESET ee:AMSS
[ 138.515056] [I][mhi_pm_st_worker] Transition to state:READY
[ 138.515067] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 138.515073] [I][mhi_ready_state_transition] Waiting to enter READY state
[ 138.515210] [I][mhi_async_power_up] Power on setup success
[ 138.515227] [I][mhi_pci_probe] Return successful
[ 138.589013] [I][mhi_ready_state_transition] Device in READY State
[ 138.589029] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR
[ 138.589038] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE
[ 138.589041] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR
[ 138.589046] [I][mhi_init_mmio] Initializing MMIO
[ 138.589050] [I][mhi_init_mmio] CHDBOFF:0x300
[ 138.589060] [I][mhi_init_mmio] ERDBOFF:0x700
[ 138.589065] [I][mhi_init_mmio] Programming all MMIO values.
[ 138.706124] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2
[ 138.706132] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0
[ 138.706140] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR
[ 138.706146] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0
[ 138.708699] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2
[ 138.708706] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS
[ 138.708726] [I][mhi_pm_st_worker] Transition to state:MISSION MODE
[ 138.708736] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 138.708742] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition
[ 138.708758] [I][mhi_init_timesync] No timesync capability found
[ 138.708764] [I][mhi_pm_mission_mode_transition] Adding new devices
[ 138.709785] [I][mhi_dtr_probe] Enter for DTR control channel
[ 138.709794] [I][__mhi_prepare_channel] Entered: preparing channel:18
[ 138.715378] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.715397] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state
[ 138.715403] [I][__mhi_prepare_channel] Entered: preparing channel:19
[ 138.720201] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.720218] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state
[ 138.720236] [I][mhi_dtr_probe] Exit with ret:0
[ 138.720590] [I][mhi_netdev_enable_iface] Prepare the channels for transfer
[ 138.720630] [I][__mhi_prepare_channel] Entered: preparing channel:100
[ 138.757230] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.757253] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state
[ 138.757259] [I][__mhi_prepare_channel] Entered: preparing channel:101
[ 138.774352] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.774370] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state
[ 138.778137] [I][mhi_netdev_enable_iface] Exited.
[ 138.779018] rmnet_vnd_register_device(rmnet_mhi0.1)=0
[ 138.782283] net rmnet_mhi0 rmnet_mhi0.1: NSS context created
[ 138.800865] [I][mhi_pm_mission_mode_transition] Exit with ret:0
root@OpenWrt:~# ./quectel-CM &
root@OpenWrt:~# [04-02_04:12:16:477] Quectel_QConnectManager_Linux_V1.6.0.5
[04-02_04:12:16:477] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002
[04-02_04:12:16:478] network interface '' or qmidev '' is not exist
[04-02_04:12:16:478] netcard driver = pcie_mhi, driver version = V1.3.0.6
[04-02_04:12:16:479] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1
[04-02_04:12:16:479] Modem works in QMI mode
[04-02_04:12:16:505] cdc_wdm_fd = 7
[04-02_04:12:17:506] QmiThreadSendQMITimeout pthread_cond_timeout_np timeout
[04-02_04:12:18:516] Get clientWDS = 19
[04-02_04:12:18:520] Get clientDMS = 1
[04-02_04:12:18:524] Get clientNAS = 3
[04-02_04:12:18:527] Get clientUIM = 1
[04-02_04:12:18:531] Get clientWDA = 1
[04-02_04:12:18:535] requestBaseBandVersion RM500QGLAAR03A01M4G_BETA_20200107F 1 [Dec 30 2019 17:00:00]
[04-02_04:12:18:539] qmap_settings.rx_urb_size = 16384
[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_datagrams = 16
[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_size = 8192
[04-02_04:12:18:539] qmap_settings.dl_minimum_padding = 0
[04-02_04:12:18:550] requestSetLoopBackState(loopback_state=1, replication_factor=14)
[04-02_04:12:18:557] requestGetSIMStatus SIMStatus: SIM_ABSENT
[04-02_04:12:18:560] requestGetProfile[1] ///0
[04-02_04:12:18:563] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW
[04-02_04:12:18:565] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[04-02_04:12:18:566] ifconfig rmnet_mhi0.1 down
[04-02_04:12:18:571] ifconfig rmnet_mhi0.1 0.0.0.0
ifconfig: SIOCSIFFLAGS: Network is down
[04-02_04:12:18:575] SetLoopBackInd: loopback_state=1, replication_factor=14
[04-02_04:12:18:591] requestSetupDataCall WdsConnectionIPv4Handle: 0xe40182a0
[04-02_04:12:18:601] ifconfig rmnet_mhi0 up
[04-02_04:12:18:607] ifconfig rmnet_mhi0.1 up
[04-02_04:12:18:613] you are use OpenWrt?
[04-02_04:12:18:614] should not calling udhcpc manually?
[04-02_04:12:18:614] should modify /etc/config/network as below?
[04-02_04:12:18:614] config interface wan
[04-02_04:12:18:614] option ifname rmnet_mhi0.1
[04-02_04:12:18:614] option proto dhcp
[04-02_04:12:18:614] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status?
[04-02_04:12:18:614] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1
udhcpc: started, v1.28.3
udhcpc: sending discover
udhcpc: sending select for 192.168.48.171
udhcpc: lease of 192.168.48.171 obtained, lease time 7200
[04-02_04:12:18:809] udhcpc: ifconfig rmnet_mhi0.1 192.168.48.171 netmask 255.255.255.248 broadcast +
[04-02_04:12:18:819] udhcpc: setting default routers: 192.168.48.172
root@OpenWrt:~# ifconfig rmnet_mhi0
rmnet_mhi0 Link encap:Ethernet HWaddr 02:50:F4:00:00:00
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:608 (608.0 B) TX bytes:672 (672.0 B)
root@OpenWrt:~# ifconfig rmnet_mhi0.1
rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00
inet addr:192.168.48.171 Mask:255.255.255.248
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:592 (592.0 B) TX bytes:656 (656.0 B)
# adjust CPU load balancing
root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
2
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
4
root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog
2000

View File

@ -0,0 +1,14 @@
root@imx6qsabresd:~# ./QLog -p /dev/mhi_DIAG -s log &
root@imx6qsabresd:~# [000.000]QLog Version: Quectel_QLog_Linux&Android_V1.2.4
[ 298.597963] [I][mhi_uci_open] Node open, ref counts 1
[ 298.605601] [I][mhi_uci_open] Starting channel
[ 298.612159] [I][__mhi_prepare_channel] Entered: preparing channel:4
[ 298.629906] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 298.635415] [I][__mhi_prepare_channel] Chan:4 successfully moved to start state
[ 298.642749] [I][__mhi_prepare_channel] Entered: preparing channel:5
[ 298.658043] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 298.663543] [I][__mhi_prepare_channel] Chan:5 successfully moved to start state
[000.075]open /dev/mhi_DIAG ttyfd = 3
[000.075]Press CTRL+C to stop catch log.
[000.096]qlog_logfile_create log/20160920_145758_0000.qmdl logfd=4
[005.268]recv: 0M 70K 490B in 5181 msec