pci-mhi-fb

ling 2023-05-04 11:47:21 +08:00
parent f7d0746d5b
commit 4771bd4845
19 changed files with 10603 additions and 1 deletion

fibocom_MHI/Makefile Normal file

@@ -0,0 +1,47 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=pcie_mhi_fb
PKG_VERSION:=3.2
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/pcie_mhi_fb
SUBMENU:=WWAN Support
TITLE:=Kernel pcie driver for MHI device
DEPENDS:=+pciids +pciutils +fibocom-dial
  FILES:=$(PKG_BUILD_DIR)/fibo_mhi.ko
  AUTOLOAD:=$(call AutoLoad,90,fibo_mhi)
endef
define KernelPackage/pcie_mhi_fb/description
 Kernel module that registers a custom PCIe MHI platform device.
endef
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \
$(MAKE_OPTS) \
modules
endef
$(eval $(call KernelPackage,pcie_mhi_fb))

fibocom_MHI/src/Kconfig Normal file

@@ -0,0 +1,14 @@
menu "pcie mhi device Drivers"
config PCIE_MHI
tristate "MHI dev net interface"
help
This module enables userspace software clients to communicate
with devices supporting the MHI protocol. Userspace clients
may open the device nodes exposed by MHI UCI and perform
read, write and ioctl operations to communicate with the
attached device.
endmenu

fibocom_MHI/src/Makefile Normal file

@@ -0,0 +1,29 @@
ccflags-y += -g -Wno-incompatible-pointer-types -Wno-unused-variable
#ccflags-y += -DCONFIG_MHI_NETDEV_MBIM
#obj-${CONFIG_PCIE_MHI} := fibo_mhi.o
obj-m := fibo_mhi.o
fibo_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o devices/mhi_netdev.o devices/mhi_uci.o controllers/mhi_qcom.o
PWD := $(shell pwd)
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif
fibo_mhi: clean
ifeq ($(findstring 86,$(ARCH)), 86)
cp -f $(PWD)/controllers/mhi_qcom_x86.h $(PWD)/controllers/mhi_qcom.h
else
cp -f $(PWD)/controllers/mhi_qcom_arm.h $(PWD)/controllers/mhi_qcom.h
endif
#ln -sf makefile Makefile
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
clean:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean

fibocom_MHI/src/controllers/mhi_qcom.c Normal file

@@ -0,0 +1,683 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "../core/mhi.h"
#include "mhi_qcom.h"
#ifndef PCI_IRQ_MSI
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 ))
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
int nvec = maxvec;
int rc;
if (maxvec < minvec)
return -ERANGE;
do {
rc = pci_enable_msi_block(dev, nvec);
if (rc < 0) {
return rc;
} else if (rc > 0) {
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
} while (rc);
return nvec;
}
#endif
static int fibo_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
return pci_enable_msi_range(dev, min_vecs, max_vecs);
}
static void fibo_pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msi(dev);
}
static int fibo_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
return dev->irq + nr;
}
#else
static int fibo_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
return pci_alloc_irq_vectors(dev, min_vecs, max_vecs, flags);
}
static void fibo_pci_free_irq_vectors(struct pci_dev *dev)
{
pci_free_irq_vectors(dev);
}
static int fibo_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
return pci_irq_vector(dev, nr);
}
#endif
static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0300)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0301)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0302)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)},//SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(0x2C7C, 0x0512)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};
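/*
* Optional sketch: exporting the ID table would let udev autoload the module
* via modalias matching. This package relies on the AUTOLOAD rule in the
* OpenWrt Makefile instead, so the line below is illustrative, not required.
*/
MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);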
static struct pci_driver mhi_pcie_driver;
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
fibo_pci_free_irq_vectors(pci_dev);
iounmap(mhi_cntrl->regs);
mhi_cntrl->regs = NULL;
pci_clear_master(pci_dev);
pci_release_region(pci_dev, mhi_dev->resn);
pci_disable_device(pci_dev);
}
static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
resource_size_t start, len;
int i;
mhi_dev->resn = MHI_PCI_BAR_NUM;
ret = pci_assign_resource(pci_dev, mhi_dev->resn);
if (ret) {
MHI_ERR("Error assigning PCI resources, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Error enabling device, ret:%d\n", ret);
goto error_enable_device;
}
ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
if (ret) {
MHI_ERR("Error pci_request_region, ret:%d\n", ret);
goto error_request_region;
}
pci_set_master(pci_dev);
start = pci_resource_start(pci_dev, mhi_dev->resn);
len = pci_resource_len(pci_dev, mhi_dev->resn);
/*begin added by tony.du for mantis 0062018 on 2020-11-10*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,6,0 ))
mhi_cntrl->regs = ioremap_nocache(start, len);
#else
mhi_cntrl->regs = ioremap(start, len);
#endif
/*end added by tony.du for mantis 0062018 on 2020-11-10*/
MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs);
if (!mhi_cntrl->regs) {
MHI_ERR("Error ioremap region\n");
ret = -ENOMEM;
goto error_ioremap;
}
ret = fibo_pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI);
if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
// some platforms (imx_4.1.15_2.0.0_ga, DELL_OPTIPLEX_7010) allocate only one MSI interrupt per PCIe device, so accept a single vector as a fallback
if (ret != 1) {
MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
goto error_req_msi;
}
}
mhi_cntrl->msi_allocated = ret;
MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq);
for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
mhi_cntrl->irq[i] = fibo_pci_irq_vector(pci_dev, i);
if (mhi_cntrl->irq[i] < 0) {
ret = mhi_cntrl->irq[i];
goto error_get_irq_vec;
}
}
return 0;
error_get_irq_vec:
fibo_pci_free_irq_vectors(pci_dev);
error_req_msi:
iounmap(mhi_cntrl->regs);
mhi_cntrl->regs = NULL;
error_ioremap:
pci_clear_master(pci_dev);
pci_release_region(pci_dev, mhi_dev->resn);
error_request_region:
pci_disable_device(pci_dev);
error_enable_device:
return ret;
}
#ifdef CONFIG_PM
static int mhi_runtime_idle(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered returning -EBUSY\n");
/*
* During runtime resume, the RPM framework always calls rpm_idle
* to check whether the device is ready to suspend: if the
* dev.power usage_count is 0, the framework invokes this callback,
* and if the callback returns 0 (or is not defined) it assumes the
* driver is ready to suspend and schedules a runtime suspend.
* In MHI power management, the MHI host shall go to runtime
* suspend only after entering MHI state M2, even if the usage
* count is 0. Return -EBUSY to disable automatic suspend.
*/
return -EBUSY;
}
static int mhi_runtime_suspend(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
ret = mhi_pm_suspend(mhi_cntrl);
if (ret) {
MHI_LOG("Abort due to ret:%d\n", ret);
goto exit_runtime_suspend;
}
ret = mhi_arch_link_off(mhi_cntrl, true);
if (ret)
MHI_ERR("Failed to Turn off link ret:%d\n", ret);
exit_runtime_suspend:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with ret:%d\n", ret);
return ret;
}
static int mhi_runtime_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
if (!mhi_dev->powered_on) {
MHI_LOG("Not fully powered, return success\n");
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
}
/* turn on link */
ret = mhi_arch_link_on(mhi_cntrl);
if (ret)
goto rpm_resume_exit;
/* enter M0 state */
ret = mhi_pm_resume(mhi_cntrl);
rpm_resume_exit:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with :%d\n", ret);
return ret;
}
static int mhi_system_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
ret = mhi_runtime_resume(dev);
if (ret) {
MHI_ERR("Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return ret;
}
int mhi_system_suspend(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev))
return mhi_runtime_suspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
MHI_LOG("Exit\n");
return 0;
}
#endif
/* checks if link is down */
static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
u16 dev_id;
int ret;
/* try reading the device id; if it doesn't match, the link is down */
ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
}
static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
return pm_runtime_get(dev);
}
static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_put_noidle(dev);
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
void *priv,
enum MHI_CB reason)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
if (reason == MHI_CB_IDLE) {
MHI_LOG("Schedule runtime suspend\n");
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
}
}
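/*
* Illustrative sketch: for the MHI_CB_IDLE path above to actually result in
* a runtime suspend, the probe path would typically arm autosuspend on the
* device. The helper name is hypothetical; the delay reuses
* MHI_RPM_SUSPEND_TMR_MS from mhi_qcom.h.
*/
static void __maybe_unused example_setup_autosuspend(struct device *dev)
{
pm_runtime_set_autosuspend_delay(dev, MHI_RPM_SUSPEND_TMR_MS);
pm_runtime_use_autosuspend(dev);
pm_runtime_mark_last_busy(dev);
/* drop the probe-time usage count and let autosuspend take over */
pm_runtime_put_autosuspend(dev);
}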
int mhi_debugfs_trigger_m0(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Exit\n");
pm_runtime_get(&mhi_dev->pci_dev->dev);
pm_runtime_put(&mhi_dev->pci_dev->dev);
return 0;
}
int mhi_debugfs_trigger_m3(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Entry\n");
pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
pm_request_autosuspend(&mhi_dev->pci_dev->dev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
mhi_debugfs_trigger_m0, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
mhi_debugfs_trigger_m3, "%llu\n");
static int mhi_init_debugfs_trigger_go(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
MHI_LOG("Trigger power up sequence\n");
mhi_async_power_up(mhi_cntrl);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL,
mhi_init_debugfs_trigger_go, "%llu\n");
int mhi_init_debugfs_debug_show(struct seq_file *m, void *d)
{
seq_puts(m, "Enable debug mode to debug external soc\n");
seq_puts(m,
"Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n");
seq_puts(m, "No spaces between parameters\n");
seq_puts(m, "\t1. devid : 0 or pci device id to register\n");
seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n");
seq_puts(m, "\t3. domain: Rootcomplex\n");
seq_puts(m, "\t4. smmu_cfg: smmu configuration mask:\n");
seq_puts(m, "\t\t- BIT0: ATTACH\n");
seq_puts(m, "\t\t- BIT1: S1 BYPASS\n");
seq_puts(m, "\t\t- BIT2: FAST_MAP\n");
seq_puts(m, "\t\t- BIT3: ATOMIC\n");
seq_puts(m, "\t\t- BIT4: FORCE_COHERENT\n");
seq_puts(m, "\t\t- BIT5: GEOMETRY\n");
seq_puts(m, "\tAll timeouts are in ms, enter 0 to keep default\n");
seq_puts(m, "Example inputs: '0x307,10000'\n");
seq_puts(m, "\techo '0,10000,1'\n");
seq_puts(m, "\techo '0x307,10000,0,0x3d'\n");
seq_puts(m, "firmware image name will be changed to debug.mbn\n");
return 0;
}
static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file)
{
return single_open(file, mhi_init_debugfs_debug_show, NULL);
}
static ssize_t mhi_init_debugfs_debug_write(struct file *fp,
const char __user *ubuf,
size_t count,
loff_t *pos)
{
char *buf = kmalloc(count + 1, GFP_KERNEL);
/* #,devid,timeout,domain,smmu-cfg */
int args[5] = {0};
static char const *dbg_fw = "debug.mbn";
int ret;
struct mhi_controller *mhi_cntrl = fp->f_inode->i_private;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_device_id *id;
if (!buf)
return -ENOMEM;
ret = copy_from_user(buf, ubuf, count);
if (ret)
goto error_read;
buf[count] = 0;
get_options(buf, ARRAY_SIZE(args), args);
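/* get_options() stores the number of parsed integers in args[0] and the
* values in args[1..]; e.g. writing "0x307,10000" yields
* args = { 2, 0x307, 10000, 0, 0 }, hence the args[0] guards below. */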
kfree(buf);
/* override default parameters */
mhi_cntrl->fw_image = dbg_fw;
mhi_cntrl->edl_image = dbg_fw;
if (args[0] >= 2 && args[2])
mhi_cntrl->timeout_ms = args[2];
if (args[0] >= 3 && args[3])
mhi_cntrl->domain = args[3];
if (args[0] >= 4 && args[4])
mhi_dev->smmu_cfg = args[4];
/* If it's a new device id register it */
if (args[0] && args[1]) {
/* find the debug_id and overwrite it */
for (id = mhi_pcie_device_id; id->vendor; id++)
if (id->device == MHI_PCIE_DEBUG_ID) {
id->device = args[1];
pci_unregister_driver(&mhi_pcie_driver);
ret = pci_register_driver(&mhi_pcie_driver);
}
}
mhi_dev->debug_mode = true;
debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl,
&mhi_init_debugfs_trigger_go_fops);
pr_info(
"%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n",
__func__, ret, args[1], mhi_dev->smmu_cfg,
mhi_cntrl->timeout_ms);
return count;
error_read:
kfree(buf);
return ret;
}
static const struct file_operations debugfs_debug_ops = {
.open = mhi_init_debugfs_debug_open,
.release = single_release,
.read = seq_read,
.write = mhi_init_debugfs_debug_write,
};
static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl;
struct mhi_dev *mhi_dev;
u64 addr_win[2];
int ret;
mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
if (!mhi_cntrl) {
pr_err("mhi_alloc_controller fail\n");
return NULL;
}
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev_id = pci_dev->device;
mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
mhi_cntrl->bus = pci_dev->bus->number;
mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
mhi_dev->smmu_cfg = 0;
addr_win[0] = 0;
addr_win[1] = 0xFFFFFFFFF; /* 36-bit address window (64 GB) */
mhi_cntrl->iova_start = addr_win[0];
mhi_cntrl->iova_stop = addr_win[1];
mhi_dev->pci_dev = pci_dev;
mhi_cntrl->pci_dev = pci_dev;
/* setup power management apis */
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->link_status = mhi_link_status;
ret = mhi_arch_platform_init(mhi_dev);
if (ret)
goto error_probe;
ret = mhi_register_mhi_controller(mhi_cntrl);
if (ret)
goto error_register;
if (mhi_cntrl->parent)
debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent,
mhi_cntrl, &debugfs_debug_ops);
return mhi_cntrl;
error_register:
mhi_arch_platform_deinit(mhi_dev);
error_probe:
mhi_free_controller(mhi_cntrl);
return NULL;
}
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id)
{
struct mhi_controller *mhi_cntrl = NULL;
u32 domain = pci_domain_nr(pci_dev->bus);
u32 bus = pci_dev->bus->number;
u32 slot = PCI_SLOT(pci_dev->devfn);
struct mhi_dev *mhi_dev;
int ret;
pr_err("INFO:%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
mhi_cntrl = mhi_platform_probe(pci_dev);
if (!mhi_cntrl) {
pr_err("mhi_platform_probe fail\n");
return -EPROBE_DEFER;
}
mhi_cntrl->dev_id = pci_dev->device;
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_dev->pci_dev = pci_dev;
mhi_dev->powered_on = true;
ret = mhi_arch_pcie_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret);
return ret;
}
ret = mhi_arch_iommu_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret);
goto error_iommu_init;
}
ret = mhi_init_pci_dev(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret);
goto error_init_pci;
}
/* start power up sequence if not in debug mode */
if (!mhi_dev->debug_mode) {
ret = mhi_async_power_up(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret);
goto error_power_up;
}
}
if (mhi_cntrl->dentry) {
debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m0_fops);
debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m3_fops);
}
dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
MHI_LOG("Return successful\n");
return 0;
error_power_up:
mhi_deinit_pci_dev(mhi_cntrl);
error_init_pci:
mhi_arch_iommu_deinit(mhi_cntrl);
error_iommu_init:
mhi_arch_pcie_deinit(mhi_cntrl);
return ret;
}
static void mhi_pci_remove(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev);
if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) {
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("%s\n", dev_name(&pci_dev->dev));
if (!mhi_dev->debug_mode) {
mhi_power_down(mhi_cntrl, 1);
}
mhi_deinit_pci_dev(mhi_cntrl);
mhi_arch_iommu_deinit(mhi_cntrl);
mhi_arch_pcie_deinit(mhi_cntrl);
mhi_unregister_mhi_controller(mhi_cntrl);
}
}
static const struct dev_pm_ops pm_ops = {
SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
mhi_runtime_resume,
mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
};
static struct pci_driver mhi_pcie_driver = {
.name = "mhi",
.id_table = mhi_pcie_device_id,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
.driver = {
.pm = &pm_ops
}
};
int __init mhi_controller_qcom_init(void)
{
return pci_register_driver(&mhi_pcie_driver);
};
void mhi_controller_qcom_exit(void)
{
pr_err("INFO:%s enter\n", __func__);
pci_unregister_driver(&mhi_pcie_driver);
pr_err("INFO:%s exit\n", __func__);
}

fibocom_MHI/src/controllers/mhi_qcom.h Normal file

@@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

fibocom_MHI/src/controllers/mhi_qcom_arm.h Normal file

@@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

fibocom_MHI/src/controllers/mhi_qcom_x86.h Normal file

@@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(32));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

fibocom_MHI/src/core/mhi.h Normal file

@@ -0,0 +1,891 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_H_
#define _MHI_H_
#include <linux/miscdevice.h>
typedef u64 uint64;
typedef u32 uint32;
typedef enum
{
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_QMI_2_OUT = 16,
MHI_CLIENT_QMI_2_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_IPCR_OUT = 20,
MHI_CLIENT_IPCR_IN = 21,
MHI_CLIENT_TEST_FW_OUT = 22,
MHI_CLIENT_TEST_FW_IN = 23,
MHI_CLIENT_RESERVED_0 = 24,
MHI_CLIENT_BOOT_LOG_IN = 25,
MHI_CLIENT_DCI_OUT = 26,
MHI_CLIENT_DCI_IN = 27,
MHI_CLIENT_QBI_OUT = 28,
MHI_CLIENT_QBI_IN = 29,
MHI_CLIENT_RESERVED_1_LOWER = 30,
MHI_CLIENT_RESERVED_1_UPPER = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_EDL_OUT = 34,
MHI_CLIENT_EDL_IN = 35,
MHI_CLIENT_ADB_FB_OUT = 36,
MHI_CLIENT_ADB_FB_IN = 37,
MHI_CLIENT_RESERVED_2_LOWER = 38,
MHI_CLIENT_RESERVED_2_UPPER = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_0_OUT = 46,
MHI_CLIENT_IP_SW_0_IN = 47,
MHI_CLIENT_IP_SW_1_OUT = 48,
MHI_CLIENT_IP_SW_1_IN = 49,
MHI_CLIENT_GNSS_OUT = 50,
MHI_CLIENT_GNSS_IN = 51,
MHI_CLIENT_AUDIO_OUT = 52,
MHI_CLIENT_AUDIO_IN = 53,
MHI_CLIENT_RESERVED_3_LOWER = 54,
MHI_CLIENT_RESERVED_3_UPPER = 59,
MHI_CLIENT_TEST_0_OUT = 60,
MHI_CLIENT_TEST_0_IN = 61,
MHI_CLIENT_TEST_1_OUT = 62,
MHI_CLIENT_TEST_1_IN = 63,
MHI_CLIENT_TEST_2_OUT = 64,
MHI_CLIENT_TEST_2_IN = 65,
MHI_CLIENT_TEST_3_OUT = 66,
MHI_CLIENT_TEST_3_IN = 67,
MHI_CLIENT_RESERVED_4_LOWER = 68,
MHI_CLIENT_RESERVED_4_UPPER = 91,
MHI_CLIENT_OEM_0_OUT = 92,
MHI_CLIENT_OEM_0_IN = 93,
MHI_CLIENT_OEM_1_OUT = 94,
MHI_CLIENT_OEM_1_IN = 95,
MHI_CLIENT_OEM_2_OUT = 96,
MHI_CLIENT_OEM_2_IN = 97,
MHI_CLIENT_OEM_3_OUT = 98,
MHI_CLIENT_OEM_3_IN = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_ADPL = 102,
MHI_CLIENT_RESERVED_5_LOWER = 103,
MHI_CLIENT_RESERVED_5_UPPER = 127,
MHI_MAX_CHANNELS = 128
}MHI_CLIENT_CHANNEL_TYPE;
#define MHI_VERSION 0x01000000
#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */
#define MHI_MSI_INDEX 1
#define MAX_NUM_MHI_DEVICES 1
#define NUM_MHI_XFER_RINGS 128
#define NUM_MHI_EVT_RINGS 3
#define PRIMARY_EVENT_RING 0
#define IPA_OUT_EVENT_RING 1
#define IPA_IN_EVENT_RING 2
#define NUM_MHI_XFER_RING_ELEMENTS 16
#define NUM_MHI_EVT_RING_ELEMENTS 256
#define NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS 2048
#define NUM_MHI_IPA_IN_EVT_RING_ELEMENTS 1024
#define NUM_MHI_IPA_IN_RING_ELEMENTS 256
#define NUM_MHI_IPA_OUT_RING_ELEMENTS 256
#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
#define NUM_MHI_CHAN_RING_ELEMENTS 8
#define MHI_EVT_CMD_QUEUE_SIZE 160
#define MHI_EVT_STATE_QUEUE_SIZE 128
#define MHI_EVT_XFER_QUEUE_SIZE 1024
#define MHI_ALIGN_4BYTE_OFFSET 0x3
#define MHI_ALIGN_4K_OFFSET 0xFFF
#define MAX_TRB_DATA_SIZE 0xFFFF
#define RESERVED_VALUE_64 0xFFFFFFFFFFFFFFFF
#define RESERVED_VALUE 0xFFFFFFFF
#define PCIE_LINK_DOWN 0xFFFFFFFF
#define SECONDS 1000
#define MINUTES 60000
#define MHI_FILE_MHI 0x4D4849
#define MHI_FILE_INIT 0x494E4954
#define MHI_FILE_MSI 0x4D5349
#define MHI_FILE_OS 0x4F53
#define MHI_FILE_SM 0x534D
#define MHI_FILE_THREADS 0x54485245
#define MHI_FILE_TRANSFER 0x5452414E
#define MHI_FILE_UTILS 0x5554494C
#define MHI_ER_PRIORITY_HIGH 0
#define MHI_ER_PRIORITY_MEDIUM 1
#define MHI_ER_PRIORITY_SPECIAL 2
#undef FALSE
#undef TRUE
#define FALSE 0
#define TRUE 1
typedef struct MHI_DEV_CTXT MHI_DEV_CTXT;
typedef struct PCI_CORE_INFO PCI_CORE_INFO;
typedef struct PCIE_DEV_INFO PCIE_DEV_INFO;
/* Memory Segment Properties */
typedef struct _MHI_MEM_PROPS
{
uint64 VirtAligned;
uint64 VirtUnaligned;
uint64 PhysAligned;
uint64 PhysUnaligned;
uint64 Size;
void *Handle;
}MHI_MEM_PROPS, *PMHI_MEM_PROPS;
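/*
* Illustrative helper (hypothetical, not part of this header): how the
* aligned/unaligned pairs above are typically maintained: keep the raw
* addresses for freeing and round up for use. Assumes Align is a power of two.
*/
static inline void example_fill_mem_props(MHI_MEM_PROPS *Mem, uint64 Virt,
uint64 Phys, uint64 Size, uint64 Align)
{
Mem->VirtUnaligned = Virt;
Mem->PhysUnaligned = Phys;
Mem->VirtAligned = (Virt + (Align - 1)) & ~(Align - 1);
Mem->PhysAligned = (Phys + (Align - 1)) & ~(Align - 1);
Mem->Size = Size;
Mem->Handle = NULL;
}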
/* Device Power State Type */
typedef enum
{
POWER_DEVICE_INVALID = 0,
POWER_DEVICE_D0 = 1,
POWER_DEVICE_D1 = 2,
POWER_DEVICE_D2 = 3,
POWER_DEVICE_D3 = 4,
POWER_DEVICE_D3FINAL = 5, // System shutting down
POWER_DEVICE_HIBARNATION = 6, // Entering system state S4
POWER_DEVICE_MAX = 7
}PWR_STATE_TYPE;
/* Channel State */
typedef enum
{
CHAN_STATE_DISABLED = 0,
CHAN_STATE_ENABLED = 1,
CHAN_STATE_RUNNING = 2,
CHAN_STATE_SUSPENDED = 3,
CHAN_STATE_STOPPED = 4,
CHAN_STATE_ERROR = 5,
CHAN_STATE_OTHER = RESERVED_VALUE
}CHAN_STATE_TYPE;
/* Channel Type */
typedef enum
{
INVALID_CHAN = 0,
OUTBOUND_CHAN = 1,
INBOUND_CHAN = 2,
OTHER_CHAN = RESERVED_VALUE
}CHAN_TYPE;
/* Ring Type */
typedef enum
{
CMD_RING = 0,
XFER_RING = 1,
EVT_RING = 2,
}MHI_RING_TYPE;
/* Event Ring */
typedef enum
{
EVT_RING_INVALID = 0x0,
EVT_RING_VALID = 0x1,
EVT_RING_RESERVED = RESERVED_VALUE
}MHI_EVENT_RING_TYPE;
#pragma pack(push,1)
/* MHI Ring Context */
typedef /*_ALIGN(1)*/ struct _MHI_RING_CTXT_TYPE
{
uint32 Info;
uint32 Type;
uint32 Index;
uint64 Base;
uint64 Length;
volatile uint64 RP;
uint64 WP;
}MHI_RING_CTXT_TYPE, *PMHI_RING_CTXT_TYPE;
/* MHI Ring Element */
typedef /*_ALIGN(1)*/ struct _MHI_ELEMENT_TYPE
{
uint64 Ptr;
uint32 Status;
uint32 Control;
}MHI_ELEMENT_TYPE, *PMHI_ELEMENT_TYPE;
#pragma pack(pop)
/* Command Ring Element Type */
typedef enum
{
CMD_NONE = 0,
CMD_NOOP = 1,
CMD_RESET_CHAN = 16,
CMD_STOP_CHAN = 17,
CMD_START_CHAN = 18,
CMD_CANCEL_CHAN_XFERS = 21
}MHI_CMD_TYPE;
/* Event Ring Element Type */
typedef enum
{
STATE_CHANGE_EVT = 32,
CMD_COMPLETION_EVT = 33,
XFER_COMPLETION_EVT = 34,
EE_STATE_CHANGE_EVT = 64
} MHI_EVT_TYPE;
/* Ring Status Type */
typedef enum
{
RING_EMPTY = 0,
RING_FULL = 1,
RING_QUEUED = 2,
} MHI_RING_STATUS_TYPE;
/* XFER Ring Element Type */
#define XFER_RING_ELEMENT_TYPE 2
/* Event Ring Completion Status */
typedef enum
{
EVT_COMPLETION_INVALID = 0,
EVT_COMPLETION_SUCCESS = 1,
EVT_COMPLETION_EOT = 2,
EVT_COMPLETION_OVERFLOW = 3,
EVT_COMPLETION_EOB = 4,
EVT_COMPLETION_OOB = 5, /* Out-Of-Buffer */
EVT_COMPLETION_DB_MODE = 6,
EVT_COMPLETION_UNDEFINED = 16,
EVT_COMPLETION_MALFORMED = 17,
EVT_COMPLETION_OTHER = RESERVED_VALUE
}EVT_COMPLETION_STATUS_TYPE;
/* *********************************************************************************************** */
/* Macros */
/* *********************************************************************************************** */
#define ADVANCE_RING_PTR(RingCtxt, Ptr, Size) \
*Ptr = ((*Ptr - RingCtxt->Base)/sizeof(MHI_ELEMENT_TYPE) == (Size - 1))? \
RingCtxt->Base: (*Ptr + sizeof(MHI_ELEMENT_TYPE))
#define GET_VIRT_ADDR(MhiCtxt, PhysAddr) \
((MhiCtxt)->CtrlSegProps.VirtAligned + ((PhysAddr) - (MhiCtxt)->CtrlSegProps.PhysAligned))
#define GET_PHYS_ADDR(MhiCtxt, VirtAddr) \
((MhiCtxt)->CtrlSegProps.PhysAligned + ((VirtAddr) - (MhiCtxt)->CtrlSegProps.VirtAligned))
#define GET_RING_ELEMENT_INDEX(RingBase, Element) \
(((Element) - (RingBase))/sizeof(MHI_ELEMENT_TYPE))
#define VALID_RING_PTR(Ring, Ptr) \
(((Ptr) >= (Ring)->Base) && \
((Ptr) <= ((Ring)->Base + (Ring)->Length - sizeof(MHI_ELEMENT_TYPE))))
#define CHAN_INBOUND(_x) ((_x)%2)
#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
((_x) == MHI_CLIENT_SAHARA_IN) || \
((_x) == MHI_CLIENT_BOOT_LOG_IN))
#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
((_x) == MHI_CLIENT_EDL_IN))
#define RESERVED_CHAN(_x) (((_x) == MHI_CLIENT_RESERVED_0) || \
((_x) >= MHI_CLIENT_RESERVED_1_LOWER && (_x) <= MHI_CLIENT_RESERVED_1_UPPER) || \
((_x) >= MHI_CLIENT_RESERVED_2_LOWER && (_x) <= MHI_CLIENT_RESERVED_2_UPPER) || \
((_x) >= MHI_CLIENT_RESERVED_3_LOWER && (_x) <= MHI_CLIENT_RESERVED_3_UPPER) || \
((_x) >= MHI_CLIENT_RESERVED_4_LOWER && (_x) <= MHI_CLIENT_RESERVED_4_UPPER) || \
((_x) >= MHI_CLIENT_RESERVED_5_LOWER))
#define VALID_CHAN(_x) ((((_x) >= 0) && ((_x) < MHI_MAX_CHANNELS)))
#define MHI_HW_CHAN(_x) ((_x) == MHI_CLIENT_IP_HW_0_OUT || \
(_x) == MHI_CLIENT_IP_HW_0_IN || \
(_x) == MHI_CLIENT_ADPL)
#define MIN(_x,_y) ((_x) < (_y) ? (_x): (_y))
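/*
* Worked example (hypothetical helper): open-coded equivalent of
* ADVANCE_RING_PTR for a ring of Size elements. A pointer sitting on the
* last element (index Size - 1) wraps back to Base; otherwise it advances
* by one 16-byte MHI_ELEMENT_TYPE.
*/
static inline uint64 example_advance_ring_ptr(MHI_RING_CTXT_TYPE *Ring,
uint64 Ptr, uint32 Size)
{
uint64 Index = (Ptr - Ring->Base) / sizeof(MHI_ELEMENT_TYPE);
return (Index == (uint64)(Size - 1)) ? Ring->Base : Ptr + sizeof(MHI_ELEMENT_TYPE);
}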
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_cntrl_data;
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
* @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
};
/**
* enum MHI_DEBUG_LEVEL - various debugging levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
MHI_MSG_LVL_INFO,
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
};
/**
* enum MHI_FLAGS - Transfer flags
* @MHI_EOB: End of buffer for bulk transfer
* @MHI_EOT: End of transfer
* @MHI_CHAIN: Linked transfer
*/
enum MHI_FLAGS {
MHI_EOB,
MHI_EOT,
MHI_CHAIN,
};
/**
* struct image_info - firmware and rddm table
* @mhi_buf - Contains device firmware and rddm table
* @bhi_vec - BHI vector table entries
* @entries - # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
struct bhi_vec_entry *bhi_vec;
u32 entries;
};
/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
* @regs: Points to base of MHI MMIO register space
* @bhi: Points to base of MHI BHI register space
* @wake_db: MHI WAKE doorbell register address
* @dev_id: PCIe device id of the external device
* @domain: PCIe domain the device connected to
* @bus: PCIe bus the device assigned to
* @slot: PCIe slot for the modem
* @iova_start: IOMMU starting address for data
* @iova_stop: IOMMU stop address for data
* @fw_image: Firmware image name for normal booting
* @edl_image: Firmware image name for emergency download mode
* @fbc_download: MHI host needs to do complete image transfer
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size
* @seg_len: BHIe vector size
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
* @max_chan: Maximum number of channels controller support
* @mhi_chan: Points to channel configuration table
* @lpm_chans: List of channels that require LPM notifications
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
* @msi_required: Number of msi required to operate
* @msi_allocated: Number of msi allocated by bus master
* @irq: base irq # to request
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @timeout_ms: Timeout in ms for state transitions
* @pm_state: Power management state
* @ee: MHI device execution environment
* @dev_state: MHI STATE
* @status_cb: CB function to notify various power states to bus master
* @link_status: Query link status in case of abnormal value read from device
* @runtime_get: Async runtime resume function
* @runtime_put: Release votes
* @priv_data: Points to bus master's private data
*/
struct mhi_controller {
struct list_head node;
/* device node for iommu ops */
struct device *dev;
struct pci_dev *pci_dev;
/* mmio base */
void __iomem *regs;
void __iomem *bhi;
void __iomem *wake_db;
/* device topology */
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
/* addressing window */
dma_addr_t iova_start;
dma_addr_t iova_stop;
/* fw images */
const char *fw_image;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
bool fbc_download;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
u32 session_id;
u32 sequence_id;
struct image_info *fbc_image;
struct image_info *rddm_image;
/* physical channel config data */
u32 max_chan;
struct mhi_chan *mhi_chan;
struct list_head lpm_chans; /* these chan require lpm notification */
/* physical event config data */
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
int irq[8]; /* interrupt table */
struct mhi_event *mhi_event;
/* cmd rings */
struct mhi_cmd *mhi_cmd;
/* mhi context (shared with device) */
struct mhi_ctxt *mhi_ctxt;
u32 timeout_ms;
/* caller should grab pm_mutex for suspend/resume operations */
struct mutex pm_mutex;
bool pre_init;
rwlock_t pm_lock;
u32 pm_state;
u32 ee;
u32 dev_state;
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
/* debug counters */
u32 M0, M1, M2, M3;
/* worker for different state transitions */
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct m1_worker;
struct work_struct syserr_worker;
wait_queue_head_t state_event;
/* shadow functions */
void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv,
enum MHI_CB reason);
int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv);
void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
/* kernel log level */
enum MHI_DEBUG_LEVEL klog_lvl;
/* private log level controller driver to set */
enum MHI_DEBUG_LEVEL log_lvl;
/* controller specific data */
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
struct mhi_cntrl_data *data;
struct miscdevice miscdev;
};
/**
* struct mhi_device - mhi device structure associated bind to channel
* @dev: Device associated with the channels
* @mtu: Maximum # of bytes controller support
* @ul_chan_id: MHI channel id for UL transfer
* @dl_chan_id: MHI channel id for DL transfer
* @priv: Driver private data
*/
struct mhi_device {
struct device dev;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
size_t mtu;
int ul_chan_id;
int dl_chan_id;
int ul_event_id;
int dl_event_id;
const struct mhi_device_id *id;
const char *chan_name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
atomic_t dev_wake;
void *priv_data;
int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
};
/**
* struct mhi_result - Completed buffer information
* @buf_addr: Address of data buffer
* @dir: Channel direction
* @bytes_xfer: # of bytes transferred
* @transaction_status: Status of the last transfer
*/
struct mhi_result {
void *buf_addr;
enum dma_data_direction dir;
size_t bytes_xferd;
int transaction_status;
};
/**
* struct mhi_buf - Describes the buffer
* @buf: cpu address for the buffer
* @phys_addr: physical address of the buffer
* @dma_addr: iommu address for the buffer
* @len: # of bytes
* @name: Buffer label, for offload channel configurations name must be:
* ECA - Event context array data
* CCA - Channel context array data
*/
struct mhi_buf {
void *buf;
phys_addr_t phys_addr;
dma_addr_t dma_addr;
size_t len;
const char *name; /* ECA, CCA */
};
/**
* struct mhi_driver - mhi driver information
* @id_table: NULL terminated channel ID names
* @ul_xfer_cb: UL data transfer callback
* @dl_xfer_cb: DL data transfer callback
* @status_cb: Asynchronous status callback
*/
struct mhi_driver {
const struct mhi_device_id *id_table;
int (*probe)(struct mhi_device *mhi_dev,
const struct mhi_device_id *id);
void (*remove)(struct mhi_device *mhi_dev);
void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
struct mhi_result *result);
void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
struct mhi_result *result);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
struct device_driver driver;
};
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
void *priv)
{
mhi_dev->priv_data = priv;
}
static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
return mhi_dev->priv_data;
}
/**
* mhi_queue_transfer - Queue a buffer to hardware
* All transfers are asynchronous
* @mhi_dev: Device associated with the channels
* @dir: Data direction
* @buf: Data buffer (skb for hardware channels)
* @len: Size in bytes
* @mflags: Interrupt flags for the device
*/
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
void *buf,
size_t len,
enum MHI_FLAGS mflags)
{
if (dir == DMA_TO_DEVICE)
return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
mflags);
else
return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
mflags);
}
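/*
* Usage sketch (hypothetical client code): queue a downlink buffer. Assumes
* <linux/slab.h> and <linux/dma-direction.h> are available in the caller;
* completion is reported through the client's dl_xfer_cb as a mhi_result.
*/
static inline int example_queue_rx_buf(struct mhi_device *mhi_dev)
{
void *buf = kmalloc(mhi_dev->mtu, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* DMA_FROM_DEVICE selects the DL channel inside mhi_queue_transfer() */
return mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mhi_dev->mtu, MHI_EOT);
}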
static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
return mhi_cntrl->priv_data;
}
static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
/**
* mhi_driver_register - Register driver with MHI framework
* @mhi_drv: mhi_driver structure
*/
int mhi_driver_register(struct mhi_driver *mhi_drv);
/**
* mhi_driver_unregister - Unregister a driver for mhi_devices
* @mhi_drv: mhi_driver structure
*/
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
/**
* mhi_device_configure - configure ECA or CCA context
* For offload channels that the client manages, call this
* function to configure the channel context or event context
* array associated with the channel
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
* @mhi_buf: Configuration data
* @elements: # of configuration elements
*/
int mhi_device_configure(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
struct mhi_buf *mhi_buf,
int elements);
/**
* mhi_device_get - disable all low power modes
* Only disables lpm; does not immediately exit low power mode
* if the controller is already in a low power mode
* @mhi_dev: Device associated with the channels
*/
void mhi_device_get(struct mhi_device *mhi_dev);
/**
* mhi_device_get_sync - disable all low power modes
* Synchronously disable all low power modes; exit low power mode
* if the controller is already in a low power state
* @mhi_dev: Device associated with the channels
*/
int mhi_device_get_sync(struct mhi_device *mhi_dev);
/**
* mhi_device_put - re-enable low power modes
* @mhi_dev: Device associated with the channels
*/
void mhi_device_put(struct mhi_device *mhi_dev);
/**
* mhi_prepare_for_transfer - setup channel for data transfer
* Moves both UL and DL channel from RESET to START state
* @mhi_dev: Device associated with the channels
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
* mhi_unprepare_from_transfer - unprepare the channels
* Moves both UL and DL channels to RESET state
* @mhi_dev: Device associated with the channels
*/
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_get_no_free_descriptors - Get transfer ring length
* Get # of TD available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
/**
* mhi_poll - poll for any available data to consume
* This is only applicable for DL direction
* @mhi_dev: Device associated with the channels
* @budget: Number of descriptors to service before returning
*/
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
/**
* mhi_ioctl - user space IOCTL support for MHI channels
* Native support for setting TIOCM
* @mhi_dev: Device associated with the channels
* @cmd: IOCTL cmd
* @arg: Optional parameter, ioctl cmd specific
*/
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
/**
* mhi_alloc_controller - Allocate mhi_controller structure
* Allocate controller structure and additional data for controller
* private data. You may get the private data pointer by calling
* mhi_controller_get_devdata
* @size: # of additional bytes to allocate
*/
struct mhi_controller *mhi_alloc_controller(size_t size);
/**
* mhi_register_mhi_controller - Register MHI controller
* Registers MHI controller with MHI bus framework. DT must be supported
* @mhi_cntrl: MHI controller to register
*/
int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
/**
* mhi_bdf_to_controller - Look up a registered controller
* Search for controller based on device identification
* @domain: RC domain of the device
* @bus: Bus device connected to
* @slot: Slot device assigned to
* @dev_id: Device Identification
*/
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
u32 dev_id);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up
* This is optional; call it before power up if the controller does not
* want the bus framework to automatically free any allocated memory during
* the shutdown process.
* @mhi_cntrl: MHI controller
*/
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_async_power_up - Starts MHI power up sequence
* @mhi_cntrl: MHI controller
*/
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_power_down - Start MHI power down sequence
* @mhi_cntrl: MHI controller
* @graceful: if true, the link is still accessible, so do a graceful shutdown
* process; otherwise shut down the host without putting the device into RESET state
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
* mhi_unprepare_after_power_down - free any memory allocated for power up
* @mhi_cntrl: MHI controller
*/
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_suspend - Move MHI into a suspended state
* Transition to MHI state M3 from M0/M1/M2
* @mhi_cntrl: MHI controller
*/
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_resume - Resume MHI from suspended state
* Transition to MHI state M0 from M3
* @mhi_cntrl: MHI controller
*/
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
* @in_panic: If we are trying to capture the image during a kernel panic
*/
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force external device into rddm mode
* to collect the device ramdump. This is useful if the host driver asserts
* and we need to see device state as well.
* @mhi_cntrl: MHI controller
*/
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl);
void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl);
extern int mhi_debug_mask;
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("VERBOSE:[D][%s] " fmt, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_err("INFO:[I][%s] " fmt, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_err("ALERT:[C][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#ifndef MHI_NAME_SIZE
#define MHI_NAME_SIZE 32
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
* @driver_data: driver data
*/
struct mhi_device_id {
const char chan[MHI_NAME_SIZE];
unsigned long driver_data;
};
#endif
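/*
* Minimal client skeleton (hypothetical, for illustration only): how a
* driver would plug into mhi_driver_register() above. The channel name
* "LOOPBACK" and all example_* identifiers are placeholders.
*/
static int example_mhi_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
/* move UL/DL channels from RESET to START */
return mhi_prepare_for_transfer(mhi_dev);
}
static void example_mhi_remove(struct mhi_device *mhi_dev)
{
/* move UL/DL channels back to RESET */
mhi_unprepare_from_transfer(mhi_dev);
}
static void example_mhi_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *result)
{
/* result->buf_addr holds result->bytes_xferd bytes of DL data */
}
static const struct mhi_device_id example_mhi_id_table[] = {
{ .chan = "LOOPBACK" },
{},
};
static struct mhi_driver example_mhi_driver = {
.id_table = example_mhi_id_table,
.probe = example_mhi_probe,
.remove = example_mhi_remove,
.dl_xfer_cb = example_mhi_dl_cb,
.driver = {
.name = "example_mhi_client",
},
};
/* registered with: mhi_driver_register(&example_mhi_driver); */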
#endif /* _MHI_H_ */

fibocom_MHI/src/core/mhi_boot.c Normal file

@@ -0,0 +1,878 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include "mhi.h"
#include "mhi_internal.h"
#define IOCTL_BHI_GETDEVINFO (0x8BE0 + 1)
#define IOCTL_BHI_WRITEIMAGE (0x8BE0 + 2)
/* Software defines */
/* BHI Version */
#define BHI_MAJOR_VERSION 0x1
#define BHI_MINOR_VERSION 0x1
#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */
#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */
#define IsPBLExecEnv(ExecEnv) (((ExecEnv) == MHI_EE_PBL) || ((ExecEnv) == MHI_EE_EDL))
typedef u32 ULONG;
typedef struct _bhi_info_type
{
ULONG bhi_ver_minor;
ULONG bhi_ver_major;
ULONG bhi_image_address_low;
ULONG bhi_image_address_high;
ULONG bhi_image_size;
ULONG bhi_rsvd1;
ULONG bhi_imgtxdb;
ULONG bhi_rsvd2;
ULONG bhi_msivec;
ULONG bhi_rsvd3;
ULONG bhi_ee;
ULONG bhi_status;
ULONG bhi_errorcode;
ULONG bhi_errdbg1;
ULONG bhi_errdbg2;
ULONG bhi_errdbg3;
ULONG bhi_sernum;
ULONG bhi_sblantirollbackver;
ULONG bhi_numsegs;
ULONG bhi_msmhwid[MSMHWID_NUMDWORDS];
ULONG bhi_oempkhash[OEMPKHASH_NUMDWORDS];
ULONG bhi_rsvd5;
}BHI_INFO_TYPE, *PBHI_INFO_TYPE;
#if 0
static void PrintBhiInfo(BHI_INFO_TYPE *bhi_info)
{
ULONG index;
printk("BHI Device Info...\n");
printk("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
printk("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
printk("BHI Status = 0x%X\n", bhi_info->bhi_status);
printk("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
printk("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
printk("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
printk("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
printk("BHI MSM HW-Id = ");
for (index = 0; index < MSMHWID_NUMDWORDS; index++)
{
printk("0x%X ", bhi_info->bhi_msmhwid[index]);
}
printk("\n");
printk("BHI OEM PK Hash = \n");
for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
{
printk("0x%X ", bhi_info->bhi_oempkhash[index]);
}
printk("\n");
}
#endif
static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
{
u32 out = 0;
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out);
return (ret) ? 0 : out;
}
static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
/* bhi_ver */
bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR);
bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNUM);
bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
for (index = 0; index < MSMHWID_NUMDWORDS; index++)
{
bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
}
for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
{
bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
}
bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
//PrintBhiInfo(bhi_info);
/* Check the Execution Environment */
if (!IsPBLExecEnv(bhi_info->bhi_ee))
{
printk("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
}
/* success */
return 0;
}
/* setup rddm vector table for rddm transfer */
static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
int i = 0;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
MHI_VERB("Setting vector:%pad size:%zu\n",
&mhi_buf->dma_addr, mhi_buf->len);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = mhi_buf->len;
}
}
/* collect rddm during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
struct mhi_buf *mhi_buf;
u32 sequence_id;
u32 rx_status;
enum MHI_EE ee;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
const u32 delayus = 100;
u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
void __iomem *base = mhi_cntrl->bhi;
MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
/*
* This should only be executing during a kernel panic, we expect all
* other cores to shutdown while we're collecting rddm buffer. After
* returning from this function, we expect device to reset.
*
* Normally, we would read/write pm_state only after grabbing
* pm_lock; since we're in a panic, skip it.
*/
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
return -EIO;
/*
* There is no guarantee this state change will take effect since
* we're setting it without grabbing pm_lock; it's best effort
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
/* the update should take effect immediately */
smp_wmb();
/* setup the RX vector table */
mhi_rddm_prepare(mhi_cntrl, rddm_image);
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
MHI_LOG("Starting BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
if (unlikely(!sequence_id))
sequence_id = 1;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
MHI_LOG("Trigger device into RDDM mode\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
MHI_LOG("Waiting for image download completion\n");
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status);
if (ret)
return -EIO;
if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
MHI_LOG("RDDM successfully collected\n");
return 0;
}
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
MHI_ERR("Did not complete RDDM transfer\n");
MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
return -EIO;
}
/* download ramdump image from device */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
struct mhi_buf *mhi_buf;
int ret;
u32 rx_status;
u32 sequence_id;
if (!rddm_image)
return -ENOMEM;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_RDDM ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
return -EIO;
}
mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
/* vector table is the last entry */
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, sequence_id);
MHI_LOG("Waiting for image download completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_download_rddm_img);
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming\n");
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, mhi_cntrl->sequence_id);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_TXVECSTATUS_OFFS,
BHIE_TXVECSTATUS_STATUS_BMSK,
BHIE_TXVECSTATUS_STATUS_SHFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
void *buf,
size_t size)
{
u32 tx_status, val;
int i, ret;
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
dma_addr_t phys = dma_map_single(mhi_cntrl->dev, buf, size,
DMA_TO_DEVICE);
struct {
char *name;
u32 offset;
} error_reg[] = {
{ "ERROR_CODE", BHI_ERRCODE },
{ "ERROR_DBG1", BHI_ERRDBG1 },
{ "ERROR_DBG2", BHI_ERRDBG2 },
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
if (dma_mapping_error(mhi_cntrl->dev, phys))
return -ENOMEM;
MHI_LOG("Starting BHI programming\n");
/* program the start of SBL download via the BHI protocol */
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(phys));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(phys));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id);
read_unlock_bh(pm_lock);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
/*MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||*/
mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
BHI_STATUS_MASK, BHI_STATUS_SHIFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
#if 0
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
goto invalid_pm_state;
#endif
MHI_LOG("image transfer status:%d\n", tx_status);
if (tx_status == BHI_STATUS_ERROR) {
MHI_ERR("Image transfer failed\n");
read_lock_bh(pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
for (i = 0; error_reg[i].name; i++) {
ret = mhi_read_reg(mhi_cntrl, base,
error_reg[i].offset, &val);
if (ret)
break;
MHI_ERR("reg:%s value:0x%x\n",
error_reg[i].name, val);
}
}
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
dma_unmap_single(mhi_cntrl->dev, phys, size, DMA_TO_DEVICE);
return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
invalid_pm_state:
dma_unmap_single(mhi_cntrl->dev, phys, size, DMA_TO_DEVICE);
return -EIO;
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
for (i = 0; i < image_info->entries; i++, mhi_buf++)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
kfree(image_info->mhi_buf);
kfree(image_info);
}
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
{
size_t seg_size = mhi_cntrl->seg_len;
/* require an additional entry for the vector table */
int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
alloc_size, seg_size, segments);
img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
if (!img_info)
return -ENOMEM;
/* allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
GFP_KERNEL);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
/* allocate and populate vector table */
mhi_buf = img_info->mhi_buf;
for (i = 0; i < segments; i++, mhi_buf++) {
size_t vec_size = seg_size;
/* last entry is for vector table */
if (i == segments - 1)
vec_size = sizeof(struct __packed bhi_vec_entry) * i;
mhi_buf->len = vec_size;
mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
&mhi_buf->dma_addr, GFP_KERNEL);
if (!mhi_buf->buf)
goto error_alloc_segment;
MHI_LOG("Entry:%d Address:0x%llx size:%zd\n", i,
(u64)mhi_buf->dma_addr, mhi_buf->len);
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
img_info->entries = segments;
*image_info = img_info;
MHI_LOG("Successfully allocated bhi vec table\n");
return 0;
error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
error_alloc_mhi_buf:
kfree(img_info);
return -ENOMEM;
}
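/*
 * For intuition on the sizing above, a minimal sketch with hypothetical
 * numbers (not taken from any real controller configuration): a 1 MiB
 * image split over a 256 KiB seg_len needs four data segments plus the
 * trailing vector-table segment.
 */
static size_t example_bhie_sizing(void)
{
	size_t alloc_size = 1 << 20;	/* 1 MiB firmware image (illustrative) */
	size_t seg_size = 256 << 10;	/* seg_len of 256 KiB (illustrative) */
	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; /* 4 data + 1 vector = 5 */

	/* the table itself holds one entry per data segment: 4 * 16 = 64 bytes */
	return sizeof(struct bhi_vec_entry) * (segments - 1);
}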
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
const struct firmware *firmware,
struct image_info *img_info)
{
size_t remainder = firmware->size;
size_t to_cpy;
const u8 *buf = firmware->data;
int i = 0;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
while (remainder) {
MHI_ASSERT(i >= img_info->entries, "malformed vector table");
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = to_cpy;
MHI_VERB("Setting Vector:0x%llx size: %llu\n",
bhi_vec->dma_addr, bhi_vec->size);
buf += to_cpy;
remainder -= to_cpy;
i++;
bhi_vec++;
mhi_buf++;
}
}
void mhi_fw_load_worker(struct work_struct *work)
{
int ret;
struct mhi_controller *mhi_cntrl;
const char *fw_name;
const struct firmware *firmware;
struct image_info *image_info;
void *buf;
size_t size;
mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_PBL(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state\n");
return;
}
MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
/* if the device is in pass-through, we do not have to load firmware */
if (mhi_cntrl->ee == MHI_EE_PT)
return;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
return;
}
ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
if (ret) {
MHI_ERR("Error loading firmware, ret:%d\n", ret);
return;
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
/* the SBL size provided is a maximum, not necessarily the image size */
if (size > firmware->size)
size = firmware->size;
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
MHI_ERR("Could not allocate memory for image\n");
release_firmware(firmware);
return;
}
/* load sbl image */
memcpy(buf, firmware->data, size);
ret = mhi_fw_load_sbl(mhi_cntrl, buf, size);
kfree(buf);
if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
release_firmware(firmware);
/* error or in edl, we're done */
if (ret || mhi_cntrl->ee == MHI_EE_EDL)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
* if we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
firmware->size);
if (ret) {
MHI_ERR("Error alloc size of %zu\n", firmware->size);
goto error_alloc_fw_table;
}
MHI_LOG("Copying firmware image into vector table\n");
/* load the firmware into BHIE vec table */
mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
}
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
if (!mhi_cntrl->fbc_download)
return;
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_read;
}
/* wait for BHIE event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_BHIE ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter BHIE\n");
goto error_read;
}
/* start full firmware image download */
image_info = mhi_cntrl->fbc_image;
ret = mhi_fw_load_amss(mhi_cntrl,
/* last entry is vec table */
&image_info->mhi_buf[image_info->entries - 1]);
MHI_LOG("amss fw_load, ret:%d\n", ret);
release_firmware(firmware);
return;
error_read:
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
error_alloc_fw_table:
release_firmware(firmware);
}
int BhiWrite(struct mhi_controller *mhi_cntrl, void *buf, size_t size)
{
int ret;
MHI_LOG("Device current EE:%s,%d M:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
mhi_cntrl->ee,
TO_MHI_STATE_STR(mhi_get_m_state(mhi_cntrl)));
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
if (!MHI_IN_PBL(mhi_cntrl->ee)/* || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)*/) {
MHI_ERR("MHI is not in valid BHI state:%d\n", mhi_cntrl->ee);
return -EINVAL;
}
if (mhi_cntrl->ee != MHI_EE_EDL)
return 0;
ret = mhi_fw_load_sbl(mhi_cntrl, buf, size);
if (ret) {
MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
goto error_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_state;
}
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
/* wait for the device to enter the Flash Programmer environment */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_FP ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter Flash Programmer Environment\n");
goto error_state;
}
MHI_LOG("MHI enter Flash Programmer Environment\n");
return 0;
error_state:
MHI_LOG("Device current EE:%s, M:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_m_state(mhi_cntrl)));
return ret;
}
static int mhi_cntrl_open(struct inode *inode, struct file *f)
{
return 0;
}
static int mhi_cntrl_release(struct inode *inode, struct file *f)
{
return 0;
}
static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
{
long ret = -EINVAL;
void __user *ubuf = (void __user *)__arg;
struct miscdevice *c = (struct miscdevice *)f->private_data;
struct mhi_controller *mhi_cntrl = container_of(c, struct mhi_controller, miscdev);
switch (cmd) {
case IOCTL_BHI_GETDEVINFO:
{
BHI_INFO_TYPE bhi_info;
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
if (copy_to_user(ubuf, &bhi_info, sizeof(bhi_info))) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy to user failed\n");
ret = -EFAULT;
}
}
break;
case IOCTL_BHI_WRITEIMAGE:
{
void *buf;
size_t size;
if (copy_from_user(&size, ubuf, sizeof(size))) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size from user failed\n");
return -EFAULT;
}
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL) {
return -ENOMEM;
}
if (copy_from_user(buf, ubuf + sizeof(size), size)) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf from user failed\n");
kfree(buf);
return -EFAULT;
}
ret = BhiWrite(mhi_cntrl, buf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
kfree(buf);
}
break;
default:
break;
}
return ret;
}
static const struct file_operations mhi_cntrl_fops = {
.unlocked_ioctl = mhi_cntrl_ioctl,
.open = mhi_cntrl_open,
.release = mhi_cntrl_release,
};
int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl)
{
mhi_cntrl->miscdev.minor = MISC_DYNAMIC_MINOR;
mhi_cntrl->miscdev.name = "mhi_BHI";
mhi_cntrl->miscdev.fops = &mhi_cntrl_fops;
return misc_register(&mhi_cntrl->miscdev);
}
void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl)
{
misc_deregister(&mhi_cntrl->miscdev);
}
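/*
 * A user-space client would drive the /dev/mhi_BHI node roughly as below.
 * This is only a sketch: the IOCTL_BHI_GETDEVINFO request code and the
 * BHI_INFO_TYPE layout come from the driver's shared header, which is not
 * part of this diff.
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int dump_bhi_info(void)
{
	BHI_INFO_TYPE info;	/* layout shared with the driver */
	int fd = open("/dev/mhi_BHI", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IOCTL_BHI_GETDEVINFO, &info) == 0)
		printf("BHI ee:0x%x status:0x%x\n", info.bhi_ee, info.bhi_status);
	close(fd);
	return 0;
}
#endif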

View File

@ -0,0 +1,362 @@
#ifndef __MHI_COMMON_H
#define __MHI_COMMON_H
#include <linux/types.h>
/* MHI control data structures allocated by the host, including
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/* maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element tre;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#endif /* __MHI_COMMON_H */

View File

@ -0,0 +1,181 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/wait.h>
#include "mhi.h"
#include "mhi_internal.h"
struct __packed dtr_ctrl_msg {
u32 preamble;
u32 msg_id;
u32 dest_id;
u32 size;
u32 msg;
};
#define CTRL_MAGIC (0x4C525443)
#define CTRL_MSG_DTR BIT(0)
#define CTRL_MSG_ID (0x10)
static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
u32 tiocm)
{
struct dtr_ctrl_msg *dtr_msg = NULL;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
int ret = 0;
tiocm &= TIOCM_DTR;
if (mhi_chan->tiocm == tiocm)
return 0;
mutex_lock(&dtr_chan->mutex);
dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
if (!dtr_msg) {
ret = -ENOMEM;
goto tiocm_exit;
}
dtr_msg->preamble = CTRL_MAGIC;
dtr_msg->msg_id = CTRL_MSG_ID;
dtr_msg->dest_id = mhi_chan->chan;
dtr_msg->size = sizeof(u32);
if (tiocm & TIOCM_DTR)
dtr_msg->msg |= CTRL_MSG_DTR;
reinit_completion(&dtr_chan->completion);
ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
sizeof(*dtr_msg), MHI_EOT);
if (ret)
goto tiocm_exit;
ret = wait_for_completion_timeout(&dtr_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret) {
MHI_ERR("Failed to receive transfer callback\n");
ret = -EIO;
goto tiocm_exit;
}
ret = 0;
mhi_chan->tiocm = tiocm;
tiocm_exit:
kfree(dtr_msg);
mutex_unlock(&dtr_chan->mutex);
return ret;
}
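/*
 * For reference, the message built above for a DTR assert looks like this
 * on the wire (a sketch; the destination channel number is illustrative):
 *
 *	struct dtr_ctrl_msg msg = {
 *		.preamble = CTRL_MAGIC,		// 0x4C525443
 *		.msg_id   = CTRL_MSG_ID,	// 0x10
 *		.dest_id  = mhi_chan->chan,	// e.g. 18 for a hypothetical port
 *		.size     = sizeof(u32),
 *		.msg      = CTRL_MSG_DTR,	// BIT(0) set: DTR asserted
 *	};
 */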
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = mhi_dev->ul_chan;
int ret;
/* ioctl not supported by this controller */
if (!mhi_cntrl->dtr_dev)
return -EIO;
switch (cmd) {
case TIOCMGET:
return mhi_chan->tiocm;
case TIOCMSET:
{
u32 tiocm;
ret = get_user(tiocm, (u32 *)arg);
if (ret)
return ret;
return mhi_dtr_tiocmset(mhi_cntrl, mhi_chan, tiocm);
}
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(mhi_ioctl);
static void mhi_dtr_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
if (!mhi_result->transaction_status)
complete(&dtr_chan->completion);
}
static void mhi_dtr_remove(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_cntrl->dtr_dev = NULL;
}
static int mhi_dtr_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
MHI_LOG("Enter for DTR control channel\n");
ret = mhi_prepare_for_transfer(mhi_dev);
if (!ret)
mhi_cntrl->dtr_dev = mhi_dev;
MHI_LOG("Exit with ret:%d\n", ret);
return ret;
}
static const struct mhi_device_id mhi_dtr_table[] = {
{ .chan = "IP_CTRL" },
{ },
};
static struct mhi_driver mhi_dtr_driver = {
.id_table = mhi_dtr_table,
.remove = mhi_dtr_remove,
.probe = mhi_dtr_probe,
.ul_xfer_cb = mhi_dtr_xfer_cb,
.dl_xfer_cb = mhi_dtr_xfer_cb,
.driver = {
.name = "MHI_DTR",
.owner = THIS_MODULE,
}
};
int __init mhi_dtr_init(void)
{
return mhi_driver_register(&mhi_dtr_driver);
}
void mhi_dtr_exit(void)
{
mhi_driver_unregister(&mhi_dtr_driver);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,800 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_INT_H
#define _MHI_INT_H
#include <linux/version.h>
#ifndef writel_relaxed
#define writel_relaxed writel
#endif
#ifndef U32_MAX
#define U32_MAX ((u32)~0U)
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 ))
static inline void reinit_completion(struct completion *x)
{
x->done = 0;
}
#endif
extern struct bus_type mhi_bus_type;
/* MHI mmio register mapping */
#define PCI_INVALID_READ(val) (val == U32_MAX)
#define MHIREGLEN (0x0)
#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
#define MHIREGLEN_MHIREGLEN_SHIFT (0)
#define MHIVER (0x8)
#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
#define MHIVER_MHIVER_SHIFT (0)
#define MHICFG (0x10)
#define MHICFG_NHWER_MASK (0xFF000000)
#define MHICFG_NHWER_SHIFT (24)
#define MHICFG_NER_MASK (0xFF0000)
#define MHICFG_NER_SHIFT (16)
#define MHICFG_NHWCH_MASK (0xFF00)
#define MHICFG_NHWCH_SHIFT (8)
#define MHICFG_NCH_MASK (0xFF)
#define MHICFG_NCH_SHIFT (0)
#define CHDBOFF (0x18)
#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
#define CHDBOFF_CHDBOFF_SHIFT (0)
#define ERDBOFF (0x20)
#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
#define ERDBOFF_ERDBOFF_SHIFT (0)
#define BHIOFF (0x28)
#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
#define BHIOFF_BHIOFF_SHIFT (0)
#define DEBUGOFF (0x30)
#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
#define DEBUGOFF_DEBUGOFF_SHIFT (0)
#define MHICTRL (0x38)
#define MHICTRL_MHISTATE_MASK (0x0000FF00)
#define MHICTRL_MHISTATE_SHIFT (8)
#define MHICTRL_RESET_MASK (0x2)
#define MHICTRL_RESET_SHIFT (1)
#define MHISTATUS (0x48)
#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
#define MHISTATUS_MHISTATE_SHIFT (8)
#define MHISTATUS_SYSERR_MASK (0x4)
#define MHISTATUS_SYSERR_SHIFT (2)
#define MHISTATUS_READY_MASK (0x1)
#define MHISTATUS_READY_SHIFT (0)
#define CCABAP_LOWER (0x58)
#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
#define CCABAP_HIGHER (0x5C)
#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
#define ECABAP_LOWER (0x60)
#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
#define ECABAP_HIGHER (0x64)
#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
#define CRCBAP_LOWER (0x68)
#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
#define CRCBAP_HIGHER (0x6C)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
#define CRDB_LOWER (0x70)
#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
#define CRDB_HIGHER (0x74)
#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
#define MHICTRLBASE_LOWER (0x80)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
#define MHICTRLBASE_HIGHER (0x84)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
#define MHICTRLLIMIT_LOWER (0x88)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
#define MHICTRLLIMIT_HIGHER (0x8C)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
#define MHIDATABASE_LOWER (0x98)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
#define MHIDATABASE_HIGHER (0x9C)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
#define MHIDATALIMIT_LOWER (0xA0)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
#define MHIDATALIMIT_HIGHER (0xA4)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
/* MHI BHI offsets */
#define BHI_BHIVERSION_MINOR (0x00)
#define BHI_BHIVERSION_MAJOR (0x04)
#define BHI_IMGADDR_LOW (0x08)
#define BHI_IMGADDR_HIGH (0x0C)
#define BHI_IMGSIZE (0x10)
#define BHI_RSVD1 (0x14)
#define BHI_IMGTXDB (0x18)
#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHI_TXDB_SEQNUM_SHFT (0)
#define BHI_RSVD2 (0x1C)
#define BHI_INTVEC (0x20)
#define BHI_RSVD3 (0x24)
#define BHI_EXECENV (0x28)
#define BHI_STATUS (0x2C)
#define BHI_ERRCODE (0x30)
#define BHI_ERRDBG1 (0x34)
#define BHI_ERRDBG2 (0x38)
#define BHI_ERRDBG3 (0x3C)
#define BHI_SERIALNUM (0x40)
#define BHI_SBLANTIROLLVER (0x44)
#define BHI_NUMSEG (0x48)
#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
#define BHI_RSVD5 (0xC4)
#define BHI_STATUS_MASK (0xC0000000)
#define BHI_STATUS_SHIFT (30)
#define BHI_STATUS_ERROR (3)
#define BHI_STATUS_SUCCESS (2)
#define BHI_STATUS_RESET (0)
/* MHI BHIE offsets */
#define BHIE_OFFSET (0x0124) /* BHIE register space offset from BHI base */
#define BHIE_MSMSOCID_OFFS (BHIE_OFFSET + 0x0000)
#define BHIE_TXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x002C)
#define BHIE_TXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0030)
#define BHIE_TXVECSIZE_OFFS (BHIE_OFFSET + 0x0034)
#define BHIE_TXVECDB_OFFS (BHIE_OFFSET + 0x003C)
#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_TXVECDB_SEQNUM_SHFT (0)
#define BHIE_TXVECSTATUS_OFFS (BHIE_OFFSET + 0x0044)
#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
#define BHIE_RXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x0060)
#define BHIE_RXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0064)
#define BHIE_RXVECSIZE_OFFS (BHIE_OFFSET + 0x0068)
#define BHIE_RXVECDB_OFFS (BHIE_OFFSET + 0x0070)
#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_RXVECDB_SEQNUM_SHFT (0)
#define BHIE_RXVECSTATUS_OFFS (BHIE_OFFSET + 0x0078)
#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
struct __packed mhi_event_ctxt {
u32 reserved : 8;
u32 intmodc : 8;
u32 intmodt : 16;
u32 ertype;
u32 msivec;
u64 rbase;
u64 rlen;
u64 rp;
u64 wp;
};
struct __packed mhi_chan_ctxt {
u32 chstate : 8;
u32 brstmode : 2;
u32 pollcfg : 6;
u32 reserved : 16;
u32 chtype;
u32 erindex;
u64 rbase;
u64 rlen;
u64 rp;
u64 wp;
};
struct __packed mhi_cmd_ctxt {
u32 reserved0;
u32 reserved1;
u32 reserved2;
u64 rbase;
u64 rlen;
u64 rp;
u64 wp;
};
struct __packed mhi_tre {
u64 ptr;
u32 dword[2];
};
struct __packed bhi_vec_entry {
u64 dma_addr;
u64 size;
};
/* no operation command */
#define MHI_TRE_CMD_NOOP_PTR cpu_to_le64(0)
#define MHI_TRE_CMD_NOOP_DWORD0 cpu_to_le32(0)
#define MHI_TRE_CMD_NOOP_DWORD1 cpu_to_le32(1 << 16)
/* channel reset command */
#define MHI_TRE_CMD_RESET_PTR cpu_to_le64(0)
#define MHI_TRE_CMD_RESET_DWORD0 cpu_to_le32(0)
#define MHI_TRE_CMD_RESET_DWORD1(chid) cpu_to_le32((chid << 24) | (16 << 16))
/* channel stop command */
#define MHI_TRE_CMD_STOP_PTR cpu_to_le64(0)
#define MHI_TRE_CMD_STOP_DWORD0 cpu_to_le32(0)
#define MHI_TRE_CMD_STOP_DWORD1(chid) cpu_to_le32((chid << 24) | (17 << 16))
/* channel start command */
#define MHI_TRE_CMD_START_PTR cpu_to_le64(0)
#define MHI_TRE_CMD_START_DWORD0 cpu_to_le32(0)
#define MHI_TRE_CMD_START_DWORD1(chid) cpu_to_le32((chid << 24) | (18 << 16))
#define MHI_TRE_GET_CMD_CHID(tre) ((le32_to_cpu((tre)->dword[1]) >> 24) & 0xFF)
/* event descriptor macros */
//#define MHI_TRE_EV_PTR(ptr) (ptr)
//#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
#define MHI_TRE_EV_DWORD1(chid, type) cpu_to_le32((chid << 24) | (type << 16))
#define MHI_TRE_GET_EV_PTR(tre) le64_to_cpu((tre)->ptr)
#define MHI_TRE_GET_EV_CODE(tre) ((le32_to_cpu((tre)->dword[0]) >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LEN(tre) (le32_to_cpu((tre)->dword[0]) & 0xFFFF)
#define MHI_TRE_GET_EV_CHID(tre) ((le32_to_cpu((tre)->dword[1]) >> 24) & 0xFF)
#define MHI_TRE_GET_EV_TYPE(tre) ((le32_to_cpu((tre)->dword[1]) >> 16) & 0xFF)
#define MHI_TRE_GET_EV_STATE(tre) ((le32_to_cpu((tre)->dword[0]) >> 24) & 0xFF)
#define MHI_TRE_GET_EV_EXECENV(tre) ((le32_to_cpu((tre)->dword[0]) >> 24) & 0xFF)
/* transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr)
#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(len & MHI_MAX_MTU)
#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) cpu_to_le32((2 << 16) | (bei << 10) \
| (ieot << 9) | (ieob << 8) | chain)
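/*
 * Example of the transfer descriptor macros in use (a sketch mirroring what
 * gen_tre-style code does): encode a 1500-byte buffer that should raise an
 * end-of-transfer interrupt.
 */
static inline void example_encode_tre(struct mhi_tre *tre, dma_addr_t dma_addr)
{
	tre->ptr = MHI_TRE_DATA_PTR(dma_addr);		/* 64-bit payload address */
	tre->dword[0] = MHI_TRE_DATA_DWORD0(1500);	/* length, masked to MHI_MAX_MTU */
	tre->dword[1] = MHI_TRE_DATA_DWORD1(0, 1, 0, 0);	/* bei=0, ieot=1, ieob=0, chain=0 */
}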
enum MHI_CMD {
MHI_CMD_NOOP = 0x0,
MHI_CMD_RESET_CHAN = 0x1,
MHI_CMD_STOP_CHAN = 0x2,
MHI_CMD_START_CHAN = 0x3,
MHI_CMD_RESUME_CHAN = 0x4,
};
enum MHI_PKT_TYPE {
MHI_PKT_TYPE_INVALID = 0x0,
MHI_PKT_TYPE_NOOP_CMD = 0x1,
MHI_PKT_TYPE_TRANSFER = 0x2,
MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
MHI_PKT_TYPE_TX_EVENT = 0x22,
MHI_PKT_TYPE_EE_EVENT = 0x40,
MHI_PKT_TYPE_STALE_EVENT, /* internal event */
};
/* MHI transfer completion events */
enum MHI_EV_CCS {
MHI_EV_CC_INVALID = 0x0,
MHI_EV_CC_SUCCESS = 0x1,
MHI_EV_CC_EOT = 0x2,
MHI_EV_CC_OVERFLOW = 0x3,
MHI_EV_CC_EOB = 0x4,
MHI_EV_CC_OOB = 0x5,
MHI_EV_CC_DB_MODE = 0x6,
MHI_EV_CC_UNDEFINED_ERR = 0x10,
MHI_EV_CC_BAD_TRE = 0x11,
};
enum MHI_CH_STATE {
MHI_CH_STATE_DISABLED = 0x0,
MHI_CH_STATE_ENABLED = 0x1,
MHI_CH_STATE_RUNNING = 0x2,
MHI_CH_STATE_SUSPENDED = 0x3,
MHI_CH_STATE_STOP = 0x4,
MHI_CH_STATE_ERROR = 0x5,
};
enum MHI_CH_CFG {
MHI_CH_CFG_CHAN_ID = 0,
MHI_CH_CFG_ELEMENTS = 1,
MHI_CH_CFG_ER_INDEX = 2,
MHI_CH_CFG_DIRECTION = 3,
MHI_CH_CFG_BRSTMODE = 4,
MHI_CH_CFG_POLLCFG = 5,
MHI_CH_CFG_EE = 6,
MHI_CH_CFG_XFER_TYPE = 7,
MHI_CH_CFG_BITCFG = 8,
MHI_CH_CFG_MAX
};
#define MHI_CH_CFG_BIT_LPM_NOTIFY BIT(0) /* require LPM notification */
#define MHI_CH_CFG_BIT_OFFLOAD_CH BIT(1) /* satellite mhi devices */
#define MHI_CH_CFG_BIT_DBMODE_RESET_CH BIT(2) /* require db mode to reset */
#define MHI_CH_CFG_BIT_PRE_ALLOC BIT(3) /* host allocate buffers for DL */
enum MHI_EV_CFG {
MHI_EV_CFG_ELEMENTS = 0,
MHI_EV_CFG_INTMOD = 1,
MHI_EV_CFG_MSI = 2,
MHI_EV_CFG_CHAN = 3,
MHI_EV_CFG_PRIORITY = 4,
MHI_EV_CFG_BRSTMODE = 5,
MHI_EV_CFG_BITCFG = 6,
MHI_EV_CFG_MAX
};
#define MHI_EV_CFG_BIT_HW_EV BIT(0) /* hw event ring */
#define MHI_EV_CFG_BIT_CL_MANAGE BIT(1) /* client manages the event ring */
#define MHI_EV_CFG_BIT_OFFLOAD_EV BIT(2) /* satellite driver manages it */
#define MHI_EV_CFG_BIT_CTRL_EV BIT(3) /* ctrl event ring */
enum MHI_BRSTMODE {
MHI_BRSTMODE_DISABLE = 0x2,
MHI_BRSTMODE_ENABLE = 0x3,
};
#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \
mode != MHI_BRSTMODE_ENABLE)
enum MHI_EE {
MHI_EE_PBL = 0x0, /* Primary Boot Loader */
MHI_EE_SBL = 0x1, /* Secondary Boot Loader */
MHI_EE_AMSS = 0x2, /* AMSS Firmware */
MHI_EE_RDDM = 0x3, /* WIFI Ram Dump Debug Module */
MHI_EE_WFW = 0x4, /* WIFI (WLAN) Firmware */
MHI_EE_PT = 0x5, /* PassThrough, non-PCIe boot (PCIe is BIOS locked, not used for boot) */
MHI_EE_EDL = 0x6, /* PCIe enabled in PBL for emergency download (Non PCIe BOOT) */
MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */
MHI_EE_BHIE = MHI_EE_FP,
MHI_EE_UEFI = 0x8, /* UEFI */
MHI_EE_DISABLE_TRANSITION = 0x9,
MHI_EE_MAX
};
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PT || ee == MHI_EE_EDL)
enum MHI_ST_TRANSITION {
MHI_ST_TRANSITION_PBL,
MHI_ST_TRANSITION_READY,
MHI_ST_TRANSITION_SBL,
MHI_ST_TRANSITION_AMSS,
MHI_ST_TRANSITION_FP,
MHI_ST_TRANSITION_BHIE = MHI_ST_TRANSITION_FP,
MHI_ST_TRANSITION_MAX,
};
extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX];
#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \
"INVALID_STATE" : mhi_state_tran_str[state])
enum MHI_STATE {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
MHI_STATE_M0 = 0x2,
MHI_STATE_M1 = 0x3,
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_D3 = 0x6,
MHI_STATE_BHI = 0x7,
MHI_STATE_SYS_ERR = 0xFF,
MHI_STATE_MAX,
};
extern const char * const mhi_state_str[MHI_STATE_MAX];
#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
!mhi_state_str[state]) ? \
"INVALID_STATE" : mhi_state_str[state])
/* internal power states */
enum MHI_PM_STATE {
MHI_PM_DISABLE = BIT(0), /* MHI is not enabled */
MHI_PM_POR = BIT(1), /* reset state */
MHI_PM_M0 = BIT(2),
MHI_PM_M1 = BIT(3),
MHI_PM_M1_M2_TRANSITION = BIT(4), /* register access not allowed */
MHI_PM_M2 = BIT(5),
MHI_PM_M3_ENTER = BIT(6),
MHI_PM_M3 = BIT(7),
MHI_PM_M3_EXIT = BIT(8),
MHI_PM_FW_DL_ERR = BIT(9), /* firmware download failure state */
MHI_PM_SYS_ERR_DETECT = BIT(10),
MHI_PM_SYS_ERR_PROCESS = BIT(11),
MHI_PM_SHUTDOWN_PROCESS = BIT(12),
MHI_PM_LD_ERR_FATAL_DETECT = BIT(13), /* link not accessible */
};
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
MHI_PM_M1 | MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
MHI_PM_M1 | MHI_PM_M2))
#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
(MHI_PM_M3_ENTER | MHI_PM_M3))
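/*
 * Because MHI_PM_STATE is a bitmask, each of the checks above is a single
 * AND against a precomputed set. A short sketch:
 */
static inline bool example_pm_checks(enum MHI_PM_STATE state)
{
	/* e.g. for MHI_PM_M2: register access is allowed ... */
	bool reg_ok = MHI_REG_ACCESS_VALID(state);
	/* ... but channel doorbells are only valid in M0/M1 */
	bool db_ok = MHI_DB_ACCESS_VALID(state);

	return reg_ok && !db_ok;
}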
/* accepted buffer type for the channel */
enum MHI_XFER_TYPE {
MHI_XFER_BUFFER,
MHI_XFER_SKB,
MHI_XFER_SCLIST,
MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */
};
#define NR_OF_CMD_RINGS (1)
#define CMD_EL_PER_RING (128)
#define PRIMARY_CMD_RING (0)
#define MHI_DEV_WAKE_DB (127)
#define MHI_M2_DEBOUNCE_TMR_US (10000)
#define MHI_MAX_MTU (0xffff)
enum MHI_ER_TYPE {
MHI_ER_TYPE_INVALID = 0x0,
MHI_ER_TYPE_VALID = 0x1,
};
struct db_cfg {
bool reset_req;
bool db_mode;
u32 pollcfg;
enum MHI_BRSTMODE brstmode;
dma_addr_t db_val;
void (*process_db)(struct mhi_controller *mhi_cntrl,
struct db_cfg *db_cfg, void __iomem *io_addr,
dma_addr_t db_val);
};
struct mhi_pm_transitions {
enum MHI_PM_STATE from_state;
u32 to_states;
};
struct state_transition {
struct list_head node;
enum MHI_ST_TRANSITION state;
};
/* Control Segment */
struct mhi_ctrl_seg
{
struct __packed mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
struct __packed mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
struct __packed mhi_tre diag_in_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
struct __packed mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
//struct __packed mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
struct __packed mhi_tre event_ring_0[NUM_MHI_EVT_RING_ELEMENTS] __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
struct __packed mhi_tre event_ring_1[NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS*16);
struct __packed mhi_tre event_ring_2[NUM_MHI_IPA_IN_EVT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_IN_EVT_RING_ELEMENTS*16);
struct __packed mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __aligned(CMD_EL_PER_RING*16);
struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128);
struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128);
struct mhi_cmd_ctxt cmd_ctxt[NR_OF_CMD_RINGS] __aligned(128);
} __aligned(4096);
struct mhi_ctxt {
struct mhi_event_ctxt *er_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_cmd_ctxt *cmd_ctxt;
dma_addr_t er_ctxt_addr;
dma_addr_t chan_ctxt_addr;
dma_addr_t cmd_ctxt_addr;
struct mhi_ctrl_seg *ctrl_seg;
dma_addr_t ctrl_seg_addr;
};
struct mhi_ring {
dma_addr_t dma_handle;
dma_addr_t iommu_base;
u64 *ctxt_wp; /* point to ctxt wp */
void *pre_aligned;
void *base;
void *rp;
void *wp;
size_t el_size;
size_t len;
size_t elements;
size_t alloc_size;
void __iomem *db_addr;
};
struct mhi_cmd {
struct mhi_ring ring;
spinlock_t lock;
};
struct mhi_buf_info {
dma_addr_t p_addr;
void *v_addr;
void *wp;
size_t len;
void *cb_buf;
enum dma_data_direction dir;
};
struct mhi_event {
u32 er_index;
u32 intmod;
u32 msi;
int chan; /* this event ring is dedicated to a channel */
u32 priority;
struct mhi_ring ring;
struct db_cfg db_cfg;
bool hw_ring;
bool cl_manage;
bool offload_ev; /* managed by a device driver */
bool ctrl_ev;
spinlock_t lock;
struct mhi_chan *mhi_chan; /* dedicated to channel */
struct tasklet_struct task;
struct mhi_controller *mhi_cntrl;
};
struct mhi_chan {
u32 chan;
u32 ring;
const char *name;
/*
* important: when consuming, increment tre_ring first; when releasing,
* decrement buf_ring first. If tre_ring has space, buf_ring is
* guaranteed to have space, so we do not need to check both rings.
*/
struct mhi_ring buf_ring;
struct mhi_ring tre_ring;
u32 er_index;
u32 intmod;
u32 tiocm;
u32 full;
enum dma_data_direction dir;
struct db_cfg db_cfg;
enum MHI_EE ee;
enum MHI_XFER_TYPE xfer_type;
enum MHI_CH_STATE ch_state;
enum MHI_EV_CCS ccs;
bool lpm_notify;
bool configured;
bool offload_ch;
bool pre_alloc;
/* functions that generate the transfer ring elements */
int (*gen_tre)(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan, void *buf, void *cb,
size_t len, enum MHI_FLAGS flags);
int (*queue_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
/* xfer call back */
struct mhi_device *mhi_dev;
void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
struct mutex mutex;
struct completion completion;
rwlock_t lock;
struct list_head node;
};
struct mhi_bus {
struct list_head controller_list;
struct mutex lock;
struct dentry *dentry;
};
struct mhi_cntrl_data {
struct mhi_ctxt mhi_ctxt;
struct mhi_cmd mhi_cmd[NR_OF_CMD_RINGS];
struct mhi_event mhi_event[NUM_MHI_EVT_RINGS];
struct mhi_chan mhi_chan[MHI_MAX_CHANNELS];
};
/* default MHI timeout */
#define MHI_TIMEOUT_MS (3000)
extern struct mhi_bus mhi_bus;
/* debug fs related functions */
int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d);
int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d);
int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d);
int mhi_debugfs_trigger_reset(void *data, u64 val);
void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_init_debugfs(struct mhi_controller *mhi_cntrl);
/* power management apis */
enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
struct mhi_controller *mhi_cntrl,
enum MHI_PM_STATE state);
const char *to_mhi_pm_state_str(enum MHI_PM_STATE state);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
enum MHI_EE mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
enum MHI_STATE mhi_get_m_state(struct mhi_controller *mhi_cntrl);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
enum MHI_ST_TRANSITION state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_fw_load_worker(struct work_struct *work);
void mhi_pm_m1_worker(struct work_struct *work);
void mhi_pm_sys_err_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason);
/* queue transfer buffer */
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags);
int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS mflags);
int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS mflags);
int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS mflags);
int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS mflags);
/* register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
void __iomem *db_addr, dma_addr_t wp);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
struct db_cfg *db_mode, void __iomem *db_addr,
dma_addr_t wp);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 mask,
u32 shift, u32 *out);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val);
void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 mask, u32 shift, u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
dma_addr_t wp);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum MHI_STATE state);
/* memory allocation methods */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 5,3,0 ))
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
void *ret = dma_alloc_coherent(dev, size, dma_handle,
flag | __GFP_ZERO);
return ret;
}
#endif
static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
size_t size,
dma_addr_t *dma_handle,
gfp_t gfp)
{
void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp);
MHI_LOG("size = %zd, dma_handle = %llx\n", size, (u64)*dma_handle);
if (buf) {
//if (*dma_handle < mhi_cntrl->iova_start || 0 == mhi_cntrl->iova_start)
// mhi_cntrl->iova_start = (*dma_handle)&0xFFF0000000;
//if ((*dma_handle + size) > mhi_cntrl->iova_stop || 0 == mhi_cntrl->iova_stop)
// mhi_cntrl->iova_stop = ((*dma_handle + size)+0x0FFFFFFF)&0xFFF0000000;
atomic_add(size, &mhi_cntrl->alloc_size);
}
return buf;
}
static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
size_t size,
void *vaddr,
dma_addr_t dma_handle)
{
atomic_sub(size, &mhi_cntrl->alloc_size);
dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle);
}
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl,
struct mhi_device *mhi_dev)
{
kfree(mhi_dev);
}
int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info);
/* initialization methods */
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_dtr_init(void);
/* isr handlers */
irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev);
irqreturn_t mhi_intvec_handlr(int irq_number, void *dev);
void mhi_ev_task(unsigned long data);
#ifdef CONFIG_MHI_DEBUG
#define MHI_ASSERT(cond, msg) do { \
if (cond) \
panic(msg); \
} while (0)
#else
#define MHI_ASSERT(cond, msg) do { \
if (cond) { \
MHI_ERR(msg); \
WARN_ON(cond); \
} \
} while (0)
#endif
#endif /* _MHI_INT_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,18 @@
#ifndef _FIBO_MHI_NETDEV_H
#define _FIBO_MHI_NETDEV_H
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,15,0 ))
static inline void *fibo_skb_put_data(struct sk_buff *skb, const void *data,
unsigned int len)
{
void *tmp = skb_put(skb, len);
memcpy(tmp,data, len);
return tmp;
}
#endif
#endif
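/*
 * On kernels older than 4.15 the shim above stands in for skb_put_data();
 * usage is identical. A minimal sketch (assumes the payload length was
 * already validated against the skb's tailroom):
 *
 *	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
 *	if (skb)
 *		fibo_skb_put_data(skb, payload, len);
 */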

View File

@ -0,0 +1,785 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/tty.h>
#include "../core/mhi.h"
#define DEVICE_NAME "mhi"
#define MHI_UCI_DRIVER_NAME "mhi_uci"
struct uci_chan {
wait_queue_head_t wq;
spinlock_t lock;
struct list_head pending; /* user space waiting to read */
struct uci_buf *cur_buf; /* current buffer user space reading */
size_t rx_size;
};
struct uci_buf {
void *data;
size_t len;
struct list_head node;
};
struct uci_dev {
struct list_head node;
dev_t devt;
struct device *dev;
struct mhi_device *mhi_dev;
const char *chan;
struct mutex mutex; /* sync open and close */
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
int ref_count;
bool enabled;
bool disconnect;
struct ktermios termios;
int sigs;
};
struct mhi_uci_drv {
struct list_head head;
struct mutex lock;
struct class *class;
int major;
dev_t dev_t;
};
enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
typedef struct _QCQMI_HDR {
u8 IFType;
u16 Length;
u8 CtlFlags; // reserved
u8 QMIType;
u8 ClientId;
} __attribute__ ((packed)) *PQCQMI_HDR;
#define MSG_VERB(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_LOG(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
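/* note: every level prints via pr_err, so messages are visible at any console loglevel */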
#define MAX_UCI_DEVICES (64)
#define QUEC_MHI_UCI_ALWAYS_OPEN // for now, SDX20 cannot handle a "start-reset-start" sequence, so the simple workaround is to stay in the started state
static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
static struct mhi_uci_drv mhi_uci_drv;
static int mhi_queue_inbound(struct uci_dev *uci_dev)
{
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = uci_dev->mtu;
void *buf;
struct uci_buf *uci_buf;
int ret = -EIO, i;
for (i = 0; i < nr_trbs; i++) {
buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
uci_buf = buf + mtu;
uci_buf->data = buf;
MSG_VERB("Allocated buf %d of %d size %zd\n", i, nr_trbs, mtu);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
kfree(buf);
MSG_ERR("Failed to queue buffer %d\n", i);
return ret;
}
}
return ret;
}
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
long ret = -ERESTARTSYS;
mutex_lock(&uci_dev->mutex);
if (uci_dev->enabled) {
switch (cmd) {
case TCGETS:
#ifndef TCGETS2
ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios);
#else
ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios);
#endif
break;
case TCSETSF:
case TCSETS:
#ifndef TCGETS2
ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg);
#else
ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg);
#endif
break;
case TIOCMSET:
case TIOCMBIS:
case TIOCMBIC:
{
uint32_t val;
ret = get_user(val, (uint32_t __user *)arg);
if (ret)
break; /* don't return with uci_dev->mutex held */
switch (cmd) {
case TIOCMBIS:
uci_dev->sigs |= val;
break;
case TIOCMBIC:
uci_dev->sigs &= ~val;
break;
case TIOCMSET:
uci_dev->sigs = val;
break;
}
}
break;
case TIOCMGET:
ret = put_user(uci_dev->sigs | TIOCM_RTS, (uint32_t *)arg);
break;
case TCFLSH:
ret = 0;
break;
default:
ret = mhi_ioctl(mhi_dev, cmd, arg);
break;
}
}
mutex_unlock(&uci_dev->mutex);
return ret;
}
static int mhi_uci_release(struct inode *inode, struct file *file)
{
struct uci_dev *uci_dev = file->private_data;
mutex_lock(&uci_dev->mutex);
uci_dev->ref_count--;
if (!uci_dev->ref_count) {
struct uci_buf *itr, *tmp;
struct uci_chan *uci_chan;
MSG_LOG("Last client left, closing node\n");
if (uci_dev->enabled)
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
/* clean inbound channel */
uci_chan = &uci_dev->dl_chan;
list_for_each_entry_safe(itr, tmp, &uci_chan->pending, node) {
list_del(&itr->node);
kfree(itr->data);
}
if (uci_chan->cur_buf)
kfree(uci_chan->cur_buf->data);
uci_chan->cur_buf = NULL;
if (!uci_dev->enabled) {
MSG_LOG("Node is deleted, freeing dev node\n");
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
return 0;
}
}
mutex_unlock(&uci_dev->mutex);
MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
return 0;
}
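/* poll: POLLIN when inbound data is parked on dl_chan, POLLOUT while the
 * UL ring has free descriptors, POLLERR once the device is disabled and
 * POLLHUP after a disconnect. */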
static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan;
unsigned int mask = 0;
poll_wait(file, &uci_dev->dl_chan.wq, wait);
poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->dl_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask = POLLERR;
} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
MSG_VERB("Client can read from node\n");
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&uci_chan->lock);
uci_chan = &uci_dev->ul_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask |= POLLERR;
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
MSG_VERB("Client can write to node\n");
mask |= POLLOUT | POLLWRNORM;
}
if (uci_dev->disconnect)
mask |= POLLHUP;
spin_unlock_bh(&uci_chan->lock);
MSG_VERB("Client attempted to poll, returning mask 0x%x\n", mask);
return mask;
}
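/*
 * Write path: the user buffer is chopped into MTU-sized chunks, each
 * copied into a fresh kmalloc() bounce buffer and queued as a single
 * MHI_EOT transfer. The bounce buffers are freed in mhi_ul_xfer_cb()
 * after the hardware has consumed them.
 */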
static ssize_t mhi_uci_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->ul_chan;
size_t bytes_xfered = 0;
int ret;
if (!buf || !count)
return -EINVAL;
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
MSG_VERB("Enter: to xfer:%zd bytes\n", count);
while (count) {
size_t xfer_size;
void *kbuf;
enum MHI_FLAGS flags;
spin_unlock_bh(&uci_chan->lock);
if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) == 0 && (file->f_mode & FMODE_NDELAY)) {
/* the lock is not held at this point, so don't fall through to
 * the final unlock; report what was written so far */
MSG_VERB("Exit: Number of bytes xferred:%zd\n", bytes_xfered);
return bytes_xfered ? bytes_xfered : -EAGAIN;
}
/* wait for free descriptors */
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled) ||
mhi_get_no_free_descriptors
(mhi_dev, DMA_TO_DEVICE) > 0);
if (ret == -ERESTARTSYS) {
MSG_LOG("Exit signal caught for node\n");
return -ERESTARTSYS;
}
xfer_size = min_t(size_t, count, uci_dev->mtu);
kbuf = kmalloc(xfer_size, GFP_KERNEL);
if (!kbuf) {
MSG_ERR("Failed to allocate memory %zd\n", xfer_size);
return -ENOMEM;
}
ret = copy_from_user(kbuf, buf, xfer_size);
if (unlikely(ret)) {
kfree(kbuf);
return -EFAULT; /* copy_from_user() returns bytes left, not an errno */
}
spin_lock_bh(&uci_chan->lock);
flags = MHI_EOT;
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
xfer_size, flags);
else
ret = -ERESTARTSYS;
if (ret) {
kfree(kbuf);
goto sys_interrupt;
}
bytes_xfered += xfer_size;
count -= xfer_size;
buf += xfer_size;
}
spin_unlock_bh(&uci_chan->lock);
MSG_VERB("Exit: Number of bytes xferred:%zd\n", bytes_xfered);
return bytes_xfered;
sys_interrupt:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
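/*
 * Read path: completed inbound buffers are parked on dl_chan->pending by
 * mhi_dl_xfer_cb(). A short user read drains cur_buf across several
 * calls (rx_size tracks what is left); once fully drained the buffer is
 * re-queued to the hardware instead of being freed.
 */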
static ssize_t mhi_uci_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->dl_chan;
struct uci_buf *uci_buf;
char *ptr;
size_t to_copy;
int ret = 0;
if (!buf)
return -EINVAL;
MSG_VERB("Client provided buf len:%zd\n", count);
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
/* No data available to read, wait */
if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
MSG_VERB("No data available to read waiting\n");
spin_unlock_bh(&uci_chan->lock);
if (file->f_mode & FMODE_NDELAY)
return -EAGAIN;
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled ||
!list_empty(&uci_chan->pending)));
if (ret == -ERESTARTSYS) {
MSG_LOG("Exit signal caught for node\n");
return -ERESTARTSYS;
}
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
MSG_LOG("node is disabled\n");
ret = -ERESTARTSYS;
goto read_error;
}
}
/* new read, get the next descriptor from the list */
if (!uci_chan->cur_buf) {
uci_buf = list_first_entry_or_null(&uci_chan->pending,
struct uci_buf, node);
if (unlikely(!uci_buf)) {
ret = -EIO;
goto read_error;
}
list_del(&uci_buf->node);
uci_chan->cur_buf = uci_buf;
uci_chan->rx_size = uci_buf->len;
MSG_VERB("Got pkt of size:%zd\n", uci_chan->rx_size);
}
uci_buf = uci_chan->cur_buf;
spin_unlock_bh(&uci_chan->lock);
/* Copy the buffer to user space */
to_copy = min_t(size_t, count, uci_chan->rx_size);
ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
ret = copy_to_user(buf, ptr, to_copy);
if (ret)
return -EFAULT; /* copy_to_user() returns bytes left, not an errno */
MSG_VERB("Copied %zd of %zd bytes\n", to_copy, uci_chan->rx_size);
uci_chan->rx_size -= to_copy;
/* we finished with this buffer, queue it back to hardware */
if (!uci_chan->rx_size) {
spin_lock_bh(&uci_chan->lock);
uci_chan->cur_buf = NULL;
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
uci_buf->data, uci_dev->mtu,
MHI_EOT);
else
ret = -ERESTARTSYS;
if (ret) {
MSG_ERR("Failed to recycle element\n");
kfree(uci_buf->data);
goto read_error;
}
spin_unlock_bh(&uci_chan->lock);
}
MSG_VERB("Returning %zd bytes\n", to_copy);
return to_copy;
read_error:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static int mhi_uci_open(struct inode *inode, struct file *filp)
{
struct uci_dev *uci_dev;
int ret = -EIO;
struct uci_buf *buf_itr, *tmp;
struct uci_chan *dl_chan;
mutex_lock(&mhi_uci_drv.lock);
list_for_each_entry(uci_dev, &mhi_uci_drv.head, node) {
if (uci_dev->devt == inode->i_rdev) {
ret = 0;
break;
}
}
mutex_unlock(&mhi_uci_drv.lock);
/* could not find a minor node */
if (ret)
return ret;
mutex_lock(&uci_dev->mutex);
if (!uci_dev->enabled) {
MSG_ERR("Node exists, but not in active state!\n");
ret = -ERESTARTSYS; /* without this, open() would return 0 with private_data unset */
goto error_open_chan;
}
uci_dev->ref_count++;
MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
if (uci_dev->ref_count == 1) {
MSG_LOG("Starting channel\n");
ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
if (ret) {
MSG_ERR("Error starting transfer channels\n");
uci_dev->ref_count--;
goto error_open_chan;
}
ret = mhi_queue_inbound(uci_dev);
if (ret)
goto error_rx_queue;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
/* take an extra reference so the channels stay started after the last
 * close (see QUEC_MHI_UCI_ALWAYS_OPEN above); the matching drop happens
 * in mhi_uci_remove() */
uci_dev->ref_count++;
#endif
}
filp->private_data = uci_dev;
mutex_unlock(&uci_dev->mutex);
return 0;
error_rx_queue:
dl_chan = &uci_dev->dl_chan;
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
list_for_each_entry_safe(buf_itr, tmp, &dl_chan->pending, node) {
list_del(&buf_itr->node);
kfree(buf_itr->data);
}
error_open_chan:
mutex_unlock(&uci_dev->mutex);
return ret;
}
static const struct file_operations mhidev_fops = {
.open = mhi_uci_open,
.release = mhi_uci_release,
.read = mhi_uci_read,
.write = mhi_uci_write,
.poll = mhi_uci_poll,
.unlocked_ioctl = mhi_uci_ioctl,
};
static void mhi_uci_remove(struct mhi_device *mhi_dev)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
MSG_LOG("Enter\n");
/* disable the node */
mutex_lock(&uci_dev->mutex);
spin_lock_irq(&uci_dev->dl_chan.lock);
spin_lock_irq(&uci_dev->ul_chan.lock);
uci_dev->enabled = false;
uci_dev->disconnect = true;
spin_unlock_irq(&uci_dev->ul_chan.lock);
spin_unlock_irq(&uci_dev->dl_chan.lock);
wake_up(&uci_dev->dl_chan.wq);
wake_up(&uci_dev->ul_chan.wq);
/* delete the node to prevent new opens */
device_destroy(mhi_uci_drv.class, uci_dev->devt);
uci_dev->dev = NULL;
mutex_lock(&mhi_uci_drv.lock);
list_del(&uci_dev->node);
mutex_unlock(&mhi_uci_drv.lock);
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count > 0)
uci_dev->ref_count--;
#endif
/* safe to free memory only if all file nodes are closed */
if (!uci_dev->ref_count) {
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
return;
}
mutex_unlock(&uci_dev->mutex);
MSG_LOG("Exit\n");
}
static int mhi_uci_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct uci_dev *uci_dev;
int minor;
int dir;
uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
if (!uci_dev)
return -ENOMEM;
mutex_init(&uci_dev->mutex);
uci_dev->mhi_dev = mhi_dev;
minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
if (minor >= MAX_UCI_DEVICES) {
kfree(uci_dev);
return -ENOSPC;
}
mutex_lock(&uci_dev->mutex);
mutex_lock(&mhi_uci_drv.lock);
uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
set_bit(minor, uci_minors);
for (dir = 0; dir < 2; dir++) {
struct uci_chan *uci_chan = (dir) ?
&uci_dev->ul_chan : &uci_dev->dl_chan;
spin_lock_init(&uci_chan->lock);
init_waitqueue_head(&uci_chan->wq);
INIT_LIST_HEAD(&uci_chan->pending);
}
uci_dev->termios = tty_std_termios;
uci_dev->sigs = 0;
uci_dev->mtu = id->driver_data;
mhi_device_set_devdata(mhi_dev, uci_dev);
uci_dev->enabled = true;
list_add(&uci_dev->node, &mhi_uci_drv.head);
mutex_unlock(&mhi_uci_drv.lock);
mutex_unlock(&uci_dev->mutex);
MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
return 0;
}
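/* UL completion: free the bounce buffer queued by mhi_uci_write() and,
 * on success, wake any writer blocked waiting for free descriptors. */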
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->ul_chan;
MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
kfree(mhi_result->buf_addr);
if (!mhi_result->transaction_status)
wake_up(&uci_chan->wq);
}
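/*
 * DL completion: recover the struct uci_buf that mhi_queue_inbound()
 * stashed at buf_addr + mtu, record the received length and park the
 * buffer on the pending list for mhi_uci_read().
 */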
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->dl_chan;
unsigned long flags;
struct uci_buf *buf;
MSG_VERB("chan:mhi_dev->dl_chan_id:%d, status:%d receive_len:%zu\n",
mhi_dev->dl_chan_id, mhi_result->transaction_status, mhi_result->bytes_xferd);
if (mhi_result->transaction_status == -ENOTCONN) {
kfree(mhi_result->buf_addr);
return;
}
spin_lock_irqsave(&uci_chan->lock, flags);
buf = mhi_result->buf_addr + uci_dev->mtu;
if (buf->data != mhi_result->buf_addr) {
MSG_LOG("%p, %p\n", buf->data, mhi_result->buf_addr);
}
buf->data = mhi_result->buf_addr;
buf->len = mhi_result->bytes_xferd;
if (uci_dev->mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN) {
PQCQMI_HDR pHdr = (PQCQMI_HDR) (buf->data);
u16 qmiLength = (le16_to_cpu(pHdr->Length) + 1);
// Opening the QMI channel without reading data from it triggers the
// length-mismatch error below; the root cause is unknown for now, so
// using uqmi/libqmi against this node is not recommended. Reproduce with:
// dmesg -c > /dev/null; echo 1 > /dev/mhi_QMI0; sleep 3; ./FIBO-CM -d /dev/mhi_QMI0 -v
if (qmiLength != buf->len) {
unsigned char *d = (unsigned char *) pHdr;
MSG_ERR("bytes_xferd=%zd, qmiLength=%d %02x%02x%02x%02x - %02x%02x%02x%02x\n", buf->len, qmiLength,
d[0],d[1],d[2],d[3],d[qmiLength+0],d[qmiLength+1],d[qmiLength+2],d[qmiLength+3]);
if (buf->len > qmiLength)
buf->len = qmiLength;
}
}
list_add_tail(&buf->node, &uci_chan->pending);
spin_unlock_irqrestore(&uci_chan->lock, flags);
wake_up(&uci_chan->wq);
}
#define DIAG_MAX_PCIE_PKT_SZ 2048 //defined by the modem module
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_uci_match_table[] = {
{ .chan = "LOOPBACK", .driver_data = 0x1000 },
{ .chan = "SAHARA", .driver_data = 0x4000 },
{ .chan = "EDL", .driver_data = 0x4000 },
{ .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ },
{ .chan = "EFS", .driver_data = 0x1000 },
#ifdef CONFIG_MHI_NETDEV_MBIM
{ .chan = "MBIM", .driver_data = 0x1000 },
#else
{ .chan = "QMI0", .driver_data = 0x1000 },
{ .chan = "QMI1", .driver_data = 0x1000 },
#endif
{ .chan = "TF", .driver_data = 0x1000 },
{ .chan = "BL", .driver_data = 0x1000 },
{ .chan = "DUN", .driver_data = 0x1000 },
{ .chan = "GNSS", .driver_data = 0x1000 },
{ .chan = "AUDIO", .driver_data = 0x1000 },
{ },
};
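/*
 * Each matched channel is exposed as /dev/mhi_<chan> (see device_create()
 * in mhi_uci_probe). As a rough sketch, assuming the default udev setup
 * and a modem that exposes the DUN (AT command) channel, the node could
 * be exercised from a shell like this:
 *
 *   stty -F /dev/mhi_DUN raw -echo
 *   printf 'AT\r' > /dev/mhi_DUN
 *   cat /dev/mhi_DUN
 */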
static struct mhi_driver mhi_uci_driver = {
.id_table = mhi_uci_match_table,
.remove = mhi_uci_remove,
.probe = mhi_uci_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
.driver = {
.name = MHI_UCI_DRIVER_NAME,
.owner = THIS_MODULE,
},
};
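/* Module bring-up order: grab a dynamic char-dev major and the device
 * class first, then register with the MHI core so nodes can be created
 * as soon as channels probe; teardown runs in the reverse order. */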
int mhi_device_uci_init(void)
{
int ret;
ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
if (ret < 0)
return ret;
mhi_uci_drv.major = ret;
mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
if (IS_ERR(mhi_uci_drv.class)) {
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
return -ENODEV;
}
mutex_init(&mhi_uci_drv.lock);
INIT_LIST_HEAD(&mhi_uci_drv.head);
ret = mhi_driver_register(&mhi_uci_driver);
if (ret) {
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
return ret;
}
void mhi_device_uci_exit(void)
{
mhi_driver_unregister(&mhi_uci_driver);
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}

View File

@ -8,7 +8,7 @@
include $(TOPDIR)/rules.mk
LUCI_TITLE:=PCI Modem Server
LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G
LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G +kmod-pcie_mhi_fb
include $(TOPDIR)/feeds/luci/luci.mk