From 4771bd484578cf74375de63a9325c6f231847faa Mon Sep 17 00:00:00 2001
From: ling <1042585959@qq.com>
Date: Thu, 4 May 2023 11:47:21 +0800
Subject: [PATCH] pci-mhi-fb

---
 fibocom_MHI/Makefile                       |   47 +
 fibocom_MHI/src/Kconfig                    |   14 +
 fibocom_MHI/src/Makefile                   |   29 +
 fibocom_MHI/src/controllers/mhi_qcom.c     |  683 +++++++++
 fibocom_MHI/src/controllers/mhi_qcom.h     |   92 ++
 fibocom_MHI/src/controllers/mhi_qcom_arm.h |   92 ++
 fibocom_MHI/src/controllers/mhi_qcom_x86.h |   92 ++
 fibocom_MHI/src/core/mhi.h                 |  891 +++++++++++
 fibocom_MHI/src/core/mhi_boot.c            |  878 +++++++++++
 fibocom_MHI/src/core/mhi_common.h          |  362 +++++
 fibocom_MHI/src/core/mhi_dtr.c             |  181 +++
 fibocom_MHI/src/core/mhi_init.c            | 1442 +++++++++++++++++
 fibocom_MHI/src/core/mhi_internal.h        |  800 ++++++++++
 fibocom_MHI/src/core/mhi_main.c            | 1612 ++++++++++++++++++++
 fibocom_MHI/src/core/mhi_pm.c              | 1271 +++++++++++++++
 fibocom_MHI/src/devices/mhi_netdev.c       | 1313 ++++++++++++++++
 fibocom_MHI/src/devices/mhi_netdev.h       |   18 +
 fibocom_MHI/src/devices/mhi_uci.c          |  785 ++++++++++
 luci-app-pcimodem/Makefile                 |    2 +-
 19 files changed, 10603 insertions(+), 1 deletion(-)
 create mode 100644 fibocom_MHI/Makefile
 create mode 100644 fibocom_MHI/src/Kconfig
 create mode 100644 fibocom_MHI/src/Makefile
 create mode 100644 fibocom_MHI/src/controllers/mhi_qcom.c
 create mode 100644 fibocom_MHI/src/controllers/mhi_qcom.h
 create mode 100644 fibocom_MHI/src/controllers/mhi_qcom_arm.h
 create mode 100644 fibocom_MHI/src/controllers/mhi_qcom_x86.h
 create mode 100644 fibocom_MHI/src/core/mhi.h
 create mode 100644 fibocom_MHI/src/core/mhi_boot.c
 create mode 100644 fibocom_MHI/src/core/mhi_common.h
 create mode 100644 fibocom_MHI/src/core/mhi_dtr.c
 create mode 100644 fibocom_MHI/src/core/mhi_init.c
 create mode 100644 fibocom_MHI/src/core/mhi_internal.h
 create mode 100644 fibocom_MHI/src/core/mhi_main.c
 create mode 100644 fibocom_MHI/src/core/mhi_pm.c
 create mode 100644 fibocom_MHI/src/devices/mhi_netdev.c
 create mode 100644 fibocom_MHI/src/devices/mhi_netdev.h
 create mode 100644 fibocom_MHI/src/devices/mhi_uci.c

diff --git a/fibocom_MHI/Makefile b/fibocom_MHI/Makefile
new file mode 100644
index 0000000..156e01b
--- /dev/null
+++ b/fibocom_MHI/Makefile
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=pcie_mhi_fb
+PKG_VERSION:=3.2
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/kernel.mk
+include $(INCLUDE_DIR)/package.mk
+
+define KernelPackage/pcie_mhi_fb
+  SUBMENU:=WWAN Support
+  TITLE:=Kernel pcie driver for MHI device
+  DEPENDS:=+pciids +pciutils +fibocom-dial
+  FILES:=$(PKG_BUILD_DIR)/pcie_mhi_fb.ko
+  AUTOLOAD:=$(call AutoLoad,90,pcie_mhi_fb)
+endef
+
+define KernelPackage/pcie_mhi_fb/description
+  Kernel module that registers a custom pciemhi platform device.
+endef
+
+MAKE_OPTS:= \
+	ARCH="$(LINUX_KARCH)" \
+	CROSS_COMPILE="$(TARGET_CROSS)" \
+	CXXFLAGS="$(TARGET_CXXFLAGS)" \
+	M="$(PKG_BUILD_DIR)" \
+	$(EXTRA_KCONFIG)
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+	$(CP) ./src/* $(PKG_BUILD_DIR)/
+endef
+
+define Build/Compile
+	$(MAKE) -C "$(LINUX_DIR)" \
+		$(MAKE_OPTS) \
+		modules
+endef
+
+$(eval $(call KernelPackage,pcie_mhi_fb))
diff --git a/fibocom_MHI/src/Kconfig b/fibocom_MHI/src/Kconfig
new file mode 100644
index 0000000..ec1e587
--- /dev/null
+++ b/fibocom_MHI/src/Kconfig
@@ -0,0 +1,14 @@
+menu "pcie mhi device Drivers"
+
+config PCIE_MHI
+	tristate "MHI dev net interface"
+	help
+	  This module enables userspace software clients to communicate
+	  with devices supporting the MHI protocol. Userspace clients
+	  may open the device nodes exposed by MHI UCI and perform
+	  read, write and ioctl operations to communicate with the
+	  attached device.
+
+endmenu
+
+
diff --git a/fibocom_MHI/src/Makefile b/fibocom_MHI/src/Makefile
new file mode 100644
index 0000000..82ec28f
--- /dev/null
+++ b/fibocom_MHI/src/Makefile
@@ -0,0 +1,29 @@
+ccflags-y += -g -Wno-incompatible-pointer-types -Wno-unused-variable
+#ccflags-y += -DCONFIG_MHI_NETDEV_MBIM
+#obj-${CONFIG_PCIE_MHI} := fibo_mhi.o
+obj-m := fibo_mhi.o
+fibo_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o devices/mhi_netdev.o devices/mhi_uci.o controllers/mhi_qcom.o
+
+PWD := $(shell pwd)
+ifeq ($(ARCH),)
+ARCH := $(shell uname -m)
+endif
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE :=
+endif
+ifeq ($(KDIR),)
+KDIR := /lib/modules/$(shell uname -r)/build
+endif
+
+
+fibo_mhi: clean
+ifeq ($(findstring 86,$(ARCH)), 86)
+	cp -f $(PWD)/controllers/mhi_qcom_x86.h $(PWD)/controllers/mhi_qcom.h
+else
+	cp -f $(PWD)/controllers/mhi_qcom_arm.h $(PWD)/controllers/mhi_qcom.h
+endif
+	#ln -sf makefile Makefile
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
+
+clean:
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
diff --git a/fibocom_MHI/src/controllers/mhi_qcom.c b/fibocom_MHI/src/controllers/mhi_qcom.c
new file mode 100644
index 0000000..87e0fd4
--- /dev/null
+++ b/fibocom_MHI/src/controllers/mhi_qcom.c
@@ -0,0 +1,683 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" +#include "mhi_qcom.h" + + +#ifndef PCI_IRQ_MSI +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +static int fibo_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_enable_msi_range(dev, min_vecs, max_vecs); +} + +static void fibo_pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msi(dev); +} + +static int fibo_pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return dev->irq + nr; +} +#else +static int fibo_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_alloc_irq_vectors(dev, min_vecs, max_vecs, flags); +} + +static void fibo_pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_free_irq_vectors(dev); +} + +static int fibo_pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return pci_irq_vector(dev, nr); +} +#endif + + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0300)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0301)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0302)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)},//SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(0x2C7C, 0x0512)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +static struct pci_driver mhi_pcie_driver; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + + fibo_pci_free_irq_vectors(pci_dev); + iounmap(mhi_cntrl->regs); + mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, mhi_dev->resn); + pci_disable_device(pci_dev); +} + +static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + resource_size_t start, len; + int i; + + mhi_dev->resn = MHI_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, mhi_dev->resn); + if (ret) { + MHI_ERR("Error assign pci resources, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Error enabling device, ret:%d\n", ret); + goto error_enable_device; + } + + ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi"); + if (ret) { + MHI_ERR("Error pci_request_region, ret:%d\n", ret); + goto error_request_region; + } + + pci_set_master(pci_dev); + + start = pci_resource_start(pci_dev, mhi_dev->resn); + len = pci_resource_len(pci_dev, mhi_dev->resn); + /*begin added by tony.du for mantis 0062018 on 2020-11-10*/ + #if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,6,0 )) + mhi_cntrl->regs = ioremap_nocache(start, len); + #else + mhi_cntrl->regs = ioremap(start, len); + #endif + /*end added by tony.du for mantis 0062018 on 2020-11-10*/ + MHI_LOG("mhi_cntrl->regs = %p\n", 
mhi_cntrl->regs); + if (!mhi_cntrl->regs) { + MHI_ERR("Error ioremap region\n"); + goto error_ioremap; + } + + ret = fibo_pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + if (ret == -ENOSPC) { + } + //imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device + if (ret != 1) { + MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required); + goto error_req_msi; + } + } + + mhi_cntrl->msi_allocated = ret; + MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq); + + for (i = 0; i < mhi_cntrl->msi_allocated; i++) { + mhi_cntrl->irq[i] = fibo_pci_irq_vector(pci_dev, i); + if (mhi_cntrl->irq[i] < 0) { + ret = mhi_cntrl->irq[i]; + goto error_get_irq_vec; + } + } + + return 0; + +error_get_irq_vec: + fibo_pci_free_irq_vectors(pci_dev); + +error_req_msi: + iounmap(mhi_cntrl->regs); + +error_ioremap: + pci_clear_master(pci_dev); + +error_request_region: + pci_disable_device(pci_dev); + +error_enable_device: + pci_release_region(pci_dev, mhi_dev->resn); + + return ret; +} + +#ifdef CONFIG_PM +static int mhi_runtime_idle(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered returning -EBUSY\n"); + + /* + * RPM framework during runtime resume always calls + * rpm_idle to see if device ready to suspend. + * If dev.power usage_count count is 0, rpm fw will call + * rpm_idle cb to see if device is ready to suspend. + * if cb return 0, or cb not defined the framework will + * assume device driver is ready to suspend; + * therefore, fw will schedule runtime suspend. + * In MHI power management, MHI host shall go to + * runtime suspend only after entering MHI State M2, even if + * usage count is 0. Return -EBUSY to disable automatic suspend. 
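 *
 * Editorial sketch, not part of this patch: the -EBUSY here only blocks
 * rpm_idle-driven suspend. Runtime suspend still happens once autosuspend
 * is armed and mhi_status_cb() calls pm_request_autosuspend() on
 * MHI_CB_IDLE. Assuming the probe path arms autosuspend with the header's
 * MHI_RPM_SUSPEND_TMR_MS, that wiring would look roughly like this
 * (the function name is hypothetical; the pm_runtime_* calls are the
 * standard kernel runtime-PM API):
 *
 *	static void mhi_example_arm_autosuspend(struct pci_dev *pci_dev)
 *	{
 *		struct device *dev = &pci_dev->dev;
 *
 *		pm_runtime_set_autosuspend_delay(dev, MHI_RPM_SUSPEND_TMR_MS);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_allow(dev);
 *	}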
+ */ + return -EBUSY; +} + +static int mhi_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_pm_suspend(mhi_cntrl); + if (ret) { + MHI_LOG("Abort due to ret:%d\n", ret); + goto exit_runtime_suspend; + } + + ret = mhi_arch_link_off(mhi_cntrl, true); + if (ret) + MHI_ERR("Failed to Turn off link ret:%d\n", ret); + +exit_runtime_suspend: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with ret:%d\n", ret); + + return ret; +} + +static int mhi_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* turn on link */ + ret = mhi_arch_link_on(mhi_cntrl); + if (ret) + goto rpm_resume_exit; + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + +rpm_resume_exit: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with :%d\n", ret); + + return ret; +} + +static int mhi_system_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + ret = mhi_runtime_resume(dev); + if (ret) { + MHI_ERR("Failed to resume link\n"); + } else { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return ret; +} + +int mhi_system_suspend(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered\n"); + + /* if rpm status still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) + return mhi_runtime_suspend(dev); + + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + + MHI_LOG("Exit\n"); + return 0; +} +#endif + +/* checks if link is down */ +static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + u16 dev_id; + int ret; + + /* try reading device id, if dev id don't match, link is down */ + ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? 
-EIO : 0; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_put_noidle(dev); +} + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + if (reason == MHI_CB_IDLE) { + MHI_LOG("Schedule runtime suspend 1\n"); + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + } +} + +int mhi_debugfs_trigger_m0(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Exit\n"); + pm_runtime_get(&mhi_dev->pci_dev->dev); + pm_runtime_put(&mhi_dev->pci_dev->dev); + + return 0; +} + +int mhi_debugfs_trigger_m3(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Entry\n"); + pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev); + pm_request_autosuspend(&mhi_dev->pci_dev->dev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, + mhi_debugfs_trigger_m0, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, + mhi_debugfs_trigger_m3, "%llu\n"); + +static int mhi_init_debugfs_trigger_go(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + + MHI_LOG("Trigger power up sequence\n"); + + mhi_async_power_up(mhi_cntrl); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL, + mhi_init_debugfs_trigger_go, "%llu\n"); + + +int mhi_init_debugfs_debug_show(struct seq_file *m, void *d) +{ + seq_puts(m, "Enable debug mode to debug external soc\n"); + seq_puts(m, + "Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n"); + seq_puts(m, "No spaces between parameters\n"); + seq_puts(m, "\t1. devid : 0 or pci device id to register\n"); + seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n"); + seq_puts(m, "\t3. domain: Rootcomplex\n"); + seq_puts(m, "\t4. 
smmu_cfg: smmu configuration mask:\n"); + seq_puts(m, "\t\t- BIT0: ATTACH\n"); + seq_puts(m, "\t\t- BIT1: S1 BYPASS\n"); + seq_puts(m, "\t\t-BIT2: FAST_MAP\n"); + seq_puts(m, "\t\t-BIT3: ATOMIC\n"); + seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n"); + seq_puts(m, "\t\t-BIT5: GEOMETRY\n"); + seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n"); + seq_puts(m, "Examples inputs: '0x307,10000'\n"); + seq_puts(m, "\techo '0,10000,1'\n"); + seq_puts(m, "\techo '0x307,10000,0,0x3d'\n"); + seq_puts(m, "firmware image name will be changed to debug.mbn\n"); + + return 0; +} + +static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file) +{ + return single_open(file, mhi_init_debugfs_debug_show, NULL); +} + +static ssize_t mhi_init_debugfs_debug_write(struct file *fp, + const char __user *ubuf, + size_t count, + loff_t *pos) +{ + char *buf = kmalloc(count + 1, GFP_KERNEL); + /* #,devid,timeout,domain,smmu-cfg */ + int args[5] = {0}; + static char const *dbg_fw = "debug.mbn"; + int ret; + struct mhi_controller *mhi_cntrl = fp->f_inode->i_private; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_device_id *id; + + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf, ubuf, count); + if (ret) + goto error_read; + buf[count] = 0; + get_options(buf, ARRAY_SIZE(args), args); + kfree(buf); + + /* override default parameters */ + mhi_cntrl->fw_image = dbg_fw; + mhi_cntrl->edl_image = dbg_fw; + + if (args[0] >= 2 && args[2]) + mhi_cntrl->timeout_ms = args[2]; + + if (args[0] >= 3 && args[3]) + mhi_cntrl->domain = args[3]; + + if (args[0] >= 4 && args[4]) + mhi_dev->smmu_cfg = args[4]; + + /* If it's a new device id register it */ + if (args[0] && args[1]) { + /* find the debug_id and overwrite it */ + for (id = mhi_pcie_device_id; id->vendor; id++) + if (id->device == MHI_PCIE_DEBUG_ID) { + id->device = args[1]; + pci_unregister_driver(&mhi_pcie_driver); + ret = pci_register_driver(&mhi_pcie_driver); + } + } + + mhi_dev->debug_mode = true; + debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl, + &mhi_init_debugfs_trigger_go_fops); + pr_info( + "%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n", + __func__, ret, args[1], mhi_dev->smmu_cfg, + mhi_cntrl->timeout_ms); + return count; + +error_read: + kfree(buf); + return ret; +} + +static const struct file_operations debugfs_debug_ops = { + .open = mhi_init_debugfs_debug_open, + .release = single_release, + .read = seq_read, + .write = mhi_init_debugfs_debug_write, +}; + +static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + u64 addr_win[2]; + int ret; + + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) { + pr_err("mhi_alloc_controller fail\n"); + return NULL; + } + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); + mhi_dev->smmu_cfg = 0; + + addr_win[0] = 0; + addr_win[1] = 0xFFFFFFFFF; //16GB + + mhi_cntrl->iova_start = addr_win[0]; + mhi_cntrl->iova_stop = addr_win[1]; + + mhi_dev->pci_dev = pci_dev; + mhi_cntrl->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->link_status = mhi_link_status; + + ret = mhi_arch_platform_init(mhi_dev); + if (ret) + goto 
error_probe; + + ret = mhi_register_mhi_controller(mhi_cntrl); + if (ret) + goto error_register; + + if (mhi_cntrl->parent) + debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent, + mhi_cntrl, &debugfs_debug_ops); + + return mhi_cntrl; + +error_register: + mhi_arch_platform_deinit(mhi_dev); + +error_probe: + mhi_free_controller(mhi_cntrl); + + return NULL; +} + +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + struct mhi_controller *mhi_cntrl = NULL; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 slot = PCI_SLOT(pci_dev->devfn); + struct mhi_dev *mhi_dev; + int ret; + + pr_err("INFO:%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n", + __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device); + + mhi_cntrl = mhi_platform_probe(pci_dev); + if (!mhi_cntrl) { + pr_err("mhi_platform_probe fail\n"); + return -EPROBE_DEFER; + } + + mhi_cntrl->dev_id = pci_dev->device; + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_dev->pci_dev = pci_dev; + mhi_dev->powered_on = true; + + ret = mhi_arch_pcie_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret); + return ret; + } + + ret = mhi_arch_iommu_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret); + goto error_iommu_init; + } + + ret = mhi_init_pci_dev(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret); + goto error_init_pci; + } + + /* start power up sequence if not in debug mode */ + if (!mhi_dev->debug_mode) { + ret = mhi_async_power_up(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret); + goto error_power_up; + } + } + + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + dev_set_drvdata(&pci_dev->dev, mhi_cntrl); + MHI_LOG("Return successful\n"); + + return 0; + +error_power_up: + mhi_deinit_pci_dev(mhi_cntrl); + +error_init_pci: + mhi_arch_iommu_deinit(mhi_cntrl); + +error_iommu_init: + mhi_arch_pcie_deinit(mhi_cntrl); + + return ret; +} + +static void mhi_pci_remove(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev); + + if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) { + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + MHI_LOG("%s\n", dev_name(&pci_dev->dev)); + if (!mhi_dev->debug_mode) { + mhi_power_down(mhi_cntrl, 1); + } + mhi_deinit_pci_dev(mhi_cntrl); + mhi_arch_iommu_deinit(mhi_cntrl); + mhi_arch_pcie_deinit(mhi_cntrl); + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = mhi_pci_remove, + .driver = { + .pm = &pm_ops + } +}; + +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_err("INFO:%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_err("INFO:%s exit\n", __func__); +} diff --git a/fibocom_MHI/src/controllers/mhi_qcom.h b/fibocom_MHI/src/controllers/mhi_qcom.h new file mode 100644 index 
0000000..4a80026 --- /dev/null +++ b/fibocom_MHI/src/controllers/mhi_qcom.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) +#define MHI_RPM_SUSPEND_TMR_MS (3000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + bool debug_mode; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64)); +} + +static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +#endif /* _MHI_QCOM_ */ diff --git a/fibocom_MHI/src/controllers/mhi_qcom_arm.h b/fibocom_MHI/src/controllers/mhi_qcom_arm.h new file mode 100644 index 0000000..4a80026 --- /dev/null +++ b/fibocom_MHI/src/controllers/mhi_qcom_arm.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) +#define MHI_RPM_SUSPEND_TMR_MS (3000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + bool debug_mode; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64)); +} + +static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +#endif /* _MHI_QCOM_ */ diff --git a/fibocom_MHI/src/controllers/mhi_qcom_x86.h b/fibocom_MHI/src/controllers/mhi_qcom_x86.h new file mode 100644 index 0000000..7b8e33e --- /dev/null +++ b/fibocom_MHI/src/controllers/mhi_qcom_x86.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) +#define MHI_RPM_SUSPEND_TMR_MS (3000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + bool debug_mode; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(32)); +} + +static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +#endif /* _MHI_QCOM_ */ diff --git a/fibocom_MHI/src/core/mhi.h b/fibocom_MHI/src/core/mhi.h new file mode 100644 index 0000000..f72fd63 --- /dev/null +++ b/fibocom_MHI/src/core/mhi.h @@ -0,0 +1,891 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
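 *
 * Editorial note: the three mhi_qcom*.h variants above are identical
 * except for the DMA mask (mhi_qcom_arm.h uses DMA_BIT_MASK(64),
 * mhi_qcom_x86.h uses DMA_BIT_MASK(32)), and src/Makefile copies the
 * matching variant over mhi_qcom.h at build time. A single header could
 * encode the same choice directly; MHI_DMA_MASK is a hypothetical macro,
 * not defined by this patch:
 *
 *	#ifdef CONFIG_X86
 *	#define MHI_DMA_MASK	DMA_BIT_MASK(32)
 *	#else
 *	#define MHI_DMA_MASK	DMA_BIT_MASK(64)
 *	#endif
 *
 *	return dma_set_mask_and_coherent(mhi_cntrl->dev, MHI_DMA_MASK);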
+ */ +#ifndef _MHI_H_ +#define _MHI_H_ + +#include + +typedef u64 uint64; +typedef u32 uint32; + +typedef enum +{ + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_QMI_2_OUT = 16, + MHI_CLIENT_QMI_2_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_IPCR_OUT = 20, + MHI_CLIENT_IPCR_IN = 21, + MHI_CLIENT_TEST_FW_OUT = 22, + MHI_CLIENT_TEST_FW_IN = 23, + MHI_CLIENT_RESERVED_0 = 24, + MHI_CLIENT_BOOT_LOG_IN = 25, + MHI_CLIENT_DCI_OUT = 26, + MHI_CLIENT_DCI_IN = 27, + MHI_CLIENT_QBI_OUT = 28, + MHI_CLIENT_QBI_IN = 29, + MHI_CLIENT_RESERVED_1_LOWER = 30, + MHI_CLIENT_RESERVED_1_UPPER = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_EDL_OUT = 34, + MHI_CLIENT_EDL_IN = 35, + MHI_CLIENT_ADB_FB_OUT = 36, + MHI_CLIENT_ADB_FB_IN = 37, + MHI_CLIENT_RESERVED_2_LOWER = 38, + MHI_CLIENT_RESERVED_2_UPPER = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_0_OUT = 46, + MHI_CLIENT_IP_SW_0_IN = 47, + MHI_CLIENT_IP_SW_1_OUT = 48, + MHI_CLIENT_IP_SW_1_IN = 49, + MHI_CLIENT_GNSS_OUT = 50, + MHI_CLIENT_GNSS_IN = 51, + MHI_CLIENT_AUDIO_OUT = 52, + MHI_CLIENT_AUDIO_IN = 53, + MHI_CLIENT_RESERVED_3_LOWER = 54, + MHI_CLIENT_RESERVED_3_UPPER = 59, + MHI_CLIENT_TEST_0_OUT = 60, + MHI_CLIENT_TEST_0_IN = 61, + MHI_CLIENT_TEST_1_OUT = 62, + MHI_CLIENT_TEST_1_IN = 63, + MHI_CLIENT_TEST_2_OUT = 64, + MHI_CLIENT_TEST_2_IN = 65, + MHI_CLIENT_TEST_3_OUT = 66, + MHI_CLIENT_TEST_3_IN = 67, + MHI_CLIENT_RESERVED_4_LOWER = 68, + MHI_CLIENT_RESERVED_4_UPPER = 91, + MHI_CLIENT_OEM_0_OUT = 92, + MHI_CLIENT_OEM_0_IN = 93, + MHI_CLIENT_OEM_1_OUT = 94, + MHI_CLIENT_OEM_1_IN = 95, + MHI_CLIENT_OEM_2_OUT = 96, + MHI_CLIENT_OEM_2_IN = 97, + MHI_CLIENT_OEM_3_OUT = 98, + MHI_CLIENT_OEM_3_IN = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_ADPL = 102, + MHI_CLIENT_RESERVED_5_LOWER = 103, + MHI_CLIENT_RESERVED_5_UPPER = 127, + MHI_MAX_CHANNELS = 128 +}MHI_CLIENT_CHANNEL_TYPE; + +#define MHI_VERSION 0x01000000 +#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */ +#define MHI_MSI_INDEX 1 +#define MAX_NUM_MHI_DEVICES 1 +#define NUM_MHI_XFER_RINGS 128 +#define NUM_MHI_EVT_RINGS 3 +#define PRIMARY_EVENT_RING 0 +#define IPA_OUT_EVENT_RING 1 +#define IPA_IN_EVENT_RING 2 +#define NUM_MHI_XFER_RING_ELEMENTS 16 +#define NUM_MHI_EVT_RING_ELEMENTS 256 +#define NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS 2048 +#define NUM_MHI_IPA_IN_EVT_RING_ELEMENTS 1024 +#define NUM_MHI_IPA_IN_RING_ELEMENTS 256 +#define NUM_MHI_IPA_OUT_RING_ELEMENTS 256 +#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128 +#define NUM_MHI_CHAN_RING_ELEMENTS 8 +#define MHI_EVT_CMD_QUEUE_SIZE 160 +#define MHI_EVT_STATE_QUEUE_SIZE 128 +#define MHI_EVT_XFER_QUEUE_SIZE 1024 +#define MHI_ALIGN_4BYTE_OFFSET 0x3 +#define MHI_ALIGN_4K_OFFSET 0xFFF +#define MAX_TRB_DATA_SIZE 0xFFFF +#define RESERVED_VALUE_64 0xFFFFFFFFFFFFFFFF +#define RESERVED_VALUE 0xFFFFFFFF +#define PCIE_LINK_DOWN 0xFFFFFFFF +#define SECONDS 1000 +#define MINUTES 60000 + +#define MHI_FILE_MHI 0x4D4849 +#define MHI_FILE_INIT 0x494E4954 +#define MHI_FILE_MSI 0x4D5349 +#define 
MHI_FILE_OS 0x4F53 +#define MHI_FILE_SM 0x534D +#define MHI_FILE_THREADS 0x54485245 +#define MHI_FILE_TRANSFER 0x5452414E +#define MHI_FILE_UTILS 0x5554494C + + +#define MHI_ER_PRIORITY_HIGH 0 +#define MHI_ER_PRIORITY_MEDIUM 1 +#define MHI_ER_PRIORITY_SPECIAL 2 + +#undef FALSE +#undef TRUE +#define FALSE 0 +#define TRUE 1 + +typedef struct MHI_DEV_CTXT MHI_DEV_CTXT; +typedef struct PCI_CORE_INFO PCI_CORE_INFO; +typedef struct PCIE_DEV_INFO PCIE_DEV_INFO; + +/* Memory Segment Properties */ +typedef struct _MHI_MEM_PROPS +{ + uint64 VirtAligned; + uint64 VirtUnaligned; + uint64 PhysAligned; + uint64 PhysUnaligned; + uint64 Size; + void *Handle; +}MHI_MEM_PROPS, *PMHI_MEM_PROPS; + +/* Device Power State Type */ +typedef enum +{ + POWER_DEVICE_INVALID = 0, + POWER_DEVICE_D0 = 1, + POWER_DEVICE_D1 = 2, + POWER_DEVICE_D2 = 3, + POWER_DEVICE_D3 = 4, + POWER_DEVICE_D3FINAL = 5, // System shutting down + POWER_DEVICE_HIBARNATION = 6, // Entering system state S4 + POWER_DEVICE_MAX = 7 +}PWR_STATE_TYPE; + +/* Channel State */ +typedef enum +{ + CHAN_STATE_DISABLED = 0, + CHAN_STATE_ENABLED = 1, + CHAN_STATE_RUNNING = 2, + CHAN_STATE_SUSPENDED = 3, + CHAN_STATE_STOPPED = 4, + CHAN_STATE_ERROR = 5, + + CHAN_STATE_OTHER = RESERVED_VALUE +}CHAN_STATE_TYPE; + +/* Channel Type */ +typedef enum +{ + INVALID_CHAN = 0, + OUTBOUND_CHAN = 1, + INBOUND_CHAN = 2, + + OTHER_CHAN = RESERVED_VALUE +}CHAN_TYPE; + +/* Ring Type */ +typedef enum +{ + CMD_RING = 0, + XFER_RING = 1, + EVT_RING = 2, +}MHI_RING_TYPE; + +/* Event Ring */ +typedef enum +{ + EVT_RING_INVALID = 0x0, + EVT_RING_VALID = 0x1, + EVT_RING_RESERVED = RESERVED_VALUE +}MHI_EVENT_RING_TYPE; + +#pragma pack(push,1) + +/* MHI Ring Context */ +typedef /*_ALIGN(1)*/ struct _MHI_RING_CTXT_TYPE +{ + uint32 Info; + uint32 Type; + uint32 Index; + uint64 Base; + uint64 Length; + volatile uint64 RP; + uint64 WP; +}MHI_RING_CTXT_TYPE, *PMHI_RING_CTXT_TYPE; + +/* MHI Ring Element */ +typedef /*_ALIGN(1)*/ struct _MHI_ELEMENT_TYPE +{ + uint64 Ptr; + uint32 Status; + uint32 Control; +}MHI_ELEMENT_TYPE, *PMHI_ELEMENT_TYPE; + +#pragma pack(pop) + +/* Command Ring Element Type */ +typedef enum +{ + CMD_NONE = 0, + CMD_NOOP = 1, + CMD_RESET_CHAN = 16, + CMD_STOP_CHAN = 17, + CMD_START_CHAN = 18, + CMD_CANCEL_CHAN_XFERS = 21 +}MHI_CMD_TYPE; + +/* Event Ring Element Type */ +typedef enum +{ + STATE_CHANGE_EVT = 32, + CMD_COMPLETION_EVT = 33, + XFER_COMPLETION_EVT = 34, + EE_STATE_CHANGE_EVT = 64 +} MHI_EVT_TYPE; + +/* Ring Status Type */ +typedef enum +{ + RING_EMPTY = 0, + RING_FULL = 1, + RING_QUEUED = 2, +} MHI_RING_STATUS_TYPE; + +/* XFER Ring Element Type */ +#define XFER_RING_ELEMENT_TYPE 2 + +/* Event Ring Completion Status */ +typedef enum +{ + EVT_COMPLETION_INVALID = 0, + EVT_COMPLETION_SUCCESS = 1, + EVT_COMPLETION_EOT = 2, + EVT_COMPLETION_OVERFLOW = 3, + EVT_COMPLETION_EOB = 4, + EVT_COMPLETION_OOB = 5, /* Out-Of-Buffer */ + EVT_COMPLETION_DB_MODE = 6, + EVT_COMPLETION_UNDEFINED = 16, + EVT_COMPLETION_MALFORMED = 17, + + EVT_COMPLETION_OTHER = RESERVED_VALUE +}EVT_COMPLETION_STATUS_TYPE; + +/* *********************************************************************************************** */ +/* Macros */ +/* *********************************************************************************************** */ +#define ADVANCE_RING_PTR(RingCtxt, Ptr, Size) \ + *Ptr = ((*Ptr - RingCtxt->Base)/sizeof(MHI_ELEMENT_TYPE) == (Size - 1))? 
\ + RingCtxt->Base: (*Ptr + sizeof(MHI_ELEMENT_TYPE)) + +#define GET_VIRT_ADDR(MhiCtxt, PhysAddr) \ + ((MhiCtxt)->CtrlSegProps.VirtAligned + ((PhysAddr) - (MhiCtxt)->CtrlSegProps.PhysAligned)) \ + +#define GET_PHYS_ADDR(MhiCtxt, VirtAddr) \ + ((MhiCtxt)->CtrlSegProps.PhysAligned + ((VirtAddr) - (MhiCtxt)->CtrlSegProps.VirtAligned)) \ + +#define GET_RING_ELEMENT_INDEX(RingBase, Element) \ + (((Element) - (RingBase))/sizeof(MHI_ELEMENT_TYPE)) + +#define VALID_RING_PTR(Ring, Ptr) \ + (((Ptr) >= (Ring)->Base) && \ + ((Ptr) <= ((Ring)->Base + (Ring)->Length - sizeof(MHI_ELEMENT_TYPE)))) + +#define CHAN_INBOUND(_x) ((_x)%2) + +#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \ + ((_x) == MHI_CLIENT_SAHARA_IN) || \ + ((_x) == MHI_CLIENT_BOOT_LOG_IN)) + +#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \ + ((_x) == MHI_CLIENT_EDL_IN)) + +#define RESERVED_CHAN(_x) (((_x) == MHI_CLIENT_RESERVED_0) || \ + ((_x) >= MHI_CLIENT_RESERVED_1_LOWER && (_x) <= MHI_CLIENT_RESERVED_1_UPPER) || \ + ((_x) >= MHI_CLIENT_RESERVED_2_LOWER && (_x) <= MHI_CLIENT_RESERVED_2_UPPER) || \ + ((_x) >= MHI_CLIENT_RESERVED_3_LOWER && (_x) <= MHI_CLIENT_RESERVED_3_UPPER) || \ + ((_x) >= MHI_CLIENT_RESERVED_4_LOWER && (_x) <= MHI_CLIENT_RESERVED_4_UPPER) || \ + ((_x) >= MHI_CLIENT_RESERVED_5_LOWER)) + +#define VALID_CHAN(_x) ((((_x) >= 0) && ((_x) < MHI_MAX_CHANNELS))) + +#define MHI_HW_CHAN(_x) ((_x) == MHI_CLIENT_IP_HW_0_OUT || \ + (_x) == MHI_CLIENT_IP_HW_0_IN || \ + (_x) == MHI_CLIENT_ADPL) + +#define MIN(_x,_y) ((_x) < (_y) ? (_x): (_y)) + +struct mhi_chan; +struct mhi_event; +struct mhi_ctxt; +struct mhi_cmd; +struct image_info; +struct bhi_vec_entry; +struct mhi_cntrl_data; + +/** + * enum MHI_CB - MHI callback + * @MHI_CB_IDLE: MHI entered idle state + * @MHI_CB_PENDING_DATA: New data available for client to process + * @MHI_CB_LPM_ENTER: MHI host entered low power mode + * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode + * @MHI_CB_EE_RDDM: MHI device entered RDDM execution enviornment + */ +enum MHI_CB { + MHI_CB_IDLE, + MHI_CB_PENDING_DATA, + MHI_CB_LPM_ENTER, + MHI_CB_LPM_EXIT, + MHI_CB_EE_RDDM, +}; + +/** + * enum MHI_DEBUG_LEVL - various debugging level + */ +enum MHI_DEBUG_LEVEL { + MHI_MSG_LVL_VERBOSE, + MHI_MSG_LVL_INFO, + MHI_MSG_LVL_ERROR, + MHI_MSG_LVL_CRITICAL, + MHI_MSG_LVL_MASK_ALL, +}; + +/** + * enum MHI_FLAGS - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum MHI_FLAGS { + MHI_EOB, + MHI_EOT, + MHI_CHAIN, +}; + +/** + * struct image_info - firmware and rddm table table + * @mhi_buf - Contain device firmware and rddm table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + +/** + * struct mhi_controller - Master controller structure for external modem + * @dev: Device associated with this controller + * @of_node: DT that has MHI configuration information + * @regs: Points to base of MHI MMIO register space + * @bhi: Points to base of MHI BHI register space + * @wake_db: MHI WAKE doorbell register address + * @dev_id: PCIe device id of the external device + * @domain: PCIe domain the device connected to + * @bus: PCIe bus the device assigned to + * @slot: PCIe slot for the modem + * @iova_start: IOMMU starting address for data + * @iova_stop: IOMMU stop address for data + * @fw_image: Firmware image name for normal booting + * @edl_image: Firmware image name for emergency download mode + * @fbc_download: MHI 
host needs to do complete image transfer + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size + * @seg_len: BHIe vector size + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @max_chan: Maximum number of channels controller support + * @mhi_chan: Points to channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @msi_required: Number of msi required to operate + * @msi_allocated: Number of msi allocated by bus master + * @irq: base irq # to request + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: Power management state + * @ee: MHI device execution environment + * @dev_state: MHI STATE + * @status_cb: CB function to notify various power states to but master + * @link_status: Query link status in case of abnormal value read from device + * @runtime_get: Async runtime resume function + * @runtimet_put: Release votes + * @priv_data: Points to bus master's private data + */ +struct mhi_controller { + struct list_head node; + + /* device node for iommu ops */ + struct device *dev; + struct pci_dev *pci_dev; + + /* mmio base */ + void __iomem *regs; + void __iomem *bhi; + void __iomem *wake_db; + + /* device topology */ + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + + /* addressing window */ + dma_addr_t iova_start; + dma_addr_t iova_stop; + + /* fw images */ + const char *fw_image; + const char *edl_image; + + /* mhi host manages downloading entire fbc images */ + bool fbc_download; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + u32 session_id; + u32 sequence_id; + struct image_info *fbc_image; + struct image_info *rddm_image; + + /* physical channel config data */ + u32 max_chan; + struct mhi_chan *mhi_chan; + struct list_head lpm_chans; /* these chan require lpm notification */ + + /* physical event config data */ + u32 total_ev_rings; + u32 hw_ev_rings; + u32 sw_ev_rings; + u32 msi_required; + u32 msi_allocated; + int irq[8]; /* interrupt table */ + struct mhi_event *mhi_event; + + /* cmd rings */ + struct mhi_cmd *mhi_cmd; + + /* mhi context (shared with device) */ + struct mhi_ctxt *mhi_ctxt; + + u32 timeout_ms; + + /* caller should grab pm_mutex for suspend/resume operations */ + struct mutex pm_mutex; + bool pre_init; + rwlock_t pm_lock; + u32 pm_state; + u32 ee; + u32 dev_state; + bool wake_set; + atomic_t dev_wake; + atomic_t alloc_size; + struct list_head transition_list; + spinlock_t transition_lock; + spinlock_t wlock; + + /* debug counters */ + u32 M0, M1, M2, M3; + + /* worker for different state transitions */ + struct work_struct st_worker; + struct work_struct fw_worker; + struct work_struct m1_worker; + struct work_struct syserr_worker; + wait_queue_head_t state_event; + + /* shadow functions */ + void (*status_cb)(struct mhi_controller *mhi_cntrl, void *piv, + enum MHI_CB reason); + int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv); + void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); + int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv); + void (*runtime_put)(struct 
mhi_controller *mhi_cntrl, void *priv);
+
+	/* channel to control DTR messaging */
+	struct mhi_device *dtr_dev;
+
+	/* kernel log level */
+	enum MHI_DEBUG_LEVEL klog_lvl;
+
+	/* private log level controller driver to set */
+	enum MHI_DEBUG_LEVEL log_lvl;
+
+	/* controller specific data */
+	void *priv_data;
+	void *log_buf;
+	struct dentry *dentry;
+	struct dentry *parent;
+	struct mhi_cntrl_data *data;
+
+	struct miscdevice miscdev;
+};
+
+/**
+ * struct mhi_device - mhi device structure bound to a channel
+ * @dev: Device associated with the channels
+ * @mtu: Maximum # of bytes the controller supports
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @priv_data: Driver private data
+ */
+struct mhi_device {
+	struct device dev;
+	u32 dev_id;
+	u32 domain;
+	u32 bus;
+	u32 slot;
+	size_t mtu;
+	int ul_chan_id;
+	int dl_chan_id;
+	int ul_event_id;
+	int dl_event_id;
+	const struct mhi_device_id *id;
+	const char *chan_name;
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *ul_chan;
+	struct mhi_chan *dl_chan;
+	atomic_t dev_wake;
+	void *priv_data;
+	int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		       void *buf, size_t len, enum MHI_FLAGS flags);
+	int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		       void *buf, size_t len, enum MHI_FLAGS flags);
+	void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @dir: Channel direction
+ * @bytes_xferd: # of bytes transferred
+ * @transaction_status: Status of the last transfer
+ */
+struct mhi_result {
+	void *buf_addr;
+	enum dma_data_direction dir;
+	size_t bytes_xferd;
+	int transaction_status;
+};
+
+/**
+ * struct mhi_buf - Describes the buffer
+ * @buf: cpu address for the buffer
+ * @phys_addr: physical address of the buffer
+ * @dma_addr: iommu address for the buffer
+ * @len: # of bytes
+ * @name: Buffer label, for offload channel configurations name must be:
+ *        ECA - Event context array data
+ *        CCA - Channel context array data
+ */
+struct mhi_buf {
+	void *buf;
+	phys_addr_t phys_addr;
+	dma_addr_t dma_addr;
+	size_t len;
+	const char *name; /* ECA, CCA */
+};
+
+/**
+ * struct mhi_driver - mhi driver information
+ * @id_table: NULL terminated channel ID names
+ * @ul_xfer_cb: UL data transfer callback
+ * @dl_xfer_cb: DL data transfer callback
+ * @status_cb: Asynchronous status callback
+ */
+struct mhi_driver {
+	const struct mhi_device_id *id_table;
+	int (*probe)(struct mhi_device *mhi_dev,
+		     const struct mhi_device_id *id);
+	void (*remove)(struct mhi_device *mhi_dev);
+	void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
+	struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
+					  void *priv)
+{
+	mhi_dev->priv_data = priv;
+}
+
+static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
+{
+	return mhi_dev->priv_data;
+}
+
+/**
+ * mhi_queue_transfer - Queue a buffer to hardware
+ * All transfers are asynchronous transfers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Data direction
+ * @buf: Data buffer (skb for hardware channels)
+ *
@len: Size in bytes + * @mflags: Interrupt flags for the device + */ +static inline int mhi_queue_transfer(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + if (dir == DMA_TO_DEVICE) + return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len, + mflags); + else + return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len, + mflags); +} + +static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl) +{ + return mhi_cntrl->priv_data; +} + +static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} + +/** + * mhi_driver_register - Register driver with MHI framework + * @mhi_drv: mhi_driver structure + */ +int mhi_driver_register(struct mhi_driver *mhi_drv); + +/** + * mhi_driver_unregister - Unregister a driver for mhi_devices + * @mhi_drv: mhi_driver structure + */ +void mhi_driver_unregister(struct mhi_driver *mhi_drv); + +/** + * mhi_device_configure - configure ECA or CCA context + * For offload channels that client manage, call this + * function to configure channel context or event context + * array associated with the channel + * @mhi_div: Device associated with the channels + * @dir: Direction of the channel + * @mhi_buf: Configuration data + * @elements: # of configuration elements + */ +int mhi_device_configure(struct mhi_device *mhi_div, + enum dma_data_direction dir, + struct mhi_buf *mhi_buf, + int elements); + +/** + * mhi_device_get - disable all low power modes + * Only disables lpm, does not immediately exit low power mode + * if controller already in a low power mode + * @mhi_dev: Device associated with the channels + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - disable all low power modes + * Synchronously disable all low power, exit low power mode if + * controller already in a low power state + * @mhi_dev: Device associated with the channels + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - re-enable low power modes + * @mhi_dev: Device associated with the channels + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - setup channel for data transfer + * Moves both UL and DL channel from RESET to START state + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer -unprepare the channels + * Moves both UL and DL channels to RESET state + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_get_no_free_descriptors - Get transfer ring length + * Get # of TD available to queue buffers + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + */ +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir); + +/** + * mhi_poll - poll for any available data to consume + * This is only applicable for DL direction + * @mhi_dev: Device associated with the channels + * @budget: In descriptors to service before returning + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_ioctl - user space IOCTL support for MHI channels + * Native support for setting TIOCM + * @mhi_dev: Device associated with the channels + * @cmd: IOCTL cmd + * @arg: Optional parameter, iotcl cmd specific + */ +long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg); + +/** + * mhi_alloc_controller 
- Allocate mhi_controller structure + * Allocate controller structure and additional data for controller + * private data. You may get the private data pointer by calling + * mhi_controller_get_devdata + * @size: # of additional bytes to allocate + */ +struct mhi_controller *mhi_alloc_controller(size_t size); + +/** + * mhi_register_mhi_controller - Register MHI controller + * Registers MHI controller with MHI bus framework. DT must be supported + * @mhi_cntrl: MHI controller to register + */ +int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl); + +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl); + +/** + * mhi_bdf_to_controller - Look up a registered controller + * Search for controller based on device identification + * @domain: RC domain of the device + * @bus: Bus device connected to + * @slot: Slot device assigned to + * @dev_id: Device Identification + */ +struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot, + u32 dev_id); + +/** + * mhi_prepare_for_power_up - Do pre-initialization before power up + * This is optional, call this before power up if controller do not + * want bus framework to automatically free any allocated memory during shutdown + * process. + * @mhi_cntrl: MHI controller + */ +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_async_power_up - Starts MHI power up sequence + * @mhi_cntrl: MHI controller + */ +int mhi_async_power_up(struct mhi_controller *mhi_cntrl); +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_power_down - Start MHI power down sequence + * @mhi_cntrl: MHI controller + * @graceful: link is still accessible, do a graceful shutdown process otherwise + * we will shutdown host w/o putting device into RESET state + */ +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); + +/** + * mhi_unprepare_after_powre_down - free any allocated memory for power up + * @mhi_cntrl: MHI controller + */ +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_suspend - Move MHI into a suspended state + * Transition to MHI state M3 state from M0||M1||M2 state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_resume - Resume MHI from suspended state + * Transition to MHI state M0 state from M3 state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_resume(struct mhi_controller *mhi_cntrl); + +/** + * mhi_download_rddm_img - Download ramdump image from device for + * debugging purpose. + * @mhi_cntrl: MHI controller + * @in_panic: If we trying to capture image while in kernel panic + */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); + +/** + * mhi_force_rddm_mode - Force external device into rddm mode + * to collect device ramdump. This is useful if host driver assert + * and we need to see device state as well. + * @mhi_cntrl: MHI controller + */ +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); + +int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl); +void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl); + +extern int mhi_debug_mask; + +#define MHI_VERB(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("VERBOSE:[D][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_LOG(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("INFO:[I][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_ERR(fmt, ...) 
do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#define MHI_CRITICAL(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \ + pr_err("ALERT:[C][%s] " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +#ifndef MHI_NAME_SIZE +#define MHI_NAME_SIZE 32 +/** + * * struct mhi_device_id - MHI device identification + * * @chan: MHI channel name + * * @driver_data: driver data; + * */ +struct mhi_device_id { + const char chan[MHI_NAME_SIZE]; + unsigned long driver_data; +}; +#endif + +#endif /* _MHI_H_ */ diff --git a/fibocom_MHI/src/core/mhi_boot.c b/fibocom_MHI/src/core/mhi_boot.c new file mode 100644 index 0000000..a981841 --- /dev/null +++ b/fibocom_MHI/src/core/mhi_boot.c @@ -0,0 +1,878 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1 +#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2 + +/* Software defines */ +/* BHI Version */ +#define BHI_MAJOR_VERSION 0x1 +#define BHI_MINOR_VERSION 0x1 + +#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */ +#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */ + +#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) ) + +typedef u32 ULONG; + +typedef struct _bhi_info_type +{ + ULONG bhi_ver_minor; + ULONG bhi_ver_major; + ULONG bhi_image_address_low; + ULONG bhi_image_address_high; + ULONG bhi_image_size; + ULONG bhi_rsvd1; + ULONG bhi_imgtxdb; + ULONG bhi_rsvd2; + ULONG bhi_msivec; + ULONG bhi_rsvd3; + ULONG bhi_ee; + ULONG bhi_status; + ULONG bhi_errorcode; + ULONG bhi_errdbg1; + ULONG bhi_errdbg2; + ULONG bhi_errdbg3; + ULONG bhi_sernum; + ULONG bhi_sblantirollbackver; + ULONG bhi_numsegs; + ULONG bhi_msmhwid[6]; + ULONG bhi_oempkhash[48]; + ULONG bhi_rsvd5; +}BHI_INFO_TYPE, *PBHI_INFO_TYPE; + +#if 0 +static void PrintBhiInfo(BHI_INFO_TYPE *bhi_info) +{ + ULONG index; + + printk("BHI Device Info...\n"); + printk("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor); + printk("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee); + printk("BHI Status = 0x%X\n", bhi_info->bhi_status); + printk("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3); + printk("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum); + printk("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver); + printk("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs); + printk("BHI MSM HW-Id = "); + for (index = 0; index < 6; index++) + { + printk("0x%X ", bhi_info->bhi_msmhwid[index]); + } + printk("\n"); + + printk("BHI OEM PK Hash = \n"); + for (index = 0; index < 24; index++) + { + printk("0x%X ", bhi_info->bhi_oempkhash[index]); + } + 
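+	/* Note: OEMPKHASH_NUMDWORDS is 48, but only the first 24 OEM PK hash dwords are printed above. */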
printk("\n"); +} +#endif + +static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset) +{ + u32 out = 0; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out); + + return (ret) ? 0 : out; +} + +static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info) +{ + ULONG index; + + memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE)); + + /* bhi_ver */ + bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR); + bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR); + bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW); + bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH); + bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE); + bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1); + bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB); + bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2); + bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC); + bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3); + bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV); + bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS); + bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE); + bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1); + bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2); + bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3); + bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNUM); + bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER); + bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG); + for (index = 0; index < MSMHWID_NUMDWORDS; index++) + { + bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index)); + } + for (index = 0; index < OEMPKHASH_NUMDWORDS; index++) + { + bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index)); + } + bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5); + //PrintBhiInfo(bhi_info); + /* Check the Execution Environment */ + if (!IsPBLExecEnv(bhi_info->bhi_ee)) + { + printk("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee); + } + + /* Return the number of bytes read */ + return 0; +} + +/* setup rddm vector table for rddm transfer */ +static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + int i = 0; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + MHI_VERB("Setting vector:%pad size:%zu\n", + &mhi_buf->dma_addr, mhi_buf->len); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } +} + +/* collect rddm during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + struct mhi_buf *mhi_buf; + u32 sequence_id; + u32 rx_status; + enum MHI_EE ee; + struct image_info *rddm_image = mhi_cntrl->rddm_image; + const u32 delayus = 100; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + void __iomem *base = mhi_cntrl->bhi; + + MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only be executing during a kernel panic, we expect all + * other cores to shutdown while we're collecting rddm buffer. After + * returning from this function, we expect device to reset. 
+	 *
+	 * Normally, we would read/write pm_state only after grabbing
+	 * pm_lock; since we're in a panic, we skip it.
+	 */
+
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		return -EIO;
+
+	/*
+	 * There is no guarantee this state change will take effect since
+	 * we're setting it without grabbing pm_lock; it's best effort
+	 */
+	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	/* the update should take effect immediately */
+	smp_wmb();
+
+	/* setup the RX vector table */
+	mhi_rddm_prepare(mhi_cntrl, rddm_image);
+	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
+
+	MHI_LOG("Starting BHIe programming for RDDM\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
+	if (unlikely(!sequence_id))
+		sequence_id = 1;
+
+
+	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+			    sequence_id);
+
+	MHI_LOG("Trigger device into RDDM mode\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+	MHI_LOG("Waiting for image download completion\n");
+	while (retry--) {
+		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+					 BHIE_RXVECSTATUS_STATUS_BMSK,
+					 BHIE_RXVECSTATUS_STATUS_SHFT,
+					 &rx_status);
+		if (ret)
+			return -EIO;
+
+		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
+			MHI_LOG("RDDM successfully collected\n");
+			return 0;
+		}
+
+		udelay(delayus);
+	}
+
+	ee = mhi_get_exec_env(mhi_cntrl);
+	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+	MHI_ERR("Did not complete RDDM transfer\n");
+	MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
+	MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
+
+	return -EIO;
+}
+
+/* download ramdump image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+	void __iomem *base = mhi_cntrl->bhi;
+	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+	struct image_info *rddm_image = mhi_cntrl->rddm_image;
+	struct mhi_buf *mhi_buf;
+	int ret;
+	u32 rx_status;
+	u32 sequence_id;
+
+	if (!rddm_image)
+		return -ENOMEM;
+
+	if (in_panic)
+		return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+	MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_RDDM ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state),
+			TO_MHI_EXEC_STR(mhi_cntrl->ee));
+		return -EIO;
+	}
+
+	mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+
+	/* vector table is the last entry */
+	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
+
+	read_lock_bh(pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		read_unlock_bh(pm_lock);
+		return -EIO;
+	}
+
+	MHI_LOG("Starting BHIe Programming for RDDM\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+
+	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
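+	/*
+	 * Ring the RX vector doorbell with the sequence id; the device
+	 * reports completion by updating the status field of
+	 * BHIE_RXVECSTATUS, which the wait_event_timeout() below polls
+	 * via mhi_read_reg_field().
+	 */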
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, sequence_id); + MHI_LOG("Waiting for image download completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL(mhi_download_rddm_img); + +static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + MHI_LOG("Starting BHIe Programming\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); + + mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + mhi_cntrl->sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, mhi_cntrl->sequence_id); + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 
0 : -EIO;
+}
+
+static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+			   void *buf,
+			   size_t size)
+{
+	u32 tx_status, val;
+	int i, ret;
+	void __iomem *base = mhi_cntrl->bhi;
+	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+	dma_addr_t phys = dma_map_single(mhi_cntrl->dev, buf, size,
+					 DMA_TO_DEVICE);
+	struct {
+		char *name;
+		u32 offset;
+	} error_reg[] = {
+		{ "ERROR_CODE", BHI_ERRCODE },
+		{ "ERROR_DBG1", BHI_ERRDBG1 },
+		{ "ERROR_DBG2", BHI_ERRDBG2 },
+		{ "ERROR_DBG3", BHI_ERRDBG3 },
+		{ NULL },
+	};
+
+	if (dma_mapping_error(mhi_cntrl->dev, phys))
+		return -ENOMEM;
+
+	MHI_LOG("Starting BHI programming\n");
+
+	/* program and start the SBL download via the BHI protocol */
+	read_lock_bh(pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		read_unlock_bh(pm_lock);
+		goto invalid_pm_state;
+	}
+
+	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(phys));
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(phys));
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+	mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id);
+	read_unlock_bh(pm_lock);
+
+	MHI_LOG("Waiting for image transfer completion\n");
+
+	/* waiting for image download completion */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   /*MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||*/
+			   mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+					      BHI_STATUS_MASK, BHI_STATUS_SHIFT,
+					      &tx_status) || tx_status,
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	#if 0
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		goto invalid_pm_state;
+	#endif
+
+	MHI_LOG("image transfer status:%d\n", tx_status);
+
+	if (tx_status == BHI_STATUS_ERROR) {
+		MHI_ERR("Image transfer failed\n");
+		read_lock_bh(pm_lock);
+		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+			for (i = 0; error_reg[i].name; i++) {
+				ret = mhi_read_reg(mhi_cntrl, base,
+						   error_reg[i].offset, &val);
+				if (ret)
+					break;
+				MHI_ERR("reg:%s value:0x%x\n",
+					error_reg[i].name, val);
+			}
+		}
+		read_unlock_bh(pm_lock);
+		goto invalid_pm_state;
+	}
+
+	dma_unmap_single(mhi_cntrl->dev, phys, size, DMA_TO_DEVICE);
+
+	return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
+
+invalid_pm_state:
+	dma_unmap_single(mhi_cntrl->dev, phys, size, DMA_TO_DEVICE);
+
+	return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info *image_info)
+{
+	int i;
+	struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+	for (i = 0; i < image_info->entries; i++, mhi_buf++)
+		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+				  mhi_buf->dma_addr);
+
+	kfree(image_info->mhi_buf);
+	kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info **image_info,
+			 size_t alloc_size)
+{
+	size_t seg_size = mhi_cntrl->seg_len;
+	/* require an additional entry for the vector table */
+	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+	int i;
+	struct image_info *img_info;
+	struct mhi_buf *mhi_buf;
+
+	MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
+		alloc_size, seg_size, segments);
+
+	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+	if (!img_info)
+		return -ENOMEM;
+
+	/* allocate memory for entries */
+	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+				    GFP_KERNEL);
+	if (!img_info->mhi_buf)
+		goto error_alloc_mhi_buf;
+
+	/* allocate and populate vector table */
+	mhi_buf = img_info->mhi_buf;
+	for (i = 0; i < segments; i++, mhi_buf++) {
+		size_t vec_size = seg_size;
+
+		/* last entry is for vector table */
+		if (i == segments - 1)
+			vec_size = sizeof(struct __packed bhi_vec_entry) * i;
+
+		mhi_buf->len = vec_size;
+		mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+						  &mhi_buf->dma_addr, GFP_KERNEL);
+		if (!mhi_buf->buf)
+			goto error_alloc_segment;
+
+		MHI_LOG("Entry:%d Address:0x%llx size:%zd\n", i,
+			(u64)mhi_buf->dma_addr, mhi_buf->len);
+	}
+
+	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+	img_info->entries = segments;
+	*image_info = img_info;
+
+	MHI_LOG("Successfully allocated bhi vec table\n");
+
+	return 0;
+
+error_alloc_segment:
+	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+				  mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+	kfree(img_info);
+
+	return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+			      const struct firmware *firmware,
+			      struct image_info *img_info)
+{
+	size_t remainder = firmware->size;
+	size_t to_cpy;
+	const u8 *buf = firmware->data;
+	int i = 0;
+	struct mhi_buf *mhi_buf = img_info->mhi_buf;
+	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+	while (remainder) {
+		MHI_ASSERT(i >= img_info->entries, "malformed vector table");
+
+		to_cpy = min(remainder, mhi_buf->len);
+		memcpy(mhi_buf->buf, buf, to_cpy);
+		bhi_vec->dma_addr = mhi_buf->dma_addr;
+		bhi_vec->size = to_cpy;
+
+		MHI_VERB("Setting Vector:0x%llx size: %llu\n",
+			 bhi_vec->dma_addr, bhi_vec->size);
+		buf += to_cpy;
+		remainder -= to_cpy;
+		i++;
+		bhi_vec++;
+		mhi_buf++;
+	}
+}
+
+void mhi_fw_load_worker(struct work_struct *work)
+{
+	int ret;
+	struct mhi_controller *mhi_cntrl;
+	const char *fw_name;
+	const struct firmware *firmware;
+	struct image_info *image_info;
+	void *buf;
+	size_t size;
+
+	mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
+
+	MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 MHI_IN_PBL(mhi_cntrl->ee) ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid state\n");
+		return;
+	}
+
+	MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* if the device is in pass through, we do not have to load firmware */
+	if (mhi_cntrl->ee == MHI_EE_PT)
+		return;
+
+	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+		mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+						     !mhi_cntrl->seg_len))) {
+		MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
+		return;
+	}
+
+	ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
+	if (ret) {
+		MHI_ERR("Error loading firmware, ret:%d\n", ret);
+		return;
+	}
+
+	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+	/* the SBL size provided is a maximum, not necessarily the image size */
+	if (size > firmware->size)
+		size = firmware->size;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		MHI_ERR("Could not allocate memory for image\n");
+		release_firmware(firmware);
+		return;
+	}
+
+	/* load sbl image */
+	memcpy(buf, firmware->data, size);
+	ret = mhi_fw_load_sbl(mhi_cntrl, buf, size);
+	kfree(buf);
+
+	if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
+		release_firmware(firmware);
+
+	/* on error or in EDL, we're done */
+	if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+		return;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/*
+	 * if we're doing fbc, populate vector tables while
+	 * the device transitions into MHI READY state
+	 */
+	if (mhi_cntrl->fbc_download) {
+		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+					   firmware->size);
+		if (ret) {
+			MHI_ERR("Error alloc size of %zu\n", firmware->size);
+			goto error_alloc_fw_table;
+		}
+
+		MHI_LOG("Copying firmware image into vector table\n");
+
+		/* load the firmware into BHIE vec table */
+		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+	}
+
+	/* transitioning into MHI RESET->READY state */
+	ret = mhi_ready_state_transition(mhi_cntrl);
+
+	MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+	if (!mhi_cntrl->fbc_download)
+		return;
+
+	if (ret) {
+		MHI_ERR("Did not transition to READY state\n");
+		goto error_read;
+	}
+
+	/* wait for BHIE event */
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_BHIE ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI did not enter BHIE\n");
+		goto error_read;
+	}
+
+	/* start full firmware image download */
+	image_info = mhi_cntrl->fbc_image;
+	ret = mhi_fw_load_amss(mhi_cntrl,
+			       /* last entry is vec table */
+			       &image_info->mhi_buf[image_info->entries - 1]);
+
+	MHI_LOG("amss fw_load, ret:%d\n", ret);
+
+	release_firmware(firmware);
+
+	return;
+
+error_read:
+	mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+	mhi_cntrl->fbc_image = NULL;
+
+error_alloc_fw_table:
+	release_firmware(firmware);
+}
+
+int BhiWrite(struct mhi_controller *mhi_cntrl, void *buf, size_t size)
+{
+	int ret;
+
+	MHI_LOG("Device current EE:%s,%d M:%s\n",
+		TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
+		mhi_cntrl->ee,
+		TO_MHI_STATE_STR(mhi_get_m_state(mhi_cntrl)));
+
+	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+
+	if (!MHI_IN_PBL(mhi_cntrl->ee)/* || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)*/) {
+		MHI_ERR("MHI is not in valid BHI state:%d\n", mhi_cntrl->ee);
+		return -EINVAL;
+	}
+
+	if 
(mhi_cntrl->ee != MHI_EE_EDL) + return 0; + + ret = mhi_fw_load_sbl(mhi_cntrl, buf, size); + + if (ret) { + MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee); + goto error_state; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + if (ret) { + MHI_ERR("Did not transition to READY state\n"); + goto error_state; + } + + MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + /* wait for BHIE event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_FP || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI did not enter Flash Programmer Environment\n"); + goto error_state; + } + + MHI_LOG("MHI enter Flash Programmer Environment\n"); + return 0; + +error_state: + MHI_LOG("Device current EE:%s, M:%s\n", + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)), + TO_MHI_STATE_STR(mhi_get_m_state(mhi_cntrl))); + + return ret; +} + +static int mhi_cntrl_open(struct inode *inode, struct file *f) +{ + return 0; +} + +static int mhi_cntrl_release(struct inode *inode, struct file *f) +{ + return 0; +} + +static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) +{ + long ret = -EINVAL; + void *ubuf = (void *)__arg; + struct miscdevice *c = (struct miscdevice *)f->private_data; + struct mhi_controller *mhi_cntrl = container_of(c, struct mhi_controller, miscdev); + + switch (cmd) { + case IOCTL_BHI_GETDEVINFO: + { + BHI_INFO_TYPE bhi_info; + ret = BhiRead(mhi_cntrl, &bhi_info); + if (ret) { + MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret); + return ret; + } + + ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info)); + if (ret) { + MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret); + } + } + break; + + case IOCTL_BHI_WRITEIMAGE: + { + void *buf; + size_t size; + + ret = copy_from_user(&size, ubuf, sizeof(size)); + if (ret) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret); + return ret; + } + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) { + return -ENOMEM; + } + + ret = copy_from_user(buf, ubuf+sizeof(size), size); + if (ret) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %ld\n", ret); + kfree(buf); + return ret; + } + + ret = BhiWrite(mhi_cntrl, buf, size); + if (ret) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret); + } + kfree(buf); + } + break; + + default: + break; + } + + return ret; +} + +static const struct file_operations mhi_cntrl_fops = { + .unlocked_ioctl = mhi_cntrl_ioctl, + .open = mhi_cntrl_open, + .release = mhi_cntrl_release, +}; + +int mhi_cntrl_register_miscdev(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->miscdev.minor = MISC_DYNAMIC_MINOR; + mhi_cntrl->miscdev.name = "mhi_BHI"; + mhi_cntrl->miscdev.fops = &mhi_cntrl_fops; + + return misc_register(&mhi_cntrl->miscdev); +} + +void mhi_cntrl_deregister_miscdev(struct mhi_controller *mhi_cntrl) +{ + misc_deregister(&mhi_cntrl->miscdev); +} diff --git a/fibocom_MHI/src/core/mhi_common.h b/fibocom_MHI/src/core/mhi_common.h new file mode 100644 index 0000000..a554604 --- /dev/null +++ b/fibocom_MHI/src/core/mhi_common.h @@ -0,0 +1,362 @@ +#ifndef __MHI_COMMON_H +#define __MHI_COMMON_H + 
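+/*
+ * For reference: the mhi_BHI misc device registered in mhi_boot.c above is
+ * driven from userspace through IOCTL_BHI_GETDEVINFO and
+ * IOCTL_BHI_WRITEIMAGE. A minimal, hypothetical userspace sketch of the
+ * WRITEIMAGE flow follows; the device node name comes from
+ * mhi_cntrl_register_miscdev(), while img/img_len and the error handling
+ * are assumptions, not part of this patch. The driver expects the image
+ * length (a size_t) immediately followed by the payload in one buffer:
+ *
+ *	#define IOCTL_BHI_WRITEIMAGE (0x8BE0 + 2)
+ *
+ *	int fd = open("/dev/mhi_BHI", O_RDWR);
+ *	char *buf = malloc(sizeof(size_t) + img_len);
+ *
+ *	memcpy(buf, &img_len, sizeof(size_t));
+ *	memcpy(buf + sizeof(size_t), img, img_len);
+ *	if (fd < 0 || ioctl(fd, IOCTL_BHI_WRITEIMAGE, buf) != 0)
+ *		perror("IOCTL_BHI_WRITEIMAGE");
+ *	free(buf);
+ *	close(fd);
+ */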
+#include
+
+/* MHI control data structures allotted by the host, including
+ * channel context array, event context array, command context and rings */
+
+/* Channel context state */
+enum mhi_dev_ch_ctx_state {
+	MHI_DEV_CH_STATE_DISABLED,
+	MHI_DEV_CH_STATE_ENABLED,
+	MHI_DEV_CH_STATE_RUNNING,
+	MHI_DEV_CH_STATE_SUSPENDED,
+	MHI_DEV_CH_STATE_STOP,
+	MHI_DEV_CH_STATE_ERROR,
+	MHI_DEV_CH_STATE_RESERVED,
+	MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
+};
+
+/* Channel type */
+enum mhi_dev_ch_ctx_type {
+	MHI_DEV_CH_TYPE_NONE,
+	MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
+	MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
+	MHI_DEV_CH_RESERVED
+};
+
+/* Channel context type */
+struct mhi_dev_ch_ctx {
+	enum mhi_dev_ch_ctx_state ch_state;
+	enum mhi_dev_ch_ctx_type ch_type;
+	uint32_t err_indx;
+	uint64_t rbase;
+	uint64_t rlen;
+	uint64_t rp;
+	uint64_t wp;
+} __packed;
+
+enum mhi_dev_ring_element_type_id {
+	MHI_DEV_RING_EL_INVALID = 0,
+	MHI_DEV_RING_EL_NOOP = 1,
+	MHI_DEV_RING_EL_TRANSFER = 2,
+	MHI_DEV_RING_EL_RESET = 16,
+	MHI_DEV_RING_EL_STOP = 17,
+	MHI_DEV_RING_EL_START = 18,
+	MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
+	MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
+	MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
+	MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
+	MHI_DEV_RING_EL_UNDEF
+};
+
+enum mhi_dev_ring_state {
+	RING_STATE_UINT = 0,
+	RING_STATE_IDLE,
+	RING_STATE_PENDING,
+};
+
+enum mhi_dev_ring_type {
+	RING_TYPE_CMD = 0,
+	RING_TYPE_ER,
+	RING_TYPE_CH,
+	RING_TYPE_INVAL
+};
+
+/* Event context interrupt moderation */
+enum mhi_dev_evt_ctx_int_mod_timer {
+	MHI_DEV_EVT_INT_MODERATION_DISABLED
+};
+
+/* Event ring type */
+enum mhi_dev_evt_ctx_event_ring_type {
+	MHI_DEV_EVT_TYPE_DEFAULT,
+	MHI_DEV_EVT_TYPE_VALID,
+	MHI_DEV_EVT_RESERVED
+};
+
+/* Event ring context type */
+struct mhi_dev_ev_ctx {
+	uint32_t res1:16;
+	enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
+	enum mhi_dev_evt_ctx_event_ring_type ertype;
+	uint32_t msivec;
+	uint64_t rbase;
+	uint64_t rlen;
+	uint64_t rp;
+	uint64_t wp;
+} __packed;
+
+/* Command context */
+struct mhi_dev_cmd_ctx {
+	uint32_t res1;
+	uint32_t res2;
+	uint32_t res3;
+	uint64_t rbase;
+	uint64_t rlen;
+	uint64_t rp;
+	uint64_t wp;
+} __packed;
+
+/* generic context */
+struct mhi_dev_gen_ctx {
+	uint32_t res1;
+	uint32_t res2;
+	uint32_t res3;
+	uint64_t rbase;
+	uint64_t rlen;
+	uint64_t rp;
+	uint64_t wp;
+} __packed;
+
+/* Transfer ring element */
+struct mhi_dev_transfer_ring_element {
+	uint64_t data_buf_ptr;
+	uint32_t len:16;
+	uint32_t res1:16;
+	uint32_t chain:1;
+	uint32_t res2:7;
+	uint32_t ieob:1;
+	uint32_t ieot:1;
+	uint32_t bei:1;
+	uint32_t res3:5;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t res4:8;
+} __packed;
+
+/* Command ring element */
+/* Command ring No op command */
+struct mhi_dev_cmd_ring_op {
+	uint64_t res1;
+	uint32_t res2;
+	uint32_t res3:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t chid:8;
+} __packed;
+
+/* Command ring reset channel command */
+struct mhi_dev_cmd_ring_reset_channel_cmd {
+	uint64_t res1;
+	uint32_t res2;
+	uint32_t res3:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t chid:8;
+} __packed;
+
+/* Command ring stop channel command */
+struct mhi_dev_cmd_ring_stop_channel_cmd {
+	uint64_t res1;
+	uint32_t res2;
+	uint32_t res3:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t chid:8;
+} __packed;
+
+/* Command ring start channel command */
+struct mhi_dev_cmd_ring_start_channel_cmd {
+	uint64_t res1;
+	uint32_t seqnum;
+	uint32_t reliable:1;
+	uint32_t res2:15;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t chid:8;
+} __packed;
+
+enum mhi_dev_cmd_completion_code {
+	MHI_CMD_COMPL_CODE_INVALID = 0,
+	MHI_CMD_COMPL_CODE_SUCCESS = 1,
+	MHI_CMD_COMPL_CODE_EOT = 2,
+	MHI_CMD_COMPL_CODE_OVERFLOW = 3,
+	MHI_CMD_COMPL_CODE_EOB = 4,
+	MHI_CMD_COMPL_CODE_UNDEFINED = 16,
+	MHI_CMD_COMPL_CODE_RING_EL = 17,
+	MHI_CMD_COMPL_CODE_RES
+};
+
+/* Event ring elements */
+/* Transfer completion event */
+struct mhi_dev_event_ring_transfer_completion {
+	uint64_t ptr;
+	uint32_t len:16;
+	uint32_t res1:8;
+	enum mhi_dev_cmd_completion_code code:8;
+	uint32_t res2:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t chid:8;
+} __packed;
+
+/* Command completion event */
+struct mhi_dev_event_ring_cmd_completion {
+	uint64_t ptr;
+	uint32_t res1:24;
+	enum mhi_dev_cmd_completion_code code:8;
+	uint32_t res2:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t res3:8;
+} __packed;
+
+enum mhi_dev_state {
+	MHI_DEV_RESET_STATE = 0,
+	MHI_DEV_READY_STATE,
+	MHI_DEV_M0_STATE,
+	MHI_DEV_M1_STATE,
+	MHI_DEV_M2_STATE,
+	MHI_DEV_M3_STATE,
+	MHI_DEV_MAX_STATE,
+	MHI_DEV_SYSERR_STATE = 0xff
+};
+
+/* MHI state change event */
+struct mhi_dev_event_ring_state_change {
+	uint64_t ptr;
+	uint32_t res1:24;
+	enum mhi_dev_state mhistate:8;
+	uint32_t res2:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t res3:8;
+} __packed;
+
+enum mhi_dev_execenv {
+	MHI_DEV_SBL_EE = 1,
+	MHI_DEV_AMSS_EE = 2,
+	MHI_DEV_UNRESERVED
+};
+
+/* EE state change event */
+struct mhi_dev_event_ring_ee_state_change {
+	uint64_t ptr;
+	uint32_t res1:24;
+	enum mhi_dev_execenv execenv:8;
+	uint32_t res2:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t res3:8;
+} __packed;
+
+/* Generic cmd to parse common details like type and channel id */
+struct mhi_dev_ring_generic {
+	uint64_t ptr;
+	uint32_t res1:24;
+	enum mhi_dev_state mhistate:8;
+	uint32_t res2:16;
+	enum mhi_dev_ring_element_type_id type:8;
+	uint32_t chid:8;
+} __packed;
+
+struct mhi_config {
+	uint32_t mhi_reg_len;
+	uint32_t version;
+	uint32_t event_rings;
+	uint32_t channels;
+	uint32_t chdb_offset;
+	uint32_t erdb_offset;
+};
+
+#define NUM_CHANNELS 128
+#define HW_CHANNEL_BASE 100
+#define HW_CHANNEL_END 107
+#define MHI_ENV_VALUE 2
+#define MHI_MASK_ROWS_CH_EV_DB 4
+#define TRB_MAX_DATA_SIZE 8192
+#define MHI_CTRL_STATE 25
+#define IPA_DMA_SYNC 1
+#define IPA_DMA_ASYNC 0
+
+/* maximum transfer completion events buffer */
+#define MAX_TR_EVENTS 50
+/* maximum event requests */
+#define MHI_MAX_EVT_REQ 50
+
+/* Possible ring element types */
+union mhi_dev_ring_element_type {
+	struct mhi_dev_cmd_ring_op cmd_no_op;
+	struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
+	struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
+	struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
+	struct mhi_dev_transfer_ring_element tre;
+	struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
+	struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
+	struct mhi_dev_event_ring_state_change evt_state_change;
+	struct mhi_dev_event_ring_ee_state_change evt_ee_state;
+	struct mhi_dev_ring_generic generic;
+};
+
+/* Transfer ring element type */
+union mhi_dev_ring_ctx {
+	struct mhi_dev_cmd_ctx cmd;
+	struct mhi_dev_ev_ctx ev;
+	struct mhi_dev_ch_ctx ch;
+	struct mhi_dev_gen_ctx generic;
+};
+
+/* MHI host Control and data address region */
+struct mhi_host_addr {
+	uint32_t ctrl_base_lsb;
+	uint32_t ctrl_base_msb;
+	uint32_t ctrl_limit_lsb;
+	uint32_t ctrl_limit_msb;
+	uint32_t data_base_lsb;
+	uint32_t data_base_msb;
+	uint32_t 
data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; +#endif /* _MHI_COMMON_H_ */ diff --git a/fibocom_MHI/src/core/mhi_dtr.c b/fibocom_MHI/src/core/mhi_dtr.c new file mode 100644 index 0000000..3651ee1 --- /dev/null +++ b/fibocom_MHI/src/core/mhi_dtr.c @@ -0,0 +1,181 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +struct __packed dtr_ctrl_msg { + u32 preamble; + u32 msg_id; + u32 dest_id; + u32 size; + u32 msg; +}; + +#define CTRL_MAGIC (0x4C525443) +#define CTRL_MSG_DTR BIT(0) +#define CTRL_MSG_ID (0x10) + +static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + u32 tiocm) +{ + struct dtr_ctrl_msg *dtr_msg = NULL; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + int ret = 0; + + tiocm &= TIOCM_DTR; + if (mhi_chan->tiocm == tiocm) + return 0; + + mutex_lock(&dtr_chan->mutex); + + dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL); + if (!dtr_msg) { + ret = -ENOMEM; + goto tiocm_exit; + } + + dtr_msg->preamble = CTRL_MAGIC; + dtr_msg->msg_id = CTRL_MSG_ID; + dtr_msg->dest_id = mhi_chan->chan; + dtr_msg->size = sizeof(u32); + if (tiocm & TIOCM_DTR) + dtr_msg->msg |= CTRL_MSG_DTR; + + reinit_completion(&dtr_chan->completion); + ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg, + sizeof(*dtr_msg), MHI_EOT); + if (ret) + goto tiocm_exit; + + ret = wait_for_completion_timeout(&dtr_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret) { + MHI_ERR("Failed to receive transfer callback\n"); + ret = -EIO; + goto tiocm_exit; + } + + ret = 0; + mhi_chan->tiocm = tiocm; + +tiocm_exit: + kfree(dtr_msg); + mutex_unlock(&dtr_chan->mutex); + + return ret; +} + +long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->ul_chan; + int ret; + + /* ioctl not supported by this controller */ + if (!mhi_cntrl->dtr_dev) + return -EIO; + + switch (cmd) { + case TIOCMGET: + return mhi_chan->tiocm; + case TIOCMSET: + { + u32 tiocm; + + ret = get_user(tiocm, (u32 *)arg); + if (ret) + return ret; + + return mhi_dtr_tiocmset(mhi_cntrl, mhi_chan, tiocm); + } + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(mhi_ioctl); + +static void mhi_dtr_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + + MHI_VERB("Received with status:%d\n", mhi_result->transaction_status); + if (!mhi_result->transaction_status) + complete(&dtr_chan->completion); +} + +static void mhi_dtr_remove(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_cntrl->dtr_dev = NULL; +} + +static int mhi_dtr_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + MHI_LOG("Enter for DTR control channel\n"); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (!ret) + mhi_cntrl->dtr_dev = mhi_dev; + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +static const struct mhi_device_id mhi_dtr_table[] = { + { .chan = "IP_CTRL" }, + { }, +}; + +static struct mhi_driver mhi_dtr_driver = { + .id_table = mhi_dtr_table, + .remove = mhi_dtr_remove, + .probe = mhi_dtr_probe, + .ul_xfer_cb = mhi_dtr_xfer_cb, + .dl_xfer_cb = mhi_dtr_xfer_cb, + .driver = { + .name = "MHI_DTR", + .owner = THIS_MODULE, + } +}; + +int __init mhi_dtr_init(void) +{ + return mhi_driver_register(&mhi_dtr_driver); +} + +void mhi_dtr_exit(void) { + mhi_driver_unregister(&mhi_dtr_driver); +} diff --git a/fibocom_MHI/src/core/mhi_init.c 
b/fibocom_MHI/src/core/mhi_init.c new file mode 100644 index 0000000..d976411 --- /dev/null +++ b/fibocom_MHI/src/core/mhi_init.c @@ -0,0 +1,1442 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PBL", + [MHI_EE_SBL] = "SBL", + [MHI_EE_AMSS] = "AMSS", + [MHI_EE_RDDM] = "RDDM", + [MHI_EE_WFW] = "WFW", + [MHI_EE_PT] = "PASS THRU", + [MHI_EE_EDL] = "EDL", + [MHI_EE_FP] = "FlashProg", + [MHI_EE_UEFI] = "UEFI", + [MHI_EE_DISABLE_TRANSITION] = "RESET", +}; + +const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = { + [MHI_ST_TRANSITION_PBL] = "PBL", + [MHI_ST_TRANSITION_READY] = "READY", + [MHI_ST_TRANSITION_SBL] = "SBL", + [MHI_ST_TRANSITION_AMSS] = "AMSS", + [MHI_ST_TRANSITION_FP] = "FlashProg", + [MHI_ST_TRANSITION_BHIE] = "BHIE", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_D3] = "D3", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS_ERR", +}; + +static const char * const mhi_pm_state_str[] = { + "DISABLE", + "POR", + "M0", + "M1", + "M1->M2", + "M2", + "M?->M3", + "M3", + "M3->M0", + "FW DL Error", + "SYS_ERR Detect", + "SYS_ERR Process", + "SHUTDOWN Process", + "LD or Error Fatal Detect", +}; + +struct mhi_bus mhi_bus; + +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state) +{ + int index = find_last_bit((unsigned long *)&state, 32); + + if (index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +static void mhi_ring_aligned_check(struct mhi_controller *mhi_cntrl, u64 rbase, u64 rlen) { + uint64_t ra; + + ra = rbase; + do_div(ra, roundup_pow_of_two(rlen)); + + if (rbase != ra * roundup_pow_of_two(rlen)) { + MHI_ERR("bad params ring base not aligned 0x%llx align 0x%lx\n", rbase, roundup_pow_of_two(rlen)); + } +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + MHI_LOG("enter\n"); + + if (mhi_cntrl->msi_allocated == 1) { + free_irq(mhi_cntrl->irq[0], mhi_event); + return; + } + + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + int i; + int ret; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + if (mhi_cntrl->msi_allocated == 1) { + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + mhi_event->msi = 0; + } + + ret = request_irq(mhi_cntrl->irq[0], + mhi_msi_handlr, IRQF_SHARED, "mhi", mhi_cntrl->mhi_event); + if (ret) { + MHI_ERR("Error requesting irq:%d, ret=%d\n", mhi_cntrl->irq[0], ret); + } + return ret; + 
} + + /* for BHI INTVEC msi */ + ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handlr, + mhi_intvec_threaded_handlr, IRQF_ONESHOT, + "mhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ret = request_irq(mhi_cntrl->irq[mhi_event->msi], + mhi_msi_handlr, IRQF_SHARED, "mhi", + mhi_event); + if (ret) { + MHI_ERR("Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->msi], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + MHI_LOG("enter\n"); + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; + + ring->base = NULL; + ring->iommu_base = 0; + } + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; + + ring->base = NULL; + ring->iommu_base = 0; + } + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + + mhi_cntrl->mhi_ctxt = NULL; +} + +static int mhi_init_debugfs_mhi_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_states_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_event_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_event_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_chan_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_chan_show, inode->i_private); +} + +static const struct file_operations debugfs_state_ops = { + .open = mhi_init_debugfs_mhi_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_ev_ops = { + .open = mhi_init_debugfs_mhi_event_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_chan_ops = { + .open = mhi_init_debugfs_mhi_chan_open, + .release = single_release, + .read = seq_read, +}; + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL, + mhi_debugfs_trigger_reset, "%llu\n"); + +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl) +{ + struct dentry *dentry; + char node[32]; + + if (!mhi_cntrl->parent) + return; + + snprintf(node, sizeof(node), "%04x_%02u:%02u.%02u", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + + dentry = debugfs_create_dir(node, mhi_cntrl->parent); + if (IS_ERR_OR_NULL(dentry)) + return; + + debugfs_create_file("states", 0444, dentry, mhi_cntrl, + &debugfs_state_ops); + debugfs_create_file("events", 0444, dentry, mhi_cntrl, + &debugfs_ev_ops); + debugfs_create_file("chan", 0444, dentry, mhi_cntrl, &debugfs_chan_ops); + debugfs_create_file("reset", 0444, dentry, mhi_cntrl, + &debugfs_trigger_reset_fops); + mhi_cntrl->dentry = dentry; +} + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->dentry); + mhi_cntrl->dentry = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt 
*mhi_ctxt;
+	struct mhi_chan_ctxt *chan_ctxt;
+	struct mhi_event_ctxt *er_ctxt;
+	struct mhi_cmd_ctxt *cmd_ctxt;
+	struct mhi_chan *mhi_chan;
+	struct mhi_event *mhi_event;
+	struct mhi_cmd *mhi_cmd;
+	int ret = -ENOMEM, i;
+
+	atomic_set(&mhi_cntrl->dev_wake, 0);
+	atomic_set(&mhi_cntrl->alloc_size, 0);
+
+	mhi_ctxt = &mhi_cntrl->data->mhi_ctxt;
+
+	/* setup channel ctxt */
+	mhi_ctxt->ctrl_seg = mhi_alloc_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg),
+						&mhi_ctxt->ctrl_seg_addr, GFP_KERNEL);
+	if (!mhi_ctxt->ctrl_seg)
+		goto error_alloc_chan_ctxt;
+
+	MHI_LOG("mhi_ctxt->ctrl_seg = %p\n", mhi_ctxt->ctrl_seg);
+	if ((unsigned long)mhi_ctxt->ctrl_seg & (4096-1)) {
+		mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr);
+		goto error_alloc_chan_ctxt;
+	}
+
+/*
++Transfer rings
++--------------
++MHI channels are logical, unidirectional data pipes between host and device.
++Each channel is associated with a single transfer ring. The data direction can
++be either inbound (device to host) or outbound (host to device). Transfer
++descriptors are managed by using transfer rings, which are defined for each
++channel between device and host and reside in the host memory.
++
++Transfer ring Pointer: Transfer Ring Array
++[Read Pointer (RP)] ----------->[Ring Element] } TD
++[Write Pointer (WP)]-           [Ring Element]
++                     -          [Ring Element]
++                      --------->[Ring Element]
++                                [Ring Element]
++
++1. Host allocates memory for the transfer ring
++2. Host sets base, read pointer, write pointer in corresponding channel context
++3. Ring is considered empty when RP == WP
++4. Ring is considered full when WP + 1 == RP
++5. RP indicates the next element to be serviced by the device
++6. When the host has a new buffer to send, it updates the ring element with buffer information
++7. Host increments the WP to the next element
++8. Host rings the associated channel DB.
+*/
+
+	mhi_ctxt->chan_ctxt = mhi_ctxt->ctrl_seg->chan_ctxt;
+	mhi_ctxt->chan_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ctxt);
+
+	mhi_chan = mhi_cntrl->data->mhi_chan;
+	chan_ctxt = mhi_ctxt->chan_ctxt;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+		/* If it's an offload channel, skip this step */
+		if (mhi_chan->offload_ch)
+			continue;
+
+		chan_ctxt->chstate = MHI_CH_STATE_DISABLED;
+		chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode;
+		chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg;
+		chan_ctxt->chtype = cpu_to_le32(mhi_chan->dir);
+		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
+
+		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+		mhi_chan->tre_ring.db_addr = &chan_ctxt->wp;
+	}
+
+/*
++Event rings
++-----------
++Events from the device to the host are organized in event rings and defined
++in event descriptors. Event rings are arrays of EDs that reside in the host
++memory.
++
++Event ring Pointer: Event Ring Array
++[Read Pointer (RP)] ----------->[Ring Element] } ED
++[Write Pointer (WP)]-           [Ring Element]
++                     -          [Ring Element]
++                      --------->[Ring Element]
++                                [Ring Element]
++
++1. Host allocates memory for the event ring
++2. Host sets base, read pointer, write pointer in corresponding channel context
++3. Both host and device have a local copy of RP, WP
++4. Ring is considered empty (no events to service) when WP + 1 == RP
++5. Ring is full of events when RP == WP
++6. RP - 1 is the last event the device programmed
++7. When there is a new event the device needs to send, the device updates the ED pointed to by RP
++8. Device increments RP to the next element
++9. Device triggers an interrupt
++
++Example Operation for data transfer:
++
++1. Host prepares TD with buffer information
++2. Host increments Chan[id].ctxt.WP
++3. Host rings the channel DB register
++4. Device wakes up and processes the TD
++5. Device generates a completion event for that TD by updating the ED
++6. Device increments Event[id].ctxt.RP
++7. Device triggers MSI to wake the host
++8. Host wakes up and checks the event ring for the completion event
++9. Host updates the Event[i].ctxt.WP to indicate processing of the completion event.
+*/
+	mhi_ctxt->er_ctxt = mhi_ctxt->ctrl_seg->er_ctxt;
+	mhi_ctxt->er_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, er_ctxt);
+
+	er_ctxt = mhi_ctxt->er_ctxt;
+	mhi_event = mhi_cntrl->data->mhi_event;
+	for (i = 0; i < NUM_MHI_EVT_RINGS; i++, er_ctxt++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		/* it's a satellite ev, we do not touch it */
+		if (mhi_event->offload_ev)
+			continue;
+
+		er_ctxt->intmodc = 0;
+		er_ctxt->intmodt = cpu_to_le16(mhi_event->intmod);
+		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
+		if (mhi_cntrl->msi_allocated == 1) {
+			mhi_event->msi = 0;
+		}
+		er_ctxt->msivec = cpu_to_le32(mhi_event->msi);
+		mhi_event->db_cfg.db_mode = true;
+
+		ring->el_size = sizeof(struct __packed mhi_tre);
+		ring->len = ring->el_size * ring->elements;
+
+		ring->alloc_size = ring->len;
+
+
+		if (i == 0)
+		{
+			ring->pre_aligned = mhi_ctxt->ctrl_seg->event_ring_0;
+			ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, event_ring_0);
+		}
+		else if (i == 1)
+		{
+			ring->pre_aligned = mhi_ctxt->ctrl_seg->event_ring_1;
+			ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, event_ring_1);
+		}
+		else if (i == 2)
+		{
+			ring->pre_aligned = mhi_ctxt->ctrl_seg->event_ring_2;
+			ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, event_ring_2);
+		}
+
+		ring->iommu_base = ring->dma_handle;
+		ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+		ring->rp = ring->wp = ring->base;
+		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
+		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+		er_ctxt->rlen = cpu_to_le64(ring->len);
+		ring->ctxt_wp = &er_ctxt->wp;
+
+		mhi_ring_aligned_check(mhi_cntrl, er_ctxt->rbase, er_ctxt->rlen);
+		memset(ring->base, 0xCC, ring->len);
+	}
+
+	mhi_ctxt->cmd_ctxt = mhi_ctxt->ctrl_seg->cmd_ctxt;
+	mhi_ctxt->cmd_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ctxt);
+
+	mhi_cmd = mhi_cntrl->data->mhi_cmd;
+	cmd_ctxt = mhi_ctxt->cmd_ctxt;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+		struct mhi_ring *ring = &mhi_cmd->ring;
+
+		ring->el_size = sizeof(struct __packed mhi_tre);
+		ring->elements = CMD_EL_PER_RING;
+		ring->len = ring->el_size * ring->elements;
+
+		ring->alloc_size = ring->len;
+		ring->pre_aligned = mhi_ctxt->ctrl_seg->cmd_ring[i];
+		ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ring[i]);
+		ring->iommu_base = ring->dma_handle;
+		ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+		ring->rp = ring->wp = ring->base;
+		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
+		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+		cmd_ctxt->rlen = cpu_to_le64(ring->len);
+		ring->ctxt_wp = &cmd_ctxt->wp;
+
+		mhi_ring_aligned_check(mhi_cntrl, cmd_ctxt->rbase, cmd_ctxt->rlen);
+	}
+
+	mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+	return 0;
+
+error_alloc_chan_ctxt:
+
+	return ret;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+	u32 val;
+	int i, ret;
+	struct 
mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0 } + }; + + MHI_LOG("Initializing MMIO\n"); + + MHI_LOG("iova_start = %llx, iova_stop = %llx\n", (u64)mhi_cntrl->iova_start, (u64)mhi_cntrl->iova_stop); + MHI_LOG("cmd_ctxt_addr = %llx\n", (u64)mhi_cntrl->mhi_ctxt->cmd_ctxt_addr); + MHI_LOG("chan_ctxt_addr = %llx\n", (u64)mhi_cntrl->mhi_ctxt->chan_ctxt_addr); + MHI_LOG("er_ctxt_addr = %llx\n", (u64)mhi_cntrl->mhi_ctxt->er_ctxt_addr); + + /* set up DB register for all the chan rings */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("CHDBOFF:0x%x\n", val); + + /* setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; + + /* setup channel db addresses */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* setup event ring db addresses */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("ERDBOFF:0x%x\n", val); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* set up DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + MHI_LOG("Programming all MMIO values.\n"); + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + kfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + 
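+	/*
+	 * Only buf_ring->base (kzalloc'd in mhi_init_chan_ctxt()) is freed
+	 * above; the TRE ring memory lives inside the shared ctrl_seg and
+	 * is released as a whole by mhi_deinit_dev_ctxt().
+	 */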
chan_ctxt->rbase = cpu_to_le64(0); +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct __packed mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + tre_ring->alloc_size = tre_ring->len; + if (MHI_CLIENT_IP_HW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_ctxt->ctrl_seg->hw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_HW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_ctxt->ctrl_seg->hw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_out_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]); + } + else { + tre_ring->pre_aligned = &mhi_ctxt->ctrl_seg->chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ring[mhi_chan->ring]); + } + tre_ring->iommu_base = tre_ring->dma_handle; + tre_ring->base = tre_ring->pre_aligned + (tre_ring->iommu_base - tre_ring->dma_handle); + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = kzalloc(buf_ring->len, GFP_KERNEL); + MHI_LOG("%d size = %zd\n", __LINE__, buf_ring->len); + + if (!buf_ring->base) { + return -ENOMEM; + } + + chan_ctxt->chstate = MHI_CH_STATE_ENABLED; + chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = cpu_to_le64(tre_ring->len); + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = 1; + + mhi_ring_aligned_check(mhi_cntrl, chan_ctxt->rbase, chan_ctxt->rlen); + /* update to all cores */ + smp_wmb(); + + return 0; +} + +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *ch_ctxt; + int er_index, chan; + + mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : mhi_dev->dl_chan;
+	er_index = mhi_chan->er_index;
+	chan = mhi_chan->chan;
+
+	for (; elements > 0; elements--, cfg_tbl++) {
+		/* update event context array */
+		if (!strcmp(cfg_tbl->name, "ECA")) {
+			er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index];
+			if (sizeof(*er_ctxt) != cfg_tbl->len) {
+				MHI_ERR(
+					"Invalid ECA size, expected:%zu actual:%zu\n",
+					sizeof(*er_ctxt), cfg_tbl->len);
+				return -EINVAL;
+			}
+			memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt));
+			continue;
+		}
+
+		/* update channel context array */
+		if (!strcmp(cfg_tbl->name, "CCA")) {
+			ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan];
+			if (cfg_tbl->len != sizeof(*ch_ctxt)) {
+				MHI_ERR(
+					"Invalid CCA size, expected:%zu actual:%zu\n",
+					sizeof(*ch_ctxt), cfg_tbl->len);
+				return -EINVAL;
+			}
+			memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt));
+			continue;
+		}
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+			   struct device_node *of_node)
+{
+	int num, i;
+	struct mhi_event *mhi_event;
+	u32 bit_cfg;
+
+	num = NUM_MHI_EVT_RINGS;
+
+	mhi_cntrl->total_ev_rings = num;
+	mhi_cntrl->mhi_event = mhi_cntrl->data->mhi_event;
+
+	/* populate ev ring */
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		mhi_event->er_index = i;
+		mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements
+		mhi_event->intmod = 1; //Interrupt moderation time in ms
+		mhi_event->priority = MHI_ER_PRIORITY_MEDIUM; //Event ring priority
+
+		mhi_event->msi = 1 + i; //MSI associated with this event ring
+		if (i == IPA_OUT_EVENT_RING)
+		{
+			mhi_event->chan = MHI_CLIENT_IP_HW_0_OUT; //Dedicated channel number, if it's a dedicated event ring
+			mhi_event->ring.elements = NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS;
+			mhi_event->priority = MHI_ER_PRIORITY_HIGH;
+		}
+		else if (i == IPA_IN_EVENT_RING)
+		{
+			mhi_event->chan = MHI_CLIENT_IP_HW_0_IN; //Dedicated channel number, if it's a dedicated event ring
+			mhi_event->ring.elements = NUM_MHI_IPA_IN_EVT_RING_ELEMENTS;
+			mhi_event->priority = MHI_ER_PRIORITY_HIGH;
+		}
+		else
+			mhi_event->chan = 0;
+
+		if (mhi_event->chan >= mhi_cntrl->max_chan)
+			goto error_ev_cfg;
+
+		/* this event ring has a dedicated channel */
+		if (mhi_event->chan)
+			mhi_event->mhi_chan = &mhi_cntrl->mhi_chan[mhi_event->chan];
+
+		//mhi_event->priority = 1; //Event ring priority, set to 1 for now
+		mhi_event->db_cfg.brstmode = MHI_BRSTMODE_DISABLE;
+
+		if (mhi_event->chan == MHI_CLIENT_IP_HW_0_OUT || mhi_event->chan == MHI_CLIENT_IP_HW_0_IN)
+			mhi_event->db_cfg.brstmode = MHI_BRSTMODE_ENABLE;
+
+		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+			goto error_ev_cfg;
+
+		mhi_event->db_cfg.process_db =
+			(mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ?
+ mhi_db_brstmode : mhi_db_brstmode_disable; + + bit_cfg = (MHI_EV_CFG_BIT_CTRL_EV & 0); + if (bit_cfg & MHI_EV_CFG_BIT_HW_EV) { + mhi_event->hw_ring = true; + mhi_cntrl->hw_ev_rings++; + } else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = !!(bit_cfg & MHI_EV_CFG_BIT_CL_MANAGE); + mhi_event->offload_ev = !!(bit_cfg & MHI_EV_CFG_BIT_OFFLOAD_EV); + mhi_event->ctrl_ev = !!(bit_cfg & MHI_EV_CFG_BIT_CTRL_EV); + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1; + + return 0; + + error_ev_cfg: + return -EINVAL; +} +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + u32 num, i; + u32 ring = 0; + + struct chan_cfg_t { + const char *chan_name; + u32 chan_id; + u32 elements; + }; + + static struct chan_cfg_t chan_cfg[] = { + //"Qualcomm PCIe Loopback" + {"LOOPBACK", MHI_CLIENT_LOOPBACK_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"LOOPBACK", MHI_CLIENT_LOOPBACK_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm PCIe Sahara" + {"SAHARA", MHI_CLIENT_SAHARA_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"SAHARA", MHI_CLIENT_SAHARA_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm PCIe Diagnostics" + {"DIAG", MHI_CLIENT_DIAG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DIAG", MHI_CLIENT_DIAG_IN, NUM_MHI_DIAG_IN_RING_ELEMENTS}, + //"Qualcomm PCIe QDSS Data" + {"QDSS", MHI_CLIENT_QDSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"QDSS", MHI_CLIENT_QDSS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm PCIe EFS" + {"EFS", MHI_CLIENT_EFS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EFS", MHI_CLIENT_EFS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#ifdef CONFIG_MHI_NETDEV_MBIM + //"Qualcomm PCIe MBIM" + {"MBIM", MHI_CLIENT_MBIM_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"MBIM", MHI_CLIENT_MBIM_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#else + //"Qualcomm PCIe QMI" + {"QMI0", MHI_CLIENT_QMI_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"QMI0", MHI_CLIENT_QMI_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm PCIe QMI" + //{"QMI1", MHI_CLIENT_QMI_2_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"QMI1", MHI_CLIENT_QMI_2_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#endif + //"Qualcomm PCIe IP CTRL" + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + + //"Qualcomm PCIe Boot Logging" + //{"BL", MHI_CLIENT_BOOT_LOG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"BL", MHI_CLIENT_BOOT_LOG_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm PCIe Modem" + {"DUN", MHI_CLIENT_DUN_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DUN", MHI_CLIENT_DUN_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm EDL " + {"EDL", MHI_CLIENT_EDL_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EDL", MHI_CLIENT_EDL_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + + {"GNSS", MHI_CLIENT_GNSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"GNSS", MHI_CLIENT_GNSS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + + {"AUDIO", MHI_CLIENT_AUDIO_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"AUDIO", MHI_CLIENT_AUDIO_IN, NUM_MHI_CHAN_RING_ELEMENTS}, + //"Qualcomm PCIe WWAN Adapter" + {"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS}, + {"IP_HW0", MHI_CLIENT_IP_HW_0_IN, NUM_MHI_IPA_IN_RING_ELEMENTS}, + }; + + mhi_cntrl->max_chan = MHI_MAX_CHANNELS; + num = sizeof(chan_cfg)/sizeof(chan_cfg[0]); + + mhi_cntrl->mhi_chan = mhi_cntrl->data->mhi_chan, + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for (i = 0; i < num; i++) { + struct mhi_chan *mhi_chan; + u32 chan_id = chan_cfg[i].chan_id; + u32 bit_cfg = 0; + u32 pollcfg = 0; + enum MHI_BRSTMODE brstmode = 
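/*
 * Note on the channel table above: MHI channel IDs follow the usual
 * even = host-to-device (OUT), odd = device-to-host (IN) convention,
 * which is why the loop below can derive the direction from the low bit
 * alone:
 *
 *   mhi_chan->dir = (chan_id & 1) ? INBOUND_CHAN : OUTBOUND_CHAN;
 *
 * The numeric IDs themselves (MHI_CLIENT_*) live in mhi.h, not in this
 * hunk; on this family of Qualcomm-based modems the hardware-accelerated
 * data path MHI_CLIENT_IP_HW_0_OUT/IN is typically the 100/101 pair.
 */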
MHI_BRSTMODE_DISABLE; + + if (chan_id >= mhi_cntrl->max_chan) + goto error_chan_cfg; + + mhi_chan = &mhi_cntrl->mhi_chan[chan_id]; + mhi_chan->chan = chan_id; + mhi_chan->buf_ring.elements = chan_cfg[i].elements; + if (MHI_HW_CHAN(mhi_chan->chan) || mhi_chan->chan == MHI_CLIENT_DIAG_IN) { + mhi_chan->ring = 0; + } + else { + mhi_chan->ring = ring; + ring += mhi_chan->buf_ring.elements; + } + mhi_chan->tre_ring.elements = mhi_chan->buf_ring.elements; + if (chan_id == MHI_CLIENT_IP_HW_0_OUT) + mhi_chan->er_index = IPA_OUT_EVENT_RING; + else if (chan_id == MHI_CLIENT_IP_HW_0_IN) + mhi_chan->er_index = IPA_IN_EVENT_RING; + else + mhi_chan->er_index = PRIMARY_EVENT_RING; + mhi_chan->dir = (chan_cfg[i].chan_id&1) ? INBOUND_CHAN : OUTBOUND_CHAN; + + mhi_chan->db_cfg.pollcfg = pollcfg; + mhi_chan->ee = MHI_EE_AMSS; + if (CHAN_SBL(chan_cfg[i].chan_id)) + mhi_chan->ee = MHI_EE_SBL; + else if (CHAN_EDL(chan_cfg[i].chan_id)) + mhi_chan->ee = MHI_EE_FP; + + mhi_chan->xfer_type = MHI_XFER_BUFFER; + if ((chan_cfg[i].chan_id == MHI_CLIENT_IP_HW_0_OUT || chan_cfg[i].chan_id == MHI_CLIENT_IP_HW_0_IN) + || (chan_cfg[i].chan_id == MHI_CLIENT_IP_SW_0_OUT || chan_cfg[i].chan_id == MHI_CLIENT_IP_SW_0_IN)) + mhi_chan->xfer_type = MHI_XFER_SKB; + + switch (mhi_chan->xfer_type) { + case MHI_XFER_BUFFER: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_buf; + break; + case MHI_XFER_SKB: + mhi_chan->queue_xfer = mhi_queue_skb; + break; + case MHI_XFER_SCLIST: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_sclist; + break; + case MHI_XFER_NOP: + mhi_chan->queue_xfer = mhi_queue_nop; + break; + default: + goto error_chan_cfg; + } + + mhi_chan->lpm_notify = !!(bit_cfg & MHI_CH_CFG_BIT_LPM_NOTIFY); + mhi_chan->offload_ch = !!(bit_cfg & MHI_CH_CFG_BIT_OFFLOAD_CH); + mhi_chan->db_cfg.reset_req = + !!(bit_cfg & MHI_CH_CFG_BIT_DBMODE_RESET_CH); + mhi_chan->pre_alloc = !!(bit_cfg & MHI_CH_CFG_BIT_PRE_ALLOC); + + if (mhi_chan->pre_alloc && + (mhi_chan->dir != DMA_FROM_DEVICE || + mhi_chan->xfer_type != MHI_XFER_BUFFER)) + goto error_chan_cfg; + + /* if mhi host allocate the buffers then client cannot queue */ + if (mhi_chan->pre_alloc) + mhi_chan->queue_xfer = mhi_queue_nop; + + mhi_chan->name = chan_cfg[i].chan_name; + + if (!mhi_chan->offload_ch) { + if (mhi_chan->chan == MHI_CLIENT_IP_HW_0_OUT || mhi_chan->chan == MHI_CLIENT_IP_HW_0_IN) + brstmode = MHI_BRSTMODE_ENABLE; + + mhi_chan->db_cfg.brstmode = brstmode; + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) + goto error_chan_cfg; + + mhi_chan->db_cfg.process_db = + (mhi_chan->db_cfg.brstmode == + MHI_BRSTMODE_ENABLE) ? 
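/*
 * Illustrative sketch of what the xfer_type dispatch above buys: clients
 * never call mhi_queue_skb()/mhi_queue_buf() directly, they go through
 * the per-channel function pointer chosen here. example_send() is a
 * hypothetical client helper:
 *
 *   static int example_send(struct mhi_device *mhi_dev, struct sk_buff *skb)
 *   {
 *       struct mhi_chan *chan = mhi_dev->ul_chan;
 *
 *       // resolves to mhi_queue_skb() for the IP_HW channels and to
 *       // mhi_queue_buf() for plain MHI_XFER_BUFFER channels
 *       return chan->queue_xfer(mhi_dev, chan, skb, skb->len, MHI_EOT);
 *   }
 *
 * Pre-allocated channels are forced to mhi_queue_nop() further down so
 * clients cannot queue into rings the host already owns.
 */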
mhi_db_brstmode : mhi_db_brstmode_disable;
+ }
+ mhi_chan->configured = true;
+
+ if (mhi_chan->lpm_notify)
+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+ }
+
+ MHI_LOG("chan rings need %d elements, chan_ring capacity %zd\n",
+ ring, sizeof(mhi_cntrl->data->mhi_ctxt.ctrl_seg->chan_ring)/sizeof(struct __packed mhi_tre));
+
+ if (ring > sizeof(mhi_cntrl->data->mhi_ctxt.ctrl_seg->chan_ring)/sizeof(struct __packed mhi_tre))
+ return -ENOMEM;
+
+ return 0;
+
+error_chan_cfg:
+ return -EINVAL;
+}
+
+static int of_parse_dt(struct mhi_controller *mhi_cntrl, struct device_node *of_node)
+{
+ int ret;
+
+ mhi_cntrl->fw_image = NULL;
+ mhi_cntrl->edl_image = NULL;
+ mhi_cntrl->fbc_download = 0;
+ mhi_cntrl->sbl_size = 0;
+ mhi_cntrl->seg_len = 0;
+
+ /* parse MHI channel configuration */
+ ret = of_parse_ch_cfg(mhi_cntrl, of_node);
+ if (ret) {
+ MHI_ERR("Error of_parse_ch_cfg ret:%d\n", ret);
+ return ret;
+ }
+
+ /* parse MHI event configuration */
+ ret = of_parse_ev_cfg(mhi_cntrl, of_node);
+ if (ret) {
+ MHI_ERR("Error of_parse_ev_cfg ret:%d\n", ret);
+ goto error_ev_cfg;
+ }
+
+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+ return 0;
+
+ error_ev_cfg:
+ return ret;
+}
+
+int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ int i;
+ struct mhi_event *mhi_event;
+ struct mhi_chan *mhi_chan;
+ struct mhi_cmd *mhi_cmd;
+
+ mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR;
+
+ if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
+ return -EINVAL;
+
+ if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
+ return -EINVAL;
+
+ ret = of_parse_dt(mhi_cntrl, NULL);
+ if (ret) {
+ MHI_ERR("Error of_parse_dt ret:%d\n", ret);
+ return -EINVAL;
+ }
+
+ mhi_cntrl->mhi_cmd = &mhi_cntrl->data->mhi_cmd[0];
+
+ INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+ mutex_init(&mhi_cntrl->pm_mutex);
+ rwlock_init(&mhi_cntrl->pm_lock);
+ spin_lock_init(&mhi_cntrl->transition_lock);
+ spin_lock_init(&mhi_cntrl->wlock);
+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
+ INIT_WORK(&mhi_cntrl->m1_worker, mhi_pm_m1_worker);
+ INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+ init_waitqueue_head(&mhi_cntrl->state_event);
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+ spin_lock_init(&mhi_cmd->lock);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->mhi_cntrl = mhi_cntrl;
+ spin_lock_init(&mhi_event->lock);
+ if (mhi_event->ctrl_ev)
+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+ (ulong)mhi_event);
+ else
+ tasklet_init(&mhi_event->task, mhi_ev_task,
+ (ulong)mhi_event);
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ mutex_init(&mhi_chan->mutex);
+ init_completion(&mhi_chan->completion);
+ rwlock_init(&mhi_chan->lock);
+ }
+
+ mhi_cntrl->parent = mhi_bus.dentry;
+
+ /* add to list */
+ mutex_lock(&mhi_bus.lock);
+ list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list);
+ mutex_unlock(&mhi_bus.lock);
+
+ return 0;
+}
+
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
+{
+ mutex_lock(&mhi_bus.lock);
+ list_del(&mhi_cntrl->node);
+ mutex_unlock(&mhi_bus.lock);
+}
+
+/* set ptr to control private data */
+static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl,
+ void *priv)
+{
+ mhi_cntrl->priv_data = priv;
+}
+
+
+/* allocate mhi controller to
register */ +struct mhi_controller *mhi_alloc_controller(size_t size) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(size + sizeof(*mhi_cntrl) + sizeof(struct mhi_cntrl_data), GFP_KERNEL); + MHI_LOG("%d size = %zd\n", __LINE__, size + sizeof(*mhi_cntrl)); + + if (mhi_cntrl && size) + mhi_controller_set_devdata(mhi_cntrl, mhi_cntrl + 1); + + mhi_cntrl->data = (struct mhi_cntrl_data *)(((char *)mhi_cntrl) + (size + sizeof(*mhi_cntrl))); + + return mhi_cntrl; +} +EXPORT_SYMBOL(mhi_alloc_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) { + MHI_ERR("Error with init dev_ctxt\n"); + goto error_dev_ctxt; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting up irq\n"); + goto error_setup_irq; + } + + /* + * allocate rddm table if specified, this table is for debug purpose + * so we'll ignore erros + */ + if (mhi_cntrl->rddm_size) + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + + mhi_cntrl->pre_init = true; + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +error_setup_irq: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + mhi_cntrl->pre_init = false; +} + +/* match dev to drv */ +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + for (id = mhi_drv->id_table; id->chan != NULL && id->chan[0] != '\0'; id++) { + if (!strcmp(mhi_dev->chan_name, id->chan)) { + mhi_dev->id = id; + return 1; + } + } + + return 0; +}; + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + kfree(mhi_dev); +} + +struct bus_type mhi_bus_type = { + .name = "mhi", + .dev_name = "mhi", + .match = mhi_match, +}; + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + bool offload_ch = ((ul_chan && ul_chan->offload_ch) || + (dl_chan && dl_chan->offload_ch)); + + MHI_LOG("%s dev->name = %s\n", __func__, dev_name(dev)); + + /* all offload channels require status_cb to be defined */ + if (offload_ch) { + if (!mhi_dev->status_cb) + return -EINVAL; + mhi_dev->status_cb = mhi_drv->status_cb; + } + + if (ul_chan && !offload_ch) { + if (!mhi_drv->ul_xfer_cb) + return -EINVAL; + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + } + + if (dl_chan && !offload_ch) { + if (!mhi_drv->dl_xfer_cb) + return -EINVAL; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * if this channal event ring manage by client, then + * status_cb must be defined so we can send 
the async + * cb whenever there are pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + return -EINVAL; + mhi_dev->status_cb = mhi_drv->status_cb; + } + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum MHI_CH_STATE ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name); + + /* reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* move channel state to disable, no more processing */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + /* reset the channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + } + + /* destroy the device */ + mhi_drv->remove(mhi_dev); + + /* de_init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + if (ch_state[dir] == MHI_CH_STATE_ENABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mutex_unlock(&mhi_chan->mutex); + } + + /* relinquish any pending votes */ + read_lock_bh(&mhi_cntrl->pm_lock); + while (atomic_read(&mhi_dev->dev_wake)) + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_driver_register(struct mhi_driver *mhi_drv) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + return driver_register(driver); +} +EXPORT_SYMBOL(mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL(mhi_driver_unregister); + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + struct device *dev; + //MHI_LOG("%d size = %zd\n", __LINE__, sizeof(*mhi_dev)); + + if (!mhi_dev) + return NULL; + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + dev->parent = mhi_cntrl->dev; + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_id = mhi_cntrl->dev_id; + mhi_dev->domain = mhi_cntrl->domain; + mhi_dev->bus = mhi_cntrl->bus; + mhi_dev->slot = mhi_cntrl->slot; + mhi_dev->mtu = MHI_MAX_MTU; + atomic_set(&mhi_dev->dev_wake, 0); + + return mhi_dev; +} + +extern int mhi_dtr_init(void); +extern void mhi_dtr_exit(void); +extern int mhi_device_netdev_init(struct dentry *parent); +extern void mhi_device_netdev_exit(void); +extern int mhi_device_uci_init(void); +extern void mhi_device_uci_exit(void); +extern int mhi_controller_qcom_init(void); +extern void mhi_controller_qcom_exit(void); + +static char mhi_version[] = "Fibocom_Linux_PCIE_MHI_Driver_V1.0.5"; +module_param_string(mhi_version, mhi_version, 
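/*
 * Illustrative sketch of the client side of this bus: a driver registers
 * with mhi_driver_register() above, and mhi_match() pairs it with
 * devices by channel name. Everything named example_* (and the channel
 * name "EXAMPLE") is hypothetical; probe/remove are mandatory, and the
 * xfer callbacks are required for any non-offload channel the driver
 * claims (see mhi_driver_probe() above):
 *
 *   static const struct mhi_device_id example_id_table[] = {
 *       { .chan = "EXAMPLE" },
 *       { },
 *   };
 *
 *   static struct mhi_driver example_driver = {
 *       .id_table = example_id_table,
 *       .probe = example_probe,
 *       .remove = example_remove,
 *       .ul_xfer_cb = example_ul_cb,
 *       .dl_xfer_cb = example_dl_cb,
 *       .driver = { .name = "example_mhi_client" },
 *   };
 *
 *   // module init would then call: mhi_driver_register(&example_driver);
 */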
sizeof(mhi_version), S_IRUGO); + +static int __init mhi_init(void) +{ + int ret; + struct dentry *dentry; + + pr_err("INFO:%s %s\n", __func__, mhi_version); + + mutex_init(&mhi_bus.lock); + INIT_LIST_HEAD(&mhi_bus.controller_list); + dentry = debugfs_create_dir("mhi", NULL); + if (!IS_ERR_OR_NULL(dentry)) + mhi_bus.dentry = dentry; + + ret = bus_register(&mhi_bus_type); + if (ret) { + pr_err("Error bus_register ret:%d\n", ret); + return ret; + } + + ret = mhi_dtr_init(); + if (ret) { + pr_err("Error mhi_dtr_init ret:%d\n", ret); + bus_unregister(&mhi_bus_type); + return ret; + } + + ret = mhi_device_netdev_init(dentry); + if (ret) { + pr_err("Error mhi_device_netdev_init ret:%d\n", ret); + } + + ret = mhi_device_uci_init(); + if (ret) { + pr_err("Error mhi_device_uci_init ret:%d\n", ret); + } + + ret = mhi_controller_qcom_init(); + if (ret) { + pr_err("Error mhi_controller_qcom_init ret:%d\n", ret); + } + + return ret; +} + +static void mhi_exit(void) +{ + mhi_controller_qcom_exit(); + mhi_device_uci_exit(); + mhi_device_netdev_exit(); + mhi_dtr_exit(); + bus_unregister(&mhi_bus_type); + debugfs_remove_recursive(mhi_bus.dentry); +} + +module_init(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("MHI_CORE"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/fibocom_MHI/src/core/mhi_internal.h b/fibocom_MHI/src/core/mhi_internal.h new file mode 100644 index 0000000..2ab1771 --- /dev/null +++ b/fibocom_MHI/src/core/mhi_internal.h @@ -0,0 +1,800 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include +#ifndef writel_relaxed +#define writel_relaxed writel +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 3,10,108 )) +static inline void reinit_completion(struct completion *x) +{ + x->done = 0; +} +#endif + +extern struct bus_type mhi_bus_type; + +/* MHI mmio register mapping */ +#define PCI_INVALID_READ(val) (val == U32_MAX) + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define MHISTATUS_READY_SHIFT (0) + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) + +#define CCABAP_HIGHER (0x5C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) + +#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) + +#define CRCBAP_HIGHER (0x6C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) +#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) + +#define MHICTRLLIMIT_HIGHER (0x8C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 
(0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNUM (0x40)
+#define BHI_SERIALNU BHI_SERIALNUM /* keep the old misspelled name as an alias in case other files reference it */
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_OFFSET (0x0124) /* BHIE register space offset from BHI base */
+#define BHIE_MSMSOCID_OFFS (BHIE_OFFSET + 0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0030)
+#define BHIE_TXVECSIZE_OFFS (BHIE_OFFSET + 0x0034)
+#define BHIE_TXVECDB_OFFS (BHIE_OFFSET + 0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (BHIE_OFFSET + 0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0064)
+#define BHIE_RXVECSIZE_OFFS (BHIE_OFFSET + 0x0068)
+#define BHIE_RXVECDB_OFFS (BHIE_OFFSET + 0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (BHIE_OFFSET + 0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
+struct __packed mhi_event_ctxt {
+ u32 reserved : 8;
+ u32 intmodc : 8;
+ u32 intmodt : 16;
+ u32 ertype;
+ u32 msivec;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+};
+
+struct __packed mhi_chan_ctxt {
+ u32 chstate : 8;
+ u32 brstmode : 2;
+ u32 pollcfg : 6;
+ u32 reserved : 16;
+ u32 chtype;
+ u32 erindex;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+};
+
+struct __packed mhi_cmd_ctxt {
+ u32 reserved0;
+ u32 reserved1;
+ u32 reserved2;
+ u64 rbase;
+ u64 rlen;
+ u64 rp;
+ u64 wp;
+};
+
+struct
__packed mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +struct __packed bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +/* no operation command */ +#define MHI_TRE_CMD_NOOP_PTR cpu_to_le64(0) +#define MHI_TRE_CMD_NOOP_DWORD0 cpu_to_le32(0) +#define MHI_TRE_CMD_NOOP_DWORD1 cpu_to_le32(1 << 16) + +/* channel reset command */ +#define MHI_TRE_CMD_RESET_PTR cpu_to_le64(0) +#define MHI_TRE_CMD_RESET_DWORD0 cpu_to_le32(0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) cpu_to_le32((chid << 24) | (16 << 16)) + +/* channel stop command */ +#define MHI_TRE_CMD_STOP_PTR cpu_to_le64(0) +#define MHI_TRE_CMD_STOP_DWORD0 cpu_to_le32(0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) cpu_to_le32((chid << 24) | (17 << 16)) + +/* channel start command */ +#define MHI_TRE_CMD_START_PTR cpu_to_le64(0) +#define MHI_TRE_CMD_START_DWORD0 cpu_to_le32(0) +#define MHI_TRE_CMD_START_DWORD1(chid) cpu_to_le32((chid << 24) | (18 << 16)) + +#define MHI_TRE_GET_CMD_CHID(tre) ((le32_to_cpu((tre)->dword[1]) >> 24) & 0xFF) + +/* event descriptor macros */ +//#define MHI_TRE_EV_PTR(ptr) (ptr) +//#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) cpu_to_le32((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) ((le32_to_cpu((tre)->dword[0]) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (le32_to_cpu((tre)->dword[0]) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((le32_to_cpu((tre)->dword[1]) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((le32_to_cpu((tre)->dword[1]) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((le32_to_cpu((tre)->dword[0]) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((le32_to_cpu((tre)->dword[0]) >> 24) & 0xFF) + + +/* transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) +#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) cpu_to_le32((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +enum MHI_CMD { + MHI_CMD_NOOP = 0x0, + MHI_CMD_RESET_CHAN = 0x1, + MHI_CMD_STOP_CHAN = 0x2, + MHI_CMD_START_CHAN = 0x3, + MHI_CMD_RESUME_CHAN = 0x4, +}; + +enum MHI_PKT_TYPE { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum MHI_EV_CCS { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, + MHI_EV_CC_OOB = 0x5, + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum MHI_CH_STATE { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +enum MHI_CH_CFG { + MHI_CH_CFG_CHAN_ID = 0, + MHI_CH_CFG_ELEMENTS = 1, + MHI_CH_CFG_ER_INDEX = 2, + MHI_CH_CFG_DIRECTION = 3, + MHI_CH_CFG_BRSTMODE = 4, + MHI_CH_CFG_POLLCFG = 5, + MHI_CH_CFG_EE = 6, + MHI_CH_CFG_XFER_TYPE = 7, + MHI_CH_CFG_BITCFG = 8, + MHI_CH_CFG_MAX +}; + +#define MHI_CH_CFG_BIT_LPM_NOTIFY BIT(0) /* require LPM notification */ +#define MHI_CH_CFG_BIT_OFFLOAD_CH BIT(1) /* satellite mhi devices */ +#define 
MHI_CH_CFG_BIT_DBMODE_RESET_CH BIT(2) /* require db mode to reset */ +#define MHI_CH_CFG_BIT_PRE_ALLOC BIT(3) /* host allocate buffers for DL */ + +enum MHI_EV_CFG { + MHI_EV_CFG_ELEMENTS = 0, + MHI_EV_CFG_INTMOD = 1, + MHI_EV_CFG_MSI = 2, + MHI_EV_CFG_CHAN = 3, + MHI_EV_CFG_PRIORITY = 4, + MHI_EV_CFG_BRSTMODE = 5, + MHI_EV_CFG_BITCFG = 6, + MHI_EV_CFG_MAX +}; + +#define MHI_EV_CFG_BIT_HW_EV BIT(0) /* hw event ring */ +#define MHI_EV_CFG_BIT_CL_MANAGE BIT(1) /* client manages the event ring */ +#define MHI_EV_CFG_BIT_OFFLOAD_EV BIT(2) /* satellite driver manges it */ +#define MHI_EV_CFG_BIT_CTRL_EV BIT(3) /* ctrl event ring */ + +enum MHI_BRSTMODE { + MHI_BRSTMODE_DISABLE = 0x2, + MHI_BRSTMODE_ENABLE = 0x3, +}; + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \ + mode != MHI_BRSTMODE_ENABLE) + +enum MHI_EE { + MHI_EE_PBL = 0x0, /* Primary Boot Loader */ + MHI_EE_SBL = 0x1, /* Secondary Boot Loader */ + MHI_EE_AMSS = 0x2, /* AMSS Firmware */ + MHI_EE_RDDM = 0x3, /* WIFI Ram Dump Debug Module */ + MHI_EE_WFW = 0x4, /* WIFI (WLAN) Firmware */ + MHI_EE_PT = 0x5, /* PassThrough, Non PCIe BOOT (PCIe is BIOS locked, not used for boot */ + MHI_EE_EDL = 0x6, /* PCIe enabled in PBL for emergency download (Non PCIe BOOT) */ + MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */ + MHI_EE_BHIE = MHI_EE_FP, + MHI_EE_UEFI = 0x8, /* UEFI */ + + MHI_EE_DISABLE_TRANSITION = 0x9, + MHI_EE_MAX +}; + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PT || ee == MHI_EE_EDL) + +enum MHI_ST_TRANSITION { + MHI_ST_TRANSITION_PBL, + MHI_ST_TRANSITION_READY, + MHI_ST_TRANSITION_SBL, + MHI_ST_TRANSITION_AMSS, + MHI_ST_TRANSITION_FP, + MHI_ST_TRANSITION_BHIE = MHI_ST_TRANSITION_FP, + MHI_ST_TRANSITION_MAX, +}; + +extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX]; +#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : mhi_state_tran_str[state]) + +enum MHI_STATE { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_D3 = 0x6, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +/* internal power states */ +enum MHI_PM_STATE { + MHI_PM_DISABLE = BIT(0), /* MHI is not enabled */ + MHI_PM_POR = BIT(1), /* reset state */ + MHI_PM_M0 = BIT(2), + MHI_PM_M1 = BIT(3), + MHI_PM_M1_M2_TRANSITION = BIT(4), /* register access not allowed */ + MHI_PM_M2 = BIT(5), + MHI_PM_M3_ENTER = BIT(6), + MHI_PM_M3 = BIT(7), + MHI_PM_M3_EXIT = BIT(8), + MHI_PM_FW_DL_ERR = BIT(9), /* firmware download failure state */ + MHI_PM_SYS_ERR_DETECT = BIT(10), + MHI_PM_SYS_ERR_PROCESS = BIT(11), + MHI_PM_SHUTDOWN_PROCESS = BIT(12), + MHI_PM_LD_ERR_FATAL_DETECT = BIT(13), /* link not accessible */ +}; + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M1 | MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1)) +#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M1 | MHI_PM_M2)) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +/* accepted buffer type for the channel */ +enum MHI_XFER_TYPE { + MHI_XFER_BUFFER, + MHI_XFER_SKB, + MHI_XFER_SCLIST, + MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */ +}; + +#define NR_OF_CMD_RINGS (1) +#define CMD_EL_PER_RING (128) +#define PRIMARY_CMD_RING (0) +#define MHI_DEV_WAKE_DB (127) +#define MHI_M2_DEBOUNCE_TMR_US (10000) +#define MHI_MAX_MTU (0xffff) + +enum MHI_ER_TYPE { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum MHI_BRSTMODE brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum MHI_PM_STATE from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum MHI_ST_TRANSITION state; +}; + +/* Control Segment */ +struct mhi_ctrl_seg +{ + struct __packed mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); + struct __packed mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); + struct __packed mhi_tre diag_in_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); + struct __packed mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16); + //struct __packed mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __aligned(NUM_MHI_EVT_RING_ELEMENTS*16); + struct __packed mhi_tre event_ring_0[NUM_MHI_EVT_RING_ELEMENTS] __aligned(NUM_MHI_EVT_RING_ELEMENTS*16); + struct __packed mhi_tre event_ring_1[NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_OUT_EVT_RING_ELEMENTS*16); + struct __packed mhi_tre event_ring_2[NUM_MHI_IPA_IN_EVT_RING_ELEMENTS] __aligned(NUM_MHI_IPA_IN_EVT_RING_ELEMENTS*16); + struct __packed mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __aligned(CMD_EL_PER_RING*16); + + struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128); + struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128); + 
struct mhi_cmd_ctxt cmd_ctxt[NR_OF_CMD_RINGS] __aligned(128); +} __aligned(4096); + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; + struct mhi_ctrl_seg *ctrl_seg; + dma_addr_t ctrl_seg_addr; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + u64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + dma_addr_t p_addr; + void *v_addr; + void *wp; + size_t len; + void *cb_buf; + enum dma_data_direction dir; +}; + +struct mhi_event { + u32 er_index; + u32 intmod; + u32 msi; + int chan; /* this event ring is dedicated to a channel */ + u32 priority; + struct mhi_ring ring; + struct db_cfg db_cfg; + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ + bool ctrl_ev; + spinlock_t lock; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + struct tasklet_struct task; + struct mhi_controller *mhi_cntrl; +}; + +struct mhi_chan { + u32 chan; + u32 ring; + const char *name; + /* + * important, when consuming increment tre_ring first, when releasing + * decrement buf_ring first. If tre_ring has space, buf_ring + * guranteed to have space so we do not need to check both rings. + */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 er_index; + u32 intmod; + u32 tiocm; + u32 full; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum MHI_EE ee; + enum MHI_XFER_TYPE xfer_type; + enum MHI_CH_STATE ch_state; + enum MHI_EV_CCS ccs; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + /* functions that generate the transfer ring elements */ + int (*gen_tre)(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, void *buf, void *cb, + size_t len, enum MHI_FLAGS flags); + int (*queue_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS flags); + /* xfer call back */ + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; +}; + +struct mhi_bus { + struct list_head controller_list; + struct mutex lock; + struct dentry *dentry; +}; + +struct mhi_cntrl_data { + struct mhi_ctxt mhi_ctxt; + struct mhi_cmd mhi_cmd[NR_OF_CMD_RINGS]; + struct mhi_event mhi_event[NUM_MHI_EVT_RINGS]; + struct mhi_chan mhi_chan[MHI_MAX_CHANNELS]; +}; + +/* default MHI timeout */ +#define MHI_TIMEOUT_MS (3000) +extern struct mhi_bus mhi_bus; + +/* debug fs related functions */ +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d); +int mhi_debugfs_trigger_reset(void *data, u64 val); + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl); + +/* power management apis */ +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state); +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +enum MHI_EE mhi_get_exec_env(struct 
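/*
 * The tre_ring/buf_ring ordering rule documented in struct mhi_chan
 * above plays out in mhi_main.c; both rings always advance together:
 *
 *   // produce (mhi_gen_tre): tre_ring first, then buf_ring
 *   mhi_add_ring_element(mhi_cntrl, tre_ring);
 *   mhi_add_ring_element(mhi_cntrl, buf_ring);
 *
 *   // consume (parse_xfer_event): buf_ring first, then tre_ring
 *   mhi_del_ring_element(mhi_cntrl, buf_ring);
 *   mhi_del_ring_element(mhi_cntrl, tre_ring);
 *
 * so space in tre_ring implies space in buf_ring and a single fullness
 * check suffices.
 */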
mhi_controller *mhi_cntrl); +enum MHI_STATE mhi_get_m_state(struct mhi_controller *mhi_cntrl); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_fw_load_worker(struct work_struct *work); +void mhi_pm_m1_worker(struct work_struct *work); +void mhi_pm_sys_err_worker(struct work_struct *work); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason); + +/* queue transfer buffer */ +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags); +int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); + + +/* register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t wp); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t wp); +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t wp); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum MHI_STATE state); + +/* memory allocation methods */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 5,3,0 )) +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; +} +#endif + +static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + dma_addr_t *dma_handle, + gfp_t gfp) +{ + void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp); + + MHI_LOG("size = %zd, dma_handle = %llx\n", size, (u64)*dma_handle); + if (buf) { + //if (*dma_handle < mhi_cntrl->iova_start || 0 == mhi_cntrl->iova_start) + // mhi_cntrl->iova_start = (*dma_handle)&0xFFF0000000; + //if ((*dma_handle + size) > mhi_cntrl->iova_stop || 0 == mhi_cntrl->iova_stop) + // mhi_cntrl->iova_stop = ((*dma_handle + size)+0x0FFFFFFF)&0xFFF0000000; + } + if (buf) + atomic_add(size, &mhi_cntrl->alloc_size); + + return buf; +} 
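/*
 * Illustrative helper (not part of the original patch): the alloc_size
 * counter maintained by mhi_alloc_coherent()/mhi_free_coherent() above
 * gives a cheap leak check at controller teardown. mhi_check_dma_leaks
 * is a hypothetical name.
 */
static inline void mhi_check_dma_leaks(struct mhi_controller *mhi_cntrl)
{
	/* non-zero here means some mhi_alloc_coherent() was never freed */
	WARN_ON(atomic_read(&mhi_cntrl->alloc_size) != 0);
}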
+static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + atomic_sub(size, &mhi_cntrl->alloc_size); + dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle); +} +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); +static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + kfree(mhi_dev); +} +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info); + +/* initialization methods */ +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_dtr_init(void); + +/* isr handlers */ +irqreturn_t mhi_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev); +void mhi_ev_task(unsigned long data); + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#else + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MHI_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#endif + +#endif /* _MHI_INT_H */ diff --git a/fibocom_MHI/src/core/mhi_main.c b/fibocom_MHI/src/core/mhi_main.c new file mode 100644 index 0000000..feae931 --- /dev/null +++ b/fibocom_MHI/src/core/mhi_main.c @@ -0,0 +1,1612 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 *out) +{ + u32 tmp = readl_relaxed(base + offset); + + /* unexpected value, query the link status */ + if (PCI_INVALID_READ(tmp) && + mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data)) + return -EIO; + + *out = tmp; + + return 0; +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 mask, + u32 shift, + u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 val) +{ + writel_relaxed(val, base + offset); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 mask, + u32 shift, + u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, + void __iomem *db_addr, + dma_addr_t wp) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp)); +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t wp) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = wp; + mhi_write_db(mhi_cntrl, db_addr, wp); + db_cfg->db_mode = 0; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t wp) +{ + db_cfg->db_val = wp; + mhi_write_db(mhi_cntrl, db_addr, wp); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = cpu_to_le64(db); + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = cpu_to_le64(db); + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, + db); +} + +enum MHI_EE mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? MHI_EE_MAX : exec; +} + +enum MHI_STATE mhi_get_m_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? 
MHI_STATE_MAX : state;
+}
+
+int mhi_queue_sclist(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ return -EINVAL;
+}
+
+int mhi_queue_nop(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ return -EINVAL;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
+ if (ring->wp < ring->rp)
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+ return nr_el;
+}
+
+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr)
+{
+ return (addr - ring->base) + ring->iommu_base;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ dma_addr_t ctxt_wp;
+
+ /* update the WP */
+ ring->wp += ring->el_size;
+ ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len)) {
+ ring->wp = ring->base;
+ ctxt_wp = ring->iommu_base;
+ }
+
+ *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
+
+ /* update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+}
+
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ struct sk_buff *skb = buf;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_ERR("MHI is not in active state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+
+ /* generate the tre */
+ buf_info = buf_ring->wp;
+ buf_info->v_addr = skb->data;
+ buf_info->cb_buf = skb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+ buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr, len,
+ buf_info->dir);
+
+ if
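/*
 * Worked example for get_nr_avail_ring_elements() above: the helper
 * reports one element fewer than the raw gap so wp can never catch up
 * to rp (a completely full ring would otherwise be indistinguishable
 * from an empty one). With 8 elements, rp at index 2 and wp at index 6:
 *
 *   avail = (rp - base) / el_size                // 2 slots before rp
 *         + (base + len - wp) / el_size - 1      // 1 slot after wp
 *         = 3
 *
 * The slot just before rp (index 1 here) stays unusable by design,
 * matching mhi_is_ring_full()'s wp + 1 == rp test.
 */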
(dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + goto map_error; + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), le64_to_cpu(mhi_tre->ptr), + le32_to_cpu(mhi_tre->dword[0]), le32_to_cpu(mhi_tre->dword[1])); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + read_lock_bh(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_bh(&mhi_chan->lock); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE) { + bool override = (mhi_cntrl->pm_state != MHI_PM_M0); + + mhi_cntrl->wake_put(mhi_cntrl, override); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +map_error: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -ENOMEM; +} + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + void *buf, + void *cb, + size_t buf_len, + enum MHI_FLAGS flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + buf_info->v_addr = buf; + buf_info->cb_buf = cb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = buf_len; + buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf, buf_len, + buf_info->dir); + + if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + return -ENOMEM; + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), le64_to_cpu(mhi_tre->ptr), + le32_to_cpu(mhi_tre->dword[0]), le32_to_cpu(mhi_tre->dword[1])); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring; + unsigned long flags; + int ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + goto error_queue; + + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + if (unlikely(ret)) + goto error_queue; + + read_lock_irqsave(&mhi_cntrl->pm_lock, 
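/*
 * Note on the descriptor encoding used by mhi_gen_tre() above: dword[1]
 * is packed by MHI_TRE_DATA_DWORD1() from mhi_internal.h as
 *
 *   (2 << 16)        type  = MHI_PKT_TYPE_TRANSFER
 *   | (bei << 10)    block event interrupt (set when intmod is nonzero)
 *   | (ieot << 9)    interrupt on end of transfer (MHI_EOT)
 *   | (ieob << 8)    interrupt on end of block (MHI_EOB)
 *   | chain          TRE chains to the next element (MHI_CHAIN)
 *
 * so a plain mhi_queue_buf(..., MHI_EOT) on a channel without interrupt
 * moderation encodes dword[1] = 0x00020200.
 */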
flags); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE) { + bool override = (mhi_cntrl->pm_state != MHI_PM_M0); + + mhi_cntrl->wake_put(mhi_cntrl, override); + } + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; + +error_queue: + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return -ENOMEM; +} + +/* destroy specific device */ +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + struct mhi_driver *mhi_drv; + struct mhi_controller *mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_drv = to_mhi_driver(dev->driver); + mhi_cntrl = mhi_dev->mhi_cntrl; + + MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name); + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* remove device associated with the channel */ + mutex_lock(&mhi_chan->mutex); + mutex_unlock(&mhi_chan->mutex); + } + + /* notify the client and remove the device from mhi bus */ + device_del(dev); + + return 0; +} + +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} + +/* bind mhi channels into mhi devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + int ret; + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->ee != mhi_cntrl->ee) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = mhi_chan->er_index; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + } + + mhi_chan->mhi_dev = mhi_dev; + + /* check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = + mhi_chan->er_index; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = + mhi_chan->er_index; + } + mhi_chan->mhi_dev = mhi_dev; + } + } + + mhi_dev->chan_name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, + mhi_dev->slot, mhi_dev->chan_name); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + } + } + + //mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; +} + +static int 
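/*
 * Note on mhi_create_devices() above: an adjacent OUT/IN channel pair
 * with the same name is folded into a single mhi_device (which is why
 * the chan_cfg table lists e.g. {"QMI0", OUT} and {"QMI0", IN} back to
 * back), and the device name encodes PCI identity plus channel name:
 *
 *   "%04x_%02u.%02u.%02u_%s"  ->  e.g. "0306_00.01.00_QMI0"
 *
 * (the dev_id/domain/bus/slot values in the example are illustrative).
 */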
parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + u32 ev_code; + struct mhi_result result; + unsigned long flags = 0; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + if (CHAN_INBOUND(mhi_chan->chan) && (tre_ring->rp + tre_ring->el_size == tre_ring->wp)) { + mhi_chan->full++; + } + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * if it's a DB Event then we need to grab the lock + * with preemption disable and as a write because we + * have to update db register and another thread could + * be doing same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + /* device rp after servicing the TREs */ + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + result.dir = mhi_chan->dir; + + /* local rp */ + local_rp = tre_ring->rp; + MHI_VERB("base=%p, local_wp=%p, local_rp=%p, dev_rp=%p\n", tre_ring->base, tre_ring->wp, tre_ring->rp, dev_rp); + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* if it's last tre get len from the event */ + if (local_rp == ev_tre) + xfer_len = MHI_TRE_GET_EV_LEN(event); + else + xfer_len = buf_info->len; + + dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, + buf_info->len, buf_info->dir); + + result.buf_addr = buf_info->cb_buf; + result.bytes_xferd = xfer_len; + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + MHI_VERB("buf_addr=%p, bytes_xferd=%zd\n", result.buf_addr, result.bytes_xferd); + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + /* + * recycle the buffer if buffer is pre-allocated, + * if there is error, not much we can do apart from + * dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + MHI_ERR( + "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + }; + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan); + mhi_chan->db_cfg.db_mode = 1; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + MHI_ASSERT(1, "Received BAD TRE event for ring"); + break; + default: + MHI_CRITICAL("Unknown TX completion.\n"); + + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + 
read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +#include "mhi_common.h" +static void mhi_dump_tre(struct mhi_controller *mhi_cntrl, struct mhi_tre *_ev) { + union mhi_dev_ring_element_type *ev = (union mhi_dev_ring_element_type *)_ev; + + switch (ev->generic.type) { + case MHI_DEV_RING_EL_INVALID: { + MHI_ERR("carl_ev cmd_invalid, ptr=%llx, %x, %x\n", _ev->ptr, _ev->dword[0], _ev->dword[1]); + } + break; + case MHI_DEV_RING_EL_NOOP: { + MHI_LOG("carl_ev cmd_no_op chan=%u\n", ev->cmd_no_op.chid); + } + break; + case MHI_DEV_RING_EL_TRANSFER: { + MHI_LOG("carl_ev tre data=%llx, len=%u, chan=%u\n", + ev->tre.data_buf_ptr, ev->tre.len, ev->tre.chain); + } + break; + case MHI_DEV_RING_EL_RESET: { + MHI_LOG("carl_ev cmd_reset chan=%u\n", ev->cmd_reset.chid); + } + break; + case MHI_DEV_RING_EL_STOP: { + MHI_LOG("carl_ev cmd_stop chan=%u\n", ev->cmd_stop.chid); + } + break; + case MHI_DEV_RING_EL_START: { + MHI_LOG("carl_ev cmd_start chan=%u\n", ev->cmd_start.chid); + } + break; + case MHI_DEV_RING_EL_MHI_STATE_CHG: { + MHI_LOG("carl_ev evt_state_change mhistate=%u\n", ev->evt_state_change.mhistate); + } + break; + case MHI_DEV_RING_EL_CMD_COMPLETION_EVT:{ + MHI_LOG("carl_ev evt_cmd_comp code=%u\n", ev->evt_cmd_comp.code); + } + break; + case MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT:{ + MHI_VERB("carl_ev evt_tr_comp ptr=%llx, len=%u, code=%u, chan=%u\n", + ev->evt_tr_comp.ptr, ev->evt_tr_comp.len, ev->evt_tr_comp.code, ev->evt_tr_comp.chid); + } + break; + case MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY:{ + MHI_LOG("carl_ev evt_ee_state execenv=%u\n", ev->evt_ee_state.execenv); + } + break; + case MHI_DEV_RING_EL_UNDEF: + default: { + MHI_ERR("carl_ev el_undef type=%d\n", ev->generic.type); + }; + break; + } +} + + +static int mhi_process_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp)); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + + mhi_dump_tre(mhi_cntrl, local_rp); + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + switch (type) { + case MHI_PKT_TYPE_TX_EVENT: + { + u32 chan; + struct mhi_chan *mhi_chan; + + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + break; + } + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum MHI_STATE new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + MHI_LOG("MHI state change event to state:%s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum MHI_PM_STATE new_state; + + MHI_ERR("MHI system error detected\n"); + 
write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + schedule_work( + &mhi_cntrl->syserr_worker); + break; + } + default: + MHI_ERR("Unsupported STE:%s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(local_rp); + struct mhi_cmd *cmd_ring = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + /* out of order completion received */ + MHI_ASSERT(cmd_pkt != mhi_ring->rp, + "Out of order cmd completion"); + + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(local_rp); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + mhi_del_ring_element(mhi_cntrl, mhi_ring); + break; + } + case MHI_PKT_TYPE_EE_EVENT: + { + enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX; + enum MHI_EE event = MHI_TRE_GET_EV_EXECENV(local_rp); + + MHI_LOG("MHI EE received event:%s, old EE:%s\n", + TO_MHI_EXEC_STR(event), TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + switch (event) { + case MHI_EE_SBL: + st = MHI_ST_TRANSITION_SBL; + break; + case MHI_EE_AMSS: + st = MHI_ST_TRANSITION_AMSS; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, + mhi_cntrl->priv_data, + MHI_CB_EE_RDDM); + break; + /* fall thru to wake up the event */ + case MHI_EE_WFW: + case MHI_EE_PT: + case MHI_EE_EDL: + case MHI_EE_FP: + case MHI_EE_UEFI: + write_lock_irq(&mhi_cntrl->pm_lock); + if (event == MHI_EE_FP) + st = MHI_ST_TRANSITION_FP; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + break; + default: + MHI_ERR("Unhandled EE event:%s\n", + TO_MHI_EXEC_STR(event)); + } + if (st != MHI_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + break; + } + case MHI_PKT_TYPE_STALE_EVENT: + MHI_VERB("Stale Event received for chan:%u\n", + MHI_TRE_GET_EV_CHID(local_rp)); + break; + default: + //MHI_ERR("Unsupported packet type code 0x%x\n", type); + break; + } + + memset((unsigned char *)ev_ring->rp, 0x00, ev_ring->el_size); //carl.yin debug + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp)); + count++; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + return count; +} + +void mhi_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + + MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index); + + /* process all pending events */ + spin_lock_bh(&mhi_event->lock); + mhi_process_event_ring(mhi_cntrl, mhi_event, U32_MAX); + spin_unlock_bh(&mhi_event->lock); +} + +void mhi_ctrl_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + enum MHI_STATE state = MHI_STATE_MAX; + enum MHI_PM_STATE pm_state = 0; + int ret; + + MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index); + + /* process ctrl events events */ + ret = 
mhi_process_event_ring(mhi_cntrl, mhi_event, U32_MAX); + + /* + * we received a MSI but no events to process maybe device went to + * SYS_ERR state, check the state + */ + if (!ret) { + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + state = mhi_get_m_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + MHI_ERR("MHI system error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + } +} + +irqreturn_t mhi_msi_handlr(int irq_number, void *dev) +{ + struct mhi_event *mhi_event = dev; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp)); + + if (mhi_cntrl->msi_allocated == 1) + { + unsigned long flags; + int i; + enum MHI_STATE mhi_state = mhi_get_m_state(mhi_cntrl); + + if (mhi_state == MHI_STATE_SYS_ERR) { + enum MHI_PM_STATE pm_state = 0; + + MHI_ERR("MHI system error detected\n"); + write_lock_irqsave(&mhi_cntrl->pm_lock, flags); + pm_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); + write_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + } else if (mhi_state != mhi_cntrl->dev_state) { + MHI_LOG("MHISTATUS %s -> %s\n", TO_MHI_STATE_STR(mhi_cntrl->dev_state), TO_MHI_STATE_STR(mhi_state)); + wake_up(&mhi_cntrl->state_event); + } + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->data->mhi_event; + for (i = 0; i < NUM_MHI_EVT_RINGS; i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp)); + + if (ev_ring->rp != dev_rp) { + MHI_VERB("local_rp=%p vs dev_rp=%p\n", ev_ring->rp, dev_rp); + if (mhi_event->priority == MHI_ER_PRIORITY_HIGH) + { + tasklet_hi_schedule(&mhi_event->task); + } + else + { + tasklet_schedule(&mhi_event->task); + } + } + } + + return IRQ_HANDLED; + } + + /* confirm ER has pending events to process before scheduling work */ + if (ev_ring->rp == dev_rp) + return IRQ_HANDLED; + + /* client managed event ring, notify pending data */ + if (mhi_event->cl_manage) { + struct mhi_chan *mhi_chan = mhi_event->mhi_chan; + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + + if (mhi_dev) + mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA); + } + else + { + if (mhi_event->priority == MHI_ER_PRIORITY_HIGH) + { + tasklet_hi_schedule(&mhi_event->task); + } + else + { + tasklet_schedule(&mhi_event->task); + } + } + + return IRQ_HANDLED; +} + +/* this is the threaded fn */ +irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + enum MHI_STATE state = MHI_STATE_MAX; + enum MHI_PM_STATE pm_state = 0; + + MHI_VERB("Enter\n"); + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + state = mhi_get_m_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + MHI_ERR("MHI system error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + + MHI_VERB("Exit\n"); + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev) 
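+/*
+ * Hard-IRQ half of the BHI interrupt: keep it minimal, just wake any
+ * waiters blocked on a state change and return IRQ_WAKE_THREAD so the
+ * register reads happen in mhi_intvec_threaded_handlr() thread context.
+ */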
+{ + + struct mhi_controller *mhi_cntrl = dev; + + /* wake up any events waiting for state change */ + MHI_VERB("Enter\n"); + wake_up(&mhi_cntrl->state_event); + MHI_VERB("Exit\n"); + + return IRQ_WAKE_THREAD; +} + +static int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum MHI_CMD cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + int chan = mhi_chan->chan; + + MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* MHI host currently handles RESET and START cmd */ + if (cmd != MHI_CMD_START_CHAN && cmd != MHI_CMD_RESET_CHAN) + return -EINVAL; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + if (cmd == MHI_CMD_START_CHAN) { + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + } else { + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + } + + MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", + (u64)mhi_to_physical(ring, cmd_tre), le64_to_cpu(cmd_tre->ptr), + le32_to_cpu(cmd_tre->dword[0]), le32_to_cpu(cmd_tre->dword[1])); + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + + MHI_LOG("Entered: preparing channel:%d\n", mhi_chan->chan); + + if (mhi_cntrl->ee != mhi_chan->ee) { + MHI_ERR("Current EE:%s Required EE:%s for chan:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), + TO_MHI_EXEC_STR(mhi_chan->ee), + mhi_chan->name); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + /* client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error with init chan\n"); + goto error_init_chan; + } + } + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI host is not in active state\n"); + read_unlock_bh(&mhi_cntrl->pm_lock); + ret = -EIO; + goto error_pm_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); + if (ret) { + MHI_ERR("Failed to send start chan cmd\n"); + goto error_send_cmd; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive cmd completion for chan:%d\n", + mhi_chan->chan); + ret = -EIO; + goto error_send_cmd; + } + + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_ENABLED; + write_unlock_irq(&mhi_chan->lock); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, 
false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* pre allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + + while (nr_el--) { + void *buf; + + buf = kmalloc(MHI_MAX_MTU, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + ret = mhi_queue_buf(mhi_dev, mhi_chan, buf, MHI_MAX_MTU, + MHI_EOT); + if (ret) { + MHI_ERR("Chan:%d error queue buffer\n", + mhi_chan->chan); + kfree(buf); + goto error_pre_alloc; + } + } + } + + mutex_unlock(&mhi_chan->mutex); + + MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan); + + return 0; + +error_send_cmd: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + mutex_unlock(&mhi_chan->mutex); + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_event_ctxt *er_ctxt; + struct mhi_event *mhi_event; + struct mhi_ring *ev_ring, *buf_ring, *tre_ring; + unsigned long flags; + int chan = mhi_chan->chan; + struct mhi_result result; + + /* nothing to reset, client don't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + ev_ring = &mhi_event->ring; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + MHI_LOG("Marking all events for chan:%d as stale\n", chan); + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp)); + if (!mhi_event->mhi_chan) { + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == + MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + } else { + /* dedicated event ring so move the ptr to end */ + ev_ring->rp = dev_rp; + ev_ring->wp = ev_ring->rp - ev_ring->el_size; + if (ev_ring->wp < ev_ring->base) + ev_ring->wp = ev_ring->base + ev_ring->len - + ev_ring->el_size; + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + } + + MHI_LOG("Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) + mhi_cntrl->wake_put(mhi_cntrl, false); + + dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, + buf_info->len, buf_info->dir); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + MHI_LOG("Reset complete.\n"); +} + +static void 
__mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + + MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan); + + /* no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan); + write_unlock_irq(&mhi_chan->lock); + mutex_unlock(&mhi_chan->mutex); + return; + } + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + goto error_invalid_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); + if (ret) { + MHI_ERR("Failed to send reset chan cmd\n"); + goto error_completion; + } + + /* even if it fails we will still reset */ + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) + MHI_ERR("Failed to receive cmd completion, still resetting\n"); + +error_completion: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_invalid_state: + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan); + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + u32 reset=-1, ready=-1; + int ret; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, &reset); + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, &ready); + if (!ret) + seq_printf(m, + "Device current EE:%s, M:%s, RESET:%d, READY:%d\n", + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)), + TO_MHI_STATE_STR(mhi_get_m_state(mhi_cntrl)), + reset, ready); + + seq_printf(m, + "pm_state:%s dev_state:%s EE:%s M0:%u M1:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->M0, mhi_cntrl->M1, mhi_cntrl->M2, mhi_cntrl->M3, + mhi_cntrl->wake_set, + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->alloc_size)); + + return 0; +} + +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + + int i; + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index:%d offload event ring\n", i); + } else { + seq_printf(m, + "Index:%d modc:%d modt:%d base:0x%0llx len:0x%llx", + i, er_ctxt->intmodc, er_ctxt->intmodt, + er_ctxt->rbase, er_ctxt->rlen); + seq_printf(m, + " rp:0x%llx wp:0x%llx local_rp:0x%llx db:0x%llx\n", + er_ctxt->rp, er_ctxt->wp, + (u64)mhi_to_physical(ring, ring->rp), + 
(u64)mhi_event->db_cfg.db_val); + { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t i; + for (i = 0; i < ring->elements; i++, tre++) { + seq_printf(m, + "%llx, %x, %x\n", + tre->ptr, tre->dword[0], tre->dword[1]); + } + } + } + } + + return 0; +} + +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) offload channel\n", + mhi_chan->name, mhi_chan->chan); + } else if (mhi_chan->mhi_dev) { + seq_printf(m, + "%s(%u) state:0x%x brstmode:0x%x pllcfg:0x%x type:0x%x erindex:%u", + mhi_chan->name, mhi_chan->chan, + chan_ctxt->chstate, chan_ctxt->brstmode, + chan_ctxt->pollcfg, chan_ctxt->chtype, + chan_ctxt->erindex); + seq_printf(m, + " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx full:%d\n", + chan_ctxt->rbase, chan_ctxt->rlen, + chan_ctxt->wp, + (u64)mhi_to_physical(ring, ring->rp), + (u64)mhi_to_physical(ring, ring->wp), + (u64)mhi_chan->db_cfg.db_val, mhi_chan->full); + } + } + + return 0; +} + +/* move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error moving chan %s,%d to START state\n", + mhi_chan->name, mhi_chan->chan); + goto error_open_chan; + } + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } +} +EXPORT_SYMBOL(mhi_unprepare_from_transfer); + +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
+		mhi_dev->ul_chan : mhi_dev->dl_chan;
+	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL(mhi_get_no_free_descriptors);
+
+struct mhi_controller *mhi_bdf_to_controller(u32 domain,
+					     u32 bus,
+					     u32 slot,
+					     u32 dev_id)
+{
+	struct mhi_controller *itr, *tmp;
+
+	list_for_each_entry_safe(itr, tmp, &mhi_bus.controller_list, node)
+		if (itr->domain == domain && itr->bus == bus &&
+		    itr->slot == slot && itr->dev_id == dev_id)
+			return itr;
+
+	return NULL;
+}
+EXPORT_SYMBOL(mhi_bdf_to_controller);
+
+int mhi_poll(struct mhi_device *mhi_dev,
+	     u32 budget)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+	int ret;
+
+	spin_lock_bh(&mhi_event->lock);
+	ret = mhi_process_event_ring(mhi_cntrl, mhi_event, budget);
+	spin_unlock_bh(&mhi_event->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_poll);
diff --git a/fibocom_MHI/src/core/mhi_pm.c b/fibocom_MHI/src/core/mhi_pm.c
new file mode 100644
index 0000000..039de5b
--- /dev/null
+++ b/fibocom_MHI/src/core/mhi_pm.c
@@ -0,0 +1,1271 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * NOTE: the angle-bracket header names were lost when this patch was
+ * extracted; the list below is an assumed reconstruction of the usual
+ * set for this file and may need adjusting against the original source.
+ */
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "mhi.h"
+#include "mhi_internal.h"
+
+/*
+ * Not all MHI state transitions are sync transitions. Linkdown, SSR, and
+ * shutdown can happen asynchronously at any time. This function will move to
+ * the new state only if the transition is allowed.
+ *
+ * Priority increases as we go down: while in any L0 state, a state from L1,
+ * L2, or L3 can be set. A notable exception to this rule is state DISABLE;
+ * from DISABLE we can only transition to POR. Also, for example, while in an
+ * L2 state, the user cannot jump back to L1 or L0 states.
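+ *
+ * For example, from M0 (an L0 state) the driver may move to SYS_ERR_DETECT
+ * (L1) or SHUTDOWN_PROCESS (L2), but once in SYS_ERR_PROCESS the only path
+ * back to M0 is through POR, per the table below.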
+ * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M1 -> M1_M2 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 -> FW_DL_ERR + * M1_M2 -> M0 (Device can trigger it) + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * M1 -> M3_ENTER --> M3 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + */ +static struct mhi_pm_transitions const mhi_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M1 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT | + MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M1, + MHI_PM_M1_M2_TRANSITION | MHI_PM_M3_ENTER | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M1_M2_TRANSITION, + MHI_PM_M2 | MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + }, +}; + +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) { + MHI_CRITICAL("cur_state:%s is not a valid pm_state\n", + to_mhi_pm_state_str(cur_state)); + return cur_state; + } + + if (unlikely(mhi_state_transitions[index].from_state != cur_state)) { + MHI_ERR("index:%u cur_state:%s != actual_state: %s\n", + index, to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str + (mhi_state_transitions[index].from_state)); + return cur_state; + } + + if (unlikely(!(mhi_state_transitions[index].to_states & state))) { + MHI_LOG( + "Not allowing pm state transition from:%s to:%s state\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(state)); + return cur_state; + } + + MHI_VERB("Transition to pm state from:%s to:%s\n", + to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state)); + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum MHI_STATE state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, 
MHICTRL_RESET_SHIFT, 1);
+	} else {
+		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+			MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state);
+	}
+}
+
+/* set device wake */
+void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+	unsigned long flags;
+
+	/* if force is set, set the wake bit regardless of count */
+	if (unlikely(force)) {
+		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+		atomic_inc(&mhi_cntrl->dev_wake);
+		if (MHI_WAKE_DB_ACCESS_VALID(mhi_cntrl->pm_state) &&
+		    !mhi_cntrl->wake_set) {
+			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+			mhi_cntrl->wake_set = true;
+		}
+		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+	} else {
+		/* if resources already requested, then increment and exit */
+		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+			return;
+
+		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+		    MHI_WAKE_DB_ACCESS_VALID(mhi_cntrl->pm_state) &&
+		    !mhi_cntrl->wake_set) {
+			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+			mhi_cntrl->wake_set = true;
+		}
+		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+	}
+}
+
+/* clear device wake */
+void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
+{
+	unsigned long flags;
+
+	if (atomic_read(&mhi_cntrl->dev_wake) == 0) {
+		MHI_LOG("%s called with dev_wake already 0\n", __func__);
+		return;
+	}
+
+	MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0");
+
+	/* resources not dropping to 0, decrement and exit */
+	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+		return;
+
+	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+	    MHI_WAKE_DB_ACCESS_VALID(mhi_cntrl->pm_state) && !override &&
+	    mhi_cntrl->wake_set) {
+		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+		mhi_cntrl->wake_set = false;
+	}
+	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
+
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+	void __iomem *base = mhi_cntrl->regs;
+	u32 reset = 1, ready = 0;
+	struct mhi_event *mhi_event;
+	enum MHI_PM_STATE cur_state;
+	int ret, i;
+
+	MHI_LOG("Waiting to enter READY state\n");
+
+	MHI_LOG("Device current EE:%s, M:%s\n",
+		TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
+		TO_MHI_STATE_STR(mhi_get_m_state(mhi_cntrl)));
+
+	/* wait for RESET to be cleared and READY bit to be set */
+	for (i = 0; i < mhi_cntrl->timeout_ms; i += 25) {
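+		/*
+		 * Poll in 25 ms slices rather than one long wait: if the
+		 * MSI signalling the state change is missed, the next
+		 * iteration still re-reads MHICTRL/MHISTATUS and can
+		 * observe RESET clearing and READY being set before the
+		 * timeout_ms budget is exhausted.
+		 */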
+		ready = 0;
+		wait_event_timeout(mhi_cntrl->state_event,
+				   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+				   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
+						      MHICTRL_RESET_MASK,
+						      MHICTRL_RESET_SHIFT, &reset) ||
+				   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
+						      MHISTATUS_READY_MASK,
+						      MHISTATUS_READY_SHIFT, &ready) ||
+				   (!reset && ready),
+				   msecs_to_jiffies(25));
+
+		/* device entered an error state */
+		if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+			MHI_LOG("reset=0x%x, ready=0x%x, pm_state=0x%x\n", reset, ready, mhi_cntrl->pm_state);
+			return -EIO;
+		}
+
+		if (!reset && ready)
+			break;
+	}
+
+	/* device did not transition to ready state */
+	if (reset || !ready) {
+		MHI_LOG("reset=0x%x, ready=0x%x, pm_state=0x%x\n", reset, ready, mhi_cntrl->pm_state);
+		return -ETIMEDOUT;
+	}
+
+	MHI_LOG("Device in READY State\n");
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+	mhi_cntrl->dev_state = MHI_STATE_READY;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state != MHI_PM_POR) {
+		MHI_ERR("Error moving to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_POR),
+			to_mhi_pm_state_str(cur_state));
+		return -EIO;
+	}
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		goto error_mmio;
+
+	ret = mhi_init_mmio(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error programming mmio registers\n");
+		goto error_mmio;
+	}
+
+	/* add elements to all sw event rings */
+	MHI_LOG("mhi_db_sw_ring\n");
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev || mhi_event->hw_ring)
+			continue;
+
+		ring->wp = ring->base + ring->len - ring->el_size;
+		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
+		/* the context update must be visible to all cores */
+		smp_wmb();
+
+		/* ring the db for event rings */
+		spin_lock_irq(&mhi_event->lock);
+		mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+	}
+
+	/* set device into M0 state */
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return 0;
+
+error_mmio:
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return -EIO;
+}
+
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE cur_state;
+	struct mhi_chan *mhi_chan;
+	int i;
+
+	MHI_LOG("Entered with state:%s PM_STATE:%s\n",
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_M0;
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (unlikely(cur_state != MHI_PM_M0)) {
+		MHI_ERR("Failed to transition to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_M0),
+			to_mhi_pm_state_str(cur_state));
+		return -EIO;
+	}
+	mhi_cntrl->M0++;
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+
+	/* ring all event rings and CMD ring only if we're in AMSS */
+	if (mhi_cntrl->ee == MHI_EE_AMSS) {
+		//struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+		struct mhi_cmd *mhi_cmd =
+			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+		MHI_LOG("mhi_db_all_ring\n");
+
+		/* only ring primary cmd ring */
+		spin_lock_irq(&mhi_cmd->lock);
+		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+		spin_unlock_irq(&mhi_cmd->lock);
+	}
+
+	/* ring channel db registers */
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		struct 
mhi_ring *tre_ring = &mhi_chan->tre_ring; + + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->db_cfg.reset_req) + mhi_chan->db_cfg.db_mode = true; + + /* only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + write_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + MHI_VERB("Exited\n"); + + return 0; +} + +void mhi_pm_m1_worker(struct work_struct *work) +{ + enum MHI_PM_STATE cur_state; + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = container_of(work, struct mhi_controller, m1_worker); + + MHI_LOG("M1 state transition from dev_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + + /* we either Entered M3 or we did M3->M0 Exit */ + if (mhi_cntrl->pm_state != MHI_PM_M1) + goto invalid_pm_state; + + MHI_LOG("Transitioning to M2 Transition\n"); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M1_M2_TRANSITION); + if (unlikely(cur_state != MHI_PM_M1_M2_TRANSITION)) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M1_M2_TRANSITION), + to_mhi_pm_state_str(cur_state)); + goto invalid_pm_state; + } + + mhi_cntrl->dev_state = MHI_STATE_M2; + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + write_unlock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->M2++; + + /* during M2 transition we cannot access DB registers must sleep */ + usleep_range(MHI_M2_DEBOUNCE_TMR_US, MHI_M2_DEBOUNCE_TMR_US + 50); + write_lock_irq(&mhi_cntrl->pm_lock); + + /* during de-bounce time could be receiving M0 Event */ + if (mhi_cntrl->pm_state == MHI_PM_M1_M2_TRANSITION) { + MHI_LOG("Entered M2 State\n"); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (unlikely(cur_state != MHI_PM_M2)) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M2), + to_mhi_pm_state_str(cur_state)); + goto invalid_pm_state; + } + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* transfer pending, exit M2 */ + if (unlikely(atomic_read(&mhi_cntrl->dev_wake))) { + MHI_VERB("Exiting M2 Immediately, count:%d\n", + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } else + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_IDLE); + + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + +invalid_pm_state: + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = mhi_get_m_state(mhi_cntrl); + if (mhi_cntrl->dev_state == MHI_STATE_M1) { + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M1); + + /* schedule M1->M2 transition */ + if (state == MHI_PM_M1) { + schedule_work(&mhi_cntrl->m1_worker); + mhi_cntrl->M1++; + } + } + write_unlock_irq(&mhi_cntrl->pm_lock); +} + +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + MHI_ERR("Failed to transition to state %s from %s\n", + 
to_mhi_pm_state_str(MHI_PM_M3), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + wake_up(&mhi_cntrl->state_event); + mhi_cntrl->M3++; + + MHI_LOG("Entered mhi_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return 0; +} + +static int mhi_pm_amss_transition(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event; + + MHI_LOG("Processing AMSS Transition\n"); + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_AMSS; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + + /* add elements to all HW event rings */ + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + MHI_LOG("mhi_db_hw_ring\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); + /* all ring updates must get updated immediately */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_LOG("Adding new devices\n"); + + /* add supported devices */ + mhi_create_devices(mhi_cntrl); + + MHI_LOG("Exited\n"); + + return 0; +} + +/* handles both sys_err and shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE transition_state) +{ + enum MHI_PM_STATE cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + int ret = 0, i; + u32 regVal; + + MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(transition_state)); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state == transition_state) { + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* not handling sys_err, could be middle of shut down */ + if (cur_state != transition_state) { + MHI_LOG("Failed to transition to state:0x%x from:0x%x\n", + transition_state, cur_state); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* trigger MHI RESET so device will not access host ddr */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + u32 i; + + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* wait for reset to be cleared */ + for (i = 0; i < mhi_cntrl->timeout_ms; i += 25) { //maybe miss msi interrupt + in_reset = -1; + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &in_reset) + || !in_reset, msecs_to_jiffies(25)); + if (in_reset == 0) + break; + } + + if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + MHI_CRITICAL("Device failed to exit RESET state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* Set the numbers 
of Event Rings supported */ + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NCH_MASK, MHICFG_NCH_SHIFT, ®Val); + MHI_LOG("%d channels, ret=%d\n", regVal, ret); + + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + MHI_LOG("Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + MHI_LOG("Reset all active channels and remove mhi devices\n"); + device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device); + + MHI_LOG("Finish resetting channels\n"); + + /* release lock and wait for all pending thread to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Waiting for all pending threads to complete\n"); + wake_up(&mhi_cntrl->state_event); + flush_work(&mhi_cntrl->m1_worker); + flush_work(&mhi_cntrl->st_worker); + flush_work(&mhi_cntrl->fw_worker); + + mutex_lock(&mhi_cntrl->pm_mutex); + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0"); + + /* reset the ev rings and cmd rings */ + MHI_LOG("Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* do not touch offload er */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + if (cur_state == MHI_PM_SYS_ERR_PROCESS) { + mhi_ready_state_transition(mhi_cntrl); + } else { + /* move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + MHI_ERR("Error moving from pm state:%s to state:%s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + } + + MHI_LOG("Exit with pm_state:%s mhi_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +int mhi_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + enum MHI_PM_STATE cur_state; + int ret; + + MHI_LOG("Trigger MHI Reset\n"); + + /* exit lpm first */ + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, 
MHI_PM_SYS_ERR_DETECT);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state == MHI_PM_SYS_ERR_DETECT)
+		schedule_work(&mhi_cntrl->syserr_worker);
+
+	return 0;
+}
+
+/* queue a new work item and schedule work */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+			       enum MHI_ST_TRANSITION state)
+{
+	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+	unsigned long flags;
+
+	if (!item)
+		return -ENOMEM;
+
+	item->state = state;
+	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+	list_add_tail(&item->node, &mhi_cntrl->transition_list);
+	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+	schedule_work(&mhi_cntrl->st_worker);
+
+	return 0;
+}
+
+void mhi_pm_sys_err_worker(struct work_struct *work)
+{
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							syserr_worker);
+
+	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+}
+
+void mhi_pm_st_worker(struct work_struct *work)
+{
+	struct state_transition *itr, *tmp;
+	LIST_HEAD(head);
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							st_worker);
+	spin_lock_irq(&mhi_cntrl->transition_lock);
+	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+	spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+	list_for_each_entry_safe(itr, tmp, &head, node) {
+		list_del(&itr->node);
+		MHI_LOG("Transition to state:%s\n",
+			TO_MHI_STATE_TRANS_STR(itr->state));
+
+		if (mhi_cntrl->ee != mhi_get_exec_env(mhi_cntrl)) {
+			MHI_ERR("%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee),
+				TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
+		}
+
+		switch (itr->state) {
+		case MHI_ST_TRANSITION_PBL:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			if (MHI_IN_PBL(mhi_cntrl->ee))
+				wake_up(&mhi_cntrl->state_event);
+			break;
+		case MHI_ST_TRANSITION_READY:
+			mhi_ready_state_transition(mhi_cntrl);
+			break;
+		case MHI_ST_TRANSITION_SBL:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			mhi_cntrl->ee = MHI_EE_SBL;
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			mhi_create_devices(mhi_cntrl);
+			break;
+		case MHI_ST_TRANSITION_AMSS:
+			mhi_pm_amss_transition(mhi_cntrl);
+			break;
+		case MHI_ST_TRANSITION_FP:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			mhi_cntrl->ee = MHI_EE_FP;
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			wake_up(&mhi_cntrl->state_event);
+			mhi_create_devices(mhi_cntrl);
+			break;
+		default:
+			break;
+		}
+		kfree(itr);
+	}
+}
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	u32 bhi_offset;
+	enum MHI_EE current_ee;
+	enum MHI_ST_TRANSITION next_state;
+	u32 mhi_cfg = 0xFFFFFFFF;
+	u32 mhi_timeout = 10;
+	enum MHI_STATE mhiDevState;
+
+	MHI_LOG("Requested to power on\n");
+
+//# echo 1 > /sys/module/ep_pcie_core/parameters/debug_mask
+//# echo 3 > /sys/kernel/debug/pcie-ep/case
+//in sdx20 mimo offset is 0x100
+//ep_pcie_reg_dump: 0x0100 00000100 00000000 01000000 b8efc7ae 00010880 ecd23c0b 00000300 00000000
+	while (mhi_cfg == 0xFFFFFFFF && mhi_timeout != 0) { //refer to MhiInitNewDev
+		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, &mhi_cfg);
+		if (ret)
+			MHI_ERR("Error reading MHICFG\n");
+		MHI_LOG("mhi_cfg = 0x%x\n", mhi_cfg);
+		msleep(50);
+		mhi_timeout--;
+	}
+	if (mhi_cfg == 0xFFFFFFFF)
+		return -EBUSY;
+
+	mhiDevState = mhi_get_m_state(mhi_cntrl);
+	MHI_LOG("mhi_state = %s\n", mhi_state_str[mhiDevState]);
+
+	if (mhiDevState >= MHI_STATE_M0) {
+		MHI_LOG("Trigger device into MHI_RESET\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+		msleep(50);
+		mhiDevState = mhi_get_m_state(mhi_cntrl);
+		MHI_LOG("mhi_state = %s\n", mhi_state_str[mhiDevState]);
+	}
+
+	/* set to default wake if not set */
+	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put) {
+		mhi_cntrl->wake_get = mhi_assert_dev_wake;
+		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+	}
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+	if (!mhi_cntrl->pre_init) {
+		/* setup device context */
+		ret = mhi_init_dev_ctxt(mhi_cntrl);
+		if (ret) {
+			MHI_ERR("Error setting dev_context\n");
+			goto error_dev_ctxt;
+		}
+
+		ret = mhi_init_irq_setup(mhi_cntrl);
+		if (ret) {
+			MHI_ERR("Error setting up irq\n");
+			goto error_setup_irq;
+		}
+	}
+
+	/* setup bhi offset & intvec */
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_offset);
+	if (ret) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		MHI_ERR("Error getting bhi offset\n");
+		goto error_bhi_offset;
+	}
+
+	MHI_LOG("bhi_offset = 0x%x\n", bhi_offset); //this should be 0x100, in sdx20, bhi offset is 0x200
+	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_offset;
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	mhi_cntrl->pm_state = MHI_PM_POR;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/*
+	 * pm_lock is a spinlock, so drop it before polling: msleep() must
+	 * not be called with the lock held and interrupts disabled.
+	 */
+	mhi_timeout = 30;
+	while ((current_ee = mhi_get_exec_env(mhi_cntrl)) != MHI_EE_AMSS) {
+		if (current_ee < MHI_EE_MAX)
+			MHI_LOG("wait for ee = %s\n", mhi_ee_str[current_ee]);
+
+		msleep(500);
+		if (--mhi_timeout == 0)
+			break;
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+	current_ee = mhi_cntrl->ee;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	MHI_LOG("current_ee = %s\n", mhi_ee_str[current_ee]);
+
+	/* confirm device is in valid exec env */
+	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+		MHI_ERR("Not a valid ee for power on\n");
+		ret = -EIO;
+		goto error_bhi_offset;
+	}
+
+	/* transition to next state */
+	next_state = MHI_IN_PBL(current_ee) ?
+ MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY; + + mhi_queue_state_transition(mhi_cntrl, next_state); + + mhi_init_debugfs(mhi_cntrl); + mhi_cntrl_register_miscdev(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Power on setup success\n"); + + + return 0; + +error_bhi_offset: + if (!mhi_cntrl->pre_init) + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + if (!mhi_cntrl->pre_init) + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum MHI_PM_STATE cur_state; + MHI_LOG("enter\n"); + + /* if it's not graceful shutdown, force MHI to a linkdown state */ + if (!graceful) { + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_LD_ERR_FATAL_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) + MHI_ERR("Failed to move to state:%s from:%s\n", + to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + } + else { + device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device); + } + + mhi_cntrl_deregister_miscdev(mhi_cntrl); + mhi_deinit_debugfs(mhi_cntrl); + + if (!mhi_cntrl->pre_init) { + /* free all allocated resources */ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + } + +} +EXPORT_SYMBOL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_AMSS || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (mhi_cntrl->ee == MHI_EE_AMSS) ? 
0 : -EIO; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + int ret; + enum MHI_PM_STATE new_state; + struct mhi_chan *itr, *tmp; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* do a quick check to see if any pending data, then exit */ + if (atomic_read(&mhi_cntrl->dev_wake)) { + MHI_VERB("Busy, aborting M3\n"); + return -EBUSY; + } + + /* exit MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Did not enter M0||M1 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + if (atomic_read(&mhi_cntrl->dev_wake)) { + MHI_VERB("Busy, aborting M3\n"); + write_unlock_irq(&mhi_cntrl->pm_lock); + return -EBUSY; + } + + /* anytime after this, we will resume thru runtime pm framework */ + MHI_LOG("Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* set dev to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_LOG("Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M3 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; +} +EXPORT_SYMBOL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + int ret; + struct mhi_chan *itr, *tmp; + + MHI_LOG("Entered with pm_state:%s dev_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3"); + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + 
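+		/* M3_EXIT was rejected by the PM state machine; drop the lock and fail */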
write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* set dev to M0 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + return 0; +} + +static int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_inc(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + atomic_inc(&mhi_dev->dev_wake); + + return ret; +} +EXPORT_SYMBOL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_dec(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_put); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + int ret; + + MHI_LOG("Enter with pm_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* before rddm mode, we need to enter M0 state */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto no_reg_access; + + MHI_LOG("Triggering SYS_ERR to force rddm state\n"); + + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + mhi_cntrl->wake_put(mhi_cntrl, false); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + + /* wait for rddm event */ + MHI_LOG("Waiting for device to enter RDDM state\n"); + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = !ret ? 
-EIO : 0;
+
+	MHI_LOG("Exiting with pm_state:%s ee:%s ret:%d\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+	return ret;
+
+no_reg_access:
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return -EIO;
+}
+EXPORT_SYMBOL(mhi_force_rddm_mode);
diff --git a/fibocom_MHI/src/devices/mhi_netdev.c b/fibocom_MHI/src/devices/mhi_netdev.c
new file mode 100644
index 0000000..d34716e
--- /dev/null
+++ b/fibocom_MHI/src/devices/mhi_netdev.c
@@ -0,0 +1,1313 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <linux/kthread.h>
+#include <linux/skbuff.h>
+#include <linux/usb/cdc.h>
+#include "../core/mhi.h"
+#include "mhi_netdev.h"
+
+struct qmap_hdr {
+	u8 cd_rsvd_pad;
+	u8 mux_id;
+	u16 pkt_len;
+} __packed;
+#define FIBO_QMAP_MUX_ID 0x81
+
+#ifndef CONFIG_MHI_NETDEV_MBIM
+static uint __read_mostly qmap_mode = 1;
+module_param(qmap_mode, uint, S_IRUGO);
+#endif
+
+#define MHI_NETDEV_DRIVER_NAME "mhi_netdev"
+#define WATCHDOG_TIMEOUT (30 * HZ)
+
+#define MSG_VERB(fmt, ...) do { \
+	if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \
+		pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#define MHI_ASSERT(cond, msg) do { \
+	if (cond) { \
+		MSG_ERR(msg); \
+		WARN_ON(cond); \
+	} \
+} while (0)
+
+#define MSG_LOG(fmt, ...) do { \
+	if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \
+		pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#define MSG_ERR(fmt, ...) do { \
+	if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \
+		pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+struct mhi_stats {
+	u32 rx_int;
+	u32 tx_full;
+	u32 tx_pkts;
+	u32 rx_budget_overflow;
+	u32 rx_frag;
+	u32 alloc_failed;
+};
+
+/* important: do not exceed sk_buff->cb (48 bytes) */
+struct mhi_skb_priv {
+	void *buf;
+	size_t size;
+	struct mhi_netdev *mhi_netdev;
+};
+
+struct mhi_netdev {
+	int alias;
+	struct mhi_device *mhi_dev;
+	spinlock_t rx_lock;
+	bool enabled;
+	rwlock_t pm_lock; /* state change lock */
+	int (*rx_queue)(struct mhi_netdev *mhi_netdev, gfp_t gfp_t);
+	struct work_struct alloc_work;
+	int wake;
+
+	struct sk_buff_head rx_allocated;
+
+	u32 mru;
+	const char *interface_name;
+	struct napi_struct napi;
+	struct net_device *ndev;
+	struct sk_buff *frag_skb;
+	bool recycle_buf;
+
+	struct mhi_stats stats;
+	struct dentry *dentry;
+	enum MHI_DEBUG_LEVEL msg_lvl;
+#ifdef CONFIG_MHI_NETDEV_MBIM
+	u16 tx_seq;
+	u16 rx_seq;
+	u32 rx_max;
+#endif
+};
+
+struct mhi_netdev_priv {
+	struct mhi_netdev *mhi_netdev;
+};
+
+static struct mhi_driver mhi_netdev_driver;
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev);
+
+static struct mhi_netdev *g_mhi_netdev = NULL;
+
+static inline void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len)
+{
+//#define MHI_NETDEV_DEBUG
+#ifdef MHI_NETDEV_DEBUG
+	if (g_mhi_netdev && g_mhi_netdev->msg_lvl > MHI_MSG_LVL_CRITICAL) {
+		int i;
+
+		printk("dump %s,%s: len=%d\n", tag, g_mhi_netdev->ndev->name, len);
+		for (i = 0; i < len; i++) {
+			printk(" 0x%02x", data[i]);
+			if (((i + 1) % 16) == 0)
+				printk("\n");
+		}
+		printk("\n");
+	}
+#endif
+}
+
+static int macaddr_check = 0;
+
+static int mhi_netdev_macaddr_check_get(char *buffer, const struct kernel_param *kp)
+{
+	char mac_str[32];
+
+	if (g_mhi_netdev == NULL)
+		return sprintf(buffer, "%s\n", "null");
+
+	snprintf(mac_str, sizeof(mac_str), "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		 g_mhi_netdev->ndev->dev_addr[0],
+		 g_mhi_netdev->ndev->dev_addr[1],
+		 g_mhi_netdev->ndev->dev_addr[2],
+		 g_mhi_netdev->ndev->dev_addr[3],
+		 g_mhi_netdev->ndev->dev_addr[4],
+		 g_mhi_netdev->ndev->dev_addr[5]);
+
+	return sprintf(buffer, "%s", mac_str);
+}
+
+static int mhi_netdev_macaddr_check_set(const char *val, const struct kernel_param *kp)
+{
+	if (g_mhi_netdev == NULL)
+		return 0;
+
+	if (val[0] == '1') {
+		if (!is_valid_ether_addr(g_mhi_netdev->ndev->dev_addr)) {
+			eth_random_addr(g_mhi_netdev->ndev->dev_addr);
+			g_mhi_netdev->ndev->addr_assign_type = NET_ADDR_RANDOM;
+
+			/* retry once if the random address is still invalid */
+			if (!is_valid_ether_addr(g_mhi_netdev->ndev->dev_addr)) {
+				printk("invalid ether addr, randomizing again\n");
+				eth_random_addr(g_mhi_netdev->ndev->dev_addr);
+			}
+		}
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+module_param_call(macaddr_check, mhi_netdev_macaddr_check_set, mhi_netdev_macaddr_check_get, &macaddr_check, 0644);
+
+static void mhi_netdev_skb_destructor(struct sk_buff *skb)
+{
+	struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb);
+	struct mhi_netdev *mhi_netdev = skb_priv->mhi_netdev;
+
+	skb->data = skb->head;
+	skb_reset_tail_pointer(skb);
+	skb->len = 0;
+	MHI_ASSERT(skb->data != skb_priv->buf, "incorrect buf");
+	skb_queue_tail(&mhi_netdev->rx_allocated, skb);
+}
+
+static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
+{
+	u32 cur_mru = mhi_netdev->mru;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+	struct mhi_skb_priv *skb_priv;
+	int ret;
+	struct sk_buff *skb;
+	int no_tre = 
mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + int i; + + for (i = 0; i < no_tre; i++) { + skb = alloc_skb(cur_mru + ETH_HLEN, gfp_t); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, ETH_HLEN); + + read_lock_bh(&mhi_netdev->pm_lock); + if (unlikely(!mhi_netdev->enabled)) { + MSG_ERR("Interface not enabled\n"); + ret = -EIO; + goto error_queue; + } + + skb_priv = (struct mhi_skb_priv *)skb->cb; + skb_priv->buf = skb->data; + skb_priv->size = cur_mru; + skb_priv->mhi_netdev = mhi_netdev; + skb->dev = mhi_netdev->ndev; + + if (mhi_netdev->recycle_buf) + skb->destructor = mhi_netdev_skb_destructor; + + spin_lock_bh(&mhi_netdev->rx_lock); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb, + skb_priv->size, MHI_EOT); + spin_unlock_bh(&mhi_netdev->rx_lock); + + if (ret) { + MSG_ERR("Failed to queue skb, ret:%d\n", ret); + ret = -EIO; + goto error_queue; + } + + read_unlock_bh(&mhi_netdev->pm_lock); + } + + return 0; + +error_queue: + skb->destructor = NULL; + read_unlock_bh(&mhi_netdev->pm_lock); + dev_kfree_skb_any(skb); + + return ret; +} + +static void mhi_netdev_alloc_work(struct work_struct *work) +{ + struct mhi_netdev *mhi_netdev = container_of(work, struct mhi_netdev, + alloc_work); + /* sleep about 1 sec and retry, that should be enough time + * for system to reclaim freed memory back. + */ + const int sleep_ms = 1000; + int retry = 60; + int ret; + + MSG_LOG("Entered\n"); + do { + ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); + /* sleep and try again */ + if (ret == -ENOMEM) { + msleep(sleep_ms); + retry--; + } + } while (ret == -ENOMEM && retry); + + MSG_LOG("Exit with status:%d retry:%d\n", ret, retry); +} + +/* we will recycle buffers */ +static int mhi_netdev_skb_recycle(struct mhi_netdev *mhi_netdev, gfp_t gfp_t) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int no_tre; + int ret = 0; + struct sk_buff *skb; + struct mhi_skb_priv *skb_priv; + + read_lock_bh(&mhi_netdev->pm_lock); + if (!mhi_netdev->enabled) { + read_unlock_bh(&mhi_netdev->pm_lock); + return -EIO; + } + + no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + + spin_lock_bh(&mhi_netdev->rx_lock); + while (no_tre) { + skb = skb_dequeue(&mhi_netdev->rx_allocated); + + /* no free buffers to recycle, reschedule work */ + if (!skb) { + ret = -ENOMEM; + goto error_queue; + } + + skb_priv = (struct mhi_skb_priv *)(skb->cb); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb, + skb_priv->size, MHI_EOT); + + /* failed to queue buffer */ + if (ret) { + MSG_ERR("Failed to queue skb, ret:%d\n", ret); + skb_queue_tail(&mhi_netdev->rx_allocated, skb); + goto error_queue; + } + + no_tre--; + } + +error_queue: + spin_unlock_bh(&mhi_netdev->rx_lock); + read_unlock_bh(&mhi_netdev->pm_lock); + + return ret; +} + +static void mhi_netdev_dealloc(struct mhi_netdev *mhi_netdev) +{ + struct sk_buff *skb; + + skb = skb_dequeue(&mhi_netdev->rx_allocated); + while (skb) { + skb->destructor = NULL; + kfree_skb(skb); + skb = skb_dequeue(&mhi_netdev->rx_allocated); + } +} + +static int mhi_netdev_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int rx_work = 0; + int ret; + + MSG_VERB("Entered\n"); + + read_lock_bh(&mhi_netdev->pm_lock); + + if (!mhi_netdev->enabled) { + MSG_LOG("interface is disabled!\n"); + napi_complete(napi); + read_unlock_bh(&mhi_netdev->pm_lock); + return 0; + 
} + + mhi_device_get(mhi_dev); + + rx_work = mhi_poll(mhi_dev, budget); + if (rx_work < 0) { + MSG_ERR("Error polling ret:%d\n", rx_work); + rx_work = 0; + napi_complete(napi); + goto exit_poll; + } + + /* queue new buffers */ + ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC); + if (ret == -ENOMEM) { + MSG_LOG("out of tre, queuing bg worker\n"); + mhi_netdev->stats.alloc_failed++; + schedule_work(&mhi_netdev->alloc_work); + } + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + else + mhi_netdev->stats.rx_budget_overflow++; + +exit_poll: + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_netdev->pm_lock); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} + +static int mhi_netdev_open(struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + MSG_LOG("Opened net dev interface\n"); + + /* tx queue may not necessarily be stopped already + * so stop the queue if tx path is not enabled + */ + if (!mhi_dev->ul_chan) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + return 0; + +} + +static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + if (new_mtu < 0 || mhi_dev->mtu < new_mtu) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +#ifdef CONFIG_MHI_NETDEV_MBIM +static struct sk_buff *mhi_mbim_tx_fixup(struct mhi_netdev *mhi_netdev, struct sk_buff *skb, struct net_device *dev) { + struct usb_cdc_ncm_nth16 *nth16; + struct usb_cdc_ncm_ndp16 *ndp16; + __le32 sign; + u8 *c; + u16 tci = 0; + unsigned int skb_len; + + qmap_hex_dump(__func__, skb->data, skb->len); + + if (skb->len > VLAN_ETH_HLEN && __vlan_get_tag(skb, &tci) == 0) + { + skb_pull(skb, VLAN_ETH_HLEN); + } + else + { + skb_pull(skb, ETH_HLEN); + } + + skb_len = skb->len; + + if (skb_headroom(skb) < sizeof(struct usb_cdc_ncm_nth16)) { + printk("skb_headroom small!\n"); + return NULL; + } + + if (skb_tailroom(skb) < (sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2)) { + if (skb_pad(skb, (sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2))) { + printk("skb_tailroom small!\n"); + return NULL; + } + } + + skb_push(skb, sizeof(struct usb_cdc_ncm_nth16)); + skb_put(skb, sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2); + + nth16 = (struct usb_cdc_ncm_nth16 *)skb->data; + nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); + nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + nth16->wSequence = cpu_to_le16(mhi_netdev->tx_seq++); + nth16->wBlockLength = cpu_to_le16(skb->len); + nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16) + skb_len); + + sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); + c = (u8 *)&sign; + //tci = 0; + c[3] = tci; + + ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + nth16->wNdpIndex); + ndp16->dwSignature = sign; + ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2); + ndp16->wNextNdpIndex = 0; + + ndp16->dpe16[0].wDatagramIndex = sizeof(struct usb_cdc_ncm_nth16); + ndp16->dpe16[0].wDatagramLength = skb_len; + + ndp16->dpe16[1].wDatagramIndex = 0; + ndp16->dpe16[1].wDatagramLength = 0; + + return skb; +} + +static int 
mhi_mbim_rx_fixup(struct mhi_netdev *mhi_netdev, struct sk_buff *skb_in, struct net_device *dev) { + struct usb_cdc_ncm_nth16 *nth16; + int ndpoffset, len; + u16 wSequence; + struct mhi_netdev *ctx = mhi_netdev; + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("frame too short\n"); + goto error; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + MSG_ERR("invalid NTH16 signature <%#010x>\n", le32_to_cpu(nth16->dwSignature)); + goto error; + } + + len = le16_to_cpu(nth16->wBlockLength); + if (len > ctx->rx_max) { + MSG_ERR("unsupported NTB block length %u/%u\n", len, ctx->rx_max); + goto error; + } + + wSequence = le16_to_cpu(nth16->wSequence); + if (ctx->rx_seq != wSequence) { + MSG_ERR("sequence number glitch prev=%d curr=%d\n", ctx->rx_seq, wSequence); + } + ctx->rx_seq = wSequence + 1; + + ndpoffset = nth16->wNdpIndex; + + while (ndpoffset > 0) { + struct usb_cdc_ncm_ndp16 *ndp16 ; + struct usb_cdc_ncm_dpe16 *dpe16; + int nframes, x; + u8 *c; + u16 tci = 0; + + if (skb_in->len < (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("invalid NDP offset <%u>\n", ndpoffset); + goto error; + } + + ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset); + + if (le16_to_cpu(ndp16->wLength) < 0x10) { + MSG_ERR("invalid DPT16 length <%u>\n", le16_to_cpu(ndp16->wLength)); + goto error; + } + + nframes = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16)); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_ndp16) + nframes * (sizeof(struct usb_cdc_ncm_dpe16)))) { + MSG_ERR("Invalid nframes = %d\n", nframes); + goto error; + } + + switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) { + case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3]; + /* tag IPS<0> packets too if MBIM_IPS0_VID exists */ + //if (!tci && info->flags & FLAG_IPS0_VLAN) + // tci = MBIM_IPS0_VID; + break; + case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3] + 256; + break; + default: + MSG_ERR("unsupported NDP signature <0x%08x>\n", le32_to_cpu(ndp16->dwSignature)); + goto error; + } + + #if 0 + if (tci != 0) { + MSG_ERR("unsupported tci %d by now\n", tci); + goto error; + } + #endif + + dpe16 = ndp16->dpe16; + + for (x = 0; x < nframes; x++, dpe16++) { + int offset = le16_to_cpu(dpe16->wDatagramIndex); + int skb_len = le16_to_cpu(dpe16->wDatagramLength); + struct sk_buff *skb; + + if (offset == 0 || skb_len == 0) { + break; + } + + /* sanity checking */ + if (((offset + skb_len) > skb_in->len) || (skb_len > ctx->rx_max)) { + MSG_ERR("invalid frame detected (ignored) x=%d, offset=%d, skb_len=%u\n", x, offset, skb_len); + goto error; + } + + skb = skb_clone(skb_in, GFP_ATOMIC); + if (!skb) { + MSG_ERR("skb_clone fail\n"); + goto error; + } + + skb_pull(skb, offset); + skb_trim(skb, skb_len); + switch (skb->data[0] & 0xf0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + MSG_ERR("unknow skb->protocol %02x\n", skb->data[0]); + goto error; + } + skb_reset_mac_header(skb); + + /* map MBIM session to VLAN */ + if (tci) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci); + + netif_receive_skb(skb); + } + + /* are there more NDPs to process? 
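+		 * (wNextNdpIndex chains the NDP16 blocks of one NTB; a value of 0 ends the walk)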
*/
+		ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+	}
+
+	return 1;
+error:
+	MSG_ERR("%s error\n", __func__);
+	return 0;
+}
+#else
+static struct sk_buff *mhi_qmap_tx_fixup(struct mhi_netdev *mhi_netdev, struct sk_buff *skb, struct net_device *dev)
+{
+	struct qmap_hdr *qhdr;
+	u16 tci = 0;
+
+	if (skb->len > VLAN_ETH_HLEN && __vlan_get_tag(skb, &tci) == 0)
+		skb_pull(skb, VLAN_ETH_HLEN);
+	else
+		skb_pull(skb, ETH_HLEN);
+
+	if (skb_headroom(skb) < sizeof(struct qmap_hdr)) {
+		printk("skb_headroom small!\n");
+		return NULL;
+	}
+
+	qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
+	qhdr->cd_rsvd_pad = 0;
+	qhdr->mux_id = FIBO_QMAP_MUX_ID + tci;
+	qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
+
+	return skb;
+}
+
+static int mhi_qmap_rx_fixup(struct mhi_netdev *mhi_netdev, struct sk_buff *skb_in, struct net_device *dev)
+{
+	while (skb_in->len > sizeof(struct qmap_hdr)) {
+		struct qmap_hdr *qhdr = (struct qmap_hdr *)skb_in->data;
+		struct sk_buff *skb = NULL;
+		int pkt_len = be16_to_cpu(qhdr->pkt_len);
+		u16 tci = qhdr->mux_id - FIBO_QMAP_MUX_ID;
+		int skb_len;
+		int ret;
+
+		if (skb_in->len < (pkt_len + sizeof(struct qmap_hdr))) {
+			MSG_ERR("drop unknown qmap pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
+			goto error;
+		}
+
+		if (qhdr->cd_rsvd_pad & 0x80) {
+			MSG_ERR("drop qmap command packet %x\n", qhdr->cd_rsvd_pad);
+			skb_pull(skb_in, pkt_len + sizeof(struct qmap_hdr));
+			continue;
+		}
+
+		skb_len = pkt_len - (qhdr->cd_rsvd_pad & 0x3F);
+
+		skb = netdev_alloc_skb_ip_align(dev, skb_len + ETH_HLEN);
+		if (!skb) {
+			MSG_ERR("netdev_alloc_skb_ip_align fail\n");
+			goto error;
+		}
+
+		switch (skb_in->data[sizeof(struct qmap_hdr)] & 0xf0) {
+		case 0x40:
+			skb->protocol = htons(ETH_P_IP);
+			break;
+		case 0x60:
+			skb->protocol = htons(ETH_P_IPV6);
+			break;
+		default:
+			MSG_ERR("unknown IP version %02x\n",
+				skb_in->data[sizeof(struct qmap_hdr)]);
+			kfree_skb(skb);
+			goto error;
+		}
+
+		/* add an ethernet header */
+		skb_put(skb, ETH_HLEN);
+		skb_reset_mac_header(skb);
+		eth_hdr(skb)->h_proto = skb->protocol;
+		eth_zero_addr(eth_hdr(skb)->h_source);
+		memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
+
+		/* add datagram */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+		fibo_skb_put_data(skb, skb_in->data + sizeof(struct qmap_hdr), skb_len);
+#else
+		skb_put_data(skb, skb_in->data + sizeof(struct qmap_hdr), skb_len);
+#endif
+
+		skb_pull(skb, ETH_HLEN);
+
+		/* map QMAP mux session to VLAN */
+		if (tci)
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
+
+		ret = netif_receive_skb(skb);
+
+		skb_pull(skb_in, pkt_len + sizeof(struct qmap_hdr));
+	}
+
+	return 1;
+
+error:
+	MSG_ERR("%s error\n", __func__);
+	return 0;
+}
+#endif
+
+static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+	int res = 0;
+	struct mhi_skb_priv *tx_priv;
+
+	MSG_VERB("Entered\n");
+
+	tx_priv = (struct mhi_skb_priv *)(skb->cb);
+	tx_priv->mhi_netdev = mhi_netdev;
+	read_lock_bh(&mhi_netdev->pm_lock);
+
+	if (unlikely(!mhi_netdev->enabled)) {
+		/* Only reason interface could be disabled and we get data
+		 * is due to an SSR. We do not want to stop the queue and
+		 * return error. 
Instead we will flush all the uplink packets + * and return successful + */ + res = NETDEV_TX_OK; + dev_kfree_skb_any(skb); + goto mhi_xmit_exit; + } + +#ifdef CONFIG_MHI_NETDEV_MBIM + if (mhi_mbim_tx_fixup(mhi_netdev, skb, dev) == NULL) { + res = NETDEV_TX_OK; + dev_kfree_skb_any(skb); + goto mhi_xmit_exit; + } +#else + if (qmap_mode) { + if (mhi_qmap_tx_fixup(mhi_netdev, skb, dev) == NULL) { + res = NETDEV_TX_OK; + dev_kfree_skb_any(skb); + goto mhi_xmit_exit; + } + } +#endif + + qmap_hex_dump(__func__, skb->data, skb->len); + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (res) { + MSG_VERB("Failed to queue with reason:%d\n", res); + netif_stop_queue(dev); + mhi_netdev->stats.tx_full++; + res = NETDEV_TX_BUSY; + goto mhi_xmit_exit; + } + + mhi_netdev->stats.tx_pkts++; + +mhi_xmit_exit: + read_unlock_bh(&mhi_netdev->pm_lock); + MSG_VERB("Exited\n"); + + return res; +} + +static const struct net_device_ops mhi_netdev_ops_ip = { + .ndo_open = mhi_netdev_open, + .ndo_start_xmit = mhi_netdev_xmit, + .ndo_change_mtu = mhi_netdev_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static void mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + /* set this after calling ether_setup */ + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->watchdog_timeo = WATCHDOG_TIMEOUT; +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int no_tre; + + MSG_LOG("Prepare the channels for transfer\n"); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start TX chan ret %d\n", ret); + goto mhi_failed_to_start; + } + + /* first time enabling the node */ + if (!mhi_netdev->ndev) { + struct mhi_netdev_priv *mhi_netdev_priv; + + snprintf(ifname, sizeof(ifname), "%s%%d", + mhi_netdev->interface_name); + + rtnl_lock(); +#ifdef NET_NAME_PREDICTABLE + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); +#else + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, + mhi_netdev_setup); +#endif + + if (!mhi_netdev->ndev) { + ret = -ENOMEM; + rtnl_unlock(); + goto net_dev_alloc_fail; + } + + //mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + rtnl_unlock(); + + netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, + mhi_netdev_poll, NAPI_POLL_WEIGHT); + ret = register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto net_dev_reg_fail; + } + + skb_queue_head_init(&mhi_netdev->rx_allocated); + } + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = true; + write_unlock_irq(&mhi_netdev->pm_lock); + + /* queue buffer for rx path */ + no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); + if (ret) + schedule_work(&mhi_netdev->alloc_work); + + /* if we recycle prepare one more set */ + if (mhi_netdev->recycle_buf) + for (; no_tre >= 0; no_tre--) { + struct sk_buff *skb = alloc_skb(mhi_netdev->mru + ETH_HLEN, + GFP_KERNEL); + struct mhi_skb_priv *skb_priv; + + if (!skb) + break; + + skb_reserve(skb, ETH_HLEN); + + skb_priv = (struct 
mhi_skb_priv *)skb->cb; + skb_priv->buf = skb->data; + skb_priv->size = mhi_netdev->mru; + skb_priv->mhi_netdev = mhi_netdev; + skb->dev = mhi_netdev->ndev; + skb->destructor = mhi_netdev_skb_destructor; + skb_queue_tail(&mhi_netdev->rx_allocated, skb); + } + + napi_enable(&mhi_netdev->napi); + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(&mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + +net_dev_alloc_fail: + mhi_unprepare_from_transfer(mhi_dev); + +mhi_failed_to_start: + MSG_ERR("Exited ret %d.\n", ret); + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); +} + +static int mhi_netdev_process_fragment(struct mhi_netdev *mhi_netdev, + struct sk_buff *skb) +{ + struct sk_buff *temp_skb; + + if (mhi_netdev->frag_skb) { + /* merge the new skb into the old fragment */ + temp_skb = skb_copy_expand(mhi_netdev->frag_skb, ETH_HLEN, skb->len, + GFP_ATOMIC); + if (!temp_skb) { + dev_kfree_skb(mhi_netdev->frag_skb); + mhi_netdev->frag_skb = NULL; + return -ENOMEM; + } + + dev_kfree_skb_any(mhi_netdev->frag_skb); + mhi_netdev->frag_skb = temp_skb; + memcpy(skb_put(mhi_netdev->frag_skb, skb->len), skb->data, + skb->len); + } else { + mhi_netdev->frag_skb = skb_copy(skb, GFP_ATOMIC); + if (!mhi_netdev->frag_skb) + return -ENOMEM; + } + + mhi_netdev->stats.rx_frag++; + + return 0; +} + +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *dev = mhi_netdev->ndev; + int ret = 0; + static size_t bytes_xferd = 0; + + if (mhi_result->transaction_status == -ENOTCONN) { + dev_kfree_skb(skb); + return; + } + + if (mhi_result->bytes_xferd > bytes_xferd) { + bytes_xferd = mhi_result->bytes_xferd; + //printk("%s bytes_xferd=%zd\n", __func__, bytes_xferd); + } + + skb_put(skb, mhi_result->bytes_xferd); + dev->stats.rx_packets++; + dev->stats.rx_bytes += mhi_result->bytes_xferd; + + /* merge skb's together, it's a chain transfer */ + if (mhi_result->transaction_status == -EOVERFLOW || + mhi_netdev->frag_skb) { + ret = mhi_netdev_process_fragment(mhi_netdev, skb); + + /* recycle the skb */ + if (mhi_netdev->recycle_buf) + mhi_netdev_skb_destructor(skb); + else + dev_kfree_skb(skb); + + if (ret) + return; + } + + /* more data will come, don't submit the buffer */ + if (mhi_result->transaction_status == -EOVERFLOW) + return; + + if (mhi_netdev->frag_skb) { + skb = mhi_netdev->frag_skb; + skb->dev = dev; + mhi_netdev->frag_skb = NULL; + } + + qmap_hex_dump(__func__, skb->data, skb->len); + +#ifdef CONFIG_MHI_NETDEV_MBIM + mhi_mbim_rx_fixup(mhi_netdev, skb, dev); + dev_kfree_skb_any(skb); +#else + if (qmap_mode) { + mhi_qmap_rx_fixup(mhi_netdev, skb, dev); + dev_kfree_skb_any(skb); + } + else { + switch (skb->data[0] & 0xf0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + netif_receive_skb(skb); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + netif_receive_skb(skb); + break; + default: + break; + } + } +#endif + + mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC); +} + +static void 
mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + if (napi_schedule_prep(&mhi_netdev->napi)) { + __napi_schedule(&mhi_netdev->napi); + mhi_netdev->stats.rx_int++; + return; + } + +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *mhi_netdev_debugfs_dentry; + +static int mhi_netdev_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_netdev *mhi_netdev = data; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int ret; + + MSG_LOG("Triggering channel reset\n"); + + /* disable the interface so no data processing */ + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + napi_disable(&mhi_netdev->napi); + + /* disable all hardware channels */ + mhi_unprepare_from_transfer(mhi_dev); + + /* clean up all alocated buffers */ + mhi_netdev_dealloc(mhi_netdev); + + MSG_LOG("Restarting iface\n"); + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_netdev_debugfs_trigger_reset_fops, NULL, + mhi_netdev_debugfs_trigger_reset, "%llu\n"); + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ + char node_name[32]; + int i; + const umode_t mode = 0600; + struct dentry *file; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct dentry *dentry = mhi_netdev_debugfs_dentry; + + const struct { + char *name; + u32 *ptr; + } debugfs_table[] = { + { + "rx_int", + &mhi_netdev->stats.rx_int + }, + { + "tx_full", + &mhi_netdev->stats.tx_full + }, + { + "tx_pkts", + &mhi_netdev->stats.tx_pkts + }, + { + "rx_budget_overflow", + &mhi_netdev->stats.rx_budget_overflow + }, + { + "rx_fragmentation", + &mhi_netdev->stats.rx_frag + }, + { + "alloc_failed", + &mhi_netdev->stats.alloc_failed + }, + { + NULL, NULL + }, + }; + + /* Both tx & rx client handle contain same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if (IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; + /*begin added by tony.du for mantis 0062018 on 2020-11-10*/ + debugfs_create_u32("msg_lvl", mode, mhi_netdev->dentry, + (u32 *)&mhi_netdev->msg_lvl); + /*end added by tony.du for mantis 0062018 on 2020-11-10*/ + + /* Add debug stats table */ + for (i = 0; debugfs_table[i].name; i++) { + /*begin added by tony.du for mantis 0062018 on 2020-11-10*/ + debugfs_create_u32(debugfs_table[i].name, mode, + mhi_netdev->dentry, + debugfs_table[i].ptr); + /*end added by tony.du for mantis 0062018 on 2020-11-10*/ + } + + debugfs_create_file("reset", mode, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_trigger_reset_fops); +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ + mhi_netdev_debugfs_dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, parent); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Remove notification received\n"); + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + 
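+	/* with pm_lock held for write, the xmit and poll paths now see enabled == false */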
write_unlock_irq(&mhi_netdev->pm_lock); + + napi_disable(&mhi_netdev->napi); + netif_napi_del(&mhi_netdev->napi); + mhi_netdev_dealloc(mhi_netdev); + unregister_netdev(mhi_netdev->ndev); + free_netdev(mhi_netdev->ndev); + flush_work(&mhi_netdev->alloc_work); + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev; + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + mhi_netdev->alias = 0; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + + mhi_netdev->mru = 0x4000; + if (mhi_dev->dev_id == 0x0304) { //SDX24 + mhi_netdev->mru = 0x8000; + } +#ifdef CONFIG_MHI_NETDEV_MBIM + mhi_netdev->rx_max = 0x8000; +#endif + + if (!strcmp(id->chan, "IP_HW0")) + mhi_netdev->interface_name = "pcie_mhi"; + else if (!strcmp(id->chan, "IP_SW0")) + mhi_netdev->interface_name = "pcie_swip"; + else + mhi_netdev->interface_name = id->chan; + + mhi_netdev->recycle_buf = false; + + mhi_netdev->rx_queue = mhi_netdev->recycle_buf ? + mhi_netdev_skb_recycle : mhi_netdev_alloc_skb; + + spin_lock_init(&mhi_netdev->rx_lock); + rwlock_init(&mhi_netdev->pm_lock); + INIT_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work); + + mhi_netdev->msg_lvl = MHI_MSG_LVL_INFO; + + /* setup network interface */ + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) { + pr_err("Error mhi_netdev_enable_iface ret:%d\n", ret); + return ret; + } + + mhi_netdev_create_debugfs(mhi_netdev); + + g_mhi_netdev = mhi_netdev; + + return 0; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_SW0" }, + { .chan = "IP_HW_ADPL" }, + { }, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +int __init mhi_device_netdev_init(struct dentry *parent) +{ + mhi_netdev_create_debugfs_dir(parent); + + return mhi_driver_register(&mhi_netdev_driver); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mhi_netdev_debugfs_dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} diff --git a/fibocom_MHI/src/devices/mhi_netdev.h b/fibocom_MHI/src/devices/mhi_netdev.h new file mode 100644 index 0000000..13735bb --- /dev/null +++ b/fibocom_MHI/src/devices/mhi_netdev.h @@ -0,0 +1,18 @@ +#ifndef _FIBO_MHI_NETDEV_H +#define _FIBO_MHI_NETDEV_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,15,0 )) +static inline void *fibo_skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memcpy(tmp,data, len); + + return tmp; +} +#endif + +#endif diff --git a/fibocom_MHI/src/devices/mhi_uci.c b/fibocom_MHI/src/devices/mhi_uci.c new file mode 100644 index 0000000..7195ab0 --- /dev/null +++ b/fibocom_MHI/src/devices/mhi_uci.c @@ -0,0 +1,785 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define DEVICE_NAME "mhi" +#define MHI_UCI_DRIVER_NAME "mhi_uci" + +struct uci_chan { + wait_queue_head_t wq; + spinlock_t lock; + struct list_head pending; /* user space waiting to read */ + struct uci_buf *cur_buf; /* current buffer user space reading */ + size_t rx_size; +}; + +struct uci_buf { + void *data; + size_t len; + struct list_head node; +}; + +struct uci_dev { + struct list_head node; + dev_t devt; + struct device *dev; + struct mhi_device *mhi_dev; + const char *chan; + struct mutex mutex; /* sync open and close */ + struct uci_chan ul_chan; + struct uci_chan dl_chan; + size_t mtu; + int ref_count; + bool enabled; + bool disconnect; + struct ktermios termios; + int sigs; +}; + +struct mhi_uci_drv { + struct list_head head; + struct mutex lock; + struct class *class; + int major; + dev_t dev_t; +}; + +enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR; + +typedef struct _QCQMI_HDR { + u8 IFType; + u16 Length; + u8 CtlFlags; // reserved + u8 QMIType; + u8 ClientId; +} __attribute__ ((packed)) *PQCQMI_HDR; + +#define MSG_VERB(fmt, ...) do { \ + if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MAX_UCI_DEVICES (64) +#define QUEC_MHI_UCI_ALWAYS_OPEN //by now, sdx20 can not handle "start-reset-start" operation, so the simply solution is keep start state + +static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES); +static struct mhi_uci_drv mhi_uci_drv; + +static int mhi_queue_inbound(struct uci_dev *uci_dev) +{ + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + size_t mtu = uci_dev->mtu; + void *buf; + struct uci_buf *uci_buf; + int ret = -EIO, i; + + for (i = 0; i < nr_trbs; i++) { + buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + uci_buf = buf + mtu; + uci_buf->data = buf; + + MSG_VERB("Allocated buf %d of %d size %zd\n", i, nr_trbs, mtu); + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, + MHI_EOT); + if (ret) { + kfree(buf); + MSG_ERR("Failed to queue buffer %d\n", i); + return ret; + } + } + + return ret; +} + +static long mhi_uci_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + long ret = -ERESTARTSYS; + + mutex_lock(&uci_dev->mutex); + if (uci_dev->enabled) { + switch (cmd) { + case TCGETS: +#ifndef TCGETS2 + ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios); +#else + ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios); +#endif + break; + + case TCSETSF: + case TCSETS: +#ifndef TCGETS2 + ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg); +#else + ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg); +#endif + break; + + case TIOCMSET: + case TIOCMBIS: + case TIOCMBIC: + { + uint32_t val; + + ret = get_user(val, (uint32_t *)arg); + if (ret) + return ret; + + switch (cmd) { + case TIOCMBIS: + uci_dev->sigs |= val; + break; + case TIOCMBIC: + uci_dev->sigs &= ~val; + break; + case TIOCMSET: + uci_dev->sigs = val; + break; + } + } + break; + + case TIOCMGET: + ret = put_user(uci_dev->sigs | TIOCM_RTS, (uint32_t *)arg); + break; + + case TCFLSH: + ret = 0; + break; + + default: + ret = mhi_ioctl(mhi_dev, cmd, arg); + break; + } + } + mutex_unlock(&uci_dev->mutex); + + return ret; +} + +static int mhi_uci_release(struct inode *inode, struct file *file) +{ + struct uci_dev *uci_dev = file->private_data; + + mutex_lock(&uci_dev->mutex); + uci_dev->ref_count--; + if (!uci_dev->ref_count) { + struct uci_buf *itr, *tmp; + struct uci_chan *uci_chan; + + MSG_LOG("Last client left, closing node\n"); + + if (uci_dev->enabled) + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + + /* clean inbound channel */ + uci_chan = &uci_dev->dl_chan; + list_for_each_entry_safe(itr, tmp, &uci_chan->pending, node) { + list_del(&itr->node); + kfree(itr->data); + } + if (uci_chan->cur_buf) + kfree(uci_chan->cur_buf->data); + + uci_chan->cur_buf = NULL; + + if (!uci_dev->enabled) { + MSG_LOG("Node is deleted, freeing dev node\n"); + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + return 0; + } + } + + mutex_unlock(&uci_dev->mutex); + + MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count); + + return 0; +} + +static unsigned int mhi_uci_poll(struct file *file, poll_table *wait) +{ + struct uci_dev *uci_dev = file->private_data; + 
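+	/* DL state drives POLLIN/POLLRDNORM; UL descriptor space drives POLLOUT/POLLWRNORM */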
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan;
+	unsigned int mask = 0;
+
+	poll_wait(file, &uci_dev->dl_chan.wq, wait);
+	poll_wait(file, &uci_dev->ul_chan.wq, wait);
+
+	uci_chan = &uci_dev->dl_chan;
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		mask = POLLERR;
+	} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
+		MSG_VERB("Client can read from node\n");
+		mask |= POLLIN | POLLRDNORM;
+	}
+	spin_unlock_bh(&uci_chan->lock);
+
+	uci_chan = &uci_dev->ul_chan;
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		mask |= POLLERR;
+	} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
+		MSG_VERB("Client can write to node\n");
+		mask |= POLLOUT | POLLWRNORM;
+	}
+
+	if (uci_dev->disconnect)
+		mask |= POLLHUP;
+
+	spin_unlock_bh(&uci_chan->lock);
+
+	MSG_VERB("Client attempted to poll, returning mask 0x%x\n", mask);
+
+	return mask;
+}
+
+static ssize_t mhi_uci_write(struct file *file,
+			     const char __user *buf,
+			     size_t count,
+			     loff_t *offp)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->ul_chan;
+	size_t bytes_xfered = 0;
+	int ret;
+
+	if (!buf || !count)
+		return -EINVAL;
+
+	/* confirm channel is active */
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		spin_unlock_bh(&uci_chan->lock);
+		return -ERESTARTSYS;
+	}
+
+	MSG_VERB("Enter: to xfer:%zd bytes\n", count);
+
+	while (count) {
+		size_t xfer_size;
+		void *kbuf;
+		enum MHI_FLAGS flags;
+
+		spin_unlock_bh(&uci_chan->lock);
+
+		/* non-blocking write: return early here; the lock is already
+		 * dropped, so falling through to the final unlock would
+		 * unlock twice
+		 */
+		if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) == 0 &&
+		    (file->f_mode & FMODE_NDELAY))
+			return bytes_xfered ? bytes_xfered : -EAGAIN;
+
+		/* wait for free descriptors */
+		ret = wait_event_interruptible(uci_chan->wq,
+					       (!uci_dev->enabled) ||
+					       mhi_get_no_free_descriptors
+					       (mhi_dev, DMA_TO_DEVICE) > 0);
+
+		if (ret == -ERESTARTSYS) {
+			MSG_LOG("Exit signal caught for node\n");
+			return -ERESTARTSYS;
+		}
+
+		xfer_size = min_t(size_t, count, uci_dev->mtu);
+		kbuf = kmalloc(xfer_size, GFP_KERNEL);
+		if (!kbuf) {
+			MSG_ERR("Failed to allocate memory %zd\n", xfer_size);
+			return -ENOMEM;
+		}
+
+		ret = copy_from_user(kbuf, buf, xfer_size);
+		if (unlikely(ret)) {
+			kfree(kbuf);
+			return -EFAULT;
+		}
+
+		spin_lock_bh(&uci_chan->lock);
+		flags = MHI_EOT;
+		if (uci_dev->enabled)
+			ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
+						 xfer_size, flags);
+		else
+			ret = -ERESTARTSYS;
+
+		if (ret) {
+			kfree(kbuf);
+			goto sys_interrupt;
+		}
+
+		bytes_xfered += xfer_size;
+		count -= xfer_size;
+		buf += xfer_size;
+	}
+
+	spin_unlock_bh(&uci_chan->lock);
+	MSG_VERB("Exit: Number of bytes xferred:%zd\n", bytes_xfered);
+
+	return bytes_xfered;
+
+sys_interrupt:
+	spin_unlock_bh(&uci_chan->lock);
+
+	return ret;
+}
+
+static ssize_t mhi_uci_read(struct file *file,
+			    char __user *buf,
+			    size_t count,
+			    loff_t *ppos)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	struct uci_buf *uci_buf;
+	char *ptr;
+	size_t to_copy;
+	int ret = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	MSG_VERB("Client provided buf len:%zd\n", count);
+
+	/* confirm channel is active */
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		spin_unlock_bh(&uci_chan->lock);
+		return -ERESTARTSYS;
+	}
+
+	/* No data available to read, wait */
+	if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
+		MSG_VERB("No data available to read waiting\n");
+
+		spin_unlock_bh(&uci_chan->lock);
+
+		if 
(file->f_mode & FMODE_NDELAY) + return -EAGAIN; + + ret = wait_event_interruptible(uci_chan->wq, + (!uci_dev->enabled || + !list_empty(&uci_chan->pending))); + if (ret == -ERESTARTSYS) { + MSG_LOG("Exit signal caught for node\n"); + return -ERESTARTSYS; + } + + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + MSG_LOG("node is disabled\n"); + ret = -ERESTARTSYS; + goto read_error; + } + } + + /* new read, get the next descriptor from the list */ + if (!uci_chan->cur_buf) { + uci_buf = list_first_entry_or_null(&uci_chan->pending, + struct uci_buf, node); + if (unlikely(!uci_buf)) { + ret = -EIO; + goto read_error; + } + + list_del(&uci_buf->node); + uci_chan->cur_buf = uci_buf; + uci_chan->rx_size = uci_buf->len; + MSG_VERB("Got pkt of size:%zd\n", uci_chan->rx_size); + } + + uci_buf = uci_chan->cur_buf; + spin_unlock_bh(&uci_chan->lock); + + /* Copy the buffer to user space */ + to_copy = min_t(size_t, count, uci_chan->rx_size); + ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size); + ret = copy_to_user(buf, ptr, to_copy); + if (ret) + return ret; + + MSG_VERB("Copied %zd of %zd bytes\n", to_copy, uci_chan->rx_size); + uci_chan->rx_size -= to_copy; + + /* we finished with this buffer, queue it back to hardware */ + if (!uci_chan->rx_size) { + spin_lock_bh(&uci_chan->lock); + uci_chan->cur_buf = NULL; + + if (uci_dev->enabled) + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, + uci_buf->data, uci_dev->mtu, + MHI_EOT); + else + ret = -ERESTARTSYS; + + if (ret) { + MSG_ERR("Failed to recycle element\n"); + kfree(uci_buf->data); + goto read_error; + } + + spin_unlock_bh(&uci_chan->lock); + } + + MSG_VERB("Returning %zd bytes\n", to_copy); + + return to_copy; + +read_error: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static int mhi_uci_open(struct inode *inode, struct file *filp) +{ + struct uci_dev *uci_dev; + int ret = -EIO; + struct uci_buf *buf_itr, *tmp; + struct uci_chan *dl_chan; + + mutex_lock(&mhi_uci_drv.lock); + list_for_each_entry(uci_dev, &mhi_uci_drv.head, node) { + if (uci_dev->devt == inode->i_rdev) { + ret = 0; + break; + } + } + mutex_unlock(&mhi_uci_drv.lock); + + /* could not find a minor node */ + if (ret) + return ret; + + mutex_lock(&uci_dev->mutex); + if (!uci_dev->enabled) { + MSG_ERR("Node exist, but not in active state!\n"); + goto error_open_chan; + } + + uci_dev->ref_count++; + + MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count); + + if (uci_dev->ref_count == 1) { + MSG_LOG("Starting channel\n"); + ret = mhi_prepare_for_transfer(uci_dev->mhi_dev); + if (ret) { + MSG_ERR("Error starting transfer channels\n"); + uci_dev->ref_count--; + goto error_open_chan; + } + + ret = mhi_queue_inbound(uci_dev); + if (ret) + goto error_rx_queue; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + uci_dev->ref_count++; + if (uci_dev->mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN) { + } + if (uci_dev->mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN) { + } +#endif + } + + filp->private_data = uci_dev; + mutex_unlock(&uci_dev->mutex); + + return 0; + + error_rx_queue: + dl_chan = &uci_dev->dl_chan; + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + list_for_each_entry_safe(buf_itr, tmp, &dl_chan->pending, node) { + list_del(&buf_itr->node); + kfree(buf_itr->data); + } + + error_open_chan: + mutex_unlock(&uci_dev->mutex); + + return ret; +} + +static const struct file_operations mhidev_fops = { + .open = mhi_uci_open, + .release = mhi_uci_release, + .read = mhi_uci_read, + .write = mhi_uci_write, + .poll = mhi_uci_poll, + .unlocked_ioctl = mhi_uci_ioctl, +}; + +static void 
mhi_uci_remove(struct mhi_device *mhi_dev) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Enter\n"); + + /* disable the node */ + mutex_lock(&uci_dev->mutex); + spin_lock_irq(&uci_dev->dl_chan.lock); + spin_lock_irq(&uci_dev->ul_chan.lock); + uci_dev->enabled = false; + uci_dev->disconnect = true; + spin_unlock_irq(&uci_dev->ul_chan.lock); + spin_unlock_irq(&uci_dev->dl_chan.lock); + wake_up(&uci_dev->dl_chan.wq); + wake_up(&uci_dev->ul_chan.wq); + + /* delete the node to prevent new opens */ + device_destroy(mhi_uci_drv.class, uci_dev->devt); + uci_dev->dev = NULL; + mutex_lock(&mhi_uci_drv.lock); + list_del(&uci_dev->node); + mutex_unlock(&mhi_uci_drv.lock); + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count > 0) + uci_dev->ref_count--; +#endif + + /* safe to free memory only if all file nodes are closed */ + if (!uci_dev->ref_count) { + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + return; + } + + mutex_unlock(&uci_dev->mutex); + MSG_LOG("Exit\n"); +} + +static int mhi_uci_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct uci_dev *uci_dev; + int minor; + int dir; + + uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL); + if (!uci_dev) + return -ENOMEM; + + mutex_init(&uci_dev->mutex); + uci_dev->mhi_dev = mhi_dev; + + minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES); + if (minor >= MAX_UCI_DEVICES) { + kfree(uci_dev); + return -ENOSPC; + } + + mutex_lock(&uci_dev->mutex); + mutex_lock(&mhi_uci_drv.lock); + + uci_dev->devt = MKDEV(mhi_uci_drv.major, minor); + + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s", + mhi_dev->chan_name); + + set_bit(minor, uci_minors); + + for (dir = 0; dir < 2; dir++) { + struct uci_chan *uci_chan = (dir) ? 
+			&uci_dev->ul_chan : &uci_dev->dl_chan;
+		spin_lock_init(&uci_chan->lock);
+		init_waitqueue_head(&uci_chan->wq);
+		INIT_LIST_HEAD(&uci_chan->pending);
+	}
+
+	uci_dev->termios = tty_std_termios;
+	uci_dev->sigs = 0;
+
+	uci_dev->mtu = id->driver_data;
+	mhi_device_set_devdata(mhi_dev, uci_dev);
+	uci_dev->enabled = true;
+
+	list_add(&uci_dev->node, &mhi_uci_drv.head);
+	mutex_unlock(&mhi_uci_drv.lock);
+	mutex_unlock(&uci_dev->mutex);
+
+	MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
+
+	return 0;
+}
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+			   struct mhi_result *mhi_result)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->ul_chan;
+
+	MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
+		 mhi_result->bytes_xferd);
+
+	kfree(mhi_result->buf_addr);
+	if (!mhi_result->transaction_status)
+		wake_up(&uci_chan->wq);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+			   struct mhi_result *mhi_result)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	unsigned long flags;
+	struct uci_buf *buf;
+
+	MSG_VERB("chan:mhi_dev->dl_chan_id:%d, status:%d receive_len:%zu\n",
+		 mhi_dev->dl_chan_id, mhi_result->transaction_status,
+		 mhi_result->bytes_xferd);
+
+	if (mhi_result->transaction_status == -ENOTCONN) {
+		kfree(mhi_result->buf_addr);
+		return;
+	}
+
+	spin_lock_irqsave(&uci_chan->lock, flags);
+	buf = mhi_result->buf_addr + uci_dev->mtu;
+	if (buf->data != mhi_result->buf_addr) {
+		MSG_LOG("%p, %p\n", buf->data, mhi_result->buf_addr);
+	}
+	buf->data = mhi_result->buf_addr;
+	buf->len = mhi_result->bytes_xferd;
+
+	if (uci_dev->mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN) {
+		PQCQMI_HDR pHdr = (PQCQMI_HDR)(buf->data);
+		u16 qmiLength = (le16_to_cpu(pHdr->Length) + 1);
+
+// Opening the QMI channel without reading from it triggers the length-mismatch
+// handling below; the root cause is unknown for now, so using uqmi/libqmi is
+// not recommended. Reproduce with:
+// dmesg -c > /dev/null; echo 1 > /dev/mhi_QMI0; sleep 3; ./FIBO-CM -d /dev/mhi_QMI0 -v + if (qmiLength != buf->len) { + unsigned char *d = (unsigned char *) pHdr; + MSG_ERR("bytes_xferd=%zd, qmiLength=%d %02x%02x%02x%02x - %02x%02x%02x%02x\n", buf->len, qmiLength, + d[0],d[1],d[2],d[3],d[qmiLength+0],d[qmiLength+1],d[qmiLength+2],d[qmiLength+3]); + if (buf->len > qmiLength) + buf->len = qmiLength; + } + } + + list_add_tail(&buf->node, &uci_chan->pending); + spin_unlock_irqrestore(&uci_chan->lock, flags); + + wake_up(&uci_chan->wq); +} + +#define DIAG_MAX_PCIE_PKT_SZ 2048 //define by module + +/* .driver_data stores max mtu */ +static const struct mhi_device_id mhi_uci_match_table[] = { + { .chan = "LOOPBACK", .driver_data = 0x1000 }, + { .chan = "SAHARA", .driver_data = 0x4000 }, + { .chan = "EDL", .driver_data = 0x4000 }, + { .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ }, + { .chan = "EFS", .driver_data = 0x1000 }, +#ifdef CONFIG_MHI_NETDEV_MBIM + { .chan = "MBIM", .driver_data = 0x1000 }, +#else + { .chan = "QMI0", .driver_data = 0x1000 }, + { .chan = "QMI1", .driver_data = 0x1000 }, +#endif + { .chan = "TF", .driver_data = 0x1000 }, + { .chan = "BL", .driver_data = 0x1000 }, + { .chan = "DUN", .driver_data = 0x1000 }, + { .chan = "GNSS", .driver_data = 0x1000 }, + { .chan = "AUDIO", .driver_data = 0x1000 }, + { }, +}; + +static struct mhi_driver mhi_uci_driver = { + .id_table = mhi_uci_match_table, + .remove = mhi_uci_remove, + .probe = mhi_uci_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = MHI_UCI_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_device_uci_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); + if (ret < 0) + return ret; + + mhi_uci_drv.major = ret; + mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); + if (IS_ERR(mhi_uci_drv.class)) { + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_uci_drv.lock); + INIT_LIST_HEAD(&mhi_uci_drv.head); + + ret = mhi_driver_register(&mhi_uci_driver); + if (ret) { + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + } + + return ret; +} + +void mhi_device_uci_exit(void) +{ + mhi_driver_unregister(&mhi_uci_driver); + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); +} diff --git a/luci-app-pcimodem/Makefile b/luci-app-pcimodem/Makefile index 1bcb748..6f71bbb 100644 --- a/luci-app-pcimodem/Makefile +++ b/luci-app-pcimodem/Makefile @@ -8,7 +8,7 @@ include $(TOPDIR)/rules.mk LUCI_TITLE:=PCI Modem Server -LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G +LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G +kmod-pcie_mhi_fb include $(TOPDIR)/feeds/luci/luci.mk