diff --git a/driver/quectel_MHI/Makefile b/driver/quectel_MHI/Makefile new file mode 100755 index 0000000..f7dd11f --- /dev/null +++ b/driver/quectel_MHI/Makefile @@ -0,0 +1,46 @@ +# +# Copyright (C) 2015 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=pcie_mhi +PKG_VERSION:=1.3.8 +PKG_RELEASE:=1 + +include $(INCLUDE_DIR)/kernel.mk +include $(INCLUDE_DIR)/package.mk + +define KernelPackage/pcie_mhi + SUBMENU:=WWAN Support + TITLE:=Kernel pcie driver for MHI device + FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko + AUTOLOAD:=$(call AutoLoad,90,pcie_mhi) +endef + +define KernelPackage/pcie_mhi/description + Kernel module for register a custom pciemhi platform device. +endef + +MAKE_OPTS:= \ + ARCH="$(LINUX_KARCH)" \ + CROSS_COMPILE="$(TARGET_CROSS)" \ + CXXFLAGS="$(TARGET_CXXFLAGS)" \ + M="$(PKG_BUILD_DIR)" \ + $(EXTRA_KCONFIG) + +define Build/Prepare + mkdir -p $(PKG_BUILD_DIR) + $(CP) ./src/* $(PKG_BUILD_DIR)/ +endef + +define Build/Compile + $(MAKE) -C "$(LINUX_DIR)" \ + $(MAKE_OPTS) \ + modules +endef + +$(eval $(call KernelPackage,pcie_mhi)) diff --git a/driver/quectel_MHI/src/Makefile b/driver/quectel_MHI/src/Makefile new file mode 100644 index 0000000..1b2c1f5 --- /dev/null +++ b/driver/quectel_MHI/src/Makefile @@ -0,0 +1,34 @@ +#ccflags-y += -g +obj-m += pcie_mhi.o +pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o +pcie_mhi-objs += devices/mhi_uci.o + +ifeq (1,1) +pcie_mhi-objs += devices/mhi_netdev_quectel.o +else +pcie_mhi-objs += devices/mhi_netdev.o +pcie_mhi-objs += devices/rmnet_handler.o +endif + +PWD := $(shell pwd) +ifeq ($(ARCH),) +ARCH := $(shell uname -m) +endif +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := +endif +ifeq ($(KDIR),) +KDIR := /lib/modules/$(shell uname -r)/build +endif + +pcie_mhi: clean + $(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules + #cp pcie_mhi.ko /tftpboot/ + +clean: + $(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean + find . -name *.o.ur-safe | xargs rm -f + +install: pcie_mhi + sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/ + sudo depmod diff --git a/driver/quectel_MHI/src/README b/driver/quectel_MHI/src/README new file mode 100644 index 0000000..2bb6ff0 --- /dev/null +++ b/driver/quectel_MHI/src/README @@ -0,0 +1,36 @@ +1. porting pcie_mhi driver as next + +$ git diff drivers/Makefile +diff --git a/drivers/Makefile b/drivers/Makefile +index 77fbc52..e45837e 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA) += fpga/ + obj-$(CONFIG_FSI) += fsi/ + obj-$(CONFIG_TEE) += tee/ + obj-$(CONFIG_MULTIPLEXER) += mux/ ++obj-y += pcie_mhi/ + +$ tree drivers/pcie_mhi/ -L 1 +drivers/pcie_mhi/ + controllers + core + devices + Makefile + +2. check RG500 attach pcie_mhi driver successful + +root@OpenWrt:/# lspci +00:00.0 Class 0604: 17cb:0302 +01:00.0 Class ff00: 17cb:0306 + +root@OpenWrt:~# dmesg | grep mhi +[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6 +[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 + +3. 
how to use: see the following logs

log/QXDM_OVER_PCIE.txt
log/AT_OVER_PCIE.txt
log/MBIM_OVER_PCIE.txt
log/QMI_OVER_PCIE.txt

diff --git a/driver/quectel_MHI/src/ReleaseNote.txt b/driver/quectel_MHI/src/ReleaseNote.txt
new file mode 100644
index 0000000..d923b98
--- /dev/null
+++ b/driver/quectel_MHI/src/ReleaseNote.txt
@@ -0,0 +1,103 @@
+Release Notes
+
+[V1.3.4]
+Date: 12/8/2022
+enhancement:
+ 1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
+ 2. show PCIe link speed and width at driver probe
+ 3. check PCIe link status by reading the PCIe VID and PID at driver probe;
+    if the PCIe link is down, return -EIO
+ 4. support RM520 (1eac:1004)
+ 5. support QMAP command packets
+fix:
+ 1. fix TX queue being wrongly stopped during uplink throughput tests
+ 2. fix module failing to boot up after QFirehose (very small probability)
+ 3. mhi uci: add a mutex lock for concurrent reads/writes
+
+[V1.3.3]
+Date: 30/6/2022
+enhancement:
+ 1. remove an unnecessary kmalloc during QFirehose
+ 2. support MHI monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
+ 3. set the ring size of event 0 to 256 (from 1024), as required by x6x
+ 4. support the PCIe local network card mhi_swip0 (chan 46/47), disabled by default
+ 5. port the IPQ5018 MHI rate control code from spf11.5
+ 6. set the PCIe rmnet downlink maximum QMAP packet size to 15KB (same as the IPQ MHI driver)
+ 7. support setting a different MAC address for each rmnet net card
+ 8. when mhi netdev fails to allocate memory, use delayed work instead of work
+ 9. optimize the code path for 'modem is still in MHI_EE_PTHRU when the driver loads'
+fix:
+ 1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
+ 2. set the DMA mask at driver probe; some SoCs such as rpi_4 need it
+
+[V1.3.2]
+Date: 12/16/2021
+enhancement:
+ 1. support Linux kernel V5.14
+ 2. mhi_netdev_quectel.c: do not print logs in softirq context
+
+[V1.3.1]
+Date: 9/26/2021
+enhancement:
+fix:
+
+[V1.3.0.19]
+Date: 9/18/2021
+enhancement:
+ 1. support sdx62 (17cb:0308)
+ 2. support IPQ5018's NSS
+ 3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c;
+    pcie_mhi.ko must then be loaded after rmnet_nss.ko
+ 4. allow the BHI IRQ to be non-zero (for ipq5018)
+fix:
+
+[V1.3.0.18]
+Date: 4/14/2021
+enhancement:
+ 1. support multiple MBIM calls, usage:
+    # insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
+    # quectel-mbim-proxy -d /dev/mhi_MBIM &
+    # quectel-CM -n X
+fix:
+
+[V1.3.0.17]
+Date: 3/11/2021
+enhancement:
+fix:
+ 1. fix very high CPU load during throughput tests when only one MSI interrupt is available
+ 2. fix error on latest X24 modem
+
+[V1.3.0.16]
+Date: 11/18/2020
+enhancement:
+fix:
+ 1. increase the ring size to 32; for inbound channels, if one ring is full the modem will not generate MSI interrupts for any channel
+
+[V1.3.0.15]
+Date: 10/30/2020
+enhancement:
+ 1. support multiple modems, named /dev/mhi_X
+fix:
+ 1. fix compile error on kernel v5.8
+
+[V1.3.0.14]
+Date: 10/9/2020
+enhancement:
+ 1. support EM120 & EM160
+fix:
+ 1. fix compile error on kernel v5.6
+ 2. support runtime suspend
+
+[V1.3.0.13]
+Date: 9/7/2020
+enhancement:
+ 1. support EM120 & EM160
+fix:
+ 1. fix error on X55 + PCIe 2.0 (e.g. IPQ4019)
+ 2. support runtime suspend
+
+[V1.3.0.12]
+Date: 7/7/2020
+enhancement:
+ 1. 
suppport create only none netcard (enabled by marco MHI_NETDEV_ONE_CARD_MODE), +fix: \ No newline at end of file diff --git a/driver/quectel_MHI/src/controllers/Kconfig b/driver/quectel_MHI/src/controllers/Kconfig new file mode 100644 index 0000000..e18b38b --- /dev/null +++ b/driver/quectel_MHI/src/controllers/Kconfig @@ -0,0 +1,13 @@ +menu "MHI controllers" + +config MHI_QTI + tristate "MHI QTI" + depends on MHI_BUS + help + If you say yes to this option, MHI bus support for QTI modem chipsets + will be enabled. QTI PCIe based modems uses MHI as the communication + protocol. MHI control driver is the bus master for such modems. As the + bus master driver, it oversees power management operations such as + suspend, resume, powering on and off the device. + +endmenu diff --git a/driver/quectel_MHI/src/controllers/Makefile b/driver/quectel_MHI/src/controllers/Makefile new file mode 100644 index 0000000..ab9ec55 --- /dev/null +++ b/driver/quectel_MHI/src/controllers/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o diff --git a/driver/quectel_MHI/src/controllers/mhi_arch_qti.c b/driver/quectel_MHI/src/controllers/mhi_arch_qti.c new file mode 100644 index 0000000..de19d94 --- /dev/null +++ b/driver/quectel_MHI/src/controllers/mhi_arch_qti.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" +#include "mhi_qti.h" + +struct arch_info { + struct mhi_dev *mhi_dev; + struct msm_bus_scale_pdata *msm_bus_pdata; + u32 bus_client; + struct pci_saved_state *pcie_state; + struct pci_saved_state *ref_pcie_state; + struct dma_iommu_mapping *mapping; +}; + +struct mhi_bl_info { + struct mhi_device *mhi_device; + async_cookie_t cookie; + void *ipc_log; +}; + +/* ipc log markings */ +#define DLOG "Dev->Host: " +#define HLOG "Host: " + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_IPC_LOG_PAGES (100) +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE; + +#else + +#define MHI_IPC_LOG_PAGES (10) +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR; + +#endif + +static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + + MHI_LOG("Setting bus request to index %d\n", index); + + if (arch_info->bus_client) + return msm_bus_scale_client_update_request( + arch_info->bus_client, + index); + + /* default return success */ + return 0; +} + +static void mhi_bl_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev); + char *buf = mhi_result->buf_addr; + + /* force a null at last character */ + buf[mhi_result->bytes_xferd - 1] = 0; + + ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf); +} + +static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ +} + +static void mhi_bl_remove(struct mhi_device *mhi_dev) +{ + struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev); + + ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n"); + + /* wait for boot monitor to exit */ + async_synchronize_cookie(mhi_bl_info->cookie + 1); +} + +static void mhi_bl_boot_monitor(void *data, async_cookie_t cookie) +{ + struct mhi_bl_info *mhi_bl_info = data; + struct mhi_device *mhi_device = 
mhi_bl_info->mhi_device; + struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl; + /* 15 sec timeout for booting device */ + const u32 timeout = msecs_to_jiffies(15000); + + /* wait for device to enter boot stage */ + wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS + || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION, + timeout); + + if (mhi_cntrl->ee == MHI_EE_AMSS) { + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Device successfully booted to mission mode\n"); + + mhi_unprepare_from_transfer(mhi_device); + } else { + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Device failed to boot to mission mode, ee = %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + } +} + +static int mhi_bl_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + char node_name[32]; + struct mhi_bl_info *mhi_bl_info; + + mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info), + GFP_KERNEL); + if (!mhi_bl_info) + return -ENOMEM; + + snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot); + + mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, + node_name, 0); + if (!mhi_bl_info->ipc_log) + return -EINVAL; + + mhi_bl_info->mhi_device = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_bl_info); + + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Entered SBL, Session ID:0x%x\n", + mhi_dev->mhi_cntrl->session_id); + + /* start a thread to monitor entering mission mode */ + mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info); + + return 0; +} + +static const struct mhi_device_id mhi_bl_match_table[] = { + { .chan = "BL" }, + {}, +}; + +static struct mhi_driver mhi_bl_driver = { + .id_table = mhi_bl_match_table, + .remove = mhi_bl_remove, + .probe = mhi_bl_probe, + .ul_xfer_cb = mhi_bl_dummy_cb, + .dl_xfer_cb = mhi_bl_dl_cb, + .driver = { + .name = "MHI_BL", + .owner = THIS_MODULE, + }, +}; + +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + char node[32]; + + if (!arch_info) { + arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev, + sizeof(*arch_info), GFP_KERNEL); + if (!arch_info) + return -ENOMEM; + + mhi_dev->arch_info = arch_info; + + snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES, + node, 0); + mhi_cntrl->log_lvl = mhi_ipc_log_lvl; + + /* save reference state for pcie config space */ + arch_info->ref_pcie_state = pci_store_saved_state( + mhi_dev->pci_dev); + + mhi_driver_register(&mhi_bl_driver); + } + + return mhi_arch_set_bus_request(mhi_cntrl, 1); +} + +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ + mhi_arch_set_bus_request(mhi_cntrl, 0); +} + +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + if (graceful) { + pci_clear_master(pci_dev); + ret = pci_save_state(mhi_dev->pci_dev); + if (ret) { + MHI_ERR("Failed with pci_save_state, ret:%d\n", ret); + return ret; + } + + arch_info->pcie_state = pci_store_saved_state(pci_dev); + pci_disable_device(pci_dev); + } + + /* + * We will always attempt to put link into D3hot, however + * link down may have happened 
due to error fatal, so + * ignoring the return code + */ + pci_set_power_state(pci_dev, PCI_D3hot); + + /* release the resources */ + mhi_arch_set_bus_request(mhi_cntrl, 0); + + MHI_LOG("Exited\n"); + + return 0; +} + +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + /* request resources and establish link trainning */ + ret = mhi_arch_set_bus_request(mhi_cntrl, 1); + if (ret) + MHI_LOG("Could not set bus frequency, ret:%d\n", ret); + + ret = pci_set_power_state(pci_dev, PCI_D0); + if (ret) { + MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Failed to enable device, ret:%d\n", ret); + return ret; + } + + ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state); + if (ret) + MHI_LOG("Failed to load saved cfg state\n"); + + pci_restore_state(pci_dev); + pci_set_master(pci_dev); + + MHI_LOG("Exited\n"); + + return 0; +} diff --git a/driver/quectel_MHI/src/controllers/mhi_qcom.c b/driver/quectel_MHI/src/controllers/mhi_qcom.c new file mode 100644 index 0000000..df6ce19 --- /dev/null +++ b/driver/quectel_MHI/src/controllers/mhi_qcom.c @@ -0,0 +1,715 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" +#include "mhi_qcom.h" + +#if 1 +#ifndef PCI_IRQ_MSI +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_enable_msi_range(dev, min_vecs, max_vecs); +} + +static void pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msi(dev); +} + +static int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return dev->irq + nr; +} +#endif +#endif + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(0x2C7C, 0x0512)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id); + +static struct pci_driver mhi_pcie_driver; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + + pci_free_irq_vectors(pci_dev); + iounmap(mhi_cntrl->regs); + mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, mhi_dev->resn); + pci_disable_device(pci_dev); +} + +static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + resource_size_t start, len; + int i; + + mhi_dev->resn = MHI_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, mhi_dev->resn); + if (ret) { + MHI_ERR("Error assign pci resources, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Error enabling device, ret:%d\n", ret); + goto error_enable_device; + } + + ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi"); + if (ret) { + MHI_ERR("Error pci_request_region, ret:%d\n", ret); + goto error_request_region; + } + + pci_set_master(pci_dev); + + start = pci_resource_start(pci_dev, mhi_dev->resn); + len = pci_resource_len(pci_dev, mhi_dev->resn); + mhi_cntrl->regs = ioremap_nocache(start, len); + MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs); + if (!mhi_cntrl->regs) { + MHI_ERR("Error ioremap region\n"); + goto error_ioremap; + } + + ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + if (ret == -ENOSPC) { + /* imx_3.14.52_1.1.0_ga + diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c + index f06e8f0..6a9614f 100644 + --- a/drivers/pci/host/pcie-designware.c + +++ b/drivers/pci/host/pcie-designware.c + @@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + if (msgvec > 5) + msgvec = 0; + + +#if 1 //Add by Quectel 20190419 + + if (msgvec > 0 && pdev->vendor == 0x17cb) { + + 
dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec); + + msgvec = 0; + + } + +#endif + + + irq = assign_irq((1 << msgvec), desc, &pos); + if (irq < 0) + return irq; + */ + } + //imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device + if (ret != 1) { + MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required); + goto error_req_msi; + } + } + + mhi_cntrl->msi_allocated = ret; + MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq); + + for (i = 0; i < mhi_cntrl->msi_allocated; i++) { + mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i); + if (mhi_cntrl->irq[i] < 0) { + ret = mhi_cntrl->irq[i]; + goto error_get_irq_vec; + } + } + +#if 0 + /* configure runtime pm */ + pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS); + pm_runtime_dont_use_autosuspend(&pci_dev->dev); + pm_suspend_ignore_children(&pci_dev->dev, true); + + /* + * pci framework will increment usage count (twice) before + * calling local device driver probe function. + * 1st pci.c pci_pm_init() calls pm_runtime_forbid + * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync + * Framework expect pci device driver to call + * pm_runtime_put_noidle to decrement usage count after + * successful probe and and call pm_runtime_allow to enable + * runtime suspend. + */ + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_put_noidle(&pci_dev->dev); +#endif + + return 0; + +error_get_irq_vec: + pci_free_irq_vectors(pci_dev); + +error_req_msi: + iounmap(mhi_cntrl->regs); + +error_ioremap: + pci_clear_master(pci_dev); + +error_request_region: + pci_disable_device(pci_dev); + +error_enable_device: + pci_release_region(pci_dev, mhi_dev->resn); + + return ret; +} + +#ifdef CONFIG_PM +static int mhi_runtime_idle(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered returning -EBUSY\n"); + + /* + * RPM framework during runtime resume always calls + * rpm_idle to see if device ready to suspend. + * If dev.power usage_count count is 0, rpm fw will call + * rpm_idle cb to see if device is ready to suspend. + * if cb return 0, or cb not defined the framework will + * assume device driver is ready to suspend; + * therefore, fw will schedule runtime suspend. + * In MHI power management, MHI host shall go to + * runtime suspend only after entering MHI State M2, even if + * usage count is 0. Return -EBUSY to disable automatic suspend. 
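 + * In this controller, runtime suspend is instead requested explicitly:
 + * mhi_status_cb() below calls pm_request_autosuspend() when the MHI core
 + * reports MHI_CB_IDLE.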
+ */ + return -EBUSY; +} + +static int mhi_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_pm_suspend(mhi_cntrl); + if (ret) { + MHI_LOG("Abort due to ret:%d\n", ret); + goto exit_runtime_suspend; + } + + ret = mhi_arch_link_off(mhi_cntrl, true); + if (ret) + MHI_ERR("Failed to Turn off link ret:%d\n", ret); + +exit_runtime_suspend: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with ret:%d\n", ret); + + return ret; +} + +static int mhi_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* turn on link */ + ret = mhi_arch_link_on(mhi_cntrl); + if (ret) + goto rpm_resume_exit; + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + +rpm_resume_exit: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with :%d\n", ret); + + return ret; +} + +static int mhi_system_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + ret = mhi_runtime_resume(dev); + if (ret) { + MHI_ERR("Failed to resume link\n"); + } else { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return ret; +} + +int mhi_system_suspend(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered\n"); + + /* if rpm status still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) + return mhi_runtime_suspend(dev); + + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + + MHI_LOG("Exit\n"); + return 0; +} +#endif + +/* checks if link is down */ +static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + u16 dev_id; + int ret; + + /* try reading device id, if dev id don't match, link is down */ + ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? 
-EIO : 0; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_put_noidle(dev); +} + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + if (reason == MHI_CB_IDLE) { + MHI_LOG("Schedule runtime suspend 1\n"); + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + } +} + +int mhi_debugfs_trigger_m0(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Exit\n"); + pm_runtime_get(&mhi_dev->pci_dev->dev); + pm_runtime_put(&mhi_dev->pci_dev->dev); + + return 0; +} + +int mhi_debugfs_trigger_m3(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Entry\n"); + pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev); + pm_request_autosuspend(&mhi_dev->pci_dev->dev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, + mhi_debugfs_trigger_m0, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, + mhi_debugfs_trigger_m3, "%llu\n"); + +static int mhi_init_debugfs_trigger_go(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + + MHI_LOG("Trigger power up sequence\n"); + + mhi_async_power_up(mhi_cntrl); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL, + mhi_init_debugfs_trigger_go, "%llu\n"); + + +int mhi_init_debugfs_debug_show(struct seq_file *m, void *d) +{ + seq_puts(m, "Enable debug mode to debug external soc\n"); + seq_puts(m, + "Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n"); + seq_puts(m, "No spaces between parameters\n"); + seq_puts(m, "\t1. devid : 0 or pci device id to register\n"); + seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n"); + seq_puts(m, "\t3. domain: Rootcomplex\n"); + seq_puts(m, "\t4. 
smmu_cfg: smmu configuration mask:\n"); + seq_puts(m, "\t\t- BIT0: ATTACH\n"); + seq_puts(m, "\t\t- BIT1: S1 BYPASS\n"); + seq_puts(m, "\t\t-BIT2: FAST_MAP\n"); + seq_puts(m, "\t\t-BIT3: ATOMIC\n"); + seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n"); + seq_puts(m, "\t\t-BIT5: GEOMETRY\n"); + seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n"); + seq_puts(m, "Examples inputs: '0x307,10000'\n"); + seq_puts(m, "\techo '0,10000,1'\n"); + seq_puts(m, "\techo '0x307,10000,0,0x3d'\n"); + seq_puts(m, "firmware image name will be changed to debug.mbn\n"); + + return 0; +} + +static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file) +{ + return single_open(file, mhi_init_debugfs_debug_show, NULL); +} + +static ssize_t mhi_init_debugfs_debug_write(struct file *fp, + const char __user *ubuf, + size_t count, + loff_t *pos) +{ + char *buf = kmalloc(count + 1, GFP_KERNEL); + /* #,devid,timeout,domain,smmu-cfg */ + int args[5] = {0}; + static char const *dbg_fw = "debug.mbn"; + int ret; + struct mhi_controller *mhi_cntrl = fp->f_inode->i_private; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_device_id *id; + + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf, ubuf, count); + if (ret) + goto error_read; + buf[count] = 0; + get_options(buf, ARRAY_SIZE(args), args); + kfree(buf); + + /* override default parameters */ + mhi_cntrl->fw_image = dbg_fw; + mhi_cntrl->edl_image = dbg_fw; + + if (args[0] >= 2 && args[2]) + mhi_cntrl->timeout_ms = args[2]; + + if (args[0] >= 3 && args[3]) + mhi_cntrl->domain = args[3]; + + if (args[0] >= 4 && args[4]) + mhi_dev->smmu_cfg = args[4]; + + /* If it's a new device id register it */ + if (args[0] && args[1]) { + /* find the debug_id and overwrite it */ + for (id = mhi_pcie_device_id; id->vendor; id++) + if (id->device == MHI_PCIE_DEBUG_ID) { + id->device = args[1]; + pci_unregister_driver(&mhi_pcie_driver); + ret = pci_register_driver(&mhi_pcie_driver); + } + } + + mhi_dev->debug_mode = true; + debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl, + &mhi_init_debugfs_trigger_go_fops); + pr_info( + "%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n", + __func__, ret, args[1], mhi_dev->smmu_cfg, + mhi_cntrl->timeout_ms); + return count; + +error_read: + kfree(buf); + return ret; +} + +static const struct file_operations debugfs_debug_ops = { + .open = mhi_init_debugfs_debug_open, + .release = single_release, + .read = seq_read, + .write = mhi_init_debugfs_debug_write, +}; + +static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + u64 addr_win[2]; + int ret; + + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) { + pr_err("mhi_alloc_controller fail\n"); + return NULL; + } + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); + mhi_dev->smmu_cfg = 0; + #if 0 //def CONFIG_HAVE_MEMBLOCK + addr_win[0] = memblock_start_of_DRAM(); + addr_win[1] = memblock_end_of_DRAM(); + #else +#define MHI_MEM_BASE_DEFAULT 0x000000000 +#define MHI_MEM_SIZE_DEFAULT 0x2000000000 + addr_win[0] = MHI_MEM_BASE_DEFAULT; + addr_win[1] = MHI_MEM_SIZE_DEFAULT; + if (sizeof(dma_addr_t) == 4) { + addr_win[1] = 0xFFFFFFFF; + } + #endif + + mhi_cntrl->iova_start = addr_win[0]; + mhi_cntrl->iova_stop = addr_win[1]; + + mhi_dev->pci_dev = pci_dev; + 
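	/* keep a reference to the PCI device in both struct mhi_dev and the mhi_controller */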
mhi_cntrl->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->link_status = mhi_link_status; + + ret = mhi_arch_platform_init(mhi_dev); + if (ret) + goto error_probe; + + ret = mhi_register_mhi_controller(mhi_cntrl); + if (ret) + goto error_register; + + if (mhi_cntrl->parent) + debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent, + mhi_cntrl, &debugfs_debug_ops); + + return mhi_cntrl; + +error_register: + mhi_arch_platform_deinit(mhi_dev); + +error_probe: + mhi_free_controller(mhi_cntrl); + + return NULL; +} + +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + struct mhi_controller *mhi_cntrl = NULL; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 slot = PCI_SLOT(pci_dev->devfn); + struct mhi_dev *mhi_dev; + int ret; + + pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n", + __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device); + + mhi_cntrl = mhi_platform_probe(pci_dev); + if (!mhi_cntrl) { + pr_err("mhi_platform_probe fail\n"); + return -EPROBE_DEFER; + } + + mhi_cntrl->dev_id = pci_dev->device; + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_dev->pci_dev = pci_dev; + mhi_dev->powered_on = true; + + ret = mhi_arch_pcie_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret); + return ret; + } + + ret = mhi_arch_iommu_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret); + goto error_iommu_init; + } + + ret = mhi_init_pci_dev(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret); + goto error_init_pci; + } + + /* start power up sequence if not in debug mode */ + if (!mhi_dev->debug_mode) { + ret = mhi_async_power_up(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret); + goto error_power_up; + } + } + +#if 0 + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_allow(&pci_dev->dev); + pm_runtime_disable(&pci_dev->dev); +#endif + + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + dev_set_drvdata(&pci_dev->dev, mhi_cntrl); + MHI_LOG("Return successful\n"); + + return 0; + +error_power_up: + mhi_deinit_pci_dev(mhi_cntrl); + +error_init_pci: + mhi_arch_iommu_deinit(mhi_cntrl); + +error_iommu_init: + mhi_arch_pcie_deinit(mhi_cntrl); + + return ret; +} + +static void mhi_pci_remove(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev); + + if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) { + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + MHI_LOG("%s\n", dev_name(&pci_dev->dev)); + if (!mhi_dev->debug_mode) { + mhi_power_down(mhi_cntrl, 1); + } + mhi_deinit_pci_dev(mhi_cntrl); + mhi_arch_iommu_deinit(mhi_cntrl); + mhi_arch_pcie_deinit(mhi_cntrl); + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = 
mhi_pci_remove, + .driver = { + .pm = &pm_ops + } +}; + +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_info("%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_info("%s exit\n", __func__); +} diff --git a/driver/quectel_MHI/src/controllers/mhi_qcom.h b/driver/quectel_MHI/src/controllers/mhi_qcom.h new file mode 100644 index 0000000..bced45b --- /dev/null +++ b/driver/quectel_MHI/src/controllers/mhi_qcom.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) +#define MHI_RPM_SUSPEND_TMR_MS (3000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + bool debug_mode; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64)); +} + +static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +#endif /* _MHI_QCOM_ */ diff --git a/driver/quectel_MHI/src/controllers/mhi_qti.c b/driver/quectel_MHI/src/controllers/mhi_qti.c new file mode 100644 index 0000000..21bcd04 --- /dev/null +++ b/driver/quectel_MHI/src/controllers/mhi_qti.c @@ -0,0 +1,1309 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define MAX_MHI 8 +#ifdef CONFIG_PCI_MSM +#define QCOM_AP_AND_EFUSE_PCIE_SLEEP +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP +#include +#include +#endif +#endif +//#define QCOM_AP_SDM845_IOMMU_MAP +#ifdef QCOM_AP_SDM845_IOMMU_MAP +#include +#include +#include +#endif +#include "../core/mhi.h" +#include "../core/mhi_internal.h" +#include "mhi_qti.h" + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP +extern int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); +struct arch_info { + struct mhi_dev *mhi_dev; + struct msm_bus_scale_pdata *msm_bus_pdata; + u32 bus_client; + struct pci_saved_state *pcie_state; + struct pci_saved_state *ref_pcie_state; + struct dma_iommu_mapping *mapping; +}; +#endif + +#if 1 +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +#ifdef PCI_IRQ_NOMSIX +#define PCI_IRQ_MSI PCI_IRQ_NOMSIX +#endif + +#ifndef PCI_IRQ_MSI +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_enable_msi_range(dev, min_vecs, max_vecs); +} + +static void pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msi(dev); +} + +static int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ +#if 0//defined(CONFIG_PINCTRL_IPQ5018) + struct pcie_port *pp = dev->bus->sysdata; + pp->msi[nr]; //msi maybe not continuous +#endif + return dev->irq + nr; +} +#endif +#endif + +struct firmware_info { + unsigned int dev_id; + const char *fw_image; + const char *edl_image; +}; + +static const struct firmware_info firmware_table[] = { + {.dev_id = 0x306, .fw_image = "sdx55m/sbl1.mbn"}, + {.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"}, + {.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"}, + /* default, set to debug.mbn */ + {.fw_image = "debug.mbn"}, +}; + +static int debug_mode; +module_param_named(debug_mode, debug_mode, int, 0644); + +int mhi_debugfs_trigger_m0(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Exit\n"); + pm_runtime_get(&mhi_dev->pci_dev->dev); + pm_runtime_put(&mhi_dev->pci_dev->dev); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, + mhi_debugfs_trigger_m0, "%llu\n"); + +int mhi_debugfs_trigger_m3(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Entry\n"); + pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev); + pm_request_autosuspend(&mhi_dev->pci_dev->dev); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, + mhi_debugfs_trigger_m3, "%llu\n"); + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct 
mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_dont_use_autosuspend(&pci_dev->dev); + pm_runtime_disable(&pci_dev->dev); + pci_free_irq_vectors(pci_dev); + kfree(mhi_cntrl->irq); + mhi_cntrl->irq = NULL; + iounmap(mhi_cntrl->regs); + mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, mhi_dev->resn); + pci_disable_device(pci_dev); +} + +static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + resource_size_t len; + int i; + + mhi_dev->resn = MHI_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, mhi_dev->resn); + if (ret) { + MHI_ERR("Error assign pci resources, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Error enabling device, ret:%d\n", ret); + goto error_enable_device; + } + + ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi"); + if (ret) { + MHI_ERR("Error pci_request_region, ret:%d\n", ret); + goto error_request_region; + } + + pci_set_master(pci_dev); + +#if 1 //some SOC like rpi_4b need next codes + ret = -EIO; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) + if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) { + ret = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(64)); + } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { + ret = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(32)); + } +#else + if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { + ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); + } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); + } +#endif + if (ret) { + MHI_ERR("Error dma mask\n"); + } +#endif + + mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn); + len = pci_resource_len(pci_dev, mhi_dev->resn); +#ifndef ioremap_nocache //4bdc0d676a643140bdf17dbf7eafedee3d496a3c +#define ioremap_nocache ioremap +#endif + mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len); + if (!mhi_cntrl->regs) { + MHI_ERR("Error ioremap region\n"); + goto error_ioremap; + } + +#if 0 + ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required, + mhi_cntrl->msi_required, PCI_IRQ_NOMSIX); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + MHI_ERR("Failed to enable MSI, ret:%d\n", ret); + goto error_req_msi; + } +#else + ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + if (ret == -ENOSPC) { + /* imx_3.14.52_1.1.0_ga + diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c + index f06e8f0..6a9614f 100644 + --- a/drivers/pci/host/pcie-designware.c + +++ b/drivers/pci/host/pcie-designware.c + @@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + if (msgvec > 5) + msgvec = 0; + + +#if 1 //Add by Quectel 20190419 + + if (msgvec > 0 && pdev->vendor == 0x17cb) { + + dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec); + + msgvec = 0; + + } + +#endif + + + irq = assign_irq((1 << msgvec), desc, &pos); + if (irq < 0) + return irq; + */ + } + //imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device + if (ret != 1) { + MHI_ERR("Failed to enable MSI, 
ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required); + goto error_req_msi; + } + } + MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, ret, pci_dev->irq); +#endif + + mhi_cntrl->msi_allocated = ret; + mhi_cntrl->irq = kmalloc_array(mhi_cntrl->msi_allocated, + sizeof(*mhi_cntrl->irq), GFP_KERNEL); + if (!mhi_cntrl->irq) { + ret = -ENOMEM; + goto error_alloc_msi_vec; + } + + for (i = 0; i < mhi_cntrl->msi_allocated; i++) { + mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i); + if (mhi_cntrl->irq[i] < 0) { + ret = mhi_cntrl->irq[i]; + goto error_get_irq_vec; + } + } + + dev_set_drvdata(&pci_dev->dev, mhi_cntrl); + + /* configure runtime pm */ + pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS); + pm_runtime_use_autosuspend(&pci_dev->dev); + pm_suspend_ignore_children(&pci_dev->dev, true); + + /* + * pci framework will increment usage count (twice) before + * calling local device driver probe function. + * 1st pci.c pci_pm_init() calls pm_runtime_forbid + * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync + * Framework expect pci device driver to call + * pm_runtime_put_noidle to decrement usage count after + * successful probe and and call pm_runtime_allow to enable + * runtime suspend. + */ + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_put_noidle(&pci_dev->dev); + + return 0; + +error_get_irq_vec: + kfree(mhi_cntrl->irq); + mhi_cntrl->irq = NULL; + +error_alloc_msi_vec: + pci_free_irq_vectors(pci_dev); + +error_req_msi: + iounmap(mhi_cntrl->regs); + +error_ioremap: + pci_clear_master(pci_dev); + +error_request_region: + pci_disable_device(pci_dev); + +error_enable_device: + pci_release_region(pci_dev, mhi_dev->resn); + + return ret; +} + +#ifdef CONFIG_PM +static int mhi_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + if (mhi_cntrl->ee != MHI_EE_AMSS) { + MHI_LOG("Not AMSS, return busy\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return -EBUSY; + } + + ret = mhi_pm_suspend(mhi_cntrl); + if (ret) { + MHI_LOG("Abort due to ret:%d\n", ret); + goto exit_runtime_suspend; + } + + ret = mhi_arch_link_off(mhi_cntrl, true); + if (ret) + MHI_ERR("Failed to Turn off link ret:%d\n", ret); + +exit_runtime_suspend: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with ret:%d\n", ret); + + return ret; +} + +static int mhi_runtime_idle(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + if ((mhi_cntrl->dev_state == MHI_STATE_M0 || mhi_cntrl->dev_state == MHI_STATE_M3) + && mhi_cntrl->ee == MHI_EE_AMSS) { + return 0; + } + MHI_LOG("Entered returning -EBUSY, mhi_state:%s exec_env:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + /* + * RPM framework during runtime resume always calls + * rpm_idle to see if device ready to suspend. + * If dev.power usage_count count is 0, rpm fw will call + * rpm_idle cb to see if device is ready to suspend. + * if cb return 0, or cb not defined the framework will + * assume device driver is ready to suspend; + * therefore, fw will schedule runtime suspend. 
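 + * Note: the check at the top of this function already returns 0 (ready to
 + * suspend) when the device is in M0/M3 and in MHI_EE_AMSS.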
+ * In MHI power management, MHI host shall go to + * runtime suspend only after entering MHI State M2, even if + * usage count is 0. Return -EBUSY to disable automatic suspend. + */ + return -EBUSY; +} + +static int mhi_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* turn on link */ + ret = mhi_arch_link_on(mhi_cntrl); + if (ret) + goto rpm_resume_exit; + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + +rpm_resume_exit: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with :%d\n", ret); + + return ret; +} + +static int mhi_system_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + ret = mhi_runtime_resume(dev); + if (ret) { + MHI_ERR("Failed to resume link\n"); + } else { + //pm_runtime_set_active(dev); + //pm_runtime_enable(dev); + } + + return ret; +} + +int mhi_system_suspend(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + int ret; + + MHI_LOG("Entered\n"); + + if (atomic_read(&mhi_cntrl->pending_pkts)) { + MHI_LOG("Abort due to pending_pkts:%d\n", atomic_read(&mhi_cntrl->pending_pkts)); + return -EBUSY; + } + + /* if rpm status still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) { + ret = mhi_runtime_suspend(dev); + if (ret) { + MHI_LOG("suspend failed ret:%d\n", ret); + return ret; + } + } + + //pm_runtime_set_suspended(dev); + //pm_runtime_disable(dev); + + MHI_LOG("Exit\n"); + return 0; +} +#endif + +/* checks if link is down */ +static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + u16 dev_id; + int ret; + + /* try reading device id, if dev id don't match, link is down */ + ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? 
-EIO : 0; +} + +/* disable PCIe L1 */ +static int mhi_lpm_disable(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL; + u8 val; + int ret; + + ret = pci_read_config_byte(pci_dev, lnkctl, &val); + if (ret) { + MHI_ERR("Error reading LNKCTL, ret:%d\n", ret); + return ret; + } + + /* L1 is not supported or already disabled */ + if (!(val & PCI_EXP_LNKCTL_ASPM_L1)) + return 0; + + val &= ~PCI_EXP_LNKCTL_ASPM_L1; + ret = pci_write_config_byte(pci_dev, lnkctl, val); + if (ret) { + MHI_ERR("Error writing LNKCTL to disable LPM, ret:%d\n", ret); + return ret; + } + + mhi_dev->lpm_disabled = true; + + return ret; +} + +/* enable PCIe L1 */ +static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL; + u8 val; + int ret; + + /* L1 is not supported or already disabled */ + if (!mhi_dev->lpm_disabled) + return 0; + + ret = pci_read_config_byte(pci_dev, lnkctl, &val); + if (ret) { + MHI_ERR("Error reading LNKCTL, ret:%d\n", ret); + return ret; + } + + val |= PCI_EXP_LNKCTL_ASPM_L1; + ret = pci_write_config_byte(pci_dev, lnkctl, val); + if (ret) { + MHI_ERR("Error writing LNKCTL to enable LPM, ret:%d\n", ret); + return ret; + } + + mhi_dev->lpm_disabled = false; + + return ret; +} + +static int mhi_power_up(struct mhi_controller *mhi_cntrl) +{ + enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl); + const u32 delayus = 10; + int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms * 1000, delayus); + int ret; + + MHI_LOG("dev_state:%s\n", TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + + /* + * It's possible device did not go thru a cold reset before + * power up and still in error state. 
If device in error state, + * we need to trigger a soft reset before continue with power + * up + */ + if (dev_state == MHI_STATE_SYS_ERR) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + while (itr--) { + dev_state = mhi_get_mhi_state(mhi_cntrl); + if (dev_state != MHI_STATE_SYS_ERR) + break; + usleep_range(delayus, delayus << 1); + } + MHI_LOG("dev_state:%s\n", TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + + /* device still in error state, abort power up */ + if (dev_state == MHI_STATE_SYS_ERR) + return -EIO; + } + + ret = mhi_async_power_up(mhi_cntrl); + + /* power up create the dentry */ + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + return ret; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put(dev); +} + +static void mhi_runtime_mark_last_busy(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_mark_last_busy(dev); +} + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP +static void mhi_pci_event_cb(struct msm_pcie_notify *notify) +{ + struct pci_dev *pci_dev = notify->user; + struct device *dev = &pci_dev->dev; + + dev_info(&pci_dev->dev, "Received PCIe event %d", notify->event); + switch (notify->event) { + case MSM_PCIE_EVENT_WAKEUP: + if (dev && pm_runtime_status_suspended(dev)) { + pm_request_resume(dev); + pm_runtime_mark_last_busy(dev); + } + break; + default: + break; + } +} + +static struct msm_pcie_register_event mhi_pcie_events[MAX_MHI]; +#endif + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + switch (reason) { + case MHI_CB_FATAL_ERROR: + case MHI_CB_SYS_ERROR: + pm_runtime_forbid(dev); + break; + case MHI_CB_EE_MISSION_MODE: + //pm_runtime_allow(dev); + break; + default: + break; + } +} + +/* capture host SoC XO time in ticks */ +static u64 mhi_time_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + return 0; +} + +static ssize_t timeout_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + /* buffer provided by sysfs has a minimum size of PAGE_SIZE */ + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_cntrl->timeout_ms); +} + +static ssize_t timeout_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u32 timeout_ms; + + if (kstrtou32(buf, 0, &timeout_ms) < 0) + return -EINVAL; + + mhi_cntrl->timeout_ms = timeout_ms; + + return count; +} +static DEVICE_ATTR_RW(timeout_ms); + +static ssize_t power_up_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + ret = 
mhi_power_up(mhi_cntrl); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_WO(power_up); + +static struct attribute *mhi_attrs[] = { + &dev_attr_timeout_ms.attr, + &dev_attr_power_up.attr, + NULL +}; + +static const struct attribute_group mhi_group = { + .attrs = mhi_attrs, +}; + +static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + struct device_node *of_node = pci_dev->dev.of_node; + const struct firmware_info *firmware_info; + bool use_bb; + u64 addr_win[2]; + int ret, i; + + //if (!of_node) + // return ERR_PTR(-ENODEV); + + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) + return ERR_PTR(-ENOMEM); + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &pci_dev->dev; + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->vendor = pci_dev->vendor; + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); + +#if 0 + use_bb = of_property_read_bool(of_node, "mhi,use-bb"); + + /* + * if s1 translation enabled or using bounce buffer pull iova addr + * from dt + */ + if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && + !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) { + ret = of_property_count_elems_of_size(of_node, "qti,addr-win", + sizeof(addr_win)); + if (ret != 1) + goto error_register; + ret = of_property_read_u64_array(of_node, "qti,addr-win", + addr_win, 2); + if (ret) + goto error_register; + } else { + addr_win[0] = memblock_start_of_DRAM(); + addr_win[1] = memblock_end_of_DRAM(); + } +#else + use_bb = false; + (void)use_bb; + addr_win[0] = 0x000000000; + addr_win[1] = 0x2000000000; //MHI_MEM_SIZE_DEFAULT + if (sizeof(dma_addr_t) == 4) { + addr_win[1] = 0xFFFFFFFF; + } +#endif + + mhi_dev->iova_start = addr_win[0]; + mhi_dev->iova_stop = addr_win[1]; + + /* + * If S1 is enabled, set MHI_CTRL start address to 0 so we can use low + * level mapping api to map buffers outside of smmu domain + */ + if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && + !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS)) + mhi_cntrl->iova_start = 0; + else + mhi_cntrl->iova_start = addr_win[0]; + + mhi_cntrl->iova_stop = mhi_dev->iova_stop; + mhi_cntrl->of_node = of_node; + + mhi_dev->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->runtime_mark_last_busy = mhi_runtime_mark_last_busy; + mhi_cntrl->link_status = mhi_link_status; + + mhi_cntrl->lpm_disable = mhi_lpm_disable; + mhi_cntrl->lpm_enable = mhi_lpm_enable; + mhi_cntrl->time_get = mhi_time_get; + + ret = of_register_mhi_controller(mhi_cntrl); + if (ret) + goto error_register; + + for (i = 0; i < ARRAY_SIZE(firmware_table); i++) { + firmware_info = firmware_table + i; + + /* debug mode always use default */ + if (!debug_mode && mhi_cntrl->dev_id == firmware_info->dev_id) + break; + } + +#if 0 + mhi_cntrl->fw_image = firmware_info->fw_image; + mhi_cntrl->edl_image = firmware_info->edl_image; +#endif + + if (sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_group)) + MHI_ERR("Error while creating the sysfs group\n"); + + return mhi_cntrl; + +error_register: + mhi_free_controller(mhi_cntrl); + + return ERR_PTR(-EINVAL); +} + +static bool mhi_pci_is_alive(struct pci_dev *pdev) +{ + u16 vendor = 0; + + if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) + return false; + + if (vendor == (u16) ~0 
|| vendor == 0) + return false; + + return true; +} + +static void mhi_pci_show_link(struct mhi_controller *mhi_cntrl, struct pci_dev *pci_dev) +{ + int pcie_cap_reg; + u16 stat; + u32 caps; + const char *speed; + + pcie_cap_reg = pci_find_capability(pci_dev, PCI_CAP_ID_EXP); + + if (!pcie_cap_reg) + return; + + pci_read_config_word(pci_dev, + pcie_cap_reg + PCI_EXP_LNKSTA, + &stat); + pci_read_config_dword(pci_dev, + pcie_cap_reg + PCI_EXP_LNKCAP, + &caps); + + switch (caps & PCI_EXP_LNKCAP_SLS) { + case PCI_EXP_LNKCAP_SLS_2_5GB: speed = "2.5"; break; + case PCI_EXP_LNKCAP_SLS_5_0GB: speed = "5"; break; + case 3: speed = "8"; break; + case 4: speed = "16"; break; + case 5: speed = "32"; break; + case 6: speed = "64"; break; + default: speed = "0"; break; + } + + MHI_LOG("LnkCap: Speed %sGT/s, Width x%d\n", speed, + (caps & PCI_EXP_LNKCAP_MLW) >> 4); + + switch (stat & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: speed = "2.5"; break; + case PCI_EXP_LNKSTA_CLS_5_0GB: speed = "5"; break; + case 3: speed = "8"; break; + case 4: speed = "16"; break; + case 5: speed = "32"; break; + case 6: speed = "64"; break; + default: speed = "0"; break; + } + + MHI_LOG("LnkSta: Speed %sGT/s, Width x%d\n", speed, + (stat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); + +} + +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + struct mhi_controller *mhi_cntrl; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 dev_id = pci_dev->device; + u32 slot = PCI_SLOT(pci_dev->devfn); + struct mhi_dev *mhi_dev; + int ret; + + pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n", + __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device); + +#if !defined(CONFIG_PCI_MSI) + /* MT7621 RTL8198D EcoNet-EN7565 */ + #error "pcie msi is not support by this soc! and i donot support INTx (SW1SDX55-2688)" +#endif + + if (!mhi_pci_is_alive(pci_dev)) { + /* + root@OpenWrt:~# hexdump /sys/bus/pci/devices/0000:01:00.0/config + 0000000 ffff ffff ffff ffff ffff ffff ffff ffff + * + 0001000 + */ + pr_err("mhi_pci is not alive! 
pcie link is down\n"); + pr_err("double check by 'hexdump /sys/bus/pci/devices/%s/config'\n", dev_name(&pci_dev->dev)); + return -EIO; + } + + /* see if we already registered */ + mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id); + if (!mhi_cntrl) + mhi_cntrl = mhi_register_controller(pci_dev); + + if (IS_ERR(mhi_cntrl)) + return PTR_ERR(mhi_cntrl); + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_dev->powered_on = true; + + mhi_arch_iommu_init(mhi_cntrl); + + ret = mhi_arch_pcie_init(mhi_cntrl); + if (ret) + goto error_init_pci_arch; + + mhi_cntrl->dev = &pci_dev->dev; + ret = mhi_init_pci_dev(mhi_cntrl); + if (ret) + goto error_init_pci; + + /* start power up sequence */ + if (!debug_mode) { + ret = mhi_power_up(mhi_cntrl); + if (ret) + goto error_power_up; + } + + pm_runtime_mark_last_busy(&pci_dev->dev); + + mhi_pci_show_link(mhi_cntrl, pci_dev); + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + { + struct msm_pcie_register_event *pcie_event = &mhi_pcie_events[mhi_cntrl->cntrl_idx]; + + pcie_event->events = MSM_PCIE_EVENT_WAKEUP; +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,14,117 )) + pcie_event->pcie_event.user = pci_dev; + pcie_event->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK; + pcie_event->pcie_event.callback = mhi_pci_event_cb; +#else + pcie_event->user = pci_dev; + pcie_event->mode = MSM_PCIE_TRIGGER_CALLBACK; + pcie_event->callback = mhi_pci_event_cb; +#endif + + ret = msm_pcie_register_event(pcie_event); + if (ret) { + MHI_LOG("Failed to register for PCIe event"); + } + } +#endif + + MHI_LOG("Return successful\n"); + + return 0; + + mhi_unregister_mhi_controller(mhi_cntrl); +error_power_up: + mhi_deinit_pci_dev(mhi_cntrl); + +error_init_pci: + mhi_arch_pcie_deinit(mhi_cntrl); +error_init_pci_arch: + mhi_arch_iommu_deinit(mhi_cntrl); + + return ret; +} + +void mhi_pci_device_removed(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 dev_id = pci_dev->device; + u32 slot = PCI_SLOT(pci_dev->devfn); + + mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id); + + if (mhi_cntrl) { + + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + { + struct msm_pcie_register_event *pcie_event = &mhi_pcie_events[mhi_cntrl->cntrl_idx]; + + msm_pcie_deregister_event(pcie_event); + } +#endif + + pm_stay_awake(&mhi_cntrl->mhi_dev->dev); + + /* if link is in drv suspend, wake it up */ + pm_runtime_get_sync(&pci_dev->dev); + + mutex_lock(&mhi_cntrl->pm_mutex); + if (!mhi_dev->powered_on) { + MHI_LOG("Not in active state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + pm_runtime_put_noidle(&pci_dev->dev); + return; + } + mhi_dev->powered_on = false; + mutex_unlock(&mhi_cntrl->pm_mutex); + + pm_runtime_put_noidle(&pci_dev->dev); + + MHI_LOG("Triggering shutdown process\n"); + mhi_power_down(mhi_cntrl, false); + + /* turn the link off */ + mhi_deinit_pci_dev(mhi_cntrl); + mhi_arch_link_off(mhi_cntrl, false); + + mhi_arch_pcie_deinit(mhi_cntrl); + mhi_arch_iommu_deinit(mhi_cntrl); + + pm_relax(&mhi_cntrl->mhi_dev->dev); + + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX20 + 
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, //SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308)}, //SDX62 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x011a)}, //SDX35 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0309)}, //SDX7X + {PCI_DEVICE(0x1eac, 0x1001)}, //EM120 + {PCI_DEVICE(0x1eac, 0x1002)}, //EM160 + {PCI_DEVICE(0x1eac, 0x1004)}, //RM520 + {PCI_DEVICE(0x1eac, 0x100b)}, //RM255 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id); + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi_q", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = mhi_pci_device_removed, + .driver = { + .pm = &pm_ops + } +}; + +#if 0 +module_pci_driver(mhi_pcie_driver); +#else +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_info("%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_info("%s exit\n", __func__); +} + +#ifdef QCOM_AP_SDM845_IOMMU_MAP +struct dma_iommu_mapping *mhi_smmu_mapping[MAX_MHI]; + +#define SMMU_BASE 0x10000000 +#define SMMU_SIZE 0x40000000 +static struct dma_iommu_mapping * sdm845_smmu_init(struct pci_dev *pdev) { + int ret = 0; + int atomic_ctx = 1; + int s1_bypass = 1; + struct dma_iommu_mapping *mapping; + + mapping = arm_iommu_create_mapping(&platform_bus_type, SMMU_BASE, SMMU_SIZE); + if (IS_ERR(mapping)) { + ret = PTR_ERR(mapping); + dev_err(&pdev->dev, "Create mapping failed, err = %d\n", ret); + return NULL; + } + + ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx); + if (ret < 0) { + dev_err(&pdev->dev, "Set atomic_ctx attribute failed, err = %d\n", ret); + goto set_attr_fail; + } + + ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass); + if (ret < 0) { + dev_err(&pdev->dev, "Set s1_bypass attribute failed, err = %d\n", ret); + arm_iommu_release_mapping(mapping); + goto set_attr_fail; + } + + ret = arm_iommu_attach_device(&pdev->dev, mapping); + if (ret < 0) { + dev_err(&pdev->dev, "Attach device failed, err = %d\n", ret); + goto attach_fail; + } + + return mapping; + +attach_fail: +set_attr_fail: + arm_iommu_release_mapping(mapping); + return NULL; +} +#endif + +int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_SDM845_IOMMU_MAP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_smmu_mapping[mhi_cntrl->cntrl_idx] = sdm845_smmu_init(mhi_dev->pci_dev); +#endif + + return 0; +} + +void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_SDM845_IOMMU_MAP + if (mhi_smmu_mapping[mhi_cntrl->cntrl_idx]) { + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + arm_iommu_detach_device(&mhi_dev->pci_dev->dev); + arm_iommu_release_mapping(mhi_smmu_mapping[mhi_cntrl->cntrl_idx]); + mhi_smmu_mapping[mhi_cntrl->cntrl_idx] = NULL; + } +#endif +} + +static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index) +{ + MHI_LOG("Setting bus request to index %d\n", index); + return 0; +} + +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + + if (!arch_info) { + arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev, + sizeof(*arch_info), GFP_KERNEL); + if (!arch_info) + return -ENOMEM; + + mhi_dev->arch_info = arch_info; + + /* save reference state 
for pcie config space */ + arch_info->ref_pcie_state = pci_store_saved_state( + mhi_dev->pci_dev); + } +#endif + + return mhi_arch_set_bus_request(mhi_cntrl, 1); +} + +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ + mhi_arch_set_bus_request(mhi_cntrl, 0); +} + +int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + if (graceful) { + pci_clear_master(pci_dev); + ret = pci_save_state(mhi_dev->pci_dev); + if (ret) { + MHI_ERR("Failed with pci_save_state, ret:%d\n", ret); + return ret; + } + + arch_info->pcie_state = pci_store_saved_state(pci_dev); + pci_disable_device(pci_dev); + } + + /* + * We will always attempt to put link into D3hot, however + * link down may have happened due to error fatal, so + * ignoring the return code + */ + pci_set_power_state(pci_dev, PCI_D3hot); + + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev, + NULL, 0); + MHI_ERR("msm_pcie_pm_control(MSM_PCIE_SUSPEND), ret:%d\n", ret); + + /* release the resources */ + mhi_arch_set_bus_request(mhi_cntrl, 0); + + MHI_LOG("Exited\n"); +#endif + + return 0; +} + +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + /* request resources and establish link trainning */ + ret = mhi_arch_set_bus_request(mhi_cntrl, 1); + if (ret) + MHI_LOG("Could not set bus frequency, ret:%d\n", ret); + + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev, + NULL, 0); + MHI_LOG("msm_pcie_pm_control(MSM_PCIE_RESUME), ret:%d\n", ret); + if (ret) { + MHI_ERR("Link training failed, ret:%d\n", ret); + return ret; + } + + ret = pci_set_power_state(pci_dev, PCI_D0); + if (ret) { + MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Failed to enable device, ret:%d\n", ret); + return ret; + } + + ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state); + if (ret) + MHI_LOG("Failed to load saved cfg state\n"); + + pci_restore_state(pci_dev); + pci_set_master(pci_dev); + + MHI_LOG("Exited\n"); +#endif + + return 0; +} +#endif diff --git a/driver/quectel_MHI/src/controllers/mhi_qti.h b/driver/quectel_MHI/src/controllers/mhi_qti.h new file mode 100644 index 0000000..7ac021a --- /dev/null +++ b/driver/quectel_MHI/src/controllers/mhi_qti.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#ifndef _MHI_QTI_ +#define _MHI_QTI_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) + +/* runtime suspend timer */ +#define MHI_RPM_SUSPEND_TMR_MS (2000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + dma_addr_t iova_start; + dma_addr_t iova_stop; + bool lpm_disabled; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +void mhi_pci_device_removed(struct pci_dev *pci_dev); +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl); +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl); +int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl); +void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl); +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful); +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl); + +#endif /* _MHI_QTI_ */ diff --git a/driver/quectel_MHI/src/core/Makefile b/driver/quectel_MHI/src/core/Makefile new file mode 100644 index 0000000..a743fbf --- /dev/null +++ b/driver/quectel_MHI/src/core/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MHI_BUS) +=mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o diff --git a/driver/quectel_MHI/src/core/mhi.h b/driver/quectel_MHI/src/core/mhi.h new file mode 100644 index 0000000..f9671af --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi.h @@ -0,0 +1,908 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#ifndef _MHI_H_ +#define _MHI_H_ + +#define PCIE_MHI_DRIVER_VERSION "V1.3.8" +#define ENABLE_MHI_MON +//#define ENABLE_IP_SW0 + +// #define ENABLE_ADPL + +// #define ENABLE_QDSS + +#include +typedef enum +{ + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_QMI_2_OUT = 16, + MHI_CLIENT_QMI_2_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_IPCR_OUT = 20, + MHI_CLIENT_IPCR_IN = 21, + MHI_CLIENT_TEST_FW_OUT = 22, + MHI_CLIENT_TEST_FW_IN = 23, + MHI_CLIENT_RESERVED_0 = 24, + MHI_CLIENT_BOOT_LOG_IN = 25, + MHI_CLIENT_DCI_OUT = 26, + MHI_CLIENT_DCI_IN = 27, + MHI_CLIENT_QBI_OUT = 28, + MHI_CLIENT_QBI_IN = 29, + MHI_CLIENT_RESERVED_1_LOWER = 30, + MHI_CLIENT_RESERVED_1_UPPER = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_EDL_OUT = 34, + MHI_CLIENT_EDL_IN = 35, + MHI_CLIENT_ADB_FB_OUT = 36, + MHI_CLIENT_ADB_FB_IN = 37, + MHI_CLIENT_RESERVED_2_LOWER = 38, + MHI_CLIENT_RESERVED_2_UPPER = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_0_OUT = 46, + MHI_CLIENT_IP_SW_0_IN = 47, + MHI_CLIENT_IP_SW_1_OUT = 48, + MHI_CLIENT_IP_SW_1_IN = 49, + MHI_CLIENT_RESERVED_3_LOWER = 50, + MHI_CLIENT_RESERVED_3_UPPER = 59, + MHI_CLIENT_TEST_0_OUT = 60, + MHI_CLIENT_TEST_0_IN = 61, + MHI_CLIENT_TEST_1_OUT = 62, + MHI_CLIENT_TEST_1_IN = 63, + MHI_CLIENT_TEST_2_OUT = 64, + MHI_CLIENT_TEST_2_IN = 65, + MHI_CLIENT_TEST_3_OUT = 66, + MHI_CLIENT_TEST_3_IN = 67, + MHI_CLIENT_RESERVED_4_LOWER = 68, + MHI_CLIENT_RESERVED_4_UPPER = 91, + MHI_CLIENT_OEM_0_OUT = 92, + MHI_CLIENT_OEM_0_IN = 93, + MHI_CLIENT_OEM_1_OUT = 94, + MHI_CLIENT_OEM_1_IN = 95, + MHI_CLIENT_OEM_2_OUT = 96, + MHI_CLIENT_OEM_2_IN = 97, + MHI_CLIENT_OEM_3_OUT = 98, + MHI_CLIENT_OEM_3_IN = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_ADPL = 102, + MHI_CLIENT_IP_HW_QDSS = 103, + // MHI_CLIENT_RESERVED_5_LOWER = 103, + MHI_CLIENT_RESERVED_5_UPPER = 127, + MHI_MAX_CHANNELS = 128 +}MHI_CLIENT_CHANNEL_TYPE; + +/* Event Ring Index */ +typedef enum +{ + SW_EVT_RING = 0, + PRIMARY_EVENT_RING = SW_EVT_RING, +#ifdef ENABLE_IP_SW0 + SW_0_OUT_EVT_RING, + SW_0_IN_EVT_RING, +#endif + IPA_OUT_EVENT_RING, + IPA_IN_EVENT_RING, +#ifdef ENABLE_ADPL + ADPL_EVT_RING, +#endif +#ifdef ENABLE_QDSS + QDSS_EVT_RING, +#endif + + MAX_EVT_RING_IDX +}MHI_EVT_RING_IDX; + +#define MHI_VERSION 0x01000000 +#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */ +#define MHI_MSI_INDEX 1 +#define MAX_NUM_MHI_DEVICES 1 +#define NUM_MHI_XFER_RINGS 128 +#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX +#define NUM_MHI_HW_EVT_RINGS 4 +#define NUM_MHI_XFER_RING_ELEMENTS 16 +#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) //must *2, event ring full will make x55 dump +#define NUM_MHI_IPA_IN_RING_ELEMENTS 512 +#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 //donot use ul agg, so increase +#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128 +#define NUM_MHI_SW_IP_RING_ELEMENTS 512 + +#ifdef ENABLE_ADPL +#define NUM_MHI_ADPL_RING_ELEMENTS 256 +#endif + +#ifdef ENABLE_QDSS +#define NUM_MHI_QDSS_RING_ELEMENTS 256 +#endif + +/* +* 
for if set Interrupt moderation time as 1ms, +and transfer more than NUM_MHI_CHAN_RING_ELEMENTS data are sent to the modem in 1ms. +e.g. firehose upgrade. +modem will not trigger irq for these transfer. +*/ +#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8 +#define MHI_EVT_CMD_QUEUE_SIZE 160 +#define MHI_EVT_STATE_QUEUE_SIZE 128 +#define MHI_EVT_XFER_QUEUE_SIZE 1024 + +#define CHAN_INBOUND(_x) ((_x)%2) + +#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \ + ((_x) == MHI_CLIENT_SAHARA_IN) || \ + ((_x) == MHI_CLIENT_BOOT_LOG_IN)) + +#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \ + ((_x) == MHI_CLIENT_EDL_IN)) + +struct mhi_chan; +struct mhi_event; +struct mhi_ctxt; +struct mhi_cmd; +struct image_info; +struct bhi_vec_entry; +struct mhi_timesync; +struct mhi_buf_info; + +/** + * enum MHI_CB - MHI callback + * @MHI_CB_IDLE: MHI entered idle state + * @MHI_CB_PENDING_DATA: New data available for client to process + * @MHI_CB_LPM_ENTER: MHI host entered low power mode + * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode + * @MHI_CB_EE_RDDM: MHI device entered RDDM execution enviornment + * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env + * @MHI_CB_SYS_ERROR: MHI device enter error state (may recover) + * @MHI_CB_FATAL_ERROR: MHI device entered fatal error + */ +enum MHI_CB { + MHI_CB_IDLE, + MHI_CB_PENDING_DATA, + MHI_CB_LPM_ENTER, + MHI_CB_LPM_EXIT, + MHI_CB_EE_RDDM, + MHI_CB_EE_MISSION_MODE, + MHI_CB_SYS_ERROR, + MHI_CB_FATAL_ERROR, +}; + +/** + * enum MHI_DEBUG_LEVL - various debugging level + */ +enum MHI_DEBUG_LEVEL { + MHI_MSG_LVL_VERBOSE, + MHI_MSG_LVL_INFO, + MHI_MSG_LVL_ERROR, + MHI_MSG_LVL_CRITICAL, + MHI_MSG_LVL_MASK_ALL, +}; + +/* +GSI_XFER_FLAG_BEI: Block event interrupt +1: Event generated by this ring element must not assert an interrupt to the host +0: Event generated by this ring element must assert an interrupt to the host + +GSI_XFER_FLAG_EOT: Interrupt on end of transfer +1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT. 
+0: If an EOT condition is encountered for this ring element, a completion event is not be generated by the device, unless IEOB is 1 + +GSI_XFER_FLAG_EOB: Interrupt on end of block +1: Device notifies host after processing this ring element by sending a completion event +0: Completion event is not required after processing this ring element + +GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD +*/ + +/** + * enum MHI_FLAGS - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum MHI_FLAGS { + MHI_EOB, + MHI_EOT, + MHI_CHAIN, +}; + +/** + * enum mhi_device_type - Device types + * @MHI_XFER_TYPE: Handles data transfer + * @MHI_TIMESYNC_TYPE: Use for timesync feature + * @MHI_CONTROLLER_TYPE: Control device + */ +enum mhi_device_type { + MHI_XFER_TYPE, + MHI_TIMESYNC_TYPE, + MHI_CONTROLLER_TYPE, +}; + +/** + * enum mhi_ee - device current execution enviornment + * @MHI_EE_PBL - device in PBL + * @MHI_EE_SBL - device in SBL + * @MHI_EE_AMSS - device in mission mode (firmware fully loaded) + * @MHI_EE_RDDM - device in ram dump collection mode + * @MHI_EE_WFW - device in WLAN firmware mode + * @MHI_EE_PTHRU - device in PBL but configured in pass thru mode + * @MHI_EE_EDL - device in emergency download mode + */ +enum mhi_ee { + MHI_EE_PBL = 0x0, + MHI_EE_SBL = 0x1, + MHI_EE_AMSS = 0x2, + MHI_EE_RDDM = 0x3, + MHI_EE_WFW = 0x4, + MHI_EE_PTHRU = 0x5, + MHI_EE_EDL = 0x6, + MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */ + MHI_EE_MAX_SUPPORTED = MHI_EE_FP, + MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ + MHI_EE_MAX, +}; + +/** + * enum mhi_dev_state - device current MHI state + */ +enum mhi_dev_state { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? 
\ + "INVALID_EE" : mhi_ee_str[ee]) + +/** + * struct image_info - firmware and rddm table table + * @mhi_buf - Contain device firmware and rddm table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + +/** + * struct mhi_controller - Master controller structure for external modem + * @dev: Device associated with this controller + * @of_node: DT that has MHI configuration information + * @regs: Points to base of MHI MMIO register space + * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space + * @wake_db: MHI WAKE doorbell register address + * @dev_id: PCIe device id of the external device + * @domain: PCIe domain the device connected to + * @bus: PCIe bus the device assigned to + * @slot: PCIe slot for the modem + * @iova_start: IOMMU starting address for data + * @iova_stop: IOMMU stop address for data + * @fw_image: Firmware image name for normal booting + * @edl_image: Firmware image name for emergency download mode + * @fbc_download: MHI host needs to do complete image transfer + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size + * @seg_len: BHIe vector size + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @max_chan: Maximum number of channels controller support + * @mhi_chan: Points to channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @msi_required: Number of msi required to operate + * @msi_allocated: Number of msi allocated by bus master + * @irq: base irq # to request + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: Power management state + * @ee: MHI device execution environment + * @dev_state: MHI STATE + * @status_cb: CB function to notify various power states to but master + * @link_status: Query link status in case of abnormal value read from device + * @runtime_get: Async runtime resume function + * @runtimet_put: Release votes + * @time_get: Return host time in us + * @lpm_disable: Request controller to disable link level low power modes + * @lpm_enable: Controller may enable link level low power modes again + * @priv_data: Points to bus master's private data + */ +struct mhi_controller { + struct list_head node; + struct mhi_device *mhi_dev; + + /* device node for iommu ops */ + struct device *dev; + struct device_node *of_node; + + /* mmio base */ + phys_addr_t base_addr; + void __iomem *regs; + void __iomem *bhi; + void __iomem *bhie; + void __iomem *wake_db; + + /* device topology */ + u32 vendor; + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + u32 cntrl_idx; + struct device *cntrl_dev; + + /* addressing window */ + dma_addr_t iova_start; + dma_addr_t iova_stop; + + /* fw images */ + const char *fw_image; + const char *edl_image; + + /* mhi host manages downloading entire fbc images */ + bool fbc_download; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + u32 session_id; + u32 sequence_id; + struct image_info *fbc_image; + struct image_info *rddm_image; + + /* physical channel config data */ + u32 max_chan; + struct mhi_chan 
*mhi_chan; + struct list_head lpm_chans; /* these chan require lpm notification */ + + /* physical event config data */ + u32 total_ev_rings; + u32 hw_ev_rings; + u32 sw_ev_rings; + u32 msi_required; + u32 msi_allocated; + u32 msi_irq_base; + int *irq; /* interrupt table */ + struct mhi_event *mhi_event; + + /* cmd rings */ + struct mhi_cmd *mhi_cmd; + + /* mhi context (shared with device) */ + struct mhi_ctxt *mhi_ctxt; + + u32 timeout_ms; + + /* caller should grab pm_mutex for suspend/resume operations */ + struct mutex pm_mutex; + bool pre_init; + rwlock_t pm_lock; + u32 pm_state; + enum mhi_ee ee; + enum mhi_dev_state dev_state; + bool wake_set; + atomic_t dev_wake; + atomic_t alloc_size; + atomic_t pending_pkts; + struct list_head transition_list; + spinlock_t transition_lock; + spinlock_t wlock; + + /* debug counters */ + u32 M0, M2, M3; + + /* worker for different state transitions */ + struct work_struct st_worker; + struct work_struct fw_worker; + struct work_struct syserr_worker; + struct delayed_work ready_worker; + wait_queue_head_t state_event; + + /* shadow functions */ + void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv, + enum MHI_CB reason); + int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv); + void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); + int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv); + void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv); + void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv); + u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv); + int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv); + int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + + /* channel to control DTR messaging */ + struct mhi_device *dtr_dev; + + /* bounce buffer settings */ + bool bounce_buf; + size_t buffer_len; + + /* supports time sync feature */ + struct mhi_timesync *mhi_tsync; + struct mhi_device *tsync_dev; + + /* kernel log level */ + enum MHI_DEBUG_LEVEL klog_lvl; + int klog_slient; + + /* private log level controller driver to set */ + enum MHI_DEBUG_LEVEL log_lvl; + + /* controller specific data */ + void *priv_data; + void *log_buf; + struct dentry *dentry; + struct dentry *parent; + + struct miscdevice miscdev; + +#ifdef ENABLE_MHI_MON + spinlock_t lock; + + /* Ref */ + int nreaders; /* Under mon_lock AND mbus->lock */ + struct list_head r_list; /* Chain of readers (usually one) */ + struct kref ref; /* Under mon_lock */ + + /* Stats */ + unsigned int cnt_events; + unsigned int cnt_text_lost; +#endif +}; + +#ifdef ENABLE_MHI_MON +struct mhi_tre; +struct mon_reader { + struct list_head r_link; + struct mhi_controller *m_bus; + void *r_data; /* Use container_of instead? 
*/ + + void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len); + void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len); + void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre); +}; +#endif + +/** + * struct mhi_device - mhi device structure associated bind to channel + * @dev: Device associated with the channels + * @mtu: Maximum # of bytes controller support + * @ul_chan_id: MHI channel id for UL transfer + * @dl_chan_id: MHI channel id for DL transfer + * @tiocm: Device current terminal settings + * @priv: Driver private data + */ +struct mhi_device { + struct device dev; + u32 vendor; + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + size_t mtu; + int ul_chan_id; + int dl_chan_id; + int ul_event_id; + int dl_event_id; + u32 tiocm; + const struct mhi_device_id *id; + const char *chan_name; + struct mhi_controller *mhi_cntrl; + struct mhi_chan *ul_chan; + struct mhi_chan *dl_chan; + atomic_t dev_wake; + enum mhi_device_type dev_type; + void *priv_data; + int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS flags); + int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t size, enum MHI_FLAGS flags); + void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason); +}; + +/** + * struct mhi_result - Completed buffer information + * @buf_addr: Address of data buffer + * @dir: Channel direction + * @bytes_xfer: # of bytes transferred + * @transaction_status: Status of last trasnferred + */ +struct mhi_result { + void *buf_addr; + enum dma_data_direction dir; + size_t bytes_xferd; + int transaction_status; +}; + +/** + * struct mhi_buf - Describes the buffer + * @page: buffer as a page + * @buf: cpu address for the buffer + * @phys_addr: physical address of the buffer + * @dma_addr: iommu address for the buffer + * @skb: skb of ip packet + * @len: # of bytes + * @name: Buffer label, for offload channel configurations name must be: + * ECA - Event context array data + * CCA - Channel context array data + */ +struct mhi_buf { + struct list_head node; + struct page *page; + void *buf; + phys_addr_t phys_addr; + dma_addr_t dma_addr; + struct sk_buff *skb; + size_t len; + const char *name; /* ECA, CCA */ +}; + +/** + * struct mhi_driver - mhi driver information + * @id_table: NULL terminated channel ID names + * @ul_xfer_cb: UL data transfer callback + * @dl_xfer_cb: DL data transfer callback + * @status_cb: Asynchronous status callback + */ +struct mhi_driver { + const struct mhi_device_id *id_table; + int (*probe)(struct mhi_device *mhi_dev, + const struct mhi_device_id *id); + void (*remove)(struct mhi_device *mhi_dev); + void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res); + void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res); + void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb); + struct device_driver driver; +}; + +#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) +#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) + +static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev, + void *priv) +{ + mhi_dev->priv_data = priv; +} + +static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev) +{ + return mhi_dev->priv_data; +} + +/** + * mhi_queue_transfer - Queue a buffer to hardware + * All transfers are asyncronous transfers + * @mhi_dev: Device associated with 
the channels + * @dir: Data direction + * @buf: Data buffer (skb for hardware channels) + * @len: Size in bytes + * @mflags: Interrupt flags for the device + */ +static inline int mhi_queue_transfer(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + if (dir == DMA_TO_DEVICE) + return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len, + mflags); + else + return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len, + mflags); +} + +static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl) +{ + return mhi_cntrl->priv_data; +} + +static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} + +/** + * mhi_driver_register - Register driver with MHI framework + * @mhi_drv: mhi_driver structure + */ +int mhi_driver_register(struct mhi_driver *mhi_drv); + +/** + * mhi_driver_unregister - Unregister a driver for mhi_devices + * @mhi_drv: mhi_driver structure + */ +void mhi_driver_unregister(struct mhi_driver *mhi_drv); + +/** + * mhi_device_configure - configure ECA or CCA context + * For offload channels that client manage, call this + * function to configure channel context or event context + * array associated with the channel + * @mhi_div: Device associated with the channels + * @dir: Direction of the channel + * @mhi_buf: Configuration data + * @elements: # of configuration elements + */ +int mhi_device_configure(struct mhi_device *mhi_div, + enum dma_data_direction dir, + struct mhi_buf *mhi_buf, + int elements); + +/** + * mhi_device_get - disable all low power modes + * Only disables lpm, does not immediately exit low power mode + * if controller already in a low power mode + * @mhi_dev: Device associated with the channels + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - disable all low power modes + * Synchronously disable all low power, exit low power mode if + * controller already in a low power state + * @mhi_dev: Device associated with the channels + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - re-enable low power modes + * @mhi_dev: Device associated with the channels + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - setup channel for data transfer + * Moves both UL and DL channel from RESET to START state + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer -unprepare the channels + * Moves both UL and DL channels to RESET state + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_get_no_free_descriptors - Get transfer ring length + * Get # of TD available to queue buffers + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + */ +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir); + +/** + * mhi_poll - poll for any available data to consume + * This is only applicable for DL direction + * @mhi_dev: Device associated with the channels + * @budget: In descriptors to service before returning + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_ioctl - user space IOCTL support for MHI channels + * Native support for setting TIOCM + * @mhi_dev: Device associated with the channels + * @cmd: IOCTL cmd + * @arg: Optional parameter, iotcl cmd specific + */ +long 
mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg); + +/** + * mhi_alloc_controller - Allocate mhi_controller structure + * Allocate controller structure and additional data for controller + * private data. You may get the private data pointer by calling + * mhi_controller_get_devdata + * @size: # of additional bytes to allocate + */ +struct mhi_controller *mhi_alloc_controller(size_t size); + +/** + * of_register_mhi_controller - Register MHI controller + * Registers MHI controller with MHI bus framework. DT must be supported + * @mhi_cntrl: MHI controller to register + */ +int of_register_mhi_controller(struct mhi_controller *mhi_cntrl); + +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl); + +/** + * mhi_bdf_to_controller - Look up a registered controller + * Search for controller based on device identification + * @domain: RC domain of the device + * @bus: Bus device connected to + * @slot: Slot device assigned to + * @dev_id: Device Identification + */ +struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot, + u32 dev_id); + +/** + * mhi_prepare_for_power_up - Do pre-initialization before power up + * This is optional, call this before power up if controller do not + * want bus framework to automatically free any allocated memory during shutdown + * process. + * @mhi_cntrl: MHI controller + */ +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_async_power_up - Starts MHI power up sequence + * @mhi_cntrl: MHI controller + */ +int mhi_async_power_up(struct mhi_controller *mhi_cntrl); +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_power_down - Start MHI power down sequence + * @mhi_cntrl: MHI controller + * @graceful: link is still accessible, do a graceful shutdown process otherwise + * we will shutdown host w/o putting device into RESET state + */ +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); + +/** + * mhi_unprepare_after_powre_down - free any allocated memory for power up + * @mhi_cntrl: MHI controller + */ +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_suspend - Move MHI into a suspended state + * Transition to MHI state M3 state from M0||M1||M2 state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_resume - Resume MHI from suspended state + * Transition to MHI state M0 state from M3 state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_resume(struct mhi_controller *mhi_cntrl); + +/** + * mhi_download_rddm_img - Download ramdump image from device for + * debugging purpose. + * @mhi_cntrl: MHI controller + * @in_panic: If we trying to capture image while in kernel panic + */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); + +/** + * mhi_force_rddm_mode - Force external device into rddm mode + * to collect device ramdump. This is useful if host driver assert + * and we need to see device state as well. + * @mhi_cntrl: MHI controller + */ +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); + +/** + * mhi_get_remote_time_sync - Get external soc time relative to local soc time + * using MMIO method. 
+ * @mhi_dev: Device associated with the channels + * @t_host: Pointer to output local soc time + * @t_dev: Pointer to output remote soc time + */ +int mhi_get_remote_time_sync(struct mhi_device *mhi_dev, + u64 *t_host, + u64 *t_dev); + +/** + * mhi_get_mhi_state - Return MHI state of device + * @mhi_cntrl: MHI controller + */ +enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); + +/** + * mhi_set_mhi_state - Set device state + * @mhi_cntrl: MHI controller + * @state: state to set + */ +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_dev_state state); + + +/** + * mhi_is_active - helper function to determine if MHI in active state + * @mhi_dev: client device + */ +static inline bool mhi_is_active(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + return (mhi_cntrl->dev_state >= MHI_STATE_M0 && + mhi_cntrl->dev_state <= MHI_STATE_M3); +} + +/** + * mhi_debug_reg_dump - dump MHI registers for debug purpose + * @mhi_cntrl: MHI controller + */ +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl); + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_VERB(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\ +} while (0) + +#else + +#define MHI_VERB(fmt, ...) + +#endif + +#define MHI_LOG(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\ + else if (!mhi_cntrl->klog_slient) \ + printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_ERR(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \ +} while (0) + +#define MHI_CRITICAL(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \ + pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \ +} while (0) + +int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl); +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl); + +#ifndef MHI_NAME_SIZE +#define MHI_NAME_SIZE 32 +/** + * * struct mhi_device_id - MHI device identification + * * @chan: MHI channel name + * * @driver_data: driver data; + * */ +struct mhi_device_id { + const char chan[MHI_NAME_SIZE]; + unsigned long driver_data; +}; +#endif + +#endif /* _MHI_H_ */ diff --git a/driver/quectel_MHI/src/core/mhi_boot.c b/driver/quectel_MHI/src/core/mhi_boot.c new file mode 100644 index 0000000..8f1924f --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_boot.c @@ -0,0 +1,860 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +/* Software defines */ +/* BHI Version */ +#define BHI_MAJOR_VERSION 0x1 +#define BHI_MINOR_VERSION 0x1 + +#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */ +#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */ + +#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) ) + +typedef u32 ULONG; + +typedef struct _bhi_info_type +{ + ULONG bhi_ver_minor; + ULONG bhi_ver_major; + ULONG bhi_image_address_low; + ULONG bhi_image_address_high; + ULONG bhi_image_size; + ULONG bhi_rsvd1; + ULONG bhi_imgtxdb; + ULONG bhi_rsvd2; + ULONG bhi_msivec; + ULONG bhi_rsvd3; + ULONG bhi_ee; + ULONG bhi_status; + ULONG bhi_errorcode; + ULONG bhi_errdbg1; + ULONG bhi_errdbg2; + ULONG bhi_errdbg3; + ULONG bhi_sernum; + ULONG bhi_sblantirollbackver; + ULONG bhi_numsegs; + ULONG bhi_msmhwid[6]; + ULONG bhi_oempkhash[48]; + ULONG bhi_rsvd5; +}BHI_INFO_TYPE, *PBHI_INFO_TYPE; + +static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info) +{ + ULONG index; + char str[128]; + + MHI_LOG("BHI Device Info...\n"); + MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor); + MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee); + MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status); + MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3); + MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum); + MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver); + MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs); + for (index = 0; index < 6; index++) + { + snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]); + } + MHI_LOG("BHI MSM HW-Id = %s\n", str); + + for (index = 0; index < 24; index++) + { + snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]); + } + MHI_LOG("BHI OEM PK Hash = %s\n", str); +} + +static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset) +{ + u32 out = 0; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &out); + + return (ret) ? 
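/* the offset argument is not used here as written: every call reads back BHI_EXECENV, so only the bhi_ee field reported by PrintBhiInfo() reflects the register it names */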
0 : out; +} + +static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info) +{ + ULONG index; + + memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE)); + + /* bhi_ver */ + bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR); + bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR); + bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW); + bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH); + bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE); + bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1); + bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB); + bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2); + bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC); + bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3); + bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV); + bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS); + bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE); + bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1); + bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2); + bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3); + bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU); + bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER); + bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG); + for (index = 0; index < MSMHWID_NUMDWORDS; index++) + { + bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index)); + } + for (index = 0; index < OEMPKHASH_NUMDWORDS; index++) + { + bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index)); + } + bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5); + PrintBhiInfo(mhi_cntrl, bhi_info); + /* Check the Execution Environment */ + if (!IsPBLExecEnv(bhi_info->bhi_ee)) + { + MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee); + } + + /* Return the number of bytes read */ + return 0; +} + +/* setup rddm vector table for rddm transfer */ +static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + int i = 0; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + MHI_VERB("Setting vector:%pad size:%zu\n", + &mhi_buf->dma_addr, mhi_buf->len); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } +} + +/* collect rddm during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + struct mhi_buf *mhi_buf; + u32 sequence_id; + u32 rx_status; + enum mhi_ee ee; + struct image_info *rddm_image = mhi_cntrl->rddm_image; + const u32 delayus = 2000; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + const u32 rddm_timeout_us = 200000; + int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */ + void __iomem *base = mhi_cntrl->bhie; + + MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only be executing during a kernel panic, we expect all + * other cores to shutdown while we're collecting rddm buffer. After + * returning from this function, we expect device to reset. + * + * Normaly, we would read/write pm_state only after grabbing + * pm_lock, since we're in a panic, skipping it. 
Also there is no + * gurantee this state change would take effect since + * we're setting it w/o grabbing pmlock, it's best effort + */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + /* update should take the effect immediately */ + smp_wmb(); + + /* setup the RX vector table */ + mhi_rddm_prepare(mhi_cntrl, rddm_image); + mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1]; + + MHI_LOG("Starting BHIe programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; +#else + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; +#endif + if (unlikely(!sequence_id)) + sequence_id = 1; + + + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + + MHI_LOG("Trigger device into RDDM mode\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + MHI_LOG("Waiting for device to enter RDDM\n"); + while (rddm_retry--) { + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_RDDM) + break; + + udelay(delayus); + } + + if (rddm_retry <= 0) { + /* This is a hardware reset, will force device to enter rddm */ + MHI_LOG( + "Did not enter RDDM triggering host req. reset to force rddm\n"); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, + MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ); + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + MHI_LOG("Waiting for image download completion, current EE:%s\n", + TO_MHI_EXEC_STR(ee)); + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status); + if (ret) + return -EIO; + + if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) { + MHI_LOG("RDDM successfully collected\n"); + return 0; + } + + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); + + MHI_ERR("Did not complete RDDM transfer\n"); + MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee)); + MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret); + + return -EIO; +} + +/* download ramdump image from device */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) +{ + void __iomem *base = mhi_cntrl->bhie; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct image_info *rddm_image = mhi_cntrl->rddm_image; + struct mhi_buf *mhi_buf; + int ret; + u32 rx_status; + u32 sequence_id; + + if (!rddm_image) + return -ENOMEM; + + if (in_panic) + return __mhi_download_rddm_in_panic(mhi_cntrl); + + MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + return -EIO; + } + + mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + + /* vector table is the last entry */ + mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1]; + + read_lock_bh(pm_lock); + if 
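/* MMIO access below is only attempted while pm_lock is held for read and the pm state still allows register access */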
(!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + MHI_LOG("Starting BHIe Programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; +#else + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; +#endif + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, sequence_id); + MHI_LOG("Waiting for image download completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL(mhi_download_rddm_img); + +static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + MHI_LOG("Starting BHIe Programming\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; +#else + mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; +#endif + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + mhi_cntrl->sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, mhi_cntrl->sequence_id); + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 
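/* XFER_COMPL in the TX vector status means the BHIe transfer of the AMSS image completed; anything else after the wait is treated as an I/O error */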
0 : -EIO; +} + +static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, + dma_addr_t dma_addr, + size_t size) +{ + u32 tx_status, val; + u32 ImgTxDb = 0x1; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + MHI_LOG("Starting BHI programming\n"); + + /* program start sbl download via bhi protocol */ + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb); + read_unlock_bh(pm_lock); + + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, BHI_STATUS_SHIFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto invalid_pm_state; + + if (tx_status == BHI_STATUS_ERROR) { + MHI_ERR("Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + MHI_ERR("reg:%s value:0x%x\n", + error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + return (tx_status == BHI_STATUS_SUCCESS) ? 
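/* BHI_STATUS_ERROR was handled above, so any status other than SUCCESS at this point is treated as a timeout */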
0 : -ETIMEDOUT; + +invalid_pm_state: + + return -EIO; +} + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) +{ + int i; + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + for (i = 0; i < image_info->entries; i++, mhi_buf++) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + + kfree(image_info->mhi_buf); + kfree(image_info); +} + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + /* requier additional entry for vec table */ + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n", + alloc_size, seg_size, segments); + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* last entry is for vector table */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size, + &mhi_buf->dma_addr, GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + + MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i, + (unsigned long long)mhi_buf->dma_addr, + mhi_buf->len); + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + MHI_LOG("Successfully allocated bhi vec table\n"); + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + +static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + const struct firmware *firmware, + struct image_info *img_info) +{ + size_t remainder = firmware->size; + size_t to_cpy; + const u8 *buf = firmware->data; + int i = 0; + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + + while (remainder) { + MHI_ASSERT(i >= img_info->entries, "malformed vector table"); + + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = to_cpy; + + MHI_VERB("Setting Vector:0x%llx size: %llu\n", + bhi_vec->dma_addr, bhi_vec->size); + buf += to_cpy; + remainder -= to_cpy; + i++; + bhi_vec++; + mhi_buf++; + } +} + +void mhi_fw_load_worker(struct work_struct *work) +{ + int ret; + struct mhi_controller *mhi_cntrl; + const char *fw_name; + const struct firmware *firmware; + struct image_info *image_info; + void *buf; + dma_addr_t dma_addr; + size_t size; + + mhi_cntrl = container_of(work, struct mhi_controller, fw_worker); + + MHI_LOG("Waiting for device to enter PBL from EE:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_PBL(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI is not in valid state\n"); + return; + } + + MHI_LOG("Device current EE:%s\n", 
TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* if device in pthru, we do not have to load firmware */ + if (mhi_cntrl->ee == MHI_EE_PTHRU) + return; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? + mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n"); + return; + } + + ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev); + if (ret) { + MHI_ERR("Error loading firmware, ret:%d\n", ret); + return; + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* the sbl size provided is maximum size, not necessarily image size */ + if (size > firmware->size) + size = firmware->size; + + buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); + if (!buf) { + MHI_ERR("Could not allocate memory for image\n"); + release_firmware(firmware); + return; + } + + /* load sbl image */ + memcpy(buf, firmware->data, size); + ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size); + mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); + + /* error or in edl, we're done */ + if (ret || mhi_cntrl->ee == MHI_EE_EDL) { + release_firmware(firmware); + return; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * if we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) { + MHI_ERR("Error alloc size of %zu\n", firmware->size); + goto error_alloc_fw_table; + } + + MHI_LOG("Copying firmware image into vector table\n"); + + /* load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + /* transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + + MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + if (!mhi_cntrl->fbc_download) { + release_firmware(firmware); + return; + } + + if (ret) { + MHI_ERR("Did not transition to READY state\n"); + goto error_read; + } + + /* wait for SBL event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_SBL || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI did not enter BHIE\n"); + goto error_read; + } + + /* start full firmware image download */ + image_info = mhi_cntrl->fbc_image; + ret = mhi_fw_load_amss(mhi_cntrl, + /* last entry is vec table */ + &image_info->mhi_buf[image_info->entries - 1]); + + MHI_LOG("amss fw_load, ret:%d\n", ret); + + release_firmware(firmware); + + return; + +error_read: + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + +error_alloc_fw_table: + release_firmware(firmware); +} + +int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size) +{ + int ret; + dma_addr_t dma_addr; + void *dma_buf; + + MHI_LOG("Device current EE:%s, M:%s, PM:%s\n", + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)), + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + +#if 0 + if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) { + mhi_cntrl->ee = 
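/*
 * mhi_fw_load_worker() above drives the whole boot: wait for the device to
 * reach PBL, push the SBL image over BHI, take MHI through RESET->READY, wait
 * for the SBL execution environment, then push the full image through the
 * BHIE vector table. A condensed, self-contained view of that ordering
 * follows; the helpers are stand-ins that simply succeed, not the driver's
 * functions, and the real worker gates every step on mhi_cntrl->state_event.
 */
#include <stdio.h>

static int wait_for_pbl(void)          { return 1; }  /* device executing its boot ROM (PBL/EDL) */
static int bhi_push_sbl(void)          { return 1; }  /* mhi_fw_load_sbl(): one BHI transfer */
static int wait_for_sbl(void)          { return 1; }  /* EE change to SBL after READY transition */
static int bhie_push_full_image(void)  { return 1; }  /* mhi_fw_load_amss(): vector-table transfer */

int main(void)
{
	if (!wait_for_pbl())           { puts("not in PBL");           return 1; }
	if (!bhi_push_sbl())           { puts("SBL transfer failed");  return 1; }
	/* host then moves MHI through RESET->READY before the next wait */
	if (!wait_for_sbl())           { puts("no SBL event");         return 1; }
	if (!bhie_push_full_image())   { puts("AMSS transfer failed"); return 1; }
	puts("firmware load sequence complete");
	return 0;
}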
MHI_EE_EDL; + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms + 500)); + } +#endif + +#if 0 + if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI is not in valid BHI state\n"); + return -EINVAL; + } +#endif + + if (mhi_cntrl->ee != MHI_EE_EDL) { + MHI_ERR("MHI is not in EDL state\n"); + return -EINVAL; + } + + dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); + if (!dma_buf) { + MHI_ERR("Could not allocate memory for image\n"); + return -ENOMEM; + } + + ret = copy_from_user(dma_buf, ubuf, size); + if (ret) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret); + mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);; + return ret; + } + + ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size); + mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr); + + if (ret) { + MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee); + goto error_state; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + if (ret) { + MHI_ERR("Did not transition to READY state\n"); + goto error_state; + } + + MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + /* wait for BHIE event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_FP || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI did not enter Flash Programmer Environment\n"); + goto error_state; + } + + MHI_LOG("MHI enter Flash Programmer Environment\n"); + return 0; + +error_state: + MHI_LOG("Device current EE:%s, M:%s\n", + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)), + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + + return ret; +} + +long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf) +{ + long ret = -EINVAL; + BHI_INFO_TYPE bhi_info; + + ret = BhiRead(mhi_cntrl, &bhi_info); + if (ret) { + MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret); + return ret; + } + + ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info)); + if (ret) { + MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret); + } + + return ret; +} + +long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf) +{ + long ret = -EINVAL; + size_t size; + + ret = copy_from_user(&size, ubuf, sizeof(size)); + if (ret) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret); + return ret; + } + if (size <= 0) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, size\n"); + return -EINVAL; + } + ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size); + if (ret) { + MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret); + } + + return ret; +} diff --git a/driver/quectel_MHI/src/core/mhi_dtr.c b/driver/quectel_MHI/src/core/mhi_dtr.c new file mode 100644 index 0000000..7ce44b3 --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_dtr.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +struct __packed dtr_ctrl_msg { + u32 preamble; + u32 msg_id; + u32 dest_id; + u32 size; + u32 msg; +}; + +#define CTRL_MAGIC (0x4C525443) +#define CTRL_MSG_DTR BIT(0) +#define CTRL_MSG_RTS BIT(1) +#define CTRL_MSG_DCD BIT(0) +#define CTRL_MSG_DSR BIT(1) +#define CTRL_MSG_RI BIT(3) +#define CTRL_HOST_STATE (0x10) +#define CTRL_DEVICE_STATE (0x11) +#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF) + +static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev, + u32 tiocm) +{ + struct dtr_ctrl_msg *dtr_msg = NULL; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + spinlock_t *res_lock = &mhi_dev->dev.devres_lock; + u32 cur_tiocm; + int ret = 0; + + cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + tiocm &= (TIOCM_DTR | TIOCM_RTS); + + /* state did not changed */ + if (cur_tiocm == tiocm) + return 0; + + mutex_lock(&dtr_chan->mutex); + + dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL); + if (!dtr_msg) { + ret = -ENOMEM; + goto tiocm_exit; + } + + dtr_msg->preamble = CTRL_MAGIC; + dtr_msg->msg_id = CTRL_HOST_STATE; + dtr_msg->dest_id = mhi_dev->ul_chan_id; + dtr_msg->size = sizeof(u32); + if (tiocm & TIOCM_DTR) + dtr_msg->msg |= CTRL_MSG_DTR; + if (tiocm & TIOCM_RTS) + dtr_msg->msg |= CTRL_MSG_RTS; + +/* +* 'minicom -D /dev/mhi_DUN' will send RTS:1 when open, and RTS:0 when exit. +* RTS:0 will prevent modem output AT response. +* But 'busybox microcom' do not send any RTS to modem. +* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1 +* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0 +*/ + dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__, + !!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS)); + + reinit_completion(&dtr_chan->completion); + ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg, + sizeof(*dtr_msg), MHI_EOT); + if (ret) + goto tiocm_exit; + + ret = wait_for_completion_timeout(&dtr_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret) { + MHI_ERR("Failed to receive transfer callback\n"); + ret = -EIO; + goto tiocm_exit; + } + + ret = 0; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS); + mhi_dev->tiocm |= tiocm; + spin_unlock_irq(res_lock); + +tiocm_exit: + kfree(dtr_msg); + mutex_unlock(&dtr_chan->mutex); + + return ret; +} + +long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + /* ioctl not supported by this controller */ + if (!mhi_cntrl->dtr_dev) + return -EIO; + + switch (cmd) { + case TIOCMGET: + return mhi_dev->tiocm; + case TIOCMSET: + { + u32 tiocm; + + ret = get_user(tiocm, (u32 *)arg); + if (ret) + return ret; + + return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm); + } + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(mhi_ioctl); + +static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + size_t mtu = mhi_dev->mtu; + void *buf; + int ret = -EIO, i; + + for (i = 0; i < nr_trbs; i++) { + buf = kmalloc(mtu, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, + MHI_EOT); + if (ret) { + kfree(buf); + return ret; + } + } + + return 
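/*
 * mhi_ioctl() above exposes the modem-control lines through the usual
 * TIOCMGET/TIOCMSET requests, and the comment in mhi_dtr_tiocmset() notes that
 * some terminal programs never raise RTS, which silences AT responses. A
 * userspace sketch that raises DTR and RTS follows. It assumes the uci char
 * device forwards these ioctls to mhi_ioctl() unchanged and that the node is
 * /dev/mhi_DUN (the name used in the comment above); note that TIOCMGET here
 * comes back as the ioctl() return value rather than through the argument.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>          /* TIOCMGET, TIOCMSET, TIOCM_DTR, TIOCM_RTS, ... */

int main(void)
{
	int fd = open("/dev/mhi_DUN", O_RDWR | O_NOCTTY);
	unsigned int set = TIOCM_DTR | TIOCM_RTS;   /* driver masks TIOCMSET down to DTR|RTS */
	int bits;

	if (fd < 0) { perror("open"); return 1; }

	bits = ioctl(fd, TIOCMGET, 0);              /* current DTR/RTS/DCD/DSR/RI bits */
	if (bits < 0) { perror("TIOCMGET"); close(fd); return 1; }
	printf("DTR=%d RTS=%d DCD=%d DSR=%d RI=%d\n",
	       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS), !!(bits & TIOCM_CD),
	       !!(bits & TIOCM_DSR), !!(bits & TIOCM_RI));

	if (ioctl(fd, TIOCMSET, &set) < 0)          /* assert DTR and RTS */
		perror("TIOCMSET");

	close(fd);
	return 0;
}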
ret; +} + +static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr; + u32 chan; + spinlock_t *res_lock; + + if (mhi_result->transaction_status == -ENOTCONN) { + kfree(mhi_result->buf_addr); + return; + } + + if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) { + MHI_ERR("Unexpected length %zu received\n", + mhi_result->bytes_xferd); + return; + } + + MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n", + dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id, + dtr_msg->msg); + + chan = CTRL_GET_CHID(dtr_msg); + if (chan >= mhi_cntrl->max_chan) + goto auto_queue; + + mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev; + if (!mhi_dev) + goto auto_queue; + + res_lock = &mhi_dev->dev.devres_lock; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + if (dtr_msg->msg & CTRL_MSG_DCD) + mhi_dev->tiocm |= TIOCM_CD; + + if (dtr_msg->msg & CTRL_MSG_DSR) + mhi_dev->tiocm |= TIOCM_DSR; + + if (dtr_msg->msg & CTRL_MSG_RI) + mhi_dev->tiocm |= TIOCM_RI; + spin_unlock_irq(res_lock); + +auto_queue: + mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr, + mhi_cntrl->dtr_dev->mtu, MHI_EOT); +} + +static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + + MHI_VERB("Received with status:%d\n", mhi_result->transaction_status); + if (!mhi_result->transaction_status) + complete(&dtr_chan->completion); +} + +static void mhi_dtr_remove(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_cntrl->dtr_dev = NULL; +} + +static int mhi_dtr_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + MHI_LOG("Enter for DTR control channel\n"); + + mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); + ret = mhi_prepare_for_transfer(mhi_dev); + if (!ret) + mhi_cntrl->dtr_dev = mhi_dev; + + if (!ret) + ret = mhi_dtr_queue_inbound(mhi_cntrl); + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +static const struct mhi_device_id mhi_dtr_table[] = { + { .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) }, + {}, +}; + +static struct mhi_driver mhi_dtr_driver = { + .id_table = mhi_dtr_table, + .remove = mhi_dtr_remove, + .probe = mhi_dtr_probe, + .ul_xfer_cb = mhi_dtr_ul_xfer_cb, + .dl_xfer_cb = mhi_dtr_dl_xfer_cb, + .driver = { + .name = "MHI_DTR", + .owner = THIS_MODULE, + } +}; + +int __init mhi_dtr_init(void) +{ + return mhi_driver_register(&mhi_dtr_driver); +} +void mhi_dtr_exit(void) { + mhi_driver_unregister(&mhi_dtr_driver); +} diff --git a/driver/quectel_MHI/src/core/mhi_init.c b/driver/quectel_MHI/src/core/mhi_init.c new file mode 100644 index 0000000..bd8c267 --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_init.c @@ -0,0 +1,2782 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,11,0 )) +#include +#else +#include +#endif +#include "mhi.h" +#include "mhi_internal.h" + +struct mhi_controller_map { + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + struct mhi_controller *mhi_cntrl; +}; + +#define MAX_MHI_CONTROLLER 16 +struct mhi_controller_map mhi_controller_minors[MAX_MHI_CONTROLLER]; + +#define MHI_CNTRL_DRIVER_NAME "mhi_cntrl_q" +struct mhi_cntrl_drv { + struct list_head head; + struct mutex lock; + struct class *class; + int major; +}; +static struct mhi_cntrl_drv mhi_cntrl_drv; + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PBL", + [MHI_EE_SBL] = "SBL", + [MHI_EE_AMSS] = "AMSS", + [MHI_EE_RDDM] = "RDDM", + [MHI_EE_WFW] = "WFW", + [MHI_EE_PTHRU] = "PASS THRU", + [MHI_EE_EDL] = "EDL", + [MHI_EE_FP] = "FlashProg", + [MHI_EE_DISABLE_TRANSITION] = "DISABLE", +}; + +const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = { + [MHI_ST_TRANSITION_PBL] = "PBL", + [MHI_ST_TRANSITION_READY] = "READY", + [MHI_ST_TRANSITION_SBL] = "SBL", + [MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", + [MHI_ST_TRANSITION_FP] = "FlashProg", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS_ERR", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_BIT_DISABLE] = "DISABLE", + [MHI_PM_BIT_POR] = "POR", + [MHI_PM_BIT_M0] = "M0", + [MHI_PM_BIT_M2] = "M2", + [MHI_PM_BIT_M3_ENTER] = "M?->M3", + [MHI_PM_BIT_M3] = "M3", + [MHI_PM_BIT_M3_EXIT] = "M3->M0", + [MHI_PM_BIT_FW_DL_ERR] = "FW DL Error", + [MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect", + [MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process", + [MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect", +}; + +struct mhi_bus mhi_bus; + +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state) +{ + int index = find_last_bit((unsigned long *)&state, 32); + + if (index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +#if 0 +/* MHI protocol require transfer ring to be aligned to ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size, + &ring->dma_handle, GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + return 0; +} +#endif + +static void mhi_ring_aligned_check(struct mhi_controller *mhi_cntrl, u64 rbase, u64 rlen) { + uint64_t ra; + + ra = rbase; + do_div(ra, roundup_pow_of_two(rlen)); + + if (rbase != ra * roundup_pow_of_two(rlen)) { + MHI_ERR("bad params ring base not aligned 0x%llx align 0x%lx\n", rbase, roundup_pow_of_two(rlen)); + } +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + + if (mhi_cntrl->msi_allocated == 1) { + free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl); + return; + } + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if 
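/*
 * mhi_ring_aligned_check() above enforces the MHI rule that a ring's base
 * address must be a multiple of its length rounded up to the next power of
 * two. A standalone version of the same arithmetic, using plain C in place of
 * do_div() and roundup_pow_of_two().
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t roundup_pow2(uint64_t x)        /* like roundup_pow_of_two() */
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

static bool ring_aligned(uint64_t rbase, uint64_t rlen)
{
	return (rbase % roundup_pow2(rlen)) == 0;
}

int main(void)
{
	printf("%d\n", ring_aligned(0x10000, 256 * 16));  /* 4096-byte ring at 64 KiB: aligned */
	printf("%d\n", ring_aligned(0x10010, 256 * 16));  /* base off by 16 bytes: not aligned */
	return 0;
}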
(mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + + free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + int i; + int ret; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + if (mhi_cntrl->msi_allocated == 1) { + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + mhi_event->msi = 0; + } + + ret = request_threaded_irq(mhi_cntrl->irq[0], NULL, + mhi_one_msi_handlr, IRQF_ONESHOT, "mhi", mhi_cntrl); + if (ret) { + MHI_ERR("Error requesting irq:%d, ret=%d\n", mhi_cntrl->irq[0], ret); + } + return ret; + } + + /* for BHI INTVEC msi */ + ret = request_threaded_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_intvec_handlr, + mhi_intvec_threaded_handlr, IRQF_ONESHOT, + "mhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ret = request_irq(mhi_cntrl->irq[mhi_event->msi], + mhi_msi_handlr, IRQF_SHARED, "mhi", + mhi_event); + if (ret) { + MHI_ERR("Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->msi], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; +#if 0 + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); +#endif + ring->base = NULL; + ring->iommu_base = 0; + } + +#if 0 + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); +#endif + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; +#if 0 + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); +#endif + ring->base = NULL; + ring->iommu_base = 0; + } + +#if 0 + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); +#endif + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +static int mhi_init_debugfs_mhi_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_states_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_event_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_event_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_chan_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_chan_show, inode->i_private); +} + +static const struct file_operations debugfs_state_ops = { + .open = mhi_init_debugfs_mhi_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_ev_ops = { + 
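/*
 * mhi_init_irq_setup() above supports two layouts: if only one MSI vector was
 * allocated, the BHI interrupt and every event ring share vector 0 behind a
 * single threaded handler; otherwise BHI gets msi_irq_base and each event ring
 * requests its own vector. The sketch below just prints the resulting mapping;
 * the "1 + i" per-ring assignment mirrors the hardcoded config later in this
 * file and is not computed here from hardware.
 */
#include <stdio.h>

static void msi_map(int msi_allocated, int total_ev_rings, int msi_irq_base)
{
	int i;

	if (msi_allocated == 1) {
		printf("single MSI: BHI and all %d event rings share vector 0\n",
		       total_ev_rings);
		return;
	}

	printf("BHI interrupt -> vector %d\n", msi_irq_base);
	for (i = 0; i < total_ev_rings; i++)
		printf("event ring %-2d -> vector %d\n", i, 1 + i + msi_irq_base);
}

int main(void)
{
	msi_map(1, 8, 0);   /* host granted a single vector */
	msi_map(9, 8, 0);   /* enough vectors for BHI plus 8 event rings */
	return 0;
}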
.open = mhi_init_debugfs_mhi_event_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_chan_ops = { + .open = mhi_init_debugfs_mhi_chan_open, + .release = single_release, + .read = seq_read, +}; + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL, + mhi_debugfs_trigger_reset, "%llu\n"); + +#ifdef ENABLE_MHI_MON +struct mon_event_text { + struct list_head e_link; + int type; /* submit, complete, etc. */ + unsigned int tstamp; + u32 chan; + dma_addr_t wp; + struct mhi_tre mhi_tre; + u8 data[32]; + size_t len; +}; + +#define EVENT_MAX (16*PAGE_SIZE / sizeof(struct mon_event_text)) +#define PRINTF_DFL 250 +#define SLAB_NAME_SZ 30 + +struct mon_reader_text { + struct kmem_cache *e_slab; + int nevents; + struct list_head e_list; + struct mon_reader r; /* In C, parent class can be placed anywhere */ + + wait_queue_head_t wait; + int printf_size; + char *printf_buf; + int left_size; + int left_pos; + struct mutex printf_lock; + + char slab_name[SLAB_NAME_SZ]; +}; + +struct mon_text_ptr { + int cnt, limit; + char *pbuf; +}; + +static DEFINE_MUTEX(mon_lock); + +static inline unsigned int mon_get_timestamp(void) +{ + struct timespec64 now; + unsigned int stamp; + + ktime_get_ts64(&now); + stamp = now.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */ + stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC; + return stamp; +} + +static void mon_text_event(struct mon_reader_text *rp, + u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len, + char ev_type) +{ + struct mon_event_text *ep; + + if (rp->nevents >= EVENT_MAX || + (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + rp->r.m_bus->cnt_text_lost++; + return; + } + + ep->type = ev_type; + ep->tstamp = mon_get_timestamp(); + ep->chan = chan; + ep->wp = wp; + ep->mhi_tre = *mhi_tre; + if (len > sizeof(ep->data)) + len = sizeof(ep->data); + memcpy(ep->data, buf, len); + ep->len = len; + rp->nevents++; + list_add_tail(&ep->e_link, &rp->e_list); + wake_up(&rp->wait); +} + +static void mon_text_submit(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'W'); +} + +static void mon_text_receive(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'R'); +} + +static void mon_text_complete(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, NULL, 0, 'E'); +} + +static void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->nreaders++; + list_add_tail(&r->r_link, &mbus->r_list); + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_get(&mbus->ref); +} + +static void mon_bus_drop(struct kref *r) +{ + struct mhi_controller *mbus = container_of(r, struct mhi_controller, ref); + kfree(mbus); +} + +static void mon_reader_del(struct mhi_controller *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + list_del(&r->r_link); + --mbus->nreaders; + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_put(&mbus->ref, mon_bus_drop); +} + +static void mon_text_ctor(void *mem) +{ + /* + * Nothing to initialize. No, really! + * So, we fill it with garbage to emulate a reused object. 
+ */ + memset(mem, 0xe5, sizeof(struct mon_event_text)); +} + +static int mon_text_open(struct inode *inode, struct file *file) +{ + struct mhi_controller *mbus; + struct mon_reader_text *rp; + int rc; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL); + if (rp == NULL) { + rc = -ENOMEM; + goto err_alloc; + } + INIT_LIST_HEAD(&rp->e_list); + init_waitqueue_head(&rp->wait); + mutex_init(&rp->printf_lock); + + rp->printf_size = PRINTF_DFL; + rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL); + if (rp->printf_buf == NULL) { + rc = -ENOMEM; + goto err_alloc_pr; + } + + rp->r.m_bus = mbus; + rp->r.r_data = rp; + rp->r.rnf_submit = mon_text_submit; + rp->r.rnf_receive = mon_text_receive; + rp->r.rnf_complete = mon_text_complete; + + snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp); + rp->e_slab = kmem_cache_create(rp->slab_name, + sizeof(struct mon_event_text), sizeof(long), 0, + mon_text_ctor); + if (rp->e_slab == NULL) { + rc = -ENOMEM; + goto err_slab; + } + + mon_reader_add(mbus, &rp->r); + + file->private_data = rp; + mutex_unlock(&mon_lock); + return 0; + +// err_busy: +// kmem_cache_destroy(rp->e_slab); +err_slab: + kfree(rp->printf_buf); +err_alloc_pr: + kfree(rp); +err_alloc: + mutex_unlock(&mon_lock); + return rc; +} + +static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp, + struct mhi_controller *mbus) +{ + struct list_head *p; + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + if (list_empty(&rp->e_list)) { + spin_unlock_irqrestore(&mbus->lock, flags); + return NULL; + } + p = rp->e_list.next; + list_del(p); + --rp->nevents; + spin_unlock_irqrestore(&mbus->lock, flags); + return list_entry(p, struct mon_event_text, e_link); +} + +static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, + struct file *file) +{ + struct mhi_controller *mbus = rp->r.m_bus; + DECLARE_WAITQUEUE(waita, current); + struct mon_event_text *ep; + + add_wait_queue(&rp->wait, &waita); + set_current_state(TASK_INTERRUPTIBLE); + while ((ep = mon_text_fetch(rp, mbus)) == NULL) { + if (file->f_flags & O_NONBLOCK) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EWOULDBLOCK); + } + /* + * We do not count nwaiters, because ->release is supposed + * to be called when all openers are gone only. 
+ */ + schedule(); + if (signal_pending(current)) { + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EINTR); + } + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ep; +} + +static ssize_t mon_text_read_u(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; + + if (rp->left_size) { + int cnt = rp->left_size; + + if (cnt > nbytes) + cnt = nbytes; + if (copy_to_user(buf, rp->printf_buf + rp->left_pos, cnt)) + return -EFAULT; + rp->left_pos += cnt; + rp->left_size -= cnt; + return cnt; + } + + if (IS_ERR(ep = mon_text_read_wait(rp, file))) + return PTR_ERR(ep); + mutex_lock(&rp->printf_lock); + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + "%u %c %03d WP:%llx TRE: %llx %08x %08x", + ep->tstamp, ep->type, ep->chan, (long long unsigned int)ep->wp, + ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]); + + if (ep->len) { + struct mon_text_ptr *p = &ptr; + size_t i = 0; + + for (i = 0; i < ep->len; i++) { + if (i % 4 == 0) { + p->cnt += snprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + " "); + } + p->cnt += snprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + "%02x", ep->data[i]); + } + + } + + ptr.cnt += snprintf(ptr.pbuf +ptr.cnt, ptr.limit - ptr.cnt, "\n"); + + if (ptr.cnt > nbytes) { + rp->left_pos = nbytes; + rp->left_size = ptr.cnt - nbytes; + ptr.cnt = nbytes; + } + + if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) + ptr.cnt = -EFAULT; + mutex_unlock(&rp->printf_lock); + kmem_cache_free(rp->e_slab, ep); + return ptr.cnt; +} + +static int mon_text_release(struct inode *inode, struct file *file) +{ + struct mon_reader_text *rp = file->private_data; + struct mhi_controller *mbus; + /* unsigned long flags; */ + struct list_head *p; + struct mon_event_text *ep; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + if (mbus->nreaders <= 0) { + mutex_unlock(&mon_lock); + return 0; + } + mon_reader_del(mbus, &rp->r); + + /* + * In theory, e_list is protected by mbus->lock. However, + * after mon_reader_del has finished, the following is the case: + * - we are not on reader list anymore, so new events won't be added; + * - whole mbus may be dropped if it was orphaned. + * So, we better not touch mbus. 
+ */ + /* spin_lock_irqsave(&mbus->lock, flags); */ + while (!list_empty(&rp->e_list)) { + p = rp->e_list.next; + ep = list_entry(p, struct mon_event_text, e_link); + list_del(p); + --rp->nevents; + kmem_cache_free(rp->e_slab, ep); + } + /* spin_unlock_irqrestore(&mbus->lock, flags); */ + + kmem_cache_destroy(rp->e_slab); + kfree(rp->printf_buf); + kfree(rp); + + mutex_unlock(&mon_lock); + return 0; +} + + +static const struct file_operations mon_fops_text_u = { + .owner = THIS_MODULE, + .open = mon_text_open, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0)) + .llseek = no_llseek, +#endif + .read = mon_text_read_u, + .release = mon_text_release, +}; +#endif + +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl) +{ + struct dentry *dentry; + char node[64]; + +#ifdef ENABLE_MHI_MON + struct mhi_controller *mbus = mhi_cntrl; + + mbus->nreaders = 0; + kref_init(&mbus->ref); + spin_lock_init(&mbus->lock); + INIT_LIST_HEAD(&mbus->r_list); +#endif + + if (!mhi_cntrl->parent) + snprintf(node, sizeof(node), "mhi_%04x_%02x:%02x.%02x", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + else + snprintf(node, sizeof(node), "%04x_%02x:%02x.%02x", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + + dentry = debugfs_create_dir(node, mhi_cntrl->parent); + if (IS_ERR_OR_NULL(dentry)) + return; + + debugfs_create_file("states", 0444, dentry, mhi_cntrl, + &debugfs_state_ops); + debugfs_create_file("events", 0444, dentry, mhi_cntrl, + &debugfs_ev_ops); + debugfs_create_file("chan", 0444, dentry, mhi_cntrl, + &debugfs_chan_ops); + debugfs_create_file("reset", 0444, dentry, mhi_cntrl, + &debugfs_trigger_reset_fops); +#ifdef ENABLE_MHI_MON + debugfs_create_file("mhimon", 0444, dentry, mhi_cntrl, + &mon_fops_text_u); +#endif + mhi_cntrl->dentry = dentry; +} + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->dentry); + mhi_cntrl->dentry = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->alloc_size, 0); + atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + mhi_ctxt->ctrl_seg = mhi_alloc_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), + &mhi_ctxt->ctrl_seg_addr, GFP_KERNEL); + MHI_LOG("mhi_ctxt->ctrl_seg = %p\n", mhi_ctxt->ctrl_seg); + if (!mhi_ctxt->ctrl_seg) + goto error_alloc_chan_ctxt; + + if ((unsigned long)mhi_ctxt->ctrl_seg & (4096-1)) { + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + goto error_alloc_chan_ctxt; + } + + /* setup channel ctxt */ +#if 1 + mhi_ctxt->chan_ctxt = mhi_ctxt->ctrl_seg->chan_ctxt; + mhi_ctxt->chan_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ctxt); +#else + mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; +#endif + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* If it's offload channel skip this step */ + if (mhi_chan->offload_ch) + 
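/*
 * mon_text_read_u() above formats every captured element as
 * "<tstamp> <type> <chan> WP:<wp> TRE: <ptr> <dword0> <dword1>" followed by up
 * to 32 payload bytes in hex, and mhi_init_debugfs() publishes the reader as a
 * "mhimon" node. The timestamp is microseconds with the seconds truncated to
 * 12 bits (see mon_get_timestamp()), so it wraps roughly every 68 minutes. A
 * userspace sketch that parses those fields follows; the debugfs path below is
 * an assumption built from the snprintf() format above and will differ per
 * system and controller.
 */
#include <stdio.h>

int main(void)
{
	const char *node = "/sys/kernel/debug/mhi_q/0306_00:01.00/mhimon"; /* adjust to your device */
	FILE *fp = fopen(node, "r");
	char line[512];

	if (!fp) { perror("fopen"); return 1; }

	while (fgets(line, sizeof(line), fp)) {
		unsigned int tstamp, chan, dword0, dword1;
		unsigned long long wp, tre_ptr;
		char type;

		if (sscanf(line, "%u %c %u WP:%llx TRE: %llx %x %x",
			   &tstamp, &type, &chan, &wp, &tre_ptr, &dword0, &dword1) == 7)
			printf("chan %3u %c tre_ptr=%llx dword0=%08x dword1=%08x\n",
			       chan, type, tre_ptr, dword0, dword1);
	}
	fclose(fp);
	return 0;
}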
continue; + + chan_ctxt->chstate = MHI_CH_STATE_DISABLED; + chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode; + chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg; + chan_ctxt->chtype = mhi_chan->type; + chan_ctxt->erindex = mhi_chan->er_index; + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = &chan_ctxt->wp; + } + + /* setup event context */ +#if 1 + mhi_ctxt->er_ctxt = mhi_ctxt->ctrl_seg->er_ctxt; + mhi_ctxt->er_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, er_ctxt); +#else + mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; +#endif + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* it's a satellite ev, we do not touch it */ + if (mhi_event->offload_ev) + continue; + + er_ctxt->intmodc = 0; + er_ctxt->intmodt = mhi_event->intmod; + er_ctxt->ertype = MHI_ER_TYPE_VALID; + if (mhi_cntrl->msi_allocated == 1) { + mhi_event->msi = 0; + } + er_ctxt->msivec = mhi_event->msi; + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; +#if 1 + ring->alloc_size = ring->len; + ring->pre_aligned = mhi_ctxt->ctrl_seg->event_ring[i]; + ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, event_ring[i]); + ring->iommu_base = ring->dma_handle; + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; +#endif + + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = ring->iommu_base; + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = ring->len; + ring->ctxt_wp = &er_ctxt->wp; + + mhi_ring_aligned_check(mhi_cntrl, er_ctxt->rbase, er_ctxt->rlen); + memset(ring->base, 0xCC, ring->len); + } + + /* setup cmd context */ +#if 1 + mhi_ctxt->cmd_ctxt = mhi_ctxt->ctrl_seg->cmd_ctxt; + mhi_ctxt->cmd_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ctxt); +#else + mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; +#endif + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; +#if 1 + ring->alloc_size = ring->len; + ring->pre_aligned = mhi_ctxt->ctrl_seg->cmd_ring[i]; + ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ring[i]); + ring->iommu_base = ring->dma_handle; + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; +#endif + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = ring->len; + ring->ctxt_wp = &cmd_ctxt->wp; + + mhi_ring_aligned_check(mhi_cntrl, cmd_ctxt->rbase, cmd_ctxt->rlen); + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +#if 0 +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + 
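/*
 * mhi_init_dev_ctxt() above does not allocate each context array and ring on
 * its own: a single coherent ctrl_seg block is allocated once, every piece
 * gets its CPU pointer from the corresponding struct member, and its DMA
 * address is derived as ctrl_seg_addr + offsetof(...). The standalone sketch
 * below shows that pattern on a simplified layout; demo_ctrl_seg and the fake
 * bus address are illustrative, not the driver's struct mhi_ctrl_seg. (The
 * driver spells the per-element form as offsetof(struct mhi_ctrl_seg,
 * event_ring[i]); the portable equivalent is used here.)
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_ring { uint64_t el[16]; };

struct demo_ctrl_seg {
	struct demo_ring cmd_ring[1];
	struct demo_ring event_ring[3];
	uint32_t chan_ctxt[32];
};

int main(void)
{
	static struct demo_ctrl_seg seg;        /* stands in for the coherent allocation */
	uint64_t seg_dma = 0x90000000ull;       /* fake bus address of that allocation */
	int i;

	for (i = 0; i < 3; i++) {
		uint64_t dma = seg_dma + offsetof(struct demo_ctrl_seg, event_ring)
			       + (uint64_t)i * sizeof(struct demo_ring);

		printf("event ring %d: cpu=%p dma=0x%llx\n", i,
		       (void *)&seg.event_ring[i], (unsigned long long)dma);
	}
	printf("chan ctxt array: dma=0x%llx\n",
	       (unsigned long long)(seg_dma + offsetof(struct demo_ctrl_seg, chan_ctxt)));
	return 0;
}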
struct mhi_ring *ring = &mhi_cmd->ring; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); +#endif + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +static int mhi_get_tsync_er_cfg(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + /* find event ring with timesync support */ + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) + if (mhi_event->data_type == MHI_ER_TSYNC_ELEMENT_TYPE) + return mhi_event->er_index; + + return -ENOENT; +} + +int mhi_init_timesync(struct mhi_controller *mhi_cntrl) +{ + struct mhi_timesync *mhi_tsync; + u32 time_offset, db_offset; + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + ret = -EIO; + goto exit_timesync; + } + + ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID, + &time_offset); + if (ret) { + MHI_LOG("No timesync capability found\n"); + goto exit_timesync; + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!mhi_cntrl->time_get || !mhi_cntrl->lpm_disable || + !mhi_cntrl->lpm_enable) + return -EINVAL; + + /* register method supported */ + mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL); + if (!mhi_tsync) + return -ENOMEM; + + spin_lock_init(&mhi_tsync->lock); + INIT_LIST_HEAD(&mhi_tsync->head); + init_completion(&mhi_tsync->completion); + + /* save time_offset for obtaining time */ + MHI_LOG("TIME OFFS:0x%x\n", time_offset); + mhi_tsync->time_reg = mhi_cntrl->regs + time_offset + + TIMESYNC_TIME_LOW_OFFSET; + + mhi_cntrl->mhi_tsync = mhi_tsync; + + ret = mhi_create_timesync_sysfs(mhi_cntrl); + if (unlikely(ret)) { + /* kernel method still work */ + MHI_ERR("Failed to create timesync sysfs nodes\n"); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + ret = -EIO; + goto exit_timesync; + } + + /* get DB offset if supported, else return */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + time_offset + TIMESYNC_DB_OFFSET, &db_offset); + if (ret || !db_offset) { + ret = 0; + goto exit_timesync; + } + + MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset); + mhi_tsync->db = mhi_cntrl->regs + db_offset; + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* get time-sync event ring configuration */ + ret = mhi_get_tsync_er_cfg(mhi_cntrl); + if (ret < 0) { + MHI_LOG("Could not find timesync event ring\n"); + return ret; + } + + mhi_tsync->er_index = ret; + + ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG); + if (ret) { + MHI_ERR("Failed to send time sync cfg cmd\n"); + return ret; + } + + ret = wait_for_completion_timeout(&mhi_tsync->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to get time cfg cmd 
completion\n"); + return -EIO; + } + + return 0; + +exit_timesync: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val = 0; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, +#if 0 //carl.yin 20190527 UDE-WIN-InitMmio + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, +#endif + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr+sizeof(struct mhi_ctrl_seg)), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr+sizeof(struct mhi_ctrl_seg)), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0, 0 } + }; + + MHI_LOG("Initializing MMIO\n"); + + /* set up DB register for all the chan rings */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("CHDBOFF:0x%x\n", val); + + /* setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); +#if 0 //'EM120RGLAPR02A07M4G_11' will treate as chan 127's interrput, and report complete event over cmd ring, but cmd ring is not set by now + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; +#endif + + /* setup channel db addresses */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* setup event ring db addresses */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("ERDBOFF:0x%x\n", val); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* set up DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + MHI_LOG("Programming all MMIO values.\n"); + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct 
mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + +#if 0 + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); +#endif + kfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + chan_ctxt->rbase = 0; +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; +#if 1 + tre_ring->alloc_size = tre_ring->len; + if (MHI_CLIENT_IP_HW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_HW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_out_chan_ring[mhi_chan->ring]); + } +#ifdef ENABLE_IP_SW0 + else if (MHI_CLIENT_IP_SW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_SW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_out_chan_ring[mhi_chan->ring]); + } +#endif + +#ifdef ENABLE_ADPL + else if (MHI_CLIENT_ADPL == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->adpl_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, adpl_in_chan_ring[mhi_chan->ring]); + } +#endif + +#ifdef ENABLE_QDSS + else if (MHI_CLIENT_IP_HW_QDSS == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->qdss_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, qdss_in_chan_ring[mhi_chan->ring]); + } +#endif + + else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]); + } + else { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ring[mhi_chan->ring]); + } + tre_ring->iommu_base = tre_ring->dma_handle; + tre_ring->base = tre_ring->pre_aligned + (tre_ring->iommu_base - tre_ring->dma_handle); + ret = 0; +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); +#endif + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = 
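/*
 * mhi_init_chan_ctxt() above carves each channel's transfer ring out of a
 * dedicated array inside ctrl_seg for the hardware IPA channels, the optional
 * SW0/ADPL/QDSS channels and DIAG_IN, and out of the shared chan_ring[] pool
 * for everything else; the shadow buf_ring next to it is ordinary kzalloc'd
 * host memory. A condensed sketch of that selection follows; the DEMO_* IDs
 * are placeholders for the driver's MHI_CLIENT_* channel numbers.
 */
#include <stdio.h>

enum { DEMO_IP_HW_0_OUT, DEMO_IP_HW_0_IN, DEMO_IP_SW_0_OUT, DEMO_IP_SW_0_IN,
       DEMO_DIAG_IN, DEMO_OTHER };

static const char *ring_pool(int chan)
{
	switch (chan) {
	case DEMO_IP_HW_0_IN:   return "hw_in_chan_ring[]";
	case DEMO_IP_HW_0_OUT:  return "hw_out_chan_ring[]";
	case DEMO_IP_SW_0_IN:   return "sw_in_chan_ring[]";   /* only with ENABLE_IP_SW0 */
	case DEMO_IP_SW_0_OUT:  return "sw_out_chan_ring[]";  /* only with ENABLE_IP_SW0 */
	case DEMO_DIAG_IN:      return "diag_in_chan_ring[]";
	default:                return "chan_ring[]";         /* shared pool for the rest */
	}
}

int main(void)
{
	int chans[] = { DEMO_IP_HW_0_IN, DEMO_DIAG_IN, DEMO_OTHER };
	unsigned int i;

	for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
		printf("chan %d -> %s\n", chans[i], ring_pool(chans[i]));
	return 0;
}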
buf_ring->el_size * buf_ring->elements; + buf_ring->base = kzalloc(buf_ring->len, GFP_KERNEL); + + if (!buf_ring->base) { +#if 0 + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); +#endif + return -ENOMEM; + } + + chan_ctxt->chstate = MHI_CH_STATE_ENABLED; + chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = tre_ring->len; + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = true; + + mhi_ring_aligned_check(mhi_cntrl, chan_ctxt->rbase, chan_ctxt->rlen); + /* update to all cores */ + smp_wmb(); + + return 0; +} + +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *ch_ctxt; + int er_index, chan; + + switch (dir) { + case DMA_TO_DEVICE: + mhi_chan = mhi_dev->ul_chan; + break; + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + case DMA_NONE: + mhi_chan = mhi_dev->dl_chan; + break; + default: + return -EINVAL; + } + + er_index = mhi_chan->er_index; + chan = mhi_chan->chan; + + for (; elements > 0; elements--, cfg_tbl++) { + /* update event context array */ + if (!strcmp(cfg_tbl->name, "ECA")) { + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index]; + if (sizeof(*er_ctxt) != cfg_tbl->len) { + MHI_ERR( + "Invalid ECA size, expected:%zu actual%zu\n", + sizeof(*er_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt)); + continue; + } + + /* update channel context array */ + if (!strcmp(cfg_tbl->name, "CCA")) { + ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan]; + if (cfg_tbl->len != sizeof(*ch_ctxt)) { + MHI_ERR( + "Invalid CCA size, expected:%zu actual:%zu\n", + sizeof(*ch_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt)); + continue; + } + + return -EINVAL; + } + + return 0; +} + +#if 0 +static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int i, ret, num = 0; + struct mhi_event *mhi_event; + struct device_node *child; + + of_node = of_find_node_by_name(of_node, "mhi_events"); + if (!of_node) + return -EINVAL; + + for_each_available_child_of_node(of_node, child) { + if (!strcmp(child->name, "mhi_event")) + num++; + } + + if (!num) + return -EINVAL; + + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* populate ev ring */ + mhi_event = mhi_cntrl->mhi_event; + i = 0; + for_each_available_child_of_node(of_node, child) { + if (strcmp(child->name, "mhi_event")) + continue; + + mhi_event->er_index = i++; + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_event->ring.elements); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,intmod", + &mhi_event->intmod); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,msi", + &mhi_event->msi); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,chan", + &mhi_event->chan); + if (!ret) { + if (mhi_event->chan >= mhi_cntrl->max_chan) + goto error_ev_cfg; + /* this event ring has a dedicated channel */ + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + ret = 
of_property_read_u32(child, "mhi,priority", + &mhi_event->priority); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,brstmode", + &mhi_event->db_cfg.brstmode); + if (ret || MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + mhi_event->db_cfg.process_db = + (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ? + mhi_db_brstmode : mhi_db_brstmode_disable; + + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_event->data_type); + if (ret) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; + + if (mhi_event->data_type > MHI_ER_DATA_TYPE_MAX) + goto error_ev_cfg; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev"); + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + mhi_event->cl_manage = of_property_read_bool(child, + "mhi,client-manage"); + mhi_event->offload_ev = of_property_read_bool(child, + "mhi,offload"); + mhi_event++; + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1; + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int ret; + struct device_node *child; + u32 chan; + + ret = of_property_read_u32(of_node, "mhi,max-channels", + &mhi_cntrl->max_chan); + if (ret) + return ret; + + of_node = of_find_node_by_name(of_node, "mhi_channels"); + if (!of_node) + return -EINVAL; + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for_each_available_child_of_node(of_node, child) { + struct mhi_chan *mhi_chan; + + if (strcmp(child->name, "mhi_chan")) + continue; + + ret = of_property_read_u32(child, "reg", &chan); + if (ret || chan >= mhi_cntrl->max_chan) + goto error_chan_cfg; + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + ret = of_property_read_string(child, "label", + &mhi_chan->name); + if (ret) + goto error_chan_cfg; + + mhi_chan->chan = chan; + + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_chan->tre_ring.elements); + if (!ret && !mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, local ring len should be bigger than + * transfer ring len due to internal logical channels in device. + * So host can queue much more buffers than transfer ring len. + * Example, RSC channels should have a larger local channel + * than transfer ring length. 
+ */ + ret = of_property_read_u32(child, "mhi,local-elements", + (u32 *)&mhi_chan->buf_ring.elements); + if (ret) + mhi_chan->buf_ring.elements = + mhi_chan->tre_ring.elements; + + ret = of_property_read_u32(child, "mhi,event-ring", + &mhi_chan->er_index); + if (ret) + goto error_chan_cfg; + + ret = of_property_read_u32(child, "mhi,chan-dir", + &mhi_chan->dir); + if (ret) + goto error_chan_cfg; + + /* + * For most channels, chtype is identical to channel directions, + * if not defined, assign ch direction to chtype + */ + ret = of_property_read_u32(child, "mhi,chan-type", + &mhi_chan->type); + if (ret) + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + ret = of_property_read_u32(child, "mhi,ee", &mhi_chan->ee_mask); + if (ret) + goto error_chan_cfg; + + of_property_read_u32(child, "mhi,pollcfg", + &mhi_chan->db_cfg.pollcfg); + + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_chan->xfer_type); + if (ret) + goto error_chan_cfg; + + switch (mhi_chan->xfer_type) { + case MHI_XFER_BUFFER: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_buf; + break; + case MHI_XFER_SKB: + mhi_chan->queue_xfer = mhi_queue_skb; + break; + case MHI_XFER_SCLIST: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_sclist; + break; + case MHI_XFER_NOP: + mhi_chan->queue_xfer = mhi_queue_nop; + break; + case MHI_XFER_DMA: + case MHI_XFER_RSC_DMA: + mhi_chan->queue_xfer = mhi_queue_dma; + break; + default: + goto error_chan_cfg; + } + + mhi_chan->lpm_notify = of_property_read_bool(child, + "mhi,lpm-notify"); + mhi_chan->offload_ch = of_property_read_bool(child, + "mhi,offload-chan"); + mhi_chan->db_cfg.reset_req = of_property_read_bool(child, + "mhi,db-mode-switch"); + mhi_chan->pre_alloc = of_property_read_bool(child, + "mhi,auto-queue"); + mhi_chan->auto_start = of_property_read_bool(child, + "mhi,auto-start"); + mhi_chan->wake_capable = of_property_read_bool(child, + "mhi,wake-capable"); + + if (mhi_chan->pre_alloc && + (mhi_chan->dir != DMA_FROM_DEVICE || + mhi_chan->xfer_type != MHI_XFER_BUFFER)) + goto error_chan_cfg; + + /* bi-dir and dirctionless channels must be a offload chan */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) + goto error_chan_cfg; + + /* if mhi host allocate the buffers then client cannot queue */ + if (mhi_chan->pre_alloc) + mhi_chan->queue_xfer = mhi_queue_nop; + + if (!mhi_chan->offload_ch) { + ret = of_property_read_u32(child, "mhi,doorbell-mode", + &mhi_chan->db_cfg.brstmode); + if (ret || + MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) + goto error_chan_cfg; + + mhi_chan->db_cfg.process_db = + (mhi_chan->db_cfg.brstmode == + MHI_BRSTMODE_ENABLE) ? 
+ mhi_db_brstmode : mhi_db_brstmode_disable; + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} +#else +static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int i, num = 0; + struct mhi_event *mhi_event; + + num = NUM_MHI_EVT_RINGS; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + mhi_cntrl->msi_irq_base = 0; + /* populate ev ring */ + mhi_event = mhi_cntrl->mhi_event; + i = 0; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++) { + mhi_event->er_index = i; + + mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements + if (i == PRIMARY_EVENT_RING) + mhi_event->ring.elements = 256; //256 is enough, and 1024 some times make driver fail to open channel (reason is x6x fail to malloc) + + mhi_event->intmod = 1; //Interrupt moderation time in ms + +#ifdef ENABLE_ADPL + if (i == ADPL_EVT_RING) + mhi_event->ring.elements = 256; +#endif + +#ifdef ENABLE_QDSS + if (i == QDSS_EVT_RING) + mhi_event->ring.elements = 512; +#endif + + /* see mhi_netdev_status_cb(), when interrupt come, the napi_poll maybe scheduled, so can reduce interrupts + root@OpenWrt:/# cat /proc/interrupts | grep mhi + root@OpenWrt:/# cat /sys/kernel/debug/mhi_q/mhi_netdev/pcie_mhi_0306_00.01.00_0/rx_int + */ + if (i == IPA_IN_EVENT_RING || i == IPA_OUT_EVENT_RING) + mhi_event->intmod = 5; + +#ifdef ENABLE_IP_SW0 + if (i == SW_0_IN_EVT_RING) + mhi_event->intmod = 5; +#endif + +#ifdef ENABLE_ADPL + if (i == ADPL_EVT_RING) + mhi_event->intmod = 0; +#endif + +#ifdef ENABLE_QDSS + if (i == QDSS_EVT_RING) + mhi_event->intmod = 0; +#endif + + mhi_event->msi = 1 + i + mhi_cntrl->msi_irq_base; //MSI associated with this event ring + + if (i == IPA_OUT_EVENT_RING) + mhi_event->chan = MHI_CLIENT_IP_HW_0_OUT; //Dedicated channel number, if it's a dedicated event ring + else if (i == IPA_IN_EVENT_RING) + mhi_event->chan = MHI_CLIENT_IP_HW_0_IN; //Dedicated channel number, if it's a dedicated event ring +#ifdef ENABLE_IP_SW0 + else if (i == SW_0_OUT_EVT_RING) + mhi_event->chan = MHI_CLIENT_IP_SW_0_OUT; + else if (i == SW_0_IN_EVT_RING) + mhi_event->chan = MHI_CLIENT_IP_SW_0_IN; +#endif + +#ifdef ENABLE_ADPL + else if (i == ADPL_EVT_RING) + mhi_event->chan = MHI_CLIENT_ADPL; +#endif + +#ifdef ENABLE_QDSS + else if (i == QDSS_EVT_RING) + mhi_event->chan = MHI_CLIENT_IP_HW_QDSS; +#endif + else + mhi_event->chan = 0; + + /* this event ring has a dedicated channel */ + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + + mhi_event->priority = 1; //Event ring priority, set to 1 for now + + if (mhi_event->chan && mhi_event->mhi_chan->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) + mhi_event->db_cfg.brstmode = MHI_BRSTMODE_ENABLE; + else + mhi_event->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; + + mhi_event->db_cfg.process_db = + (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ? 
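/*
 * In the hardcoded event-ring setup above, intmod is the interrupt-moderation
 * time in milliseconds written into the event ring context: 1 ms by default,
 * 5 ms on the IPA (and SW0) data rings so completions get coalesced, and 0 on
 * ADPL/QDSS. A rough way to see what moderation buys, assuming a continuously
 * busy ring: the device fires at most on the order of 1000/intmod interrupts
 * per second for that ring.
 */
#include <stdio.h>

int main(void)
{
	int intmod[] = { 1, 5 };
	unsigned int i;

	for (i = 0; i < sizeof(intmod) / sizeof(intmod[0]); i++)
		printf("intmod=%d ms -> roughly %d interrupts/s per busy ring\n",
		       intmod[i], 1000 / intmod[i]);
	return 0;
}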
+ mhi_db_brstmode : mhi_db_brstmode_disable; + + if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#ifdef ENABLE_IP_SW0 + else if (i == SW_0_OUT_EVT_RING || i == SW_0_IN_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + +#ifdef ENABLE_ADPL + else if (i == ADPL_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + +#ifdef ENABLE_QDSS + else if (i == QDSS_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + else + mhi_event->data_type = MHI_ER_CTRL_ELEMENT_TYPE; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING +#ifdef ENABLE_ADPL + || i == ADPL_EVT_RING +#endif +#ifdef ENABLE_QDSS + || i == QDSS_EVT_RING +#endif + ) + mhi_event->hw_ring = true; + else + mhi_event->hw_ring = false; + + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = false; + if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN || mhi_event->chan == MHI_CLIENT_IP_SW_0_IN) + mhi_event->cl_manage = true; + mhi_event->offload_ev = false; + mhi_event++; + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1 + mhi_cntrl->msi_irq_base; + + return 0; +} + +struct chan_cfg_t { + const char *chan_name; + u32 chan_id; + u32 elements; +}; + +static struct chan_cfg_t chan_cfg[] = { +//"Qualcomm PCIe Loopback" + {"LOOPBACK", MHI_CLIENT_LOOPBACK_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"LOOPBACK", MHI_CLIENT_LOOPBACK_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Sahara" + {"SAHARA", MHI_CLIENT_SAHARA_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"SAHARA", MHI_CLIENT_SAHARA_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Diagnostics" + {"DIAG", MHI_CLIENT_DIAG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DIAG", MHI_CLIENT_DIAG_IN, NUM_MHI_DIAG_IN_RING_ELEMENTS}, +//"Qualcomm PCIe QDSS Data" +//"Do not use this QDSS. 
xingduo.du 2023-02-16" +// {"QDSS", MHI_CLIENT_QDSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, +// {"QDSS", MHI_CLIENT_QDSS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe EFS" + {"EFS", MHI_CLIENT_EFS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EFS", MHI_CLIENT_EFS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe MBIM" + {"MBIM", MHI_CLIENT_MBIM_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"MBIM", MHI_CLIENT_MBIM_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe QMI" + {"QMI0", MHI_CLIENT_QMI_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"QMI0", MHI_CLIENT_QMI_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe QMI" + //{"QMI1", MHI_CLIENT_QMI_2_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"QMI1", MHI_CLIENT_QMI_2_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe IP CTRL" + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#if 0 //AG15 +//"Qualcomm PCIe IPCR" + {"IPCR", MHI_CLIENT_DIAG_CONS_IF_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IPCR", MHI_CLIENT_DIAG_CONS_IF_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#endif +//"Qualcomm PCIe Boot Logging" + //{"BL", MHI_CLIENT_BOOT_LOG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"BL", MHI_CLIENT_BOOT_LOG_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Modem" + {"DUN", MHI_CLIENT_DUN_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DUN", MHI_CLIENT_DUN_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm EDL " + {"EDL", MHI_CLIENT_EDL_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EDL", MHI_CLIENT_EDL_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#ifdef ENABLE_IP_SW0 +//"Qualcomm PCIe LOCAL Adapter" + {"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_SW_IP_RING_ELEMENTS}, + {"IP_SW0", MHI_CLIENT_IP_SW_0_IN, NUM_MHI_SW_IP_RING_ELEMENTS}, +#endif +//"Qualcomm PCIe WWAN Adapter" + {"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS}, + {"IP_HW0", MHI_CLIENT_IP_HW_0_IN, NUM_MHI_IPA_IN_RING_ELEMENTS}, +#ifdef ENABLE_ADPL + {"ADPL", MHI_CLIENT_ADPL, NUM_MHI_ADPL_RING_ELEMENTS}, +#endif + +#ifdef ENABLE_QDSS + {"QDSS", MHI_CLIENT_IP_HW_QDSS, NUM_MHI_QDSS_RING_ELEMENTS}, +#endif +}; + +extern int mhi_netdev_mbin_enabled(void); +extern int mhi_netdev_use_xfer_type_dma(unsigned chan); +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + u32 chan; + u32 i, num; + u32 ring = 0; + + mhi_cntrl->max_chan = MHI_MAX_CHANNELS; + num = sizeof(chan_cfg)/sizeof(chan_cfg[0]); + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for (i = 0; i < num; i++) { + struct mhi_chan *mhi_chan; + + if (!strncmp( chan_cfg[i].chan_name, "MBIM", 4)) { + if (!mhi_netdev_mbin_enabled()) + continue; + } + else if (!strncmp( chan_cfg[i].chan_name, "QMI", 3)) { + if (mhi_netdev_mbin_enabled()) + continue; + } + + chan = chan_cfg[i].chan_id; + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + mhi_chan->name = chan_cfg[i].chan_name; + + mhi_chan->chan = chan; + + mhi_chan->tre_ring.elements = chan_cfg[i].elements; + + /* + * For some channels, local ring len should be bigger than + * transfer ring len due to internal logical channels in device. + * So host can queue much more buffers than transfer ring len. + * Example, RSC channels should have a larger local channel + * than transfer ring length. 
+ */ + mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; + + if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN + || chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN +#ifdef ENABLE_ADPL + || chan == MHI_CLIENT_ADPL +#endif +#ifdef ENABLE_QDSS + || chan == MHI_CLIENT_IP_HW_QDSS +#endif + ) { + mhi_chan->ring = 0; + } + else { + mhi_chan->ring = ring; + ring += mhi_chan->buf_ring.elements; + } + + if (chan == MHI_CLIENT_IP_HW_0_OUT) + mhi_chan->er_index = IPA_OUT_EVENT_RING; + else if (chan == MHI_CLIENT_IP_HW_0_IN) + mhi_chan->er_index = IPA_IN_EVENT_RING; +#ifdef ENABLE_IP_SW0 + else if (chan == MHI_CLIENT_IP_SW_0_OUT) + mhi_chan->er_index = SW_0_OUT_EVT_RING; + else if (chan == MHI_CLIENT_IP_SW_0_IN) + mhi_chan->er_index = SW_0_IN_EVT_RING; +#endif + +#ifdef ENABLE_ADPL + else if (chan == MHI_CLIENT_ADPL) + mhi_chan->er_index = ADPL_EVT_RING; +#endif +#ifdef ENABLE_QDSS + else if (chan == MHI_CLIENT_IP_HW_QDSS) + mhi_chan->er_index = QDSS_EVT_RING; +#endif + else + mhi_chan->er_index = PRIMARY_EVENT_RING; + + mhi_chan->dir = CHAN_INBOUND(chan) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + +#ifdef ENABLE_ADPL + if (chan == MHI_CLIENT_ADPL) + mhi_chan->dir = DMA_FROM_DEVICE; +#endif + +#ifdef ENABLE_QDSS + if (chan == MHI_CLIENT_IP_HW_QDSS) + mhi_chan->dir = DMA_FROM_DEVICE; +#endif + /* + * For most channels, chtype is identical to channel directions, + * if not defined, assign ch direction to chtype + */ + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + mhi_chan->ee_mask = BIT(MHI_EE_AMSS); + if (CHAN_SBL(chan)) + mhi_chan->ee_mask = BIT(MHI_EE_SBL); + else if (CHAN_EDL(chan)) + mhi_chan->ee_mask = BIT(MHI_EE_FP); + + mhi_chan->db_cfg.pollcfg = 0; + + if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_SW_0_OUT) + mhi_chan->xfer_type = MHI_XFER_SKB; + else if (chan == MHI_CLIENT_IP_HW_0_IN) + mhi_chan->xfer_type = mhi_netdev_use_xfer_type_dma(chan) ? 
MHI_XFER_DMA: MHI_XFER_SKB; + else if (chan == MHI_CLIENT_IP_SW_0_IN) + mhi_chan->xfer_type = MHI_XFER_SKB; //MHI_XFER_DMA; +#ifdef ENABLE_ADPL + else if (chan == MHI_CLIENT_ADPL) + mhi_chan->xfer_type = MHI_XFER_BUFFER; +#endif +#ifdef ENABLE_QDSS + else if (chan == MHI_CLIENT_IP_HW_QDSS) + mhi_chan->xfer_type = MHI_XFER_BUFFER; +#endif + else + mhi_chan->xfer_type = MHI_XFER_BUFFER; + + if (chan_cfg[i].elements == 0) { + mhi_chan->dir = DMA_BIDIRECTIONAL; + mhi_chan->xfer_type = MHI_XFER_NOP; + } + + switch (mhi_chan->xfer_type) { + case MHI_XFER_BUFFER: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_buf; + break; + case MHI_XFER_SKB: + mhi_chan->queue_xfer = mhi_queue_skb; + break; + case MHI_XFER_SCLIST: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_sclist; + break; + case MHI_XFER_NOP: + mhi_chan->queue_xfer = mhi_queue_nop; + break; + case MHI_XFER_DMA: + case MHI_XFER_RSC_DMA: + mhi_chan->queue_xfer = mhi_queue_dma; + break; + default: + goto error_chan_cfg; + } + + mhi_chan->lpm_notify = false; + mhi_chan->offload_ch = (chan_cfg[i].elements == 0); + mhi_chan->db_cfg.reset_req = false; + mhi_chan->pre_alloc = false; + mhi_chan->auto_start = false; + mhi_chan->wake_capable = false; + + if (mhi_chan->pre_alloc && + (mhi_chan->dir != DMA_FROM_DEVICE || + mhi_chan->xfer_type != MHI_XFER_BUFFER)) + goto error_chan_cfg; + + /* bi-dir and dirctionless channels must be a offload chan */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) + goto error_chan_cfg; + + /* if mhi host allocate the buffers then client cannot queue */ + if (mhi_chan->pre_alloc) + mhi_chan->queue_xfer = mhi_queue_nop; + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; + if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN) + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_ENABLE; + +#ifdef ENABLE_ADPL + if (chan == MHI_CLIENT_ADPL) + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; +#endif +#ifdef ENABLE_QDSS + if (chan == MHI_CLIENT_IP_HW_QDSS) + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; +#endif + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) + goto error_chan_cfg; + + mhi_chan->db_cfg.process_db = + (mhi_chan->db_cfg.brstmode == + MHI_BRSTMODE_ENABLE) ? 
+ mhi_db_brstmode : mhi_db_brstmode_disable; + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} +#endif + +static int of_parse_dt(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int ret; + + /* parse MHI channel configuration */ + ret = of_parse_ch_cfg(mhi_cntrl, of_node); + if (ret) + return ret; + + /* parse MHI event configuration */ + ret = of_parse_ev_cfg(mhi_cntrl, of_node); + if (ret) + goto error_ev_cfg; +#if defined(QCOM_AP_QCA6490_DMA_IOMMU) + /* for QCS6490 iommu-dma is fastmap + for SG845 iommu-dma is set in driver + for ipq iommu-dma is disabled + */ + const char *str; + ret = of_property_read_string(of_node, "qcom,iommu-dma", &str); + if (ret) + MHI_ERR("mhi qcom,iommu-dma need set"); + +#endif +#if 0 + ret = of_property_read_u32(of_node, "mhi,timeout", + &mhi_cntrl->timeout_ms); + if (ret) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = of_property_read_bool(of_node, "mhi,use-bb"); + ret = of_property_read_u32(of_node, "mhi,buffer-len", + (u32 *)&mhi_cntrl->buffer_len); + if (ret) + mhi_cntrl->buffer_len = MHI_MAX_MTU; +#else + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->bounce_buf = false; + mhi_cntrl->buffer_len = MHI_MAX_MTU; +#endif + + return 0; + +error_ev_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) +{ + int ret; + int i; + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + + //if (!mhi_cntrl->of_node) + // return -EINVAL; + + for (i = 0; i < MAX_MHI_CONTROLLER; i++) { + if (mhi_controller_minors[i].dev_id == mhi_cntrl->dev_id + && mhi_controller_minors[i].domain == mhi_cntrl->domain + && mhi_controller_minors[i].bus == mhi_cntrl->bus + && mhi_controller_minors[i].slot == mhi_cntrl->slot) { + mhi_cntrl->cntrl_idx = i; + break; + } + else if (mhi_controller_minors[i].dev_id == 0 + && mhi_controller_minors[i].domain == 0 + && mhi_controller_minors[i].bus == 0 + && mhi_controller_minors[i].slot == 0) { + mhi_controller_minors[i].dev_id = mhi_cntrl->dev_id; + mhi_controller_minors[i].domain = mhi_cntrl->domain; + mhi_controller_minors[i].bus = mhi_cntrl->bus; + mhi_controller_minors[i].slot = mhi_cntrl->slot; + mhi_controller_minors[i].mhi_cntrl = mhi_cntrl; + mhi_cntrl->cntrl_idx = i; + break; + } + } + + if (i == MAX_MHI_CONTROLLER) + return -EINVAL; + + if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put) + return -EINVAL; + + if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status) + return -EINVAL; + + ret = of_parse_dt(mhi_cntrl, mhi_cntrl->of_node); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto error_alloc_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker); + INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker); + INIT_DELAYED_WORK(&mhi_cntrl->ready_worker, mhi_pm_ready_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + 
spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE) + tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* register controller with mhi_bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) { + ret = -ENOMEM; + goto error_alloc_dev; + } + + mhi_dev->dev_type = MHI_CONTROLLER_TYPE; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto error_add_dev; + + if (mhi_cntrl->cntrl_idx) + mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev, + MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL, + "mhi_BHI%d", mhi_cntrl->cntrl_idx); + else + mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev, + MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL, + "mhi_BHI"); + + mhi_cntrl->mhi_dev = mhi_dev; + + mhi_cntrl->parent = mhi_bus.dentry; + mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; + + /* adding it to this list only for debug purpose */ + mutex_lock(&mhi_bus.lock); + list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list); + mutex_unlock(&mhi_bus.lock); + + return 0; + +error_add_dev: + mhi_dealloc_device(mhi_cntrl, mhi_dev); + +error_alloc_dev: + kfree(mhi_cntrl->mhi_cmd); + +error_alloc_cmd: + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_event); + + return ret; +}; +EXPORT_SYMBOL(of_register_mhi_controller); + +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_tsync); + + if (mhi_cntrl->cntrl_dev) + device_destroy(mhi_cntrl_drv.class, MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx)); + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + mutex_lock(&mhi_bus.lock); + list_del(&mhi_cntrl->node); + mutex_unlock(&mhi_bus.lock); +} + +/* set ptr to control private data */ +static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl, + void *priv) +{ + mhi_cntrl->priv_data = priv; +} + + +/* allocate mhi controller to register */ +struct mhi_controller *mhi_alloc_controller(size_t size) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(size + sizeof(*mhi_cntrl), GFP_KERNEL); + + if (mhi_cntrl && size) + mhi_controller_set_devdata(mhi_cntrl, mhi_cntrl + 1); + + return mhi_cntrl; +} +EXPORT_SYMBOL(mhi_alloc_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 bhie_off; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) { + MHI_ERR("Error with init dev_ctxt\n"); + goto error_dev_ctxt; + } + + ret 
= mhi_init_irq_setup(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting up irq\n"); + goto error_setup_irq; + } + + /* + * allocate rddm table if specified, this table is for debug purpose + * so we'll ignore erros + */ + if (mhi_cntrl->rddm_size) { + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + + /* + * This controller supports rddm, we need to manually clear + * BHIE RX registers since por values are undefined. + */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, + &bhie_off); + if (ret) { + MHI_ERR("Error getting bhie offset\n"); + goto bhie_error; + } + + memset_io(mhi_cntrl->regs + bhie_off + BHIE_RXVECADDR_LOW_OFFS, + 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + + 4); + } + + mhi_cntrl->pre_init = true; + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +bhie_error: + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + mhi_cntrl->pre_init = false; +} + +/* match dev to drv */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0)) +static int mhi_match(struct device *dev, const struct device_driver *drv) +#else +static int mhi_match(struct device *dev, struct device_driver *drv) +#endif +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* if controller type there is no client driver associated with it */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->chan_name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +struct bus_type mhi_bus_type = { + .name = "mhi_q", + .dev_name = "mhi_q", + .match = mhi_match, +}; + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + bool auto_start = false; + int ret; + + /* bring device out of lpm */ + ret = mhi_device_get_sync(mhi_dev); + if (ret) + return ret; + + ret = -EINVAL; + if (ul_chan) { + /* lpm notification require status_cb */ + if (ul_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) + goto exit_probe; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + mhi_dev->status_cb = mhi_drv->status_cb; + auto_start = ul_chan->auto_start; + } + + if (dl_chan) { + if (dl_chan->lpm_notify && 
!mhi_drv->status_cb) + goto exit_probe; + + if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) + goto exit_probe; + + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * if this channal event ring manage by client, then + * status_cb must be defined so we can send the async + * cb whenever there are pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + goto exit_probe; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + /* ul & dl uses same status cb */ + mhi_dev->status_cb = mhi_drv->status_cb; + auto_start = (auto_start || dl_chan->auto_start); + } + + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + + if (!ret && auto_start) + mhi_prepare_for_transfer(mhi_dev); + +exit_probe: + mhi_device_put(mhi_dev); + + return ret; +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum MHI_CH_STATE ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* control device has no work to do */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name); + + /* reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* move channel state to disable, no more processing */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; + write_unlock_irq(&mhi_chan->lock); + + /* reset the channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + + mutex_unlock(&mhi_chan->mutex); + } + + /* destroy the device */ + mhi_drv->remove(mhi_dev); + + /* de_init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + if (ch_state[dir] == MHI_CH_STATE_ENABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + + if (mhi_cntrl->tsync_dev == mhi_dev) + mhi_cntrl->tsync_dev = NULL; + + /* relinquish any pending votes */ + read_lock_bh(&mhi_cntrl->pm_lock); + while (atomic_read(&mhi_dev->dev_wake)) + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_driver_register(struct mhi_driver *mhi_drv) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + return driver_register(driver); +} +EXPORT_SYMBOL(mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL(mhi_driver_unregister); + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + struct device *dev; + + if (!mhi_dev) + return NULL; + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + dev->parent = mhi_cntrl->dev; + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->vendor = mhi_cntrl->vendor; + mhi_dev->dev_id = mhi_cntrl->dev_id; + mhi_dev->domain = mhi_cntrl->domain; + mhi_dev->bus = mhi_cntrl->bus; + mhi_dev->slot = mhi_cntrl->slot; + mhi_dev->mtu = MHI_MAX_MTU; + atomic_set(&mhi_dev->dev_wake, 0); + + return mhi_dev; +} + +static int mhi_cntrl_open(struct inode *inode, struct file *f) +{ + int ret = -ENODEV; + struct mhi_controller *mhi_cntrl; + + mutex_lock(&mhi_bus.lock); + list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) { + if (MINOR(inode->i_rdev) == mhi_cntrl->cntrl_idx) { + ret = 0; + f->private_data = mhi_cntrl; + break; + } + } + mutex_unlock(&mhi_bus.lock); + + return ret; +} + +static int mhi_cntrl_release(struct inode *inode, struct file *f) +{ + f->private_data = NULL; + return 0; +} + +#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1 +#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2 +long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *to); +long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *from); + +static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) +{ + long ret = -ENODEV; + struct mhi_controller *mhi_cntrl; + + mutex_lock(&mhi_bus.lock); + list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) { + if (mhi_cntrl == (struct mhi_controller *)f->private_data) { + ret = 0; + break; + } + } + mutex_unlock(&mhi_bus.lock); + + if (ret) + return ret; + + switch (cmd) { + case IOCTL_BHI_GETDEVINFO: + ret = bhi_get_dev_info(mhi_cntrl, (void __user *)__arg); + break; + + case IOCTL_BHI_WRITEIMAGE: + ret = bhi_write_image(mhi_cntrl, (void __user *)__arg); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct file_operations mhi_cntrl_fops = { + .unlocked_ioctl = mhi_cntrl_ioctl, + .open = mhi_cntrl_open, + .release = mhi_cntrl_release, +}; + +static int __init mhi_cntrl_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_CNTRL_DRIVER_NAME, &mhi_cntrl_fops); + if (ret < 0) + return ret; + + mhi_cntrl_drv.major = ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)) + 
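	/*
	 * Kernels v6.4 and later dropped the owner argument from
	 * class_create(), so only the class name is passed on this branch;
	 * the pre-6.4 call below still passes THIS_MODULE first, which is
	 * why the call is split on LINUX_VERSION_CODE here.
	 */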
mhi_cntrl_drv.class = class_create(MHI_CNTRL_DRIVER_NAME); +#else + mhi_cntrl_drv.class = class_create(THIS_MODULE, MHI_CNTRL_DRIVER_NAME); +#endif + if (IS_ERR(mhi_cntrl_drv.class)) { + unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_cntrl_drv.lock); + INIT_LIST_HEAD(&mhi_cntrl_drv.head); + + return 0; +} + +void mhi_cntrl_exit(void) +{ + class_destroy(mhi_cntrl_drv.class); + unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME); +} + +extern int mhi_dtr_init(void); +extern void mhi_dtr_exit(void); +extern int mhi_device_netdev_init(struct dentry *parent); +extern void mhi_device_netdev_exit(void); +extern int mhi_device_uci_init(void); +extern void mhi_device_uci_exit(void); +extern int mhi_controller_qcom_init(void); +extern void mhi_controller_qcom_exit(void); + +static char mhi_version[] = "Quectel_Linux_PCIE_MHI_Driver_"PCIE_MHI_DRIVER_VERSION; +module_param_string(mhi_version, mhi_version, sizeof(mhi_version), S_IRUGO); + +static int __init mhi_init(void) +{ + int ret; + + pr_info("%s %s\n", __func__, mhi_version); + + mutex_init(&mhi_bus.lock); + INIT_LIST_HEAD(&mhi_bus.controller_list); + + /* parent directory */ + mhi_bus.dentry = debugfs_create_dir(mhi_bus_type.name, NULL); + + ret = bus_register(&mhi_bus_type); + if (ret) { + pr_err("Error bus_register ret:%d\n", ret); + return ret; + } + + ret = mhi_dtr_init(); + if (ret) { + pr_err("Error mhi_dtr_init ret:%d\n", ret); + bus_unregister(&mhi_bus_type); + return ret; + } + + ret = mhi_device_netdev_init(mhi_bus.dentry); + if (ret) { + pr_err("Error mhi_device_netdev_init ret:%d\n", ret); + } + + ret = mhi_device_uci_init(); + if (ret) { + pr_err("Error mhi_device_uci_init ret:%d\n", ret); + } + + ret = mhi_cntrl_init(); + if (ret) { + pr_err("Error mhi_cntrl_init ret:%d\n", ret); + } + + ret = mhi_controller_qcom_init(); + if (ret) { + pr_err("Error mhi_controller_qcom_init ret:%d\n", ret); + } + + return ret; +} + +static void mhi_exit(void) +{ + mhi_controller_qcom_exit(); + mhi_cntrl_exit(); + mhi_device_uci_exit(); + mhi_device_netdev_exit(); + mhi_dtr_exit(); + bus_unregister(&mhi_bus_type); + debugfs_remove_recursive(mhi_bus.dentry); +} + +module_init(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("MHI_CORE"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/driver/quectel_MHI/src/core/mhi_internal.h b/driver/quectel_MHI/src/core/mhi_internal.h new file mode 100644 index 0000000..09f3aac --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_internal.h @@ -0,0 +1,1190 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include +#ifndef writel_relaxed +#define writel_relaxed writel +#endif + +#ifndef writel_relaxed_no_log +#define writel_relaxed_no_log writel_relaxed +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); +} +#endif + +#ifndef readq_relaxed +#define readq_relaxed readq +#endif + +#ifndef readq_relaxed_no_log +#define readq_relaxed_no_log readq_relaxed +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +static inline void reinit_completion(struct completion *x) +{ + x->done = 0; +} +#endif + +#ifndef __ATTR_RO +#define __ATTR_RO(_name) { \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .show = _name##_show, \ +} +#endif +#ifndef __ATTR_WO +#define __ATTR_WO(_name) { \ + .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ + .store = _name##_store, \ +} +#endif +#ifndef __ATTR_RW +#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ + _name##_show, _name##_store) +#endif +#ifndef DEVICE_ATTR_RO +#define DEVICE_ATTR_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#endif +#ifndef DEVICE_ATTR_WO +#define DEVICE_ATTR_WO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_WO(_name) +#endif +#ifndef DEVICE_ATTR_RW +#define DRIVER_ATTR_RW(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) +#endif + +#ifdef EXPORT_SYMBOL +#undef EXPORT_SYMBOL +#define EXPORT_SYMBOL(sym) +#endif + +extern struct bus_type mhi_bus_type; + +/* MHI mmio register mapping */ +#define PCI_INVALID_READ(val) (val == U32_MAX) + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define BHIEOFF (0x2C) +#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) +#define BHIEOFF_BHIEOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define MHISTATUS_READY_SHIFT (0) + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) + +#define CCABAP_HIGHER (0x5C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) + 
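/*
 * Illustrative sketch, not part of the original driver: every 64-bit
 * context-array base address in this register block is exposed as a
 * LOWER/HIGHER pair of 32-bit registers, so a dma_addr_t has to be split
 * before it is written.  The helper name below is made up for the example
 * (the real programming is done through mhi_write_reg() in mhi_init_mmio());
 * it only shows how a *_LOWER/*_HIGHER pair such as CCABAP is meant to be
 * used together.
 */
static inline void mhi_example_write_ccabap(void __iomem *mmio,
					    dma_addr_t ctxt_addr)
{
	/* low 32 bits first, then the high 32 bits of the context array base */
	writel_relaxed(lower_32_bits(ctxt_addr), mmio + CCABAP_LOWER);
	writel_relaxed(upper_32_bits(ctxt_addr), mmio + CCABAP_HIGHER);
}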
+#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) + +#define CRCBAP_HIGHER (0x6C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) +#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) + +#define MHICTRLLIMIT_HIGHER (0x8C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) + +#define MHIDATABASE_HIGHER (0x9C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) + +#define MHIDATALIMIT_LOWER (0xA0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) + +#define MHIDATALIMIT_HIGHER (0xA4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) + +#define MHI_READ_REG_FIELD(_VAR,_REG,_FIELD) \ + ((_VAR & _REG ## _ ## _FIELD ## _MASK) >> _REG ## _ ## _FIELD ## _SHIFT) + +#define MHI_WRITE_REG_FIELD(_VAR,_REG,_FIELD,_VAL) \ + do { \ + _VAR &= ~_REG ## _ ## _FIELD ## _MASK; \ + _VAR |= (_VAL << _REG ## _ ## _FIELD ## _SHIFT); \ + } while(0) + +/* Host request register */ +#define MHI_SOC_RESET_REQ_OFFSET (0xB0) +#define MHI_SOC_RESET_REQ BIT(0) + +/* MHI misc capability registers */ +#define MISC_OFFSET (0x24) +#define MISC_CAP_MASK (0xFFFFFFFF) +#define MISC_CAP_SHIFT (0) + +#define CAP_CAPID_MASK (0xFF000000) +#define CAP_CAPID_SHIFT (24) +#define CAP_NEXT_CAP_MASK (0x00FFF000) +#define CAP_NEXT_CAP_SHIFT (12) + +/* MHI Timesync offsets */ +#define TIMESYNC_CFG_OFFSET (0x00) +#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK) +#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT) +#define TIMESYNC_CFG_NEXT_OFF_MASK (CAP_NEXT_CAP_MASK) +#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT) +#define TIMESYNC_CFG_NUMCMD_MASK (0xFF) +#define TIMESYNC_CFG_NUMCMD_SHIFT (0) +#define TIMESYNC_DB_OFFSET (0x4) +#define TIMESYNC_TIME_LOW_OFFSET (0x8) +#define TIMESYNC_TIME_HIGH_OFFSET (0xC) + +#define TIMESYNC_CAP_ID (2) + +/* MHI BHI offfsets */ +#define BHI_BHIVERSION_MINOR (0x00) +#define BHI_BHIVERSION_MAJOR (0x04) +#define BHI_IMGADDR_LOW (0x08) +#define BHI_IMGADDR_HIGH (0x0C) +#define BHI_IMGSIZE (0x10) +#define BHI_RSVD1 (0x14) +#define BHI_IMGTXDB (0x18) +#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHI_TXDB_SEQNUM_SHFT (0) +#define BHI_RSVD2 (0x1C) +#define BHI_INTVEC (0x20) +#define BHI_RSVD3 (0x24) +#define BHI_EXECENV (0x28) +#define BHI_STATUS (0x2C) 
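/*
 * Illustrative sketch, not part of the original driver: when an image is
 * pushed over BHI, the host programs the image address/size registers above
 * and then rings BHI_IMGTXDB with a rolling sequence number carried in the
 * low 30 bits.  The example helper below (name invented here) only shows how
 * the SEQNUM mask/shift pair combines into a doorbell value; the driver's
 * actual BHI boot path performs the register writes.
 */
static inline u32 mhi_example_bhi_txdb(u32 sequence_id)
{
	/* keep only the 30-bit sequence field defined by BHI_TXDB_SEQNUM_BMSK */
	return (sequence_id << BHI_TXDB_SEQNUM_SHFT) & BHI_TXDB_SEQNUM_BMSK;
}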
+#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNU (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + (0x4 * n)) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n)) +#define BHI_RSVD5 (0xC4) +#define BHI_STATUS_MASK (0xC0000000) +#define BHI_STATUS_SHIFT (30) +#define BHI_STATUS_ERROR (3) +#define BHI_STATUS_SUCCESS (2) +#define BHI_STATUS_RESET (0) + +/* MHI BHIE offsets */ +#define BHIE_MSMSOCID_OFFS (0x0000) +#define BHIE_TXVECADDR_LOW_OFFS (0x002C) +#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) +#define BHIE_TXVECSIZE_OFFS (0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +/* convert ticks to micro seconds by dividing by 19.2 */ +#define TIME_TICKS_TO_US(x) (div_u64((x) * 10, 192)) + +struct mhi_event_ctxt { + u32 reserved : 8; + u32 intmodc : 8; + u32 intmodt : 16; + u32 ertype; + u32 msivec; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_chan_ctxt { + u32 chstate : 8; + u32 brstmode : 2; + u32 pollcfg : 6; + u32 reserved : 16; + u32 chtype; + u32 erindex; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + u32 reserved0; + u32 reserved1; + u32 reserved2; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP 
= 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + uint32_t 
/*enum mhi_dev_state*/ mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + uint32_t /*enum mhi_dev_state*/ mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element cmd_transfer; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_TYPE_NOP = 1, + MHI_CMD_TYPE_RESET = 16, + MHI_CMD_TYPE_STOP = 17, + MHI_CMD_TYPE_START = 18, + MHI_CMD_TYPE_TSYNC = 24, +}; + +/* no operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16) + +/* channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_RESET << 16)) + +/* channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16)) + +/* channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_START << 16)) + +/* time sync cfg command */ +#define MHI_TRE_CMD_TSYNC_CFG_PTR (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \ + (er << 24)) + +#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) + +/* event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (ptr) +#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) +#define 
MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) +#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) + +/* transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (ptr) +#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +/* rsc transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) +#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) + +enum MHI_CMD { + MHI_CMD_RESET_CHAN, + MHI_CMD_START_CHAN, + MHI_CMD_TIMSYNC_CFG, +}; + +enum MHI_PKT_TYPE { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum MHI_EV_CCS { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, + MHI_EV_CC_OOB = 0x5, + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum MHI_CH_STATE { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +enum MHI_BRSTMODE { + MHI_BRSTMODE_DISABLE = 0x2, + MHI_BRSTMODE_ENABLE = 0x3, +}; + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \ + mode != MHI_BRSTMODE_ENABLE) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) + +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW) + +enum MHI_ST_TRANSITION { + MHI_ST_TRANSITION_PBL, + MHI_ST_TRANSITION_READY, + MHI_ST_TRANSITION_SBL, + MHI_ST_TRANSITION_MISSION_MODE, + MHI_ST_TRANSITION_FP, + MHI_ST_TRANSITION_MAX, +}; + +extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX]; +#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : mhi_state_tran_str[state]) + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +enum { + MHI_PM_BIT_DISABLE, + MHI_PM_BIT_POR, + MHI_PM_BIT_M0, + MHI_PM_BIT_M2, + MHI_PM_BIT_M3_ENTER, + MHI_PM_BIT_M3, + MHI_PM_BIT_M3_EXIT, + MHI_PM_BIT_FW_DL_ERR, + MHI_PM_BIT_SYS_ERR_DETECT, + MHI_PM_BIT_SYS_ERR_PROCESS, + MHI_PM_BIT_SHUTDOWN_PROCESS, + MHI_PM_BIT_LD_ERR_FATAL_DETECT, + MHI_PM_BIT_MAX +}; + +/* internal power states */ +enum MHI_PM_STATE { + MHI_PM_DISABLE = BIT(MHI_PM_BIT_DISABLE), /* MHI is not enabled */ + MHI_PM_POR = BIT(MHI_PM_BIT_POR), /* reset state */ + MHI_PM_M0 = BIT(MHI_PM_BIT_M0), + MHI_PM_M2 = BIT(MHI_PM_BIT_M2), + MHI_PM_M3_ENTER = BIT(MHI_PM_BIT_M3_ENTER), + MHI_PM_M3 = BIT(MHI_PM_BIT_M3), + MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT), + /* firmware download failure state */ + MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR), + MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT), + MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS), + MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS), + /* link not accessible */ + MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT), +}; + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & MHI_PM_M0) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +/* accepted buffer type for the channel */ +enum MHI_XFER_TYPE { + MHI_XFER_BUFFER, + MHI_XFER_SKB, + MHI_XFER_SCLIST, + MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */ + MHI_XFER_DMA, /* receive dma address, already mapped by client */ + MHI_XFER_RSC_DMA, /* RSC type, accept premapped buffer */ +}; + +#define NR_OF_CMD_RINGS (1) +#define CMD_EL_PER_RING (128) +#define PRIMARY_CMD_RING (0) +#define MHI_DEV_WAKE_DB (127) +#define MHI_MAX_MTU (0xffff) + +enum MHI_ER_TYPE { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +enum mhi_er_data_type { + MHI_ER_DATA_ELEMENT_TYPE, + MHI_ER_CTRL_ELEMENT_TYPE, + MHI_ER_TSYNC_ELEMENT_TYPE, + MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE, +}; + +enum mhi_ch_ee_mask { + MHI_CH_EE_PBL = BIT(MHI_EE_PBL), + MHI_CH_EE_SBL = BIT(MHI_EE_SBL), + MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), + MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), + MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), + MHI_CH_EE_WFW = BIT(MHI_EE_WFW), + MHI_CH_EE_EDL = BIT(MHI_EE_EDL), +}; + +enum mhi_ch_type { + MHI_CH_TYPE_INVALID = 0, + MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, + MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, + MHI_CH_TYPE_INBOUND_COALESCED = 3, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum MHI_BRSTMODE brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum MHI_PM_STATE from_state; + u32 to_states; +}; + +struct state_transition { + struct 
list_head node; + enum MHI_ST_TRANSITION state; +}; + +/* Control Segment */ +struct mhi_ctrl_seg +{ + struct mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); + struct mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); +#ifdef ENABLE_IP_SW0 + struct mhi_tre sw_in_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); + struct mhi_tre sw_out_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); +#endif + +#ifdef ENABLE_ADPL + struct mhi_tre adpl_in_chan_ring[NUM_MHI_ADPL_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); +#endif + +#ifdef ENABLE_QDSS + struct mhi_tre qdss_in_chan_ring[NUM_MHI_QDSS_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); +#endif + + struct mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); + struct mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __packed __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16); + struct mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __packed __aligned(NUM_MHI_EVT_RING_ELEMENTS*16); + struct mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __packed __aligned(CMD_EL_PER_RING*16); + + struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128); + struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128); + struct mhi_cmd_ctxt cmd_ctxt[NR_OF_CMD_RINGS] __aligned(128); +} __aligned(4096); + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; + struct mhi_ctrl_seg *ctrl_seg; + dma_addr_t ctrl_seg_addr; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + u64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + dma_addr_t p_addr; + void *v_addr; + void *bb_addr; + void *wp; + size_t len; + void *cb_buf; + bool used; /* indicate element is free to use */ + bool pre_mapped; /* already pre-mapped by client */ + enum dma_data_direction dir; +}; + +struct mhi_event { + u32 er_index; + u32 intmod; + u32 msi; + int chan; /* this event ring is dedicated to a channel */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + u32 used_elements; + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ + spinlock_t lock; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + struct tasklet_struct task; + int (*process_event)(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + struct mhi_controller *mhi_cntrl; +}; + +struct mhi_chan { + u32 chan; + const char *name; + /* + * important, when consuming increment tre_ring first, when releasing + * decrement buf_ring first. If tre_ring has space, buf_ring + * guranteed to have space so we do not need to check both rings. 
+ */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + + u32 used_elements; + u32 used_events[MHI_EV_CC_DB_MODE+1]; + + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + u32 ee_mask; + enum MHI_XFER_TYPE xfer_type; + enum MHI_CH_STATE ch_state; + enum MHI_EV_CCS ccs; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool auto_start; + bool wake_capable; /* channel should wake up system */ + /* functions that generate the transfer ring elements */ + int (*gen_tre)(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, void *buf, void *cb, + size_t len, enum MHI_FLAGS flags); + int (*queue_xfer)(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, void *buf, + size_t len, enum MHI_FLAGS flags); + /* xfer call back */ + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + u32 ring; + u32 tiocm; + struct list_head node; +}; + +struct tsync_node { + struct list_head node; + u32 sequence; + u64 local_time; + u64 remote_time; + struct mhi_device *mhi_dev; + void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time); +}; + +struct mhi_timesync { + u32 er_index; + void __iomem *db; + void __iomem *time_reg; + enum MHI_EV_CCS ccs; + struct completion completion; + spinlock_t lock; /* list protection */ + struct mutex lpm_mutex; /* lpm protection */ + struct list_head head; +}; + +struct mhi_bus { + struct list_head controller_list; + struct mutex lock; + struct dentry *dentry; +}; + +/* default MHI timeout */ +#define MHI_TIMEOUT_MS (3000) +extern struct mhi_bus mhi_bus; + +/* debug fs related functions */ +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d); +int mhi_debugfs_trigger_reset(void *data, u64 val); + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl); + +/* power management apis */ +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state); +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_fw_load_worker(struct work_struct *work); +void mhi_pm_sys_err_worker(struct work_struct *work); +void mhi_pm_ready_worker(struct work_struct *work); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_send_cmd(struct 
mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum MHI_CMD cmd); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); + +/* queue transfer buffer */ +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags); +int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_dma(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); + +/* register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t wp); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t wp); +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t wp); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability, + u32 *offset); +int mhi_init_timesync(struct mhi_controller *mhi_cntrl); +int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl); + +/* memory allocation methods */ +static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + dma_addr_t *dma_handle, + gfp_t gfp) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,0,0 )) + void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp); +#else + void *buf = dma_alloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp | __GFP_ZERO); +#endif + + MHI_LOG("size = %zd, dma_handle = %llx\n", size, (u64)*dma_handle); + if (buf) + atomic_add(size, &mhi_cntrl->alloc_size); + + return buf; +} +static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + atomic_sub(size, &mhi_cntrl->alloc_size); + dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle); +} +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); +static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + kfree(mhi_dev); +} +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + 
struct image_info *image_info); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +/* initialization methods */ +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_dtr_init(void); + +/* isr handlers */ +irqreturn_t mhi_one_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev); +void mhi_ev_task(unsigned long data); + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#else + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MHI_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#endif + +#endif /* _MHI_INT_H */ diff --git a/driver/quectel_MHI/src/core/mhi_main.c b/driver/quectel_MHI/src/core/mhi_main.c new file mode 100644 index 0000000..12d44e6 --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_main.c @@ -0,0 +1,2722 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 *out) +{ + u32 tmp = readl_relaxed(base + offset); + + /* unexpected value, query the link status */ + if (PCI_INVALID_READ(tmp) && + mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data)) + return -EIO; + + *out = tmp; + + return 0; +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 mask, + u32 shift, + u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, + u32 capability, + u32 *offset) +{ + u32 cur_cap, next_offset; + int ret; + + /* get the 1st supported capability offset */ + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET, + MISC_CAP_MASK, MISC_CAP_SHIFT, offset); + if (ret) + return ret; + if (*offset >= 0x1000) + return -ENXIO; + do { + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_CAPID_MASK, CAP_CAPID_SHIFT, + &cur_cap); + if (ret) + return ret; + + if (cur_cap == capability) + return 0; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT, + &next_offset); + if (ret) + return ret; + + *offset += next_offset; + } while (next_offset); + + return -ENXIO; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 val) +{ + writel_relaxed(val, base + offset); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 mask, + u32 shift, + u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, + void __iomem *db_addr, + dma_addr_t wp) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp)); +#if 0 //carl.yin 20190527 for debug + if ((lower_32_bits(db_addr)&0xFFF) != 0x620) + { + u32 out = 0; + int ret = mhi_read_reg(mhi_cntrl, db_addr, 0, &out); + if (out != lower_32_bits(wp)) + MHI_ERR("%s db=%x, wp=w:%x - r:%x, ret=%d\n", __func__, lower_32_bits(db_addr), lower_32_bits(wp), out, ret); + } +#endif +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t wp) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = wp; + mhi_write_db(mhi_cntrl, db_addr, wp); + db_cfg->db_mode = false; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t wp) +{ + db_cfg->db_val = wp; + mhi_write_db(mhi_cntrl, db_addr, wp); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, *ring->ctxt_wp); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = 
ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +//#define DEBUG_CHAN100_DB +#ifdef DEBUG_CHAN100_DB +static atomic_t chan100_seq = ATOMIC_INIT(0); +#define CHAN100_SIZE 0x1000 +static unsigned int chan100_t[CHAN100_SIZE]; +#endif + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + /* + * Writes to the new ring element must be visible to the hardware + * before letting h/w know there is new element to fetch. + */ + dma_wmb(); + *ring->ctxt_wp = db; + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, + db); +} + +enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? MHI_EE_MAX : exec; +} + +enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} + +int mhi_queue_sclist(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + return -EINVAL; +} + +int mhi_queue_nop(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + return -EINVAL; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) + wp = ring->base; + ring->wp = wp; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; + /* smp update */ + smp_wmb(); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + return nr_el; +} + +static u32 get_used_ring_elements(void *rp, void *wp, u32 el_num) +{ + u32 nr_el; + + if (wp >= rp) + nr_el = (wp - rp)/sizeof(struct mhi_tre); + else { + nr_el = (rp - wp)/sizeof(struct mhi_tre); + nr_el = el_num - nr_el; + } + return nr_el; +} + +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr) +{ + return (addr - ring->base) + ring->iommu_base; +} + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *rp, *wp; + + /* update the WP */ + wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) { + wp = ring->base; + } + ring->wp = wp; + + *ring->ctxt_wp = ring->iommu_base + (ring->wp - ring->base); + + /* update the RP */ + rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; + + /* visible to other cores */ + smp_wmb(); +} + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *tmp = ring->wp + ring->el_size; + + if 
(tmp >= (ring->base + ring->len)) + tmp = ring->base; + + return (tmp == ring->rp); +} + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr, + buf_info->len, buf_info->dir); + if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr, + buf_info->p_addr); +} + +#ifdef ENABLE_MHI_MON +static void mon_bus_submit(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_submit(r->r_data, chan, wp, mhi_tre, buf, len); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_bus_receive(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_receive(r->r_data, chan, wp, mhi_tre, buf, len); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_bus_complete(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_complete(r->r_data, chan, wp, mhi_tre); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} +#endif + +int mhi_queue_skb(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct sk_buff *skb = buf; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + bool assert_wake = false; + int ret; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in activate state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + 
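+		/* + * Note: this back-to-back runtime_get()/runtime_put() keeps no runtime + * PM reference; it appears intended only to kick runtime PM into + * resuming the device out of M3. The doorbell write further down is + * still gated by MHI_DB_ACCESS_VALID(). + */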
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + /* generate the tre */ + buf_info = buf_ring->wp; + buf_info->v_addr = skb->data; + buf_info->cb_buf = skb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + goto map_error; + + mhi_tre = tre_ring->wp; + + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0 : buf_info->len); + } +#endif + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (0<<30); + } +#endif + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_bh(&mhi_chan->lock); + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +map_error: + if (assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, false); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +int mhi_queue_dma(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct mhi_buf *mhi_buf = buf; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + bool assert_wake = false; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in activate state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + /* generate the tre */ + buf_info = buf_ring->wp; + MHI_ASSERT(buf_info->used, "TRE Not 
Freed\n"); + buf_info->p_addr = mhi_buf->dma_addr; + buf_info->pre_mapped = true; + buf_info->cb_buf = mhi_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + + mhi_tre = tre_ring->wp; + + if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) { + buf_info->used = true; + mhi_tre->ptr = + MHI_RSCTRE_DATA_PTR(buf_info->p_addr, buf_info->len); + mhi_tre->dword[0] = + MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base); + mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1; + } else { + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + } + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0: buf_info->len); + } +#endif + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_bh(&mhi_chan->lock); + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + void *buf, + void *cb, + size_t buf_len, + enum MHI_FLAGS flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + buf_info->v_addr = buf; + buf_info->cb_buf = cb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = buf_len; + + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 
0 : buf_info->len); + } +#endif + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring; + unsigned long flags; + bool assert_wake = false; + int ret; + + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + return -EIO; + } + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; +} + +static ssize_t ul_chan_id_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", mhi_dev->ul_chan_id); +} + +static DEVICE_ATTR_RO(ul_chan_id); + +static struct attribute *mhi_dev_attrs[] = { + &dev_attr_ul_chan_id.attr, + NULL, +}; + +static struct attribute_group mhi_dev_attr_group = { + .attrs = mhi_dev_attrs, +}; + +/* destroy specific device */ +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* only destroying virtual devices thats attached to bus */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name); + + sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + /* notify the client and remove the device from mhi bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + 
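+	/* a client driver is bound (dev.driver was checked above); forward the + * callback reason to its status_cb, if one was registered */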
mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} + +static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + struct device_node *controller, *node; + const char *dt_name; + int ret; + + controller = of_find_node_by_name(mhi_cntrl->of_node, "mhi_devices"); + if (!controller) + return; + + for_each_available_child_of_node(controller, node) { + ret = of_property_read_string(node, "mhi,chan", &dt_name); + if (ret) + continue; + if (!strcmp(mhi_dev->chan_name, dt_name)) { + mhi_dev->dev.of_node = node; + break; + } + } +} + +static ssize_t time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n", + t_host, t_device); +} +static DEVICE_ATTR_RO(time); + +static ssize_t time_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (us)\n", + TIME_TICKS_TO_US(t_host), TIME_TICKS_TO_US(t_device)); +} +static DEVICE_ATTR_RO(time_us); + +static struct attribute *mhi_tsync_attrs[] = { + &dev_attr_time.attr, + &dev_attr_time_us.attr, + NULL, +}; + +static const struct attribute_group mhi_tsync_group = { + .attrs = mhi_tsync_attrs, +}; + +void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->mhi_tsync) { + sysfs_remove_group(&mhi_cntrl->mhi_dev->dev.kobj, + &mhi_tsync_group); + kfree(mhi_cntrl->mhi_tsync); + mhi_cntrl->mhi_tsync = NULL; + } +} + +int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl) +{ + return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, + &mhi_tsync_group); +} + +static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + int ret; + + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + return; + + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_TIMESYNC_TYPE; + mhi_dev->chan_name = "TIME_SYNC"; + dev_set_name(&mhi_dev->dev, "%04x_%02x.%02x.%02x_%s", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + return; + } + + mhi_cntrl->tsync_dev = mhi_dev; +} + +/* bind mhi channels into mhi devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + int ret; + + /* + * we need to create time sync device before creating other + * devices, because client may try to capture time during + * clint probe. 
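+	 * (for example, a client could call mhi_get_remote_time_sync() from its + * probe() callback)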
+ */ + mhi_create_time_sync_dev(mhi_cntrl); + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->mhi_dev || + !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_XFER_TYPE; + switch (mhi_chan->dir) { + case DMA_TO_DEVICE: + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = mhi_chan->er_index; + break; + case DMA_NONE: + case DMA_BIDIRECTIONAL: + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_event_id = mhi_chan->er_index; + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + case DMA_FROM_DEVICE: + /* we use dl_chan for offload channels */ + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + } + + mhi_chan->mhi_dev = mhi_dev; + + /* check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = + mhi_chan->er_index; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = + mhi_chan->er_index; + } + mhi_chan->mhi_dev = mhi_dev; + } + } + + mhi_dev->chan_name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%04x_%02x.%02x.%02x_%s", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, + mhi_dev->slot, mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + /* init wake source */ + if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + } + ret = sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + } +} + +static void mhi_dump_tre(struct mhi_controller *mhi_cntrl, struct mhi_tre *_ev) { + union mhi_dev_ring_element_type *ev = (union mhi_dev_ring_element_type *)_ev; + + switch (ev->generic.type) { + case MHI_DEV_RING_EL_INVALID: { + MHI_ERR("carl_ev cmd_invalid, ptr=%llx, %x, %x\n", _ev->ptr, _ev->dword[0], _ev->dword[1]); + } + break; + case MHI_DEV_RING_EL_NOOP: { + MHI_LOG("carl_ev cmd_no_op chan=%u\n", ev->cmd_no_op.chid); + } + break; + case MHI_DEV_RING_EL_TRANSFER: { + MHI_LOG("carl_ev cmd_transfer data=%llx, len=%u, chan=%u\n", + ev->cmd_transfer.data_buf_ptr, ev->cmd_transfer.len, ev->cmd_transfer.chain); + } + break; + case MHI_DEV_RING_EL_RESET: { + MHI_LOG("carl_ev cmd_reset chan=%u\n", ev->cmd_reset.chid); + } + break; + case MHI_DEV_RING_EL_STOP: { + MHI_LOG("carl_ev cmd_stop chan=%u\n", ev->cmd_stop.chid); + } + break; + case MHI_DEV_RING_EL_START: { + MHI_LOG("carl_ev cmd_start chan=%u\n", ev->cmd_start.chid); + } + break; + case MHI_DEV_RING_EL_MHI_STATE_CHG: { + MHI_LOG("carl_ev evt_state_change mhistate=%u\n", ev->evt_state_change.mhistate); + } + break; + case MHI_DEV_RING_EL_CMD_COMPLETION_EVT:{ + MHI_LOG("carl_ev evt_cmd_comp code=%u, 
type=%u\n", ev->evt_cmd_comp.code, ev->evt_cmd_comp.type); + } + break; + case MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT:{ + MHI_VERB("carl_ev evt_tr_comp ptr=%llx, len=%u, code=%u, chan=%u\n", + ev->evt_tr_comp.ptr, ev->evt_tr_comp.len, ev->evt_tr_comp.code, ev->evt_tr_comp.chid); + } + break; + case MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY:{ + MHI_LOG("carl_ev evt_ee_state execenv=%u\n", ev->evt_ee_state.execenv); + } + break; + case MHI_DEV_RING_EL_UNDEF: + default: { + MHI_ERR("carl_ev el_undef type=%d\n", ev->generic.type); + }; + break; + } +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + u32 ev_code; + struct mhi_result result; + unsigned long flags = 0; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * if it's a DB Event then we need to grab the lock + * with preemption disable and as a write because we + * have to update db register and another thread could + * be doing same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + /* device rp after servicing the TREs */ + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + mhi_chan->used_events[ev_code]++; + + result.dir = mhi_chan->dir; + + /* local rp */ + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* Always get the get len from the event */ + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* unmap if it's not premapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + result.bytes_xferd = xfer_len; +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + void *buf = NULL; + size_t len = 0; + + if (mhi_chan->queue_xfer == mhi_queue_skb) { + struct sk_buff *skb = result.buf_addr; + buf = skb->data; + len = result.bytes_xferd; + } + else if (CHAN_INBOUND(mhi_chan->chan)) { + buf = result.buf_addr; + len = result.bytes_xferd; + } + mon_bus_receive(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, local_rp), local_rp, buf, len); + } +#endif + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + /* + * recycle the buffer if buffer is pre-allocated, + * if there is error, not much we can do apart from + * dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + MHI_ERR( 
+ "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + mhi_chan->used_events[ev_code]++; + MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan); + mhi_chan->db_cfg.db_mode = true; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->rp)&0xffff) | (0xf0000); + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (1<<30); + } +#endif + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + MHI_ASSERT(1, "Received BAD TRE event for ring"); + break; + default: + MHI_CRITICAL("Unknown TX completion.\n"); + + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static int parse_rsc_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_buf_info *buf_info; + struct mhi_result result; + int ev_code; + u32 cookie; /* offset to local descriptor */ + u16 xfer_len; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + ev_code = MHI_TRE_GET_EV_CODE(event); + cookie = MHI_TRE_GET_EV_COOKIE(event); + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* received out of bound cookie */ + MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n"); + + buf_info = buf_ring->base + cookie; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + result.bytes_xferd = xfer_len; + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_rsc_event; + + MHI_ASSERT(!buf_info->used, "TRE already Freed\n"); + + /* notify the client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* + * Note: We're arbitrarily incrementing RP even though, completion + * packet we processed might not be the same one, reason we can do this + * is because device guaranteed to cache descriptors in order it + * receive, so even though completion event is different we can re-use + * all descriptors in between. + * Example: + * Transfer Ring has descriptors: A, B, C, D + * Last descriptor host queue is D (WP) and first descriptor + * host queue is A (RP). + * The completion event we just serviced is descriptor C. + * Then we can safely queue descriptors to replace A, B, and C + * even though host did not receive any completions. 
+ */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + struct mhi_timesync *mhi_tsync; + enum mhi_cmd_type type; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + /* out of order completion received */ + MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion"); + + type = MHI_TRE_GET_CMD_TYPE(cmd_pkt); + + if (type == MHI_CMD_TYPE_TSYNC) { + mhi_tsync = mhi_cntrl->mhi_tsync; + mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_tsync->completion); + } else { + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + + /* + * this is a quick check to avoid unnecessary event processing + * in case we already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + +//#define QL_READ_EVENT_WA //from Quectel Windows driver +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + if (local_rp->ptr == 0 && local_rp->dword[0] == 0 && local_rp->dword[1] == 0) { + // event content no sync to memory, just break and wait next event. 
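+				// (this same workaround zeroes every element after it is consumed, + // at the bottom of this loop, so an all-zero TRE here means the + // device's write has not become visible yet)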
+ MHI_ERR("Zero Event!\n"); + break; + } + } +#endif + + mhi_dump_tre(mhi_cntrl, local_rp); + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif + + switch (type) { + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_dev_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + MHI_LOG("MHI state change event to state:%s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum MHI_PM_STATE new_state; + + MHI_ERR("MHI system error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + schedule_work( + &mhi_cntrl->syserr_worker); + break; + } + default: + MHI_ERR("Unsupported STE:%s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX; + enum mhi_ee event = MHI_TRE_GET_EV_EXECENV(local_rp); + + MHI_LOG("MHI EE received event:%s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = MHI_ST_TRANSITION_SBL; + break; + case MHI_EE_FP: + st = MHI_ST_TRANSITION_FP; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = MHI_ST_TRANSITION_MISSION_MODE; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, + mhi_cntrl->priv_data, + MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + MHI_ERR("Unhandled EE event:%s\n", + TO_MHI_EXEC_STR(event)); + } + if (st != MHI_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + break; + } +#if 1 //Add by Quectel + case MHI_PKT_TYPE_TX_EVENT: + case MHI_PKT_TYPE_RSC_TX_EVENT: + { + u32 chan = MHI_TRE_GET_EV_CHID(local_rp); + struct mhi_chan *mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + } + break; + } +#endif + default: + MHI_ASSERT(1, "Unsupported ev type"); + break; + } + +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + local_rp->ptr = 0; + local_rp->dword[0] = local_rp->dword[1] = 0; + } +#endif + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt 
*er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan = NULL; + u32 chan_count = 0; + void *chan_local_rp = NULL; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + + mhi_dump_tre(mhi_cntrl, local_rp); + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + chan_local_rp = mhi_chan->tre_ring.rp; + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + + chan_count += get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements); + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + if (local_rp == dev_rp || event_quota == 0) { + if (chan_count > mhi_chan->used_elements) + mhi_chan->used_elements = chan_count; + chan_count = 0; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + } + count++; + } + + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + int count = 0; + u32 sequence; + u64 remote_time; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + struct tsync_node *tsync_node; + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event"); + + sequence = MHI_TRE_GET_EV_SEQ(local_rp); + remote_time = MHI_TRE_GET_EV_TIME(local_rp); + + do { + spin_lock_irq(&mhi_tsync->lock); + tsync_node = list_first_entry_or_null(&mhi_tsync->head, + struct tsync_node, node); + MHI_ASSERT(!tsync_node, "Unexpected Event"); + + if (unlikely(!tsync_node)) + break; + + list_del(&tsync_node->node); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * device may not able to process all time sync commands + * host issue and only process last command it receive + */ + if (tsync_node->sequence == sequence) { + tsync_node->cb_func(tsync_node->mhi_dev, + sequence, + 
tsync_node->local_time, + remote_time); + kfree(tsync_node); + } else { + kfree(tsync_node); + } + } while (true); + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +void mhi_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + + MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index); + + mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data); + /* process all pending events */ + spin_lock_bh(&mhi_event->lock); + mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + spin_unlock_bh(&mhi_event->lock); +} + +void mhi_ctrl_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + enum mhi_dev_state state; + enum MHI_PM_STATE pm_state = 0; + int ret; + + MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index); + + /* + * we can check pm_state w/o a lock here because there is no way + * pm_state can change from reg access valid to no access while this + * therad being executed. + */ + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + return; + + mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data); + /* process ctrl events events */ + ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + + /* + * we received a MSI but no events to process maybe device went to + * SYS_ERR state, check the state + */ + if (!ret) { + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_get_mhi_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + MHI_ERR("MHI system error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + } +} + +irqreturn_t mhi_msi_handlr(int irq_number, void *dev) +{ + struct mhi_event *mhi_event = dev; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + /* confirm ER has pending events to process before scheduling work */ + if (ev_ring->rp == dev_rp) + return IRQ_HANDLED; + + /* client managed event ring, notify pending data */ + if (mhi_event->cl_manage) { + struct mhi_chan *mhi_chan = mhi_event->mhi_chan; + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + + if (mhi_dev) + mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA); + } else + tasklet_schedule(&mhi_event->task); + + return IRQ_HANDLED; +} + +/* this is the threaded fn */ +irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + enum mhi_dev_state state = MHI_STATE_MAX; + enum MHI_PM_STATE pm_state = 0; + enum mhi_ee ee = MHI_EE_MAX; + unsigned long flags; + + MHI_VERB("Enter\n"); + + write_lock_irqsave(&mhi_cntrl->pm_lock, flags); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + if (mhi_cntrl->msi_allocated >= 5 ||(mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || 
mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + + if (state == MHI_STATE_SYS_ERR) { + MHI_ERR("MHI system error detected\n"); + if (mhi_cntrl->pm_state != MHI_PM_SYS_ERR_DETECT) + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + if (pm_state == MHI_PM_SYS_ERR_DETECT) { + wake_up_all(&mhi_cntrl->state_event); + + if (mhi_cntrl->ee != ee) { + MHI_LOG("device ee:%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee)); + schedule_work(&mhi_cntrl->syserr_worker); + } + /* for fatal errors, we let controller decide next step */ + else if (MHI_IN_PBL(ee)) + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_FATAL_ERROR); + else + schedule_work(&mhi_cntrl->syserr_worker); + } + if (mhi_cntrl->msi_allocated >= 5||(mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (pm_state == MHI_PM_POR) { + wake_up_all(&mhi_cntrl->state_event); + } + + MHI_VERB("Exit\n"); + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev) +{ + + struct mhi_controller *mhi_cntrl = dev; + + /* wake up any events waiting for state change */ + MHI_VERB("Enter\n"); + wake_up_all(&mhi_cntrl->state_event); + MHI_VERB("Exit\n"); + + return IRQ_WAKE_THREAD; +} + +irqreturn_t mhi_one_msi_handlr(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + struct mhi_event_ctxt *er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + u32 i; + u32 handle = 0; + + for (i = 0; i < NUM_MHI_EVT_RINGS; i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (ev_ring->rp != dev_rp) { + handle++; + mhi_msi_handlr(irq_number, mhi_event); + } + } + + if (handle ==0) { + mhi_intvec_threaded_handlr(irq_number, dev); + } + + return IRQ_HANDLED; +} + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum MHI_CMD cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + int chan = 0; + + MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + case MHI_CMD_TIMSYNC_CFG: + cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1 + (mhi_cntrl->mhi_tsync->er_index); + break; + } + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, 128, 
mhi_to_physical(ring, cmd_tre), cmd_tre, NULL, 0); + } +#endif + MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", + (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr, + cmd_tre->dword[0], cmd_tre->dword[1]); + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + + MHI_LOG("Entered: preparing channel:%d\n", mhi_chan->chan); + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + MHI_ERR("Current EE:%s Required EE Mask:0x%x for chan:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask, + mhi_chan->name); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + + /* if channel is not disable state do not allow to start */ + if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) { + ret = -EIO; + MHI_LOG("channel:%d is not in disabled state, ch_state%d\n", + mhi_chan->chan, mhi_chan->ch_state); + goto error_init_chan; + } + + /* client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error with init chan\n"); + goto error_init_chan; + } + } + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI host is not in active state\n"); + read_unlock_bh(&mhi_cntrl->pm_lock); + ret = -EIO; + goto error_pm_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); + if (ret) { + MHI_ERR("Failed to send start chan cmd\n"); + goto error_send_cmd; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive cmd completion for chan:%d\n", + mhi_chan->chan); + ret = -EIO; + goto error_send_cmd; + } + + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_ENABLED; + write_unlock_irq(&mhi_chan->lock); + + /* pre allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* prepare transfer descriptors */ + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, + len, MHI_EOT); + if (ret) { + MHI_ERR("Chan:%d error prepare buffer\n", + mhi_chan->chan); + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + + MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan); + + return 0; + +error_send_cmd: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + 
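+	/* drop the device wake vote taken before the START_CHAN command was sent */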
read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring; + unsigned long flags; + + MHI_LOG("Marking all events for chan:%d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == + MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + + MHI_LOG("Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) + mhi_cntrl->wake_put(mhi_cntrl, false); + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +static void mhi_reset_rsc_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + struct mhi_buf_info *buf_info; + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + + buf_info = buf_ring->base; + for (; (void *)buf_info < buf_ring->base + buf_ring->len; buf_info++) { + if (!buf_info->used) + continue; + + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + buf_info->used = false; + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* nothing to reset, client don't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + 
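mhi_mark_stale_events(), defined above and called on the next line, walks the event ring from the host's local read pointer up to the device's read pointer, retags every transfer event that belongs to the channel being reset, and wraps back to the ring base when it reaches the end. A standalone sketch of that walk, with invented event types and plain array indices standing in for the driver's MHI_TRE_GET_* accessors and virtual-address pointers:

#include <stdio.h>

#define DEMO_RING_ELEMENTS 8
#define DEMO_EV_TX    2   /* pretend "transfer completion" event type */
#define DEMO_EV_STALE 3   /* pretend "stale" marker */

struct demo_event {
    int type;
    int chan;
};

/* Retag events for 'chan' in [local_rp, dev_rp), wrapping at the ring end. */
static void demo_mark_stale(struct demo_event *ring, unsigned int local_rp,
                            unsigned int dev_rp, int chan)
{
    while (local_rp != dev_rp) {
        struct demo_event *ev = &ring[local_rp];

        if (ev->type == DEMO_EV_TX && ev->chan == chan)
            ev->type = DEMO_EV_STALE;  /* no longer treated as a live transfer event */

        local_rp++;
        if (local_rp == DEMO_RING_ELEMENTS)
            local_rp = 0;              /* wrap, as the driver does at the ring end */
    }
}

int main(void)
{
    struct demo_event ring[DEMO_RING_ELEMENTS] = {
        [5] = { DEMO_EV_TX, 100 }, [6] = { DEMO_EV_TX, 101 }, [7] = { DEMO_EV_TX, 100 },
    };

    /* dev_rp of 1 simulates a device pointer that has already wrapped past the end */
    demo_mark_stale(ring, 5, 1, 100);
    printf("el5=%d el6=%d el7=%d\n", ring[5].type, ring[6].type, ring[7].type);
    return 0;
}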
mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) + mhi_reset_rsc_chan(mhi_cntrl, mhi_chan); + else + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); + MHI_LOG("Reset complete.\n"); +} + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + + MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan); + + /* no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan); + write_unlock_irq(&mhi_chan->lock); + mutex_unlock(&mhi_chan->mutex); + return; + } + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + goto error_invalid_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); + if (ret) { + MHI_ERR("Failed to send reset chan cmd\n"); + goto error_completion; + } + + /* even if it fails we will still reset */ + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) + MHI_ERR("Failed to receive cmd completion, still resetting\n"); + +error_completion: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_invalid_state: + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan); + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + int reg = 0; + int ret; + u32 val[4]; + + seq_printf(m, + "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->M0, mhi_cntrl->M2, mhi_cntrl->M3, + mhi_cntrl->wake_set, + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->alloc_size)); + + seq_printf(m, + "mhi_state:%s exec_env:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + seq_printf(m, "dump mhi reg addr:%p\n", mhi_cntrl->regs); + for (reg = 0; reg < 0x100; reg+=16) { + val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]); + seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); + } + + seq_printf(m, "dump bhi reg addr:%p\n", mhi_cntrl->bhi); + for (reg = 0; reg < 0x100; reg+=16) { + val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]); + ret = 
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]); + seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); + } + + return 0; +} + +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + + int i; + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index:%d offload event ring\n", i); + } else { + seq_printf(m, + "Index:%d modc:%d modt:%d base:0x%0llx len:0x%llx", + i, er_ctxt->intmodc, er_ctxt->intmodt, + er_ctxt->rbase, er_ctxt->rlen); + seq_printf(m, + " rp:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + er_ctxt->rp, er_ctxt->wp, + (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), + (unsigned long long)mhi_event->db_cfg.db_val); + seq_printf(m, "used:%u\n", mhi_event->used_elements); + +#ifdef DEBUG_CHAN100_DB + if (mhi_event->mhi_chan && mhi_event->chan == 100) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t j; + + for (j = 0; j < ring->elements; j++, tre++) { + seq_printf(m, + "%08x: %llx, %08x, %08x\n", + (unsigned int)(j*sizeof(struct mhi_tre)), + tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif + } + } + + return 0; +} + +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->ch_state == MHI_CH_STATE_DISABLED) + continue; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) offload channel\n", + mhi_chan->name, mhi_chan->chan); + } else if (mhi_chan->mhi_dev) { + seq_printf(m, + "%s(%u) state:0x%x brstmode:0x%x pllcfg:0x%x type:0x%x erindex:%u", + mhi_chan->name, mhi_chan->chan, + chan_ctxt->chstate, chan_ctxt->brstmode, + chan_ctxt->pollcfg, chan_ctxt->chtype, + chan_ctxt->erindex); + seq_printf(m, + " base:0x%llx len:0x%llx rp:%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + chan_ctxt->rbase, chan_ctxt->rlen, + chan_ctxt->rp, chan_ctxt->wp, + (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), + (unsigned long long)mhi_chan->db_cfg.db_val); + seq_printf(m, "used:%u, EOB:%u, EOT:%u, OOB:%u, DB_MODE:%u\n", mhi_chan->used_elements, + mhi_chan->used_events[MHI_EV_CC_EOB], mhi_chan->used_events[MHI_EV_CC_EOT], + mhi_chan->used_events[MHI_EV_CC_OOB],mhi_chan->used_events[MHI_EV_CC_DB_MODE]); + +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + unsigned int n = 0; + seq_printf(m, "chan100_seq = %04x\n", atomic_read(&chan100_seq)%CHAN100_SIZE); + for (n = 0; n < CHAN100_SIZE; n++) { + seq_printf(m, "%04x: %08x\n", n, chan100_t[n]); + } + } +#endif + +#if 0 + if (ring->base && /*(i&1) &&*/ (i < MHI_CLIENT_IP_HW_0_OUT)) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t e; + + for (e = 0; e < ring->elements; e++, tre++) { + seq_printf(m, "[%03d] %llx, %08x, %08x\n", i, tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif + } + } + + return 
0; +} + +/* move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error moving chan %s,%d to START state\n", + mhi_chan->name, mhi_chan->chan); + goto error_open_chan; + } + + if (mhi_dev->dl_chan == mhi_dev->ul_chan) { + break; + } + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + if (mhi_dev->dl_chan == mhi_dev->ul_chan) { + break; + } + } +} +EXPORT_SYMBOL(mhi_unprepare_from_transfer); + +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? + mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) + return 0; + + return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL(mhi_get_no_free_descriptors); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,3,0 )) +static int __mhi_bdf_to_controller(struct device *dev, void *tmp) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_device *match = tmp; + + /* return any none-zero value if match */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE && + mhi_dev->domain == match->domain && mhi_dev->bus == match->bus && + mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id) + return 1; + + return 0; +} +#else +static int __mhi_bdf_to_controller(struct device *dev, const void *tmp) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + const struct mhi_device *match = tmp; + + /* return any none-zero value if match */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE && + mhi_dev->domain == match->domain && mhi_dev->bus == match->bus && + mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id) + return 1; + + return 0; +} +#endif + +struct mhi_controller *mhi_bdf_to_controller(u32 domain, + u32 bus, + u32 slot, + u32 dev_id) +{ + struct mhi_device tmp, *mhi_dev; + struct device *dev; + + tmp.domain = domain; + tmp.bus = bus; + tmp.slot = slot; + tmp.dev_id = dev_id; + + dev = bus_find_device(&mhi_bus_type, NULL, &tmp, + __mhi_bdf_to_controller); + if (!dev) + return NULL; + + mhi_dev = to_mhi_device(dev); + + return mhi_dev->mhi_cntrl; +} +EXPORT_SYMBOL(mhi_bdf_to_controller); + +int mhi_poll(struct mhi_device *mhi_dev, + u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL(mhi_poll); + +int 
mhi_get_remote_time_sync(struct mhi_device *mhi_dev, + u64 *t_host, + u64 *t_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + int ret; + + /* not all devices support time feature */ + if (!mhi_tsync) + return -EIO; + + /* bring to M0 state */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + mutex_lock(&mhi_tsync->lpm_mutex); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_invalid_state; + } + + /* disable link level low power modes */ + ret = mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data); + if (ret) + goto error_invalid_state; + + /* + * time critical code to fetch device times, + * delay between these two steps should be + * deterministic as possible. + */ + preempt_disable(); + local_irq_disable(); + + *t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); + *t_dev = readq_relaxed_no_log(mhi_tsync->time_reg); + + local_irq_enable(); + preempt_enable(); + + mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data); + +error_invalid_state: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_tsync->lpm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time_sync); + +/** + * mhi_get_remote_time - Get external modem time relative to host time + * Trigger event to capture modem time, also capture host time so client + * can do a relative drift comparision. + * Recommended only tsync device calls this method and do not call this + * from atomic context + * @mhi_dev: Device associated with the channels + * @sequence:unique sequence id track event + * @cb_func: callback function to call back + */ +int mhi_get_remote_time(struct mhi_device *mhi_dev, + u32 sequence, + void (*cb_func)(struct mhi_device *mhi_dev, + u32 sequence, + u64 local_time, + u64 remote_time)) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + struct tsync_node *tsync_node; + int ret; + + /* not all devices support time feature */ + if (!mhi_tsync) + return -EIO; + + /* tsync db can only be rung in M0 state */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + /* + * technically we can use GFP_KERNEL, but wants to avoid + * # of times scheduling out + */ + tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC); + if (!tsync_node) { + ret = -ENOMEM; + goto error_no_mem; + } + + tsync_node->sequence = sequence; + tsync_node->cb_func = cb_func; + tsync_node->mhi_dev = mhi_dev; + + /* disable link level low power modes */ + mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_invalid_state; + } + + spin_lock_irq(&mhi_tsync->lock); + list_add_tail(&tsync_node->node, &mhi_tsync->head); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * time critical code, delay between these two steps should be + * deterministic as possible. 
+ */ + preempt_disable(); + local_irq_disable(); + + tsync_node->local_time = + mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); + writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db); + /* write must go thru immediately */ + wmb(); + + local_irq_enable(); + preempt_enable(); + + ret = 0; + +error_invalid_state: + if (ret) + kfree(tsync_node); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data); + +error_no_mem: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time); + +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl) +{ + enum mhi_dev_state state; + enum mhi_ee ee; + int i, ret; + u32 val = 0; + void __iomem *mhi_base = mhi_cntrl->regs; + void __iomem *bhi_base = mhi_cntrl->bhi; + void __iomem *bhie_base = mhi_cntrl->bhie; + void __iomem *wake_db = mhi_cntrl->wake_db; + struct { + const char *name; + int offset; + void *base; + } debug_reg[] = { + { "MHI_CNTRL", MHICTRL, mhi_base}, + { "MHI_STATUS", MHISTATUS, mhi_base}, + { "MHI_WAKE_DB", 0, wake_db}, + { "BHI_EXECENV", BHI_EXECENV, bhi_base}, + { "BHI_STATUS", BHI_STATUS, bhi_base}, + { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, + { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, + { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, + { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, + { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, + { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, + { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, + { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, + { NULL }, + }; + + MHI_LOG("host pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + + MHI_LOG("device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state)); + + for (i = 0; debug_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base, + debug_reg[i].offset, &val); + MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val, + ret); + } +} +EXPORT_SYMBOL(mhi_debug_reg_dump); diff --git a/driver/quectel_MHI/src/core/mhi_pm.c b/driver/quectel_MHI/src/core/mhi_pm.c new file mode 100644 index 0000000..117a600 --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_pm.c @@ -0,0 +1,1255 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +/* + * Not all MHI states transitions are sync transitions. Linkdown, SSR, and + * shutdown can happen anytime asynchronously. This function will transition to + * new state only if we're allowed to transitions. + * + * Priority increase as we go down, example while in any states from L0, start + * state from L1, L2, or L3 can be set. Notable exception to this rule is state + * DISABLE. From DISABLE state we can transition to only POR or state. Also + * for example while in L2 state, user cannot jump back to L1 or L0 states. 
+ * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + */ +static struct mhi_pm_transitions const mhi_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT | + MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + }, +}; + +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) { + MHI_CRITICAL("cur_state:%s is not a valid pm_state\n", + to_mhi_pm_state_str(cur_state)); + return cur_state; + } + + if (unlikely(mhi_state_transitions[index].from_state != cur_state)) { + MHI_ERR("index:%u cur_state:%s != actual_state: %s\n", + index, to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str + (mhi_state_transitions[index].from_state)); + return cur_state; + } + + if (unlikely(!(mhi_state_transitions[index].to_states & state))) { + MHI_LOG( + "Not allowing pm state transition from:%s to:%s state\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(state)); + return cur_state; + } + + MHI_LOG("Transition to pm state from:%s to:%s\n", + to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state)); + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_dev_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* set device wake */ +void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and 
do not need to deassert + if (mhi_cntrl->dev_state == MHI_STATE_M2) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } + return; +#endif + + /* if set, regardless of count set the bit if not set */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* if resources requested already, then increment and exit */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* clear device wake */ +void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override) +{ + unsigned long flags; + +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and do not need to deassert + return; +#endif + +#if 1 //Add by Quectel + if (atomic_read(&mhi_cntrl->dev_wake) == 0) + return; +#endif + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0"); + + /* resources not dropping to 0, decrement and exit */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + u32 reset = 1, ready = 0; + struct mhi_event *mhi_event; + enum MHI_PM_STATE cur_state; + int ret, i; + + MHI_LOG("Waiting to enter READY state\n"); + + /* wait for RESET to be cleared and READY bit to be set */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &reset) || + mhi_read_reg_field(mhi_cntrl, base, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &ready) || + (!reset && ready), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + /* device enter into error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* device did not transition to ready state */ + if (reset || !ready) + return -ETIMEDOUT; + + MHI_LOG("Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + MHI_ERR("Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto error_mmio; + + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + MHI_ERR("Error programming mmio registers\n"); + goto error_mmio; + } + + /* add elements to all sw event rings */ + 
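The loop that follows hands nearly the whole ring to the device in one step: for each software event ring the host write pointer is parked on the last element (base + len - el_size), so every slot but one is available for the device to post events into, and the context write pointer is updated to match before the doorbell is rung. A small sketch of that pointer arithmetic, using generic field names rather than the driver's mhi_ring/er_ctxt types:

#include <stdint.h>
#include <stdio.h>

struct demo_ev_ring {
    void   *base;      /* first element */
    size_t  len;       /* total ring size in bytes */
    size_t  el_size;   /* size of one event element */
    void   *rp, *wp;   /* host-side read/write pointers */
};

/* Give the device every slot but one: park wp on the last element. */
static void demo_prime_event_ring(struct demo_ev_ring *ring)
{
    ring->rp = ring->base;
    ring->wp = (char *)ring->base + ring->len - ring->el_size;
}

int main(void)
{
    static uint8_t storage[16 * 16];   /* 16 elements of 16 bytes each */
    struct demo_ev_ring ring = {
        .base = storage, .len = sizeof(storage), .el_size = 16,
    };

    demo_prime_event_ring(&ring);

    size_t usable = (size_t)((char *)ring.wp - (char *)ring.rp) / ring.el_size;
    printf("device-usable elements: %zu\n", usable);   /* prints 15 */
    return 0;
}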
mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* needs to update to all cores */ + smp_wmb(); + + /* ring the db for event rings */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* set device into M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + struct mhi_chan *mhi_chan; + int i; + + MHI_LOG("Entered With State:%s PM_STATE:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M0), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + mhi_cntrl->M0++; + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + + /* ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* only ring primary cmd ring */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + } + + /* ring channel db registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->db_cfg.reset_req) + mhi_chan->db_cfg.db_mode = true; + + /* only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + write_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + MHI_VERB("Exited\n"); + + return 0; +} + +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + /* if it fails, means we transition to M3 */ + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + MHI_VERB("Entered M2 State\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + mhi_cntrl->M2++; + + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + /* transfer pending, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->dev_wake))) { + MHI_VERB("Exiting M2 Immediately, count:%d\n", + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, false); + 
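The wake_get(true)/wake_put(false) pair just above briefly asserts the device-wake vote so an idle device is pulled back out of M2. In the generic vote scheme implemented by mhi_assert_dev_wake()/mhi_deassert_dev_wake() (which this driver partly short-circuits for M2, per the #if 1 blocks earlier), only the 0 -> 1 transition writes the wake doorbell and only the 1 -> 0 transition clears it, so nested voters never touch the register. A user-space model of that refcounted vote, with a plain counter and flag in place of the driver's atomic_t, spinlock and doorbell write:

#include <stdbool.h>
#include <stdio.h>

struct demo_wake {
    int  votes;      /* stands in for the atomic dev_wake count */
    bool db_set;     /* stands in for the wake doorbell register */
};

/* Take a wake vote; only the first voter actually rings the wake doorbell. */
static void demo_wake_get(struct demo_wake *w)
{
    if (++w->votes == 1 && !w->db_set) {
        w->db_set = true;
        printf("wake doorbell -> 1\n");
    }
}

/* Drop a wake vote; only the last voter clears the doorbell again. */
static void demo_wake_put(struct demo_wake *w)
{
    if (w->votes && --w->votes == 0 && w->db_set) {
        w->db_set = false;
        printf("wake doorbell -> 0\n");
    }
}

int main(void)
{
    struct demo_wake w = { 0, false };

    demo_wake_get(&w);   /* first vote: doorbell written */
    demo_wake_get(&w);   /* nested vote: no register access */
    demo_wake_put(&w);
    demo_wake_put(&w);   /* last vote dropped: doorbell cleared */
    return 0;
}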
read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M3), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + wake_up_all(&mhi_cntrl->state_event); + mhi_cntrl->M3++; + + MHI_LOG("Entered mhi_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return 0; +} + +static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) +{ + int i, ret; + struct mhi_event *mhi_event; + + MHI_LOG("Processing Mission Mode Transition\n"); + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_EE_MISSION_MODE); + + /* force MHI to be in M0 state before continuing */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + ret = -EIO; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + goto error_mission_mode; + + wake_up_all(&mhi_cntrl->state_event); + + /* add elements to all HW event rings */ + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto error_mission_mode; + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* all ring updates must get updated immediately */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* setup support for time sync */ + mhi_init_timesync(mhi_cntrl); + + MHI_LOG("Adding new devices\n"); + + /* add supported devices */ + mhi_create_devices(mhi_cntrl); + + ret = 0; + + read_lock_bh(&mhi_cntrl->pm_lock); + +error_mission_mode: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +/* handles both sys_err and shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE transition_state) +{ + enum MHI_PM_STATE cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + int ret, i; + + MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(transition_state)); + + /* We must notify MHI control driver so it can clean up first */ + if (transition_state == MHI_PM_SYS_ERR_PROCESS) + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = 
mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state == transition_state) { + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* wake up any threads waiting for state transitions */ + wake_up_all(&mhi_cntrl->state_event); + + /* not handling sys_err, could be middle of shut down */ + if (cur_state != transition_state) { + MHI_LOG("Failed to transition to state:0x%x from:0x%x\n", + transition_state, cur_state); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* trigger MHI RESET so device will not access host ddr */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* wait for reset to be cleared */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &in_reset) + || !in_reset, timeout); + if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + MHI_CRITICAL("Device failed to exit RESET state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* Set the numbers of Event Rings supported */ + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); + + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + } + + MHI_LOG("Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Reset all active channels and remove mhi devices\n"); + mhi_cntrl->klog_slient = 1; + device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device); + mhi_cntrl->klog_slient = 0; + + MHI_LOG("Finish resetting channels\n"); + + MHI_LOG("Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + flush_delayed_work(&mhi_cntrl->ready_worker); + flush_work(&mhi_cntrl->st_worker); + flush_work(&mhi_cntrl->fw_worker); + + mutex_lock(&mhi_cntrl->pm_mutex); + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0"); + + /* reset the ev rings and cmd rings */ + MHI_LOG("Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* do not touch offload er */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* remove support for time sync */ + mhi_destroy_timesync(mhi_cntrl); + + if (cur_state == MHI_PM_SYS_ERR_PROCESS) { + if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && 
mhi_get_mhi_state(mhi_cntrl) == MHI_STATE_RESET) { + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_EDL; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + write_unlock_irq(&mhi_cntrl->pm_lock); + } + else + mhi_ready_state_transition(mhi_cntrl); + } else { + /* move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + MHI_ERR("Error moving from pm state:%s to state:%s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + } + + MHI_LOG("Exit with pm_state:%s mhi_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +int mhi_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + enum MHI_PM_STATE cur_state; + int ret; + + MHI_LOG("Trigger MHI Reset\n"); + + /* exit lpm first */ + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + + return 0; +} + +/* queue a new work item and scheduler work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + schedule_work(&mhi_cntrl->st_worker); + + return 0; +} + +void mhi_pm_sys_err_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + syserr_worker); + + MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); +} + +void mhi_pm_ready_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + ready_worker.work); + enum mhi_ee ee = MHI_EE_MAX; + + if (mhi_cntrl->dev_state != MHI_STATE_RESET) + return; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (ee == MHI_EE_PTHRU) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else if (ee == MHI_EE_AMSS || ee == MHI_EE_SBL) + mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY); + else if (ee == MHI_EE_EDL) + mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_PBL); +} + +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + 
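mhi_queue_state_transition() above pushes a small GFP_ATOMIC allocation onto transition_list under a spinlock and schedules this worker; the worker splices the entire pending list onto the local head declared just below while holding the lock, then walks and frees the items with the lock already released, so producers are never blocked behind state processing. A standalone model of that drain-then-process pattern, using a plain singly linked FIFO in place of list_head, list_splice_tail_init() and the spinlock:

#include <stdio.h>
#include <stdlib.h>

struct demo_item {
    int state;
    struct demo_item *next;
};

/* Producer side: in the driver these pointers are spinlock-protected. */
static struct demo_item *pending_head, *pending_tail;

/* Producer: queue one state transition (a GFP_ATOMIC kmalloc in the driver). */
static int demo_queue(int state)
{
    struct demo_item *item = malloc(sizeof(*item));

    if (!item)
        return -1;
    item->state = state;
    item->next = NULL;
    if (pending_tail)
        pending_tail->next = item;
    else
        pending_head = item;
    pending_tail = item;                 /* FIFO order, like list_add_tail() */
    return 0;
}

/* Consumer: take the whole pending list in one step, then process unlocked. */
static void demo_worker(void)
{
    struct demo_item *head = pending_head;   /* this swap is what the lock protects */

    pending_head = pending_tail = NULL;      /* list is empty again for producers */

    while (head) {
        struct demo_item *item = head;

        head = head->next;
        printf("processing transition state=%d\n", item->state);
        free(item);
    }
}

int main(void)
{
    demo_queue(1);      /* e.g. READY */
    demo_queue(2);      /* e.g. MISSION_MODE */
    demo_worker();
    return 0;
}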
LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + st_worker); + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + MHI_LOG("Transition to state:%s\n", + TO_MHI_STATE_TRANS_STR(itr->state)); + + if (mhi_cntrl->ee != mhi_get_exec_env(mhi_cntrl)) { + MHI_LOG("%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + } + + switch (itr->state) { + case MHI_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (MHI_IN_PBL(mhi_cntrl->ee)) + wake_up_all(&mhi_cntrl->state_event); + break; + case MHI_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + mhi_create_devices(mhi_cntrl); + break; + case MHI_ST_TRANSITION_MISSION_MODE: + mhi_pm_mission_mode_transition(mhi_cntrl); + break; + case MHI_ST_TRANSITION_READY: + mhi_ready_state_transition(mhi_cntrl); + break; + case MHI_ST_TRANSITION_FP: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_FP; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + mhi_create_devices(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 val, regVal; + enum mhi_ee current_ee; + enum MHI_ST_TRANSITION next_state; + + MHI_LOG("Requested to power on\n"); + +#if 0 + if (mhi_cntrl->msi_allocated < mhi_cntrl->total_ev_rings) + return -EINVAL; +#endif + + if (mhi_get_mhi_state(mhi_cntrl) >= MHI_STATE_M0) { + MHI_LOG("mhi_state = %s\n", mhi_state_str[mhi_get_mhi_state(mhi_cntrl)]); + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + msleep(50); + MHI_LOG("mhi_state = %s\n", mhi_state_str[mhi_get_mhi_state(mhi_cntrl)]); + } + +#if 1 //GLUE.SDX55_LE.1.0-00098-NOOP_TEST-1\common\hostdrivers\win\MhiHost MhiInitNewDev() + /* Check device Channels support */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, ®Val); +#if 0 + val = MHI_READ_REG_FIELD(regVal, MHICFG, NCH); + MHI_LOG("Device CHs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NHWCH); + MHI_LOG("Device HW CHs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NER); + MHI_LOG("Device ERs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NHWER); + MHI_LOG("Device HW ERs: %d\n", val); +#endif + /* Set the numbers of Event Rings supported */ + MHI_WRITE_REG_FIELD(regVal, MHICFG, NER, NUM_MHI_EVT_RINGS); + MHI_WRITE_REG_FIELD(regVal, MHICFG, NHWER, NUM_MHI_HW_EVT_RINGS); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, regVal); +#endif + + /* set to default wake if not set */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + if (!mhi_cntrl->pre_init) { + /* setup device context */ + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting dev_context\n"); + goto error_dev_ctxt; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting up 
irq\n"); + goto error_setup_irq; + } + } + + /* setup bhi offset & intvec */ + write_lock_irq(&mhi_cntrl->pm_lock); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhi offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhi = mhi_cntrl->regs + val; + + /* setup bhie offset */ + if (mhi_cntrl->fbc_download || true) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhie offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhie = mhi_cntrl->regs + val; + } + + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + MHI_LOG("dev_state:%s ee:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + /* confirm device is in valid exec env */ + if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + //MHI_ERR("Not a valid ee for power on\n"); + //ret = -EIO; + //goto error_bhi_offset; + } + + /* transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? + MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY; + + //if (next_state == MHI_ST_TRANSITION_PBL) + // schedule_work(&mhi_cntrl->fw_worker); + + if (next_state == MHI_ST_TRANSITION_PBL) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else + mhi_queue_state_transition(mhi_cntrl, next_state); + + mhi_init_debugfs(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Power on setup success\n"); + + return 0; + +error_bhi_offset: + if (!mhi_cntrl->pre_init) + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + if (!mhi_cntrl->pre_init) + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum MHI_PM_STATE cur_state; + + /* if it's not graceful shutdown, force MHI to a linkdown state */ + if (!graceful) { + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_LD_ERR_FATAL_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) + MHI_ERR("Failed to move to state:%s from:%s\n", + to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + + mhi_deinit_debugfs(mhi_cntrl); + + if (!mhi_cntrl->pre_init) { + /* free all allocated resources */ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + } +} +EXPORT_SYMBOL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_MISSION_MODE(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -EIO; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + int ret; + enum MHI_PM_STATE new_state; + struct mhi_chan *itr, *tmp; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* do a quick check to see if any pending data, then exit */ + if (atomic_read(&mhi_cntrl->dev_wake)) { + MHI_VERB("Busy, aborting M3\n"); + return -EBUSY; + } + + /* exit MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Did not enter M0||M1 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_m0_entry; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + /* we're asserting wake so count would be @ least 1 */ + if (atomic_read(&mhi_cntrl->dev_wake) > 1) { + MHI_VERB("Busy, aborting M3\n"); + write_unlock_irq(&mhi_cntrl->pm_lock); + ret = -EBUSY; + goto error_m0_entry; + } + + /* anytime after this, we will resume thru runtime pm framework */ + MHI_LOG("Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + ret = -EIO; + goto error_m0_entry; + } + + /* set dev to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + mhi_cntrl->wake_put(mhi_cntrl, false); + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_LOG("Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M3 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; + +error_m0_entry: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + int ret; + struct mhi_chan *itr, *tmp; + + MHI_LOG("Entered with pm_state:%s dev_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3"); + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, 
MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* set dev to M0 and wait for completion */ + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + /* + * It's possible device already in error state and we didn't + * process it due to low power mode, force a check + */ + mhi_intvec_threaded_handlr(0, mhi_cntrl); + return -EIO; + } + + return 0; +} + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state == MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_inc(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + atomic_inc(&mhi_dev->dev_wake); + + return ret; +} +EXPORT_SYMBOL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_dec(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_put); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + int ret; + + MHI_LOG("Enter with pm_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + MHI_LOG("Triggering SYS_ERR to force rddm state\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + /* wait for rddm event */ + MHI_LOG("Waiting for device to enter RDDM state\n"); + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == 
MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = ret ? 0 : -EIO; + + MHI_LOG("Exiting with pm_state:%s ee:%s ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + return ret; +} +EXPORT_SYMBOL(mhi_force_rddm_mode); diff --git a/driver/quectel_MHI/src/core/mhi_sdx20.h b/driver/quectel_MHI/src/core/mhi_sdx20.h new file mode 100644 index 0000000..5a92efa --- /dev/null +++ b/driver/quectel_MHI/src/core/mhi_sdx20.h @@ -0,0 +1,362 @@ +#ifndef __SDX20_MHI_H +#define __SDX20_MHI_H + +#include + +/* MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + 
uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 25 +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +/*maximum trasnfer completion events buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element cmd_transfer; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + 
struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; +#endif /* _SDX20_MHI_H_ */ diff --git a/driver/quectel_MHI/src/core/sdx20_mhi.h b/driver/quectel_MHI/src/core/sdx20_mhi.h new file mode 100644 index 0000000..a7d3783 --- /dev/null +++ b/driver/quectel_MHI/src/core/sdx20_mhi.h @@ -0,0 +1,426 @@ +#ifndef __SDX20_MHI_H +#define __SDX20_MHI_H + +#include + +/* MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum 
mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum 
mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 25 +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +/*maximum trasnfer completion events buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element tre; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; + +#if 0 +/* SW channel client list */ +enum mhi_client_channel { + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_IP_CTRL_0_OUT = 16, + 
MHI_CLIENT_IP_CTRL_0_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_DCI_OUT = 20, + MHI_CLIENT_DCI_IN = 21, + MHI_CLIENT_IP_CTRL_3_OUT = 22, + MHI_CLIENT_IP_CTRL_3_IN = 23, + MHI_CLIENT_IP_CTRL_4_OUT = 24, + MHI_CLIENT_IP_CTRL_4_IN = 25, + MHI_CLIENT_IP_CTRL_5_OUT = 26, + MHI_CLIENT_IP_CTRL_5_IN = 27, + MHI_CLIENT_IP_CTRL_6_OUT = 28, + MHI_CLIENT_IP_CTRL_6_IN = 29, + MHI_CLIENT_IP_CTRL_7_OUT = 30, + MHI_CLIENT_IP_CTRL_7_IN = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_IP_SW_0_OUT = 34, + MHI_CLIENT_IP_SW_0_IN = 35, + MHI_CLIENT_IP_SW_1_OUT = 36, + MHI_CLIENT_IP_SW_1_IN = 37, + MHI_CLIENT_IP_SW_2_OUT = 38, + MHI_CLIENT_IP_SW_2_IN = 39, + MHI_CLIENT_IP_SW_3_OUT = 40, + MHI_CLIENT_IP_SW_3_IN = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_4_OUT = 46, + MHI_CLIENT_IP_SW_4_IN = 47, + MHI_MAX_SOFTWARE_CHANNELS = 48, + MHI_CLIENT_TEST_OUT = 60, + MHI_CLIENT_TEST_IN = 61, + MHI_CLIENT_RESERVED_1_LOWER = 62, + MHI_CLIENT_RESERVED_1_UPPER = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_RESERVED_2_LOWER = 102, + MHI_CLIENT_RESERVED_2_UPPER = 127, + MHI_MAX_CHANNELS = 102, +}; +#endif +#endif /* _SDX20_MHI_H_ */ diff --git a/driver/quectel_MHI/src/devices/Kconfig b/driver/quectel_MHI/src/devices/Kconfig new file mode 100644 index 0000000..d92e95b --- /dev/null +++ b/driver/quectel_MHI/src/devices/Kconfig @@ -0,0 +1,33 @@ +menu "MHI device support" + +config MHI_NETDEV + tristate "MHI NETDEV" + depends on MHI_BUS + help + MHI based net device driver for transferring IP traffic + between host and modem. By enabling this driver, clients + can transfer data using standard network interface. Over + the air traffic goes thru mhi netdev interface. + +config MHI_UCI + tristate "MHI UCI" + depends on MHI_BUS + help + MHI based uci driver is for transferring data between host and + modem using standard file operations from user space. Open, read, + write, ioctl, and close operations are supported by this driver. + Please check mhi_uci_match_table for all supported channels that + are exposed to userspace. + +config MHI_SATELLITE + tristate "MHI SATELLITE" + depends on MHI_BUS + help + MHI proxy satellite device driver enables NON-HLOS MHI satellite + drivers to communicate with device over PCIe link without host + involvement. Host facilitates propagation of events from device + to NON-HLOS MHI satellite drivers, channel states, and power + management over IPC communication. It helps in HLOS power + savings. + +endmenu diff --git a/driver/quectel_MHI/src/devices/Makefile b/driver/quectel_MHI/src/devices/Makefile new file mode 100644 index 0000000..e720069 --- /dev/null +++ b/driver/quectel_MHI/src/devices/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o +obj-$(CONFIG_MHI_UCI) +=mhi_uci.o +obj-$(CONFIG_MHI_SATELLITE) +=mhi_satellite.o diff --git a/driver/quectel_MHI/src/devices/mhi_netdev.c b/driver/quectel_MHI/src/devices/mhi_netdev.c new file mode 100644 index 0000000..1d3a431 --- /dev/null +++ b/driver/quectel_MHI/src/devices/mhi_netdev.c @@ -0,0 +1,1068 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +//#include +#include +#include +#include +//#include +#if 1 +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } +#endif +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define QUECTEL_NO_DTS + +extern void rmnet_data_init(struct net_device *real_dev, u32 nr_rmnet_devs); +extern void rmnet_data_deinit(struct net_device *real_dev, u32 nr_rmnet_devs); + +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { + uint i; + uint *d = (uint *)data; + + printk(KERN_DEBUG "%s data=%p, len=%x\n", tag, data, len); + len = (len+3)/4; + for (i = 0; i < len; i+=4) { + printk(KERN_DEBUG "%08x %08x %08x %08x %08x\n", i*4, d[i+0], d[i+1], d[i+2], d[i+3]); + } +} + +#define MHI_NETDEV_DRIVER_NAME "mhi_netdev" +#define WATCHDOG_TIMEOUT (30 * HZ) +#define IPC_LOG_PAGES (100) +#define MAX_NETBUF_SIZE (128) + +#ifdef CONFIG_MHI_DEBUG + +#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#define MSG_VERB(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_VERBOSE)) \ + ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#else + +#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MSG_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#define MSG_VERB(fmt, ...) + +#endif + +#define MSG_LOG(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_INFO)) \ + ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#define MSG_ERR(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_ERROR)) \ + ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +struct mhi_net_chain { + struct sk_buff *head, *tail; /* chained skb */ +}; + +struct mhi_netdev { + int alias; + struct mhi_device *mhi_dev; + struct mhi_netdev *rsc_dev; /* rsc linked node */ + bool is_rsc_dev; + int wake; + + u32 mru; + u32 order; + const char *interface_name; + struct napi_struct *napi; + struct net_device *ndev; + bool ethernet_interface; + + struct mhi_netbuf **netbuf_pool; + int pool_size; /* must be power of 2 */ + int current_index; + bool chain_skb; + struct mhi_net_chain *chain; + + struct dentry *dentry; + enum MHI_DEBUG_LEVEL msg_lvl; + enum MHI_DEBUG_LEVEL ipc_log_lvl; + void *ipc_log; + + //struct rmnet_port port; +}; + +struct mhi_netdev_priv { + struct mhi_netdev *mhi_netdev; +}; + +/* Try not to make this structure bigger than 128 bytes, since this take space + * in payload packet. 
+ * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf) + */ +struct mhi_netbuf { + struct mhi_buf mhi_buf; /* this must be first element */ + void (*unmap)(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +}; + +static struct mhi_driver mhi_netdev_driver; +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev); + +static __be16 mhi_netdev_ip_type_trans(u8 data) +{ + __be16 protocol = 0; + + /* determine L3 protocol */ + switch (data & 0xf0) { + case 0x40: + protocol = htons(ETH_P_IP); + break; + case 0x60: + protocol = htons(ETH_P_IPV6); + break; + default: + /* default is QMAP */ + protocol = htons(ETH_P_MAP); + break; + } + protocol = htons(ETH_P_MAP); //carl.yin fix set + return protocol; +} + +static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev, + gfp_t gfp, + unsigned int order) +{ + struct page *page; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + void *vaddr; + + page = __dev_alloc_pages(gfp, order); + if (!page) + return NULL; + + vaddr = page_address(page); + + /* we going to use the end of page to store cached data */ + netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf); + + mhi_buf = (struct mhi_buf *)netbuf; + mhi_buf->page = page; + mhi_buf->buf = vaddr; + mhi_buf->len = (void *)netbuf - vaddr; + mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mhi_buf->dma_addr)) { + __free_pages(mhi_buf->page, order); + return NULL; + } + + return netbuf; +} + +static void mhi_netdev_unmap_page(struct device *dev, + dma_addr_t dma_addr, + size_t len, + enum dma_data_direction dir) +{ + dma_unmap_page(dev, dma_addr, len, dir); +} + +static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + const u32 order = mhi_netdev->order; + int i, ret; + + for (i = 0; i < nr_tre; i++) { + struct mhi_buf *mhi_buf; + struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC, + order); + if (!netbuf) + return -ENOMEM; + + mhi_buf = (struct mhi_buf *)netbuf; + netbuf->unmap = mhi_netdev_unmap_page; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue transfer, ret:%d\n", ret); + mhi_netdev_unmap_page(dev, mhi_buf->dma_addr, + mhi_buf->len, DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + return ret; + } + } + + return 0; +} + +static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool; + int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + int i, peak, cur_index, ret; + const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4; + + MSG_VERB("Enter free_desc:%d\n", nr_tre); + + if (!nr_tre) + return; + + /* try going thru reclaim pool first */ + for (i = 0; i < nr_tre; i++) { + /* peak for the next buffer, we going to peak several times, + * and we going to give up if buffers are not yet free + */ + cur_index = mhi_netdev->current_index; + netbuf = NULL; + for (peak = 0; peak < max_peak; peak++) { + struct mhi_netbuf *tmp = netbuf_pool[cur_index]; + + mhi_buf = &tmp->mhi_buf; + + cur_index = (cur_index + 1) & pool_size; + + /* page == 1 idle, buffer is free to reclaim */ + if (page_count(mhi_buf->page) == 1) { + netbuf = 
tmp; + break; + } + } + + /* could not find a free buffer */ + if (!netbuf) + break; + + /* increment reference count so when network stack is done + * with buffer, the buffer won't be freed + */ + get_page(mhi_buf->page); + dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue buffer, ret:%d\n", ret); + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + put_page(mhi_buf->page); + return; + } + mhi_netdev->current_index = cur_index; + } + + /* recyling did not work, buffers are still busy allocate temp pkts */ + if (i < nr_tre) + mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i); +} + +/* allocating pool of memory */ +static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool; + struct mhi_buf *mhi_buf; + const u32 order = mhi_netdev->order; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + + netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool), + GFP_KERNEL); + if (!netbuf_pool) + return -ENOMEM; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + /* allocate paged data */ + netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order); + if (!netbuf) + goto error_alloc_page; + + netbuf->unmap = dma_sync_single_for_cpu; + netbuf_pool[i] = netbuf; + } + + mhi_netdev->netbuf_pool = netbuf_pool; + + return 0; + +error_alloc_page: + for (--i; i >= 0; i--) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + } + + kfree(netbuf_pool); + + return -ENOMEM; +} + +static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + struct mhi_buf *mhi_buf; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, mhi_netdev->order); + } + + kfree(mhi_netdev->netbuf_pool); + mhi_netdev->netbuf_pool = NULL; +} + +static int mhi_netdev_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev; + struct mhi_net_chain *chain = mhi_netdev->chain; + int rx_work = 0; + + MSG_VERB("Entered\n"); + + rx_work = mhi_poll(mhi_dev, budget); + + /* chained skb, push it to stack */ + if (chain && chain->head) { + netif_receive_skb(chain->head); + chain->head = NULL; + } + + if (rx_work < 0) { + MSG_ERR("Error polling ret:%d\n", rx_work); + napi_complete(napi); + return 0; + } + + /* queue new buffers */ + mhi_netdev_queue(mhi_netdev); + + if (rsc_dev) + mhi_netdev_queue(rsc_dev); + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} + +static int mhi_netdev_open(struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + 
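+	/* Channels were already started and RX buffers queued at probe time
+	 * (mhi_prepare_for_transfer() followed by napi_schedule()), so open
+	 * only has to manage the TX queue state below.
+	 */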
MSG_LOG("Opened net dev interface\n"); + + /* tx queue may not necessarily be stopped already + * so stop the queue if tx path is not enabled + */ + if (!mhi_dev->ul_chan) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + return 0; + +} + +static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + if (new_mtu < 0 || mhi_dev->mtu < new_mtu) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int res = 0; + + MSG_VERB("Entered\n"); + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + //qmap_hex_dump(__func__, skb->data, 32); + if (skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (res) { + MSG_VERB("Failed to queue with reason:%d\n", res); + netif_stop_queue(dev); + res = NETDEV_TX_BUSY; + } + + MSG_VERB("Exited\n"); + + return res; +} + +#if 0 +static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr) +{ + struct rmnet_ioctl_extended_s ext_cmd; + int rc = 0; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s)); + if (rc) + return rc; + + switch (ext_cmd.extended_ioctl) { + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + ext_cmd.u.data = 0; + break; + case RMNET_IOCTL_GET_DRIVER_NAME: + strlcpy(ext_cmd.u.if_name, mhi_netdev->interface_name, + sizeof(ext_cmd.u.if_name)); + break; + case RMNET_IOCTL_SET_SLEEP_STATE: + if (ext_cmd.u.data && mhi_netdev->wake) { + /* Request to enable LPM */ + MSG_VERB("Enable MHI LPM"); + mhi_netdev->wake--; + mhi_device_put(mhi_dev); + } else if (!ext_cmd.u.data && !mhi_netdev->wake) { + /* Request to disable LPM */ + MSG_VERB("Disable MHI LPM"); + mhi_netdev->wake++; + mhi_device_get(mhi_dev); + } + break; + default: + rc = -EINVAL; + break; + } + + rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, + sizeof(struct rmnet_ioctl_extended_s)); + return rc; +} + +static int mhi_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + struct rmnet_ioctl_data_s ioctl_data; + + switch (cmd) { + case RMNET_IOCTL_SET_LLP_IP: /* set RAWIP protocol */ + break; + case RMNET_IOCTL_GET_LLP: /* get link protocol state */ + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + case RMNET_IOCTL_GET_OPMODE: /* get operation mode */ + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + case RMNET_IOCTL_SET_QOS_ENABLE: + rc = -EINVAL; + break; + case RMNET_IOCTL_SET_QOS_DISABLE: + rc = 0; + break; + case RMNET_IOCTL_OPEN: + case RMNET_IOCTL_CLOSE: + /* we just ignore them and return success */ + rc = 0; + break; + case 
RMNET_IOCTL_EXTENDED: + rc = mhi_netdev_ioctl_extended(dev, ifr); + break; + default: + /* don't fail any IOCTL right now */ + rc = 0; + break; + } + + return rc; +} +#endif + +static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info) +{ + //struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + /* strlcpy() is deprecated in kernel 6.8.0+, using strscpy instead */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,8,0)) + strlcpy(info->driver, "pcie_mhi", sizeof(info->driver)); + strlcpy(info->version, PCIE_MHI_DRIVER_VERSION, sizeof(info->version)); +#else + strscpy(info->driver, "pcie_mhi", sizeof(info->driver)); + strscpy(info->version, PCIE_MHI_DRIVER_VERSION, sizeof(info->version)); +#endif +} + +static const struct ethtool_ops mhi_netdev_ethtool_ops = { + .get_drvinfo = mhi_netdev_get_drvinfo, +}; + +static const struct net_device_ops mhi_netdev_ops_ip = { + .ndo_open = mhi_netdev_open, + .ndo_start_xmit = mhi_netdev_xmit, + //.ndo_do_ioctl = mhi_netdev_ioctl, + .ndo_change_mtu = mhi_netdev_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +static void mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->watchdog_timeo = WATCHDOG_TIMEOUT; + + dev->ethtool_ops = &mhi_netdev_ethtool_ops; + memcpy (dev->dev_addr, node_id, sizeof node_id); + dev->flags |= IFF_NOARP; +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; + char ifalias[IFALIASZ]; + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; +#ifndef QUECTEL_NO_DTS + struct device_node *of_node = mhi_dev->dev.of_node; +#endif + struct mhi_netdev_priv *mhi_netdev_priv; + + mhi_netdev->alias = 0;//of_alias_get_id(of_node, "mhi-netdev"); + if (mhi_netdev->alias < 0) + mhi_netdev->alias = 0; + +#ifdef QUECTEL_NO_DTS + mhi_netdev->interface_name = "rmnet_mhi"; +#else + + ret = of_property_read_string(of_node, "mhi,interface-name", + &mhi_netdev->interface_name); +#endif + if (ret) + mhi_netdev->interface_name = mhi_netdev_driver.driver.name; + + snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + snprintf(ifname, sizeof(ifname), "%s%%d", mhi_netdev->interface_name); + +#ifdef QUECTEL_NO_DTS + mhi_netdev->ethernet_interface = 0; +#else + mhi_netdev->ethernet_interface = of_property_read_bool(of_node, + "mhi,ethernet-interface"); +#endif + rtnl_lock(); + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); + if (!mhi_netdev->ndev) { + rtnl_unlock(); + return -ENOMEM; + } + + mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); + //dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias)); + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + rtnl_unlock(); + + mhi_netdev->napi = devm_kzalloc(&mhi_dev->dev, + sizeof(*mhi_netdev->napi), GFP_KERNEL); + if (!mhi_netdev->napi) { + ret = -ENOMEM; + goto napi_alloc_fail; + } + + netif_napi_add(mhi_netdev->ndev, mhi_netdev->napi, + mhi_netdev_poll, NAPI_POLL_WEIGHT); + ret = 
register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto net_dev_reg_fail; + } + + napi_enable(mhi_netdev->napi); + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(mhi_netdev->napi); + +napi_alloc_fail: + free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); +} + +static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev, + struct mhi_buf *mhi_buf, + struct mhi_result *mhi_result) +{ + struct sk_buff *skb; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + if (!mhi_netdev->ethernet_interface) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + if (skb_linearize(skb)) + return; + } else { + skb_add_rx_frag(skb, 0, mhi_buf->page, ETH_HLEN, + mhi_result->bytes_xferd - ETH_HLEN, + mhi_netdev->mru); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(((u8 *)mhi_buf->buf)[ETH_HLEN]); + } + netif_receive_skb(skb); +} + +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct mhi_netbuf *netbuf = mhi_result->buf_addr; + struct mhi_buf *mhi_buf = &netbuf->mhi_buf; + struct sk_buff *skb; + struct net_device *ndev = mhi_netdev->ndev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_net_chain *chain = mhi_netdev->chain; + + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE); + + /* modem is down, drop the buffer */ + if (mhi_result->transaction_status == -ENOTCONN) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += mhi_result->bytes_xferd; + + if (unlikely(!chain)) { + mhi_netdev_push_skb(mhi_netdev, mhi_buf, mhi_result); + return; + } + + /* we support chaining */ + skb = alloc_skb(0, GFP_ATOMIC); + if (likely(skb)) { + if (!mhi_netdev->ethernet_interface) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + } else { + skb_add_rx_frag(skb, 0, mhi_buf->page, ETH_HLEN, + mhi_result->bytes_xferd - ETH_HLEN, + mhi_netdev->mru); + } + + /* this is first on list */ + if (!chain->head) { + skb->dev = ndev; + if (!mhi_netdev->ethernet_interface) { + skb->protocol = + mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + } else { + skb->protocol = + mhi_netdev_ip_type_trans(((u8 *)mhi_buf->buf)[ETH_HLEN]); + } + chain->head = skb; + } else { + skb_shinfo(chain->tail)->frag_list = skb; + } + + chain->tail = skb; + } else { + __free_pages(mhi_buf->page, mhi_netdev->order); + } +} + +static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + napi_schedule(mhi_netdev->napi); +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *dentry; + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) 
+{ + char node_name[32]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + /* Both tx & rx client handle contain same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if (IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; +} + +static void mhi_netdev_create_debugfs_dir(void) +{ + dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, 0); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev_private *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(void) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Remove notification received\n"); + + /* rsc parent takes cares of the cleanup */ + if (mhi_netdev->is_rsc_dev) { + mhi_netdev_free_pool(mhi_netdev); + return; + } + + rmnet_data_deinit(mhi_netdev->ndev, 1); + netif_stop_queue(mhi_netdev->ndev); + napi_disable(mhi_netdev->napi); + unregister_netdev(mhi_netdev->ndev); + netif_napi_del(mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev_free_pool(mhi_netdev); + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_match(struct device *dev, void *data) +{ + /* if phandle dt == device dt, we found a match */ + return (dev->of_node == data); +} + +static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev, + struct mhi_netdev *parent) +{ + mhi_netdev->ndev = parent->ndev; + mhi_netdev->napi = parent->napi; + mhi_netdev->ipc_log = parent->ipc_log; + mhi_netdev->msg_lvl = parent->msg_lvl; + mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl; + mhi_netdev->is_rsc_dev = true; + mhi_netdev->chain = parent->chain; +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev, *p_netdev = NULL; +#ifndef QUECTEL_NO_DTS + struct device_node *of_node = mhi_dev->dev.of_node; +#endif + int nr_tre; + char node_name[32]; + struct device_node *phandle; + bool no_chain; + +#ifndef QUECTEL_NO_DTS + if (!of_node) + return -ENODEV; +#endif + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + +#ifdef QUECTEL_NO_DTS + mhi_netdev->mru = 16384; + ret = 0; +#else + ret = of_property_read_u32(of_node, "mhi,mru", &mhi_netdev->mru); +#endif + if (ret) + return -ENODEV; + + /* MRU must be multiplication of page size */ + mhi_netdev->order = __ilog2_u32(mhi_netdev->mru / PAGE_SIZE); + if ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru) + return -EINVAL; + + /* check if this device shared by a parent device */ +#ifdef QUECTEL_NO_DTS + phandle = NULL; +#else + phandle = of_parse_phandle(of_node, "mhi,rsc-parent", 0); +#endif + if (phandle) { + struct device *dev; + struct mhi_device *pdev; + /* find the parent device */ + dev = driver_find_device(mhi_dev->dev.driver, NULL, phandle, + mhi_netdev_match); + if (!dev) + return -ENODEV; + + /* this device is shared with parent device. so we won't be + * creating a new network interface. 
Clone parent + * information to child node + */ + pdev = to_mhi_device(dev); + p_netdev = mhi_device_get_devdata(pdev); + mhi_netdev_clone_dev(mhi_netdev, p_netdev); + put_device(dev); + } else { + mhi_netdev->msg_lvl = MHI_MSG_LVL_ERROR; +#ifdef QUECTEL_NO_DTS + no_chain = 0; +#else + no_chain = of_property_read_bool(of_node, + "mhi,disable-chain-skb"); +#endif + if (!no_chain) { + mhi_netdev->chain = devm_kzalloc(&mhi_dev->dev, + sizeof(*mhi_netdev->chain), + GFP_KERNEL); + if (!mhi_netdev->chain) + return -ENOMEM; + } + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + /* create ipc log buffer */ + snprintf(node_name, sizeof(node_name), + "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_netdev->alias); + mhi_netdev->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, + node_name, 0); + mhi_netdev->ipc_log_lvl = IPC_LOG_LVL; + + mhi_netdev_create_debugfs(mhi_netdev); + } + + /* move mhi channels to start state */ + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start channels ret %d\n", ret); + goto error_start; + } + + rmnet_data_init(mhi_netdev->ndev, 1); + + /* setup pool size ~2x ring length*/ + nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre); + if (nr_tre > mhi_netdev->pool_size) + mhi_netdev->pool_size <<= 1; + mhi_netdev->pool_size <<= 1; + + /* allocate memory pool */ + ret = mhi_netdev_alloc_pool(mhi_netdev); + if (ret) + goto error_start; + + /* link child node with parent node if it's children dev */ + if (p_netdev) + p_netdev->rsc_dev = mhi_netdev; + + /* now we have a pool of buffers allocated, queue to hardware + * by triggering a napi_poll + */ + napi_schedule(mhi_netdev->napi); + + return 0; + +error_start: + if (phandle) + return ret; + + netif_stop_queue(mhi_netdev->ndev); + napi_disable(mhi_netdev->napi); + unregister_netdev(mhi_netdev->ndev); + netif_napi_del(mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + + return ret; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_HW_ADPL" }, + { .chan = "IP_HW0_RSC" }, + { .chan = "IP_SW0" }, + {}, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +static int __init mhi_netdev_init(void) +{ + BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE); + mhi_netdev_create_debugfs_dir(); + + return mhi_driver_register(&mhi_netdev_driver); +} +//module_init(mhi_netdev_init); + +int __init mhi_device_netdev_init(struct dentry *parent) +{ + BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE); + + return mhi_netdev_init(); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} + diff --git a/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c b/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c new file mode 100644 index 0000000..e0ce5cf --- /dev/null +++ b/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c @@ -0,0 +1,3426 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#define CONFIG_IPQ5018_RATE_CONTROL //Only used with spf11.5 for IPQ5018 +#if defined(CONFIG_IPQ5018_RATE_CONTROL) +//#include +#include +#endif + +#include "../core/mhi.h" +//#define MHI_NETDEV_ONE_CARD_MODE +//#define ANDROID_gki //some fuction not allow used in this TEST + +#ifndef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#ifndef ARPHRD_RAWIP +#define ARPHRD_RAWIP ARPHRD_NONE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,2,0 )) +static bool netdev_is_rx_handler_busy(struct net_device *dev) +{ + ASSERT_RTNL(); + return dev && rtnl_dereference(dev->rx_handler); +} +#endif + +struct rmnet_nss_cb { + int (*nss_create)(struct net_device *dev); + int (*nss_free)(struct net_device *dev); + int (*nss_tx)(struct sk_buff *skb); +}; +static struct rmnet_nss_cb __read_mostly *nss_cb = NULL; +#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) +#ifdef CONFIG_RMNET_DATA +#define CONFIG_QCA_NSS_DRV +#define CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY +/* define at qca/src/linux-4.4/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c */ //for spf11.x +/* define at qsdk/qca/src/datarmnet/core/rmnet_config.c */ //for spf12.x +/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */ +/* need add DEPENDS:= kmod-rmnet-core in feeds/makefile */ +extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; +#endif +#endif + + +int mhi_netdev_use_xfer_type_dma(unsigned chan) +{ + (void)chan; +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + return 1; +#endif + return 0; +} + + +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; + +#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) +#define QUECTEL_BRIDGE_MODE +#endif + +#define QUECTEL_RMNET_MODE + +#ifdef QUECTEL_BRIDGE_MODE +static uint __read_mostly bridge_mode = 0/*|BIT(1)*/; +module_param( bridge_mode, uint, S_IRUGO ); +#endif + +struct qmap_hdr { + u8 cd_rsvd_pad; + u8 mux_id; + u16 pkt_len; +} __packed; +#define QUECTEL_QMAP_MUX_ID 0x81 + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} 
__aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u8 reserved2; + u8 ip_family:2; + u8 reserved:6; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +struct mhi_mbim_hdr { + struct usb_cdc_ncm_nth16 nth16; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16[2]; +} __attribute__ ((packed)); + +#define QCUSB_MRECEIVE_MAX_BUFFER_SIZE (1024*32) //maybe 31KB is enough +#define QCUSB_MTRANSMIT_MAX_BUFFER_SIZE (1024*16) +#define NTB_OUT_MAX_DATAGRAMS 16 + +static const struct usb_cdc_ncm_ntb_parameters ncmNTBParams = { + .bmNtbFormatsSupported = USB_CDC_NCM_NTB16_SUPPORTED, + .dwNtbInMaxSize = QCUSB_MRECEIVE_MAX_BUFFER_SIZE, + .wNdpInDivisor = 0x04, + .wNdpInPayloadRemainder = 0x0, + .wNdpInAlignment = 0x4, + + .dwNtbOutMaxSize = QCUSB_MTRANSMIT_MAX_BUFFER_SIZE, + .wNdpOutDivisor = 0x04, + .wNdpOutPayloadRemainder = 0x0, + .wNdpOutAlignment = 0x4, + .wNtbOutMaxDatagrams = NTB_OUT_MAX_DATAGRAMS, +}; + +#if 0 +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { + uint i; + uint *d = (uint *)data; + + printk(KERN_DEBUG "%s data=%p, len=%x\n", tag, data, len); + len = (len+3)/4; + for (i = 0; i < len; i+=4) { + printk(KERN_DEBUG "%08x %08x %08x %08x %08x\n", i*4, d[i+0], d[i+1], d[i+2], d[i+3]); + } +} +#else +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { +} +#endif + +#define MBIM_MUX_ID_SDX7X 112 //sdx7x is 112-126, others is 0-14 + +static uint __read_mostly mhi_mbim_enabled = 0; +module_param(mhi_mbim_enabled, uint, S_IRUGO); +int mhi_netdev_mbin_enabled(void) { return mhi_mbim_enabled; } + +static uint __read_mostly qmap_mode = 1; +module_param(qmap_mode, uint, S_IRUGO); + +static uint __read_mostly poll_weight = NAPI_POLL_WEIGHT; +module_param(poll_weight, uint, S_IRUGO); + +#define MHI_NETDEV_DRIVER_NAME "mhi_netdev" +#define WATCHDOG_TIMEOUT (30 * HZ) + +#define MSG_VERB(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MSG_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +struct mhi_stats { + u32 rx_int; + u32 tx_full; + u32 tx_pkts; + u32 rx_budget_overflow; + u32 tx_allocated; + u32 rx_allocated; + u32 alloc_failed; +}; + +/* important: do not exceed sk_buf->cb (48 bytes) */ +struct mhi_skb_priv { + void *buf; + size_t size; + struct mhi_netdev *bind_netdev; +}; + +struct skb_data { /* skb->cb is one of these */ + struct mhi_netdev *bind_netdev; + unsigned int length; + unsigned int packets; +}; + +#define MHI_NETDEV_STATUS64 1 + +typedef struct { + uint size; + uint rx_urb_size; + uint ep_type; + uint iface_id; + uint MuxId; + uint ul_data_aggregation_max_datagrams; //0x17 + uint ul_data_aggregation_max_size ;//0x18 + uint dl_minimum_padding; //0x1A +} QMAP_SETTING; + +typedef struct { + unsigned int size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int qmap_mode; + unsigned int qmap_version; + unsigned int dl_minimum_padding; + char ifname[8][16]; + unsigned char mux_id[8]; +} RMNET_INFO; + +typedef struct { + u16 tx_seq; + u16 rx_seq; + u32 rx_max; +} MHI_MBIM_CTX; + +enum mhi_net_type { + MHI_NET_UNKNOW, + MHI_NET_RMNET, + MHI_NET_MBIM, + MHI_NET_ETHER +}; + +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY +/* Try not to make this structure bigger than 128 bytes, since this take space + * in payload packet. + * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf) + */ +struct mhi_netbuf { + struct mhi_buf mhi_buf; /* this must be first element */ + void (*unmap)(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +}; + +struct mhi_net_chain { + struct sk_buff *head, *tail; /* chained skb */ +}; +#endif + +//#define TS_DEBUG +struct mhi_netdev { + int alias; + struct mhi_device *mhi_dev; + spinlock_t rx_lock; + bool enabled; + rwlock_t pm_lock; /* state change lock */ + int (*rx_queue)(struct mhi_netdev *mhi_netdev, gfp_t gfp_t); + struct delayed_work alloc_work; + int wake; + + struct sk_buff_head tx_allocated; + struct sk_buff_head rx_allocated; + struct sk_buff_head qmap_chain; + struct sk_buff_head skb_chain; +#ifdef TS_DEBUG + uint clear_ts; + struct timespec diff_ts; + struct timespec qmap_ts; + struct timespec skb_ts; +#endif + + MHI_MBIM_CTX mbim_ctx; + u32 mbim_mux_id; + + u32 mru; + u32 max_mtu; + const char *interface_name; + struct napi_struct napi; + struct net_device *ndev; + enum mhi_net_type net_type; + struct sk_buff *frag_skb; + bool recycle_buf; + +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + u32 order; + struct mhi_netbuf **netbuf_pool; + int pool_size; /* must be power of 2 */ + int current_index; + struct mhi_net_chain chain; +#endif + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + struct mhi_stats stats; + + struct dentry *dentry; + enum MHI_DEBUG_LEVEL msg_lvl; + + struct net_device *mpQmapNetDev[8]; + u32 qmap_mode; + u32 qmap_version; // 5 ~ QMAP V1, 9 ~ QMAP V5 + u32 qmap_size; + u32 link_state; + u32 flow_control; + u32 dl_minimum_padding; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#endif + uint use_rmnet_usb; + RMNET_INFO rmnet_info; + +#if defined(CONFIG_IPQ5018_RATE_CONTROL) + u64 first_jiffy; + u64 bytes_received_1; + u64 bytes_received_2; + u32 cntfrq_per_msec; + bool mhi_rate_control; +#endif + + u32 rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH]; +}; + +struct mhi_netdev_priv { + struct mhi_netdev 
*mhi_netdev; +}; + +struct qmap_priv { + void *pQmapDev; + struct net_device *real_dev; + struct net_device *self_dev; + u8 offset_id; + u8 mux_id; + u8 qmap_version; // 5~v1, 9~v5 + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + + spinlock_t agg_lock; + struct sk_buff *agg_skb; + unsigned agg_count; + struct timespec64 agg_time; + struct hrtimer agg_hrtimer; + struct work_struct agg_wq; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#endif + uint use_qca_nss; +}; + +static struct mhi_netdev *ndev_to_mhi(struct net_device *ndev) { + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(ndev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + return mhi_netdev; +} + +static struct mhi_driver mhi_netdev_driver; +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev); + +#if 0 +static void mhi_netdev_skb_destructor(struct sk_buff *skb) +{ + struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); + struct mhi_netdev *mhi_netdev = skb_priv->mhi_netdev; + + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + MHI_ASSERT(skb->data != skb_priv->buf, "incorrect buf"); + skb_queue_tail(&mhi_netdev->rx_allocated, skb); +} +#endif + +#ifdef QUECTEL_BRIDGE_MODE +static const struct net_device_ops mhi_netdev_ops_ip; +static const struct net_device_ops rmnet_vnd_ops; + +static int is_qmap_netdev(const struct net_device *ndev) { + return ndev->netdev_ops == &rmnet_vnd_ops; +} + +static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4], ipv4[4]; + struct sk_buff *reply = NULL; + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net), + sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]); + //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255 +#ifndef ANDROID_gki + if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3]) + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha); +#endif + + if (reply) { + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) + netif_rx(reply); +#else + netif_rx_ni(reply); +#endif + } + return 1; + } + + return 0; +} + +static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) { + struct ethhdr *ehdr; + const struct iphdr *iph; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + if (bridge_ipv4) + bridge_arp_reply(net, skb, bridge_ipv4); + return NULL; + } + + iph = ip_hdr(skb); + 
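+	/* Learn the bridged host's MAC address from its DHCP broadcast: a UDP
+	 * packet sent from 0.0.0.0 to 255.255.255.255 is taken to be the DHCP
+	 * Discover/Request of the attached PC, and its source MAC is cached in
+	 * bridge_mac so that downlink frames can be rewritten to it later in
+	 * bridge_mode_rx_fixup().
+	 */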
//DBG("iphdr: "); + //PrintHex((void *)iph, sizeof(struct iphdr)); + +// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7 + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) { + //if (udp_hdr(skb)->dest == htons(67)) //DHCP Request + { + memcpy(bridge_mac, ehdr->h_source, ETH_ALEN); + pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]); + } + } + + if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) { + return NULL; + } + + return skb; +} + +static void bridge_mode_rx_fixup(struct mhi_netdev *mhi_netdev, struct net_device *net, struct sk_buff *skb) { + uint bridge_mode = 0; + unsigned char *bridge_mac; + + if (mhi_netdev->qmap_mode > 0) { + struct qmap_priv *priv = netdev_priv(net); + bridge_mode = priv->bridge_mode; + bridge_mac = priv->bridge_mac; + } + else { + bridge_mode = mhi_netdev->bridge_mode; + bridge_mac = mhi_netdev->bridge_mac; + } + + if (bridge_mode) + memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN); +} + +static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = 0; + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + bridge_mode = priv->bridge_mode; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + bridge_mode = mhi_netdev->bridge_mode; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", bridge_mode); +} + +static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = simple_strtoul(buf, NULL, 0); + + if (ndev->type != ARPHRD_ETHER) { + if (bridge_mode) + netdev_err(ndev, "netdevice is not ARPHRD_ETHER\n"); + return count; + } + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + priv->bridge_mode = bridge_mode; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + mhi_netdev->bridge_mode = bridge_mode; + } + + return count; +} + + +static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + unsigned int bridge_ipv4 = 0; + unsigned char ipv4[4]; + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + bridge_ipv4 = priv->bridge_ipv4; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + bridge_ipv4 = mhi_netdev->bridge_ipv4; + } + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]); +} + +static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + priv->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + mhi_netdev->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + + return count; +} + +static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store); +static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store); + +#ifndef 
MHI_NETDEV_ONE_CARD_MODE +static struct attribute *pcie_mhi_qmap_sysfs_attrs[] = { + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, + NULL, +}; + +static struct attribute_group pcie_mhi_qmap_sysfs_attr_group = { + .attrs = pcie_mhi_qmap_sysfs_attrs, +}; +#endif +#endif + +static struct sk_buff * add_mbim_hdr(struct sk_buff *skb, u8 mux_id) { + struct mhi_mbim_hdr *mhdr; + __le32 sign; + u8 *c; + u16 tci = mux_id; + unsigned int skb_len = skb->len; + + if (qmap_mode > 1) + tci += 1; //rmnet_mhi0.X map to session X + + if (skb_headroom(skb) < sizeof(struct mhi_mbim_hdr)) { + printk("skb_headroom small! headroom is %u, need %zd\n", skb_headroom(skb), sizeof(struct mhi_mbim_hdr)); + return NULL; + } + + skb_push(skb, sizeof(struct mhi_mbim_hdr)); + + mhdr = (struct mhi_mbim_hdr *)skb->data; + + //printk("%s %p\n", __func__, skb->data); + mhdr->nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); + mhdr->nth16.wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); +/* + Sequence number. The transmitter of a block shall set this to zero in the first NTB transferred after every 'function reset' event, + and shall increment for every NTB subsequently transferred. + The effect of an out-of-sequence block on the receiver is not specified. + Thespecification allows the receiver to decide whether tocheck the sequence number, + and to decide how to respond if it's incorrect. The sequence number is pri-marily supplied for debugging purposes. +*/ + //mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->tx_seq++); +/* + Size of this NTB in bytes. Represented in little-endian form. + NTB size (IN/OUT) shall not exceed dwNtbInMaxSize or dwNtbOutMaxSize respectively +*/ + mhdr->nth16.wBlockLength = cpu_to_le16(skb->len); +/* + Offset, in little endian, of the first NDP16 from byte zeroof the NTB. 
+ This value must be a multiple of 4, and must be >= 0x000C +*/ + mhdr->nth16.wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + + sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); + c = (u8 *)&sign; + c[3] = tci; + + mhdr->ndp16.dwSignature = sign; + mhdr->ndp16.wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2); + mhdr->ndp16.wNextNdpIndex = 0; + + mhdr->ndp16.dpe16[0].wDatagramIndex = sizeof(struct mhi_mbim_hdr); + mhdr->ndp16.dpe16[0].wDatagramLength = skb_len; + + mhdr->ndp16.dpe16[1].wDatagramIndex = 0; + mhdr->ndp16.dpe16[1].wDatagramLength = 0; + + return skb; +} + +static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) { + struct qmap_hdr *qhdr; + int pad = 0; + + pad = skb->len%4; + if (pad) { + pad = 4 - pad; + if (skb_tailroom(skb) < pad) { + printk("skb_tailroom small!\n"); + pad = 0; + } + if (pad) + __skb_put(skb, pad); + } + + qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr)); + qhdr->cd_rsvd_pad = pad; + qhdr->mux_id = mux_id; + qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr)); + + return skb; +} + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) { + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + if (skb->ip_summed == CHECKSUM_PARTIAL) { +#if 0 //TODO + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; +#endif + } + + return skb; +} + +static void rmnet_map_send_ack(struct mhi_netdev *pQmapDev, + unsigned char type, + struct rmnet_map_header *map_header) +{ + struct rmnet_map_control_command *cmd; + struct sk_buff *skb; + size_t skb_len = sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_control_command); + + skb = alloc_skb(skb_len, GFP_ATOMIC); + if (!skb) + return; + + skb_put(skb, skb_len); + memcpy(skb->data, map_header, skb_len); + cmd = (struct rmnet_map_control_command *)(skb->data + sizeof(struct rmnet_map_header)); + cmd->cmd_type = type & 0x03; + skb->protocol = htons(ETH_P_MAP); + skb->dev = pQmapDev->ndev; + dev_queue_xmit(skb); +} + +static int rmnet_data_vnd_do_flow_control(struct net_device *dev, + uint32_t map_flow_id, + uint16_t v4_seq, + uint16_t v6_seq, + int enable) +{ + //TODO + return 0; +} + +static uint8_t rmnet_map_do_flow_control(struct mhi_netdev *pQmapDev, + struct rmnet_map_header *map_header, + int enable) { + struct net_device *ndev = pQmapDev->ndev; + struct rmnet_map_control_command *cmd; + struct net_device *vnd; + uint8_t mux_id; + uint16_t ip_family; + uint16_t fc_seq; + uint32_t qos_id; + int r; + + cmd = (struct rmnet_map_control_command *)(map_header + 1); + + mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID; + if (mux_id >= pQmapDev->qmap_mode) { + netdev_info(ndev, "drop qmap unknow 
mux_id %x\n", map_header->mux_id); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } + + vnd = pQmapDev->mpQmapNetDev[mux_id]; + if (vnd == NULL) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + if (enable) + pQmapDev->flow_control |= (1 << mux_id); + else + pQmapDev->flow_control &= ~(1 << mux_id); + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_data_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable); + netdev_dbg(vnd, "qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d", + qos_id, ip_family & 3, fc_seq, enable); + + return RMNET_MAP_COMMAND_ACK; +} + +static void rmnet_data_map_command(struct mhi_netdev *pQmapDev, + struct rmnet_map_header *map_header) { + struct net_device *ndev = pQmapDev->ndev; + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = (struct rmnet_map_control_command *)(map_header + 1); + command_name = cmd->command_name; + + if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH) + pQmapDev->rmnet_map_command_stats[command_name]++; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_map_do_flow_control(pQmapDev, map_header, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_map_do_flow_control(pQmapDev, map_header, 0); + break; + + default: + pQmapDev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++; + netdev_info(ndev, "UNSupport MAP command: %d", command_name); + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + break; + } + + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_map_send_ack(pQmapDev, rc, map_header); + + return; +} + +#ifndef MHI_NETDEV_ONE_CARD_MODE +static void rmnet_vnd_upate_rx_stats(struct net_device *net, + unsigned rx_packets, unsigned rx_bytes) { +#if defined(MHI_NETDEV_STATUS64) + struct qmap_priv *dev = netdev_priv(net); + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + + u64_stats_update_begin(&stats64->syncp); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + stats64->rx_packets += rx_packets; + stats64->rx_bytes += rx_bytes; +#else + u64_stats_add(&stats64->rx_packets, rx_packets); + u64_stats_add(&stats64->rx_bytes, rx_bytes); +#endif + u64_stats_update_end(&stats64->syncp); +#else + priv->self_dev->stats.rx_packets += rx_packets; + priv->self_dev->stats.rx_bytes += rx_bytes; +#endif +} + +static void rmnet_vnd_upate_tx_stats(struct net_device *net, + unsigned tx_packets, unsigned tx_bytes) { +#if defined(MHI_NETDEV_STATUS64) + struct qmap_priv *dev = netdev_priv(net); + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + + u64_stats_update_begin(&stats64->syncp); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + stats64->tx_packets += tx_packets; + stats64->tx_bytes += tx_bytes; +#else + u64_stats_add(&stats64->tx_packets, tx_packets); + u64_stats_add(&stats64->tx_bytes, tx_bytes); +#endif + u64_stats_update_end(&stats64->syncp); +#else + net->stats.rx_packets += tx_packets; + net->stats.rx_bytes += tx_bytes; +#endif +} + +#if defined(MHI_NETDEV_STATUS64) +#ifdef ANDROID_gki +static void _netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, + const struct net_device_stats *netdev_stats) +{ +#if BITS_PER_LONG == 64 + BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); + 
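+	/* On 64-bit builds the leading fields of rtnl_link_stats64 mirror
+	 * struct net_device_stats, so a memcpy plus zeroing of the trailing
+	 * 64-bit-only counters is sufficient; the #else branch below widens
+	 * each unsigned long counter to u64 one at a time for 32-bit builds. */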
memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); + /* zero out counters that only exist in rtnl_link_stats64 */ + memset((char *)stats64 + sizeof(*netdev_stats), 0, + sizeof(*stats64) - sizeof(*netdev_stats)); +#else + size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); + const unsigned long *src = (const unsigned long *)netdev_stats; + u64 *dst = (u64 *)stats64; + + BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); + for (i = 0; i < n; i++) + dst[i] = src[i]; + /* zero out counters that only exist in rtnl_link_stats64 */ + memset((char *)stats64 + n * sizeof(u64), 0, + sizeof(*stats64) - n * sizeof(u64)); +#endif +} +#else +static void my_netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, + const struct net_device_stats *netdev_stats) +{ + netdev_stats_to_stats64(stats64, netdev_stats); +} +#endif + +static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) +{ + struct qmap_priv *dev = netdev_priv(net); + unsigned int start; + int cpu; + + my_netdev_stats_to_stats64(stats, &net->stats); + + if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats + stats->rx_packets = 0; + stats->rx_bytes = 0; + } + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(dev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; +#else + u64_stats_t rx_packets, rx_bytes; + u64_stats_t tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(dev->stats64, cpu); + + do { + start = u64_stats_fetch_begin(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry(&stats64->syncp, start)); + + stats->rx_packets += u64_stats_read(&rx_packets); + stats->rx_bytes += u64_stats_read(&rx_bytes); + stats->tx_packets += u64_stats_read(&tx_packets); + stats->tx_bytes += u64_stats_read(&tx_bytes); +#endif + } + + return stats; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + _rmnet_vnd_get_stats64(net, stats); +} +#else +static struct rtnl_link_stats64 *rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + return _rmnet_vnd_get_stats64(net, stats); +} +#endif +#endif + +static void rmnet_vnd_tx_agg_work(struct work_struct *work) +{ + struct qmap_priv *priv = + container_of(work, struct qmap_priv, agg_wq); + struct sk_buff *skb = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->agg_lock, flags); + if (likely(priv->agg_skb)) { + skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + ktime_get_ts64(&priv->agg_time); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (skb) + dev_queue_xmit(skb); +} + +static enum hrtimer_restart rmnet_vnd_tx_agg_timer_cb(struct hrtimer *timer) +{ + struct qmap_priv *priv = + container_of(timer, struct 
qmap_priv, agg_hrtimer); + + schedule_work(&priv->agg_wq); + return HRTIMER_NORESTART; +} + +static int rmnet_vnd_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) { + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + + return dev_queue_xmit(skb); +} + + +static int rmnet_vnd_open(struct net_device *dev) +{ + struct qmap_priv *priv = netdev_priv(dev); + struct net_device *real_dev = priv->real_dev; + + if (!(priv->real_dev->flags & IFF_UP)) + return -ENETDOWN; + + if (netif_carrier_ok(real_dev)) + netif_carrier_on(dev); + + return 0; +} + +static int rmnet_vnd_stop(struct net_device *pNet) +{ + netif_carrier_off(pNet); + return 0; +} + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *pNet) +{ + int err; + struct qmap_priv *priv = netdev_priv(pNet); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(priv->real_dev); + int skb_len = skb->len; + + if (netif_queue_stopped(priv->real_dev)) { + netif_stop_queue(pNet); + return NETDEV_TX_BUSY; + } + + //printk("%s 1 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (pNet->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if (skb_pull(skb, ETH_HLEN) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + //printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (mhi_netdev->net_type == MHI_NET_MBIM) { + if (add_mbim_hdr(skb, priv->mux_id) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + else { + if (priv->qmap_version == 5) { + add_qhdr(skb, priv->mux_id); + } + else if (priv->qmap_version == 9) { + add_qhdr_v5(skb, priv->mux_id); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + err = rmnet_vnd_tx_agg(skb, priv); + if (err == NET_XMIT_SUCCESS) { + rmnet_vnd_upate_tx_stats(pNet, 1, skb_len); + } else { + pNet->stats.tx_errors++; + } + + return err; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + struct mhi_netdev *mhi_netdev; + + mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(rmnet_dev); + + if (mhi_netdev == NULL) { + printk("warning, mhi_netdev == null\n"); + return -EINVAL; + } + + if (new_mtu < 0 ) + return -EINVAL; + + if (new_mtu > mhi_netdev->max_mtu) { + printk("warning, set mtu=%d greater than max mtu=%d\n", new_mtu, mhi_netdev->max_mtu); + return -EINVAL; + } + + rmnet_dev->mtu = new_mtu; + return 0; +} + +/* drivers may override default ethtool_ops in their bind() routine */ +static const struct ethtool_ops rmnet_vnd_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static void rmnet_vnd_rawip_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->needed_headroom = 16; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ +//for Qualcomm's NSS, must set type as ARPHRD_RAWIP, or NSS performace is very bad. + rmnet_dev->type = ARPHRD_RAWIP; // do not support moify mac, for dev_set_mac_address() need ARPHRD_ETHER + rmnet_dev->hard_header_len = 0; +//for Qualcomm's SFE, do not add IFF_POINTOPOINT to type, or SFE donot work. 
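+	/* Raw-IP link: there are no Ethernet broadcast/multicast semantics, so
+	 * both flags are cleared below. */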
+ rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_open = rmnet_vnd_open, + .ndo_stop = rmnet_vnd_stop, + .ndo_start_xmit = rmnet_vnd_start_xmit, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = rmnet_vnd_get_stats64, +#endif + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + if (!skb) + return RX_HANDLER_CONSUMED; + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + return RX_HANDLER_PASS; + } + + if (nss_cb) { + nss_cb->nss_tx(skb); + return RX_HANDLER_CONSUMED; + } + + return RX_HANDLER_PASS; +} +#endif + +static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in) +{ + struct mhi_netdev *pQmapDev = (struct mhi_netdev *)dev; + struct mhi_netdev *mhi_netdev = (struct mhi_netdev *)dev; + MHI_MBIM_CTX *ctx = &pQmapDev->mbim_ctx; + //struct net_device *ndev = pQmapDev->ndev; + struct usb_cdc_ncm_nth16 *nth16; + int ndpoffset, len; + u16 wSequence; + struct sk_buff_head skb_chain; + struct sk_buff *qmap_skb; + + __skb_queue_head_init(&skb_chain); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("frame too short\n"); + goto error; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + MSG_ERR("invalid NTH16 signature <%#010x>\n", le32_to_cpu(nth16->dwSignature)); + goto error; + } + + len = le16_to_cpu(nth16->wBlockLength); + if (len > ctx->rx_max) { + MSG_ERR("unsupported NTB block length %u/%u\n", len, ctx->rx_max); + goto error; + } + + wSequence = le16_to_cpu(nth16->wSequence); + if (ctx->rx_seq != wSequence) { + MSG_ERR("sequence number glitch prev=%d curr=%d\n", ctx->rx_seq, wSequence); + } + ctx->rx_seq = wSequence + 1; + + ndpoffset = nth16->wNdpIndex; + + while (ndpoffset > 0) { + struct usb_cdc_ncm_ndp16 *ndp16 ; + struct usb_cdc_ncm_dpe16 *dpe16; + int nframes, x; + u8 *c; + u16 tci = 0; + struct net_device *qmap_net; + + if (skb_in->len < (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("invalid NDP offset <%u>\n", ndpoffset); + goto error; + } + + ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset); + + if (le16_to_cpu(ndp16->wLength) < 0x10) { + MSG_ERR("invalid DPT16 length <%u>\n", le16_to_cpu(ndp16->wLength)); + goto error; + } + + nframes = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16)); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_ndp16) + nframes * (sizeof(struct usb_cdc_ncm_dpe16)))) { + MSG_ERR("Invalid nframes = %d\n", nframes); + goto error; + } + + switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) { + case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3]; + /* tag IPS<0> packets too if MBIM_IPS0_VID exists */ + //if (!tci && info->flags & FLAG_IPS0_VLAN) + // tci = MBIM_IPS0_VID; + break; + case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3] + 256; + break; + default: + MSG_ERR("unsupported NDP signature <0x%08x>\n", le32_to_cpu(ndp16->dwSignature)); + goto error; + } + + 
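+		/*
+		 * The IPS session id (tci) extracted from the NDP16 signature selects
+		 * the per-session virtual netdev in mpQmapNetDev[]; sessions outside
+		 * the configured qmap_mode range are rejected below and the whole NTB
+		 * is dropped.
+		 */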
if ((qmap_mode == 1 && tci != mhi_netdev->mbim_mux_id) || (qmap_mode > 1 && (tci - mhi_netdev->mbim_mux_id) > qmap_mode)){ + MSG_ERR("unsupported tci %d by now\n", tci); + goto error; + } + tci = abs(tci); + qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 0 : tci - 1 - mhi_netdev->mbim_mux_id]; + + dpe16 = ndp16->dpe16; + + for (x = 0; x < nframes; x++, dpe16++) { + int offset = le16_to_cpu(dpe16->wDatagramIndex); + int skb_len = le16_to_cpu(dpe16->wDatagramLength); + + if (offset == 0 || skb_len == 0) { + break; + } + + /* sanity checking */ + if (((offset + skb_len) > skb_in->len) || (skb_len > ctx->rx_max)) { + MSG_ERR("invalid frame detected (ignored) x=%d, offset=%d, skb_len=%u\n", x, offset, skb_len); + goto error; + } + + qmap_skb = netdev_alloc_skb(qmap_net, skb_len); + if (!qmap_skb) { + mhi_netdev->stats.alloc_failed++; + //MSG_ERR("skb_clone fail\n"); //do not print in softirq + goto error; + } + + switch (skb_in->data[offset] & 0xf0) { + case 0x40: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[offset]); + if (ip4h->protocol == IPPROTO_ICMP) { + qmap_skb->cb[0] = 1; + } + } +#endif + qmap_skb->protocol = htons(ETH_P_IP); + break; + case 0x60: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[offset]); + if (ip6h->nexthdr == NEXTHDR_ICMP) { + qmap_skb->cb[0] = 1; + } + } +#endif + qmap_skb->protocol = htons(ETH_P_IPV6); + break; + default: + MSG_ERR("unknow skb->protocol %02x\n", skb_in->data[offset]); + goto error; + } + + skb_put(qmap_skb, skb_len); + memcpy(qmap_skb->data, skb_in->data + offset, skb_len); + + skb_reset_transport_header(qmap_skb); + skb_reset_network_header(qmap_skb); + qmap_skb->pkt_type = PACKET_HOST; + skb_set_mac_header(qmap_skb, 0); + + if (qmap_skb->dev->type == ARPHRD_ETHER) { + skb_push(qmap_skb, ETH_HLEN); + skb_reset_mac_header(qmap_skb); + memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN); + memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN); + eth_hdr(qmap_skb)->h_proto = qmap_skb->protocol; +#ifdef QUECTEL_BRIDGE_MODE + bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb); +#endif + __skb_pull(qmap_skb, ETH_HLEN); + } + +#ifndef MHI_NETDEV_ONE_CARD_MODE + rmnet_vnd_upate_rx_stats(qmap_net, 1, skb_len); +#endif + __skb_queue_tail(&skb_chain, qmap_skb); + } + + /* are there more NDPs to process? 
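(a wNextNdpIndex of zero terminates this outer while loop)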
*/ + ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex); + } + +error: + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + netif_receive_skb(qmap_skb); + } +} + +static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in) +{ + struct mhi_netdev *pQmapDev = (struct mhi_netdev *)dev; + struct net_device *ndev = pQmapDev->ndev; + struct sk_buff *qmap_skb; + struct sk_buff_head skb_chain; + uint dl_minimum_padding = 0; + + if (pQmapDev->qmap_version == 9) + dl_minimum_padding = pQmapDev->dl_minimum_padding; + + __skb_queue_head_init(&skb_chain); + + while (skb_in->len > sizeof(struct qmap_hdr)) { + struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data; + struct rmnet_map_v5_csum_header *ul_header = NULL; + size_t hdr_size = sizeof(struct rmnet_map_header); + struct net_device *qmap_net; + int pkt_len = ntohs(map_header->pkt_len); + int skb_len; + __be16 protocol; + int mux_id; + int skip_nss = 0; + + if (map_header->next_hdr) { + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + hdr_size += sizeof(struct rmnet_map_v5_csum_header); + } + + skb_len = pkt_len - (map_header->pad_len&0x3F); +#if 0 //just for debug dl_minimum_padding BUG + if ((skb_in->data[hdr_size] & 0xf0) == 0x45) { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]); + if (ntohs(ip4h->tot_len) != skb_len) { + netdev_info(ndev, "tot_len=%d skb_len=%d\n", ntohs(ip4h->tot_len), skb_len); + } + } +#endif + skb_len -= dl_minimum_padding; + + mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID; + if (mux_id >= pQmapDev->qmap_mode) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + goto error_pkt; + } + mux_id = abs(mux_id); + qmap_net = pQmapDev->mpQmapNetDev[mux_id]; + if (qmap_net == NULL) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + goto skip_pkt; + } + + if (skb_len > qmap_net->mtu) { + netdev_info(ndev, "drop skb_len=%x larger than qmap mtu=%d\n", skb_len, qmap_net->mtu); + goto error_pkt; + } + + if (skb_in->len < (pkt_len + hdr_size)) { + netdev_info(ndev, "drop qmap unknow pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len); + goto error_pkt; + } + + if (map_header->cd_bit) { + rmnet_data_map_command(pQmapDev, map_header); + goto skip_pkt; + } + + switch (skb_in->data[hdr_size] & 0xf0) { + case 0x40: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]); + if (ip4h->protocol == IPPROTO_ICMP) { + skip_nss = 1; + } + } +#endif + protocol = htons(ETH_P_IP); + break; + case 0x60: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[hdr_size]); + if (ip6h->nexthdr == NEXTHDR_ICMP) { + skip_nss = 1; + } + } +#endif + protocol = htons(ETH_P_IPV6); + break; + default: + netdev_info(ndev, "unknow skb->protocol %02x\n", skb_in->data[hdr_size]); + goto error_pkt; + } + +//for Qualcomm's SFE, do not use skb_clone(), or SFE 's performace is very bad. +//for Qualcomm's NSS, do not use skb_clone(), or NSS 's performace is very bad. 
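+		/* Copy each de-aggregated QMAP payload into a fresh skb owned by the
+		 * destination virtual netdev instead of cloning skb_in, so the buffer
+		 * handed to the stack (and to SFE/NSS) is linear and unshared. */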
+ qmap_skb = netdev_alloc_skb(qmap_net, skb_len); + if (qmap_skb) { + skb_put(qmap_skb, skb_len); + memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len); + } + + if (qmap_skb == NULL) { + pQmapDev->stats.alloc_failed++; + //netdev_info(ndev, "fail to alloc skb, pkt_len = %d\n", skb_len); //do not print in softirq + goto error_pkt; + } + + skb_reset_transport_header(qmap_skb); + skb_reset_network_header(qmap_skb); + qmap_skb->pkt_type = PACKET_HOST; + skb_set_mac_header(qmap_skb, 0); + qmap_skb->protocol = protocol; + + if(skip_nss) + qmap_skb->cb[0] = 1; + + if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD + && ul_header->csum_valid_required) { +#if 0 //TODO + qmap_skb->ip_summed = CHECKSUM_UNNECESSARY; +#endif + } + + if (qmap_skb->dev->type == ARPHRD_ETHER) { + skb_push(qmap_skb, ETH_HLEN); + skb_reset_mac_header(qmap_skb); + memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN); + memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN); + eth_hdr(qmap_skb)->h_proto = protocol; +#ifdef QUECTEL_BRIDGE_MODE + bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb); +#endif + __skb_pull(qmap_skb, ETH_HLEN); + } + +#ifndef MHI_NETDEV_ONE_CARD_MODE + rmnet_vnd_upate_rx_stats(qmap_net, 1, skb_len); +#endif + __skb_queue_tail(&skb_chain, qmap_skb); + +skip_pkt: + skb_pull(skb_in, pkt_len + hdr_size); + } + +error_pkt: + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + netif_receive_skb(qmap_skb); + } +} + +#ifndef MHI_NETDEV_ONE_CARD_MODE +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct mhi_netdev *mhi_netdev; + + if (!skb) + goto done; + + //printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + /* when open hyfi function, run cm will make system crash */ + //dev = rcu_dereference(skb->dev->rx_handler_data); + mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(skb->dev); + + if (mhi_netdev == NULL) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + if (mhi_netdev->net_type == MHI_NET_MBIM) + rmnet_mbim_rx_handler(mhi_netdev, skb); + else + rmnet_qmi_rx_handler(mhi_netdev, skb); + + if (!skb_cloned(skb)) { + if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) { + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + skb_queue_tail(&mhi_netdev->rx_allocated, skb); + return RX_HANDLER_CONSUMED; + } + } + + consume_skb(skb); + +done: + return RX_HANDLER_CONSUMED; +} + +static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev, u8 offset_id, u8 mux_id) +{ + struct net_device *real_dev = pQmapDev->ndev; + struct net_device *qmap_net; + struct qmap_priv *priv; + int err; + int use_qca_nss = !!nss_cb; + unsigned char temp_addr[ETH_ALEN]; + + qmap_net = alloc_etherdev(sizeof(*priv)); + if (!qmap_net) + return NULL; + + SET_NETDEV_DEV(qmap_net, &real_dev->dev); + priv = netdev_priv(qmap_net); + priv->offset_id = offset_id; + priv->real_dev = pQmapDev->ndev; + priv->self_dev = qmap_net; + priv->pQmapDev = pQmapDev; + priv->qmap_version = pQmapDev->qmap_version; + priv->mux_id = mux_id; + sprintf(qmap_net->name, "%.12s.%d", real_dev->name, offset_id + 1); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + __dev_addr_set(qmap_net, real_dev->dev_addr, ETH_ALEN); +#else + memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN); +#endif + //qmap_net->dev_addr[5] = 
offset_id + 1; + //eth_random_addr(qmap_net->dev_addr); + memcpy(temp_addr, qmap_net->dev_addr, ETH_ALEN); + temp_addr[5] = offset_id + 1; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + __dev_addr_set(qmap_net, temp_addr, ETH_ALEN); +#else + memcpy(qmap_net->dev_addr, temp_addr, ETH_ALEN); +#endif +#if defined(MHI_NETDEV_STATUS64) + priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!priv->stats64) + goto out_free_newdev; +#endif + +#ifdef QUECTEL_BRIDGE_MODE + priv->bridge_mode = !!(pQmapDev->bridge_mode & BIT(offset_id)); + qmap_net->sysfs_groups[0] = &pcie_mhi_qmap_sysfs_attr_group; + if (priv->bridge_mode) + use_qca_nss = 0; +#endif + + priv->agg_skb = NULL; + priv->agg_count = 0; + hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->agg_hrtimer.function = rmnet_vnd_tx_agg_timer_cb; + INIT_WORK(&priv->agg_wq, rmnet_vnd_tx_agg_work); + ktime_get_ts64(&priv->agg_time); + spin_lock_init(&priv->agg_lock); + priv->use_qca_nss = 0; + + qmap_net->ethtool_ops = &rmnet_vnd_ethtool_ops; + qmap_net->netdev_ops = &rmnet_vnd_ops; + qmap_net->flags |= IFF_NOARP; + qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + qmap_net->max_mtu = pQmapDev->max_mtu; +#endif + + if (nss_cb && use_qca_nss) { + rmnet_vnd_rawip_setup(qmap_net); + } +#ifdef CONFIG_PINCTRL_IPQ9574 + rmnet_vnd_rawip_setup(qmap_net); +#endif + if (pQmapDev->net_type == MHI_NET_MBIM) { + qmap_net->needed_headroom = sizeof(struct mhi_mbim_hdr); + } + + err = register_netdev(qmap_net); + pr_info("%s(%s)=%d\n", __func__, qmap_net->name, err); + if (err == -EEXIST) { + //'ifdown wan' for openwrt, why? + } + if (err < 0) + goto out_free_newdev; + + netif_device_attach (qmap_net); + netif_carrier_off(qmap_net); + + if (nss_cb && use_qca_nss) { + int rc = nss_cb->nss_create(qmap_net); + WARN_ON(rc); + if (rc) { + /* Log, but don't fail the device creation */ + netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc); + } else { + priv->use_qca_nss = 1; + netdev_info(qmap_net, "NSS context created\n"); + rtnl_lock(); + netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL); + rtnl_unlock(); + } + } + + return qmap_net; + +out_free_newdev: + free_netdev(qmap_net); + return qmap_net; +} + +static void rmnet_vnd_unregister_device(struct net_device *qmap_net) { + struct qmap_priv *priv; + unsigned long flags; + + pr_info("%s(%s)\n", __func__, qmap_net->name); + netif_carrier_off(qmap_net); + + priv = netdev_priv(qmap_net); + hrtimer_cancel(&priv->agg_hrtimer); + cancel_work_sync(&priv->agg_wq); + + spin_lock_irqsave(&priv->agg_lock, flags); + if (priv->agg_skb) { + kfree_skb(priv->agg_skb); + priv->agg_skb = NULL; + priv->agg_count = 0; + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (nss_cb && priv->use_qca_nss) { + rtnl_lock(); + netdev_rx_handler_unregister(qmap_net); + rtnl_unlock(); + nss_cb->nss_free(qmap_net); + } +#if defined(MHI_NETDEV_STATUS64) + free_percpu(priv->stats64); +#endif + unregister_netdev (qmap_net); + free_netdev(qmap_net); +} +#endif + +static void rmnet_info_set(struct mhi_netdev *pQmapDev, RMNET_INFO *rmnet_info) +{ + rmnet_info->size = sizeof(RMNET_INFO); + rmnet_info->rx_urb_size = pQmapDev->qmap_size; + rmnet_info->ep_type = 3; //DATA_EP_TYPE_PCIE + rmnet_info->iface_id = 4; + rmnet_info->qmap_mode = pQmapDev->qmap_mode; + rmnet_info->qmap_version = pQmapDev->qmap_version; + rmnet_info->dl_minimum_padding = 0; +} + +static ssize_t qmap_mode_show(struct device *dev, struct 
device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_netdev->qmap_mode); +} + +static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL); + +static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_netdev->qmap_size); +} + +static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL); + +static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + return snprintf(buf, PAGE_SIZE, "0x%x\n", mhi_netdev->link_state); +} + +static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + //struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + unsigned link_state = 0; + unsigned old_link = mhi_netdev->link_state; + uint offset_id = 0; + + link_state = simple_strtoul(buf, NULL, 0); + if (mhi_netdev->qmap_mode > 1) { + offset_id = ((link_state&0xF) - 1); + + if (0 < link_state && link_state <= mhi_netdev->qmap_mode) + mhi_netdev->link_state |= (1 << offset_id); + else if (0x80 < link_state && link_state <= (0x80 + mhi_netdev->qmap_mode)) + mhi_netdev->link_state &= ~(1 << offset_id); + } + else { + mhi_netdev->link_state = !!link_state; + } + + if (old_link != mhi_netdev->link_state) { + struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[offset_id]; + + if (mhi_netdev->link_state) + netif_carrier_on(mhi_netdev->ndev); + else { + netif_carrier_off(mhi_netdev->ndev); + } + + if (qmap_net) { + if (mhi_netdev->link_state & (1 << offset_id)) + netif_carrier_on(qmap_net); + else + netif_carrier_off(qmap_net); + } + + dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, mhi_netdev->link_state); + } + + return count; +} + +static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store); + +static struct attribute *pcie_mhi_sysfs_attrs[] = { + &dev_attr_qmap_mode.attr, + &dev_attr_qmap_size.attr, + &dev_attr_link_state.attr, +#ifdef QUECTEL_BRIDGE_MODE + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, +#endif + NULL, +}; + +static struct attribute_group pcie_mhi_sysfs_attr_group = { + .attrs = pcie_mhi_sysfs_attrs, +}; + +static void mhi_netdev_upate_rx_stats(struct mhi_netdev *mhi_netdev, + unsigned rx_packets, unsigned rx_bytes) { +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64); + + u64_stats_update_begin(&stats64->syncp); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + stats64->rx_packets += rx_packets; + stats64->rx_bytes += rx_bytes; +#else + u64_stats_add(&stats64->rx_packets, rx_packets); + u64_stats_add(&stats64->rx_bytes, rx_bytes); +#endif + u64_stats_update_begin(&stats64->syncp); +#else + mhi_netdev->ndev->stats.rx_packets += rx_packets; + mhi_netdev->ndev->stats.rx_bytes += rx_bytes; +#endif +} + +static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev, + unsigned tx_packets, unsigned tx_bytes) { +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64); + + u64_stats_update_begin(&stats64->syncp); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) 
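+	/* pcpu_sw_netstats counters became u64_stats_t wrappers in kernel 6.1,
+	 * hence the version split: plain additions here, u64_stats_add() in the
+	 * #else branch for 6.1 and later. */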
+ stats64->tx_packets += tx_packets; + stats64->tx_bytes += tx_bytes; +#else + u64_stats_add(&stats64->tx_packets, tx_packets); + u64_stats_add(&stats64->tx_bytes, tx_bytes); +#endif + u64_stats_update_begin(&stats64->syncp); +#else + mhi_netdev->ndev->stats.tx_packets += tx_packets; + mhi_netdev->ndev->stats.tx_bytes += tx_bytes; +#endif +} + +static __be16 mhi_netdev_ip_type_trans(u8 data) +{ + __be16 protocol = 0; + + /* determine L3 protocol */ + switch (data & 0xf0) { + case 0x40: + protocol = htons(ETH_P_IP); + break; + case 0x60: + protocol = htons(ETH_P_IPV6); + break; + default: + protocol = htons(ETH_P_MAP); + break; + } + + return protocol; +} + +static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t) +{ + u32 cur_mru = mhi_netdev->mru; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct mhi_skb_priv *skb_priv; + int ret; + struct sk_buff *skb; + int no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + int i; + + for (i = 0; i < no_tre; i++) { + skb = skb_dequeue(&mhi_netdev->rx_allocated); + if (!skb) { + skb = alloc_skb(/*32+*/cur_mru, gfp_t); + if (skb) + mhi_netdev->stats.rx_allocated++; + } + if (!skb) + return -ENOMEM; + + read_lock_bh(&mhi_netdev->pm_lock); + if (unlikely(!mhi_netdev->enabled)) { + MSG_ERR("Interface not enabled\n"); + ret = -EIO; + goto error_queue; + } + + skb_priv = (struct mhi_skb_priv *)skb->cb; + skb_priv->buf = skb->data; + skb_priv->size = cur_mru; + skb_priv->bind_netdev = mhi_netdev; + skb->dev = mhi_netdev->ndev; + //skb_reserve(skb, 32); //for ethernet header + + spin_lock_bh(&mhi_netdev->rx_lock); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb, + skb_priv->size, MHI_EOT); + spin_unlock_bh(&mhi_netdev->rx_lock); + + if (ret) { + skb_priv->bind_netdev = NULL; + MSG_ERR("Failed to queue skb, ret:%d\n", ret); + ret = -EIO; + goto error_queue; + } + + read_unlock_bh(&mhi_netdev->pm_lock); + } + + return 0; + +error_queue: + skb->destructor = NULL; + read_unlock_bh(&mhi_netdev->pm_lock); + dev_kfree_skb_any(skb); + + return ret; +} + +static void mhi_netdev_alloc_work(struct work_struct *work) +{ + struct mhi_netdev *mhi_netdev = container_of(work, struct mhi_netdev, + alloc_work.work); + /* sleep about 1 sec and retry, that should be enough time + * for system to reclaim freed memory back. 
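+	 * Note: the current handler simply re-queues itself after 20 ms and
+	 * returns on -ENOMEM, so the msleep()/retry fallback below is not
+	 * reached.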
+ */ + const int sleep_ms = 1000; + int retry = 60; + int ret; + + MSG_LOG("Entered\n"); + do { + ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); + /* sleep and try again */ + if (ret == -ENOMEM) { + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + return; + msleep(sleep_ms); + retry--; + } + } while (ret == -ENOMEM && retry); + + MSG_LOG("Exit with status:%d retry:%d\n", ret, retry); +} + +static void mhi_netdev_dealloc(struct mhi_netdev *mhi_netdev) +{ + struct sk_buff *skb; + + skb = skb_dequeue(&mhi_netdev->rx_allocated); + while (skb) { + skb->destructor = NULL; + kfree_skb(skb); + skb = skb_dequeue(&mhi_netdev->rx_allocated); + } +} + +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY +static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev, + gfp_t gfp, + unsigned int order) +{ + struct page *page; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + void *vaddr; + + page = __dev_alloc_pages(gfp, order); + if (!page) + return NULL; + + vaddr = page_address(page); + + /* we going to use the end of page to store cached data */ + netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf); + + mhi_buf = (struct mhi_buf *)netbuf; + mhi_buf->page = page; + mhi_buf->buf = vaddr; + mhi_buf->len = (void *)netbuf - vaddr; + mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mhi_buf->dma_addr)) { + __free_pages(mhi_buf->page, order); + return NULL; + } + + return netbuf; +} + +static void mhi_netdev_unmap_page(struct device *dev, + dma_addr_t dma_addr, + size_t len, + enum dma_data_direction dir) +{ + dma_unmap_page(dev, dma_addr, len, dir); +} + +static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + const u32 order = mhi_netdev->order; + int i, ret; + + for (i = 0; i < nr_tre; i++) { + struct mhi_buf *mhi_buf; + struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC, + order); + if (!netbuf) + return -ENOMEM; + + mhi_buf = (struct mhi_buf *)netbuf; + netbuf->unmap = mhi_netdev_unmap_page; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue transfer, ret:%d\n", ret); + mhi_netdev_unmap_page(dev, mhi_buf->dma_addr, + mhi_buf->len, DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + return ret; + } + } + + return 0; +} + +static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool; + int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + int i, peak, cur_index, ret; + const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4; + + MSG_VERB("Enter free_desc:%d\n", nr_tre); + + if (!nr_tre) + return; + + /* try going thru reclaim pool first */ + for (i = 0; i < nr_tre; i++) { + /* peak for the next buffer, we going to peak several times, + * and we going to give up if buffers are not yet free + */ + cur_index = mhi_netdev->current_index; + netbuf = NULL; + for (peak = 0; peak < max_peak; peak++) { + struct mhi_netbuf *tmp = netbuf_pool[cur_index]; + + mhi_buf = &tmp->mhi_buf; + + cur_index = (cur_index + 1) & pool_size; + + /* page == 1 idle, buffer is free to reclaim */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 )) + if (atomic_read(&mhi_buf->page->_count) 
== 1) +#else + if (atomic_read(&mhi_buf->page->_refcount) == 1) +#endif + { + netbuf = tmp; + break; + } + } + + /* could not find a free buffer */ + if (!netbuf) + break; + + /* increment reference count so when network stack is done + * with buffer, the buffer won't be freed + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 )) + atomic_inc(&mhi_buf->page->_count); +#else + atomic_inc(&mhi_buf->page->_refcount); +#endif + dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue buffer, ret:%d\n", ret); + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 )) + atomic_dec(&mhi_buf->page->_count); +#else + atomic_dec(&mhi_buf->page->_refcount); +#endif + return; + } + mhi_netdev->current_index = cur_index; + } + + /* recyling did not work, buffers are still busy allocate temp pkts */ + if (i < nr_tre) + mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i); +} + +/* allocating pool of memory */ +static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool; + struct mhi_buf *mhi_buf; + const u32 order = mhi_netdev->order; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + + netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool), + GFP_KERNEL); + if (!netbuf_pool) + return -ENOMEM; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + /* allocate paged data */ + netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order); + if (!netbuf) + goto error_alloc_page; + + netbuf->unmap = dma_sync_single_for_cpu; + netbuf_pool[i] = netbuf; + } + + mhi_netdev->netbuf_pool = netbuf_pool; + + return 0; + +error_alloc_page: + for (--i; i >= 0; i--) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + } + + kfree(netbuf_pool); + + return -ENOMEM; +} + +static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + struct mhi_buf *mhi_buf; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, mhi_netdev->order); + } + + kfree(mhi_netdev->netbuf_pool); + mhi_netdev->netbuf_pool = NULL; +} + +static int mhi_netdev_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct mhi_net_chain *chain = &mhi_netdev->chain; + int rx_work = 0; + + MSG_VERB("Entered\n"); + + rx_work = mhi_poll(mhi_dev, budget); + + /* chained skb, push it to stack */ + if (chain && chain->head) { + netif_receive_skb(chain->head); + chain->head = NULL; + } + + if (rx_work < 0) { + MSG_ERR("Error polling ret:%d\n", rx_work); + napi_complete(napi); + return 0; + } + + /* queue new buffers */ + mhi_netdev_queue(mhi_netdev); + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} +#else +static int 
mhi_netdev_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct sk_buff *skb; + int rx_work = 0; + int ret; + + MSG_VERB("Entered\n"); + + rx_work = mhi_poll(mhi_dev, budget); + + if (rx_work < 0) { + MSG_ERR("Error polling ret:%d\n", rx_work); + napi_complete(napi); + return 0; + } + + if (mhi_netdev->net_type == MHI_NET_MBIM || mhi_netdev->net_type == MHI_NET_RMNET) { + while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) { +#ifdef MHI_NETDEV_ONE_CARD_MODE + int recly_skb = 0; + + mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len); + if (mhi_netdev->net_type == MHI_NET_MBIM) + rmnet_mbim_rx_handler(mhi_netdev, skb); + else + rmnet_qmi_rx_handler(mhi_netdev, skb); + + if (!skb_cloned(skb)) { + if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) { + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + skb_queue_tail(&mhi_netdev->rx_allocated, skb); + recly_skb = 1; + } + } + if (recly_skb == 0) + dev_kfree_skb(skb); +#else + mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len); + skb->dev = mhi_netdev->ndev; + skb->protocol = htons(ETH_P_MAP); + netif_receive_skb(skb); +#endif + } + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) { + mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(skb->data[0]); + netif_receive_skb(skb); + } + } + + /* queue new buffers */ + if (!delayed_work_pending(&mhi_netdev->alloc_work)) { + ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC); + if (ret == -ENOMEM) { + //MSG_LOG("out of tre, queuing bg worker\n"); //do not print in softirq + mhi_netdev->stats.alloc_failed++; + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + } + } + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} +#endif + +static int mhi_netdev_open(struct net_device *ndev) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + MSG_LOG("Opened net dev interface\n"); + + /* tx queue may not necessarily be stopped already + * so stop the queue if tx path is not enabled + */ + if (!mhi_dev->ul_chan) + netif_stop_queue(ndev); + else + netif_start_queue(ndev); + + return 0; + +} + +static int mhi_netdev_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + if (new_mtu < 0 || mhi_dev->mtu < new_mtu) + return -EINVAL; + + ndev->mtu = new_mtu; + return 0; +} + +static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int res = 0; + struct skb_data *entry = (struct skb_data *)(skb->cb); + + entry->packets = 1; + entry->length = skb->len; + entry->bind_netdev = mhi_netdev; + + MSG_VERB("Entered\n"); + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + //qmap_hex_dump(__func__, skb->data, 32); + +#ifdef MHI_NETDEV_ONE_CARD_MODE + //printk("%s dev->type=%d\n", 
__func__, dev->type); + + if (dev->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (mhi_netdev->bridge_mode && bridge_mode_tx_fixup(dev, skb, mhi_netdev->bridge_ipv4, mhi_netdev->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && (skb_pull(skb, ETH_HLEN) == NULL)) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + + if (mhi_netdev->net_type == MHI_NET_MBIM) { + if (add_mbim_hdr(skb, mhi_netdev->mbim_mux_id) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + else if (mhi_netdev->net_type == MHI_NET_RMNET) { + if (mhi_netdev->qmap_version == 5) { + add_qhdr(skb, QUECTEL_QMAP_MUX_ID); + } + else if (mhi_netdev->qmap_version == 9) { + add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } +#else + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } +#endif + + if (mhi_netdev->net_type == MHI_NET_MBIM) { + struct mhi_mbim_hdr *mhdr = (struct mhi_mbim_hdr *)skb->data; + mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->mbim_ctx.tx_seq++); + } + + if (unlikely(mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) < 16)) { + u32 i = 0; + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[i]; + if (qmap_net) { + netif_stop_queue(qmap_net); + } + } + + netif_stop_queue(dev); + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + + //printk("%s transfer res=%d\n", __func__, res); + if (unlikely(res)) { + dev_kfree_skb_any(skb); + dev->stats.tx_errors++; + } + + MSG_VERB("Exited\n"); + + return NETDEV_TX_OK; +} + +#if defined(MHI_NETDEV_STATUS64) +static struct rtnl_link_stats64 * _mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &ndev->stats); + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(mhi_netdev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; +#else + u64_stats_t rx_packets, rx_bytes; + u64_stats_t tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(mhi_netdev->stats64, cpu); + + do { + start = u64_stats_fetch_begin(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry(&stats64->syncp, start)); + + stats->rx_packets += u64_stats_read(&rx_packets); + stats->rx_bytes += u64_stats_read(&rx_bytes); + stats->tx_packets += u64_stats_read(&tx_packets); + stats->tx_bytes += u64_stats_read(&tx_bytes); +#endif + } + + return stats; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void 
mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { + _mhi_netdev_get_stats64(ndev, stats); +} +#else +static struct rtnl_link_stats64 * mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { + _mhi_netdev_get_stats64(ndev, stats); + return stats; +} +#endif +#endif + +static int qmap_setting_store(struct mhi_netdev *mhi_netdev, QMAP_SETTING *qmap_settings, size_t size) { + if (qmap_settings->size != size) { + netdev_err(mhi_netdev->ndev, "ERROR: qmap_settings.size donot match!\n"); + return -EOPNOTSUPP; + } + + mhi_netdev->dl_minimum_padding = qmap_settings->dl_minimum_padding; + + return 0; +} + +static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(dev); + int rc = -EOPNOTSUPP; + uint link_state = 0; + QMAP_SETTING qmap_settings = {0}; + + switch (cmd) { + case 0x89F1: //SIOCDEVPRIVATE + rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state)); + if (!rc) { + char buf[32]; + snprintf(buf, sizeof(buf), "%u", link_state); + link_state_store(&dev->dev, NULL, buf, strlen(buf)); + } + break; + + case 0x89F2: //SIOCDEVPRIVATE + rc = copy_from_user(&qmap_settings, ifr->ifr_ifru.ifru_data, sizeof(qmap_settings)); + if (!rc) { + rc = qmap_setting_store(mhi_netdev, &qmap_settings, sizeof(qmap_settings)); + } + break; + + case 0x89F3: //SIOCDEVPRIVATE + if (mhi_netdev->use_rmnet_usb) { + rc = copy_to_user(ifr->ifr_ifru.ifru_data, &mhi_netdev->rmnet_info, sizeof(mhi_netdev->rmnet_info)); + } + break; + + default: + break; + } + + return rc; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,14,0 )) //b9067f5dc4a07c8e24e01a1b277c6722d91be39e +#define use_ndo_siocdevprivate +#endif +#ifdef use_ndo_siocdevprivate +static int qmap_ndo_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { + return qmap_ndo_do_ioctl(dev, ifr, cmd); +} +#endif + +static const struct net_device_ops mhi_netdev_ops_ip = { + .ndo_open = mhi_netdev_open, + .ndo_start_xmit = mhi_netdev_xmit, + //.ndo_do_ioctl = mhi_netdev_ioctl, + .ndo_change_mtu = mhi_netdev_change_mtu, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = mhi_netdev_get_stats64, +#endif + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = qmap_ndo_do_ioctl, +#ifdef use_ndo_siocdevprivate + .ndo_siocdevprivate = qmap_ndo_siocdevprivate, +#endif +}; + +static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info) +{ + //struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + /* strlcpy() is deprecated in kernel 6.8.0+, using strscpy instead */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,8,0)) + strlcpy(info->driver, "pcie_mhi", sizeof(info->driver)); + strlcpy(info->version, PCIE_MHI_DRIVER_VERSION, sizeof(info->version)); +#else + strscpy(info->driver, "pcie_mhi", sizeof(info->driver)); + strscpy(info->version, PCIE_MHI_DRIVER_VERSION, sizeof(info->version)); +#endif +} + +static const struct ethtool_ops mhi_netdev_ethtool_ops = { + .get_drvinfo = mhi_netdev_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static void mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + dev->ethtool_ops = &mhi_netdev_ethtool_ops; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + __dev_addr_set (dev, node_id, sizeof node_id); +#else + memcpy (dev->dev_addr, node_id, sizeof node_id); +#endif + /* set this after calling ether_setup */ + dev->header_ops = 0; 
/* No header */ + dev->hard_header_len = 0; + dev->type = ARPHRD_NONE; + dev->addr_len = 0; + dev->flags |= IFF_NOARP; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); //POINTOPOINT will make SFE work wrong + dev->watchdog_timeo = WATCHDOG_TIMEOUT; + //on OpenWrt, if set rmnet_mhi0.1 as WAN, '/sbin/netifd' will auto create VLAN for rmnet_mhi0 + dev->features |= (NETIF_F_VLAN_CHALLENGED); + +#ifdef MHI_NETDEV_ONE_CARD_MODE + if (mhi_mbim_enabled) { + dev->needed_headroom = sizeof(struct mhi_mbim_hdr); + } +#endif +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; +#if 0 + char ifalias[IFALIASZ]; +#endif + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int no_tre; + + MSG_LOG("Prepare the channels for transfer\n"); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start TX chan ret %d\n", ret); + goto mhi_failed_to_start; + } + + /* first time enabling the node */ + if (!mhi_netdev->ndev) { + struct mhi_netdev_priv *mhi_netdev_priv; + +#if 0 + snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_netdev->alias); +#endif + + snprintf(ifname, sizeof(ifname), "%s%d", + mhi_netdev->interface_name, mhi_netdev->mhi_dev->mhi_cntrl->cntrl_idx); + + rtnl_lock(); +#ifdef NET_NAME_PREDICTABLE + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); +#else + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, + mhi_netdev_setup); +#endif + + if (!mhi_netdev->ndev) { + ret = -ENOMEM; + rtnl_unlock(); + goto net_dev_alloc_fail; + } + + //mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); +#if 0 + dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias)); +#endif + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + + if (mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) { +#ifdef QUECTEL_BRIDGE_MODE + mhi_netdev->bridge_mode = bridge_mode; +#endif + mhi_netdev->ndev->sysfs_groups[0] = &pcie_mhi_sysfs_attr_group; + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->ndev->mtu = mhi_netdev->mru; + } + rtnl_unlock(); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + mhi_netdev->ndev->max_mtu = mhi_netdev->max_mtu; //first net card +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight); +#else + netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight); +#endif + ret = register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto net_dev_reg_fail; + } + + netif_carrier_off(mhi_netdev->ndev); + } + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = true; + write_unlock_irq(&mhi_netdev->pm_lock); + +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + /* MRU must be multiplication of page size */ + mhi_netdev->order = 1; + while ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru) + mhi_netdev->order += 1; + + /* setup pool size ~2x ring length*/ + no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + mhi_netdev->pool_size = 1 << __ilog2_u32(no_tre); + if (no_tre > mhi_netdev->pool_size) + mhi_netdev->pool_size <<= 1; + mhi_netdev->pool_size <<= 1; + + 
/* allocate memory pool */ + ret = mhi_netdev_alloc_pool(mhi_netdev); + if (ret) { + MSG_ERR("mhi_netdev_alloc_pool Fail!\n"); + goto error_start; + } + + napi_enable(&mhi_netdev->napi); + + /* now we have a pool of buffers allocated, queue to hardware + * by triggering a napi_poll + */ + napi_schedule(&mhi_netdev->napi); +error_start: +#else + /* queue buffer for rx path */ + no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); + if (ret) + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + + napi_enable(&mhi_netdev->napi); +#endif + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(&mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + +net_dev_alloc_fail: + mhi_unprepare_from_transfer(mhi_dev); + +mhi_failed_to_start: + MSG_ERR("Exited ret %d.\n", ret); + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + struct skb_data *entry = (struct skb_data *)(skb->cb); + + if (entry->bind_netdev != mhi_netdev) { + MSG_ERR("%s error!\n", __func__); + return; + } + + if (likely(mhi_result->transaction_status == 0)) { + mhi_netdev_upate_tx_stats(mhi_netdev, entry->packets, entry->length); + + if (netif_queue_stopped(ndev) && mhi_netdev->enabled + && mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 32) { + int i = 0; + + netif_wake_queue(ndev); + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[i]; + if (qmap_net) { + if (netif_queue_stopped(qmap_net)) + netif_wake_queue(qmap_net); + } + } + } + } + + entry->bind_netdev = NULL; + entry->packets = 1; + entry->length = 0; + dev_kfree_skb(skb); +} + +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct mhi_netbuf *netbuf = mhi_result->buf_addr; + struct mhi_buf *mhi_buf = &netbuf->mhi_buf; + struct sk_buff *skb; + struct net_device *ndev = mhi_netdev->ndev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_net_chain *chain = &mhi_netdev->chain; + + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE); + + /* modem is down, drop the buffer */ + if (mhi_result->transaction_status == -ENOTCONN) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + mhi_netdev_upate_rx_stats(mhi_netdev, 1, mhi_result->bytes_xferd); + + /* we support chaining */ + skb = alloc_skb(0, GFP_ATOMIC); + if (likely(skb)) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + + /* this is first on list */ + if (!chain->head) { + skb->dev = ndev; + skb->protocol = htons(ETH_P_MAP); + chain->head = skb; + } else { + skb_shinfo(chain->tail)->frag_list = skb; + } + + chain->tail = skb; + } else { + __free_pages(mhi_buf->page, mhi_netdev->order); + } +} +#else +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); + + if (unlikely(skb_priv->bind_netdev != mhi_netdev)) { + MSG_ERR("%s error!\n", __func__); + 
return; + } + + if (unlikely(mhi_result->transaction_status)) { + if (mhi_result->transaction_status != -ENOTCONN) + MSG_ERR("%s transaction_status = %d!\n", __func__, mhi_result->transaction_status); + skb_priv->bind_netdev = NULL; + dev_kfree_skb(skb); + return; + } + +#if defined(CONFIG_IPQ5018_RATE_CONTROL) + if (likely(mhi_netdev->mhi_rate_control)) { + u32 time_interval = 0; + u32 time_difference = 0; + u32 cntfrq; + u64 second_jiffy; + u64 bytes_received_2; + struct net_device *ndev = mhi_netdev->ndev; + + if (mhi_netdev->first_jiffy) { + #if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0) + second_jiffy = arch_counter_get_cntvct(); + #else + second_jiffy = __arch_counter_get_cntvct(); + #endif + bytes_received_2 = mhi_netdev->bytes_received_2; + if ((second_jiffy > mhi_netdev->first_jiffy) && + (bytes_received_2 > mhi_netdev->bytes_received_1)) { + + time_difference = (second_jiffy - mhi_netdev->first_jiffy); + time_interval = (time_difference / mhi_netdev->cntfrq_per_msec); + + /* We sample at a 100ms interval. 1.8Gbps is 225,000,000 bytes per second, */ + /* i.e. 22,500,000 bytes per 100ms (1.7Gbps: 21,250,000 bytes per 100ms, */ + /* 1.6Gbps: 20,000,000 bytes per 100ms); anything beyond the 1.8Gbps budget */ + /* within the current window is dropped below. */ + + if (time_interval < 100) { + if ((bytes_received_2 - mhi_netdev->bytes_received_1) > 22500000) { + ndev->stats.rx_dropped ++; + dev_kfree_skb(skb); + return; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + #if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0) + mhi_netdev->first_jiffy = arch_counter_get_cntvct(); + #else + mhi_netdev->first_jiffy = __arch_counter_get_cntvct(); + #endif + + cntfrq = arch_timer_get_cntfrq(); + mhi_netdev->cntfrq_per_msec = cntfrq / 1000; + } + mhi_netdev->bytes_received_2 += mhi_result->bytes_xferd; + } +#endif + +#if 0 + { + static size_t bytes_xferd = 0; + if (mhi_result->bytes_xferd > bytes_xferd) { + bytes_xferd = mhi_result->bytes_xferd; + printk(KERN_DEBUG "bytes_xferd=%zd\n", bytes_xferd); + } + } +#endif + + skb_put(skb, mhi_result->bytes_xferd); + + qmap_hex_dump(__func__, skb->data, skb->len); + + skb_priv->bind_netdev = NULL; + skb_queue_tail(&mhi_netdev->qmap_chain, skb); +} +#endif + +static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + if (napi_schedule_prep(&mhi_netdev->napi)) { + __napi_schedule(&mhi_netdev->napi); + mhi_netdev->stats.rx_int++; + return; + } +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *mhi_netdev_debugfs_dentry; + +static int mhi_netdev_init_debugfs_states_show(struct seq_file *m, void *d) +{ + struct mhi_netdev *mhi_netdev = m->private; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + +#ifdef TS_DEBUG + struct timespec now_ts, diff_ts; + getnstimeofday(&now_ts); + diff_ts = timespec_sub(now_ts, mhi_netdev->diff_ts); + mhi_netdev->diff_ts = now_ts; +#endif + + seq_printf(m, + "tx_tre:%d rx_tre:%d qmap_chain:%u skb_chain:%u tx_allocated:%u rx_allocated:%u\n", +
mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE), + mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE), + mhi_netdev->qmap_chain.qlen, + mhi_netdev->skb_chain.qlen, + mhi_netdev->tx_allocated.qlen, + mhi_netdev->rx_allocated.qlen); + + seq_printf(m, + "netif_queue_stopped:%d, link_state:0x%x, flow_control:0x%x\n", + netif_queue_stopped(mhi_netdev->ndev), mhi_netdev->link_state, mhi_netdev->flow_control); + + seq_printf(m, + "rmnet_map_command_stats: %u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_NONE], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_DISABLE], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_ENABLE], + mhi_netdev->rmnet_map_command_stats[3], + mhi_netdev->rmnet_map_command_stats[4], + mhi_netdev->rmnet_map_command_stats[5], + mhi_netdev->rmnet_map_command_stats[6], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_START], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_END], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]); + +#ifdef TS_DEBUG + seq_printf(m, + "qmap_ts:%ld.%ld, skb_ts:%ld.%ld, diff_ts:%ld.%ld\n", + mhi_netdev->qmap_ts.tv_sec, mhi_netdev->qmap_ts.tv_nsec, + mhi_netdev->skb_ts.tv_sec, mhi_netdev->skb_ts.tv_nsec, + diff_ts.tv_sec, diff_ts.tv_nsec); + mhi_netdev->clear_ts = 1; +#endif + + return 0; +} + +static int mhi_netdev_init_debugfs_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_netdev_init_debugfs_states_show, inode->i_private); +} + +static const struct file_operations mhi_netdev_debugfs_state_ops = { + .open = mhi_netdev_init_debugfs_states_open, + .release = single_release, + .read = seq_read, +}; + +static int mhi_netdev_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_netdev *mhi_netdev = data; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int ret; + + MSG_LOG("Triggering channel reset\n"); + + /* disable the interface so no data processing */ + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + napi_disable(&mhi_netdev->napi); + + /* disable all hardware channels */ + mhi_unprepare_from_transfer(mhi_dev); + + /* clean up all alocated buffers */ + mhi_netdev_dealloc(mhi_netdev); + + MSG_LOG("Restarting iface\n"); + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_netdev_debugfs_trigger_reset_fops, NULL, + mhi_netdev_debugfs_trigger_reset, "%llu\n"); + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ + char node_name[32]; + int i; + const umode_t mode = 0600; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct dentry *dentry = mhi_netdev_debugfs_dentry; + + const struct { + char *name; + u32 *ptr; + } debugfs_table[] = { + { + "rx_int", + &mhi_netdev->stats.rx_int + }, + { + "tx_full", + &mhi_netdev->stats.tx_full + }, + { + "tx_pkts", + &mhi_netdev->stats.tx_pkts + }, + { + "rx_budget_overflow", + &mhi_netdev->stats.rx_budget_overflow + }, + { + "rx_allocated", + &mhi_netdev->stats.rx_allocated + }, + { + "tx_allocated", + &mhi_netdev->stats.tx_allocated + }, + { + "alloc_failed", + &mhi_netdev->stats.alloc_failed + }, + { + NULL, NULL + }, + }; + + /* Both tx & rx client handle contain same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if 
(IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; + + debugfs_create_u32("msg_lvl", mode, mhi_netdev->dentry, + (u32 *)&mhi_netdev->msg_lvl); + + /* Add debug stats table */ + for (i = 0; debugfs_table[i].name; i++) { + debugfs_create_u32(debugfs_table[i].name, mode, + mhi_netdev->dentry, + debugfs_table[i].ptr); + } + + debugfs_create_file("reset", mode, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_trigger_reset_fops); + debugfs_create_file("states", 0444, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_state_ops); +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ + mhi_netdev_debugfs_dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, parent); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb; + + MSG_LOG("Remove notification received\n"); +#ifndef MHI_NETDEV_ONE_CARD_MODE +#ifndef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + + unsigned i; + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + if (mhi_netdev->mpQmapNetDev[i] + && mhi_netdev->mpQmapNetDev[i] != mhi_netdev->ndev) { + rmnet_vnd_unregister_device(mhi_netdev->mpQmapNetDev[i]); + } + mhi_netdev->mpQmapNetDev[i] = NULL; + } + + rtnl_lock(); + if (mhi_netdev->ndev + && rtnl_dereference(mhi_netdev->ndev->rx_handler) == rmnet_rx_handler) + netdev_rx_handler_unregister(mhi_netdev->ndev); + rtnl_unlock(); +#endif +#endif + while ((skb = skb_dequeue (&mhi_netdev->skb_chain))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->rx_allocated))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->tx_allocated))) + dev_kfree_skb_any(skb); + + napi_disable(&mhi_netdev->napi); + netif_napi_del(&mhi_netdev->napi); + mhi_netdev_dealloc(mhi_netdev); + unregister_netdev(mhi_netdev->ndev); +#if defined(MHI_NETDEV_STATUS64) + free_percpu(mhi_netdev->stats64); +#endif + free_netdev(mhi_netdev->ndev); + flush_delayed_work(&mhi_netdev->alloc_work); +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + mhi_netdev_free_pool(mhi_netdev); +#endif + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev; + unsigned i; + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + if (!strcmp(id->chan, "IP_HW0")) { + if (mhi_mbim_enabled) + mhi_netdev->net_type = MHI_NET_MBIM; + else + mhi_netdev->net_type = MHI_NET_RMNET; + } + else if (!strcmp(id->chan, "IP_SW0")) { + mhi_netdev->net_type = MHI_NET_ETHER; + } + else { + return -EINVAL; + } + + mhi_netdev->alias = 0; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + + mhi_netdev->mru = (15*1024); ///etc/data/qnicorn_config.xml dataformat_agg_dl_size 15*1024 + mhi_netdev->max_mtu = mhi_netdev->mru - (sizeof(struct rmnet_map_v5_csum_header) + sizeof(struct rmnet_map_header)); + if (mhi_netdev->net_type == MHI_NET_MBIM) { 
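+		/*
+		 * Editor's note (hedged summary, not in the original source): in the default
+		 * QMAP case above, the MRU is the 15KB aggregation buffer and max_mtu is the
+		 * MRU minus the QMAP map header and the v5 checksum header. In the MBIM branch
+		 * below, the device's dwNtbInMaxSize (device-to-host NTB limit) takes the place
+		 * of that 15KB default, and max_mtu instead leaves room for the MBIM NTB/NDP
+		 * headers (struct mhi_mbim_hdr).
+		 */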
+ mhi_netdev->mru = ncmNTBParams.dwNtbInMaxSize; + mhi_netdev->mbim_ctx.rx_max = mhi_netdev->mru; + mhi_netdev->max_mtu = mhi_netdev->mru - sizeof(struct mhi_mbim_hdr); + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->mru = 8*1024; + mhi_netdev->max_mtu = mhi_netdev->mru; + } + mhi_netdev->qmap_size = mhi_netdev->mru; + +#if defined(MHI_NETDEV_STATUS64) + mhi_netdev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!mhi_netdev->stats64) + return -ENOMEM; +#endif + + if (!strcmp(id->chan, "IP_HW0")) + mhi_netdev->interface_name = "rmnet_mhi"; + else if (!strcmp(id->chan, "IP_SW0")) + mhi_netdev->interface_name = "mhi_swip"; + else + mhi_netdev->interface_name = id->chan; + + mhi_netdev->qmap_mode = qmap_mode; + mhi_netdev->qmap_version = 5; + mhi_netdev->use_rmnet_usb = 1; + if ((mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0306) + || (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0308) + || (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x1004) + || (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x011a) + || (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x100b) + || (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0309) + ) { + mhi_netdev->qmap_version = 9; + } + if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->qmap_mode = 1; + mhi_netdev->qmap_version = 0; + mhi_netdev->use_rmnet_usb = 0; + } + + mhi_netdev->mbim_mux_id = 0; + if (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0309) { + mhi_netdev->mbim_mux_id = MBIM_MUX_ID_SDX7X; + } + rmnet_info_set(mhi_netdev, &mhi_netdev->rmnet_info); + + mhi_netdev->rx_queue = mhi_netdev_alloc_skb; + + spin_lock_init(&mhi_netdev->rx_lock); + rwlock_init(&mhi_netdev->pm_lock); + INIT_DELAYED_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work); + skb_queue_head_init(&mhi_netdev->qmap_chain); + skb_queue_head_init(&mhi_netdev->skb_chain); + skb_queue_head_init(&mhi_netdev->tx_allocated); + skb_queue_head_init(&mhi_netdev->rx_allocated); + + mhi_netdev->msg_lvl = MHI_MSG_LVL_INFO; + + /* setup network interface */ + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) { + pr_err("Error mhi_netdev_enable_iface ret:%d\n", ret); + return ret; + } + + mhi_netdev_create_debugfs(mhi_netdev); + + if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev; + netif_carrier_on(mhi_netdev->ndev); + } +#ifdef MHI_NETDEV_ONE_CARD_MODE + else if (1) { + mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev; + strcpy(mhi_netdev->rmnet_info.ifname[0], mhi_netdev->mpQmapNetDev[0]->name); + mhi_netdev->rmnet_info.mux_id[0] = QUECTEL_QMAP_MUX_ID; + if (mhi_mbim_enabled) { + mhi_netdev->rmnet_info.mux_id[0] = mhi_netdev->mbim_mux_id; + } + } +#else + +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + else if (1) { + BUG_ON(mhi_netdev->net_type != MHI_NET_RMNET); + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + mhi_netdev->rmnet_info.mux_id[i] = QUECTEL_QMAP_MUX_ID + i; + strcpy(mhi_netdev->rmnet_info.ifname[i], "use_rmnet_data"); + } + } +#endif + else if (mhi_netdev->use_rmnet_usb) { + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + u8 mux_id = QUECTEL_QMAP_MUX_ID + i; + if (mhi_mbim_enabled) { + mux_id = mhi_netdev->mbim_mux_id + i; + } + mhi_netdev->mpQmapNetDev[i] = rmnet_vnd_register_device(mhi_netdev, i, mux_id); + if (mhi_netdev->mpQmapNetDev[i]) { + strcpy(mhi_netdev->rmnet_info.ifname[i], mhi_netdev->mpQmapNetDev[i]->name); + mhi_netdev->rmnet_info.mux_id[i] = mux_id; + } + } + + rtnl_lock(); + /* when open hyfi function, run cm will make system crash */ + 
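+		/*
+		 * Editor's note (assumption based only on the surrounding code): in this
+		 * multi-card mode every QMAP mux ID gets its own virtual net_device from
+		 * rmnet_vnd_register_device(), and the rmnet_rx_handler() attached below is
+		 * what demultiplexes downlink QMAP frames from the real rmnet_mhi interface
+		 * onto those devices. The handler is registered with rx_handler_data == NULL;
+		 * the commented-out variant passed the mhi_netdev pointer instead.
+		 */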
//netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, mhi_netdev); + netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, NULL); + rtnl_unlock(); + } + +#if defined(CONFIG_IPQ5018_RATE_CONTROL) + mhi_netdev->mhi_rate_control = 1; +#endif +#endif + + return 0; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_SW0" }, + // ADPL does not register as a net device. xingduo.du 2023-02-20 + // { .chan = "IP_HW_ADPL" }, + { }, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +int __init mhi_device_netdev_init(struct dentry *parent) +{ +#ifdef CONFIG_QCA_NSS_DRV + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (!nss_cb) { + printk(KERN_ERR "mhi_device_netdev_init: this driver must be loaded after '/etc/modules.d/42-rmnet-nss'\n"); + } +#endif + + mhi_netdev_create_debugfs_dir(parent); + + return mhi_driver_register(&mhi_netdev_driver); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mhi_netdev_debugfs_dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} + +void mhi_netdev_quectel_avoid_unused_function(void) { +#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY + qmap_hex_dump(NULL, NULL, 0); + mhi_netdev_ip_type_trans(0); +#else +#endif +} diff --git a/driver/quectel_MHI/src/devices/mhi_satellite.c b/driver/quectel_MHI/src/devices/mhi_satellite.c new file mode 100644 index 0000000..d1071ec --- /dev/null +++ b/driver/quectel_MHI/src/devices/mhi_satellite.c @@ -0,0 +1,1153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MHI_SAT_DRIVER_NAME "mhi_satellite" + +static bool mhi_sat_defer_init = true; /* set by default */ + +/* logging macros */ +#define IPC_LOG_PAGES (10) +#define IPC_LOG_LVL (MHI_MSG_LVL_INFO) +#define KLOG_LVL (MHI_MSG_LVL_ERROR) + +#define MHI_SUBSYS_LOG(fmt, ...) do { \ + if (!subsys) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][%s][%s] " fmt, __func__, subsys->name, \ + ##__VA_ARGS__);\ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_INFO) \ + ipc_log_string(subsys->ipc_log, "[I][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_LOG(fmt, ...) do { \ + if (!subsys || !sat_cntrl) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][%s][%s][%x] " fmt, __func__, subsys->name, \ + sat_cntrl->dev_id, ##__VA_ARGS__);\ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_INFO) \ + ipc_log_string(subsys->ipc_log, "[I][%s][%x] " fmt, __func__, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_ERR(fmt, ...)
do { \ + if (!subsys || !sat_cntrl) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s][%s][%x] " fmt, __func__, subsys->name, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_ERROR) \ + ipc_log_string(subsys->ipc_log, "[E][%s][%x] " fmt, __func__, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +/* mhi sys error command */ +#define MHI_TRE_CMD_SYS_ERR_PTR (0) +#define MHI_TRE_CMD_SYS_ERR_D0 (0) +#define MHI_TRE_CMD_SYS_ERR_D1 (MHI_PKT_TYPE_SYS_ERR_CMD << 16) + +/* mhi state change event */ +#define MHI_TRE_EVT_MHI_STATE_PTR (0) +#define MHI_TRE_EVT_MHI_STATE_D0(state) (state << 24) +#define MHI_TRE_EVT_MHI_STATE_D1 (MHI_PKT_TYPE_STATE_CHANGE_EVENT << 16) + +/* mhi exec env change event */ +#define MHI_TRE_EVT_EE_PTR (0) +#define MHI_TRE_EVT_EE_D0(ee) (ee << 24) +#define MHI_TRE_EVT_EE_D1 (MHI_PKT_TYPE_EE_EVENT << 16) + +/* mhi config event */ +#define MHI_TRE_EVT_CFG_PTR(base_addr) (base_addr) +#define MHI_TRE_EVT_CFG_D0(er_base, num) ((er_base << 16) | (num & 0xFFFF)) +#define MHI_TRE_EVT_CFG_D1 (MHI_PKT_TYPE_CFG_EVENT << 16) + +/* command completion event */ +#define MHI_TRE_EVT_CMD_COMPLETION_PTR(ptr) (ptr) +#define MHI_TRE_EVT_CMD_COMPLETION_D0(code) (code << 24) +#define MHI_TRE_EVT_CMD_COMPLETION_D1 (MHI_PKT_TYPE_CMD_COMPLETION_EVENT << 16) + +/* packet parser macros */ +#define MHI_TRE_GET_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_SIZE(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_CCS(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_ID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_IS_ER_CTXT_TYPE(tre) (((tre)->dword[1]) & 0x1) + +/* creates unique device ID based on connection topology */ +#define MHI_SAT_CREATE_DEVICE_ID(dev, domain, bus, slot) \ + ((dev & 0xFFFF) << 16 | (domain & 0xF) << 12 | (bus & 0xFF) << 4 | \ + (slot & 0xF)) + +/* mhi core definitions */ +#define MHI_CTXT_TYPE_GENERIC (0xA) + +struct __packed mhi_generic_ctxt { + u32 reserved0; + u32 type; + u32 reserved1; + u64 ctxt_base; + u64 ctxt_size; + u64 reserved[2]; +}; + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_CTXT_UPDATE_CMD = 0x64, + MHI_PKT_TYPE_IOMMU_MAP_CMD = 0x65, + MHI_PKT_TYPE_CFG_EVENT = 0x6E, + MHI_PKT_TYPE_SYS_ERR_CMD = 0xFF, +}; + +enum mhi_cmd_type { + MHI_CMD_TYPE_RESET = 0x10, + MHI_CMD_TYPE_STOP = 0x11, + MHI_CMD_TYPE_START = 0x12, +}; + +/* mhi event completion codes */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +/* satellite subsystem definitions */ +enum subsys_id { + SUBSYS_ADSP, + SUBSYS_CDSP, + SUBSYS_SLPI, + SUBSYS_MODEM, + SUBSYS_MAX, +}; + +static const char * const subsys_names[SUBSYS_MAX] = { + [SUBSYS_ADSP] = "adsp", + [SUBSYS_CDSP] = "cdsp", + [SUBSYS_SLPI] = "slpi", + [SUBSYS_MODEM] = "modem", +}; + +struct mhi_sat_subsys { + const char *name; + + struct rpmsg_device *rpdev; /* rpmsg device */ + + /* + * acquire either mutex or spinlock to walk controller list + * acquire both when modifying list + */ + struct list_head cntrl_list; /* controllers list */ + struct mutex cntrl_mutex; /* mutex to 
walk/modify controllers list */ + spinlock_t cntrl_lock; /* lock to walk/modify controllers list */ + + void *ipc_log; +}; + +/* satellite IPC definitions */ +#define SAT_MAJOR_VERSION (1) +#define SAT_MINOR_VERSION (0) +#define SAT_RESERVED_SEQ_NUM (0xFFFF) +#define SAT_MSG_SIZE(n) (sizeof(struct sat_header) + \ + (n * sizeof(struct sat_tre))) +#define SAT_TRE_SIZE(msg_size) (msg_size - sizeof(struct sat_header)) +#define SAT_TRE_OFFSET(msg) (msg + sizeof(struct sat_header)) +#define SAT_TRE_NUM_PKTS(payload_size) ((payload_size) / sizeof(struct sat_tre)) + +/* satellite IPC msg type */ +enum sat_msg_id { + SAT_MSG_ID_ACK = 0xA, + SAT_MSG_ID_CMD = 0xC, + SAT_MSG_ID_EVT = 0xE, +}; + +/* satellite IPC context type */ +enum sat_ctxt_type { + SAT_CTXT_TYPE_CHAN = 0x0, + SAT_CTXT_TYPE_EVENT = 0x1, + SAT_CTXT_TYPE_MAX, +}; + +/* satellite IPC context string */ +#define TO_SAT_CTXT_TYPE_STR(type) (type >= SAT_CTXT_TYPE_MAX ? "INVALID" : \ + sat_ctxt_str[type]) + +const char * const sat_ctxt_str[SAT_CTXT_TYPE_MAX] = { + [SAT_CTXT_TYPE_CHAN] = "CCA", + [SAT_CTXT_TYPE_EVENT] = "ECA", +}; + +/* satellite IPC transfer ring element */ +struct __packed sat_tre { + u64 ptr; + u32 dword[2]; +}; + +/* satellite IPC header */ +struct __packed sat_header { + u16 major_ver; + u16 minor_ver; + u16 msg_id; + u16 seq; + u16 reply_seq; + u16 payload_size; + u32 dev_id; + u8 reserved[8]; +}; + +/* satellite driver definitions */ +struct mhi_sat_packet { + struct list_head node; + + struct mhi_sat_cntrl *cntrl; /* satellite controller reference */ + void *msg; /* incoming message */ +}; + +struct mhi_sat_cntrl { + struct list_head node; + + struct mhi_controller *mhi_cntrl; /* device MHI controller reference */ + struct mhi_sat_subsys *subsys; + + struct list_head dev_list; + struct list_head addr_map_list; /* IOMMU mapped addresses list */ + struct mutex list_mutex; /* mutex for devices and address map lists */ + + struct list_head packet_list; + spinlock_t pkt_lock; /* lock to walk/modify received packets list */ + + struct work_struct connect_work; /* subsystem connection worker */ + struct work_struct process_work; /* incoming packets processor */ + + /* mhi core/controller configurations */ + u32 dev_id; /* unique device ID with BDF as per connection topology */ + int er_base; /* event rings base index */ + int er_max; /* event rings max index */ + int num_er; /* total number of event rings */ + + /* satellite controller function counts */ + int num_devices; /* mhi devices current count */ + int max_devices; /* count of maximum devices for subsys/controller */ + u16 seq; /* internal sequence number for all outgoing packets */ + bool active; /* flag set if hello packet/MHI_CFG event was sent */ + + /* command completion variables */ + u16 last_cmd_seq; /* sequence number of last sent command packet */ + enum mhi_ev_ccs last_cmd_ccs; /* last command completion event code */ + struct completion completion; /* command completion event wait */ + struct mutex cmd_wait_mutex; /* command completion wait mutex */ +}; + +struct mhi_sat_device { + struct list_head node; + + struct mhi_device *mhi_dev; /* mhi device pointer */ + struct mhi_sat_cntrl *cntrl; /* parent controller */ + + bool chan_started; +}; + +struct mhi_sat_driver { + enum MHI_DEBUG_LEVEL ipc_log_lvl; /* IPC log level */ + enum MHI_DEBUG_LEVEL klog_lvl; /* klog/dmesg levels */ + + struct mhi_sat_subsys *subsys; /* pointer to subsystem array */ + unsigned int num_subsys; + + struct dentry *dentry; /* debugfs directory */ + bool deferred_init_done; /* 
flag for deferred init protection */ +}; + +static struct mhi_sat_driver mhi_sat_driver; + +static struct mhi_sat_subsys *find_subsys_by_name(const char *name) +{ + int i; + struct mhi_sat_subsys *subsys = mhi_sat_driver.subsys; + + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + if (!strcmp(name, subsys->name)) + return subsys; + } + + return NULL; +} + +static struct mhi_sat_cntrl *find_sat_cntrl_by_id(struct mhi_sat_subsys *subsys, + u32 dev_id) +{ + struct mhi_sat_cntrl *sat_cntrl; + unsigned long flags; + + spin_lock_irqsave(&subsys->cntrl_lock, flags); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) { + if (sat_cntrl->dev_id == dev_id) { + spin_unlock_irqrestore(&subsys->cntrl_lock, flags); + return sat_cntrl; + } + } + spin_unlock_irqrestore(&subsys->cntrl_lock, flags); + + return NULL; +} + +static struct mhi_sat_device *find_sat_dev_by_id( + struct mhi_sat_cntrl *sat_cntrl, int id, + enum sat_ctxt_type evt) +{ + struct mhi_sat_device *sat_dev; + int compare_id; + + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry(sat_dev, &sat_cntrl->dev_list, node) { + compare_id = (evt == SAT_CTXT_TYPE_EVENT) ? + sat_dev->mhi_dev->dl_event_id : + sat_dev->mhi_dev->dl_chan_id; + + if (compare_id == id) { + mutex_unlock(&sat_cntrl->list_mutex); + return sat_dev; + } + } + mutex_unlock(&sat_cntrl->list_mutex); + + return NULL; +} + +static bool mhi_sat_isvalid_header(struct sat_header *hdr, int len) +{ + /* validate payload size */ + if (len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr))) + return false; + + /* validate SAT IPC version */ + if (hdr->major_ver != SAT_MAJOR_VERSION && + hdr->minor_ver != SAT_MINOR_VERSION) + return false; + + /* validate msg ID */ + if (hdr->msg_id != SAT_MSG_ID_CMD && hdr->msg_id != SAT_MSG_ID_EVT) + return false; + + return true; +} + +static int mhi_sat_wait_cmd_completion(struct mhi_sat_cntrl *sat_cntrl) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + int ret; + + reinit_completion(&sat_cntrl->completion); + + MHI_SAT_LOG("Wait for command completion\n"); + ret = wait_for_completion_timeout(&sat_cntrl->completion, + msecs_to_jiffies(sat_cntrl->mhi_cntrl->timeout_ms)); + if (!ret || sat_cntrl->last_cmd_ccs != MHI_EV_CC_SUCCESS) { + MHI_SAT_ERR("Command completion failure:seq:%u:ret:%d:ccs:%d\n", + sat_cntrl->last_cmd_seq, ret, sat_cntrl->last_cmd_ccs); + return -EIO; + } + + MHI_SAT_LOG("Command completion successful for seq:%u\n", + sat_cntrl->last_cmd_seq); + + return 0; +} + +static int mhi_sat_send_msg(struct mhi_sat_cntrl *sat_cntrl, + enum sat_msg_id type, u16 reply_seq, + void *msg, u16 msg_size) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct sat_header *hdr = msg; + + /* create sequence number for controller */ + sat_cntrl->seq++; + if (sat_cntrl->seq == SAT_RESERVED_SEQ_NUM) + sat_cntrl->seq = 0; + + /* populate header */ + hdr->major_ver = SAT_MAJOR_VERSION; + hdr->minor_ver = SAT_MINOR_VERSION; + hdr->msg_id = type; + hdr->seq = sat_cntrl->seq; + hdr->reply_seq = reply_seq; + hdr->payload_size = SAT_TRE_SIZE(msg_size); + hdr->dev_id = sat_cntrl->dev_id; + + /* save last sent command sequence number for completion event */ + if (type == SAT_MSG_ID_CMD) + sat_cntrl->last_cmd_seq = sat_cntrl->seq; + + return rpmsg_send(subsys->rpdev->ept, msg, msg_size); +} + +static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl, + struct sat_header *hdr, struct sat_tre *pkt) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + int num_pkts = SAT_TRE_NUM_PKTS(hdr->payload_size), 
i; + + for (i = 0; i < num_pkts; i++, pkt++) { + enum mhi_ev_ccs code = MHI_EV_CC_INVALID; + + switch (MHI_TRE_GET_TYPE(pkt)) { + case MHI_PKT_TYPE_IOMMU_MAP_CMD: + { + struct mhi_buf *buf; + struct mhi_controller *mhi_cntrl = sat_cntrl->mhi_cntrl; + dma_addr_t iova = DMA_ERROR_CODE; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + goto iommu_map_cmd_completion; + + buf->phys_addr = MHI_TRE_GET_PTR(pkt); + buf->len = MHI_TRE_GET_SIZE(pkt); + + iova = dma_map_resource(mhi_cntrl->dev, buf->phys_addr, + buf->len, DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(mhi_cntrl->dev, iova)) { + kfree(buf); + goto iommu_map_cmd_completion; + } + + buf->dma_addr = iova; + + mutex_lock(&sat_cntrl->list_mutex); + list_add_tail(&buf->node, + &sat_cntrl->addr_map_list); + mutex_unlock(&sat_cntrl->list_mutex); + + code = MHI_EV_CC_SUCCESS; + +iommu_map_cmd_completion: + MHI_SAT_LOG("IOMMU MAP 0x%llx CMD processing %s\n", + MHI_TRE_GET_PTR(pkt), + (code == MHI_EV_CC_SUCCESS) ? "successful" : + "failed"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(iova); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_CTXT_UPDATE_CMD: + { + u64 ctxt_ptr = MHI_TRE_GET_PTR(pkt); + u64 ctxt_size = MHI_TRE_GET_SIZE(pkt); + int id = MHI_TRE_GET_ID(pkt); + enum sat_ctxt_type evt = MHI_TRE_IS_ER_CTXT_TYPE(pkt); + struct mhi_generic_ctxt gen_ctxt; + struct mhi_buf buf; + struct mhi_sat_device *sat_dev = find_sat_dev_by_id( + sat_cntrl, id, evt); + int ret; + + MHI_SAT_ASSERT(!sat_dev, + "No device with given chan/evt ID"); + + memset(&gen_ctxt, 0, sizeof(gen_ctxt)); + memset(&buf, 0, sizeof(buf)); + + gen_ctxt.type = MHI_CTXT_TYPE_GENERIC; + gen_ctxt.ctxt_base = ctxt_ptr; + gen_ctxt.ctxt_size = ctxt_size; + + buf.buf = &gen_ctxt; + buf.len = sizeof(gen_ctxt); + buf.name = TO_SAT_CTXT_TYPE_STR(evt); + + ret = mhi_device_configure(sat_dev->mhi_dev, + DMA_BIDIRECTIONAL, &buf, 1); + if (!ret) + code = MHI_EV_CC_SUCCESS; + + MHI_SAT_LOG("CTXT UPDATE CMD %s:%d processing %s\n", + buf.name, id, (code == MHI_EV_CC_SUCCESS) ? + "successful" : "failed"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_START_CHAN_CMD: + { + int id = MHI_TRE_GET_ID(pkt); + struct mhi_sat_device *sat_dev = find_sat_dev_by_id( + sat_cntrl, id, + SAT_CTXT_TYPE_CHAN); + int ret; + + MHI_SAT_ASSERT(!sat_dev, + "No device with given channel ID\n"); + + MHI_SAT_ASSERT(sat_dev->chan_started, + "Channel already started!"); + + ret = mhi_prepare_for_transfer(sat_dev->mhi_dev); + if (!ret) { + sat_dev->chan_started = true; + code = MHI_EV_CC_SUCCESS; + } + + MHI_SAT_LOG("START CHANNEL %d CMD processing %s\n", + id, (code == MHI_EV_CC_SUCCESS) ? 
"successful" : + "failure"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_RESET_CHAN_CMD: + { + int id = MHI_TRE_GET_ID(pkt); + struct mhi_sat_device *sat_dev = + find_sat_dev_by_id(sat_cntrl, id, + SAT_CTXT_TYPE_CHAN); + + MHI_SAT_ASSERT(!sat_dev, + "No device with given channel ID\n"); + + MHI_SAT_ASSERT(!sat_dev->chan_started, + "Resetting unstarted channel!"); + + mhi_unprepare_from_transfer(sat_dev->mhi_dev); + sat_dev->chan_started = false; + + MHI_SAT_LOG( + "RESET CHANNEL %d CMD processing successful\n", + id); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0( + MHI_EV_CC_SUCCESS); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + default: + MHI_SAT_ASSERT(1, "Unhandled command!"); + break; + } + } +} + +static void mhi_sat_process_worker(struct work_struct *work) +{ + struct mhi_sat_cntrl *sat_cntrl = container_of(work, + struct mhi_sat_cntrl, process_work); + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct mhi_sat_packet *packet, *tmp; + struct sat_header *hdr; + struct sat_tre *pkt; + LIST_HEAD(head); + + MHI_SAT_LOG("Entered\n"); + + spin_lock_irq(&sat_cntrl->pkt_lock); + list_splice_tail_init(&sat_cntrl->packet_list, &head); + spin_unlock_irq(&sat_cntrl->pkt_lock); + + list_for_each_entry_safe(packet, tmp, &head, node) { + hdr = packet->msg; + pkt = SAT_TRE_OFFSET(packet->msg); + + list_del(&packet->node); + + mhi_sat_process_cmds(sat_cntrl, hdr, pkt); + + /* send response event(s) */ + mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT, hdr->seq, + packet->msg, + SAT_MSG_SIZE(SAT_TRE_NUM_PKTS( + hdr->payload_size))); + + kfree(packet); + } + + MHI_SAT_LOG("Exited\n"); +} + +static void mhi_sat_connect_worker(struct work_struct *work) +{ + struct mhi_sat_cntrl *sat_cntrl = container_of(work, + struct mhi_sat_cntrl, connect_work); + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct sat_tre *pkt; + void *msg; + int ret; + + if (!subsys->rpdev || sat_cntrl->max_devices != sat_cntrl->num_devices + || sat_cntrl->active) + return; + + MHI_SAT_LOG("Entered\n"); + + msg = kmalloc(SAT_MSG_SIZE(3), GFP_ATOMIC); + if (!msg) + return; + + sat_cntrl->active = true; + + pkt = SAT_TRE_OFFSET(msg); + + /* prepare #1 MHI_CFG HELLO event */ + pkt->ptr = MHI_TRE_EVT_CFG_PTR(sat_cntrl->mhi_cntrl->base_addr); + pkt->dword[0] = MHI_TRE_EVT_CFG_D0(sat_cntrl->er_base, + sat_cntrl->num_er); + pkt->dword[1] = MHI_TRE_EVT_CFG_D1; + pkt++; + + /* prepare M0 event */ + pkt->ptr = MHI_TRE_EVT_MHI_STATE_PTR; + pkt->dword[0] = MHI_TRE_EVT_MHI_STATE_D0(MHI_STATE_M0); + pkt->dword[1] = MHI_TRE_EVT_MHI_STATE_D1; + pkt++; + + /* prepare AMSS event */ + pkt->ptr = MHI_TRE_EVT_EE_PTR; + pkt->dword[0] = MHI_TRE_EVT_EE_D0(MHI_EE_AMSS); + pkt->dword[1] = MHI_TRE_EVT_EE_D1; + + ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT, SAT_RESERVED_SEQ_NUM, + msg, SAT_MSG_SIZE(3)); + kfree(msg); + if (ret) { + MHI_SAT_ERR("Failed to send hello packet:%d\n", ret); + sat_cntrl->active = false; + return; + } + + MHI_SAT_LOG("Device 0x%x sent hello packet\n", sat_cntrl->dev_id); +} + +static void mhi_sat_process_events(struct mhi_sat_cntrl *sat_cntrl, + struct sat_header *hdr, struct sat_tre *pkt) +{ + int num_pkts = SAT_TRE_NUM_PKTS(hdr->payload_size); + int i; + + for (i = 0; i < num_pkts; i++, pkt++) { + if (MHI_TRE_GET_TYPE(pkt) == + MHI_PKT_TYPE_CMD_COMPLETION_EVENT) { + if (hdr->reply_seq != 
sat_cntrl->last_cmd_seq) + continue; + + sat_cntrl->last_cmd_ccs = MHI_TRE_GET_CCS(pkt); + complete(&sat_cntrl->completion); + } + } +} + +static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len, + void *priv, u32 src) +{ + struct mhi_sat_subsys *subsys = dev_get_drvdata(&rpdev->dev); + struct sat_header *hdr = data; + struct sat_tre *pkt = SAT_TRE_OFFSET(data); + struct mhi_sat_cntrl *sat_cntrl; + struct mhi_sat_packet *packet; + + MHI_SAT_ASSERT(!mhi_sat_isvalid_header(hdr, len), "Invalid header!\n"); + + /* find controller packet was sent for */ + sat_cntrl = find_sat_cntrl_by_id(subsys, hdr->dev_id); + + MHI_SAT_ASSERT(!sat_cntrl, "Packet for unknown device!\n"); + + /* handle events directly regardless of controller active state */ + if (hdr->msg_id == SAT_MSG_ID_EVT) { + mhi_sat_process_events(sat_cntrl, hdr, pkt); + return 0; + } + + /* Inactive controller cannot process incoming commands */ + if (unlikely(!sat_cntrl->active)) { + MHI_SAT_ERR("Message for inactive controller!\n"); + return 0; + } + + /* offload commands to process worker */ + packet = kmalloc(sizeof(*packet) + len, GFP_ATOMIC); + if (!packet) + return 0; + + packet->cntrl = sat_cntrl; + packet->msg = packet + 1; + memcpy(packet->msg, data, len); + + spin_lock_irq(&sat_cntrl->pkt_lock); + list_add_tail(&packet->node, &sat_cntrl->packet_list); + spin_unlock_irq(&sat_cntrl->pkt_lock); + + schedule_work(&sat_cntrl->process_work); + + return 0; +} + +static void mhi_sat_rpmsg_remove(struct rpmsg_device *rpdev) +{ + struct mhi_sat_subsys *subsys = dev_get_drvdata(&rpdev->dev); + struct mhi_sat_cntrl *sat_cntrl; + struct mhi_sat_device *sat_dev; + struct mhi_buf *buf, *tmp; + + MHI_SUBSYS_LOG("Enter\n"); + + /* unprepare each controller/device from transfer */ + mutex_lock(&subsys->cntrl_mutex); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) { + sat_cntrl->active = false; + + flush_work(&sat_cntrl->connect_work); + flush_work(&sat_cntrl->process_work); + + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry(sat_dev, &sat_cntrl->dev_list, node) { + if (sat_dev->chan_started) { + mhi_unprepare_from_transfer(sat_dev->mhi_dev); + sat_dev->chan_started = false; + } + } + + list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, + node) { + dma_unmap_resource(sat_cntrl->mhi_cntrl->dev, + buf->dma_addr, buf->len, + DMA_BIDIRECTIONAL, 0); + list_del(&buf->node); + kfree(buf); + } + mutex_unlock(&sat_cntrl->list_mutex); + + MHI_SAT_LOG("Removed RPMSG link\n"); + } + mutex_unlock(&subsys->cntrl_mutex); + + subsys->rpdev = NULL; +} + +static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev) +{ + struct mhi_sat_subsys *subsys; + struct mhi_sat_cntrl *sat_cntrl; + const char *subsys_name; + int ret; + + ret = of_property_read_string(rpdev->dev.parent->of_node, "label", + &subsys_name); + if (ret) + return ret; + + /* find which subsystem has probed */ + subsys = find_subsys_by_name(subsys_name); + if (!subsys) + return -EINVAL; + + MHI_SUBSYS_LOG("Received RPMSG probe\n"); + + dev_set_drvdata(&rpdev->dev, subsys); + + subsys->rpdev = rpdev; + + /* schedule work for each controller as GLINK has connected */ + spin_lock_irq(&subsys->cntrl_lock); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) + schedule_work(&sat_cntrl->connect_work); + spin_unlock_irq(&subsys->cntrl_lock); + + return 0; +} + +static struct rpmsg_device_id mhi_sat_rpmsg_match_table[] = { + { .name = "mhi_sat" }, + { }, +}; + +static struct rpmsg_driver mhi_sat_rpmsg_driver = { + .id_table = 
mhi_sat_rpmsg_match_table, + .probe = mhi_sat_rpmsg_probe, + .remove = mhi_sat_rpmsg_remove, + .callback = mhi_sat_rpmsg_cb, + .drv = { + .name = "mhi,sat_rpmsg", + }, +}; + +static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev, + enum MHI_CB mhi_cb) +{ +} + +static void mhi_sat_dev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_sat_device *sat_dev = mhi_device_get_devdata(mhi_dev); + struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl; + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct mhi_buf *buf, *tmp; + struct sat_tre *pkt; + void *msg; + int ret; + + /* remove device node from probed list */ + mutex_lock(&sat_cntrl->list_mutex); + list_del(&sat_dev->node); + mutex_unlock(&sat_cntrl->list_mutex); + + sat_cntrl->num_devices--; + + /* prepare SYS_ERR command if first device is being removed */ + if (sat_cntrl->active) { + sat_cntrl->active = false; + + /* flush all pending work */ + flush_work(&sat_cntrl->connect_work); + flush_work(&sat_cntrl->process_work); + + msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL); + + MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n"); + + pkt = SAT_TRE_OFFSET(msg); + pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR; + pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0; + pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1; + + /* acquire cmd_wait_mutex before sending command */ + mutex_lock(&sat_cntrl->cmd_wait_mutex); + + ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD, + SAT_RESERVED_SEQ_NUM, msg, + SAT_MSG_SIZE(1)); + kfree(msg); + if (ret) { + MHI_SAT_ERR("Failed to notify SYS_ERR\n"); + mutex_unlock(&sat_cntrl->cmd_wait_mutex); + goto exit_sys_err_send; + } + + MHI_SAT_LOG("SYS_ERR command sent\n"); + + /* blocking call to wait for command completion event */ + mhi_sat_wait_cmd_completion(sat_cntrl); + + mutex_unlock(&sat_cntrl->cmd_wait_mutex); + } + +exit_sys_err_send: + /* exit if some devices are still present */ + if (sat_cntrl->num_devices) + return; + + /* remove address mappings */ + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) { + dma_unmap_resource(sat_cntrl->mhi_cntrl->dev, buf->dma_addr, + buf->len, DMA_BIDIRECTIONAL, 0); + list_del(&buf->node); + kfree(buf); + } + mutex_unlock(&sat_cntrl->list_mutex); + + /* remove controller */ + mutex_lock(&subsys->cntrl_mutex); + spin_lock_irq(&subsys->cntrl_lock); + list_del(&sat_cntrl->node); + spin_unlock_irq(&subsys->cntrl_lock); + mutex_unlock(&subsys->cntrl_mutex); + + mutex_destroy(&sat_cntrl->cmd_wait_mutex); + mutex_destroy(&sat_cntrl->list_mutex); + MHI_SAT_LOG("Satellite controller node removed\n"); + kfree(sat_cntrl); +} + +static int mhi_sat_dev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_sat_device *sat_dev; + struct mhi_sat_cntrl *sat_cntrl; + struct device_node *of_node = mhi_dev->dev.of_node; + struct mhi_sat_subsys *subsys = &mhi_sat_driver.subsys[id->driver_data]; + u32 dev_id = MHI_SAT_CREATE_DEVICE_ID(mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot); + int ret; + + /* find controller with unique device ID based on topology */ + sat_cntrl = find_sat_cntrl_by_id(subsys, dev_id); + if (!sat_cntrl) { + sat_cntrl = kzalloc(sizeof(*sat_cntrl), GFP_KERNEL); + if (!sat_cntrl) + return -ENOMEM; + + /* + * max_devices will be read from device tree node. Set it to + * -1 before it is populated to avoid false positive when + * RPMSG probe schedules connect worker but no device has + * probed in which case num_devices and max_devices are both + * zero. 
+ */ + sat_cntrl->max_devices = -1; + sat_cntrl->dev_id = dev_id; + sat_cntrl->er_base = mhi_dev->dl_event_id; + sat_cntrl->mhi_cntrl = mhi_dev->mhi_cntrl; + sat_cntrl->last_cmd_seq = SAT_RESERVED_SEQ_NUM; + sat_cntrl->subsys = subsys; + init_completion(&sat_cntrl->completion); + mutex_init(&sat_cntrl->list_mutex); + mutex_init(&sat_cntrl->cmd_wait_mutex); + spin_lock_init(&sat_cntrl->pkt_lock); + INIT_WORK(&sat_cntrl->connect_work, mhi_sat_connect_worker); + INIT_WORK(&sat_cntrl->process_work, mhi_sat_process_worker); + INIT_LIST_HEAD(&sat_cntrl->dev_list); + INIT_LIST_HEAD(&sat_cntrl->addr_map_list); + INIT_LIST_HEAD(&sat_cntrl->packet_list); + + mutex_lock(&subsys->cntrl_mutex); + spin_lock_irq(&subsys->cntrl_lock); + list_add(&sat_cntrl->node, &subsys->cntrl_list); + spin_unlock_irq(&subsys->cntrl_lock); + mutex_unlock(&subsys->cntrl_mutex); + + MHI_SAT_LOG("Controller allocated for 0x%x\n", dev_id); + } + + /* set maximum devices for subsystem from device tree */ + if (of_node) { + ret = of_property_read_u32(of_node, "mhi,max-devices", + &sat_cntrl->max_devices); + if (ret) { + MHI_SAT_ERR("Could not find max-devices in DT node\n"); + return -EINVAL; + } + } + + /* get event ring base and max indexes */ + sat_cntrl->er_base = min(sat_cntrl->er_base, mhi_dev->dl_event_id); + sat_cntrl->er_max = max(sat_cntrl->er_base, mhi_dev->dl_event_id); + + sat_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*sat_dev), GFP_KERNEL); + if (!sat_dev) + return -ENOMEM; + + sat_dev->mhi_dev = mhi_dev; + sat_dev->cntrl = sat_cntrl; + + mutex_lock(&sat_cntrl->list_mutex); + list_add(&sat_dev->node, &sat_cntrl->dev_list); + mutex_unlock(&sat_cntrl->list_mutex); + + mhi_device_set_devdata(mhi_dev, sat_dev); + + sat_cntrl->num_devices++; + + /* schedule connect worker if all devices for controller have probed */ + if (sat_cntrl->num_devices == sat_cntrl->max_devices) { + /* number of event rings is 1 more than difference in IDs */ + sat_cntrl->num_er = (sat_cntrl->er_max - sat_cntrl->er_base) + + 1; + MHI_SAT_LOG("All satellite channels probed!\n"); + schedule_work(&sat_cntrl->connect_work); + } + + return 0; +} + +/* .driver_data stores subsys id */ +static const struct mhi_device_id mhi_sat_dev_match_table[] = { + /* ADSP */ + { .chan = "ADSP_0", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_1", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_2", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_3", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_4", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_5", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_6", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_7", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_8", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_9", .driver_data = SUBSYS_ADSP }, + /* CDSP */ + { .chan = "CDSP_0", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_1", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_2", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_3", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_4", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_5", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_6", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_7", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_8", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_9", .driver_data = SUBSYS_CDSP }, + /* SLPI */ + { .chan = "SLPI_0", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_1", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_2", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_3", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_4", .driver_data = 
SUBSYS_SLPI }, + { .chan = "SLPI_5", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_6", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_7", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_8", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_9", .driver_data = SUBSYS_SLPI }, + /* MODEM */ + { .chan = "MODEM_0", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_1", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_2", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_3", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_4", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_5", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_6", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_7", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_8", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_9", .driver_data = SUBSYS_MODEM }, + {}, +}; + +static struct mhi_driver mhi_sat_dev_driver = { + .id_table = mhi_sat_dev_match_table, + .probe = mhi_sat_dev_probe, + .remove = mhi_sat_dev_remove, + .status_cb = mhi_sat_dev_status_cb, + .driver = { + .name = MHI_SAT_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_sat_trigger_init(void *data, u64 val) +{ + struct mhi_sat_subsys *subsys; + int i, ret; + + if (mhi_sat_driver.deferred_init_done) + return -EIO; + + ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver); + if (ret) + goto error_sat_trigger_init; + + ret = mhi_driver_register(&mhi_sat_dev_driver); + if (ret) + goto error_sat_trigger_register; + + mhi_sat_driver.deferred_init_done = true; + + return 0; + +error_sat_trigger_register: + unregister_rpmsg_driver(&mhi_sat_rpmsg_driver); + +error_sat_trigger_init: + subsys = mhi_sat_driver.subsys; + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + ipc_log_context_destroy(subsys->ipc_log); + mutex_destroy(&subsys->cntrl_mutex); + } + kfree(mhi_sat_driver.subsys); + mhi_sat_driver.subsys = NULL; + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL, + mhi_sat_trigger_init, "%llu\n"); + +static int mhi_sat_init(void) +{ + struct mhi_sat_subsys *subsys; + int i, ret; + + subsys = kcalloc(SUBSYS_MAX, sizeof(*subsys), GFP_KERNEL); + if (!subsys) + return -ENOMEM; + + mhi_sat_driver.subsys = subsys; + mhi_sat_driver.num_subsys = SUBSYS_MAX; + mhi_sat_driver.klog_lvl = KLOG_LVL; + mhi_sat_driver.ipc_log_lvl = IPC_LOG_LVL; + + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + char log[32]; + + subsys->name = subsys_names[i]; + mutex_init(&subsys->cntrl_mutex); + spin_lock_init(&subsys->cntrl_lock); + INIT_LIST_HEAD(&subsys->cntrl_list); + scnprintf(log, sizeof(log), "mhi_sat_%s", subsys->name); + subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0); + } + + /* create debugfs entry if defer_init is enabled */ + if (mhi_sat_defer_init) { + mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL); + if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) { + ret = -ENODEV; + goto error_sat_init; + } + + debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL, + &mhi_sat_debugfs_fops); + + return 0; + } + + ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver); + if (ret) + goto error_sat_init; + + ret = mhi_driver_register(&mhi_sat_dev_driver); + if (ret) + goto error_sat_register; + + return 0; + +error_sat_register: + unregister_rpmsg_driver(&mhi_sat_rpmsg_driver); + +error_sat_init: + subsys = mhi_sat_driver.subsys; + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + ipc_log_context_destroy(subsys->ipc_log); + mutex_destroy(&subsys->cntrl_mutex); + } + kfree(mhi_sat_driver.subsys); + 
mhi_sat_driver.subsys = NULL; + + return ret; +} + +module_init(mhi_sat_init); + diff --git a/driver/quectel_MHI/src/devices/mhi_uci.c b/driver/quectel_MHI/src/devices/mhi_uci.c new file mode 100644 index 0000000..474b022 --- /dev/null +++ b/driver/quectel_MHI/src/devices/mhi_uci.c @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#if 1 +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define DEVICE_NAME "mhi" +#define MHI_UCI_DRIVER_NAME "mhi_uci_q" + +struct uci_chan { + wait_queue_head_t wq; + spinlock_t lock; + struct list_head pending; /* user space waiting to read */ + struct uci_buf *cur_buf; /* current buffer user space reading */ + size_t rx_size; +}; + +struct uci_buf { + struct page *page; + void *data; + size_t len; + unsigned nr_trb; + struct list_head node; +}; + +struct uci_dev { + struct list_head node; + dev_t devt; + struct device *dev; + struct mhi_device *mhi_dev; + const char *chan; + struct mutex mutex; /* sync open and close */ + struct mutex r_mutex; + struct mutex w_mutex; + struct uci_chan ul_chan; + struct uci_chan dl_chan; + size_t mtu; + int ref_count; + bool enabled; + unsigned rx_error; + unsigned nr_trb; + unsigned nr_trbs; + struct uci_buf *uci_buf; + struct ktermios termios; + size_t bytes_xferd; +}; + +struct mhi_uci_drv { + struct list_head head; + struct mutex lock; + struct class *class; + int major; + dev_t dev_t; +}; + +static int uci_msg_lvl = MHI_MSG_LVL_ERROR; +module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR); + +#define MSG_VERB(fmt, ...) do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MAX_UCI_DEVICES (64) +#define QUEC_MHI_UCI_ALWAYS_OPEN //by now, sdx20 can not handle "start-reset-start" operation, so the simply solution is keep start state + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) +#ifdef TCGETS2 +__weak int user_termios_to_kernel_termios(struct ktermios *k, + struct termios2 __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)); +} +__weak int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)); +} +__weak int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} +__weak int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} + +#else + +__weak int user_termios_to_kernel_termios(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} +__weak int kernel_termios_to_user_termios(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#endif /* TCGETS2 */ +#endif + +static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES); +static struct mhi_uci_drv mhi_uci_drv; + +static int mhi_queue_inbound(struct uci_dev *uci_dev) +{ + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + size_t mtu = uci_dev->mtu; + void *buf; + struct uci_buf *uci_buf; + int ret = -EIO, i; + + if (uci_dev->uci_buf == NULL) { + uci_dev->nr_trb = 0; + uci_dev->nr_trbs = (nr_trbs + 1); + uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL); + if (!uci_dev->uci_buf) + return -ENOMEM; + + uci_buf = uci_dev->uci_buf; + for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) { + uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu)); + if (!uci_buf->page) + return -ENOMEM; + uci_buf->data = page_address(uci_buf->page); + uci_buf->len = 0; + uci_buf->nr_trb = i; + if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) { + //MSG_ERR("[%d] = %p\n", i, uci_buf->data); + } + } + } + + for (i = 0; i < nr_trbs; i++) { + #if 0 + buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + uci_buf = buf + mtu; + uci_buf->data = buf; + #else + uci_buf = &uci_dev->uci_buf[i]; + buf = uci_buf->data; + #endif + + MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu); + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, + MHI_EOT); + if (ret) { + #if 0 + kfree(buf); + #endif + MSG_ERR("Failed to queue buffer %d\n", i); + return ret; + } + } + + return ret; +} + +static long mhi_uci_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + long ret = -ERESTARTSYS; + + mutex_lock(&uci_dev->mutex); + if (uci_dev->enabled) + ret = mhi_ioctl(mhi_dev, cmd, arg); + + if (uci_dev->enabled) { + switch (cmd) { + case TCGETS: +#ifndef TCGETS2 + ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios); +#else + ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios); +#endif + break; + + case TCSETSF: + case TCSETS: +#ifndef TCGETS2 + ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg); +#else + ret = 
user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg); +#endif + break; + + case TCFLSH: + ret = 0; + break; + + default: + break; + } + } + mutex_unlock(&uci_dev->mutex); + + return ret; +} + +static int mhi_uci_release(struct inode *inode, struct file *file) +{ + struct uci_dev *uci_dev = file->private_data; + + mutex_lock(&uci_dev->mutex); + uci_dev->ref_count--; + if (!uci_dev->ref_count) { + struct uci_chan *uci_chan; + + MSG_LOG("Last client left, closing node\n"); + + if (uci_dev->enabled) + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + + /* clean inbound channel */ + uci_chan = &uci_dev->dl_chan; + if (uci_dev->uci_buf) { + unsigned nr_trb = 0; + + for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) { + if (uci_dev->uci_buf[nr_trb].page) + __free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu)); + } + kfree(uci_dev->uci_buf); + } + + uci_chan->cur_buf = NULL; + + if (!uci_dev->enabled) { + MSG_LOG("Node is deleted, freeing dev node\n"); + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + return 0; + } + } + + MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count); + + mutex_unlock(&uci_dev->mutex); + + return 0; +} + +static unsigned int mhi_uci_poll(struct file *file, poll_table *wait) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan; + unsigned int mask = 0; + + poll_wait(file, &uci_dev->dl_chan.wq, wait); + // ADPL and QDSS do not need poll write. xingduo.du 2023-02-16 + // poll_wait(file, &uci_dev->ul_chan.wq, wait); + + uci_chan = &uci_dev->dl_chan; + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + mask = POLLERR; + } else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) { + MSG_VERB("Client can read from node\n"); + mask |= POLLIN | POLLRDNORM; + } + spin_unlock_bh(&uci_chan->lock); + + // ADPL and QDSS are single channel, ul_chan not be initilized. 
xingduo.du 2023-02-27 + if (mhi_dev->ul_chan) { + poll_wait(file, &uci_dev->ul_chan.wq, wait); + uci_chan = &uci_dev->ul_chan; + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + mask |= POLLERR; + } else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) { + MSG_VERB("Client can write to node\n"); + mask |= POLLOUT | POLLWRNORM; + } + + if (!uci_dev->enabled) + mask |= POLLHUP; + if (uci_dev->rx_error) + mask |= POLLERR; + + spin_unlock_bh(&uci_chan->lock); + } + + MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask); + + return mask; +} + +static ssize_t mhi_uci_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan = &uci_dev->ul_chan; + size_t bytes_xfered = 0; + int ret, nr_avail; + + if (!buf || !count || uci_dev->rx_error) + return -EINVAL; + + /* confirm channel is active */ + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + spin_unlock_bh(&uci_chan->lock); + return -ERESTARTSYS; + } + + MSG_VERB("Enter: to xfer:%zu bytes\n", count); + + while (count) { + size_t xfer_size; + void *kbuf; + enum MHI_FLAGS flags; + + spin_unlock_bh(&uci_chan->lock); + + nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE); + if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + /* wait for free descriptors */ + ret = wait_event_interruptible(uci_chan->wq, + (!uci_dev->enabled) || + (nr_avail = mhi_get_no_free_descriptors(mhi_dev, + DMA_TO_DEVICE)) > 0); + + if (ret == -ERESTARTSYS || !uci_dev->enabled) { + MSG_LOG("Exit signal caught for node or not enabled\n"); + return -ERESTARTSYS; + } + + xfer_size = min_t(size_t, count, uci_dev->mtu); + kbuf = kmalloc(xfer_size, GFP_KERNEL); + if (!kbuf) { + MSG_ERR("Failed to allocate memory %zu\n", xfer_size); + return -ENOMEM; + } + + ret = copy_from_user(kbuf, buf, xfer_size); + if (unlikely(ret)) { + kfree(kbuf); + return ret; + } + + spin_lock_bh(&uci_chan->lock); + + /* if ring is full after this force EOT */ + if (nr_avail > 1 && (count - xfer_size)) + flags = MHI_CHAIN; + else + flags = MHI_EOT; + + if (uci_dev->enabled) + ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf, + xfer_size, flags); + else + ret = -ERESTARTSYS; + + if (ret) { + kfree(kbuf); + goto sys_interrupt; + } + + bytes_xfered += xfer_size; + count -= xfer_size; + buf += xfer_size; + } + + spin_unlock_bh(&uci_chan->lock); + MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered); + + return bytes_xfered; + +sys_interrupt: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static ssize_t mhi_uci_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan = &uci_dev->dl_chan; + struct uci_buf *uci_buf; + char *ptr; + size_t to_copy; + int ret = 0; + + if (!buf || uci_dev->rx_error) + return -EINVAL; + + MSG_VERB("Client provided buf len:%zu\n", count); + + /* confirm channel is active */ + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + spin_unlock_bh(&uci_chan->lock); + return -ERESTARTSYS; + } + + /* No data available to read, wait */ + if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) { + MSG_VERB("No data available to read waiting\n"); + + spin_unlock_bh(&uci_chan->lock); + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(uci_chan->wq, + 
(!uci_dev->enabled || + !list_empty(&uci_chan->pending))); + if (ret == -ERESTARTSYS) { + MSG_LOG("Exit signal caught for node\n"); + return -ERESTARTSYS; + } + + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + MSG_LOG("node is disabled\n"); + ret = -ERESTARTSYS; + goto read_error; + } + } + + /* new read, get the next descriptor from the list */ + if (!uci_chan->cur_buf) { + uci_buf = list_first_entry_or_null(&uci_chan->pending, + struct uci_buf, node); + if (unlikely(!uci_buf)) { + ret = -EIO; + goto read_error; + } + + if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON1) { + dump_stack(); + ret = -EIO; + MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n", + mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb); + goto read_error; + } + + list_del(&uci_buf->node); + uci_chan->cur_buf = uci_buf; + uci_chan->rx_size = uci_buf->len; + MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size); + } + + uci_buf = uci_chan->cur_buf; + spin_unlock_bh(&uci_chan->lock); + + /* Copy the buffer to user space */ + to_copy = min_t(size_t, count, uci_chan->rx_size); + ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size); + ret = copy_to_user(buf, ptr, to_copy); + if (ret) + return ret; + + MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size); + uci_chan->rx_size -= to_copy; + + /* we finished with this buffer, queue it back to hardware */ + if (!uci_chan->rx_size) { + spin_lock_bh(&uci_chan->lock); + uci_chan->cur_buf = NULL; + + if (uci_dev->enabled) +#if 1 //this can make the address in ring do not change + { + if (uci_buf->page) { + unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1); + + uci_buf = &uci_dev->uci_buf[nr_trb]; + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, + uci_buf->data, uci_dev->mtu, + MHI_EOT); + } else { + kfree(uci_buf); + ret = 0; + } + } +#endif + else + ret = -ERESTARTSYS; + + if (ret) { + MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret); +#if 0 + kfree(uci_buf->data); +#endif + goto read_error; + } + + spin_unlock_bh(&uci_chan->lock); + } + + MSG_VERB("Returning %zu bytes\n", to_copy); + + return to_copy; + +read_error: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static ssize_t mhi_uci_write_mutex(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *uci_dev = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&uci_dev->w_mutex); /*concurrent writes */ + if (ret < 0) + return -ERESTARTSYS; + + ret = mhi_uci_write(file, buf, count, offp); + mutex_unlock(&uci_dev->w_mutex); + + return ret; +} + +static ssize_t mhi_uci_read_mutex(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct uci_dev *uci_dev = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&uci_dev->r_mutex); /*concurrent reads */ + if (ret < 0) + return -ERESTARTSYS; + + ret = mhi_uci_read(file, buf, count, ppos); + mutex_unlock(&uci_dev->r_mutex); + + return ret; +} + +static int mhi_uci_open(struct inode *inode, struct file *filp) +{ + struct uci_dev *uci_dev = NULL, *tmp_dev; + int ret = -EIO; + struct uci_chan *dl_chan; + + mutex_lock(&mhi_uci_drv.lock); + list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) { + if (tmp_dev->devt == inode->i_rdev) { + uci_dev = tmp_dev; + break; + } + } + + /* could not find a minor node */ + if (!uci_dev) + goto error_exit; + + mutex_lock(&uci_dev->mutex); + if (!uci_dev->enabled) { + MSG_ERR("Node exist, but not in active 
state!\n"); + goto error_open_chan; + } + + uci_dev->ref_count++; + + MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count); + + if (uci_dev->ref_count == 1) { + MSG_LOG("Starting channel\n"); + ret = mhi_prepare_for_transfer(uci_dev->mhi_dev); + if (ret) { + MSG_ERR("Error starting transfer channels\n"); + uci_dev->ref_count--; + goto error_open_chan; + } + + ret = mhi_queue_inbound(uci_dev); + if (ret) + goto error_rx_queue; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + uci_dev->ref_count++; +#endif + } + + filp->private_data = uci_dev; + mutex_unlock(&uci_dev->mutex); + mutex_unlock(&mhi_uci_drv.lock); + + return 0; + + error_rx_queue: + dl_chan = &uci_dev->dl_chan; + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + if (uci_dev->uci_buf) { + unsigned nr_trb = 0; + + for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) { + if (uci_dev->uci_buf[nr_trb].page) + __free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu)); + } + kfree(uci_dev->uci_buf); + } + + error_open_chan: + mutex_unlock(&uci_dev->mutex); + +error_exit: + mutex_unlock(&mhi_uci_drv.lock); + + return ret; +} + +static const struct file_operations mhidev_fops = { + .open = mhi_uci_open, + .release = mhi_uci_release, + .read = mhi_uci_read_mutex, + .write = mhi_uci_write_mutex, + .poll = mhi_uci_poll, + .unlocked_ioctl = mhi_uci_ioctl, +}; + +static void mhi_uci_remove(struct mhi_device *mhi_dev) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Enter\n"); + + + mutex_lock(&mhi_uci_drv.lock); + mutex_lock(&uci_dev->mutex); + + /* disable the node */ + spin_lock_irq(&uci_dev->dl_chan.lock); + spin_lock_irq(&uci_dev->ul_chan.lock); + uci_dev->enabled = false; + spin_unlock_irq(&uci_dev->ul_chan.lock); + spin_unlock_irq(&uci_dev->dl_chan.lock); + wake_up(&uci_dev->dl_chan.wq); + wake_up(&uci_dev->ul_chan.wq); + + /* delete the node to prevent new opens */ + device_destroy(mhi_uci_drv.class, uci_dev->devt); + uci_dev->dev = NULL; + list_del(&uci_dev->node); + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count > 0) + uci_dev->ref_count--; +#endif + + /* safe to free memory only if all file nodes are closed */ + if (!uci_dev->ref_count) { + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + mutex_unlock(&mhi_uci_drv.lock); + return; + } + + MSG_LOG("Exit\n"); + mutex_unlock(&uci_dev->mutex); + mutex_unlock(&mhi_uci_drv.lock); + +} + +static int mhi_uci_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct uci_dev *uci_dev; + int minor; + char node_name[32]; + int dir; + + uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL); + if (!uci_dev) + return -ENOMEM; + + mutex_init(&uci_dev->mutex); + mutex_init(&uci_dev->r_mutex); + mutex_init(&uci_dev->w_mutex); + uci_dev->mhi_dev = mhi_dev; + + minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES); + if (minor >= MAX_UCI_DEVICES) { + kfree(uci_dev); + return -ENOSPC; + } + + mutex_lock(&uci_dev->mutex); + mutex_lock(&mhi_uci_drv.lock); + + uci_dev->devt = MKDEV(mhi_uci_drv.major, minor); +#if 1 + if (mhi_dev->mhi_cntrl->cntrl_idx) + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s%d", + mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx); + else + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s", + mhi_dev->chan_name); +#else + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME 
"_%04x_%02u.%02u.%02u%s%d", + mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, "_pipe_", + mhi_dev->ul_chan_id); +#endif + + set_bit(minor, uci_minors); + + /* create debugging buffer */ + snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->ul_chan_id); + + for (dir = 0; dir < 2; dir++) { + struct uci_chan *uci_chan = (dir) ? + &uci_dev->ul_chan : &uci_dev->dl_chan; + spin_lock_init(&uci_chan->lock); + init_waitqueue_head(&uci_chan->wq); + INIT_LIST_HEAD(&uci_chan->pending); + } + + uci_dev->termios = tty_std_termios; + + uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); + mhi_device_set_devdata(mhi_dev, uci_dev); + uci_dev->enabled = true; + + list_add(&uci_dev->node, &mhi_uci_drv.head); + mutex_unlock(&mhi_uci_drv.lock); + mutex_unlock(&uci_dev->mutex); + + MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name); + + return 0; +}; + +static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + struct uci_chan *uci_chan = &uci_dev->ul_chan; + + MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status, + mhi_result->bytes_xferd); + + kfree(mhi_result->buf_addr); + if (!mhi_result->transaction_status) + wake_up(&uci_chan->wq); +} + +static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + struct uci_chan *uci_chan = &uci_dev->dl_chan; + unsigned long flags; + struct uci_buf *buf; + unsigned nr_trb = uci_dev->nr_trb; + + buf = &uci_dev->uci_buf[nr_trb]; + if (buf == NULL) { + MSG_ERR("buf = NULL"); + return; + } + if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr) + { + uci_dev->rx_error++; + MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n", + mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr); + return; + } + + uci_dev->nr_trb++; + if (uci_dev->nr_trb == uci_dev->nr_trbs) + uci_dev->nr_trb = 0; + + if (mhi_result->transaction_status == -ENOTCONN) { + return; + } + + if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0) + { + MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n", + mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu); + return; + } + if (mhi_result->bytes_xferd > uci_dev->bytes_xferd) + { + uci_dev->bytes_xferd = mhi_result->bytes_xferd; + //MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n", + // mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu); + } + + MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status, + mhi_result->bytes_xferd); + + spin_lock_irqsave(&uci_chan->lock, flags); +#if 0 + buf = mhi_result->buf_addr + uci_dev->mtu; + buf->data = mhi_result->buf_addr; +#endif + buf->len = mhi_result->bytes_xferd; + if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN + || mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN + || mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN) + { + struct uci_buf *tmp_buf = NULL; + int skip_buf = 0; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count == 1) + skip_buf++; +#endif + if (!skip_buf) + tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);; + + if (tmp_buf) { + tmp_buf->page = NULL; + tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf); + tmp_buf->len = buf->len; + memcpy(tmp_buf->data, buf->data, buf->len); + } + + if (buf) { + struct uci_buf *uci_buf = buf; + unsigned nr_trb = 
uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1); + + uci_buf = &uci_dev->uci_buf[nr_trb]; + mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT); + } + + buf = tmp_buf; + } + + if (buf) + list_add_tail(&buf->node, &uci_chan->pending); + spin_unlock_irqrestore(&uci_chan->lock, flags); + +#ifdef CONFIG_PM_SLEEP + if (mhi_dev->dev.power.wakeup) + __pm_wakeup_event(mhi_dev->dev.power.wakeup, 0); +#endif + + wake_up(&uci_chan->wq); +} + +// repaire sdx6x module can not read qdb file. xingduo.du 2023-01-18 +#define DIAG_MAX_PCIE_PKT_SZ 8192 //define by module + +/* .driver_data stores max mtu */ +static const struct mhi_device_id mhi_uci_match_table[] = { + { .chan = "LOOPBACK", .driver_data = 0x1000 }, + { .chan = "SAHARA", .driver_data = 0x4000 }, + { .chan = "EDL", .driver_data = 0x4000 }, + { .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ }, + { .chan = "MBIM", .driver_data = 0x1000 }, + { .chan = "QMI0", .driver_data = 0x1000 }, + { .chan = "QMI1", .driver_data = 0x1000 }, + { .chan = "DUN", .driver_data = 0x1000 }, +#ifdef ENABLE_ADPL + { .chan = "ADPL", .driver_data = 0x1000 }, +#endif +#ifdef ENABLE_QDSS + { .chan = "QDSS", .driver_data = 0x1000 }, +#endif + {}, +}; + +static struct mhi_driver mhi_uci_driver = { + .id_table = mhi_uci_match_table, + .remove = mhi_uci_remove, + .probe = mhi_uci_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = MHI_UCI_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_device_uci_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); + if (ret < 0) + return ret; + + mhi_uci_drv.major = ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)) + mhi_uci_drv.class = class_create(MHI_UCI_DRIVER_NAME); +#else + mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); +#endif + if (IS_ERR(mhi_uci_drv.class)) { + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_uci_drv.lock); + INIT_LIST_HEAD(&mhi_uci_drv.head); + + ret = mhi_driver_register(&mhi_uci_driver); + if (ret) { + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + } + + return ret; +} + +void mhi_device_uci_exit(void) +{ + mhi_driver_unregister(&mhi_uci_driver); + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); +} diff --git a/driver/quectel_MHI/src/log/AT_OVER_PCIE.txt b/driver/quectel_MHI/src/log/AT_OVER_PCIE.txt new file mode 100644 index 0000000..9f86ac3 --- /dev/null +++ b/driver/quectel_MHI/src/log/AT_OVER_PCIE.txt @@ -0,0 +1,31 @@ +root@imx6qsabresd:~# busybox microcom /dev/mhi_DUN +[ 384.652992] [I][mhi_uci_open] Node open, ref counts 1 +[ 384.658144] [I][mhi_uci_open] Starting channel +[ 384.662612] [I][__mhi_prepare_channel] Entered: preparing channel:32 +[ 384.680397] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 384.685890] [I][__mhi_prepare_channel] Chan:32 successfully moved to start state +[ 384.693312] [I][__mhi_prepare_channel] Entered: preparing channel:33 +[ 384.708692] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 384.714324] [I][__mhi_prepare_channel] Chan:33 successfully moved to start state + +RDY + ++CFUN: 1 + ++CPIN: READY + ++QUSIM: 1 + ++QIND: SMS DONE + ++QIND: PB DONE +ati +Quectel +EM20 +Revision: EM20GR01A01M4G + +OK +at+cpin? 
++CPIN: READY + +OK diff --git a/driver/quectel_MHI/src/log/MBIM_OVER_PCIE.txt b/driver/quectel_MHI/src/log/MBIM_OVER_PCIE.txt new file mode 100644 index 0000000..a91b738 --- /dev/null +++ b/driver/quectel_MHI/src/log/MBIM_OVER_PCIE.txt @@ -0,0 +1,145 @@ +root@OpenWrt:~# insmod pcie_mhi.ko mhi_mbim_enabled=1 +root@OpenWrt:~# dmesg | grep mhi +[ 65.587160] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6 +[ 65.597089] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 65.602250] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit] +[ 65.611690] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 65.619307] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63 +[ 65.619327] [I][mhi_power_up] dev_state:RESET +[ 65.619331] [I][mhi_async_power_up] Requested to power on +[ 65.619449] [I][mhi_alloc_coherent] size = 114688, dma_handle = 6fca0000 +[ 65.619462] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221e000 +[ 65.619731] [I][mhi_async_power_up] dev_state:RESET ee:AMSS +[ 65.619747] [I][mhi_pm_st_worker] Transition to state:READY +[ 65.619760] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 65.619764] [I][mhi_ready_state_transition] Waiting to enter READY state +[ 65.619885] [I][mhi_async_power_up] Power on setup success +[ 65.619897] [I][mhi_pci_probe] Return successful +[ 65.665114] [I][mhi_ready_state_transition] Device in READY State +[ 65.665125] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR +[ 65.665131] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE +[ 65.665133] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR +[ 65.665137] [I][mhi_init_mmio] Initializing MMIO +[ 65.665142] [I][mhi_init_mmio] CHDBOFF:0x300 +[ 65.665151] [I][mhi_init_mmio] ERDBOFF:0x700 +[ 65.665156] [I][mhi_init_mmio] Programming all MMIO values. 
+[ 65.786283] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2 +[ 65.786289] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0 +[ 65.786295] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR +[ 65.786300] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0 +[ 65.789734] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2 +[ 65.789739] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS +[ 65.789756] [I][mhi_pm_st_worker] Transition to state:MISSION MODE +[ 65.789767] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 65.789771] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition +[ 65.789787] [I][mhi_init_timesync] No timesync capability found +[ 65.789791] [I][mhi_pm_mission_mode_transition] Adding new devices +[ 65.790570] [I][mhi_dtr_probe] Enter for DTR control channel +[ 65.790577] [I][__mhi_prepare_channel] Entered: preparing channel:18 +[ 65.797036] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.797051] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state +[ 65.797055] [I][__mhi_prepare_channel] Entered: preparing channel:19 +[ 65.802457] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.802469] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state +[ 65.802485] [I][mhi_dtr_probe] Exit with ret:0 +[ 65.802748] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 65.802772] [I][__mhi_prepare_channel] Entered: preparing channel:100 +[ 65.825279] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.825293] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state +[ 65.825297] [I][__mhi_prepare_channel] Entered: preparing channel:101 +[ 65.835565] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.835578] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state +[ 65.839141] [I][mhi_netdev_enable_iface] Exited. 
+[ 65.839875] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 65.843278] net rmnet_mhi0 rmnet_mhi0.1: NSS context created +[ 65.861808] [I][mhi_pm_mission_mode_transition] Exit with ret:0 +[ 68.625595] [I][__mhi_prepare_channel] Entered: preparing channel:12 +[ 68.634610] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 68.634622] [I][__mhi_prepare_channel] Chan:12 successfully moved to start state +[ 68.634625] [I][__mhi_prepare_channel] Entered: preparing channel:13 +[ 68.644978] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 68.644987] [I][__mhi_prepare_channel] Chan:13 successfully moved to start state +[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1 +[ 69.177035] [I][mhi_netdev_open] Opened net dev interface +[ 71.655431] [I][mhi_netdev_open] Opened net dev interface + +root@OpenWrt:~# ./quectel-CM & +[04-02_04:14:12:134] Quectel_QConnectManager_Linux_V1.6.0.5 +[04-02_04:14:12:134] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002 +[04-02_04:14:12:135] network interface '' or qmidev '' is not exist +[04-02_04:14:12:135] netcard driver = pcie_mhi, driver version = V1.3.0.6 +[04-02_04:14:12:135] Modem works in MBIM mode +[04-02_04:14:12:135] apn (null), user (null), passwd (null), auth 0 +[04-02_04:14:12:135] IP Proto MBIMContextIPTypeIPv4 +[04-02_04:14:12:154] mbim_read_thread is created +sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory +[04-02_04:14:12:156] system(echo 0 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256 +[04-02_04:14:12:185] system(ip address flush dev rmnet_mhi0)=0 +[04-02_04:14:12:187] system(ip link set dev rmnet_mhi0 down)=0 +[04-02_04:14:12:188] mbim_open_device() +[04-02_04:14:12:605] mbim_device_caps_query() +[04-02_04:14:12:610] DeviceId: 869710030002905 +[04-02_04:14:12:610] HardwareInfo: 0 +[04-02_04:14:12:610] mbim_set_radio_state( 1 ) +[04-02_04:14:12:613] HwRadioState: 1, SwRadioState: 1 +[04-02_04:14:12:613] mbim_subscriber_status_query() +[04-02_04:14:12:620] SubscriberReadyState NotInitialized -> Initialized +[04-02_04:14:12:620] mbim_register_state_query() +[04-02_04:14:12:625] RegisterState Unknown -> Home +[04-02_04:14:12:625] mbim_packet_service_query() +[04-02_04:14:12:629] PacketServiceState Unknown -> Attached +[04-02_04:14:12:629] mbim_query_connect(sessionID=0) +[04-02_04:14:12:633] ActivationState Unknown -> Deactivated +[04-02_04:14:12:633] mbim_set_connect(onoff=1, sessionID=0) +[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-02_04:14:12:680] ActivationState Deactivated -> Activated +[ 69.177035] [I][mhi_netdev_open] Opened net dev interface +[04-02_04:14:12:680] mbim_ip_config(sessionID=0) +[04-02_04:14:12:683] < SessionId = 0 +[04-02_04:14:12:683] < IPv4ConfigurationAvailable = 0xf +[04-02_04:14:12:683] < IPv6ConfigurationAvailable = 0x0 +[04-02_04:14:12:683] < IPv4AddressCount = 0x1 +[04-02_04:14:12:683] < IPv4AddressOffset = 0x3c +[04-02_04:14:12:683] < IPv6AddressCount = 0x0 +[04-02_04:14:12:683] < IPv6AddressOffset = 0x0 +[04-02_04:14:12:683] < IPv4 = 10.129.59.93/30 +[04-02_04:14:12:683] < gw = 10.129.59.94 +[04-02_04:14:12:683] < dns1 = 211.138.180.2 +[04-02_04:14:12:683] < dns2 = 211.138.180.3 +[04-02_04:14:12:683] < ipv4 mtu = 1500 +sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory +[04-02_04:14:12:684] system(echo 1 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256 +[04-02_04:14:12:689] system(ip link set dev rmnet_mhi0 up)=0 +[04-02_04:14:12:692] system(ip -4 address flush dev rmnet_mhi0)=0 +[04-02_04:14:12:694] system(ip 
-4 address add 10.129.59.93/30 dev rmnet_mhi0)=0 +[04-02_04:14:12:697] system(ip -4 route add default via 10.129.59.94 dev rmnet_mhi0)=0 +[04-02_04:14:12:699] system(ip -4 link set dev rmnet_mhi0 mtu 1500)=0 + +root@OpenWrt:~# ifconfig rmnet_mhi0 +rmnet_mhi0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:99379 errors:0 dropped:0 overruns:0 frame:0 + TX packets:176569 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1528181052 (1.4 GiB) TX bytes:62467192 (59.5 MiB) + +root@OpenWrt:~# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00 + inet addr:10.129.59.93 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:1089360 errors:0 dropped:0 overruns:0 frame:0 + TX packets:176581 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1521449058 (1.4 GiB) TX bytes:57525792 (54.8 MiB) + +# adjust CPU load balancing +root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +2 +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +4 +root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog +2000 diff --git a/driver/quectel_MHI/src/log/QMI_OVER_PCIE.txt b/driver/quectel_MHI/src/log/QMI_OVER_PCIE.txt new file mode 100644 index 0000000..3604545 --- /dev/null +++ b/driver/quectel_MHI/src/log/QMI_OVER_PCIE.txt @@ -0,0 +1,134 @@ +disable ccflags-y += -DCONFIG_MHI_NETDEV_MBIM in pcie_mhi/Makefile + +root@OpenWrt:~# insmod pcie_mhi.ko + +root@OpenWrt:~# dmesg | grep mhi +[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6 +[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 138.497564] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit] +[ 138.506952] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 138.514562] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63 +[ 138.514581] [I][mhi_power_up] dev_state:RESET +[ 138.514587] [I][mhi_async_power_up] Requested to power on +[ 138.514728] [I][mhi_alloc_coherent] size = 114688, dma_handle = 72160000 +[ 138.514734] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221f000 +[ 138.515030] [I][mhi_async_power_up] dev_state:RESET ee:AMSS +[ 138.515056] [I][mhi_pm_st_worker] Transition to state:READY +[ 138.515067] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 138.515073] [I][mhi_ready_state_transition] Waiting to enter READY state +[ 138.515210] [I][mhi_async_power_up] Power on setup success +[ 138.515227] [I][mhi_pci_probe] Return successful +[ 138.589013] [I][mhi_ready_state_transition] Device in READY State +[ 138.589029] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR +[ 138.589038] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE +[ 138.589041] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR +[ 138.589046] [I][mhi_init_mmio] Initializing MMIO +[ 138.589050] [I][mhi_init_mmio] CHDBOFF:0x300 +[ 138.589060] [I][mhi_init_mmio] ERDBOFF:0x700 +[ 138.589065] [I][mhi_init_mmio] Programming all MMIO values. 
+[ 138.706124] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2 +[ 138.706132] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0 +[ 138.706140] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR +[ 138.706146] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0 +[ 138.708699] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2 +[ 138.708706] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS +[ 138.708726] [I][mhi_pm_st_worker] Transition to state:MISSION MODE +[ 138.708736] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 138.708742] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition +[ 138.708758] [I][mhi_init_timesync] No timesync capability found +[ 138.708764] [I][mhi_pm_mission_mode_transition] Adding new devices +[ 138.709785] [I][mhi_dtr_probe] Enter for DTR control channel +[ 138.709794] [I][__mhi_prepare_channel] Entered: preparing channel:18 +[ 138.715378] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.715397] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state +[ 138.715403] [I][__mhi_prepare_channel] Entered: preparing channel:19 +[ 138.720201] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.720218] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state +[ 138.720236] [I][mhi_dtr_probe] Exit with ret:0 +[ 138.720590] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 138.720630] [I][__mhi_prepare_channel] Entered: preparing channel:100 +[ 138.757230] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.757253] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state +[ 138.757259] [I][__mhi_prepare_channel] Entered: preparing channel:101 +[ 138.774352] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.774370] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state +[ 138.778137] [I][mhi_netdev_enable_iface] Exited. 
+[ 138.779018] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 138.782283] net rmnet_mhi0 rmnet_mhi0.1: NSS context created +[ 138.800865] [I][mhi_pm_mission_mode_transition] Exit with ret:0 + +root@OpenWrt:~# ./quectel-CM & +root@OpenWrt:~# [04-02_04:12:16:477] Quectel_QConnectManager_Linux_V1.6.0.5 +[04-02_04:12:16:477] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002 +[04-02_04:12:16:478] network interface '' or qmidev '' is not exist +[04-02_04:12:16:478] netcard driver = pcie_mhi, driver version = V1.3.0.6 +[04-02_04:12:16:479] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-02_04:12:16:479] Modem works in QMI mode +[04-02_04:12:16:505] cdc_wdm_fd = 7 +[04-02_04:12:17:506] QmiThreadSendQMITimeout pthread_cond_timeout_np timeout +[04-02_04:12:18:516] Get clientWDS = 19 +[04-02_04:12:18:520] Get clientDMS = 1 +[04-02_04:12:18:524] Get clientNAS = 3 +[04-02_04:12:18:527] Get clientUIM = 1 +[04-02_04:12:18:531] Get clientWDA = 1 +[04-02_04:12:18:535] requestBaseBandVersion RM500QGLAAR03A01M4G_BETA_20200107F 1 [Dec 30 2019 17:00:00] +[04-02_04:12:18:539] qmap_settings.rx_urb_size = 16384 +[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_datagrams = 16 +[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-02_04:12:18:539] qmap_settings.dl_minimum_padding = 0 +[04-02_04:12:18:550] requestSetLoopBackState(loopback_state=1, replication_factor=14) +[04-02_04:12:18:557] requestGetSIMStatus SIMStatus: SIM_ABSENT +[04-02_04:12:18:560] requestGetProfile[1] ///0 +[04-02_04:12:18:563] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-02_04:12:18:565] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-02_04:12:18:566] ifconfig rmnet_mhi0.1 down +[04-02_04:12:18:571] ifconfig rmnet_mhi0.1 0.0.0.0 +ifconfig: SIOCSIFFLAGS: Network is down +[04-02_04:12:18:575] SetLoopBackInd: loopback_state=1, replication_factor=14 +[04-02_04:12:18:591] requestSetupDataCall WdsConnectionIPv4Handle: 0xe40182a0 +[04-02_04:12:18:601] ifconfig rmnet_mhi0 up +[04-02_04:12:18:607] ifconfig rmnet_mhi0.1 up +[04-02_04:12:18:613] you are use OpenWrt? +[04-02_04:12:18:614] should not calling udhcpc manually? +[04-02_04:12:18:614] should modify /etc/config/network as below? +[04-02_04:12:18:614] config interface wan +[04-02_04:12:18:614] option ifname rmnet_mhi0.1 +[04-02_04:12:18:614] option proto dhcp +[04-02_04:12:18:614] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status? 
+[04-02_04:12:18:614] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1 +udhcpc: started, v1.28.3 +udhcpc: sending discover +udhcpc: sending select for 192.168.48.171 +udhcpc: lease of 192.168.48.171 obtained, lease time 7200 +[04-02_04:12:18:809] udhcpc: ifconfig rmnet_mhi0.1 192.168.48.171 netmask 255.255.255.248 broadcast + +[04-02_04:12:18:819] udhcpc: setting default routers: 192.168.48.172 + +root@OpenWrt:~# ifconfig rmnet_mhi0 +rmnet_mhi0 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:608 (608.0 B) TX bytes:672 (672.0 B) + +root@OpenWrt:~# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00 + inet addr:192.168.48.171 Mask:255.255.255.248 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:592 (592.0 B) TX bytes:656 (656.0 B) + +# adjust CPU load balancing +root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +2 +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +4 +root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog +2000 \ No newline at end of file diff --git a/driver/quectel_MHI/src/log/QXDM_OVER_PCIE.txt b/driver/quectel_MHI/src/log/QXDM_OVER_PCIE.txt new file mode 100644 index 0000000..13e9cc3 --- /dev/null +++ b/driver/quectel_MHI/src/log/QXDM_OVER_PCIE.txt @@ -0,0 +1,14 @@ +root@imx6qsabresd:~# ./QLog -p /dev/mhi_DIAG -s log & +root@imx6qsabresd:~# [000.000]QLog Version: Quectel_QLog_Linux&Android_V1.2.4 +[ 298.597963] [I][mhi_uci_open] Node open, ref counts 1 +[ 298.605601] [I][mhi_uci_open] Starting channel +[ 298.612159] [I][__mhi_prepare_channel] Entered: preparing channel:4 +[ 298.629906] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 298.635415] [I][__mhi_prepare_channel] Chan:4 successfully moved to start state +[ 298.642749] [I][__mhi_prepare_channel] Entered: preparing channel:5 +[ 298.658043] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 298.663543] [I][__mhi_prepare_channel] Chan:5 successfully moved to start state +[000.075]open /dev/mhi_DIAG ttyfd = 3 +[000.075]Press CTRL+C to stop catch log. +[000.096]qlog_logfile_create log/20160920_145758_0000.qmdl logfd=4 +[005.268]recv: 0M 70K 490B in 5181 msec
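For reference, the character-device semantics implemented in mhi_uci.c above (open starts the channel, poll waits for a pending downlink buffer, read/write move data) can be exercised directly from user space. The sketch below is illustrative only and is not part of this patch; it assumes the /dev/mhi_DUN node shown in log/AT_OVER_PCIE.txt and keeps error handling minimal.

/* minimal user-space sketch: send "ATI" over /dev/mhi_DUN and print the reply */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd;
	int fd = open("/dev/mhi_DUN", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open /dev/mhi_DUN");
		return 1;
	}

	/* mhi_uci_write() queues this buffer on the uplink channel */
	if (write(fd, "ATI\r", 4) != 4) {
		perror("write");
		close(fd);
		return 1;
	}

	/* mhi_uci_poll() reports POLLIN once a downlink buffer is pending */
	pfd.fd = fd;
	pfd.events = POLLIN;
	while (poll(&pfd, 1, 3000) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);

		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("%s", buf);
		if (strstr(buf, "OK") || strstr(buf, "ERROR"))
			break;
	}

	close(fd);
	return 0;
}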