Update PCIE

ling 2023-05-22 21:59:31 +08:00
parent b2a91105b3
commit 53354cc881
4 changed files with 9 additions and 82 deletions

View File

@@ -177,16 +177,11 @@ static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
#if 1 //some SOC like rpi_4b need next codes
ret = -EIO;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
if((ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64))))
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
#else
if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
}
#endif
if (ret) {
MHI_ERR("Error dma mask\n");
}

View File

@@ -200,11 +200,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
if (unlikely(!sequence_id))
sequence_id = 1;
@@ -315,11 +312,8 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
@@ -370,12 +364,8 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#else
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);

View File

@@ -449,11 +449,8 @@ static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint br
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
netif_rx_ni(reply);
#else
netif_rx(reply);
#endif
}
return 1;
}
@@ -843,13 +840,8 @@ static void rmnet_vnd_upate_rx_stats(struct net_device *net,
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
u64_stats_update_begin(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
stats64->rx_packets += rx_packets;
stats64->rx_bytes += rx_bytes;
#else
u64_stats_add(&stats64->rx_packets, rx_packets);
u64_stats_add(&stats64->rx_bytes,rx_bytes);
#endif
u64_stats_update_end(&stats64->syncp);
#else
priv->self_dev->stats.rx_packets += rx_packets;
@@ -864,13 +856,8 @@ static void rmnet_vnd_upate_tx_stats(struct net_device *net,
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
u64_stats_update_begin(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
stats64->tx_packets += tx_packets;
stats64->tx_bytes += tx_bytes;
#else
u64_stats_add(&stats64->tx_packets, tx_packets);
u64_stats_add(&stats64->tx_bytes,tx_bytes);
#endif
u64_stats_update_end(&stats64->syncp);
#else
net->stats.rx_packets += tx_packets;
@@ -901,17 +888,10 @@ static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net,
do {
start = u64_stats_fetch_begin_irq(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
rx_packets = stats64->rx_packets;
rx_bytes = stats64->rx_bytes;
tx_packets = stats64->tx_packets;
tx_bytes = stats64->tx_bytes;
#else
rx_packets = u64_stats_read(&stats64->rx_packets);
rx_bytes = u64_stats_read(&stats64->rx_bytes);
tx_packets = u64_stats_read(&stats64->tx_packets);
tx_bytes = u64_stats_read(&stats64->tx_bytes);
#endif
} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
stats->rx_packets += rx_packets;
@@ -1505,7 +1485,7 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev
struct qmap_priv *priv;
int err;
int use_qca_nss = !!nss_cb;
u8 maddr[ETH_ALEN];
qmap_net = alloc_etherdev(sizeof(*priv));
if (!qmap_net)
return NULL;
@@ -1519,9 +1499,8 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev
priv->qmap_version = pQmapDev->qmap_version;
priv->mux_id = mux_id;
sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1);
ether_addr_copy(maddr, real_dev->dev_addr);
memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
maddr[5]= offset_id + 1;
qmap_net->dev_addr[5] = offset_id + 1;
ether_addr_copy((u8*)qmap_net->dev_addr, maddr);
//eth_random_addr(qmap_net->dev_addr);
#if defined(MHI_NETDEV_STATUS64)
priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1726,14 +1705,8 @@ static void mhi_netdev_upate_rx_stats(struct mhi_netdev *mhi_netdev,
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
u64_stats_update_begin(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
stats64->rx_packets += rx_packets;
stats64->rx_bytes += rx_bytes;
#else
u64_stats_add(&stats64->rx_packets, rx_packets);
u64_stats_add(&stats64->rx_bytes,rx_bytes);
#endif
u64_stats_update_begin(&stats64->syncp);
#else
mhi_netdev->ndev->stats.rx_packets += rx_packets;
@@ -1747,14 +1720,8 @@ static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev,
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
u64_stats_update_begin(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
stats64->tx_packets += tx_packets;
stats64->tx_bytes += tx_bytes;
#else
u64_stats_add(&stats64->tx_packets, tx_packets);
u64_stats_add(&stats64->tx_bytes,tx_bytes);
#endif
u64_stats_update_begin(&stats64->syncp);
#else
mhi_netdev->ndev->stats.tx_packets += tx_packets;
@@ -2097,17 +2064,10 @@ static struct rtnl_link_stats64 * _mhi_netdev_get_stats64(struct net_device *nde
do {
start = u64_stats_fetch_begin_irq(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
rx_packets = stats64->rx_packets;
rx_bytes = stats64->rx_bytes;
tx_packets = stats64->tx_packets;
tx_bytes = stats64->tx_bytes;
#else
rx_packets = u64_stats_read(&stats64->rx_packets);
rx_bytes = u64_stats_read(&stats64->rx_bytes);
tx_packets = u64_stats_read(&stats64->tx_packets);
tx_bytes = u64_stats_read(&stats64->tx_bytes);
#endif
} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
stats->rx_packets += rx_packets;
@@ -2309,11 +2269,8 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
mhi_netdev->ndev->mtu = mhi_netdev->mru;
}
rtnl_unlock();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
netif_napi_add_weight(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight);
#else
netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight);
#endif
ret = register_netdev(mhi_netdev->ndev);
if (ret) {
MSG_ERR("Network device registration failed\n");

View File

@@ -8,8 +8,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/version.h>
#if 1
static inline void *ipc_log_context_create(int max_num_pages,
const char *modname, uint16_t user_version)
@@ -160,20 +158,7 @@ static int mhi_queue_inbound(struct uci_dev *uci_dev)
return ret;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
#ifdef TCGETS2
static int kernel_termios_to_user_termios_1(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
static int user_termios_to_kernel_termios_1(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
#endif
#endif
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)