Feat: update quectel_QMI_WWAN from 1.2.2 to 1.2.9

sfwtw 2025-03-06 18:11:38 +08:00
parent cdad1e4075
commit 9e4386988d
2 changed files with 197 additions and 133 deletions

View File

@ -8,8 +8,8 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=qmi_wwan_q
PKG_VERSION:=3.0
PKG_RELEASE:=2
PKG_VERSION:=1.2.9
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk

View File

@ -1,13 +1,16 @@
/*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
* The probing code is heavily inspired by cdc_ether, which is:
* Copyright (C) 2003-2005 by David Brownell
* Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
Copyright (c) Bjørn Mork <bjorn@mork.no>

This program is free software; you can redistribute it and/or modify it under the terms of the GNU General
Public License as published by the Free Software Foundation; either version 2 of the License, or (at your
option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License along with this program; if not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

Based on this version with modifications, the author is Quectel <fae-support@quectel.com>
*/
#include <linux/module.h>
@ -62,8 +65,10 @@ static struct rmnet_nss_cb __read_mostly *nss_cb = NULL;
#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018)
#ifdef CONFIG_RMNET_DATA
#define CONFIG_QCA_NSS_DRV
/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */
/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ //for spf11.x
/* define at qsdk/qca/src/datarmnet/core/rmnet_config.c */ //for spf12.x
/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */
/* need add DEPENDS:= kmod-rmnet-core in feeds/makefile */
extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
#endif
#endif
@ -91,7 +96,7 @@ extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
* These devices may alternatively/additionally be configured using AT
* commands on a serial interface
*/
#define VERSION_NUMBER "V1.2.2"
#define VERSION_NUMBER "V1.2.9"
#define QUECTEL_WWAN_VERSION "Quectel_Linux&Android_QMI_WWAN_Driver_"VERSION_NUMBER
static const char driver_name[] = "qmi_wwan_q";
@ -115,6 +120,7 @@ static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
1 - QMAP (Aggregation protocol)
X - QMAP (Multiplexing and Aggregation protocol)
*/
//#define CONFIG_CLEAR_HALT
#define QUECTEL_WWAN_QMAP 4 //MAX is 7
#if defined(QUECTEL_WWAN_QMAP)
@ -319,10 +325,10 @@ static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint br
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
netif_rx_ni(reply);
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 5,18,0 ))
netif_rx(reply);
#else
netif_rx_ni(reply);
#endif
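// Note: netif_rx_ni() was removed in kernel 5.18, where netif_rx() became callable
// from any context, hence the inverted version check above.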
}
return 1;
@ -387,11 +393,7 @@ static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_bu
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
netif_rx_ni(reply);
#else
netif_rx(reply);
#endif
return NULL;
}
#endif
@ -407,7 +409,7 @@ static void bridge_mode_rx_fixup(sQmiWwanQmap *pQmapDev, struct net_device *net,
uint bridge_mode = 0;
unsigned char *bridge_mac;
if (pQmapDev->qmap_mode > 1 || pQmapDev->use_rmnet_usb == 1) {
if (pQmapDev->qmap_mode > 1 || ((pQmapDev->use_rmnet_usb == 1) && !one_card_mode)) {
struct qmap_priv *priv = netdev_priv(net);
bridge_mode = priv->bridge_mode;
bridge_mac = priv->bridge_mac;
@ -693,7 +695,7 @@ static void qmap_wake_queue(sQmiWwanQmap *pQmapDev)
}
}
static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id, int *hdr_data, int ip_offset) {
struct qmap_hdr *qhdr;
int pad = 0;
@ -701,44 +703,55 @@ static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
if (pad) {
pad = 4 - pad;
if (skb_tailroom(skb) < pad) {
printk("skb_tailroom small!\n");
//printk("skb_tailroom small!\n");
pad = 0;
}
if (pad)
__skb_put(skb, pad);
}
qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
if (hdr_data) {
qhdr = (struct qmap_hdr *)hdr_data;
qhdr->pkt_len = cpu_to_be16(skb->len - ip_offset);
}
else {
qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
}
qhdr->cd_rsvd_pad = pad;
qhdr->mux_id = mux_id;
qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
return skb;
}
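// Updated helper: when hdr_data is non-NULL the 4-byte QMAP header is written into the
// caller's buffer instead of being pushed onto the skb, and pkt_len is computed from
// skb->len - ip_offset so an Ethernet header that was not pulled (ip_offset == ETH_HLEN)
// is excluded. Callers that still want the header inside the skb pass NULL/0, as
// qmap_qmi_wwan_tx_fixup() does below.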
static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id, int *hdr_data, int ip_offset) {
struct rmnet_map_header *map_header;
struct rmnet_map_v5_csum_header *ul_header;
u32 padding, map_datalen;
int pad = 0;
map_datalen = skb->len;
padding = map_datalen%4;
if (padding) {
padding = 4 - padding;
if (skb_tailroom(skb) < padding) {
printk("skb_tailroom small!\n");
padding = 0;
pad = (skb->len - ip_offset) %4;
if (pad) {
pad = 4 - pad;
if (skb_tailroom(skb) < pad) {
//printk("skb_tailroom small!\n");
pad = 0;
}
if (padding)
__skb_put(skb, padding);
if (pad)
__skb_put(skb, pad);
}
map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
if (hdr_data) {
map_header = (struct rmnet_map_header *)hdr_data;
map_header->pkt_len = htons(skb->len - ip_offset);
}
else {
map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
map_header->pkt_len = htons(skb->len - (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
}
map_header->cd_bit = 0;
map_header->next_hdr = 1;
map_header->pad_len = padding;
map_header->pad_len = pad;
map_header->mux_id = mux_id;
map_header->pkt_len = htons(map_datalen + padding);
ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
memset(ul_header, 0, sizeof(*ul_header));
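// add_qhdr_v5() follows the same pattern for QMAP with the v5 checksum header: with
// hdr_data set, the 8-byte rmnet_map_header + rmnet_map_v5_csum_header pair lands in
// the caller's buffer and only the pad bytes are appended to the skb itself.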
@ -761,22 +774,17 @@ static void rmnet_vnd_update_rx_stats(struct net_device *net,
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
u64_stats_update_begin(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,0,0 ))
stats64->rx_packets += rx_packets;
stats64->rx_bytes += rx_bytes;
#else
u64_stats_add(&stats64->rx_packets, rx_packets);
u64_stats_add(&stats64->rx_bytes, rx_bytes);
u64_stats_add(&stats64->rx_packets, rx_packets);
u64_stats_add(&stats64->rx_bytes, rx_bytes);
#endif
u64_stats_update_end(&stats64->syncp);
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
net->stats.rx_packets += rx_packets;
net->stats.rx_bytes += rx_bytes;
#else
u64_stats_add(&net->stats.rx_packets, rx_packets);
u64_stats_add(&net->stats.rx_bytes, rx_bytes);
#endif
#endif
}
@ -787,22 +795,17 @@ static void rmnet_vnd_update_tx_stats(struct net_device *net,
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
u64_stats_update_begin(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,0,0 ))
stats64->tx_packets += tx_packets;
stats64->tx_bytes += tx_bytes;
#else
u64_stats_add(&stats64->tx_packets, tx_packets);
u64_stats_add(&stats64->tx_bytes, tx_bytes);
u64_stats_add(&stats64->tx_packets, tx_packets);
u64_stats_add(&stats64->tx_bytes, tx_bytes);
#endif
u64_stats_update_end(&stats64->syncp);
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
net->stats.tx_packets += tx_packets;
net->stats.tx_bytes += tx_bytes;
#else
u64_stats_add(&net->stats.tx_packets, tx_packets);
u64_stats_add(&net->stats.tx_bytes, tx_bytes);
#endif
#endif
}
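// The u64_stats_add() cutover moves from 5.18 to 6.0 here (and 6.1 in
// _rmnet_vnd_get_stats64() below), presumably tracking when the per-cpu
// pcpu_sw_netstats counters became u64_stats_t; the unreachable fallback
// that applied u64_stats_add() to net->stats is dropped.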
@ -822,6 +825,7 @@ static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net,
for_each_possible_cpu(cpu) {
struct pcpu_sw_netstats *stats64;
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,1,0 ))
u64 rx_packets, rx_bytes;
u64 tx_packets, tx_bytes;
@ -829,25 +833,35 @@ static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net,
do {
start = u64_stats_fetch_begin_irq(&stats64->syncp);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
rx_packets = stats64->rx_packets;
rx_bytes = stats64->rx_bytes;
tx_packets = stats64->tx_packets;
tx_bytes = stats64->tx_bytes;
#else
rx_packets = u64_stats_read(&stats64->rx_packets);
rx_bytes = u64_stats_read(&stats64->rx_bytes);
tx_packets = u64_stats_read(&stats64->tx_packets);
tx_bytes = u64_stats_read(&stats64->tx_bytes);
#endif
} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
#else
u64_stats_t rx_packets, rx_bytes;
u64_stats_t tx_packets, tx_bytes;
stats64 = per_cpu_ptr(dev->stats64, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats64->syncp);
rx_packets = stats64->rx_packets;
rx_bytes = stats64->rx_bytes;
tx_packets = stats64->tx_packets;
tx_bytes = stats64->tx_bytes;
} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
stats->rx_packets += u64_stats_read(&rx_packets);
stats->rx_bytes += u64_stats_read(&rx_bytes);
stats->tx_packets += u64_stats_read(&tx_packets);
stats->tx_bytes += u64_stats_read(&tx_bytes);
#endif
}
return stats;
@ -998,7 +1012,7 @@ static long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) {
static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv, int *hdr_data, int hdr_len, int ip_offset) {
struct qmi_wwan_state *info = (void *)&priv->dev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
struct tx_agg_ctx *ctx = &pQmapDev->tx_ctx;
@ -1021,15 +1035,22 @@ static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) {
rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
if (ctx->ul_data_aggregation_max_datagrams == 1) {
skb->protocol = htons(ETH_P_MAP);
skb->dev = priv->real_dev;
agg_skb = alloc_skb(skb->len + hdr_len, GFP_ATOMIC);
if (agg_skb) {
memcpy(skb_put(agg_skb, hdr_len), hdr_data, hdr_len);
memcpy(skb_put(agg_skb, skb->len - ip_offset), skb->data + ip_offset, skb->len - ip_offset);
agg_skb->protocol = htons(ETH_P_MAP);
agg_skb->dev = priv->real_dev;
#if 0
if (!skb->destructor)
skb->destructor = rmnet_usb_tx_skb_destructor;
if (!agg_skb->destructor)
agg_skb->destructor = rmnet_usb_tx_skb_destructor;
#endif
err = dev_queue_xmit(skb);
err = dev_queue_xmit(agg_skb);
if (err != NET_XMIT_SUCCESS)
pNet->stats.tx_errors++;
}
dev_kfree_skb_any(skb);
skb = NULL;
return NET_XMIT_SUCCESS;
}
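// Aggregation is now copy-based: the QMAP header built in the caller's qmap_hdr[]
// buffer plus the IP payload (skb->data + ip_offset onward) are copied into a freshly
// allocated skb, so the original skb, which still carries its Ethernet header, is
// always freed here instead of being handed to dev_queue_xmit() directly.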
@ -1041,8 +1062,9 @@ new_packet:
diff = timespec64_sub(now, priv->agg_time);
if (priv->agg_skb) {
if ((priv->agg_skb->len + skb->len) < ctx->ul_data_aggregation_max_size) {
memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
if ((priv->agg_skb->len + skb->len + hdr_len) < ctx->ul_data_aggregation_max_size) {
memcpy(skb_put(priv->agg_skb, hdr_len), hdr_data, hdr_len);
memcpy(skb_put(priv->agg_skb, skb->len - ip_offset), skb->data + ip_offset, skb->len - ip_offset);
priv->agg_count++;
if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) {
@ -1052,7 +1074,7 @@ new_packet:
ready2send = 1;
}
else if (xmit_more == 0) {
struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
struct rmnet_map_header *map_header = (struct rmnet_map_header *)hdr_data;
size_t offset = sizeof(struct rmnet_map_header);
if (map_header->next_hdr)
offset += sizeof(struct rmnet_map_v5_csum_header);
@ -1078,7 +1100,7 @@ new_packet:
ready2send = 1;
}
else if (xmit_more == 0) {
struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
struct rmnet_map_header *map_header = (struct rmnet_map_header *)hdr_data;
size_t offset = sizeof(struct rmnet_map_header);
if (map_header->next_hdr)
offset += sizeof(struct rmnet_map_v5_csum_header);
@ -1090,7 +1112,8 @@ new_packet:
priv->agg_skb = alloc_skb(ctx->ul_data_aggregation_max_size, GFP_ATOMIC);
if (priv->agg_skb) {
skb_reset_network_header(priv->agg_skb); //protocol da1a is buggy, dev wwan0
memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
memcpy(skb_put(priv->agg_skb, hdr_len), hdr_data, hdr_len);
memcpy(skb_put(priv->agg_skb, skb->len - ip_offset), skb->data + ip_offset, skb->len - ip_offset);
priv->agg_count++;
dev_kfree_skb_any(skb);
skb = NULL;
@ -1101,7 +1124,12 @@ new_packet:
}
if (ready2send) {
agg_skb = skb;
agg_skb = alloc_skb(skb->len + hdr_len, GFP_ATOMIC);
if (agg_skb) {
memcpy(skb_put(agg_skb, hdr_len), hdr_data, hdr_len);
memcpy(skb_put(agg_skb, skb->len - ip_offset), skb->data + ip_offset, skb->len - ip_offset);
}
dev_kfree_skb_any(skb);
skb = NULL;
}
}
@ -1142,6 +1170,9 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
{
int err;
struct qmap_priv *priv = netdev_priv(pNet);
int qmap_hdr[2];
int hdr_len = 0;
int ip_offset = 0;
if (netif_queue_stopped(priv->real_dev)) {
netif_stop_queue(pNet);
@ -1159,18 +1190,24 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
}
#endif
#if 1
ip_offset = ETH_HLEN;
#else
if (skb_pull(skb, ETH_HLEN) == NULL) {
dev_kfree_skb_any (skb);
return NETDEV_TX_OK;
}
#endif
}
//printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (priv->qmap_version == 5) {
add_qhdr(skb, priv->mux_id);
add_qhdr(skb, priv->mux_id, qmap_hdr, ip_offset);
hdr_len = 4;
}
else if (priv->qmap_version == 9) {
add_qhdr_v5(skb, priv->mux_id);
add_qhdr_v5(skb, priv->mux_id, qmap_hdr, ip_offset);
hdr_len = 8;
}
else {
dev_kfree_skb_any (skb);
@ -1178,7 +1215,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
}
//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
err = rmnet_usb_tx_agg(skb, priv);
err = rmnet_usb_tx_agg(skb, priv, qmap_hdr, hdr_len, ip_offset);
return err;
}
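// The Ethernet header is no longer pulled before aggregation; ip_offset = ETH_HLEN marks
// where the IP payload starts, and hdr_len is 4 or 8 bytes depending on priv->qmap_version
// (5 vs. 9), matching what add_qhdr()/add_qhdr_v5() wrote into qmap_hdr[].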
@ -1295,10 +1332,10 @@ static int qmap_register_device(sQmiWwanQmap * pDev, u8 offset_id)
priv->dev = pDev->mpNetDev;
priv->qmap_version = pDev->qmap_version;
priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0)
__dev_addr_set(qmap_net, real_dev->dev_addr, ETH_ALEN);
#else
eth_hw_addr_set (real_dev, qmap_net->dev_addr);
memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
#endif
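// dev_addr handling is fixed up: on kernels where netdev->dev_addr can no longer be
// written directly (the driver uses 5.17 as the cutoff) the real device's MAC is copied
// to the qmap netdev via __dev_addr_set(); older kernels keep the plain memcpy(). The
// previous >=5.17 branch called eth_hw_addr_set(real_dev, qmap_net->dev_addr), i.e. it
// copied in the wrong direction.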
#ifdef QUECTEL_BRIDGE_MODE
@ -1314,7 +1351,9 @@ static int qmap_register_device(sQmiWwanQmap * pDev, u8 offset_id)
if (nss_cb && use_qca_nss) {
rmnet_usb_rawip_setup(qmap_net);
}
#ifdef CONFIG_PINCTRL_IPQ9574
rmnet_usb_rawip_setup(qmap_net);
#endif
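// On IPQ9574 targets the qmap netdev is switched to raw-IP framing unconditionally,
// independent of the NSS offload check above.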
priv->agg_skb = NULL;
priv->agg_count = 0;
hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@ -1421,6 +1460,8 @@ typedef struct {
} BRMAC_SETTING;
#endif
static int qma_setting_store(struct device *dev, QMAP_SETTING *qmap_settings, size_t size);
int qma_setting_store(struct device *dev, QMAP_SETTING *qmap_settings, size_t size) {
struct net_device *netdev = to_net_dev(dev);
struct usbnet * usbnetdev = netdev_priv( netdev );
@ -1460,6 +1501,10 @@ static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#ifdef CONFIG_BRIDGE_LAN
BRMAC_SETTING brmac_settings = {0};
#endif
#ifdef CONFIG_CLEAR_HALT
uint clear_halt = 0;
#endif
switch (cmd) {
case 0x89F1: //SIOCDEVPRIVATE
@ -1515,6 +1560,16 @@ static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
break;
#endif
#ifdef CONFIG_CLEAR_HALT
case 0x89F5: //SIOCDEVPRIVATE
rc = copy_from_user(&clear_halt, ifr->ifr_ifru.ifru_data, sizeof(clear_halt));
if (rc == 0 && clear_halt == 1) {
usb_clear_halt(usbnetdev->udev,usbnetdev->in);
usb_clear_halt(usbnetdev->udev,usbnetdev->out);
pr_info("usb_clear_halt EPIN EPOUT\n");
}
break;
#endif
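// New, compile-time optional private ioctl: with CONFIG_CLEAR_HALT defined (it is left
// commented out near the top of this file), ioctl 0x89F5 with value 1 clears a stalled
// bulk IN/OUT endpoint pair via usb_clear_halt().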
default:
break;
@ -1657,9 +1712,9 @@ static struct sk_buff *qmap_qmi_wwan_tx_fixup(struct usbnet *dev, struct sk_buff
if (skb) {
if(pQmapDev->qmap_version == 5)
add_qhdr(skb, QUECTEL_QMAP_MUX_ID);
add_qhdr(skb, QUECTEL_QMAP_MUX_ID, NULL, 0);
else
add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID);
add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID, NULL, 0);
}
else {
return NULL;
@ -1949,7 +2004,7 @@ static void ql_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *i
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
/* strlcpy() is deprecated in kernel 6.8.0+, using strscpy instead */
/* strlcpy() is deprecated in kernel 6.8.0+, using strscpy instead */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,8,0))
strlcpy(info->driver, driver_name, sizeof(info->driver));
strlcpy(info->version, VERSION_NUMBER, sizeof(info->version));
@ -2021,7 +2076,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
atomic_set(&info->pmcount, 0);
/* register subdriver */
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,12,0 )) //cac6fb015f719104e60b1c68c15ca5b734f57b9c
#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 5,14,0 )) //cac6fb015f719104e60b1c68c15ca5b734f57b9c
subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
4096, WWAN_PORT_QMI, &qmi_wwan_cdc_wdm_manage_power);
#else
@ -2072,17 +2127,17 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
/* make MAC addr easily distinguishable from an IP header */
if (possibly_iphdr(dev->net->dev_addr)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0)
u8 temp_addr[ETH_ALEN];
memcpy(temp_addr, dev->net->dev_addr, ETH_ALEN);
temp_addr[0] |= 0x02; /* set local assignment bit */
temp_addr[0] &= 0xbf; /* clear "IP" bit */
__dev_addr_set(dev->net, temp_addr, ETH_ALEN);
#else
dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */
dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */
#else
u8 addr = dev->net->dev_addr[0];
addr |= 0x02; /* set local assignment bit */
addr &= 0xbf; /* clear "IP" bit */
dev_addr_mod(dev->net, 0, &addr, 1);
#endif
}
}
if (!_usbnet_get_stats64)
_usbnet_get_stats64 = dev->net->netdev_ops->ndo_get_stats64;
dev->net->netdev_ops = &qmi_wwan_netdev_ops;
@ -2115,6 +2170,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
NULL, 0, 100);
}
usb_clear_halt(dev->udev,dev->in);
usb_clear_halt(dev->udev,dev->out);
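// Newly added: clear any stale halt/stall condition on both bulk endpoints at bind time,
// before the rx_urb_size fixups below.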
//to avoid the module reporting mtu 1460 while receiving 1500-byte IP packets, which can crash the customer's system
//next setting can make usbnet.c:usbnet_change_mtu() do not modify rx_urb_size according to hard mtu
dev->rx_urb_size = ETH_DATA_LEN + ETH_HLEN + 6;
@ -2146,7 +2204,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
int qmap_version = (dev->driver_info->data>>8)&0xFF;
int qmap_size = (dev->driver_info->data)&0xFF;
int idProduct = le16_to_cpu(dev->udev->descriptor.idProduct);
int lte_a = (idProduct == 0x0306 || idProduct == 0x030B || idProduct == 0x0512 || idProduct == 0x0620 || idProduct == 0x0800 || idProduct == 0x0801);
int lte_a = (idProduct == 0x0306 || idProduct == 0x030B || idProduct == 0x0512 || idProduct == 0x0620 ||
idProduct == 0x0800 || idProduct == 0x0801 || idProduct == 0x0122 || idProduct == 0x0316);
if (qmap_size > 4096 || dev->udev->speed >= USB_SPEED_SUPER) { //if meet this requirements, must be LTE-A or 5G
lte_a = 1;
@ -2168,7 +2227,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
if (pQmapDev->qmap_mode > 1)
pQmapDev->use_rmnet_usb = 1;
else if (idProduct == 0x0800 || idProduct == 0x0801)
else if (idProduct == 0x0800 || idProduct == 0x0801 || idProduct == 0x0122)
pQmapDev->use_rmnet_usb = 1; //benefit for ul data agg
#ifdef QMI_NETDEV_ONE_CARD_MODE
if(pQmapDev->use_rmnet_usb == 1 && pQmapDev->qmap_mode == 1)
@ -2178,7 +2237,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
pQmapDev->rmnet_info.size = sizeof(RMNET_INFO);
pQmapDev->rmnet_info.rx_urb_size = pQmapDev->qmap_size;
pQmapDev->rmnet_info.ep_type = 2; //DATA_EP_TYPE_HSUSB
pQmapDev->rmnet_info.iface_id = 4;
pQmapDev->rmnet_info.iface_id = 4;//Interface ID
if(idProduct == 0x0316)
pQmapDev->rmnet_info.iface_id = 3;// SDX35 Interface ID
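// PIDs 0x0122 (RG650) and 0x0316 (RG255/SDX35) are now treated as LTE-A/5G parts; the
// SDX35-based RG255 exposes its data interface at number 3 rather than 4, matching its
// entry in the USB ID table below.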
pQmapDev->rmnet_info.qmap_mode = pQmapDev->qmap_mode;
pQmapDev->rmnet_info.qmap_version = pQmapDev->qmap_version;
pQmapDev->rmnet_info.dl_minimum_padding = 0;
@ -2417,7 +2478,7 @@ static const struct driver_info qmi_wwan_info = {
};
#define qmi_wwan_raw_ip_info \
.description = "WWAN/QMI device", \
.description = "WWAN/QMI Raw IP device", \
.flags = FLAG_WWAN | FLAG_RX_ASSEMBLE | FLAG_NOARP | FLAG_SEND_ZLP, \
.bind = qmi_wwan_bind, \
.unbind = qmi_wwan_unbind, \
@ -2465,6 +2526,7 @@ static const struct usb_device_id products[] = {
{ QMI_FIXED_INTF(0x05C6, 0x9215, 4) }, /* Quectel EC20 (MDM9215) */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0125, 4, mdm9x07) }, /* Quectel EC20 (MDM9X07)/EC25/EG25 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0121, 4, mdm9x07) }, /* Quectel EC21 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x030E, 4, mdm9x07) }, /* Quectel EM05G */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0191, 4, mdm9x07) }, /* Quectel EG91 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0195, 4, mdm9x07) }, /* Quectel EG95 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0700, 3, mdm9x07) }, /* Quectel BG95 (at+qcfgext="usbnet","rmnet") */
@ -2474,8 +2536,10 @@ static const struct usb_device_id products[] = {
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0296, 4, mdm9x07) }, /* Quectel BG96 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0435, 4, mdm9x07) }, /* Quectel AG35 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0620, 4, mdm9x40) }, /* Quectel EG20 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0316, 3, mdm9x40) }, /* Quectel RG255 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0800, 4, sdx55) }, /* Quectel RG500 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0801, 4, sdx55) }, /* Quectel RG520 */
{ QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0122, 4, sdx55) }, /* Quectel RG650 */
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);