luci: add new auto switch logic to socks config

commit 64e8015588, parent 1ede6499dc
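
For orientation before the diff: the auto switch feature is driven by new options on the `socks` sections of /etc/config/passwall2. A minimal sketch of turning it on from the shell, assuming a socks section id of `mysocks` and a backup node section id of `mynode` (both are placeholders, not values from this commit):

    # placeholder section ids; real ids are the short UUIDs generated by the socks page
    uci set passwall2.mysocks.enable_autoswitch='1'
    uci set passwall2.mysocks.autoswitch_testing_time='1'        # minutes between checks
    uci set passwall2.mysocks.autoswitch_connect_timeout='3'     # seconds per probe
    uci set passwall2.mysocks.autoswitch_retry_num='1'
    uci set passwall2.mysocks.autoswitch_restore_switch='1'      # switch back once the main node recovers
    uci set passwall2.mysocks.autoswitch_probe_url='https://www.google.com/generate_204'
    uci add_list passwall2.mysocks.autoswitch_backup_node='mynode'
    uci commit passwall2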
@@ -33,6 +33,7 @@ function index()
 	entry({"admin", "services", appname, "node_subscribe_config"}, cbi(appname .. "/client/node_subscribe_config")).leaf = true
 	entry({"admin", "services", appname, "node_config"}, cbi(appname .. "/client/node_config")).leaf = true
 	entry({"admin", "services", appname, "shunt_rules"}, cbi(appname .. "/client/shunt_rules")).leaf = true
+	entry({"admin", "services", appname, "socks_config"}, cbi(appname .. "/client/socks_config")).leaf = true
 	entry({"admin", "services", appname, "acl"}, cbi(appname .. "/client/acl"), _("Access control"), 98).leaf = true
 	entry({"admin", "services", appname, "acl_config"}, cbi(appname .. "/client/acl_config")).leaf = true
 	entry({"admin", "services", appname, "log"}, form(appname .. "/client/log"), _("Watch Logs"), 999).leaf = true
@@ -47,6 +48,8 @@ function index()
 	entry({"admin", "services", appname, "server_get_log"}, call("server_get_log")).leaf = true
 	entry({"admin", "services", appname, "server_clear_log"}, call("server_clear_log")).leaf = true
 	entry({"admin", "services", appname, "link_add_node"}, call("link_add_node")).leaf = true
+	entry({"admin", "services", appname, "socks_autoswitch_add_node"}, call("socks_autoswitch_add_node")).leaf = true
+	entry({"admin", "services", appname, "socks_autoswitch_remove_node"}, call("socks_autoswitch_remove_node")).leaf = true
 	entry({"admin", "services", appname, "get_now_use_node"}, call("get_now_use_node")).leaf = true
 	entry({"admin", "services", appname, "get_redir_log"}, call("get_redir_log")).leaf = true
 	entry({"admin", "services", appname, "get_log"}, call("get_log")).leaf = true
@@ -104,6 +107,43 @@ function link_add_node()
 	luci.sys.call("lua /usr/share/passwall2/subscribe.lua add log")
 end
+
+function socks_autoswitch_add_node()
+	local id = luci.http.formvalue("id")
+	local key = luci.http.formvalue("key")
+	if id and id ~= "" and key and key ~= "" then
+		local new_list = ucic:get(appname, id, "autoswitch_backup_node") or {}
+		for i = #new_list, 1, -1 do
+			if (ucic:get(appname, new_list[i], "remarks") or ""):find(key) then
+				table.remove(new_list, i)
+			end
+		end
+		for k, e in ipairs(api.get_valid_nodes()) do
+			if e.node_type == "normal" and e["remark"]:find(key) then
+				table.insert(new_list, e.id)
+			end
+		end
+		ucic:set_list(appname, id, "autoswitch_backup_node", new_list)
+		ucic:commit(appname)
+	end
+	luci.http.redirect(api.url("socks_config", id))
+end
+
+function socks_autoswitch_remove_node()
+	local id = luci.http.formvalue("id")
+	local key = luci.http.formvalue("key")
+	if id and id ~= "" and key and key ~= "" then
+		local new_list = ucic:get(appname, id, "autoswitch_backup_node") or {}
+		for i = #new_list, 1, -1 do
+			if (ucic:get(appname, new_list[i], "remarks") or ""):find(key) then
+				table.remove(new_list, i)
+			end
+		end
+		ucic:set_list(appname, id, "autoswitch_backup_node", new_list)
+		ucic:commit(appname)
+	end
+	luci.http.redirect(api.url("socks_config", id))
+end
 
 function get_now_use_node()
 	local e = {}
 	local data, code, msg = nixio.fs.readfile("/tmp/etc/passwall2/id/global")
@@ -257,6 +297,7 @@ function clear_all_nodes()
 	ucic:set(appname, '@global[0]', "node", "nil")
 	ucic:foreach(appname, "socks", function(t)
 		ucic:delete(appname, t[".name"])
+		ucic:set_list(appname, t[".name"], "autoswitch_backup_node", {})
 	end)
 	ucic:foreach(appname, "haproxy_config", function(t)
 		ucic:delete(appname, t[".name"])
@@ -282,6 +323,13 @@ function delete_select_nodes()
 		if t["node"] == w then
 			ucic:delete(appname, t[".name"])
 		end
+		local auto_switch_node_list = ucic:get(appname, t[".name"], "autoswitch_backup_node") or {}
+		for i = #auto_switch_node_list, 1, -1 do
+			if w == auto_switch_node_list[i] then
+				table.remove(auto_switch_node_list, i)
+			end
+		end
+		ucic:set_list(appname, t[".name"], "autoswitch_backup_node", auto_switch_node_list)
 	end)
 	ucic:foreach(appname, "haproxy_config", function(t)
 		if t["lbss"] == w then
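
The two new controller actions above are reached by GET with the socks section id and a node remark keyword (the footer template added later in this commit builds exactly these URLs); internally they only rewrite the autoswitch_backup_node list. A rough sketch of the same effect done by hand, with placeholder section ids and the standard LuCI URL prefix assumed:

    # assumed URL shape (id = socks section id, key = remark keyword):
    #   /cgi-bin/luci/admin/services/passwall2/socks_autoswitch_add_node?id=<socks_id>&key=<keyword>
    #   /cgi-bin/luci/admin/services/passwall2/socks_autoswitch_remove_node?id=<socks_id>&key=<keyword>
    # equivalent manual list edit, placeholder ids:
    uci -q del_list passwall2.mysocks.autoswitch_backup_node='node_old'
    uci add_list passwall2.mysocks.autoswitch_backup_node='node_new'
    uci commit passwall2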
@@ -305,11 +305,15 @@ o = s:taboption("Main", Flag, "socks_enabled", "Socks " .. translate("Main switc
 o.rmempty = false
 
 s = m:section(TypedSection, "socks", translate("Socks Config"))
+s.template = "cbi/tblsection"
 s.anonymous = true
 s.addremove = true
-s.template = "cbi/tblsection"
+s.extedit = api.url("socks_config", "%s")
 function s.create(e, t)
-	TypedSection.create(e, api.gen_short_uuid())
+	local uuid = api.gen_short_uuid()
+	t = uuid
+	TypedSection.create(e, t)
+	luci.http.redirect(e.extedit:format(t))
 end
 
 o = s:option(DummyValue, "status", translate("Status"))
@@ -37,6 +37,11 @@ function s.remove(e, t)
 		if s["node"] == t then
 			m:del(s[".name"])
 		end
+		for k, v in ipairs(m:get(s[".name"], "autoswitch_backup_node") or {}) do
+			if v and v == t then
+				sys.call(string.format("uci -q del_list %s.%s.autoswitch_backup_node='%s'", appname, s[".name"], v))
+			end
+		end
 	end)
 	m.uci:foreach(appname, "acl_rule", function(s)
 		if s["node"] and s["node"] == t then
@@ -0,0 +1,118 @@
+local api = require "luci.passwall2.api"
+local appname = api.appname
+local uci = api.uci
+local has_v2ray = api.is_finded("v2ray")
+local has_xray = api.is_finded("xray")
+
+m = Map(appname)
+
+local nodes_table = {}
+for k, e in ipairs(api.get_valid_nodes()) do
+	nodes_table[#nodes_table + 1] = e
+end
+
+s = m:section(NamedSection, arg[1], translate("Socks Config"), translate("Socks Config"))
+s.addremove = false
+s.dynamic = false
+
+---- Enable
+o = s:option(Flag, "enabled", translate("Enable"))
+o.default = 1
+o.rmempty = false
+
+local auto_switch_tip
+local current_node_file = string.format("/tmp/etc/%s/id/socks_%s", appname, arg[1])
+local current_node = luci.sys.exec(string.format("[ -f '%s' ] && echo -n $(cat %s)", current_node_file, current_node_file))
+if current_node and current_node ~= "" and current_node ~= "nil" then
+	local n = uci:get_all(appname, current_node)
+	if n then
+		if tonumber(m:get(arg[1], "enable_autoswitch") or 0) == 1 then
+			if n then
+				local remarks = api.get_node_remarks(n)
+				local url = api.url("node_config", n[".name"])
+				auto_switch_tip = translatef("Current node: %s", string.format('<a href="%s">%s</a>', url, remarks)) .. "<br />"
+			end
+		end
+	end
+end
+
+socks_node = s:option(ListValue, "node", translate("Node"))
+if auto_switch_tip then
+	socks_node.description = auto_switch_tip
+end
+
+local n = 1
+uci:foreach(appname, "socks", function(s)
+	if s[".name"] == section then
+		return false
+	end
+	n = n + 1
+end)
+
+o = s:option(Value, "port", "Socks " .. translate("Listen Port"))
+o.default = n + 1080
+o.datatype = "port"
+o.rmempty = false
+
+if has_v2ray or has_xray then
+	o = s:option(Value, "http_port", "HTTP " .. translate("Listen Port") .. " " .. translate("0 is not use"))
+	o.default = 0
+	o.datatype = "port"
+end
+
+o = s:option(Flag, "enable_autoswitch", translate("Auto Switch"))
+o.default = 0
+o.rmempty = false
+
+o = s:option(Value, "autoswitch_testing_time", translate("How often to test"), translate("Units:minutes"))
+o.datatype = "uinteger"
+o.default = 1
+o:depends("enable_autoswitch", true)
+
+o = s:option(Value, "autoswitch_connect_timeout", translate("Timeout seconds"), translate("Units:seconds"))
+o.datatype = "uinteger"
+o.default = 3
+o:depends("enable_autoswitch", true)
+
+o = s:option(Value, "autoswitch_retry_num", translate("Timeout retry num"))
+o.datatype = "uinteger"
+o.default = 1
+o:depends("enable_autoswitch", true)
+
+autoswitch_backup_node = s:option(DynamicList, "autoswitch_backup_node", translate("List of backup nodes"))
+autoswitch_backup_node:depends("enable_autoswitch", true)
+function o.write(self, section, value)
+	local t = {}
+	local t2 = {}
+	if type(value) == "table" then
+		local x
+		for _, x in ipairs(value) do
+			if x and #x > 0 then
+				if not t2[x] then
+					t2[x] = x
+					t[#t+1] = x
+				end
+			end
+		end
+	else
+		t = { value }
+	end
+	return DynamicList.write(self, section, t)
+end
+
+o = s:option(Flag, "autoswitch_restore_switch", translate("Restore Switch"), translate("When detects main node is available, switch back to the main node."))
+o:depends("enable_autoswitch", true)
+
+o = s:option(Value, "autoswitch_probe_url", translate("Probe URL"), translate("The URL used to detect the connection status."))
+o.default = "https://www.google.com/generate_204"
+
+for k, v in pairs(nodes_table) do
+	if v.node_type == "normal" then
+		autoswitch_backup_node:value(v.id, v["remark"])
+		socks_node:value(v.id, v["remark"])
+	end
+end
+
+m:append(Template(appname .. "/socks_auto_switch/footer"))
+
+return m
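
The auto_switch_tip above is read from /tmp/etc/passwall2/id/socks_<section>, the marker file that app.sh (further down in this diff) writes whenever the switch logic moves a socks instance to a new node. Checking it by hand, with a placeholder section id:

    # placeholder socks section id "mysocks"; prints the uci id of the node currently in use
    cat /tmp/etc/passwall2/id/socks_mysocks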
@@ -0,0 +1,23 @@
+<%
+local api = require "luci.passwall2.api"
+-%>
+
+<script type="text/javascript">
+//<![CDATA[
+let socks_id = window.location.pathname.substring(window.location.pathname.lastIndexOf("/") + 1)
+function add_node_by_key() {
+	var key = prompt("<%:Please enter the node keyword, pay attention to distinguish between spaces, uppercase and lowercase.%>", "");
+	if (key) {
+		window.location.href = '<%=api.url("socks_autoswitch_add_node")%>' + "?id=" + socks_id + "&key=" + key;
+	}
+}
+function remove_node_by_key() {
+	var key = prompt("<%:Please enter the node keyword, pay attention to distinguish between spaces, uppercase and lowercase.%>", "");
+	if (key) {
+		window.location.href = '<%=api.url("socks_autoswitch_remove_node")%>' + "?id=" + socks_id + "&key=" + key;
+	}
+}
+//]]>
+</script>
+<input class="btn cbi-button cbi-button-add" type="button" onclick="add_node_by_key()" value="<%:Add nodes to the standby node list by keywords%>" />
+<input class="btn cbi-button cbi-button-remove" type="button" onclick="remove_node_by_key()" value="<%:Delete nodes in the standby node list by keywords%>" />
@@ -535,6 +535,29 @@ run_socks() {
 	unset http_flag
 }
+
+socks_node_switch() {
+	local flag new_node
+	eval_set_val $@
+	[ -n "$flag" ] && [ -n "$new_node" ] && {
+		pgrep -af "$TMP_BIN_PATH" | awk -v P1="${flag}" 'BEGIN{IGNORECASE=1}$0~P1 && !/acl\/|acl_/{print $1}' | xargs kill -9 >/dev/null 2>&1
+		rm -rf $TMP_PATH/SOCKS_${flag}*
+		rm -rf $TMP_PATH/HTTP2SOCKS_${flag}*
+
+		for filename in $(ls ${TMP_SCRIPT_FUNC_PATH}); do
+			cmd=$(cat ${TMP_SCRIPT_FUNC_PATH}/${filename})
+			[ -n "$(echo $cmd | grep "${flag}")" ] && rm -f ${TMP_SCRIPT_FUNC_PATH}/${filename}
+		done
+		local port=$(config_n_get $flag port)
+		local config_file="SOCKS_${flag}.json"
+		local log_file="SOCKS_${flag}.log"
+		local http_port=$(config_n_get $flag http_port 0)
+		local http_config_file="HTTP2SOCKS_${flag}.json"
+		LOG_FILE="/dev/null"
+		run_socks flag=$flag node=$new_node bind=0.0.0.0 socks_port=$port config_file=$config_file http_port=$http_port http_config_file=$http_config_file
+		echo $new_node > $TMP_ID_PATH/socks_${flag}
+	}
+}
 
 run_global() {
 	[ "$NODE" = "nil" ] && return 1
 	TYPE=$(echo $(config_n_get $NODE type nil) | tr 'A-Z' 'a-z')
@@ -642,7 +665,11 @@ start_socks() {
 		local http_port=$(config_n_get $id http_port 0)
 		local http_config_file="HTTP2SOCKS_${id}.json"
 		run_socks flag=$id node=$node bind=0.0.0.0 socks_port=$port config_file=$config_file http_port=$http_port http_config_file=$http_config_file
-		echo $node > $TMP_ID_PATH/SOCKS_${id}
+		echo $node > $TMP_ID_PATH/socks_${id}
+
+		# auto switch logic
+		local enable_autoswitch=$(config_n_get $id enable_autoswitch 0)
+		[ "$enable_autoswitch" = "1" ] && $APP_PATH/socks_auto_switch.sh ${id} > /dev/null 2>&1 &
 	done
 	}
 }
@@ -980,6 +1007,7 @@ stop() {
 	[ -s "$TMP_PATH/bridge_nf_ipt" ] && sysctl -w net.bridge.bridge-nf-call-iptables=$(cat $TMP_PATH/bridge_nf_ipt) >/dev/null 2>&1
 	[ -s "$TMP_PATH/bridge_nf_ip6t" ] && sysctl -w net.bridge.bridge-nf-call-ip6tables=$(cat $TMP_PATH/bridge_nf_ip6t) >/dev/null 2>&1
 	rm -rf ${TMP_PATH}
+	rm -rf /tmp/lock/${CONFIG}_socks_auto_switch*
 	echolog "清空并关闭相关程序和缓存完成。"
 	exit 0
 }
@@ -1043,6 +1071,9 @@ run_v2ray)
 run_socks)
 	run_socks $@
 	;;
+socks_node_switch)
+	socks_node_switch $@
+	;;
 echolog)
 	echolog $@
 	;;
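
socks_node_switch is exposed through the case dispatcher above, which is how the watchdog script added next invokes it. A manual call would look roughly like this, with placeholder section ids (mysocks for the socks instance, mynode for the target node):

    # flag = socks section id, new_node = node section id (placeholders)
    /usr/share/passwall2/app.sh socks_node_switch flag=mysocks new_node=mynode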
luci-app-passwall2/root/usr/share/passwall2/socks_auto_switch.sh (new executable file, 180 lines)
@@ -0,0 +1,180 @@
+#!/bin/sh
+
+CONFIG=passwall2
+LOG_FILE=/tmp/log/$CONFIG.log
+LOCK_FILE_DIR=/tmp/lock
+
+flag=0
+
+echolog() {
+	local d="$(date "+%Y-%m-%d %H:%M:%S")"
+	#echo -e "$d: $1"
+	echo -e "$d: $1" >> $LOG_FILE
+}
+
+config_n_get() {
+	local ret=$(uci -q get "${CONFIG}.${1}.${2}" 2>/dev/null)
+	echo "${ret:=$3}"
+}
+
+test_url() {
+	local url=$1
+	local try=1
+	[ -n "$2" ] && try=$2
+	local timeout=2
+	[ -n "$3" ] && timeout=$3
+	local extra_params=$4
+	curl --help all | grep "\-\-retry-all-errors" > /dev/null
+	[ $? == 0 ] && extra_params="--retry-all-errors ${extra_params}"
+	status=$(/usr/bin/curl -I -o /dev/null -skL --user-agent "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36" ${extra_params} --connect-timeout ${timeout} --retry ${try} -w %{http_code} "$url")
+	case "$status" in
+		204)
+			status=200
+			;;
+	esac
+	echo $status
+}
+
+test_proxy() {
+	result=0
+	status=$(test_url "${probe_url}" ${retry_num} ${connect_timeout} "-x socks5h://127.0.0.1:${socks_port}")
+	if [ "$status" = "200" ]; then
+		result=0
+	else
+		status2=$(test_url "https://www.baidu.com" ${retry_num} ${connect_timeout})
+		if [ "$status2" = "200" ]; then
+			result=1
+		else
+			result=2
+			ping -c 3 -W 1 223.5.5.5 > /dev/null 2>&1
+			[ $? -eq 0 ] && {
+				result=1
+			}
+		fi
+	fi
+	echo $result
+}
+
+test_node() {
+	local node_id=$1
+	local _type=$(echo $(config_n_get ${node_id} type nil) | tr 'A-Z' 'a-z')
+	[ "${_type}" != "nil" ] && {
+		local _tmp_port=$(/usr/share/${CONFIG}/app.sh get_new_port 61080 tcp,udp)
+		/usr/share/${CONFIG}/app.sh run_socks flag="test_node_${node_id}" node=${node_id} bind=127.0.0.1 socks_port=${_tmp_port} config_file=test_node_${node_id}.json
+		local curlx="socks5h://127.0.0.1:${_tmp_port}"
+		sleep 1s
+		_proxy_status=$(test_url "${probe_url}" ${retry_num} ${connect_timeout} "-x $curlx")
+		pgrep -af "test_node_${node_id}" | awk '! /socks_auto_switch\.sh/{print $1}' | xargs kill -9 >/dev/null 2>&1
+		rm -rf "/tmp/etc/${CONFIG}/test_node_${node_id}.json"
+		if [ "${_proxy_status}" -eq 200 ]; then
+			return 0
+		fi
+	}
+	return 1
+}
+
+test_auto_switch() {
+	flag=$(expr $flag + 1)
+	local b_nodes=$1
+	local now_node=$2
+	[ -z "$now_node" ] && {
+		local f="/tmp/etc/$CONFIG/id/socks_${id}"
+		if [ -f "${f}" ]; then
+			now_node=$(cat ${f})
+		else
+			#echolog "自动切换检测:未知错误"
+			return 1
+		fi
+	}
+
+	[ $flag -le 1 ] && {
+		main_node=$now_node
+	}
+
+	status=$(test_proxy)
+	if [ "$status" == 2 ]; then
+		echolog "自动切换检测:无法连接到网络,请检查网络是否正常!"
+		return 2
+	fi
+
+	# check whether the main node is usable
+	if [ "$restore_switch" == "1" ] && [ "$main_node" != "nil" ] && [ "$now_node" != "$main_node" ]; then
+		test_node ${main_node}
+		[ $? -eq 0 ] && {
+			# main node is fine, switch back to it
+			echolog "自动切换检测:${id}主节点【$(config_n_get $main_node type):[$(config_n_get $main_node remarks)]】正常,切换到主节点!"
+			/usr/share/${CONFIG}/app.sh socks_node_switch flag=${id} new_node=${main_node}
+			[ $? -eq 0 ] && {
+				echolog "自动切换检测:${id}节点切换完毕!"
+			}
+			return 0
+		}
+	fi
+
+	if [ "$status" == 0 ]; then
+		#echolog "自动切换检测:${id}【$(config_n_get $now_node type):[$(config_n_get $now_node remarks)]】正常。"
+		return 0
+	elif [ "$status" == 1 ]; then
+		echolog "自动切换检测:${id}【$(config_n_get $now_node type):[$(config_n_get $now_node remarks)]】异常,切换到下一个备用节点检测!"
+		local new_node
+		in_backup_nodes=$(echo $b_nodes | grep $now_node)
+		# check whether the current node is in the backup node list
+		if [ -z "$in_backup_nodes" ]; then
+			# not in the list: take the first backup node as the new node
+			new_node=$(echo $b_nodes | awk -F ' ' '{print $1}')
+		else
+			# in the list: take the next backup node as the new node
+			#local count=$(expr $(echo $b_nodes | grep -o ' ' | wc -l) + 1)
+			local next_node=$(echo $b_nodes | awk -F "$now_node" '{print $2}' | awk -F " " '{print $1}')
+			if [ -z "$next_node" ]; then
+				new_node=$(echo $b_nodes | awk -F ' ' '{print $1}')
+			else
+				new_node=$next_node
+			fi
+		fi
+		test_node ${new_node}
+		if [ $? -eq 0 ]; then
+			[ "$restore_switch" == "0" ] && {
+				uci set $CONFIG.${id}.node=$new_node
+				[ -z "$(echo $b_nodes | grep $main_node)" ] && uci add_list $CONFIG.${id}.autoswitch_backup_node=$main_node
+				uci commit $CONFIG
+			}
+			echolog "自动切换检测:${id}【$(config_n_get $new_node type):[$(config_n_get $new_node remarks)]】正常,切换到此节点!"
+			/usr/share/${CONFIG}/app.sh socks_node_switch flag=${id} new_node=${new_node}
+			[ $? -eq 0 ] && {
+				echolog "自动切换检测:${id}节点切换完毕!"
+			}
+			return 0
+		else
+			test_auto_switch "${b_nodes}" ${new_node}
+		fi
+	fi
+}
+
+start() {
+	id=$1
+	LOCK_FILE=${LOCK_FILE_DIR}/${CONFIG}_socks_auto_switch_${id}.lock
+	main_node=$(config_n_get $id node nil)
+	socks_port=$(config_n_get $id port 0)
+	delay=$(config_n_get $id autoswitch_testing_time 1)
+	sleep 5s
+	connect_timeout=$(config_n_get $id autoswitch_connect_timeout 3)
+	retry_num=$(config_n_get $id autoswitch_retry_num 1)
+	restore_switch=$(config_n_get $id autoswitch_restore_switch 0)
+	probe_url=$(config_n_get $id autoswitch_probe_url "https://www.google.com/generate_204")
+	backup_node=$(config_n_get $id autoswitch_backup_node nil)
+	while [ -n "$backup_node" -a "$backup_node" != "nil" ]; do
+		[ -f "$LOCK_FILE" ] && {
+			sleep 6s
+			continue
+		}
+		touch $LOCK_FILE
+		backup_node=$(echo $backup_node | tr -s ' ' '\n' | uniq | tr -s '\n' ' ')
+		test_auto_switch "$backup_node"
+		rm -f $LOCK_FILE
+		sleep ${delay}m
+	done
+}
+
+start $@
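
start_socks (earlier in this diff) launches this watchdog once per socks instance that has enable_autoswitch set, passing the socks section id as the only argument; the loop runs for as long as the section has a backup node list and logs to /tmp/log/passwall2.log. Starting one by hand for testing would look roughly like this (placeholder section id):

    # placeholder socks section id "mysocks"
    /usr/share/passwall2/socks_auto_switch.sh mysocks >/dev/null 2>&1 &
    tail -f /tmp/log/passwall2.log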
@@ -164,6 +164,49 @@ do
 		end)
 	end
+
+	uci:foreach(appname, "socks", function(o)
+		local id = o[".name"]
+		local node_table = uci:get(appname, id, "autoswitch_backup_node")
+		if node_table then
+			local nodes = {}
+			local new_nodes = {}
+			for k,node_id in ipairs(node_table) do
+				if node_id then
+					local currentNode = uci:get_all(appname, node_id) or nil
+					if currentNode then
+						if currentNode.protocol and (currentNode.protocol == "_balancing" or currentNode.protocol == "_shunt") then
+							currentNode = nil
+						end
+						nodes[#nodes + 1] = {
+							log = true,
+							remarks = "Socks[" .. id .. "]备用节点的列表[" .. k .. "]",
+							currentNode = currentNode,
+							set = function(o, server)
+								for kk, vv in pairs(CONFIG) do
+									if (vv.remarks == id .. "备用节点的列表") then
+										table.insert(vv.new_nodes, server)
+									end
+								end
+							end
+						}
+					end
+				end
+			end
+			CONFIG[#CONFIG + 1] = {
+				remarks = id .. "备用节点的列表",
+				nodes = nodes,
+				new_nodes = new_nodes,
+				set = function(o)
+					for kk, vv in pairs(CONFIG) do
+						if (vv.remarks == id .. "备用节点的列表") then
+							uci:set_list(appname, id, "autoswitch_backup_node", vv.new_nodes)
+						end
+					end
+				end
+			}
+		end
+	end)
 
 	uci:foreach(appname, "nodes", function(node)
 		if node.protocol and node.protocol == '_shunt' then
 			local node_id = node[".name"]