Delete get_proxy_info.py
parent 838788cb9d
commit f102688e9f
@@ -1,142 +0,0 @@
import base64
import urllib.parse  # a bare `import urllib` does not reliably expose urllib.parse
import json

import requests


def base64_decode(encoded_str):
    """
    Decode a Base64-encoded string, fixing up any missing padding.

    :param encoded_str: the Base64-encoded string to decode
    :return: the decoded string, or the raw bytes if UTF-8 decoding fails
    """
    # Pad the string out to a multiple of 4 characters
    padded_str = encoded_str + '=' * (-len(encoded_str) % 4)

    # Convert the string to bytes
    bytes_str = padded_str.encode('utf-8')

    # Base64-decode
    decoded_bytes = base64.b64decode(bytes_str)

    # Try to interpret the bytes as a UTF-8 string
    try:
        return decoded_bytes.decode('utf-8')
    except UnicodeDecodeError:
        # Fall back to the raw bytes if decoding fails
        return decoded_bytes
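

# A quick sanity check of the padding fix (hypothetical values, shown only as
# comments): 'aGVsbG8' is 'hello' minus its trailing '=', so
#   base64_decode('aGVsbG8')  -> 'hello'
# whereas base64.b64decode('aGVsbG8') would raise binascii.Error.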


def encode_to_base64(input_string):
    """Encode a UTF-8 string to its Base64 text representation."""
    # Convert the input string to bytes
    input_bytes = input_string.encode('utf-8')

    # Encode the bytes to Base64
    encoded_bytes = base64.b64encode(input_bytes)

    # Convert the Base64 bytes back to a string
    encoded_string = encoded_bytes.decode('utf-8')

    return encoded_string
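

# Round trip with base64_decode (illustrative values, not real feed data):
#   encode_to_base64('ss://example')  -> 'c3M6Ly9leGFtcGxl'
#   base64_decode('c3M6Ly9leGFtcGxl') -> 'ss://example'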


def get_subscription(url):
    """Fetch the subscription data."""
    response = requests.get(url, timeout=30)  # timeout added so a dead feed cannot hang the script
    print(response.status_code)
    if response.status_code == 200:
        return response.text
    else:
        raise Exception("Failed to fetch subscription data: HTTP status code {}".format(response.status_code))
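

# Typical call (placeholder URL, not a real endpoint):
#   raw = get_subscription('https://example.com/sub?token=...')
# Providers generally return the whole node list as one Base64 blob.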


def parse_subscription(subscription):
    """Parse the subscription data into a list of proxy URLs."""
    decoded_data = base64_decode(subscription)
    proxy_list = decoded_data.split('\n')
    return [proxy for proxy in proxy_list if proxy]
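

# Sketch of the expected shape (made-up data): a blob that decodes to
#   'ss://AAA...#node1\nss://BBB...#node2\n'
# comes back as ['ss://AAA...#node1', 'ss://BBB...#node2'].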


def url_decode(encoded_str):
    """
    Decode a URL-encoded (percent-encoded) string.

    :param encoded_str: the URL-encoded string to decode
    :return: the decoded string
    """
    return urllib.parse.unquote(encoded_str)
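

# A minimal illustration of unquote:
#   url_decode('HK%2001') -> 'HK 01'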


def parse_ss_urls(ss_url_list):
    """Parse a list of Shadowsocks links."""
    ss_proxies = []
    for ss_proxy in ss_url_list:
        # Pull the individual fields out of the link
        base_str = ss_proxy[5:]  # strip the 'ss://' scheme prefix
        # The [3:] slice drops a three-character, provider-specific prefix from the node name
        name = url_decode(base_str.split("@")[1].split("/?")[1].split("#")[1].rstrip("\r"))[3:]
        encryption_mode_and_password = base64_decode(base_str.split("@")[0])
        # Split on the first ':' only, since the password itself may contain one
        encryption_mode, password = encryption_mode_and_password.split(":", 1)
        # Split on the last ':' so a host containing colons keeps them
        server, port = base_str.split("@")[1].split("/?")[0].rsplit(":", 1)
        plugin = url_decode(base_str.split("@")[1].split("/?")[1].split("#")[0])
        # Store the extracted fields
        ss_proxies.append({
            "server": server,
            "port": port,
            "plugin": plugin,
            "name": name,
            "encryption_mode": encryption_mode,
            "password": password
        })
    return ss_proxies
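

# The link format this parser assumes, as a made-up example (not a live node):
#   ss://YWVzLTI1Ni1nY206cGFzc3dvcmQ=@1.2.3.4:8388/?plugin=obfs#%28A%29Pro-HK
# would be parsed into:
#   {"server": "1.2.3.4", "port": "8388", "plugin": "plugin=obfs",
#    "name": "Pro-HK", "encryption_mode": "aes-256-gcm", "password": "password"}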


def save_to_json(sub_url):
    """Fetch, parse, and dump the full proxy list to ss_proxies.json."""
    data = get_subscription(sub_url)
    print(f"data: \n{data}.")
    ss_url_list = parse_subscription(data)
    print(f"ss_url_list: \n{ss_url_list}.")
    ss_proxies = parse_ss_urls(ss_url_list)

    # Convert ss_proxies to JSON and save it to a file with Unicode intact
    with open('ss_proxies.json', 'w', encoding='utf-8') as file:
        json.dump(ss_proxies, file, ensure_ascii=False, indent=4)

    print("JSON file saved as 'ss_proxies.json' with Unicode encoding.")


def generate_data(ss_url_list):
    """Filter the URLs down to the wanted regions and re-encode them."""
    required_regions_list = ['HK', 'SG']
    required_ss_url_list = []

    # Collect URLs whose name fragment ends in one of the required region codes
    for ss_url in ss_url_list:
        region_code = ss_url.split("#")[1].rstrip("\r")[-2:]
        if region_code in required_regions_list:
            required_ss_url_list.append(ss_url.rstrip('\r'))

    # Concatenate the URLs, one per line
    concatenated_urls = ''
    for url in required_ss_url_list:
        concatenated_urls += url + '\n'  # newline added so the result stays a valid subscription body

    data = encode_to_base64(concatenated_urls)
    print(data)
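

# The re-encoded blob is itself a valid subscription body: feeding it back
# through parse_subscription() should return exactly the filtered URLs.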


def extract_ss_link():
    sub_url = "https://dy.tagsub.net/api/v1/client/subscribe?token=26d553424da6d84dccb20dd05dc844c0"
    data = get_subscription(sub_url)
    print(f"data: \n{data}.\n")
    ss_url_list = parse_subscription(data)
    # print(f"ss_url_list: \n{ss_url_list}.")
    for ss_url in ss_url_list:
        link = ss_url.rstrip("\n")
        # Re-print each link with its query string URL-decoded
        front_part = link.split("?")[0]
        latter_part = url_decode(link.split("?")[1])
        print(front_part + latter_part)
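

# Each printed line is the original link with the query string decoded (the '?'
# itself is dropped by the split), e.g. (made-up):
#   'ss://BASE64@1.2.3.4:8388/' + 'plugin=obfs#(A)Pro-HK'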


if __name__ == "__main__":
    extract_ss_link()