介绍
SiteInfoScan
是一款用于解析url/domain IPv4地址的扫描工具,将结果输出到Excel中,输出字段:域名、解析IPv4、DNS服务器、解析时间。
脚本参数
usage: DomainsIpv4Scan.py [-h] [-u URL] [-f FILE] [-t THREAD]
options:
-h, --help show this help message and exit
-u URL, --url URL 单个domain/url解析
-f FILE, --file FILE 指定domains/urls文件(一行一条)
-t THREAD, --thread THREAD
并发数(int: 5)
输出日志
[ www.baidu.com ] 103.235.46.96
[ www.baidu.com ] 103.235.47.188
+---------------+----------------+-----------------+---------------------+
| 域名 | 解析IPv4 | DNS服务器 | 解析时间 |
+---------------+----------------+-----------------+---------------------+
| www.baidu.com | 103.235.46.96 | 114.114.114.114 | 2024-12-20 16:02:41 |
| www.baidu.com | 103.235.47.188 | 114.114.114.114 | 2024-12-20 16:02:41 |
+---------------+----------------+-----------------+---------------------+
[ output: ] output\DomainsIpv4Scan_1734681761\result.xlsx
代码源码
# -*- coding: utf-8 -*-
from secScript import dns, Function, outPath, log, program_exit
import openpyxl, argparse, os.path, time
from prettytable import PrettyTable
from urllib import parse
import concurrent.futures
result = []
model = 'DomainsIpv4Scan'
# Record resolution rows (one per IP) and log each hit.
def output(domain, ip: list, server, timer: str) -> list:
    """Build one [domain, ip, server, timer] row per address in *ip*.

    Each row is appended to the module-level ``result`` accumulator and
    echoed via ``log``.  Returns the rows created by this call.
    """
    rows = [[domain, addr, server, timer] for addr in ip]
    for row in rows:
        result.append(row)
        log(row[0], row[1])
    return rows
# Main probe: resolve one domain/url and record its IPv4 addresses.
def run(_url: str) -> list:
    """Resolve *_url* via ``dns()`` and record each resolved address.

    Returns the list of result rows for this url.  On a failed lookup it
    now returns an empty list instead of implicitly returning ``None``,
    which crashed callers that ``extend()`` the return value.  ``[]`` is
    falsy like ``None``, so truthiness checks by callers still work.
    """
    res = dns(_url)
    if not res:
        return []
    return output(
        res.get("domain"),
        res.get("ip"),
        res.get('dnsServer'),
        res.get('timer'),
    )
# Persist accumulated rows to an Excel file and optionally print a preview.
def outExcel(path: str) -> str:
    """Write the global ``result`` rows to <outPath>/<model>_<path>/result.xlsx.

    A PrettyTable preview is printed when there are at most 30 data rows.
    Returns the full path of the saved workbook.
    """
    out_dir = os.path.join(outPath, "{}_{}".format(model, path))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    preview = PrettyTable()
    preview.field_names = ["域名", "解析IPv4", "DNS服务器", "解析时间"]

    book = openpyxl.Workbook()
    sheet = book.active
    sheet.append(preview.field_names)

    for row in result:
        # Skip rows that are missing or empty.
        if not row:
            continue
        values = [row[0], row[1], row[2], row[3]]
        preview.add_row(values)
        sheet.append(values)

    if len(preview.rows) <= 30:
        print(preview)

    file = os.path.join(out_dir, f"result.xlsx")
    book.save(file)
    log("output:", file)
    return file
# 获取域名或者域名列表,处理url等格式
def getDomain(url: str | list) -> str | list:
if type(url) is list:
res = []
for i in url:
res.append(parse.urlparse(i).hostname if i.startswith('http') else i)
return res
else:
return parse.urlparse(url).hostname if url.startswith('http') else url
# Script entry point: parse CLI arguments, run lookups, export results.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', help='单个domain/url解析')
    parser.add_argument('-f', '--file', help='指定domains/urls文件(一行一条)')
    parser.add_argument('-t', '--thread', help='并发数(int: 5)', default=5, type=int)
    args = parser.parse_args()
    # Clamp the worker count to at least 1.
    if args.thread < 1:
        args.thread = 1
    if args.url:
        run(args.url)
    elif args.file:
        try:
            with concurrent.futures.ProcessPoolExecutor(max_workers=args.thread) as executor:
                futures = [executor.submit(run, i) for i in getDomain(Function.fileGetLine(args.file))]
                result = []
                for future in futures:
                    # run() may yield None/[] on a failed lookup; a bare
                    # extend(None) raised TypeError before this guard.
                    rows = future.result()
                    if rows:
                        result.extend(rows)
                # Deduplicate rows.  Rows are lists (unhashable), so the old
                # `list(set(result))` raised TypeError; key on tuples instead,
                # with dict.fromkeys preserving first-seen order.
                result = [list(t) for t in dict.fromkeys(map(tuple, result))]
        except KeyboardInterrupt:
            program_exit()
    else:
        parser.print_help()
    # Only export when at least one row was collected.
    if len(result) > 0:
        outExcel(str(int(time.time())))