# Writing a Penetration Testing Tool in C# WinForms -- Subdomain Enumeration

Published by 吟風芥塵 on 2021-08-03


This article describes how to add subdomain enumeration to a penetration testing tool written with C# WinForms. Subdomain collection matters in a penetration test: a site's main domain is usually well defended, while its subdomains tend to be weaker, so gathering subdomain information widens the attack surface and improves the odds of a successful penetration.

  • This is part of the penetration testing tool written in C# WinForms. Earlier parts implemented port scanning, sensitive-directory scanning, and brute-force cracking; this part explains how to implement subdomain enumeration.


Table of Contents

  1. Subdomain enumeration techniques
  2. Code implementation
  3. Usage

I. Subdomain Enumeration Techniques

Dictionary brute-forcing

  • Dictionary brute-forcing takes a wordlist of candidate names, prepends each entry to the target's registered domain, and lets an automated tool resolve or request every candidate, judging from the response whether the subdomain exists. Tools such as ESD and subDomainsBrute work this way; a minimal sketch is shown below.
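
As an illustration only (not part of the tool itself), a minimal dictionary brute-force sketch in Python might look like the following. The wordlist file subnames.txt is an assumption for this example, and a real tool such as ESD or subDomainsBrute would additionally handle wildcard DNS, concurrency, and retries.

#!/usr/bin/env python3
# Minimal dictionary brute-force sketch: prepend every wordlist entry to the
# registered domain and keep the names that actually resolve.
# subnames.txt (one candidate label per line) is an assumption for this example.
import socket
import sys

def brute_subdomains(domain, wordlist='subnames.txt'):
    found = []
    with open(wordlist, 'r', encoding='utf-8') as f:
        for line in f:
            name = line.strip()
            if not name:
                continue
            candidate = name + '.' + domain
            try:
                socket.gethostbyname(candidate)  # raises socket.gaierror if the name does not resolve
                print(candidate)
                found.append(candidate)
            except socket.gaierror:
                pass
    return found

if __name__ == '__main__':
    if len(sys.argv) == 2:
        brute_subdomains(sys.argv[1])
    else:
        print('Usage: python3 brute_sub.py example.com')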

SSL certificate lookup

  • SSL/TLS certificates participate in Certificate Transparency, and a certificate lists the domain names (including subdomains) it was issued for, so certificate lookup collects subdomain records from HTTPS/SSL certificates and the public CT logs. For example, the site crt.sh lets you collect subdomains from SSL certificates.

DNS data

  • The DNS approach collects historical resolution data: querying (passive) DNS records returns the target's past resolutions and thus its subdomains; as a rule, any domain that has been resolved through DNS will show up. For example, VirusTotal runs DNS resolution to build a database that can be searched for subdomains; a hedged sketch of querying it is shown below.
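
As a hedged sketch only: passive-DNS providers such as VirusTotal expose this data through an API. The endpoint path, response fields, and the API key placeholder below are assumptions to verify against the current VirusTotal documentation before use.

#!/usr/bin/env python3
# Sketch of pulling subdomains from VirusTotal's passive-DNS data (v3 API).
# The endpoint, the response layout, and the API key are assumptions for this
# example; check the current VirusTotal documentation before relying on them.
import json
import urllib.request

API_KEY = 'YOUR_VT_API_KEY'  # assumption: a valid VirusTotal API key

def vt_subdomains(domain):
    url = 'https://www.virustotal.com/api/v3/domains/' + domain + '/subdomains?limit=40'
    req = urllib.request.Request(url, headers={'x-apikey': API_KEY})
    with urllib.request.urlopen(req) as resp:
        data = json.load(resp)
    # each item in "data" is a domain object whose id is the subdomain name
    return [item['id'] for item in data.get('data', [])]

if __name__ == '__main__':
    for sub in vt_subdomains('example.com'):
        print(sub)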

Crawler extraction (from JS files)

  • A crawler extracts subdomains from the page source and the JavaScript files it references; JSFinder works this way and is used in part 2 of the next section.

II. Code Implementation

Two approaches are implemented here for subdomain enumeration: SSL certificate lookup and extraction from JS files.

1. SSL certificate lookup

Following the reference article 漏洞挖掘-子域名, the script below extracts the relevant subdomains directly from the online certificate search site crt.sh.

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
import urllib.request
import urllib.parse
import re
import ssl
ssl._create_default_https_context = ssl._create_unverified_context

def crt_domain(domains):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
    }
    with urllib.request.urlopen('https://crt.sh/?q=' + domains) as f:
        code = f.read().decode('utf-8')
        for cert, domain in re.findall(r'<tr>(?:\s|\S)*?href="\?id=([0-9]+?)"(?:\s|\S)*?<td>([*_a-zA-Z0-9.-]+?\.' + re.escape(domains) + r')</td>(?:\s|\S)*?</tr>', code, re.IGNORECASE):
            domain = domain.split('@')[-1]
            print(domain)
            with open('crt_result.txt', 'a+') as f:
                f.write(str(domain)+'\n')

if __name__ == '__main__':
    if len(sys.argv) == 2:
        domains=sys.argv[1]
        crt_domain(domains[11:])  # drop the first 11 characters (a leading "http://www.") before querying crt.sh
    else:
        print('Usage: python3 crt_domain.py domain')
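
A note on robustness: scraping crt.sh's HTML with a regular expression is brittle. crt.sh can also return JSON (output=json), which is easier to parse; the sketch below is a separate alternative, not part of the tool's own script above, and the name_value field layout is assumed from crt.sh's current JSON output.

#!/usr/bin/env python3
# Alternative sketch: query crt.sh's JSON output instead of scraping HTML.
# The %.domain wildcard query and the name_value field are assumptions based
# on crt.sh's current public interface.
import json
import urllib.request

def crt_domains_json(domain):
    url = 'https://crt.sh/?q=%25.' + domain + '&output=json'  # %25 is the URL-encoded "%" wildcard
    with urllib.request.urlopen(url) as resp:
        entries = json.load(resp)
    subs = set()
    for entry in entries:
        # name_value may hold several names separated by newlines
        for name in entry.get('name_value', '').split('\n'):
            if name == domain or name.endswith('.' + domain):
                subs.add(name)
    return sorted(subs)

if __name__ == '__main__':
    for sub in crt_domains_json('example.com'):
        print(sub)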

Calling the script from C#

Even when a Python script depends on third-party modules, we proceed the same way: create a Process directly, invoke the Python script, and read back the scan results.

  • Create the button-click event handler button9_Click, which calls the "run the Python script" function runPythonSubdomain_ssl()
 private void button9_Click(object sender, EventArgs e)
        {
            richTextBox4.Clear();
            runPythonSubdomain_ssl();// run the Python script
            label22.Text = "Scanning...";
        }
  • Instantiate a Python process and invoke the .py script
void runPythonSubdomain_ssl()
        {
            string url = textBox9.Text;
            p = new Process();
            string path = "Subdomain.py";// path of the Python script to run; in this example it sits in the Debug folder
            string sArguments = path;
            ArrayList arrayList = new ArrayList();
            arrayList.Add(url);// the domain to enumerate
            foreach (var param in arrayList)// concatenate the arguments
            {
                sArguments += " " + param;
            }
            p.StartInfo.FileName = @"D:\Anaconda\python.exe"; // if python is not on PATH, give the absolute path to python.exe; otherwise "python" is enough
            p.StartInfo.Arguments = sArguments;// arguments for the python command
            p.StartInfo.UseShellExecute = false;
            p.StartInfo.RedirectStandardOutput = true;
            p.StartInfo.RedirectStandardInput = true;
            p.StartInfo.RedirectStandardError = true;
            p.StartInfo.CreateNoWindow = true;
            p.StartInfo.WindowStyle = ProcessWindowStyle.Hidden;
            p.OutputDataReceived += new DataReceivedEventHandler(p_OutputDataReceived_subdomain_ssl);// subscribe before reading starts so no output is missed
            p.Start();// start the process
            //MessageBox.Show("started");
            p.BeginOutputReadLine();
            //p.WaitForExit();
        }

        void p_OutputDataReceived_subdomain_ssl(object sender, DataReceivedEventArgs e)
        {
            var printedStr = e.Data;
            Action at = new Action(delegate ()
            {
                // append the text printed by the .py script to the output box
                richTextBox4.AppendText(printedStr + "\n");
                label22.Text = "Scan finished";
            });
            Invoke(at);
        }

2. Extracting from JS files

The script below is based on JSFinder: it crawls the target page, downloads the JavaScript files it references, extracts URLs from them with a regular expression, and then filters out the subdomains of the target domain.

#!/usr/bin/env python"
# coding: utf-8

import requests, argparse, sys, re
from requests.packages import urllib3
from urllib.parse import urlparse
from bs4 import BeautifulSoup

def parse_args():
    parser = argparse.ArgumentParser(epilog='\tExample: \r\npython ' + sys.argv[0] + " -u http://www.baidu.com")
    parser.add_argument("-u", "--url", help="The website")
    parser.add_argument("-c", "--cookie", help="The website cookie")
    parser.add_argument("-f", "--file", help="The file contains url or js")
    parser.add_argument("-ou", "--outputurl", help="Output file name. ")
    parser.add_argument("-os", "--outputsubdomain", help="Output file name. ")
    parser.add_argument("-j", "--js", help="Find in js file", action="store_true")
    parser.add_argument("-d", "--deep",help="Deep find", action="store_true")
    return parser.parse_args()

def extract_URL(JS):
	pattern_raw = r"""
	  (?:"|')                               # Start newline delimiter
	  (
	    ((?:[a-zA-Z]{1,10}://|//)           # Match a scheme [a-Z]*1-10 or //
	    [^"'/]{1,}\.                        # Match a domainname (any character + dot)
	    [a-zA-Z]{2,}[^"']{0,})              # The domainextension and/or path
	    |
	    ((?:/|\.\./|\./)                    # Start with /,../,./
	    [^"'><,;| *()(%%$^/\\\[\]]          # Next character can't be...
	    [^"'><,;|()]{1,})                   # Rest of the characters can't be
	    |
	    ([a-zA-Z0-9_\-/]{1,}/               # Relative endpoint with /
	    [a-zA-Z0-9_\-/]{1,}                 # Resource name
	    \.(?:[a-zA-Z]{1,4}|action)          # Rest + extension (length 1-4 or action)
	    (?:[\?|/][^"|']{0,}|))              # ? mark with parameters
	    |
	    ([a-zA-Z0-9_\-]{1,}                 # filename
	    \.(?:php|asp|aspx|jsp|json|
	         action|html|js|txt|xml)             # . + extension
	    (?:\?[^"|']{0,}|))                  # ? mark with parameters
	  )
	  (?:"|')                               # End newline delimiter
	"""
	pattern = re.compile(pattern_raw, re.VERBOSE)
	result = re.finditer(pattern, str(JS))
	if result == None:
		return None
	js_url = []
	return [match.group().strip('"').strip("'") for match in result
		if match.group() not in js_url]

# send the HTTP request
def Extract_html(URL):
	header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36",
	"Cookie": args.cookie}
	try:
		raw = requests.get(URL, headers = header, timeout=3, verify=False)
		raw = raw.content.decode("utf-8", "ignore")
		return raw
	except:
		return None

# normalize an extracted URL against the base URL
def process_url(URL, re_URL):
	black_url = ["javascript:"]	# Add some keyword for filter url.
	URL_raw = urlparse(URL)
	ab_URL = URL_raw.netloc
	host_URL = URL_raw.scheme
	if re_URL[0:2] == "//":
		result = host_URL  + ":" + re_URL
	elif re_URL[0:4] == "http":
		result = re_URL
	elif re_URL[0:2] != "//" and re_URL not in black_url:
		if re_URL[0:1] == "/":
			result = host_URL + "://" + ab_URL + re_URL
		else:
			if re_URL[0:1] == ".":
				if re_URL[0:2] == "..":
					result = host_URL + "://" + ab_URL + re_URL[2:]
				else:
					result = host_URL + "://" + ab_URL + re_URL[1:]
			else:
				result = host_URL + "://" + ab_URL + "/" + re_URL
	else:
		result = URL
	return result

def find_last(string,str):
	positions = []
	last_position=-1
	while True:
		position = string.find(str,last_position+1)
		if position == -1:break
		last_position = position
		positions.append(position)
	return positions

def find_by_url(url, js = False):
	if js == False:
		try:
			print("url:" + url)
		except:
			print("Please specify a URL like https://www.baidu.com")
		html_raw = Extract_html(url)
		if html_raw == None: 
			print("Fail to access " + url)
			return None
		#print(html_raw)
		html = BeautifulSoup(html_raw, "html.parser")
		html_scripts = html.findAll("script")
		script_array = {}
		script_temp = ""
		for html_script in html_scripts:
			script_src = html_script.get("src")
			if script_src == None:
				script_temp += html_script.get_text() + "\n"
			else:
				purl = process_url(url, script_src)
				script_array[purl] = Extract_html(purl)
		script_array[url] = script_temp
		allurls = []
		for script in script_array:
			#print(script)
			temp_urls = extract_URL(script_array[script])
			if len(temp_urls) == 0: continue
			for temp_url in temp_urls:
				allurls.append(process_url(script, temp_url)) 
		result = []
		for singerurl in allurls:
			url_raw = urlparse(url)
			domain = url_raw.netloc
			positions = find_last(domain, ".")
			miandomain = domain
			if len(positions) > 1:miandomain = domain[positions[-2] + 1:]
			#print(miandomain)
			suburl = urlparse(singerurl)
			subdomain = suburl.netloc
			#print(singerurl)
			if miandomain in subdomain or subdomain.strip() == "":
				if singerurl.strip() not in result:
					result.append(singerurl)
		return result
	return sorted(set(extract_URL(Extract_html(url)))) or None


def find_subdomain(urls, mainurl):
	url_raw = urlparse(mainurl)
	domain = url_raw.netloc
	miandomain = domain
	positions = find_last(domain, ".")
	if len(positions) > 1:miandomain = domain[positions[-2] + 1:]
	subdomains = []
	for url in urls:
		suburl = urlparse(url)
		subdomain = suburl.netloc
		#print(subdomain)
		if subdomain.strip() == "": continue
		if miandomain in subdomain:
			if subdomain not in subdomains:
				subdomains.append(subdomain)
	return subdomains

def find_by_url_deep(url):
	html_raw = Extract_html(url)
	if html_raw == None: 
		print("Fail to access " + url)
		return None
	html = BeautifulSoup(html_raw, "html.parser")
	html_as = html.findAll("a")
	links = []
	for html_a in html_as:
		src = html_a.get("href")
		if src == "" or src == None: continue
		link = process_url(url, src)
		if link not in links:
			links.append(link)
	if links == []: return None
	print("ALL Find " + str(len(links)) + " links")
	urls = []
	i = len(links)
	for link in links:
		temp_urls = find_by_url(link)
		if temp_urls == None: continue
		print("Remaining " + str(i) + " | Find " + str(len(temp_urls)) + " URL in " + link)
		for temp_url in temp_urls:
			if temp_url not in urls:
				urls.append(temp_url)
		i -= 1
	return urls

	
def find_by_file(file_path, js=False):
	with open(file_path, "r") as fobject:
		links = fobject.read().split("\n")
	if links == []: return None
	print("ALL Find " + str(len(links)) + " links")
	urls = []
	i = len(links)
	for link in links:
		if js == False:
			temp_urls = find_by_url(link)
		else:
			temp_urls = find_by_url(link, js=True)
		if temp_urls == None: continue
		print(str(i) + " Find " + str(len(temp_urls)) + " URL in " + link)
		for temp_url in temp_urls:
			if temp_url not in urls:
				urls.append(temp_url)
		i -= 1
	return urls

def giveresult(urls, domian):
	if urls == None:
		return None
	print("Find " + str(len(urls)) + " URL:")
	content_url = ""
	content_subdomain = ""
	for url in urls:
		content_url += url + "\n"
		print(url)
	subdomains = find_subdomain(urls, domian)
	print("\nFind " + str(len(subdomains)) + " Subdomain:")
	for subdomain in subdomains:
		content_subdomain += subdomain + "\n"
		print(subdomain)
	if args.outputurl != None:
		with open(args.outputurl, "a", encoding='utf-8') as fobject:
			fobject.write(content_url)
		print("\nOutput " + str(len(urls)) + " urls")
		print("Path:" + args.outputurl)
	if args.outputsubdomain != None:
		with open(args.outputsubdomain, "a", encoding='utf-8') as fobject:
			fobject.write(content_subdomain)
		print("\nOutput " + str(len(subdomains)) + " subdomains")
		print("Path:" + args.outputsubdomain)

if __name__ == "__main__":
	urllib3.disable_warnings()
	args = parse_args()
	if args.file == None:
		if args.deep is not True:
			urls = find_by_url(args.url)
			giveresult(urls, args.url)
		else:
			urls = find_by_url_deep(args.url)
			giveresult(urls, args.url)
	else:
		if args.js is not True:
			urls = find_by_file(args.file)
			giveresult(urls, urls[0])
		else:
			urls = find_by_file(args.file, js = True)
			giveresult(urls, urls[0])
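
For reference, the script can also be run standalone as `python JSFinder.py -u https://www.example.com` (the URL is just an example), which mirrors the -u argument that the C# wrapper below passes to it.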

Calling the script from C#

  • Create the button-click event handler button10_Click, which calls the "run the Python script" function runPythonSubdomain_js()
private void button10_Click(object sender, EventArgs e)
        {
            richTextBox5.Clear();
            runPythonSubdomain_js();// run the Python script
            label24.Text = "Scanning...";
        }
  • Instantiate a Python process and invoke the .py script
void runPythonSubdomain_js()
        {
            string url = textBox9.Text;
            p = new Process();
            string path = "JSFinder.py";// path of the Python script to run; in this example it sits in the Debug folder
            string sArguments = path;
            ArrayList arrayList = new ArrayList();
            arrayList.Add("-u");
            arrayList.Add(url);// the domain to enumerate
            foreach (var param in arrayList)// concatenate the arguments
            {
                sArguments += " " + param;
            }
            p.StartInfo.FileName = @"D:\Anaconda\python.exe"; // if python is not on PATH, give the absolute path to python.exe; otherwise "python" is enough
            p.StartInfo.Arguments = sArguments;// arguments for the python command
            p.StartInfo.UseShellExecute = false;
            p.StartInfo.RedirectStandardOutput = true;
            p.StartInfo.RedirectStandardInput = true;
            p.StartInfo.RedirectStandardError = true;
            p.StartInfo.CreateNoWindow = true;
            p.StartInfo.WindowStyle = ProcessWindowStyle.Hidden;
            p.OutputDataReceived += new DataReceivedEventHandler(p_OutputDataReceived_subdomain_js);// subscribe before reading starts so no output is missed
            p.Start();// start the process
            //MessageBox.Show("started");
            p.BeginOutputReadLine();
            //p.WaitForExit();
        }

        void p_OutputDataReceived_subdomain_js(object sender, DataReceivedEventArgs e)
        {
            var printedStr = e.Data;
            Action at = new Action(delegate ()
            {
                // append the text printed by the .py script to the output box
                richTextBox5.AppendText(printedStr + "\n");
                label24.Text = "Scan finished";
            });
            Invoke(at);
        }
    }
}

III. Usage

  • Enter the target address in the URL field, then choose either SSL certificate lookup or JS file extraction as the enumeration method; the discovered subdomains are displayed in the output box.

GitHub repository: https://github.com/Chenmengx/Penetration-testing-tool
