
Source Package Scanning

The script below builds, for each target domain read from domain.txt, a wordlist of likely source/backup archive names derived from the domain, and then runs dirsearch against the target with that wordlist.

import requests			# used by the my_requests() path below
import subprocess
import os, re
from urllib.parse import urlparse
from multiprocessing.pool import ThreadPool
pool = ThreadPool(10)	# created here but never used in the current flow (see the sketch near the end)

# Suppress certificate-verification warnings, since requests is called with verify=False
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

path = os.path.split(os.path.realpath(__file__))[0]
dict_path = os.path.join(path, "domain_dict")
result_path = os.path.join(path, "result")

# Create the wordlist and result directories if they do not already exist
if not os.path.exists(dict_path):
    os.mkdir(dict_path)
if not os.path.exists(result_path):
    os.mkdir(result_path)

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0"}

# Extract the bare domain (e.g. xxxx.com) from a URL
def chuli_url(url):
	# urlparse() only fills netloc when a scheme is present,
	# so prepend one for bare entries such as "xx.com"
	if not url.startswith(("http://", "https://")):
		url = "http://" + url
	return urlparse(url).netloc
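# For reference, hypothetical inputs and what chuli_url() returns:
#   chuli_url("https://www.example.com/index.php")  ->  "www.example.com"
#   chuli_url("xx.com")                             ->  "xx.com"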


def rule_1(domain):
	'''
	Numbered variants of the domain, e.g.
	xxx1.com  xxx01.com  xxx_01.com  xxx.01.com
	xxx1_com  xxx01_com  xxx_01_com  xxx.01_com
	'''
	my_list = []
	ret = domain.split(".")
	# with open("ret_demo.txt", "a") as f:
	# 	f.write(str(ret)+"\n")

	num_list = []
	for x in range(1, 6):
		num_list.append('0{}'.format(x))
	for x in range(1,6):
		num_list.append(str(x))
	#print(num_list)

	if len(ret) == 2:
		# Case 1: xxx1.com
		for z in num_list:
			ret_1 = ret[0]+z+"."+ret[1]
			my_list.append(ret_1)

		# Case 2: xxx1_com
		for z in num_list:
			ret_2 = ret[0]+z+"_"+ret[1]
			my_list.append(ret_2)

		# Case 3: xxx.1.com
		for z in num_list:
			ret_3 = ret[0]+"."+z+"."+ret[1]
			my_list.append(ret_3)

		# Case 4: xxx_1.com
		for z in num_list:
			ret_4 = ret[0]+"_"+z+"."+ret[1]
			my_list.append(ret_4)

		# Case 5: xxx_1_com
		for z in num_list:
			ret_5 = ret[0]+"_"+z+"_"+ret[1]
			my_list.append(ret_5)

		# Case 6: xxx.1_com
		for z in num_list:
			ret_6 = ret[0]+"."+z+"_"+ret[1]
			my_list.append(ret_6)
	elif len(ret) == 3:
		# Case 1: www.xxx1.com
		for z in num_list:
			ret_1 = ret[0]+"."+ret[1]+z+"."+ret[2]
			my_list.append(ret_1)

		# Case 2: www.xxx1_com
		for z in num_list:
			ret_2 = ret[0]+"."+ret[1]+z+"_"+ret[2]
			my_list.append(ret_2)

		# # Case 3: www.xxx.1.com (disabled)
		# for z in num_list:
		# 	ret_3 = ret[0]+"."+z+"."+ret[1]
		# 	my_list.append(ret_3)

		# # Case 4: www.xxx_1.com (disabled)
		# for z in num_list:
		# 	ret_4 = ret[0]+"_"+z+"."+ret[1]
		# 	my_list.append(ret_4)

		# Case 5: www_xxx_1_com
		for z in num_list:
			ret_5 = ret[0]+"_"+ret[1]+"_"+z+"_"+ret[2]
			my_list.append(ret_5)

		# Case 6: www_xxx1_com
		for z in num_list:
			ret_6 = ret[0]+"_"+ret[1]+z+"_"+ret[2]
			my_list.append(ret_6)
	return my_list
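# Illustrative output for a hypothetical two-part domain:
#   rule_1("xx.com") -> ["xx01.com", ..., "xx5.com", "xx01_com", ..., "xx.5_com"]
# i.e. 6 patterns x 10 numeric suffixes = 60 candidates; a three-part domain such as
# "www.xx.com" goes through the 4 enabled patterns, giving 40 candidates.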

def rule_2(domain):
	temp_list = []
	temp_list.append(domain)	# xx.com
	if domain.count(".") == 1:	# count literal dots; re.findall(".", ...) matches every character
		temp_list.append(domain.split(".")[0]+"_"+domain.split(".")[1]) # xxx_com
	else:
		# www.xx.com site.xx.vip   web.sss.sss.cn
		temp_list.append(domain.replace(".", "_"))
	return temp_list
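# Illustrative output (hypothetical inputs):
#   rule_2("xx.com")     -> ["xx.com", "xx_com"]
#   rule_2("www.xx.com") -> ["www.xx.com", "www_xx_com"]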

def my_requests(domain, domain_list_dict):
	# Try every candidate file name over both http and https; on the first 200
	# response, record the URL and stop checking this domain.
	for x in domain_list_dict:
		url_1 = "http://"+domain+"/"+x
		url_2 = "https://"+domain+"/"+x
		try:
			print("[*] {}".format(domain+"/"+x))
			ret_1 = requests.get(url=url_1, headers=headers, verify=False, timeout=5)
			ret_2 = requests.get(url=url_2, headers=headers, verify=False, timeout=5)

			if ret_1.status_code == 200:
				with open("result.txt", "a", encoding="utf-8") as f:
					f.write(url_1+"\n")
				print("[*] find {}".format(url_1))
				return
			if ret_2.status_code == 200:
				with open("result.txt", "a", encoding="utf-8") as f:
					f.write(url_2+"\n")
				print("[*] find {}".format(url_2))
				return
		except Exception as e:
			print(e)
# my_requests("192.168.17.139", ["xxx","www.tar", "sadga.txt"])
# exit()


def my_subprocess(rce):
    # Run a shell command and stream its combined stdout/stderr line by line.
    # Decoding as "gbk" matches a Chinese Windows console; use "utf-8" on Linux.
    print("[*] {}".format(rce))
    child = subprocess.Popen(rce, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while child.poll() is None:
        output = child.stdout.readline().decode("gbk", "ignore")
        print(output.strip())
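# Example (hypothetical command):
#   my_subprocess("ls -l /tmp")	# prints each line of output as it is produced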


my_list = []	# only referenced by the commented-out debug loop at the bottom
# Archive / backup suffixes appended to every generated domain variant
tar_list = [
	".bz2", ".gz", ".rar", ".tar", ".tar.bz2", ".tar.gz", ".tgz", ".zip", ".7z",
	".tar.gz2", ".Z", ".xz", ".tar.xz", ".mdb.rar", ".0SP1.rar", ".0SP1.zip", ".txt",
	".mdb.zip", ".config.rar", ".config.zip", ".net.cn.rar", ".net.cn.zip",
	".wjw.cn.rar", ".wjw.cn.zip", ".html", ".sql",
]
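# Each base form from rule_2()/rule_1() is combined with each suffix in tar_list,
# so a hypothetical target "www.xx.com" yields wordlist entries such as:
#   www.xx.com.tar.gz, www_xx_com.zip, www.xx1.com.rar, www_xx_01_com.7z, ...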

# domain = str(input("[*] domain(xx.com):"))
with open("domain.txt", "r") as f:
	ret = f.readlines()
for domain in ret:
    domain = chuli_url(domain.strip())
    real_url = domain	# the same bare host is scanned and used to name the output files

    # Build the candidate base names for this domain
    domain_list = []
    domain_list.extend(rule_2(domain))	# base forms: xx.com, xx_com
    domain_list.extend(rule_1(domain))	# numbered variants

    # Combine every base form with every archive suffix
    temp_list = []
    for a in domain_list:
        for x in tar_list:
            temp_list.append(a+x.strip())

    # Write the per-domain wordlist
    with open(os.path.join(dict_path, domain+".txt"), "a", encoding="utf-8") as f:
        for x in temp_list:
            f.write(x+"\n")

    # Run dirsearch with the generated wordlist:
    #   --random-agent   use a random User-Agent
    #   -e *             all extensions (note: the unquoted * may be expanded by the shell)
    #   -t 20            20 threads
    #   -w               the wordlist built above
    #   --json-report    write the results as JSON into the result directory
    my_subprocess("python3 /root/dirsearch/dirsearch.py --random-agent -e * -t 20 -u {} -w {} --json-report={}".format(real_url, os.path.join(dict_path, domain+".txt"), os.path.join(result_path, domain+"_result.json")))
# pool.close()
# pool.join()
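# The ThreadPool created at the top, together with the commented-out pool.close()/
# pool.join() above, suggests a parallel variant that uses my_requests() instead of
# dirsearch. A minimal sketch of that idea (my assumption, not part of the original flow):
#
#   jobs = []
#   for d in ret:
#       d = chuli_url(d.strip())
#       candidates = [a + x.strip() for a in rule_2(d) + rule_1(d) for x in tar_list]
#       jobs.append(pool.apply_async(my_requests, (d, candidates)))
#   pool.close()
#   pool.join()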
    
# for x in my_list:
# 	print(x)
# 	with open("my_tarzip.txt", "a", encoding="utf-8") as f:
# 		f.write(x+"\n")

From: https://www.cnblogs.com/startstart/p/16909535.html