解压后发现是流量包,好多icmp包
发现icmp包尾部有$$START$$打头的16进制字符串,很多是重复的。我们只需要提取icmp的type=0的包(上图标识为褐色)尾部的这些字符串,还需要把16进制字符串转为对应的字符串(bytes 类型)并去重。
使用python脚本
import pyshark
import binascii
def process_pcap():
    """Extract unique UTF-8 payloads from ICMP echo-reply packets in out.pcap.

    Opens out.pcap with a display filter so only ICMP type 0 (echo reply)
    packets are parsed, hex-decodes each packet's data field, de-duplicates
    the decoded strings, and appends each new string to out.txt in arrival
    order.
    """
    # Only ICMP echo replies (type 0) carry the hidden payload we want.
    packets = pyshark.FileCapture('out.pcap', display_filter="icmp.type==0")
    seen = set()  # set gives O(1) membership tests (the original scanned a list)
    with open('out.txt', 'w', encoding='utf-8') as f:
        for each in packets:
            try:
                # pyshark exposes the ICMP payload as a hex string; decode it
                # to text.
                data = binascii.unhexlify(each.icmp.data).decode('utf-8')
            except (binascii.Error, UnicodeDecodeError, AttributeError):
                # Skip packets with no data field, odd-length hex, or payloads
                # that are not valid UTF-8 (the original only caught
                # binascii.Error and would crash on a bad decode).
                continue
            if data not in seen:
                f.write(data)
                seen.add(data)
    packets.close()
    print('done')
# Run the extraction only when executed directly as a script.
if __name__ == '__main__':
    process_pcap()
把out.txt首行和尾行的开始、结束标志去除,并去掉每行头部的$$START$$标志,
复制内容到cyberchef
或者使用下面两个python脚本直接输出processed_out.txt。内容复制到cyberchef
有两个好处一是直接生成最终结果,二是由于数据较大处理时间约两分钟,初始化有提示带进度条用户体验好。
import os
import pyshark
import binascii
from tqdm import tqdm
import time
def process_pcap_sync():
    """Extract unique ICMP echo-reply payloads from out.pcap into processed_out.txt.

    Pass 1 counts the matching packets so tqdm can show a real progress bar;
    pass 2 hex-decodes and de-duplicates the payloads.  The first and last
    payloads (the transfer start/end markers) are dropped and a leading
    '$$START$$' prefix is stripped from each remaining line.
    """
    res = []      # decoded payloads, kept in arrival order
    seen = set()  # O(1) de-duplication alongside the ordered list
    print("开始获取数据包总数...")
    start_time = time.perf_counter()
    try:
        # Pass 1: count packets so the progress bar has a total.  The
        # original also looped over the already-consumed capture printing
        # elapsed time — that loop did nothing useful and was removed.
        packet_capture = pyshark.FileCapture('out.pcap', display_filter="icmp.type==0")
        total_packets = len(list(packet_capture))
        packet_capture.close()
        end_time = time.perf_counter()
        print(f"\r获取数据包总数耗时: {end_time - start_time:.3f}秒")
        print("开始捕获和处理数据包...")
        # Pass 2: re-open the capture (the first one was consumed) and decode.
        packet_capture = pyshark.FileCapture('out.pcap', display_filter="icmp.type==0")
        progress_bar = tqdm(total=total_packets)
        for packet in packet_capture:
            try:
                data = binascii.unhexlify(packet.icmp.data).decode('utf-8')
                if data not in seen:
                    seen.add(data)
                    res.append(data)
            # Also catch UnicodeDecodeError — the original only caught
            # binascii.Error and a bad decode would abort the loop.
            except (binascii.Error, UnicodeDecodeError) as e:
                print(f"处理数据包时出现binascii.Error异常: {e}")
            progress_bar.update(1)
        progress_bar.close()
        packet_capture.close()
    except Exception as e:
        print(f"在处理pcap文件时发生错误: {e}")
    if not res:
        print("没有获取到有效的数据,可能是过滤条件问题或者pcap文件内容问题")
        return
    # Drop the first and last entries (transfer start/end markers) and strip
    # the per-line '$$START$$' prefix before writing the final output.
    new_content = []
    for line in res[1:-1]:
        if line.startswith('$$START$$'):
            line = line.replace('$$START$$', '', 1)
        new_content.append(line.rstrip('\n'))
    output_file = 'processed_out.txt'
    with open(output_file, 'w', encoding='utf-8') as f_out:
        f_out.writelines(line + '\n' for line in new_content)
    print('done')
# Script entry point; any unexpected failure is reported instead of a raw
# traceback.
if __name__ == '__main__':
    try:
        process_pcap_sync()
    except Exception as e:
        print(f"在运行主程序时发生错误: {e}")
import pyshark
import time
import sqlite3
from tqdm import tqdm
def get_ip_info(packet):
    """Return (source_ip, destination_ip) for *packet*.

    Falls back to ('N/A', 'N/A') when the packet carries no IP layer (or the
    layer lacks src/dst attributes).
    """
    try:
        return packet.ip.src, packet.ip.dst
    except AttributeError:
        return 'N/A', 'N/A'
def process_pcap_sync():
    """Extract unique $$START$$-tagged ICMP echo-reply payloads from out.pcap.

    Every matching payload is recorded once in the SQLite database
    packets.db and, after the first/last marker entries are dropped and the
    '$$START$$' prefix stripped, written to processed_out.txt.
    """
    res = []             # unique payloads, in arrival order
    unique_data = set()  # O(1) membership check backing the de-duplication
    print("开始获取数据包总数...")
    start_time = time.perf_counter()
    try:
        conn = sqlite3.connect('packets.db')
        cursor = conn.cursor()
        cursor.execute('''CREATE TABLE IF NOT EXISTS packets
                          (id INTEGER PRIMARY KEY AUTOINCREMENT,
                          timestamp REAL,
                          source_ip TEXT,
                          destination_ip TEXT,
                          protocol TEXT,
                          data TEXT)''')
        packet_count = 0
        capture = pyshark.FileCapture('out.pcap')
        data_to_insert = []
        print("开始捕获和处理数据包...")
        # Single pass: the original iterated the capture a second time, but
        # that pass could never add anything (unique_data already held every
        # payload) and only doubled the runtime.
        progress_bar = tqdm()
        for packet in capture:
            # Keep only ICMP echo replies (type 0).
            if not (hasattr(packet, 'icmp') and int(packet.icmp.type) == 0):
                continue
            packet_count += 1
            progress_bar.update(1)
            timestamp = float(packet.sniff_time.timestamp())
            source_ip, destination_ip = get_ip_info(packet)
            protocol = packet.transport_layer
            data_hex = packet.icmp.data.binary_value
            # Only payloads tagged with the transfer marker are interesting.
            if not data_hex.startswith(b'$$START$$'):
                continue
            try:
                data = data_hex.decode('utf-8')
            except UnicodeDecodeError:
                continue
            if data not in unique_data:
                unique_data.add(data)
                res.append(data)
                data_to_insert.append(
                    (timestamp, source_ip, destination_ip, protocol, data))
        progress_bar.close()
        capture.close()
        cursor.executemany('INSERT INTO packets (timestamp, source_ip, destination_ip, protocol, data) VALUES (?,?,?,?,?)',
                           data_to_insert)
        conn.commit()
        conn.close()
        end_time = time.perf_counter()
        print(f"\r共处理 {packet_count} 个匹配数据包,耗时: {end_time - start_time:.3f}秒")
    except Exception as e:
        print(f"在处理pcap文件时发生错误: {e}")
    if not res:
        print("没有获取到有效的数据,可能是过滤条件问题或者pcap文件内容问题")
        return
    # Drop the first and last payloads (transfer start/end markers) and
    # strip the per-line '$$START$$' prefix.
    new_content = []
    for line in res[1:-1]:
        if line.startswith('$$START$$'):
            line = line.replace('$$START$$', '', 1)
        new_content.append(line.rstrip('\n'))
    output_file = 'processed_out.txt'
    with open(output_file, 'w', encoding='utf-8') as f_out:
        f_out.writelines(line + '\n' for line in new_content)
    print('done')
# Script entry point; any unexpected failure is reported instead of a raw
# traceback.
if __name__ == '__main__':
    try:
        process_pcap_sync()
    except Exception as e:
        print(f"在运行主程序时发生错误: {e}")
pyshark 在解析数据包时做深度解析,占用资源大、耗时较多,上面的python脚本执行效率太低,都需要4分钟以上。
果断弃用pyshark库处理流量包数据,改用更强大灵活的Scapy模块。Scapy 是一个强大的交互式数据包处理程序,可以用于发送、嗅探、剖析和伪造网络数据包。与 pyshark 相比,Scapy 在数据包的构建、修改和发送方面具有更大的灵活性,果然效率提高,速度飙升,代码如下:
from scapy.all import *
import binascii
from tqdm import tqdm
import time
import sqlite3
def get_ip_info(packet):
    """Return (src, dst) from the packet's IP layer, or ('N/A', 'N/A')
    when the packet has no IP layer."""
    if not packet.haslayer(IP):
        return 'N/A', 'N/A'
    ip_layer = packet[IP]
    return ip_layer.src, ip_layer.dst
def process_pcap_sync():
    """Extract unique $$START$$-tagged ICMP echo-reply payloads from out.pcap
    using scapy.

    Matching payloads are de-duplicated, stored once in the SQLite database
    packets.db, then written to processed_out.txt with the first/last marker
    entries dropped and the '$$START$$' prefix stripped.
    """
    res = []             # unique payloads, in arrival order
    unique_data = set()  # O(1) membership check backing the de-duplication
    print("开始获取数据包总数...")
    start_time = time.perf_counter()
    try:
        conn = sqlite3.connect('packets.db')
        cursor = conn.cursor()
        cursor.execute('''CREATE TABLE IF NOT EXISTS packets
                          (id INTEGER PRIMARY KEY AUTOINCREMENT,
                          timestamp REAL,
                          source_ip TEXT,
                          destination_ip TEXT,
                          protocol TEXT,
                          data TEXT)''')
        packet_count = 0
        # rdpcap loads the whole file into memory, so len() is cheap here.
        packets = rdpcap('out.pcap')
        data_to_insert = []
        print("开始捕获和处理数据包...")
        # Single pass: the original iterated the packet list a second time,
        # but that pass could never add anything (unique_data already held
        # every payload) and only doubled the runtime.
        progress_bar = tqdm(total=len(packets))
        for packet in packets:
            progress_bar.update(1)
            # Keep only ICMP echo replies (type 0).
            if not (packet.haslayer(ICMP) and packet[ICMP].type == 0):
                continue
            packet_count += 1
            timestamp = float(packet.time)
            source_ip, destination_ip = get_ip_info(packet)
            protocol = packet.name
            # bytes(payload) never raises, unlike '.load' which throws
            # AttributeError on an empty payload and aborted the whole parse
            # via the outer except in the original.
            data_hex = bytes(packet[ICMP].payload)
            # Only payloads tagged with the transfer marker are interesting.
            if not data_hex.startswith(b'$$START$$'):
                continue
            try:
                data = data_hex.decode('utf-8')
            except UnicodeDecodeError:
                continue
            if data not in unique_data:
                unique_data.add(data)
                res.append(data)
                data_to_insert.append(
                    (timestamp, source_ip, destination_ip, protocol, data))
        progress_bar.close()
        cursor.executemany('INSERT INTO packets (timestamp, source_ip, destination_ip, protocol, data) VALUES (?,?,?,?,?)',
                           data_to_insert)
        conn.commit()
        conn.close()
        end_time = time.perf_counter()
        print(f"\r共处理 {packet_count} 个匹配数据包,耗时: {end_time - start_time:.3f}秒")
    except Exception as e:
        print(f"在处理pcap文件时发生错误: {e}")
    if not res:
        print("没有获取到有效的数据,可能是过滤条件问题或者pcap文件内容问题")
        return
    # Drop the first and last payloads (transfer start/end markers) and
    # strip the per-line '$$START$$' prefix.
    new_content = []
    for line in res[1:-1]:
        if line.startswith('$$START$$'):
            line = line.replace('$$START$$', '', 1)
        new_content.append(line.rstrip('\n'))
    output_file = 'processed_out.txt'
    with open(output_file, 'w', encoding='utf-8') as f_out:
        f_out.writelines(line + '\n' for line in new_content)
    print('done')
# Script entry point; any unexpected failure is reported instead of a raw
# traceback.
if __name__ == '__main__':
    try:
        process_pcap_sync()
    except Exception as e:
        print(f"在运行主程序时发生错误: {e}")
获取包只有几十秒
cyberchef识别出是zip文件,点击保存图标,另存为zip文件,解压得flag.gif
把这个gif文件拷贝进kali,输入下面命令
identify -format "%T" flag.gif
把使用identify得到隐写信息
2050502050502050205020202050202020205050205020502050205050505050202050502020205020505050205020206666
我们去掉尾部6666,把20用0替换,50用1替换
205050205050205020502020205020202020505020502050205020505050505020205050202020502050505020502020
使用python和qt写个程序实现,源码如下:
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QTextEdit
class TextReplaceTool(QWidget):
    """A small find/replace widget: two input fields, two buttons, a text area."""

    def __init__(self):
        super().__init__()
        self.init_ui()

    def init_ui(self):
        """Build the widget layout and wire the button signals to handlers."""
        # Find row: label + input.
        self.find_label = QLabel('查找内容:')
        self.find_input = QLineEdit()
        # Replace row: label + input.
        self.replace_label = QLabel('替换内容:')
        self.replace_input = QLineEdit()
        # Action buttons.
        self.find_button = QPushButton('查找')
        self.find_button.clicked.connect(self.find_text)
        self.replace_button = QPushButton('替换')
        self.replace_button.clicked.connect(self.replace_text)
        # Editable text area the find/replace operates on.
        self.text_edit = QTextEdit()
        # Layout: three horizontal rows stacked above the text area.
        hbox1 = QHBoxLayout()
        hbox1.addWidget(self.find_label)
        hbox1.addWidget(self.find_input)
        hbox2 = QHBoxLayout()
        hbox2.addWidget(self.replace_label)
        hbox2.addWidget(self.replace_input)
        hbox3 = QHBoxLayout()
        hbox3.addWidget(self.find_button)
        hbox3.addWidget(self.replace_button)
        vbox = QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox3)
        vbox.addWidget(self.text_edit)
        self.setLayout(vbox)
        self.setWindowTitle('文本查找替换工具')
        self.show()

    def find_text(self):
        """Move the text cursor to the first occurrence of the search string."""
        find_str = self.find_input.text()
        text = self.text_edit.toPlainText()
        start_index = text.find(find_str)
        if start_index != -1:
            # setPosition() takes an absolute offset, so no prior moveCursor()
            # is needed.  (The original called QTextEdit.MoveOperation.Start,
            # which does not exist in PyQt5 — the enum lives on QTextCursor —
            # and raised AttributeError on every click.)
            cursor = self.text_edit.textCursor()
            cursor.setPosition(start_index)
            self.text_edit.setTextCursor(cursor)

    def replace_text(self):
        """Replace every occurrence of the search string in the text area."""
        find_str = self.find_input.text()
        replace_str = self.replace_input.text()
        text = self.text_edit.toPlainText()
        self.text_edit.setPlainText(text.replace(find_str, replace_str))
# Launch the Qt application; exec_() blocks until the window is closed and
# its return code is passed to sys.exit().
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = TextReplaceTool()
    sys.exit(app.exec_())
运行gui如图:两次替换可得结果
011011010100010000110101010111110011000101110100
去cyberchef
先把binary(二进制)转为bytes(字符串),再计算MD5哈希值
得 f0f1003afe4ae8ce4aa8e8487a8ab3b6
flag{f0f1003afe4ae8ce4aa8e8487a8ab3b6}
标签:BUUCTF,ip,self,packet,time,print,蜘蛛侠,data From: https://blog.csdn.net/weixin_34979095/article/details/142617089