Extract all the links on a given page and filter them by title keyword to build link list A. Then open every page in list A, check whether it contains any of the specified content keywords, and finally output the details of the URLs that match.
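Before the full GUI version below, here is a minimal sketch of that two-stage flow using requests and BeautifulSoup. The keyword lists and start URL are placeholders, and error handling is omitted for brevity:

# Minimal sketch of the two-stage pipeline (no GUI).
# TITLE_KEYWORDS, CONTENT_KEYWORDS and START_URLS are placeholder values.
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

TITLE_KEYWORDS = ["招标公告"]          # placeholder title keywords
CONTENT_KEYWORDS = ["截止日期"]        # placeholder content keywords
START_URLS = ["https://example.com"]  # placeholder start pages

def collect(start_urls):
    # Stage 1: build list A -- links whose anchor text hits a title keyword
    candidates = []
    for url in start_urls:
        soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
        for a in soup.find_all("a", href=True):
            text = a.get_text().strip()
            hits = [k for k in TITLE_KEYWORDS if k in text]
            if hits:
                candidates.append({"title": text,
                                   "link": urljoin(url, a["href"]),
                                   "title_hits": hits})
    # Stage 2: open each candidate page and test the content keywords
    results = []
    for item in candidates:
        body = BeautifulSoup(requests.get(item["link"], timeout=10).content,
                             "html.parser").get_text()
        content_hits = [k for k in CONTENT_KEYWORDS if k in body]
        if content_hits:
            item["content_hits"] = content_hits
            results.append(item)
    return results

if __name__ == "__main__":
    for r in collect(START_URLS):
        print(r["title"], r["link"], r["title_hits"], r["content_hits"])

The GUI version below wraps this same flow in Tkinter, adds a worker thread so the window stays responsive, and persists the keywords and URLs to a settings file.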
import tkinter as tk
from tkinter import messagebox, ttk
import json
import os
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
import threading
import webbrowser
# Directory of the current script
script_dir = os.path.dirname(os.path.abspath(__file__))
# Path of the settings.json file next to the script
SETTINGS_FILE = os.path.join(script_dir, "settings.json")

class KeywordCollector:
    def __init__(self, root):
        self.root = root
        self.root.title("URL关键字收集器")
        # Storage for keywords, URLs and results
        self.title_keywords = []
        self.content_keywords = []
        self.urls = []
        self.results = []
        self.lock = threading.Lock()
        # Load previously saved settings, if any
        self.load_settings()
        # Caption labels
        tk.Label(root, text="标题关键字:").grid(row=0, column=0, sticky="w", padx=10, pady=5)
        tk.Label(root, text="内容关键字:").grid(row=0, column=1, sticky="w", padx=10, pady=5)
        tk.Label(root, text="URL:").grid(row=0, column=2, sticky="w", padx=10, pady=5)
        # Input boxes
        self.title_entry = tk.Text(root, width=30, height=10)
        self.title_entry.grid(row=1, column=0, padx=10, pady=5)
        self.content_entry = tk.Text(root, width=30, height=10)
        self.content_entry.grid(row=1, column=1, padx=10, pady=5)
        self.url_entry = tk.Text(root, width=50, height=10)  # wider box for URLs
        self.url_entry.grid(row=1, column=2, padx=10, pady=5)
        # Save button
        self.save_button = tk.Button(root, text="保存", command=self.save_data)
        self.save_button.grid(row=2, column=0, columnspan=3, padx=10, pady=5)
        # Button that starts the crawl
        self.generate_button = tk.Button(root, text="采集网页生成数据", command=self.start_generation)
        self.generate_button.grid(row=3, column=0, columnspan=3, padx=10, pady=20)
        # Treeview that displays the results
        self.result_tree = ttk.Treeview(root, columns=("标题", "网址", "匹配标题关键字", "匹配内容关键字"), show='headings', height=15)
        self.result_tree.grid(row=1, column=3, rowspan=4, padx=10, pady=5)  # table sits on the right
        self.result_tree.heading("标题", text="标题")
        self.result_tree.heading("网址", text="网址")
        self.result_tree.heading("匹配标题关键字", text="匹配标题关键字")
        self.result_tree.heading("匹配内容关键字", text="匹配内容关键字")
        # Clicking the URL column opens the link in a browser
        self.result_tree.bind("<Button-1>", self.on_treeview_click)
        # Status bar
        self.status_bar = tk.Label(root, text="当前访问网址: ", anchor="w", relief="sunken")
        self.status_bar.grid(row=5, column=0, columnspan=4, sticky="ew", padx=10, pady=5)  # spans the full width
        # Fill the text boxes with the loaded settings
        self.update_listboxes()
    def save_data(self):
        # Read the text boxes; drop blank lines, since an empty keyword
        # ('' is a substring of every string) would match every page
        self.title_keywords = [k for k in self.title_entry.get("1.0", tk.END).strip().split('\n') if k.strip()]
        self.content_keywords = [k for k in self.content_entry.get("1.0", tk.END).strip().split('\n') if k.strip()]
        self.urls = [u for u in self.url_entry.get("1.0", tk.END).strip().split('\n') if u.strip()]
        # Refresh the text boxes and persist the settings
        self.update_listboxes()
        self.save_settings()
        messagebox.showinfo("保存成功", "数据已保存。")

    def start_generation(self):
        # Run the crawl in a daemon thread so the GUI stays responsive
        threading.Thread(target=self.generate_table, daemon=True).start()
    def generate_table(self):
        all_extracted_items = []
        # Stage 1: collect links whose anchor text matches a title keyword
        for url in self.urls:
            self.update_status(f"访问主网址: {url}")
            extracted_items = self.fetch_and_parse(url)
            all_extracted_items.extend(extracted_items)
        # Stage 2: visit each candidate page and test the content keywords
        matched_items = []
        for item in all_extracted_items:
            matched_content_keywords = self.check_keywords_in_page(item)
            if matched_content_keywords:
                matched_items.append({
                    "标题": item['title'],
                    "网址": item['link'],
                    "匹配标题关键字": ", ".join(item['matched_title_keywords']),
                    "匹配内容关键字": ", ".join(matched_content_keywords),
                })
        with self.lock:
            self.results = matched_items
        # Tkinter is not thread-safe: hand the widget update to the main loop
        self.root.after(0, self.update_results)
    def update_status(self, message):
        # Marshal the status-bar update onto the main thread;
        # this method is called from the worker thread as well
        self.root.after(0, lambda: self.status_bar.config(text=message))

    def update_results(self):
        # Repopulate the Treeview with the matched items
        self.result_tree.delete(*self.result_tree.get_children())
        for item in self.results:
            self.result_tree.insert("", tk.END, values=(item['标题'], item['网址'], item['匹配标题关键字'], item['匹配内容关键字']))
        if self.results:
            messagebox.showinfo("生成成功", "匹配结果已显示在表格中。")
        else:
            messagebox.showinfo("无匹配结果", "没有找到任何符合条件的项目。")

    def on_treeview_click(self, event):
        # Open the URL in a browser when the user clicks the URL column
        item = self.result_tree.identify('item', event.x, event.y)
        column = self.result_tree.identify_column(event.x)
        if item and column == '#2':  # '#2' is the URL column
            url = self.result_tree.item(item, 'values')[1]
            webbrowser.open(url)
    def send_request(self, url):
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            return response
        except requests.RequestException as e:
            print(f"无法访问 {url}: {e}")
            return None
    def fetch_and_parse(self, url):
        response = self.send_request(url)
        if response is None:
            return []
        soup = BeautifulSoup(response.content, "html.parser")
        links = soup.find_all('a')
        extracted_items = []
        for link in links:
            text = link.get_text().strip()
            href = link.get('href')
            if not href:  # skip anchors without an href attribute
                continue
            matched_title_keywords = [keyword for keyword in self.title_keywords if keyword in text]
            if matched_title_keywords:
                full_url = urljoin(url, href)
                extracted_items.append({"title": text, "link": full_url, "matched_title_keywords": matched_title_keywords})
        return extracted_items
    def check_keywords_in_page(self, item):
        response = self.send_request(item['link'])
        if response is None:
            return []
        soup = BeautifulSoup(response.content, "html.parser")
        text_content = soup.get_text()
        matched_content_keywords = [keyword for keyword in self.content_keywords if keyword in text_content]
        if matched_content_keywords:
            self.update_status(f"访问内容网址: {item['link']}")
        return matched_content_keywords

    def save_settings(self):
        settings = {
            "title_keywords": self.title_keywords,
            "content_keywords": self.content_keywords,
            "urls": self.urls
        }
        with open(SETTINGS_FILE, 'w', encoding='utf-8') as f:
            json.dump(settings, f, ensure_ascii=False, indent=4)

    def load_settings(self):
        if os.path.exists(SETTINGS_FILE):
            with open(SETTINGS_FILE, 'r', encoding='utf-8') as f:
                settings = json.load(f)
            self.title_keywords = settings.get("title_keywords", [])
            self.content_keywords = settings.get("content_keywords", [])
            self.urls = settings.get("urls", [])
    def update_listboxes(self):
        # Clear the current contents
        self.title_entry.delete("1.0", tk.END)
        self.content_entry.delete("1.0", tk.END)
        self.url_entry.delete("1.0", tk.END)
        # Fill the text boxes with the stored data
        if self.title_keywords:
            self.title_entry.insert(tk.END, "\n".join(self.title_keywords))
        if self.content_keywords:
            self.content_entry.insert(tk.END, "\n".join(self.content_keywords))
        if self.urls:
            self.url_entry.insert(tk.END, "\n".join(self.urls))
if __name__ == "__main__":
    root = tk.Tk()
    app = KeywordCollector(root)
    root.mainloop()
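For reference, the settings.json file that save_settings writes (with ensure_ascii=False, indent=4) would look like this; the keywords and URL shown are illustrative placeholders:

{
    "title_keywords": [
        "招标公告"
    ],
    "content_keywords": [
        "截止日期"
    ],
    "urls": [
        "https://example.com"
    ]
}

Because the file sits next to the script, you can edit it by hand or delete it to reset the saved keywords and URLs.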