import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

url = "https://example.com/table/"
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")

# Collect the pagination links from the first page
pagination_links = soup.find_all(class_="pagination-link")
for link in pagination_links:
    # Resolve relative hrefs against the base URL
    page_url = urljoin(url, link.get("href"))
    response = requests.get(page_url)
    soup = BeautifulSoup(response.text, "html.parser")
    table = soup.find(class_="my_table")  # locate the table by its class name
    # process the table content here
Note: when scraping real data, adjust the selectors and URLs to match the specific structure of the target site.
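
As a minimal sketch of what the "process the table content" step might look like, the helper below turns the matched table element into a list of rows of cell strings. It assumes the table uses standard <tr>/<th>/<td> markup, and the function name extract_rows is hypothetical:

def extract_rows(table):
    # Convert a BeautifulSoup table element into a list of rows,
    # each row being a list of stripped cell strings.
    rows = []
    for tr in table.find_all("tr"):
        cells = [cell.get_text(strip=True) for cell in tr.find_all(["th", "td"])]
        if cells:
            rows.append(cells)
    return rows

Inside the loop above, this could be called as rows = extract_rows(table) whenever table is not None.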