Python 的 requests 库可以有效地处理 HTTP,但验证码需要外部求解器。本指南展示了如何将 CaptchaAI 集成到 Python 抓取脚本中 - 大多数网站不需要浏览器。
要求
| 要求 | 细节 |
|---|---|
| Python 3.7+ | 已安装 pip |
| Requests 库 | pip install requests |
| BeautifulSoup 4 | pip install beautifulsoup4 |
| CaptchaAI API 密钥 | 从 CaptchaAI 网站的仪表板获取 |
CaptchaAI 帮助程序类
为您的 Python 项目构建可重用的求解器类:
import requests
import time
class CaptchaSolver:
    """Minimal client for the CaptchaAI HTTP API (2captcha-compatible).

    A task is submitted to ``/in.php`` and the result is polled from
    ``/res.php`` until the token is ready or the deadline passes.
    """

    # Seconds before a single HTTP request is abandoned. Without an
    # explicit timeout, requests.get() can hang indefinitely on a
    # stalled connection.
    HTTP_TIMEOUT = 30
    # Seconds between polling attempts.
    POLL_INTERVAL = 5

    def __init__(self, api_key):
        """Store the API key and the service base URL."""
        self.api_key = api_key
        self.base = "https://ocr.captchaai.com"

    def _submit(self, params):
        """Submit a solve task and return the task id.

        Raises:
            Exception: if the API replies with anything but ``OK|<id>``.
        """
        params["key"] = self.api_key
        resp = requests.get(f"{self.base}/in.php", params=params,
                            timeout=self.HTTP_TIMEOUT)
        if not resp.text.startswith("OK|"):
            raise Exception(f"Submit error: {resp.text}")
        return resp.text.split("|")[1]

    def _poll(self, task_id, timeout=300):
        """Poll for the result of *task_id* until solved or *timeout*.

        Returns:
            The solved token string.

        Raises:
            Exception: on any API error reply.
            TimeoutError: if no result arrives before the deadline.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            time.sleep(self.POLL_INTERVAL)
            resp = requests.get(f"{self.base}/res.php", params={
                "key": self.api_key,
                "action": "get",
                "id": task_id
            }, timeout=self.HTTP_TIMEOUT)
            # "CAPCHA_NOT_READY" (sic) is the literal API reply string.
            if resp.text == "CAPCHA_NOT_READY":
                continue
            if resp.text.startswith("OK|"):
                return resp.text.split("|")[1]
            raise Exception(f"Solve error: {resp.text}")
        raise TimeoutError("Solve timed out")

    def solve_recaptcha_v2(self, site_key, page_url):
        """Solve reCAPTCHA v2; returns the g-recaptcha-response token."""
        task_id = self._submit({
            "method": "userrecaptcha",
            "googlekey": site_key,
            "pageurl": page_url
        })
        return self._poll(task_id)

    def solve_recaptcha_v3(self, site_key, page_url, action="verify"):
        """Solve reCAPTCHA v3 for the given *action*; returns the token."""
        task_id = self._submit({
            "method": "userrecaptcha",
            "googlekey": site_key,
            "pageurl": page_url,
            "version": "v3",
            "action": action
        })
        return self._poll(task_id)

    def solve_turnstile(self, site_key, page_url):
        """Solve a Cloudflare Turnstile challenge; returns the token."""
        task_id = self._submit({
            "method": "turnstile",
            "sitekey": site_key,
            "pageurl": page_url
        })
        return self._poll(task_id)

    def solve_image(self, image_base64):
        """Solve a classic image CAPTCHA from a base64-encoded image."""
        task_id = self._submit({
            "method": "base64",
            "body": image_base64
        })
        return self._poll(task_id)
抓取受 reCAPTCHA 保护的表单
from bs4 import BeautifulSoup
import requests

# One solver and one HTTP session for the whole flow: the session keeps
# cookies between the GET that loads the form and the POST that submits it.
solver = CaptchaSolver("YOUR_API_KEY")
session = requests.Session()
session.headers["User-Agent"] = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
)

url = "https://example.com/search"

# Load the protected page and pull the widget's site key out of the
# g-recaptcha container div.
soup = BeautifulSoup(session.get(url).text, "html.parser")
site_key = soup.find("div", class_="g-recaptcha")["data-sitekey"]

# Ask CaptchaAI for a token, then submit the form with it attached.
token = solver.solve_recaptcha_v2(site_key, url)
result = session.post(url, data={
    "q": "search term",
    "g-recaptcha-response": token
})

# Print each search hit from the response HTML.
result_soup = BeautifulSoup(result.text, "html.parser")
for hit in result_soup.find_all("div", class_="result-item"):
    print(hit.text.strip())
抓取多个页面
对于验证码后面的分页结果:
def scrape_all_pages(base_url, site_key, max_pages=10):
    """Collect items from up to *max_pages* CAPTCHA-gated result pages.

    Stops early at the first page that yields no ``div.item`` elements.
    Returns a flat list with the stripped text of every item found.
    """
    solver = CaptchaSolver("YOUR_API_KEY")
    session = requests.Session()
    session.headers["User-Agent"] = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    )

    collected = []
    page_num = 0
    while page_num < max_pages:
        page_num += 1
        page_url = f"{base_url}?page={page_num}"

        # A fresh token is solved per page in case every page is gated.
        token = solver.solve_recaptcha_v2(site_key, page_url)
        resp = session.get(page_url, params={
            "g-recaptcha-response": token,
            "page": page_num
        })

        items = BeautifulSoup(resp.text, "html.parser").find_all(
            "div", class_="item")
        if not items:
            break
        for node in items:
            collected.append(node.text.strip())
        print(f"Page {page_num}: {len(items)} items")
        time.sleep(2)  # Polite delay
    return collected
处理图像验证码
对于具有基于图像的文本验证码的网站:
import base64
from urllib.parse import urljoin

def scrape_with_image_captcha(url):
    """Solve a classic image/text CAPTCHA on *url* and submit the form.

    Returns:
        The HTML body of the server's response to the form POST.
    """
    solver = CaptchaSolver("YOUR_API_KEY")
    session = requests.Session()
    page = session.get(url)
    soup = BeautifulSoup(page.text, "html.parser")
    # Find the CAPTCHA image. The src attribute is frequently a relative
    # path ("/captcha.png"), so resolve it against the page URL before
    # downloading — a bare relative path is not a fetchable URL.
    captcha_img = soup.find("img", {"id": "captcha-image"})
    captcha_url = urljoin(url, captcha_img["src"])
    # Download the image and base64-encode it for the API.
    img_resp = session.get(captcha_url)
    img_base64 = base64.b64encode(img_resp.content).decode()
    # Solve the image CAPTCHA into plain text.
    captcha_text = solver.solve_image(img_base64)
    # Submit the form with the recognized text.
    form_data = {
        "captcha": captcha_text,
        "username": "user"
    }
    result = session.post(url, data=form_data)
    return result.text
错误处理和重试
为生产环境的抓取脚本添加重试逻辑:
def solve_with_retry(solver, site_key, page_url, max_retries=3, base_delay=2.0):
    """Solve a reCAPTCHA v2 with retries and exponential backoff.

    Args:
        solver: object exposing ``solve_recaptcha_v2(site_key, page_url)``.
        site_key: the page's ``data-sitekey`` value.
        page_url: URL of the protected page.
        max_retries: total number of attempts before giving up.
        base_delay: backoff base in seconds; attempt *k* (0-based) waits
            ``base_delay * 2**k`` before the next try.

    Returns:
        The solved token string.

    Raises:
        Exception: whatever the final attempt raised, once retries are
            exhausted.
    """
    for attempt in range(max_retries):
        try:
            return solver.solve_recaptcha_v2(site_key, page_url)
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {e}. Retrying...")
            # Exponential backoff: 1x, 2x, 4x ... the base delay.
            time.sleep(base_delay * (2 ** attempt))
故障排除
| 问题 | 原因 | 处理方式 |
|---|---|---|
| ERROR_WRONG_USER_KEY | API 密钥无效 | 从仪表板验证密钥 |
| ERROR_ZERO_BALANCE | 账户余额不足 | 为您的帐户充值 |
| 表单提交再次返回验证码页面 | 令牌过期或字段名称错误 | 拿到令牌后立即使用;检查表单字段名称 |
| ConnectionError | 网络问题 | 添加具有指数退避的重试逻辑 |
| 提交后结果为空 | 网站需要 cookies/session | 使用requests.Session()来维护cookie |
常见问题
我需要 Selenium 在 Python 中进行验证码抓取吗?
并非总是如此。如果站点的表单适用于标准 HTTP POST 请求,则 requests + CaptchaAI 比 Selenium 更快、更轻。仅当站点需要 JavaScript 渲染时才使用 Selenium。
我可以异步解决验证码吗?
是的。将 aiohttp 与 CaptchaAI 的 API 结合使用即可实现异步工作流程。请参阅 aiohttp + CaptchaAI 集成指南。
如何处理速率限制?
在请求之间添加延迟(time.sleep 2-5 秒)、复用同一会话并使用真实的请求头。请参阅用于验证码抓取的代理轮换指南。
相关指南
- 使用 Python 处理 Selenium 验证码
- 使用 Node.js 进行验证码抓取
- 抓取而不被阻止