# scraper.py
  1. """
  2. ScraperService: runs scrape jobs asynchronously using a thread pool executor.
  3. Uses the new crawl/main.py scrape_all() which collects prices, model info,
  4. rate limits and tool call prices in a single browser session.
  5. """
  6. from __future__ import annotations
  7. import asyncio
  8. import json
  9. import os
  10. import sys
  11. import traceback
  12. from typing import Any
  13. # Add backend root and crawl dir to path
  14. _backend_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
  15. _crawl_dir = os.path.join(_backend_root, "crawl")
  16. for _p in (_backend_root, _crawl_dir):
  17. if _p not in sys.path:
  18. sys.path.insert(0, _p)
  19. from main import scrape_all # noqa: E402 (backend/crawl/main.py)
  20. class ScraperService:
  21. """Manages the lifecycle of a scrape job."""
  22. async def run_job(self, job_id: str, urls: list[str], pool: Any) -> None:
  23. loop = asyncio.get_event_loop()
  24. async with pool.acquire() as conn:
  25. await conn.execute(
  26. "UPDATE scrape_jobs SET status = 'running', updated_at = NOW() WHERE id = $1",
  27. job_id,
  28. )
  29. try:
  30. exec_path = os.environ.get("PLAYWRIGHT_EXECUTABLE") or None
  31. headless = os.environ.get("PLAYWRIGHT_HEADLESS", "true").lower() != "false"
  32. def _norm(v) -> str:
  33. if v is None:
  34. return "null"
  35. return json.dumps(v if isinstance(v, (dict, list)) else json.loads(v), sort_keys=True)
  36. any_changed = False
  37. # 如果 snapshot 里已有的 URL 集合与本次爬取的不一致(多或少),触发变更
  38. async with pool.acquire() as conn:
  39. rows = await conn.fetch("SELECT url FROM price_snapshot")
  40. existing_snapshot_urls = {row["url"] for row in rows}
  41. if existing_snapshot_urls != set(urls):
  42. any_changed = True
  43. for url in urls:
  44. result: dict = await loop.run_in_executor(
  45. None,
  46. lambda u=url: scrape_all(
  47. u,
  48. headless=headless,
  49. timeout=20000,
  50. executable_path=exec_path,
  51. modules=["info", "rate", "tool", "price", "icon"],
  52. ),
  53. )
  54. prices = result.get("prices") or {}
  55. model_info = result.get("info") or {}
  56. rate_limits = result.get("rate_limits") or {}
  57. tool_prices = result.get("tool_call_prices") or []
  58. icon = result.get("icon") # SVG string or None
  59. # model_name: 直接用 URL 中提取的 model_id,保持和用户输入一致
  60. model_name = (
  61. result.get("model_id")
  62. or url.rstrip("/").split("/")[-1]
  63. )
  64. async with pool.acquire() as conn:
  65. await conn.execute(
  66. """
  67. INSERT INTO scrape_results
  68. (job_id, url, model_name, prices, model_info, rate_limits, tool_prices, raw_data, icon)
  69. VALUES ($1, $2, $3, $4::jsonb, $5::jsonb, $6::jsonb, $7::jsonb, $8::jsonb, $9)
  70. """,
  71. job_id, url, model_name,
  72. json.dumps(prices), json.dumps(model_info),
  73. json.dumps(rate_limits), json.dumps(tool_prices),
  74. json.dumps(result), icon,
  75. )
  76. # 对比旧快照,有变化才 upsert
  77. existing = await conn.fetchrow(
  78. "SELECT prices, model_info, rate_limits, tool_prices, icon FROM price_snapshot WHERE url = $1",
  79. url,
  80. )
  81. data_changed = (
  82. existing is None
  83. or _norm(existing["prices"]) != _norm(prices)
  84. or _norm(existing["model_info"]) != _norm(model_info)
  85. or _norm(existing["rate_limits"]) != _norm(rate_limits)
  86. or _norm(existing["tool_prices"]) != _norm(tool_prices)
  87. or (existing["icon"] or "") != (icon or "")
  88. )
  89. if data_changed:
  90. any_changed = True
  91. await conn.execute(
  92. """
  93. INSERT INTO price_snapshot
  94. (url, model_name, prices, model_info, rate_limits, tool_prices, icon, updated_at)
  95. VALUES ($1, $2, $3::jsonb, $4::jsonb, $5::jsonb, $6::jsonb, $7, NOW())
  96. ON CONFLICT (url) DO UPDATE SET
  97. model_name = EXCLUDED.model_name,
  98. prices = EXCLUDED.prices,
  99. model_info = EXCLUDED.model_info,
  100. rate_limits = EXCLUDED.rate_limits,
  101. tool_prices = EXCLUDED.tool_prices,
  102. icon = EXCLUDED.icon,
  103. updated_at = NOW()
  104. """,
  105. url, model_name,
  106. json.dumps(prices), json.dumps(model_info),
  107. json.dumps(rate_limits), json.dumps(tool_prices), icon,
  108. )
  109. # 删除 snapshot 里不在本次爬取列表中的行(模型被移除的情况)
  110. async with pool.acquire() as conn:
  111. await conn.execute(
  112. "DELETE FROM price_snapshot WHERE url != ALL($1::text[])",
  113. urls,
  114. )
  115. # 本批次有任何数据变化,全局版本号 +1(从 1 开始)
  116. if any_changed:
  117. async with pool.acquire() as conn:
  118. await conn.execute(
  119. """
  120. UPDATE price_snapshot_version
  121. SET version = GREATEST(version + 1, 1), updated_at = NOW()
  122. WHERE id = 1
  123. """
  124. )
  125. async with pool.acquire() as conn:
  126. await conn.execute(
  127. "UPDATE scrape_jobs SET status = 'done', updated_at = NOW() WHERE id = $1",
  128. job_id,
  129. )
  130. except Exception as exc:
  131. error_msg = f"{type(exc).__name__}: {exc}\n{traceback.format_exc()}"
  132. async with pool.acquire() as conn:
  133. await conn.execute(
  134. "UPDATE scrape_jobs SET status = 'failed', error = $2, updated_at = NOW() WHERE id = $1",
  135. job_id,
  136. error_msg,
  137. )