Ver Fonte

Update 优化代码

YueYunyun há 6 meses atrás
pai
commit
8a5993289f

+ 2 - 1
.gitignore

@@ -158,5 +158,6 @@ cython_debug/
 #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
 
+.vscode/
 .dev/
-logs/
+logs/

+ 2 - 1
SourceCode/TenderCrawler/.vscode/launch.json

@@ -21,7 +21,8 @@
 				"APP_SAVE__PROCESS_BATCH_SIZE": "1",
 				"APP_SCHEDULE__COLLECT": "12:53",
 				"APP_SCHEDULE__SEND_EMAIL": "22:48",
-				"APP_SCHEDULE__RUN_NOW": "1"
+				"APP_SCHEDULE__RUN_NOW": "1",
+
 			}
 		}
 	]

+ 35 - 35
SourceCode/TenderCrawler/app/adapters/chinabidding_data_collection_adapter.py

@@ -3,7 +3,7 @@ from time import sleep
 from selenium import webdriver
 from selenium.webdriver.common.by import By
 from selenium.webdriver.support.wait import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support import expected_conditions as ec
 from selenium.common.exceptions import TimeoutException, NoSuchElementException
 
 from drivers.driver_creator import DriverCreator
@@ -41,29 +41,29 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
     @property
     def driver(self):
         if not self._driver:
-            self._driver = self.createDriver()
+            self._driver = self.create_driver()
         return self._driver
 
-    def createDriver(self) -> webdriver:
+    def create_driver(self) -> webdriver:
         try:
-            return DriverCreator().GenRemoteDriver(self.url)
+            return DriverCreator().gen_remote_driver(self.url)
         except Exception as e:
             raise Exception(f"创建驱动器失败: {e}")
 
     def login(self, driver, username: str, password: str) -> None:
         try:
-            loginEl = driver.find_element(
+            login_el = driver.find_element(
                 By.XPATH, "//div[@id='loginRight']/a[@class='login']")
-            loginEl.click()
+            login_el.click()
             wait = WebDriverWait(driver, 10, 1)
-            wait.until(EC.presence_of_element_located((By.ID, "userpass")))
-            unEl = driver.find_element(By.ID, "username")
-            unEl.send_keys(username)
-            passEl = driver.find_element(By.ID, "userpass")
-            passEl.send_keys(password)
-            loginBtn = driver.find_element(By.ID, "login-button")
-            loginBtn.click()
-            wait.until(EC.presence_of_element_located((By.ID, "site-content")))
+            wait.until(ec.presence_of_element_located((By.ID, "userpass")))
+            un_el = driver.find_element(By.ID, "username")
+            un_el.send_keys(username)
+            pass_el = driver.find_element(By.ID, "userpass")
+            pass_el.send_keys(password)
+            login_btn = driver.find_element(By.ID, "login-button")
+            login_btn.click()
+            wait.until(ec.presence_of_element_located((By.ID, "site-content")))
         except TimeoutException as e:
             raise Exception(f"登录失败 [超时]: {e}")
         except NoSuchElementException as e:
@@ -74,25 +74,25 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
             self._keyword = keyword
             wait = WebDriverWait(driver, 10, 1)
             wait.until(
-                EC.presence_of_element_located((By.ID, "projSearchForm")))
-            searchEl = driver.find_element(By.ID, "fullText")
-            searchEl.send_keys(keyword)
-            searchBtn = driver.find_element(
+                ec.presence_of_element_located((By.ID, "projSearchForm")))
+            search_el = driver.find_element(By.ID, "fullText")
+            search_el.send_keys(keyword)
+            search_btn = driver.find_element(
                 By.XPATH, "//form[@id='projSearchForm']/button")
-            searchBtn.click()
-            wait.until(EC.presence_of_element_located((By.ID, "site-content")))
+            search_btn.click()
+            wait.until(ec.presence_of_element_located((By.ID, "site-content")))
             # 查询3天内的数据
             search_txt = ConfigHelper().get("adapter.chinabidding.search_day")
             if not search_txt:
                 search_txt = "近三天"
             self.logger.info(f"搜索关键字: {keyword},搜索条件: {search_txt}")
-            lastEl = driver.find_element(By.LINK_TEXT, search_txt)
-            lastEl.click()
-            wait.until(EC.presence_of_element_located((By.ID, "site-content")))
+            last_el = driver.find_element(By.LINK_TEXT, search_txt)
+            last_el.click()
+            wait.until(ec.presence_of_element_located((By.ID, "site-content")))
             try:
-                aLinks = driver.find_elements(
+                a_links = driver.find_elements(
                     By.XPATH, "//form[@id='pagerSubmitForm']/a")
-                count = len(aLinks)
+                count = len(a_links)
                 if count > 1:
                     count = count - 1
                 self.logger.info(f"共查询到 {count} 页")
@@ -106,7 +106,7 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
         except NoSuchElementException as e:
             raise Exception(f"搜索失败 [找不到元素]: {e}")
 
-    def collect(self, driver, items: list, store: IDataStore) -> list:
+    def collect(self, driver, items: list, store: IDataStore):
         if store:
             self._store = store
         self._process_list(driver, items)
@@ -115,12 +115,12 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
     def _next_page(self, driver) -> list:
         try:
             wait = WebDriverWait(driver, 10, 1)
-            nextPath = "//form[@id='pagerSubmitForm']/a[@class='next']"
-            wait.until(EC.presence_of_element_located((By.XPATH, nextPath)))
-            btn = driver.find_element(By.XPATH, nextPath)
+            next_path = "//form[@id='pagerSubmitForm']/a[@class='next']"
+            wait.until(ec.presence_of_element_located((By.XPATH, next_path)))
+            btn = driver.find_element(By.XPATH, next_path)
             btn.click()
             self.logger.info(f"跳转到下页: {driver.current_url}")
-            wait.until(EC.presence_of_element_located((By.ID, "site-content")))
+            wait.until(ec.presence_of_element_located((By.ID, "site-content")))
             items = driver.find_elements(By.XPATH,
                                          "//ul[@class='as-pager-body']/li/a")
             return items
@@ -131,8 +131,8 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
             return []
 
     def _process_item(self, driver, item):
+        current_handle = driver.current_window_handle
         try:
-            currentHandle = driver.current_window_handle
             url = item.get_attribute('href')
             old = self.store.query_one_collect_by_url(url)
             if old:
@@ -140,15 +140,15 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
                 return
             item.click()
             wait = WebDriverWait(driver, 10, 1)
-            wait.until(EC.number_of_windows_to_be(2))
+            wait.until(ec.number_of_windows_to_be(2))
             handles = driver.window_handles
             for handle in handles:
-                if handle != currentHandle:
+                if handle != current_handle:
                     driver.switch_to.window(handle)
                     break
             url = driver.current_url
             self.logger.info(f"跳转详情: {driver.current_url}")
-            wait.until(EC.presence_of_element_located((By.TAG_NAME, "body")))
+            wait.until(ec.presence_of_element_located((By.TAG_NAME, "body")))
             content = driver.find_element(By.TAG_NAME, "body").text
             self._save(url, content)
             sleep(1)
@@ -163,7 +163,7 @@ class ChinabiddingDataCollectionAdapter(IDataCollectionAdapter):
                 f"采集发生异常 NoSuchElement: {driver.current_url}。Exception: {e}")
             raise Exception(f"采集失败 [找不到元素]: {e}")
         finally:
-            driver.switch_to.window(currentHandle)
+            driver.switch_to.window(current_handle)
 
     def _save(self, url, content):
         # self.logger.info(f"保存数据: {url},关键字{self.keyword}")

+ 2 - 1
SourceCode/TenderCrawler/app/adapters/data_collection_adapter_interface.py

@@ -32,7 +32,7 @@ class IDataCollectionAdapter(ABC):
         pass
 
     @abstractmethod
-    def createDriver(self) -> webdriver:
+    def create_driver(self) -> webdriver:
         """
         根据URL创建一个浏览器驱动器
 
@@ -90,6 +90,7 @@ class IDataCollectionAdapter(ABC):
 
         :param driver: 浏览器驱动器实例
         :param items: 搜索结果列表
+        :param store: 数据储存库
         :type items: list
         :return: 处理后的数据列表
         :rtype: list

+ 1 - 1
SourceCode/TenderCrawler/app/config.yml

@@ -22,7 +22,7 @@ ai:
   model: qwen2.5:7b
   max_tokens: 1024
   system_prompt: 请帮我分析以下文字,提取出关键信息,并以json格式字符串返回,如果部分信息为空,则该字段返回为空。
-  prompt_template: 在以上内容中提取信息:编号(no) 、标题(title)、在哪个城市招标(area)、开标的时间(date)、开标的地点(address)、发布时间(release_date)、150字左右的招标条件要求及联系方式等内容摘要(summary)。编号一般在“招标编号:”的后面,例如 (招标编号:xxxxxxx...), “xxxxxxx...”就是编号(no)。返回包含no,title,area,date,address,release_date,summary字段的json格式字符串,没有找到或未提供的信息json字段为空。
+  prompt_template: 在以上内容中提取信息:编号(no) 、标题(title)、在哪个城市招标(area)、开标的时间(date)、开标的地点(address)、发布时间(release_date)、150字左右的招标条件要求及联系方式等内容摘要(summary)。编号一般在“招标编号:”的后面,例如 (招标编号:xxx...), “xxx...”就是编号(no)。返回包含no,title,area,date,address,release_date,summary字段的json格式字符串,没有找到或未提供的信息json字段为空。
 email:
   smtp_server: smtp.exmail.qq.com
   smtp_port: 587

+ 7 - 8
SourceCode/TenderCrawler/app/drivers/driver_creator.py

@@ -10,7 +10,7 @@ class DriverCreator:
 
     default_remote_driver_url = "http://127.0.0.1:4444/wd/hub"
 
-    def GenRemoteDriver(self, url):
+    def gen_remote_driver(self, url):
         # 设置Chrome选项
         options = webdriver.ChromeOptions()
 
@@ -30,9 +30,9 @@ class DriverCreator:
         # 创建远程浏览器驱动实例
         driver = webdriver.Remote(command_executor=remote_driver_url,
                                   options=options)
-        return self._genDriver(driver, url)
+        return self._gen_driver(driver, url)
 
-    def GenChromeDriver(self, url):
+    def gen_chrome_driver(self, url):
         # 设置Chrome选项,包括隐藏Selenium特征、设置代理IP和排除或关闭一些Selenium相关开关
         options = webdriver.ChromeOptions()
         options.add_experimental_option('excludeSwitches',
@@ -40,7 +40,6 @@ class DriverCreator:
         options.add_argument('--disable-blink-features=AutomationControlled')
         options.add_argument('--disable-extensions')
         # options.add_argument('--disable-gpu')
-        # options.add_argument('--disable-infobars')
         options.add_argument('--disable-notifications')
         # options.add_argument('--disable-popup-blocking')
         # options.add_argument('--disable-web-security')
@@ -57,9 +56,9 @@ class DriverCreator:
         # 阻止浏览器窗口自动关闭
         # options.add_experimental_option('detach', True)
         driver = webdriver.Chrome(options=options)  # 创建Chrome浏览器驱动实例
-        return self._genDriver(driver, url)
+        return self._gen_driver(driver, url)
 
-    def _genDriver(self, driver, url):
+    def _gen_driver(self, driver, url):
         # 检查是否为 ChromeDriver 或 FirefoxDriver
         if isinstance(driver, (webdriver.Chrome, webdriver.Firefox)):
             # 隐藏navigator.webdriver标志,将其值修改为false或undefined
@@ -93,5 +92,5 @@ class DriverCreator:
         self.logger.info(f"创建浏览器驱动,URL: {url}")
         return driver
 
-    def ShutdownDriver(driver):
-        driver.quit()
+    # def shutdown_driver(self,driver):
+    #     driver.quit()

+ 6 - 6
SourceCode/TenderCrawler/app/main.py

@@ -6,18 +6,18 @@ from utils.logger_helper import LoggerHelper
 from main.runner import Runner
 
 logger = LoggerHelper.get_logger()
-DEFAUlT_SLEEP_INTERVAL = 60 * 30  # 配置默认时间间隔30分钟
+DEFAULT_USER_SLEEP_INTERVAL = 60 * 30  # 配置默认时间间隔30分钟
 
 runner = Runner()
 runner.run()
 
+interval_str = ConfigHelper().get("schedule.sleep_interval")
 try:
-    intervalStr = ConfigHelper().get("schedule.sleep_interval")
-    interval = int(intervalStr)
-except Exception:
-    interval = DEFAUlT_SLEEP_INTERVAL
+    interval = int(interval_str)
+except Exception as e:
+    interval = DEFAULT_USER_SLEEP_INTERVAL
     logger.warning(
-        f"schedule.sleep_interval {intervalStr} 配置不正确, 使用默认配置: {DEFAUlT_SLEEP_INTERVAL}秒"
+        f"schedule.sleep_interval {interval_str} 配置不正确, 使用默认配置: {DEFAULT_USER_SLEEP_INTERVAL}秒。 错误:{e}"
     )
 
 if __name__ == '__main__':

+ 1 - 1
SourceCode/TenderCrawler/app/main/data_collector.py

@@ -28,7 +28,7 @@ class DataCollector:
                  up: str,
                  store: IDataStore = None):
         self._adapter = self._genAdapter(type, url)
-        self._driver = self.adapter.createDriver()
+        self._driver = self.adapter.create_driver()
         # if type == "chinabidding":
         #     return
         self.adapter.login(self.driver, un, up)

+ 1 - 1
SourceCode/TenderCrawler/app/main/data_process.py

@@ -55,7 +55,7 @@ class DataProcess:
 
         self.logger.info("END   ==>" + url)
 
-    def _ai_process(self, item: CollectData) -> ProcessData:
+    def _ai_process(self, item: CollectData) -> ProcessData | None:
         try:
             data = AiHelper().call_ai(item.content)
             return data

+ 3 - 2
SourceCode/TenderCrawler/app/main/data_send.py

@@ -29,7 +29,7 @@ class DataSend:
         email = self.store.get_email_by_area(item.area)
         if not email:
             self.logger.error(f"{item.area} 下没有找到email")
-            if (item.area not in self._error_arr):
+            if item.area not in self._error_arr:
                 self._error_arr.append(item.area)
             return
         body = self._build_email_content(item)
@@ -37,7 +37,8 @@ class DataSend:
         if flag:
             self.store.set_send(item.no)
 
-    def _build_email_content(self, item: ProcessData, other: str = "") -> str:
+    @staticmethod
+    def _build_email_content(item: ProcessData, other: str = "") -> str:
         html_body = f"""
         <html>
         <head>

+ 13 - 11
SourceCode/TenderCrawler/app/main/runner.py

@@ -54,19 +54,20 @@ class Runner:
     def _collect_process_job(self):
         try:
             self.logger.info("开始执行数据采集处理任务")
-            urlSetting = UrlSetting()
-            for url_setting in urlSetting.fetch_all():
+            url_settings = UrlSetting()
+            for url_setting in url_settings.fetch_all():
+                data_collector = None
                 try:
                     self.logger.info(f"开始采集: {url_setting.url}")
-                    dataCollector = DataCollector(url_setting.type,
+                    data_collector = DataCollector(url_setting.type,
                                                   url_setting.url,
                                                   url_setting.username,
                                                   url_setting.password,
                                                   self.store)
                     keywords = url_setting.keywords
-                    keywordArray = keywords.split(',')
-                    for keyword in keywordArray:
-                        dataCollector.collect(keyword)
+                    keyword_array = keywords.split(',')
+                    for keyword in keyword_array:
+                        data_collector.collect(keyword)
                     self.logger.info(f"采集完成: {url_setting.url}")
                 except Exception as e:
                     self._send_error_email(
@@ -75,12 +76,13 @@ class Runner:
                     )
                     self.logger.error(f"采集发生异常: {e}")
                 finally:
-                    dataCollector.close()
+                    if data_collector:
+                        data_collector.close()
 
                 try:
                     self.logger.info(f"开始AI处理: {url_setting.url}")
-                    dataProcess = DataProcess(self.store)
-                    dataProcess.process()
+                    data_process = DataProcess(self.store)
+                    data_process.process()
                 except Exception as e:
                     self._send_error_email(
                         "AI数据处理",
@@ -95,8 +97,8 @@ class Runner:
     def _process_job(self):
         try:
             self.logger.info("开始AI处理数据执行任务")
-            dataProcess = DataProcess(self.store)
-            dataProcess.process()
+            data_process = DataProcess(self.store)
+            data_process.process()
             self.logger.info("AI处理数据任务执行完毕")
         except Exception as e:
             self._send_error_email("AI数据处理", f"\n    错误: {str(e)}")

+ 7 - 5
SourceCode/TenderCrawler/app/models/area_email.py

@@ -3,12 +3,14 @@ from utils.mysql_helper import MySQLHelper
 
 class AreaEmail:
 
-    def __init__(self, name=None, area=None, email=None):
+    def __init__(self, name=None, area=None, email=None,is_active=None,remark=None):
         self.name = name
         self.area = area
         if email is None:
             email = ""
         self.email = email.replace(",", ",")
+        self.is_active = is_active
+        self.remark = remark
 
     def __repr__(self):
         return (
@@ -33,19 +35,19 @@ class AreaEmail:
     #                   area_email.remark)
     #         db_helper.execute_non_query(query, params)
 
+    _query = "SELECT name,area,email FROM t_area_email WHERE is_active = 1"
+    _query_by_area = "SELECT email FROM t_area_email WHERE CONCAT(area,',') like %s AND is_active = 1"
     # 查询 AreaEmail 数据
     def fetch_all(self):
         with MySQLHelper() as db_helper:
-            query = "SELECT name,area,email FROM t_area_email WHERE is_active = 1"
-            results = db_helper.execute_query(query)
+            results = db_helper.execute_query(self._query)
             data = [AreaEmail(**result) for result in results]
             return data
 
     def fetch_one_by_area(self, area: str):
         with MySQLHelper() as db_helper:
-            query = "SELECT email FROM t_area_email WHERE CONCAT(area,',') like %s AND is_active = 1"
             params = ('%' + area + ',%', )
-            result = db_helper.fetch_one(query, params)
+            result = db_helper.fetch_one(self._query_by_area, params)
             if result is None:
                 return None
             return result["email"]

+ 3 - 2
SourceCode/TenderCrawler/app/models/url_setting.py

@@ -46,10 +46,11 @@ class UrlSetting:
     #                   url_setting.is_active)
     #         db_helper.execute_non_query(query, params)
 
+    _query = "SELECT  url, type, username, password, keywords FROM t_urls WHERE is_active = 1"
+
     # 查询 URL 设置数据
     def fetch_all(self):
         with MySQLHelper() as db_helper:
-            query = "SELECT  url, type, username, password, keywords FROM t_urls WHERE is_active = 1"
-            results = db_helper.execute_query(query)
+            results = db_helper.execute_query(self._query)
             data = [UrlSetting(**result) for result in results]
             return data

+ 4 - 1
SourceCode/TenderCrawler/app/stores/default_data_store.py

@@ -4,12 +4,15 @@ from stores.data_store_interface import IDataStore
 
 class DefaultDataStore(IDataStore):
 
+    def query_one_process_by_no(self, no):
+        pass
+
     logger = LoggerHelper.get_logger()
 
     def __init__(self):
         pass
 
-    def insert_collect_data(self, url, keyword, content):
+    def insert_collect_data(self, url, keyword, content, is_batch=True):
         self.logger.info(f"Default: INSERT {url},关键字:{keyword}")
 
     def save_collect_data(self, is_force=False):

+ 2 - 2
SourceCode/TenderCrawler/app/stores/mysql_data_store.py

@@ -39,7 +39,7 @@ class MysqlDataStore(IDataStore):
             self.save_collect_data()
 
     def save_collect_data(self, is_force=False):
-        if (is_force or len(self._collect_list) >= self._collect_size):
+        if is_force or len(self._collect_list) >= self._collect_size:
             self.logger.info("批量保存到数据库,数量: " + str(len(self._collect_list)))
             self._collectData.insert_batch(self._collect_list)
             self._collect_list = []
@@ -62,7 +62,7 @@ class MysqlDataStore(IDataStore):
 
     # 插入到数据库时会把CollectData设为已处理
     def save_process_data(self, is_force=False):
-        if (is_force or len(self._process_list) >= self._process_size):
+        if is_force or len(self._process_list) >= self._process_size:
             self.logger.info("批量保存到数据库,数量: " + str(len(self._process_list)))
             self._processData.insert_batch(self._process_list)
             self._process_list = []

+ 5 - 4
SourceCode/TenderCrawler/app/utils/ai_helper.py

@@ -17,7 +17,7 @@ class AiHelper:
     _ai_system_prompt = "请帮我分析以下文字,提取出关键信息,并以json格式字符串返回,如果部分信息为空,则该字段返回为空。"
     _ai_prompt_template = """在以上内容中提取信息:编号(no) 、标题(title)、在哪个城市招标(area)、开标的时间(date)、
     开标的地点(address)、发布时间(release_date)、150字左右的招标条件要求及联系方式等内容摘要(summary)。
-    编号一般在“招标编号:”的后面,例如 (招标编号:xxxxxxx...), “xxxxxxx...”就是编号(no)。"
+    编号一般在“招标编号:”的后面,例如 (招标编号:xxx...), “xxx...”就是编号(no)。"
     返回包含no,title,area,date,address,release_date,summary字段的json格式字符串,没有找到的信息json字段为空。"""
 
     def __init__(self):
@@ -66,15 +66,16 @@ class AiHelper:
         if response.status_code == 200:
             try:
                 self.logger.info(f"AI Response: {response.text}")
-                resStr = self._extract_message_content(response.json())
-                return self._parse_response(resStr, True)
+                res_str = self._extract_message_content(response.json())
+                return self._parse_response(res_str, True)
             except Exception as e:
                 raise Exception(f"解析 AI 响应错误: {e}")
         else:
             raise Exception(
                 f"调用 AI 错误: {response.status_code} - {response.text}")
 
-    def _extract_message_content(self, response_json: dict) -> str:
+    @staticmethod
+    def _extract_message_content(response_json: dict) -> str:
         if "choices" in response_json and len(response_json["choices"]) > 0:
             choice = response_json["choices"][0]
             message_content = choice.get("message", {}).get("content", "")

+ 16 - 16
SourceCode/TenderCrawler/app/utils/email_helper.py

@@ -9,6 +9,21 @@ from utils.config_helper import ConfigHelper
 from utils.logger_helper import LoggerHelper
 
 
+def _attach_file(msg: MIMEMultipart, attachment_path: str):
+    if not os.path.isfile(attachment_path):
+        raise FileNotFoundError(
+            f"The file {attachment_path} does not exist.")
+
+    with open(attachment_path, "rb") as attachment:
+        part = MIMEBase('application', 'octet-stream')
+        part.set_payload(attachment.read())
+        encoders.encode_base64(part)
+        part.add_header(
+            'Content-Disposition',
+            f"attachment; filename= {os.path.basename(attachment_path)}")
+        msg.attach(part)
+
+
 class EmailHelper:
 
     logger = LoggerHelper.get_logger()
@@ -42,10 +57,9 @@ class EmailHelper:
             msg.attach(MIMEText(body, 'plain', 'utf-8'))
 
         if attachment_path:
-            self._attach_file(msg, attachment_path)
+            _attach_file(msg, attachment_path)
 
         try:
-            # with smtplib.SMTP(self.smtp_server, self.port, timeout=10) as server:
             with smtplib.SMTP_SSL(self.smtp_server, timeout=10) as server:
                 # server.starttls()
                 server.login(self.username, self.password)
@@ -57,17 +71,3 @@ class EmailHelper:
         except Exception as e:
             self.logger.error(f"邮件发送失败:{to_addr} {e}")
             return False
-
-    def _attach_file(self, msg: MIMEMultipart, attachment_path: str):
-        if not os.path.isfile(attachment_path):
-            raise FileNotFoundError(
-                f"The file {attachment_path} does not exist.")
-
-        with open(attachment_path, "rb") as attachment:
-            part = MIMEBase('application', 'octet-stream')
-            part.set_payload(attachment.read())
-            encoders.encode_base64(part)
-            part.add_header(
-                'Content-Disposition',
-                f"attachment; filename= {os.path.basename(attachment_path)}")
-            msg.attach(part)

+ 10 - 10
SourceCode/TenderCrawler/app/utils/logger_helper.py

@@ -10,19 +10,19 @@ class LoggerHelper:
     """
     _instance = None
 
-    def __new__(self, *args, **kwargs):
+    def __new__(cls, *args, **kwargs):
         """
         实现单例模式,确保日志记录器仅被创建一次
         如果尚未创建实例,则创建并初始化日志记录器
         """
-        if not self._instance:
-            self._instance = super(LoggerHelper,
-                                   self).__new__(self, *args, **kwargs)
+        if not cls._instance:
+            cls._instance = super(LoggerHelper,
+                                  cls).__new__(cls, *args, **kwargs)
             try:
-                self._instance._initialize_logger()
+                cls._instance._initialize_logger()
             except Exception as e:
                 raise Exception(f"配置logger出错: {e}")
-        return self._instance
+        return cls._instance
 
     @property
     def logger(self):
@@ -64,11 +64,11 @@ class LoggerHelper:
         self._logger.addHandler(console_handler)
 
     @classmethod
-    def get_logger(self):
+    def get_logger(cls):
         """
         提供初始化后的日志记录器实例
         :return: 初始化后的日志记录器实例
         """
-        if not self._instance:
-            self._instance = self()
-        return self._instance._logger
+        if not cls._instance:
+            cls._instance = cls()
+        return cls._instance._logger