做网站常见问题模板,贵州省建设学校网站首页,网站开发计入什么费用,如何用一个域名做多个网站目录
前言
环境
代码 前言
有个朋友想爬取一些数据，让我帮忙搞下，我也比较菜，不怎么用python就随便搜了点资料尝试下。
环境
idea，python3.1.0
edge浏览器（谷歌也可以），都需要在python的…目录
前言
环境
代码 前言
有个朋友想爬取一些数据让我帮忙搞下我也比较菜不怎么用python就随便搜了点资料尝试下。
环境
ideapython3.1.0
edge浏览器谷歌也可以都需要在python的安装目录下存放驱动。
使用edge浏览器当浏览器更新时需要更新edgedriver驱动
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/#downloads
然后在页面搜索版本号，比如127.0.2651.98，下载x86的放在C:\Python311（python的安装目录），修改文件名为MicrosoftWebDriver.exe
代码
data_model类主要做数据的导出用的
import xlwt

# Cell write modes: NORMAL writes a literal value, FORMULA writes an Excel formula.
FORMULA = 1
NORMAL = 0


class Cell:
    """A single spreadsheet cell (optionally merged) that knows how to write itself."""

    def __init__(self, sheet, value, type=0, row_index=0, column_index=0,
                 merge_row=0, merge_column=0):
        """
        :param sheet: xlwt worksheet the cell writes into
        :param value: literal value (NORMAL) or formula text (FORMULA)
        :param type: 0 writes a value, 1 writes a formula (name kept for caller compatibility)
        :param row_index: 0-based row
        :param column_index: 0-based column
        :param merge_row: number of rows to merge (0 = no merge)
        :param merge_column: number of columns to merge (0 = no merge)
        """
        self.sheet = sheet
        self.row_index = row_index
        self.column_index = column_index
        # Excel-style coordinates are 1-based.
        self.row_name = row_index + 1
        self.column_name = self.transfer_column(column_index + 1)
        self.merge_row = merge_row
        self.merge_column = merge_column
        # type 0 = write value, 1 = write formula
        self.type = type
        self.value = value

    def get_cell_location(self):
        """Return this cell's Excel reference, e.g. 'B3'."""
        return self.format_cell_location(self.column_name, self.row_name)

    def format_cell_location(self, col, row):
        """Join a column letter and a 1-based row into an Excel reference."""
        return '{col}{row}'.format(col=col, row=row)

    def get_pre_cell_location(self):
        """Return the reference of the cell one column to the left (same row)."""
        return self.format_cell_location(self.transfer_column(self.column_index), self.row_name)

    def write(self):
        """Write this cell to the sheet according to its type."""
        if self.type == NORMAL:
            self.write_value(self.value)
        elif self.type == FORMULA:
            self.write_formula(self.value)

    def write_value(self, value):
        """Write a literal value, using write_merge when a merge span is configured."""
        print('write_value cell:', self.row_index, '-', self.column_index, '_', self.value, self.row_name)
        if self.merge_row == 0 and self.merge_column == 0:
            self.sheet.write(self.row_index, self.column_index, str(value))
        else:
            # write_merge takes inclusive end indices, so a span of N cells
            # ends at start + N - 1.
            merge_row_index = self.row_index + self.merge_row
            merge_column_index = self.column_index + self.merge_column
            if self.merge_row > 0:
                merge_row_index = merge_row_index - 1
            if self.merge_column > 0:
                merge_column_index = merge_column_index - 1
            self.sheet.write_merge(self.row_index, merge_row_index,
                                   self.column_index, merge_column_index, str(value))

    def write_formula(self, formula):
        """Write an Excel formula, using write_merge when a merge span is configured."""
        if self.merge_row == 0 and self.merge_column == 0:
            self.sheet.write(self.row_index, self.column_index, xlwt.Formula(formula))
        else:
            # Same inclusive-end-index adjustment as write_value.
            merge_row_index = self.row_index + self.merge_row
            merge_column_index = self.column_index + self.merge_column
            if self.merge_row > 0:
                merge_row_index = merge_row_index - 1
            if self.merge_column > 0:
                merge_column_index = merge_column_index - 1
            self.sheet.write_merge(self.row_index, merge_row_index,
                                   self.column_index, merge_column_index,
                                   xlwt.Formula(formula))

    def transfer_column(self, index):
        """Convert a 1-based column number to an Excel column name.

        1 -> 'A', 26 -> 'Z', 27 -> 'AA', 52 -> 'AZ', ...
        """
        chars = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
                 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
        b = len(chars)
        result = ''
        while True:
            if index % b == 0:
                # Exact multiple of 26 maps to 'Z' and borrows one from the
                # next digit (Excel columns have no zero digit).
                result = chars[25] + result
                index = int(index / b) - 1
                if index < 1:
                    break
            else:
                result = chars[index % b - 1] + result
                index = int(index / b)
                if index == 0:
                    break
        return result


class ZhuanLiInfo:
    """Builds the export sheet: one title row plus one row per scraped record."""

    def __init__(self, sheet, data):
        """
        :param sheet: xlwt worksheet to fill
        :param data: iterable of rows, each an iterable of cell values
        """
        self.data = data
        self.rows = []
        self.init_title(sheet)
        self.init_rows(sheet)

    def init_title(self, sheet):
        """Append the fixed header cells for row 0."""
        self.rows.append(Cell(sheet, '公司名称', NORMAL, 0, 0))
        self.rows.append(Cell(sheet, '电子邮箱', NORMAL, 0, 1))
        self.rows.append(Cell(sheet, '联系电话', NORMAL, 0, 2))
        self.rows.append(Cell(sheet, '机构网址', NORMAL, 0, 3))
        self.rows.append(Cell(sheet, '邮政编码', NORMAL, 0, 4))
        self.rows.append(Cell(sheet, '法定代表人', NORMAL, 0, 5))
        self.rows.append(Cell(sheet, '机构类型', NORMAL, 0, 6))
        self.rows.append(Cell(sheet, '通讯地址', NORMAL, 0, 7))
        self.rows.append(Cell(sheet, '代理机构状态', NORMAL, 0, 8))
        self.rows.append(Cell(sheet, '代理机构成立年限', NORMAL, 0, 9))
        self.rows.append(Cell(sheet, '信用等级', NORMAL, 0, 10))

    def init_rows(self, sheet):
        """Append one Cell per value, starting on row 1 (row 0 is the header)."""
        row_index = 0
        for third_transport_monitor_data in self.data:
            row_index = row_index + 1
            col_index = 0
            for val in third_transport_monitor_data:
                self.rows.append(Cell(sheet, val, NORMAL, row_index, col_index))
                col_index = col_index + 1

    def write(self):
        """Write every accumulated cell to the sheet."""
        for row in self.rows:
            row.write()
主类
ddd.py
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import random
import xlwt
from datetime import datetime
from data_model import ZhuanLiInfodef getData(driver,detail_list):# 查找页面上的所有a标签all_a_tags driver.find_elements(By.CSS_SELECTOR, a.name)# 遍历所有a标签并打印它们的href属性如果有的话row 0for a_tag in all_a_tags:try:a_tag.click()except Exception as e:#捕获try块中发生的任何其他异常print(f查询详情发生了其他异常: {e})break;row 1print(row:str(row))time.sleep(random.randint(30, 50))# 切换到新窗口假设新窗口是最后一个打开的original_window driver.current_window_handleall_handles driver.window_handlesfor handle in all_handles:if handle ! original_window:driver.switch_to.window(handle)break# 在新页面上执行操作# 使用XPath查找元素companyName driver.find_element(By.XPATH, //div[contains(class, box)]/h5[contains(class, name)]).textemail driver.find_element(By.XPATH,//dt[text()电子邮箱]/following-sibling::dd).textphone driver.find_element(By.XPATH,//dt[text()联系电话]/following-sibling::dd).textwebUrl driver.find_element(By.XPATH,//dt[text()机构网址]/following-sibling::dd).textpostCode driver.find_element(By.XPATH,//dt[text()邮政编码]/following-sibling::dd).textfaren_elements driver.find_elements(By.XPATH,//dt[contains(text(), 法定代表人)]/following-sibling::dd)faren if faren_elements:farenfaren_elements[0].text;faren_elements2 driver.find_elements(By.XPATH,//dt[contains(text(), 执行事务合伙人)]/following-sibling::dd)if faren_elements2:farenfaren_elements2[0].text;type driver.find_element(By.XPATH,//dt[text()机构类型]/following-sibling::dd).textaddress driver.find_element(By.XPATH,//dt[text()通讯地址]/following-sibling::dd).textstatus driver.find_element(By.XPATH,//dt[text()代理机构状态]/following-sibling::dd).textyears driver.find_element(By.XPATH,//dt[text()代理机构成立年限]/following-sibling::dd).textlevel driver.find_element(By.XPATH,//dt[text()信用等级]/following-sibling::dd).textdetail_list.append([companyName, email, phone, webUrl, postCode, faren, type,address,status,years,level])print(detail_list[-1])driver.close() # 关闭当前窗口# 可选切换回原始窗口driver.switch_to.window(original_window)return detail_list
def queryByProvince(driver):#只查询广东省的all_province_link driver.find_elements(By.CSS_SELECTOR, .localoffice a)for a_province in all_province_link:if a_province.text.strip() 广东省:a_province.click()break#查询queryButtondriver.find_element(By.CSS_SELECTOR, .button-btn)queryButton.click()# 初始化WebDriver
# Initialize the WebDriver (Edge; requires the matching msedgedriver binary).
driver = webdriver.Edge()
# Open the target site.
driver.get('http://XXXX')
# Restrict results to Guangdong province.
queryByProvince(driver)

# Accumulated rows for the Excel export.
detail_list = []
detail_list = getData(driver, detail_list)
page = 1

# Walk the result pages via the 下一页 (next page) link until it is gone
# or clicking it fails.
while True:
    all_page_link = driver.find_elements(By.CSS_SELECTOR, '.incpage a')
    hasnext = 0
    for a_page in all_page_link:
        if a_page.text.strip() == '下一页':
            try:
                a_page.click()
                hasnext = 1
            except Exception as e:
                print(f'page发生了其他异常: {e}')
            finally:
                break  # only the first 下一页 link is relevant
    if hasnext:
        page += 1
        print('page:' + str(page))
        try:
            detail_list = getData(driver, detail_list)
        except Exception as e:
            # FIX: the original broke out of the loop unconditionally here
            # (finally: break), so only one extra page was ever scraped.
            # Now the loop only stops when scraping a page fails.
            print(f'getData发生了其他异常: {e}')
            break
    else:
        break

# Get the current time.
now = datetime.now()
# Format as a compact timestamp, e.g. 20230401143045.
datetime_str = now.strftime('%Y%m%d%H%M%S')
week = datetime_str + '信息'

# Export everything to an .xls workbook.
wb = xlwt.Workbook(encoding='utf-8')
zhuanlisheet = wb.add_sheet('信息')
zhuanliObj = ZhuanLiInfo(zhuanlisheet, detail_list)
zhuanliObj.write()
wb.save(f'res/data-{week}1.xls')

# Shut down the browser.
driver.quit()
大多数网站都有防爬机制，最好是能换ip，不行就把查询间隔拉长一点。我这里间隔 time.sleep(random.randint(30, 50))（30-50秒）查询，还是会被识别、需要验证码。大家以学习为主，不要随便爬取网站信息哈。