# https://www.holabirdsports.com/collections/padel-paddles
import random, csv, requests, logging, time, json, re
from selenium import webdriver
from copy import deepcopy
from time import sleep
from lxml import etree
from DrissionPage import Chromium, ChromiumOptions
class Holabirdsports:
def __init__(self):
        self.index_url = 'https://www.holabirdsports.com/collections/padel-paddles/?page={}&section_id=template--18674699043006__main'
self.headers = {
"accept": "*/*",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "no-cache",
"pragma": "no-cache",
"priority": "u=1, i",
"sec-ch-ua": "\"Chromium\";v=\"136\", \"Google Chrome\";v=\"136\", \"Not.A/Brand\";v=\"99\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Windows\"",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
}
        self.id = 0  # running product counter; the id itself is not written to the CSV
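        # Template row; the columns appear to follow Shopify's product CSV import format.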
self.init_data = {
'Handle': '',
'Title': '',
'Body (HTML)': '',
'Vendor': '',
'Type': '',
'Tags': '',
'Published': 'TRUE',
'Option1 Name': '',
'Option1 Value': '',
'Option2 Name': '',
'Option2 Value': '',
'Option3 Name': '',
'Option3 Value': '',
'Variant SKU': '',
'Variant Grams': '',
'Variant Inventory Tracker': 'Shopify',
'Variant Inventory Qty': '99999',
'Variant Inventory Policy': '',
'Variant Fulfillment Service': '',
'Variant Price': '',
'Variant Compare At Price': '',
'Variant Requires Shipping': '',
'Variant Taxable': '',
'Variant Barcode': '',
'Image Src': '',
'Image Position': '',
'Image Alt Text': '',
'Gift Card': '',
'SEO Title': '',
'SEO Description': '',
'Variant Image': '',
'Status': '',
'Collection': '',
}
        # Column order for the CSV, taken from the template row above.
        self.field_names = list(self.init_data.keys())
        # A fully blank row, used for a product's extra image rows.
        self.empty_data = dict.fromkeys(self.field_names, '')
self.file = None
self.writer = None
self.browser = None
self.tab = None
self.cnt = 0
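        # NOTE: this category list references a swimwear catalog and is not used
        # anywhere in this scraper; it looks like leftover configuration.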
self.type_list = [
{
'_type': 'New Arrivals',
'seo_url': 'new-arrivals',
'ab_type': 'original_ranking_mix',
'scene_id': 'original_ranking'
},
{
'_type': 'Bikinis',
'seo_url': 'bikinis',
'ab_type': 'collection_self_ranking_pure',
'scene_id': 'us_web_nmix_collect_v103'
},
{
'_type': 'One-Pieces',
'seo_url': 'one-piece',
'ab_type': 'collection_self_ranking_pure',
'scene_id': 'us_web_nmix_collect_v103'
},
{
'_type': 'Dresses',
'seo_url': 'vacationdress',
'ab_type': 'collection_self_ranking_pure',
'scene_id': 'us_web_nmix_collect_v103'
},
{
'_type': 'Cover-Ups',
'seo_url': 'cover-up-1',
'ab_type': '',
'scene_id': ''
},
{
'_type': 'Rompers & Jumpsuits',
'seo_url': 'jumpsuits-rompers',
'ab_type': 'original_ranking_mix',
'scene_id': 'original_ranking'
},
{
'_type': 'Tops & Bottoms',
'seo_url': 'alltopsvacaion-copy',
'ab_type': '',
'scene_id': ''
},
{
'_type': 'Clothing',
'seo_url': 'allbestsellersvacation',
'ab_type': '',
'scene_id': ''
}
]
    def simulated_smooth_scroll(self, driver, step=1000, interval=0.5, timeout=30):
        # Smoothly scroll to the bottom of the page
        start_time = time.time()
        last_height = driver.execute_script("return document.documentElement.scrollHeight")
        current_position = 0
        while time.time() - start_time < timeout:
            # Compute the remaining scroll distance
            remaining = last_height - current_position
            # Adjust the step size dynamically
            current_step = min(step, remaining) if remaining > 0 else 0
            if current_step <= 0:
                break
            # Scroll one step
            driver.execute_script(f"window.scrollBy(0, {current_step})")
            current_position += current_step
            # Wait for scrolling and content loading (interval scales with step size)
            time.sleep(interval * (current_step / step))
            # Check the new page height
            new_height = driver.execute_script(
                "return document.documentElement.scrollHeight"
            )
            # Update the height (handles dynamically loaded content)
            if new_height > last_height:
                last_height = new_height
def get_driver(self, url, xpath_txt=None, is_turn=False):
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.page_load_strategy = "none"
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(10)
driver.maximize_window()
        while True:
            try:
                print('Fetching page data for', url)
                driver.get(url)
                if is_turn:
                    sleep(1)
                    self.simulated_smooth_scroll(driver)
                if xpath_txt:
                    driver.find_element('xpath', xpath_txt)
                else:
                    self.random_sleep(5)
                break
            except Exception:
                print(url, 'element not located, retrying...')
        # self.writer_to_file(driver.page_source, 'w', 'utf-8')
        return driver
    def driver_continue(self, driver, url, xpath_txt=None, is_turn=False):
        flag = True
        while flag:
            flag = False
            try:
                print('Fetching page data for', url)
                driver.get(url)
                if is_turn:
                    self.random_sleep()
                    self.simulated_smooth_scroll(driver)
                if xpath_txt:
                    driver.find_element('xpath', xpath_txt)
            except Exception:
                flag = True
                print(url, 'element not located, retrying...')
        # self.writer_to_file(driver.page_source, 'w', 'utf-8')
    def get_page_html(self, url, xpath_txt=None, is_turn=False):
        driver = self.get_driver(url, xpath_txt, is_turn=is_turn)
        page_source = driver.page_source
        driver.quit()  # quit() shuts down the whole browser, not just the tab
        return etree.HTML(page_source)
    def writer_to_file(self, data, mode, encoding=None):
        # binary modes take no encoding argument
        if 'b' in mode:
            with open('./text.html', mode) as f:
                f.write(data)
        else:
            with open('./text.html', mode, encoding=encoding) as f:
                f.write(data)
        print('File written successfully!')
def driver_click(self, driver, timeout=2):
driver.click()
self.random_sleep(timeout)
def driver_back(self, driver, timeout=2):
driver.back()
self.random_sleep(timeout)
def driver_refresh(self, driver, timeout=2):
driver.refresh()
self.random_sleep(timeout)
def tab_wait(self, tab, timeout=3):
tab.wait(timeout)
return tab
def tab_get(self, tab, url, xpath_txt='html', is_turn=False):
        print('Fetching data for', url)
while True:
tab.get(url)
self.tab_wait(tab)
if is_turn:
tab.scroll.to_bottom()
self.tab_wait(tab)
ele = tab.ele(f'x:{xpath_txt}')
if ele:
break
            print('Element not found, retrying the request')
tab.wait(2)
return tab
def get_dp_html(self, tab, url, xpath_txt='html', is_turn=False):
tab = self.tab_get(tab, url, xpath_txt=xpath_txt, is_turn=is_turn)
res = etree.HTML(tab.html)
return res
def random_sleep(self, timeout=2):
sleep(random.random() + timeout)
    def save_csv(self, data):
        # data is already keyed by self.field_names, so write the row directly
        self.writer.writerow({name: data[name] for name in self.field_names})
    def get_response(self, url, timeout=2):
        while True:
            print(f'Fetching data from {url}')
            try:
                response = requests.get(url, headers=self.headers, timeout=10)
                break
            except requests.RequestException:
                print('Request failed, retrying')
                self.random_sleep(timeout)
        self.random_sleep(timeout)
        return response
def tab_run_js(self, tab, js_code):
while True:
try:
tab.run_js(js_code)
break
except Exception as e:
                print('tab_run_js caught an exception from run_js:', e)
tab.wait(1)
def ele_click(self, tab, ele):
while True:
try:
tab.actions.click(ele)
break
except Exception as e:
                print('ele_click caught an exception from actions.click:', e)
tab.wait(1)
tab.wait(2)
def dp_click_ad(self, tab, xpath_txt):
ad_ele = tab.ele(f'x:{xpath_txt}', timeout=2)
if ad_ele:
            print('Ad overlay found:', ad_ele)
self.ele_click(tab, ad_ele)
self.tab_wait(tab, 1)
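    # NOTE: several DrissionPage helpers below (e.g. infinite_scroll,
    # get_product_img_url_list, cancel_translate) target coach.com markup and
    # are not invoked by run(); they appear to be carried over from an earlier
    # scraper.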
def infinite_scroll(self, tab):
product_url_list = []
w_cnt = 0
turn_cnt = 0
is_bottom = False
while True:
self.dp_click_ad(tab, '//button[@class="css-2vqtum"]')
self.dp_click_ad(tab, '//button[@class="css-71t821"]')
if is_bottom:
break
tab.scroll.to_bottom()
self.tab_wait(tab, 1)
            self.tab_run_js(tab, 'window.scrollBy(0, -1000)')  # nudge back up to trigger lazy loading
            turn_cnt += 1
            print(f'Scrolled {turn_cnt} times')
n_cnt = 0
self.tab_wait(tab, 5)
product_url_ele_list = tab.eles('x://div[@class="product-thumbnail plpv3 css-1gasjii"]//a[@class="css-avqw6d"]')
for product_url_ele in product_url_ele_list:
product_url = product_url_ele.attr('href')
if 'https:' not in product_url:
product_url = 'https://www.coach.com' + product_url
if product_url in product_url_list:
continue
n_cnt += 1
product_url_list.append(product_url)
            print('Products on page:', len(product_url_ele_list))
            print('Products collected so far:', len(product_url_list))
if n_cnt == 0:
w_cnt += 1
else:
w_cnt = 0
if w_cnt >= 5:
                print('Reached the bottom')
is_bottom = True
return product_url_list
    def get_detail_tab(self, url, xpath_txt='html', backup_xpath_txt='html'):
        print('Fetching data for', url)
        tab = self.browser.latest_tab
        while True:
            tab.get(url)
            self.tab_wait(tab)
            ele = tab.ele(f'x:{xpath_txt}')
            if ele:
                print('Found via the primary xpath')
                break
            else:
                t_ele = tab.ele(f'x:{backup_xpath_txt}')
                if t_ele:
                    print('Found via the backup xpath')
                    break
            print('Element not found, retrying the request')
            tab.wait(2)
        return tab
def get_product_img_url_list(self, tab, data):
tab.wait(5)
self.dp_click_ad(tab, '//button[@class="css-2vqtum"]')
data['Option1 Value'] = tab.ele('x://p[contains(@class, "color-name")]').text
product_color_img_url_list = []
img_temp_ele = tab.ele('x://li[contains(@class, "is-prev")]')
if img_temp_ele:
product_color_img_url_cnt = int(img_temp_ele.attr('data-slide-index'))
else:
product_color_img_url_cnt = len(tab.eles('x://div[@class="css-1161qt5"]/div')) - 1
        print('Expected image count for color', data['Option1 Value'], ':', product_color_img_url_cnt)
tab.wait(6)
raw_product_color_img_ele_list = tab.eles('x://li[contains(@class, "splide__slide")]/div | //li[contains(@class, "splide__slide")]//video/source | //div[@class="css-1161qt5"]/div[@class="css-113el1s"]/div')
for raw_product_color_img_ele in raw_product_color_img_ele_list:
raw_product_color_img_url = raw_product_color_img_ele.attr('src')
if not raw_product_color_img_url:
continue
raw_product_color_img_url = raw_product_color_img_url.split('?')[0]
if raw_product_color_img_url in product_color_img_url_list:
continue
product_color_img_url_list.append(raw_product_color_img_url)
        print('Collected image count for color', data['Option1 Value'], ':', len(product_color_img_url_list))
return product_color_img_url_list
def init_tab(self):
co = ChromiumOptions()
co.set_browser_path(r'C:\Program Files\Google\Chrome\Application\chrome.exe')
co.auto_port()
# co.headless()
# co.set_argument('--no-sandbox')
self.browser = Chromium(co)
self.tab = self.browser.latest_tab
self.tab.set.window.max()
def handle_img_url(self, raw_img_url):
return raw_img_url.split('?')[0]
def save_all_data(self, product_data_list):
for product_data in product_data_list:
self.cnt += 1
self.save_csv(product_data)
            print('Row', self.cnt, 'saved!')
    # Dismiss the in-page translation widget
def cancel_translate(self, url):
tab = self.tab_get(self.tab, url, '//div[@class="collection_item overflow-hidden"]')
tab.actions.move_to(tab.ele('x://div[@class="gt-translate-btn-bg"]'))
tab.wait(2)
tab.actions.click(tab.ele('x://div[@class="gt-translate-switch"]'))
tab.wait(2)
def get_index_response(self, page, timeout=2):
        print(f'Fetching data for page {page}')
return self.get_response(self.index_url.format(page), timeout)
def get_index_html(self, page, timeout=2):
response = self.get_index_response(page, timeout)
return etree.HTML(response.text)
def get_index_json(self, page, timeout=2):
index_html = self.get_index_html(page, timeout)
        # Locate the target <script type="application/ld+json"> tag via XPath
        script_element = index_html.xpath('//div[contains(@class, "flex-display")]//script[@type="application/ld+json"]')[0]
        # Extract the text content and clean it up
        raw_text = script_element.text.strip() if script_element.text else ""
        # Attempt to parse the JSON
        index_json = {}
        if raw_text:
            try:
                index_json = json.loads(raw_text)
            except json.JSONDecodeError as e:
                print(f"JSON parsing failed: {e}, raw content: {raw_text}")
return index_json['itemListElement']
def get_total_page(self):
index_html = self.get_index_html(1)
total_page = int(index_html.xpath('//div[contains(@class, "pagination__nav")]/a[last()]/@data-page')[0])
return total_page
    def get_detail_json(self, product_name, url, timeout=2):
        while True:
            print(f'Fetching data for {product_name}')
            try:
                res = self.get_response(url, timeout).json()
                if res['title']:
                    break
                print('Failed to get JSON data, retrying')
            except Exception:
                print('Detail page returned no parsable JSON data')
        return res
    def check_color(self, color_dict_filter, color_text):
        return any(color_dict['name'] == color_text for color_dict in color_dict_filter)
def test_product(self, product_name, _type='One'):
self.file = open('./test.csv', 'w', newline='', encoding='utf-8-sig')
self.writer = csv.DictWriter(self.file, fieldnames=self.field_names)
self.writer.writeheader()
logging.captureWarnings(True)
product_url = 'https://www.holabirdsports.com/products/' + product_name.replace(' ', '-').replace("'", "").replace('/', '-').lower() + '.js'
product_json = self.get_detail_json(product_name, product_url)
self.id += 1
        print(self.id, 'started')
product_data_list = self.product_detail_parse(_type, product_json, product_json['handle'], product_name)
self.save_all_data(product_data_list)
        print(self.id, 'finished')
if self.file:
self.file.close()
def product_detail_parse(self, _type, product_json, data_handle, product_name):
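        # Shopify CSV convention: the first row of a variant carries all fields;
        # each additional product image becomes a near-empty row that shares the
        # same Handle, with only Image Src / Image Position filled in.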
_type = _type.replace('-', ' ').title()
product_data_list = []
sku_id = 0
is_have_color = False
data = deepcopy(self.init_data)
data['Type'] = _type
data['Collection'] = data['Type']
data['Handle'] = data_handle
data['Title'] = product_json['title']
data['Body (HTML)'] = product_json['description']
product_option_json_list = product_json['options']
for product_option_json in product_option_json_list:
data[f'Option{product_option_json["position"]} Name'] = product_option_json['name']
if 'Color' in product_option_json['name']:
is_have_color = True
product_variant_json_list = product_json['variants']
for product_variant_json in product_variant_json_list:
if not product_variant_json['available']:
continue
_data = deepcopy(data)
sku_id += 1
product_variant_option_list = product_variant_json['options']
option_id = 0
for product_variant_option in product_variant_option_list:
option_id += 1
_data[f'Option{option_id} Value'] = product_variant_option
_data['Variant SKU'] = _type + '-' + _data['Title'] + '-' + _data['Option1 Value'] + '-' + _data['Option2 Value'] + '-' + _data['Option3 Value'] + '-' + str(sku_id)
_data['Variant SKU'] = _data['Variant SKU'].replace(' ', '-').replace('/', '-').replace("'", '-').lower()
_data['Variant Price'] = "%.2f" % float(int(product_variant_json['price']) / 100)
_data['Variant Compare At Price'] = "%.2f" % float(int(product_variant_json['compare_at_price']) / 100)
            # -------------------------------------------
            # Collect the image URLs and their count
product_img_url_list = []
product_img_id = ''
if is_have_color:
product_img_id = product_variant_json['featured_image']['src'].split('?')[0].split('/')[-1].split('.')[0][:-2]
product_all_img_url_list = product_json['images']
for product_all_img_url in product_all_img_url_list:
if (not is_have_color) or (product_img_id in product_all_img_url):
if 'https:' not in product_all_img_url:
product_img_url = 'https:' + product_all_img_url
else:
product_img_url = product_all_img_url
product_img_url_list.append(product_img_url.split('?')[0])
product_img_url_len = len(product_img_url_list)
# -------------------------------------------
_data['Image Src'] = product_img_url_list[0]
_data['Image Position'] = 1
_data['Variant Image'] = _data['Image Src']
            print(f'Product #{self.id} {product_name}, SKU #{sku_id}: {product_img_url_len} images')
product_data_list.append(_data)
print(_data)
for i in range(1, product_img_url_len):
temp_data = deepcopy(self.empty_data)
temp_data['Handle'] = _data['Handle']
temp_data['Published'] = 'TRUE'
temp_data['Image Src'] = product_img_url_list[i]
temp_data['Image Position'] = i + 1
product_data_list.append(temp_data)
print(temp_data)
return product_data_list
def parse(self):
_type = 'Padel Paddles'
        product_index_json_list = self.get_index_json(1)  # request no. 2: the collection index
for product_index_json in product_index_json_list:
self.id += 1
product_detail_interface_url = 'https://www.holabirdsports.com' + product_index_json['url'] + '.js'
            product_detail_json = self.get_detail_json(product_index_json['name'], product_detail_interface_url)  # request no. 3: product detail JSON
            product_uuid = product_detail_json['handle']
            with open('./filter.txt', 'r', encoding='utf-8') as f:
                filter_txt = f.read()
            if product_uuid in filter_txt:
                print(self.id, 'already done')
                continue
            print(self.id, 'started')
            product_data_list = self.product_detail_parse(_type, product_detail_json, product_uuid, product_index_json['name'])
            self.save_all_data(product_data_list)
            print(self.id, 'finished')
with open('./filter.txt', 'a', encoding='utf-8') as f:
f.write(product_uuid + '\n')
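    # run(is_continue=True) appends to the existing CSV and keeps filter.txt so a
    # crashed crawl can resume; the default wipes both and starts from scratch.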
def run(self, is_continue=False):
if is_continue:
self.file = open('./holabirdsports_2_padel_paddles.csv', 'a', newline='', encoding='utf-8-sig')
self.writer = csv.DictWriter(self.file, fieldnames=self.field_names)
else:
self.file = open('./holabirdsports_2_padel_paddles.csv', 'w', newline='', encoding='utf-8-sig')
self.writer = csv.DictWriter(self.file, fieldnames=self.field_names)
self.writer.writeheader()
with open('./filter.txt', 'w', encoding='utf-8') as f:
f.write('')
logging.captureWarnings(True)
self.parse()
if self.file:
self.file.close()
if __name__ == '__main__':
holabirdsports = Holabirdsports()
holabirdsports.run()
# holabirdsports.test_product('', 'Padel Paddles')
# Notes:
# Collection endpoint: https://www.holabirdsports.com/collections/padel-paddles/?page={}&section_id=template--18674699043006__main
# Detail endpoint: https://www.holabirdsports.com/products/*.js
# page starts at 1; each page holds 46 products
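# A minimal sketch of what the detail endpoint returns, limited to the keys this
# script actually reads (title, handle, description, options, variants, images).
# The product handle in the URL below is hypothetical:
#
#   import requests
#   url = 'https://www.holabirdsports.com/products/some-padel-paddle.js'
#   product = requests.get(url, timeout=10).json()
#   print(product['title'], product['handle'])
#   print(len(product['variants']), 'variants,', len(product['images']), 'images')
#   first_variant = product['variants'][0]
#   print(first_variant['options'], first_variant['price'], first_variant['available'])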
# To-dos:
# Remove the True flag before uploading the code ✓
# Try the full product set ✓
# Try a partial product set ✓
# Update the URL
# Update the file name
# Update the type