# 🛫 https://global.llbean.com/llb/shop/50/?page=bags-and-travel
import random, csv, requests, logging
from selenium import webdriver
from copy import deepcopy
from time import sleep
from lxml import etree
class Llbean:
    """Scraper for L.L.Bean's global "Bags & Travel" catalog.

    Walks the paginated category listing, opens each product page in a
    headless Chrome to discover the variation-JSON endpoint, then pulls
    colors/sizes/prices/images from that JSON and writes rows in
    Shopify's product-import CSV format to ``llbean.csv``.
    """

    def __init__(self):
        # Listing URL template; {} receives the pagination start offset
        # (48 products per page).
        self.url = 'https://global.llbean.com/llb/shop/50/?page=bags-and-travel&start={}&sz=48'
        self.id = 0  # running product counter for progress logs; not written to the CSV
        # Template row matching Shopify's product CSV schema; deep-copied
        # per row so per-product mutation never leaks between rows.
        self.init_data = {
            'Handle': '',
            'Title': '',
            'Body (HTML)': '',
            'Vendor': '',
            'Type': '',
            'Tags': '',
            'Published': 'TRUE',
            'Option1 Name': 'Color',
            'Option1 Value': '',
            'Option2 Name': 'Size',
            'Option2 Value': '',
            'Option3 Name': '',
            'Option3 Value': '',
            'Variant SKU': '',
            'Variant Grams': '',
            'Variant Inventory Tracker': 'Shopify',
            'Variant Inventory Qty': '',
            'Variant Inventory Policy': '',
            'Variant Fulfillment Service': '',
            'Variant Price': '',
            'Variant Compare At Price': '',
            'Variant Requires Shipping': '',
            'Variant Taxable': '',
            'Variant Barcode': '',
            'Image Src': '',
            'Image Position': '',
            'Image Alt Text': '',
            'Gift Card': '',
            'SEO Title': '',
            'SEO Description': '',
            'Variant Image': '',
            'Status': '',
            'Collection': '',
        }
        # Derive the CSV header from the template so the two can never
        # drift apart (previously a hand-maintained duplicate list).
        self.field_names = list(self.init_data)
        self.file = None    # opened lazily in parse()
        self.writer = None  # csv.DictWriter bound to self.file
        self.headers = {
            "accept": "*/*",
            "accept-language": "zh-CN,zh;q=0.9",
            "priority": "u=1, i",
            "referer": "https://global.llbean.com/llb/shop/37037.html?cgid=50&page=boat-and-tote-bag-zip-top",
            "sec-ch-ua": "\"Google Chrome\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
            "x-requested-with": "XMLHttpRequest"
        }
        # Session cookies captured from a real browser session; these
        # expire, so a failing run likely needs them refreshed.
        self.cookies = {
            "dwanonymous_d13b2ca47c82e2b9b55e4e73e26138f9": "acLGEUJnl9Sf4T47uf6aEG2gQ6",
            "GlobalE_Data": "%7B%22countryISO%22%3A%22TW%22%2C%22cultureCode%22%3A%22en-GB%22%2C%22currencyCode%22%3A%22TWD%22%2C%22apiVersion%22%3A%222.1.4%22%7D",
            "_ga": "GA1.1.752159582.1744796441",
            "GlobalE_Geo_Popup": "true",
            "_uetsid": "dd0ecc001aa611f0b557934d883e85db|xakoul|2|fv4|0|1932",
            "_uetvid": "dd0ef7501aa611f097c9b31d5b526d3e|znh5e0|1744796443427|1|1|bat.bing.com/p/insights/c/w",
            "LLB_ROW_Dnt": "1",
            "_fbp": "fb.1.1744796444258.25439828125704840",
            "LLB_Email_Subscription": "false",
            "BVBRANDID": "82983d77-009b-40a3-a682-170e8f4389d2",
            "_abck": "668806FEEE2CA1E7C8499990174868B1~-1~YAAQDFFFy+N+dguWAQAAPm7BQQ11WDiWgQIuwC9qJUtbwXgrkX+axh17QnVDfTe0Kx5dN6otJQ0B9qAGeNVn6WRggBnJT0D6rahoDpSt86BVG/VrF7MraIuBuiX3SIAGcPLNovHci/+sRVHfDjZ5rxZqxoQR7UhHjpyA66/bT2FSsl86wKKyIEuxTPTEjc7WlcKlIJYpuSFntYD+Aho5JM6EZ55s67z7gSj+53qgW/Dc+GDDwBM4R5OfocuoLgfa7wjnj8Qtb1hgoZwMMsDsXLhkhbwecB2/gm+VuJUlenc1gWw1Nd/DbkiaolUCKCB9lf0FJ66IqoY7kIbmydMCtRcVxQJlYWCo1y8p2yn6eRDu0jq6dMBRlCt3fnihygjUkdMHv7YBRSQOW5tPnFtbgiLi02JavRJNwx6gGOc=~-1~-1~-1",
            "bm_sz": "1535D6264B8CD7CB65E0ECF5CD15B825~YAAQDFFFy+R+dguWAQAAPm7BQRvAqHKM2NExo6pY7Cd0q7fom+xXGWmnnvWTkk21k4heHMaQnFtJoQ5SOv+1zRmn3dGKCMRGTF151K+GzQxi4XIvldHGjqDVhvtd92juspaV77ESftEYnqaAzLtJOnGHC2uV11AeGHhL7/LKzvfZPnxIVcsPqRdbMhLiidrvF78Ei1uBZdUbv906ewpnY8iZhe0i0Mo81Nzcl6fOyhu/MttIfK+n1D8/a+DtW02dBcpE16Hn95qZzxFnicXF4nsk0EB5dXfu4xhBUqMISBJi7uswXDcf2vEYhdvw13qCBvDE14xFLNngvem5wFbPDVlt2IU6rA9Gucj11P7FU0H6ojOPSqKUATWtStufJKu6k7V31E9PZ2pzRdVm~3687991~3552581",
            "dwsid": "6412WGeg39xpwdr_JgWlw-1c4TaP5_i7hpm7pZlUq89dAzAAje9ZVHdco7FkSj9U3HFnx_fkY4z8iJqFhVySOA==",
            "sid": "ZYDIETslDMNJwMeLU5TzVWNRX3_TYiukppg",
            "GlobalE_Full_Redirect": "false",
            "__cq_dnt": "1",
            "dw_dnt": "1",
            "BVImplmain_site": "18072",
            "BVBRANDSID": "a84afcbc-6f5b-4d02-a733-c65927ef2f16",
            "utag_main": "v_id:01963df8d84f0014104f067047f10506f004a06700bd0$_sn:2$_ss:0$_st:1744863738501$_pn:13%3Bexp-session$ses_id:1744859923869%3Bexp-session",
            "_ga_J9VHBK32D5": "GS1.1.1744859925.2.1.1744861939.0.0.0",
            "GlobalE_CT_Data": "%7B%22CUID%22%3A%7B%22id%22%3A%22207147689.296087532.1723%22%2C%22expirationDate%22%3A%22Thu%2C%2017%20Apr%202025%2004%3A22%3A20%20GMT%22%7D%2C%22CHKCUID%22%3Anull%2C%22GA4SID%22%3A426420058%2C%22GA4TS%22%3A1744861940377%2C%22Domain%22%3A%22global.llbean.com%22%7D"
        }

    def get_driver(self, url, xpath_txt):
        """Open *url* in headless Chrome and retry until the element at
        *xpath_txt* can be located.

        Returns the live driver; the caller owns it and must ``quit()`` it.
        NOTE(review): retries forever if the element never appears.
        """
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        # "none" makes get() return immediately; the implicit wait below
        # does the actual waiting for the target element.
        options.page_load_strategy = "none"
        driver = webdriver.Chrome(options=options)
        driver.implicitly_wait(20)
        driver.maximize_window()
        while True:
            try:
                print('正在获取', url, '的页面数据')
                driver.get(url)
                driver.find_element('xpath', xpath_txt)
                return driver
            except Exception:  # narrowed from bare except; keep the retry loop
                print(url, '没定位到,重新请求...')

    def get_page_html(self, url, xpath_txt):
        """Render *url* (waiting for *xpath_txt*) and return the DOM as an
        lxml element tree; the browser is fully shut down afterwards.

        Previously duplicated get_driver() verbatim — now delegates to it.
        """
        driver = self.get_driver(url, xpath_txt)
        try:
            page_source = driver.page_source
        finally:
            # quit() (not close()) terminates the browser AND chromedriver
            # processes; close() leaked one process pair per product page.
            driver.quit()
        return etree.HTML(page_source)

    def writer_to_file(self, data, mode, encoding=None):
        """Debug helper: dump *data* to ./text.html.

        Bug fix: the original tested ``'b' in encoding``, which raises
        TypeError with the default encoding=None; binary-ness is a
        property of *mode*, so test the mode. Handles are now closed
        deterministically via ``with``.
        """
        if 'b' in mode:
            with open('./text.html', mode) as f:
                f.write(data)
        else:
            with open('./text.html', mode, encoding=encoding) as f:
                f.write(data)
        print('写入文件成功!')

    def random_sleep(self):
        """Sleep 0.5–1.5 s to throttle requests."""
        sleep(random.random() + 0.5)

    def save_csv(self, data):
        """Write one Shopify CSV row, keeping only the known header fields."""
        self.writer.writerow({name: data[name] for name in self.field_names})

    def __del__(self):
        # parse() may never have run, so self.file can still be None.
        output = getattr(self, 'file', None)
        if output is not None:
            output.close()

    def get_index_html(self, url, page):
        """GET listing page *page* (48 items per page) and return its lxml tree."""
        url = url.format(str(page * 48))
        # verify=False: TLS verification is skipped for this host; the
        # resulting urllib3 warnings are captured in run().
        response = requests.get(url, headers=self.headers, cookies=self.cookies, verify=False)
        self.random_sleep()
        return etree.HTML(response.text)

    def get_total_page(self):
        """Read the total page count from the pager label on the first page
        (last whitespace-separated token of the "x of N" text)."""
        html = self.get_index_html(self.url, 0)
        return int(html.xpath('//div[@class="hidden-xs-down"]/span/text()')[0].split(' ')[-1])

    def get_detail_json(self, url):
        """Fetch a product's variation-JSON endpoint and return it decoded."""
        print('正在获取', url, 'json数据')
        res = requests.get(url, headers=self.headers, cookies=self.cookies, verify=False).json()
        self.random_sleep()
        return res

    def get_right_color_url(self, url, img_url):
        """Return *url* with an empty ``dwvar_128133_color`` parameter filled in.

        When the color query parameter is blank ('=&' appears in *url*),
        the color code is recovered from the swatch image filename, whose
        second-to-last '_'-separated token is the code.
        """
        if '=&' in url:
            color_code = img_url.split('_')[-2]
            url = url.replace('dwvar_128133_color=', 'dwvar_128133_color=' + color_code)
        return url

    def _write_color_rows(self, data, img_list):
        """Write the variant row (first image) plus one image-only row per
        extra image of a color."""
        print('图片个数:', len(img_list))
        for position, img in enumerate(img_list, start=1):
            if position == 1:
                row = data
            else:
                # Extra images become image-only rows sharing the Handle.
                row = deepcopy(self.init_data)
                row['Handle'] = data['Handle']
            row['Image Src'] = img['url']
            # Bug fix: every extra image used to get position 1; Shopify's
            # Image Position column orders a product's images, so number them.
            row['Image Position'] = position
            self.save_csv(row)
            print(row)

    def parse(self, url):
        """Crawl every listing page of *url* and write llbean.csv."""
        self.file = open('llbean.csv', 'w', newline='', encoding='utf-8-sig')
        self.writer = csv.DictWriter(self.file, fieldnames=self.field_names)
        self.writer.writeheader()
        data = deepcopy(self.init_data)
        total_page = self.get_total_page()
        for page in range(total_page):
            html = self.get_index_html(url, page)
            temp_url_list = html.xpath('//div[@class="image-container"]/a/@href')
            product_url_list = ['https://global.llbean.com' + temp_url for temp_url in temp_url_list]
            for product_url in product_url_list:
                self.id += 1
                print(self.id, '开始')
                product_html = self.get_page_html(product_url, '//div[@class="attribute color col-12 p-0"]/button')
                product_color_url = product_html.xpath('//div[@class="attribute color col-12 p-0"]/button/@data-url')[0]
                # Fill in the color code when the page left it blank.
                product_color_url = self.get_right_color_url(
                    product_color_url,
                    product_html.xpath('//img[@class="image-swatch"]/@src')[0])
                detail_json = self.get_detail_json(product_color_url)['product']
                item = detail_json['itemId']['display']
                data['Body (HTML)'] = detail_json['premiseStatement']
                data['Title'] = detail_json['productName']
                data['Type'] = 'Bags & Travel'
                data['Collection'] = data['Type']
                color_json_list = detail_json['variationAttributes'][0]['values']
                print('产品颜色个数:', len(color_json_list))
                try:
                    size_json_list = detail_json['variationAttributes'][1]['values']
                    # A lone "Regular" size means the product is one-size.
                    if 'Regular' == size_json_list[0]['displayValue']:
                        raise Exception("json中没有尺寸")
                except Exception:  # no size attribute in the JSON
                    size_json_list = []
                if not size_json_list:
                    # One-size product: one variant row per color.
                    print('产品尺寸个数:', 1)
                    for color_json in color_json_list:
                        data['Option1 Value'] = color_json['displayValue']
                        data['Handle'] = item + '-' + data['Option1 Value']
                        data['Option2 Value'] = 'one size'
                        data['Variant Price'] = detail_json['price']['sales']['value']
                        self._write_color_rows(data, color_json['images']['small'])
                    print(self.id, '结束')
                    continue
                # Sized product: one variant row per (color, size) pair.
                print('产品尺寸个数:', len(size_json_list))
                for color_json in color_json_list:
                    data['Option1 Value'] = color_json['displayValue']
                    data['Handle'] = item + '-' + data['Option1 Value']
                    for size_json in size_json_list:
                        data['Option2 Value'] = size_json['displayValue']
                        # Strip the "NT$1,234" formatting to a bare number.
                        data['Variant Price'] = size_json['minPrice'].replace('NT$', '').replace(',', '')
                        self._write_color_rows(data, color_json['images']['small'])
                print(self.id, '结束')

    def run(self):
        """Entry point: capture urllib3's verify=False warnings, then crawl."""
        logging.captureWarnings(True)
        self.parse(self.url)
if __name__ == '__main__':
    # Script entry point: crawl the catalog and write llbean.csv.
    scraper = Llbean()
    scraper.run()