
import random, csv, requests, logging
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from copy import deepcopy
from time import sleep
from lxml import etree
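# Assumes a local Chrome install; Selenium 4.6+ resolves a matching
# chromedriver automatically via Selenium Manager.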

class Wooland:
    def __init__(self):
        self.url = 'https://wooland.com/collections/dresses-skirts-jumpsuits'
        self.id = 0  # progress counter only; this field is not written to the CSV
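        # One template row matching Shopify's product CSV import columns;
        # deep-copied and filled in for each variant before writing.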
        self.init_data = {
            'Handle': '',
            'Title': '',
            'Body (HTML)': '',
            'Vendor': '',
            'Type': '',
            'Tags': '',
            'Published': 'TRUE',
            'Option1 Name': 'Color',
            'Option1 Value': '',
            'Option2 Name': 'Size',
            'Option2 Value': '',
            'Option3 Name': '',
            'Option3 Value': '',
            'Variant SKU': '',
            'Variant Grams': '',
            'Variant Inventory Tracker': 'Shopify',
            'Variant Inventory Qty': '',
            'Variant Inventory Policy': '',
            'Variant Fulfillment Service': '',
            'Variant Price': '',
            'Variant Compare At Price': '',
            'Variant Requires Shipping': '',
            'Variant Taxable': '',
            'Variant Barcode': '',
            'Image Src': '',
            'Image Position': '',
            'Image Alt Text': '',
            'Gift Card': '',
            'SEO Title': '',
            'SEO Description': '',
            'Variant Image': '',
            'Status': '',
            'Collection': '',
        }
        self.field_names = ['Handle', 'Title', 'Body (HTML)', 'Vendor', 'Type', 'Tags', 'Published', 'Option1 Name',
                            'Option1 Value', 'Option2 Name', 'Option2 Value', 'Option3 Name', 'Option3 Value',
                            'Variant SKU', 'Variant Grams', 'Variant Inventory Tracker', 'Variant Inventory Qty',
                            'Variant Inventory Policy', 'Variant Fulfillment Service', 'Variant Price',
                            'Variant Compare At Price', 'Variant Requires Shipping', 'Variant Taxable',
                            'Variant Barcode', 'Image Src', 'Image Position', 'Image Alt Text', 'Gift Card',
                            'SEO Title', 'SEO Description', 'Variant Image', 'Status', 'Collection']
        self.file = None
        self.writer = None

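        # Request headers copied from a real Chrome session so the storefront
        # returns the same HTML a normal browser would see.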
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "max-age=0",
            "if-none-match": "\"cacheable:7f17a78127bbba1db80fcebf35a7c712\"",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Google Chrome\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
        }

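        # Cookies exported from the same browser session; they expire over
        # time, so refresh them before a new run.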
        self.cookies = {
            "secure_customer_sig": "",
            "localization": "US",
            "cart_currency": "USD",
            "_shopify_y": "2BF7AF89-db65-4885-9c94-eddf7e6d1b40",
            "_orig_referrer": "",
            "_landing_page": "%2Fcollections%2Fdresses-skirts-jumpsuits",
            "_ks_scriptVersion": "318",
            "_pin_unauth": "dWlkPU56TXpZbVEzTm1VdFltSTBNeTAwTVdRNUxXRTJZakl0T0RSak1tUm1aVEF6WVdabQ",
            "_ga": "GA1.1.963718107.1744711475",
            "_ttp": "01JRWEGR1S5R3BHNRBVFEFNGMN_.tt.0",
            "_gcl_au": "1.1.1049729370.1744711476",
            "_fbp": "fb.1.1744711475849.247676053415700692",
            "_tracking_consent": "%7B%22con%22%3A%7B%22CMP%22%3A%7B%22a%22%3A%22%22%2C%22m%22%3A%22%22%2C%22p%22%3A%22%22%2C%22s%22%3A%22%22%7D%7D%2C%22v%22%3A%222.1%22%2C%22region%22%3A%22TWTPE%22%2C%22reg%22%3A%22%22%2C%22purposes%22%3A%7B%22a%22%3Atrue%2C%22p%22%3Atrue%2C%22m%22%3Atrue%2C%22t%22%3Atrue%7D%2C%22display_banner%22%3Afalse%2C%22sale_of_data_region%22%3Afalse%2C%22consent_id%22%3A%2259E48FFD-fba9-4BD2-bc41-72cb684b4f1d%22%7D",
            "_ks_scriptVersionChecked": "true",
            "_ks_userCountryUnit": "1",
            "_ks_countryCodeFromIP": "TW",
            "_shopify_sa_p": "",
            "shopify_pay_redirect": "pending",
            "_clck": "1shmhul%7C2%7Cfv5%7C0%7C1931",
            "kiwi-sizing-token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzaWQiOiJmMTk1ZDhhOS1hMWEwLTRmMWUtYjljOS0zN2U0Y2UyNDNlYjkiLCJpYXQiOjE3NDQ4NTQ5MTEsImV4cCI6MTc0NDg1ODUxMX0.j5OItoY2nrwMZh528o4GoO_SZcw5PGtNoLOQ1HmXyyo",
            "ttcsid_C7M28GP6C8J4EI5HKJ90": "1744854291599.3.1744855267052",
            "ttcsid": "1744854291601.3.1744855267064",
            "_clsk": "1w08dbv%7C1744855267830%7C6%7C1%7Ci.clarity.ms%2Fcollect",
            "__kla_id": "eyJjaWQiOiJZek5rWm1VM01qZ3Raak5rTkMwMFpqVmlMV0UyWlRNdE9UVTNOMlF6T1dFNFlqTTIiLCIkcmVmZXJyZXIiOnsidHMiOjE3NDQ3MTE0ODEsInZhbHVlIjoiIiwiZmlyc3RfcGFnZSI6Imh0dHBzOi8vd29vbGFuZC5jb20vY29sbGVjdGlvbnMvZHJlc3Nlcy1za2lydHMtanVtcHN1aXRzIn0sIiRsYXN0X3JlZmVycmVyIjp7InRzIjoxNzQ0ODU1MjcyLCJ2YWx1ZSI6IiIsImZpcnN0X3BhZ2UiOiJodHRwczovL3dvb2xhbmQuY29tL3Byb2R1Y3RzL2F1ZHJleS10ZW5jZWwtbWF4aS1kcmVzcy1jb29sLW9saXZlLWhlYXRoZXIifX0=",
            "_shopify_s": "D385EEC9-4205-44B2-9d94-2025d271e0da",
            "_shopify_sa_t": "2025-04-17T02%3A01%3A53.116Z",
            "_ga_H91MRWJPXL": "GS1.1.1744854291.7.1.1744855313.3.0.377228909",
            "_ga_S1J0K2CPRX": "GS1.1.1744854292.7.1.1744855313.0.0.0",
            "keep_alive": "eyJ2IjoxLCJ0cyI6MTc0NDg1NTMyMTgxMSwiZW52Ijp7IndkIjowLCJ1YSI6MSwiY3YiOjEsImJyIjoxfSwiYmh2Ijp7Im1hIjoxMTcsImNhIjo3LCJrYSI6MTEsInNhIjoxOCwidCI6NTUsIm5tIjoxLCJ2YyI6MX0sInNlcyI6eyJwIjoyOSwicyI6MTc0NDc2ODc2NTcwMiwiZCI6ODYwNDJ9fQ%3D%3D"
        }

    def get_driver(self, url, xpath_txt):
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        # "none" returns control right after navigation starts; the implicit
        # wait below covers the rest of the page load.
        options.page_load_strategy = "none"

        driver = webdriver.Chrome(options=options)

        driver.implicitly_wait(20)
        driver.maximize_window()

        # Retry until the target element can be located on the page.
        flag = True
        while flag:
            flag = False
            try:
                print(f'Fetching page data for {url}')
                driver.get(url)
                driver.find_element('xpath', xpath_txt)
            except Exception:
                flag = True
                print(url, 'element not located, retrying...')

        # self.writer_to_file(driver.page_source, 'w', 'utf-8')

        return driver

    def get_page_html(self, url, xpath_txt):
        # Reuse get_driver() for the shared setup-and-retry logic.
        driver = self.get_driver(url, xpath_txt)

        self.random_sleep()

        page_source = driver.page_source

        # quit() (rather than close()) ends the session and the chromedriver
        # process instead of just closing the window.
        driver.quit()

        return etree.HTML(page_source)

    def writer_to_file(self, data, mode, encoding=None):
        # Check the mode, not the encoding: encoding may be None, and binary
        # modes must not be given an encoding at all.
        if 'b' in mode:
            with open('./text.html', mode) as f:
                f.write(data)
        else:
            with open('./text.html', mode, encoding=encoding) as f:
                f.write(data)

        print('File written successfully!')

    def random_sleep(self):
        # Sleep 0.5-1.5 seconds between requests to avoid hammering the site.
        sleep(random.random() + 0.5)

    def save_csv(self, data):
        # data already contains exactly the columns in self.field_names,
        # so the row can be written directly.
        self.writer.writerow(data)

    def __del__(self):
        # The file is only opened in parse(); guard against it never running.
        if self.file is not None:
            self.file.close()

    def get_detail_html(self, url):
        print(f'Reading page data for {url}')

        # verify=False disables TLS verification; the resulting
        # InsecureRequestWarning is routed to logging by run().
        response = requests.get(url, headers=self.headers, cookies=self.cookies, verify=False)
        self.random_sleep()

        # self.writer_to_file(response.text, 'w', 'utf-8')

        return etree.HTML(response.text)

    def parse(self, url):
        # utf-8-sig writes a BOM so Excel detects the encoding correctly.
        self.file = open('wooland.csv', 'w', newline='', encoding='utf-8-sig')
        self.writer = csv.DictWriter(self.file, fieldnames=self.field_names)
        self.writer.writeheader()

        data = deepcopy(self.init_data)

        html = self.get_page_html(url, '//*[@id="slip-dresses"]/div/div[2]/product-card[1]/a/div[2]/p')
        product_type_list = html.xpath('//div[contains(@class, "product-list-wrapper")]')
        print('Number of product types:', len(product_type_list))

        for product_type in product_type_list:
            data['Type'] = product_type.xpath('.//h2[@class="collection-title heading-m custom-underline"]/a/text()')[0].strip()

            product_list = product_type.xpath('.//product-card[@class="product-card"]')
            print(f"Type {data['Type']!r} has {len(product_list)} products")

            for product in product_list:
                self.id += 1
                print(self.id, 'start')

                product_url = 'https://wooland.com' + product.xpath('.//a[@class="product-link"]/@href')[0].strip()
                html = self.get_detail_html(product_url)

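                # Each color swatch links to a separate product URL; collect
                # them so every color variant gets scraped from its own page.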
                product_color_url_elements = html.xpath(
                    '//fieldset[@data-option="color"]/div[@class="color-swatches-wrapper"]/div[@class="color-swatches"]/a')
                product_color_url_list = []
                for product_color_url_element in product_color_url_elements:
                    product_color_url_list.append(
                        'https://wooland.com' + product_color_url_element.xpath('./@data-swatch-product-url')[0].strip())

                for product_color_url in product_color_url_list:
                    driver = None
                    size_elements = []

                    flag = True
                    while flag:
                        flag = False

                        # Quit any driver left over from a failed attempt so
                        # retries do not leak browser sessions.
                        if driver is not None:
                            driver.quit()
                        driver = self.get_driver(product_color_url, '//image-zoom/img')

                        size_elements = driver.find_elements('xpath', '//fieldset[@data-option="size"]/label[not(contains(@class, "hidden"))]')
                        if len(size_elements) == 0:
                            flag = True
                            print('No sizes found, retrying')

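                    # Fill in the variant-level fields from the rendered page.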
                    data['Handle'] = product_color_url.split('/')[-1]
                    data['Title'] = driver.find_element('xpath', '//h1[@class="heading-m title"]').text.strip()
                    data['Body (HTML)'] = driver.find_element('xpath', '//small[@class="fabric-description rich-text"]').text.strip()
                    data['Option1 Value'] = driver.find_element('xpath', '//legend/span[@class="color-name"]').text.strip()
                    data['Variant Price'] = driver.find_element('xpath', '//div[@class="price-wrapper"]/h2').text.strip().strip('$')

                    try:
                        data['Variant Compare At Price'] = driver.find_element('xpath', '//div[@class="price-wrapper"]/s').text.strip().strip('$')
                    except NoSuchElementException:
                        # No <s> element means the product is not discounted.
                        data['Variant Compare At Price'] = ""

                    data['Collection'] = data['Type']

                    print('Number of sizes:', len(size_elements))
                    for size_element in size_elements:
                        data['Option2 Value'] = size_element.text.strip()

                        img_url_elements = driver.find_elements('xpath', '//image-zoom/img')
                        print('Number of images:', len(img_url_elements))

                        # The first image lives on the variant row itself.
                        data['Image Src'] = img_url_elements[0].get_attribute('src').strip()
                        data['Image Position'] = 1

                        self.save_csv(data)
                        print(data)

                        # Remaining images become image-only rows that share
                        # the Handle, per Shopify's CSV import convention.
                        for i in range(1, len(img_url_elements)):
                            temp_data = deepcopy(self.init_data)

                            temp_data['Handle'] = data['Handle']
                            temp_data['Image Src'] = img_url_elements[i].get_attribute('src').strip()
                            temp_data['Image Position'] = i + 1

                            self.save_csv(temp_data)
                            print(temp_data)

                    driver.quit()

                print(self.id, 'done')

    def run(self):
        # Route warnings (e.g. urllib3's InsecureRequestWarning from
        # verify=False) through logging instead of stderr.
        logging.captureWarnings(True)
        self.parse(self.url)

if __name__ == '__main__':
    wooland = Wooland()
    wooland.run()
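
Once a run completes, the output can be spot-checked before importing it into Shopify. Below is a minimal sketch (not part of the scraper) that assumes wooland.csv sits in the working directory; it splits rows into variant rows and image-only rows by whether Title is populated:

import csv

# Split the rows the scraper wrote: variant rows carry a Title, while
# image-only rows carry just a Handle and an Image Src.
with open('wooland.csv', newline='', encoding='utf-8-sig') as f:
    rows = list(csv.DictReader(f))

variant_rows = [row for row in rows if row['Title']]
image_rows = [row for row in rows if not row['Title'] and row['Image Src']]
print(f'{len(rows)} rows: {len(variant_rows)} variant, {len(image_rows)} image-only')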