Python爬虫之Selenium库的使用方法
程序员文章站
2023-02-19 17:43:49
selenium是一个用于web应用程序测试的工具。selenium测试直接运行在浏览器中,就像真正的用户在操作一样。支持的浏览器包括ie(7, 8, 9, 10, 11),mozilla firef...
selenium 是一个用于web应用程序测试的工具。selenium测试直接运行在浏览器中,就像真正的用户在操作一样。支持的浏览器包括ie(7, 8, 9, 10, 11),mozilla firefox,safari,google chrome,opera等。这个工具的主要功能包括:测试与浏览器的兼容性——测试你的应用程序看是否能够很好地工作在不同浏览器和操作系统之上。测试系统功能——创建回归测试检验软件功能和用户需求。支持自动录制动作和自动生成 .net、java、perl等不同语言的测试脚本。(摘自百科)
# NOTE(review): the scraped article lowercased every identifier; the names
# below are restored to the real Selenium API casing (webdriver.Chrome,
# Keys.ENTER, WebDriverWait, By.ID, ActionChains, ...) so the snippets run.
# Each section is a standalone tutorial example that drives a live browser.

# --- Basic usage: search Baidu, wait for results, dump page state ---
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

browser = webdriver.Chrome()
try:
    browser.get('https://www.baidu.com')
    search_box = browser.find_element_by_id('kw')   # renamed from `input` (shadowed builtin)
    search_box.send_keys('python')
    search_box.send_keys(Keys.ENTER)
    wait = WebDriverWait(browser, 10)
    wait.until(EC.presence_of_element_located((By.ID, 'content_left')))
    print(browser.current_url)
    print(browser.get_cookies())
    print(browser.page_source)
finally:
    browser.close()

# --- Declaring browser objects (one driver class per browser) ---
from selenium import webdriver

browser = webdriver.Chrome()
browser = webdriver.Firefox()
browser = webdriver.Edge()
browser = webdriver.PhantomJS()   # deprecated in Selenium 3.8+; prefer headless Chrome/Firefox
browser = webdriver.Safari()

# --- Visiting a page ---
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
print(browser.page_source)
browser.close()

# --- Finding elements ---
# Single element
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
# The following three lookups are equivalent
input_first = browser.find_element_by_id('q')
input_second = browser.find_element_by_css_selector('#q')
input_third = browser.find_element_by_xpath('//*[@id="q"]')
print(input_first)
print(input_second)
print(input_third)
browser.close()

from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
input_first = browser.find_element(By.ID, 'q')   # generic form of the shortcut above
print(input_first)
browser.close()

# Multiple elements
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
lis = browser.find_elements_by_css_selector('.service-bd li')
print(lis)
browser.close()

from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
lis = browser.find_elements(By.CSS_SELECTOR, '.service-bd li')
print(lis)
browser.close()

# --- Element interaction: call interaction methods on a located element ---
from selenium import webdriver
import time

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
search_box = browser.find_element_by_id('q')
search_box.send_keys('笔记本电脑')
time.sleep(5)
search_box.clear()
search_box.send_keys('ipad')
button = browser.find_element_by_class_name('btn-search')
# button.click()

# --- Action chains: queue actions and execute them in sequence ---
from selenium import webdriver
from selenium.webdriver import ActionChains

browser = webdriver.Chrome()
url = "http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable"
browser.get(url)
browser.switch_to.frame('iframeResult')   # frame id is camelCase on runoob
source = browser.find_element_by_css_selector('#draggable')
target = browser.find_element_by_css_selector('#droppable')
actions = ActionChains(browser)
actions.drag_and_drop(source, target)
actions.perform()

# --- Executing JavaScript ---
from selenium import webdriver

browser = webdriver.Chrome()
browser.get("https://www.zhihu.com/explore")
# JavaScript is case-sensitive: scrollTo / scrollHeight
browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
browser.execute_script('alert("to bottom")')

# --- Getting element information ---
# Attributes
from selenium import webdriver

browser = webdriver.Chrome()
browser.get("https://www.zhihu.com/explore")
logo = browser.find_element_by_id('zh-top-link-logo')
print(logo)
print(logo.get_attribute('class'))

# Text content
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.zhihu.com/explore')
question_btn = browser.find_element_by_class_name('zu-top-add-question')
print(question_btn.text)

# id, location, tag name and size
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.zhihu.com/explore')
question_btn = browser.find_element_by_class_name('zu-top-add-question')
print(question_btn.id)
print(question_btn.location)
print(question_btn.tag_name)
print(question_btn.size)

# --- Frames: lookups are scoped to the current frame ---
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException

browser = webdriver.Chrome()
browser.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
browser.switch_to.frame('iframeResult')
source = browser.find_element_by_css_selector('#draggable')
print(source)
try:
    # The logo lives in the parent document, so this lookup fails inside the frame
    logo = browser.find_element_by_class_name('logo')
except NoSuchElementException:
    print('no logo')
browser.switch_to.parent_frame()
logo = browser.find_element_by_class_name('logo')
print(logo)
print(logo.text)

# --- Waits ---
# Implicit wait: when an element is not yet in the DOM, the driver keeps
# polling for up to the configured time before raising (default is 0).
from selenium import webdriver

browser = webdriver.Chrome()
browser.implicitly_wait(10)
browser.get('https://www.zhihu.com/explore')
question_btn = browser.find_element_by_class_name('zu-top-add-question')
print(question_btn)

# Explicit wait: block until a specific expected condition holds
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get('https://www.taobao.com/')
wait = WebDriverWait(browser, 10)
search_box = wait.until(EC.presence_of_element_located((By.ID, 'q')))
button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn-search')))
print(search_box, button)

# --- History navigation: back and forward ---
import time
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.get('https://www.taobao.com')
browser.get('https://www.python.org')
browser.back()
time.sleep(5)
browser.forward()
browser.close()

# --- Cookies: read, add, clear ---
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.zhihu.com/explore')
print(browser.get_cookies())
browser.add_cookie({'name': 'name', 'domain': 'www.zhihu.com', 'value': 'germey'})
print(browser.get_cookies())
browser.delete_all_cookies()
print(browser.get_cookies())

# --- Tab management ---
import time
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.execute_script('window.open()')
print(browser.window_handles)
# switch_to.window replaces the deprecated switch_to_window
browser.switch_to.window(browser.window_handles[1])
browser.get('https://www.taobao.com')
time.sleep(5)
browser.switch_to.window(browser.window_handles[0])
browser.get('https://python.org')

# --- Exception handling ---
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.find_element_by_id('hello')   # deliberately raises NoSuchElementException

from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException

browser = webdriver.Chrome()
try:
    browser.get('https://www.baidu.com')
except TimeoutException:
    print('time out')
try:
    browser.find_element_by_id('hello')
except NoSuchElementException:
    print('no element')
finally:
    browser.close()
以上就是python爬虫之selenium库的使用方法的详细内容,更多关于python selenium库的使用的资料请关注其它相关文章!
上一篇: AngularJS路由切换实现方法分析
推荐阅读