Dependencies
- The allure command-line tool and the allure-pytest plugin must be installed.
- pytest-playwright must be upgraded to version 0.3.0; this version supports the following options:
Playwright:
--browser={chromium,firefox,webkit}
Browser engine which should be used
--headed Run tests in headed mode.
--browser-channel=BROWSER_CHANNEL
Browser channel to be used.
--slowmo=SLOWMO Run tests with slow mo
--device=DEVICE Device to be emulated.
--output=OUTPUT Directory for artifacts produced by tests, defaults to test-results.
--tracing={on,off,retain-on-failure}
Whether to record a trace for each test.
--video={on,off,retain-on-failure}
Whether to record video for each test.
--screenshot={on,off,only-on-failure}
Whether to automatically capture a screenshot after each test.
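For reference, a minimal installation sketch (assuming pip is available; the Allure command-line tool itself can also be installed via Homebrew, Scoop, or a zip download, depending on your platform):

pip install allure-pytest
pip install pytest-playwright==0.3.0
npm install -g allure-commandline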
Modifying the pytest_playwright.py file
The point of modifying this file is to be able to run:

pytest --video=on --screenshot=on --alluredir=./report/xml

During execution this command records a video and takes a screenshot for every test case, and writes the execution data to the xml directory. Without modifying the pytest_playwright.py script, however, the Allure report cannot use the recorded videos and screenshots. To make that data available in the report, make the following changes to pytest_playwright.py:
1. Go to the pytest_playwright installation directory (typically under your environment's site-packages).
2. Edit pytest_playwright.py: add import allure at the top, then attach the screenshot and video files inside the context fixture teardown:
allure.attach.file(
    screenshot_path,
    name=f"{request.node.name}-{human_readable_status}-{index+1}",
    attachment_type=allure.attachment_type.PNG,
)
allure.attach.file(
    filepath,
    name=f"{request.node.name}-{human_readable_status}-{index+1}",
    attachment_type=allure.attachment_type.WEBM,
)
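As an aside, allure.attach.file is the standard allure-pytest API for adding a file to a report. A minimal, self-contained sketch of the same call used directly inside a test (the URL, file name, and test name here are purely illustrative, not part of the plugin change):

import allure
from playwright.sync_api import Page


def test_attach_example(page: Page) -> None:
    page.goto("https://example.com")  # illustrative URL
    screenshot_path = "example.png"  # hypothetical file name
    page.screenshot(path=screenshot_path)
    # Attach the screenshot so it appears in the Allure report for this test.
    allure.attach.file(
        screenshot_path,
        name="example-screenshot",
        attachment_type=allure.attachment_type.PNG,
    )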
After making these three changes (the import allure at the top plus the two allure.attach.file calls), run the tests again and regenerate the Allure report:

pytest --video=on --screenshot=on --alluredir=./report/xml
allure generate --clean ./report/xml -o ./report/html
allure open ./report/html

The report now shows the screenshots and videos in each test's Tear down context:
Attachment
The full modified pytest_playwright.py file:
import shutil
import os
import sys
import warnings
import allure
from typing import Any, Callable, Dict, Generator, List, Optional
import pytest
from playwright.sync_api import (
Browser,
BrowserContext,
BrowserType,
Error,
Page,
Playwright,
sync_playwright,
)
from slugify import slugify
import tempfile
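# Session-wide temporary directory: videos are recorded here first, then copied
# into the --output folder (and attached to the Allure report) when a test's
# browser context is torn down.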
artifacts_folder = tempfile.TemporaryDirectory(prefix="playwright-pytest-")
@pytest.fixture(scope="session", autouse=True)
def delete_output_dir(pytestconfig: Any) -> None:
output_dir = pytestconfig.getoption("--output")
if os.path.exists(output_dir):
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
def pytest_generate_tests(metafunc: Any) -> None:
if "browser_name" in metafunc.fixturenames:
browsers = metafunc.config.option.browser or ["chromium"]
metafunc.parametrize("browser_name", browsers, scope="session")
def pytest_configure(config: Any) -> None:
config.addinivalue_line(
"markers", "skip_browser(name): mark test to be skipped a specific browser"
)
config.addinivalue_line(
"markers", "only_browser(name): mark test to run only on a specific browser"
)
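# Store each test phase's result (rep_setup/rep_call/rep_teardown) on the item,
# so that fixtures can check request.node.rep_call.failed, which drives the
# retain-on-failure behaviour below.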
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Any) -> Generator[None, Any, None]:
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
def _get_skiplist(item: Any, values: List[str], value_name: str) -> List[str]:
skipped_values: List[str] = []
only_marker = item.get_closest_marker(f"only_{value_name}")
if only_marker:
skipped_values = values
skipped_values.remove(only_marker.args[0])
skip_marker = item.get_closest_marker(f"skip_{value_name}")
if skip_marker:
skipped_values.append(skip_marker.args[0])
return skipped_values
def pytest_runtest_setup(item: Any) -> None:
if not hasattr(item, "callspec"):
return
browser_name = item.callspec.params.get("browser_name")
if not browser_name:
return
skip_browsers_names = _get_skiplist(
item, ["chromium", "firefox", "webkit"], "browser"
)
if browser_name in skip_browsers_names:
pytest.skip("skipped for this browser: {}".format(browser_name))
VSCODE_PYTHON_EXTENSION_ID = "ms-python.python"
@pytest.fixture(scope="session")
def browser_type_launch_args(pytestconfig: Any) -> Dict:
launch_options = {}
headed_option = pytestconfig.getoption("--headed")
if headed_option:
launch_options["headless"] = False
elif VSCODE_PYTHON_EXTENSION_ID in sys.argv[0] and _is_debugger_attached():
launch_options["headless"] = False
browser_channel_option = pytestconfig.getoption("--browser-channel")
if browser_channel_option:
launch_options["channel"] = browser_channel_option
slowmo_option = pytestconfig.getoption("--slowmo")
if slowmo_option:
launch_options["slow_mo"] = slowmo_option
return launch_options
def _is_debugger_attached() -> bool:
pydevd = sys.modules.get("pydevd")
if not pydevd or not hasattr(pydevd, "get_global_debugger"):
return False
debugger = pydevd.get_global_debugger()
if not debugger or not hasattr(debugger, "is_attached"):
return False
return debugger.is_attached()
def _build_artifact_test_folder(
pytestconfig: Any, request: pytest.FixtureRequest, folder_or_file_name: str
) -> str:
output_dir = pytestconfig.getoption("--output")
return os.path.join(output_dir, slugify(request.node.nodeid), folder_or_file_name)
@pytest.fixture(scope="session")
def browser_context_args(
pytestconfig: Any,
playwright: Playwright,
device: Optional[str],
) -> Dict:
context_args = {}
if device:
context_args.update(playwright.devices[device])
base_url = pytestconfig.getoption("--base-url")
if base_url:
context_args["base_url"] = base_url
video_option = pytestconfig.getoption("--video")
capture_video = video_option in ["on", "retain-on-failure"]
if capture_video:
context_args["record_video_dir"] = artifacts_folder.name
return context_args
@pytest.fixture(scope="session")
def playwright() -> Generator[Playwright, None, None]:
pw = sync_playwright().start()
yield pw
pw.stop()
@pytest.fixture(scope="session")
def browser_type(playwright: Playwright, browser_name: str) -> BrowserType:
return getattr(playwright, browser_name)
@pytest.fixture(scope="session")
def launch_browser(
browser_type_launch_args: Dict,
browser_type: BrowserType,
) -> Callable[..., Browser]:
def launch(**kwargs: Dict) -> Browser:
launch_options = {**browser_type_launch_args, **kwargs}
browser = browser_type.launch(**launch_options)
return browser
return launch
@pytest.fixture(scope="session")
def browser(launch_browser: Callable[[], Browser]) -> Generator[Browser, None, None]:
browser = launch_browser()
yield browser
browser.close()
artifacts_folder.cleanup()
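# Together with the `import allure` at the top of the file, the two
# allure.attach.file calls in this fixture are the three changes made to the
# stock pytest-playwright 0.3.0 plugin.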
@pytest.fixture
def context(
browser: Browser,
browser_context_args: Dict,
pytestconfig: Any,
request: pytest.FixtureRequest,
) -> Generator[BrowserContext, None, None]:
pages: List[Page] = []
context = browser.new_context(**browser_context_args)
context.on("page", lambda page: pages.append(page))
tracing_option = pytestconfig.getoption("--tracing")
capture_trace = tracing_option in ["on", "retain-on-failure"]
if capture_trace:
context.tracing.start(
name=slugify(request.node.nodeid),
screenshots=True,
snapshots=True,
sources=True,
)
yield context
failed = request.node.rep_call.failed if hasattr(request.node, "rep_call") else True
if capture_trace:
retain_trace = tracing_option == "on" or (
failed and tracing_option == "retain-on-failure"
)
if retain_trace:
trace_path = _build_artifact_test_folder(pytestconfig, request, "trace.zip")
context.tracing.stop(path=trace_path)
else:
context.tracing.stop()
screenshot_option = pytestconfig.getoption("--screenshot")
capture_screenshot = screenshot_option == "on" or (
failed and screenshot_option == "only-on-failure"
)
if capture_screenshot:
for index, page in enumerate(pages):
human_readable_status = "failed" if failed else "finished"
screenshot_path = _build_artifact_test_folder(
pytestconfig, request, f"test-{human_readable_status}-{index+1}.png"
)
try:
page.screenshot(timeout=5000, path=screenshot_path)
                allure.attach.file(
                    screenshot_path,
                    name=f"{request.node.name}-{human_readable_status}-{index+1}",
                    attachment_type=allure.attachment_type.PNG,
                )
except Error:
pass
context.close()
video_option = pytestconfig.getoption("--video")
preserve_video = video_option == "on" or (
failed and video_option == "retain-on-failure"
)
    if preserve_video:
        # Define the status label and index here as well, so video attachment
        # still works when the screenshot branch above did not run (the
        # original reused variables that only the screenshot loop defined).
        human_readable_status = "failed" if failed else "finished"
        for index, page in enumerate(pages):
            video = page.video
            if not video:
                continue
            try:
                video_path = video.path()
                file_name = os.path.basename(video_path)
                filepath = _build_artifact_test_folder(pytestconfig, request, file_name)
                video.save_as(path=filepath)
                allure.attach.file(
                    filepath,
                    name=f"{request.node.name}-{human_readable_status}-{index+1}",
                    attachment_type=allure.attachment_type.WEBM,
                )
            except Error:
                pass
except Error:
pass
@pytest.fixture
def page(context: BrowserContext) -> Generator[Page, None, None]:
page = context.new_page()
yield page
@pytest.fixture(scope="session")
def is_webkit(browser_name: str) -> bool:
return browser_name == "webkit"
@pytest.fixture(scope="session")
def is_firefox(browser_name: str) -> bool:
return browser_name == "firefox"
@pytest.fixture(scope="session")
def is_chromium(browser_name: str) -> bool:
return browser_name == "chromium"
@pytest.fixture(scope="session")
def browser_name(pytestconfig: Any) -> Optional[str]:
browser_names = pytestconfig.getoption("--browser")
if len(browser_names) == 0:
return "chromium"
if len(browser_names) == 1:
return browser_names[0]
warnings.warn(
"When using unittest.TestCase specifying multiple browsers is not supported"
)
return browser_names[0]
@pytest.fixture(scope="session")
def browser_channel(pytestconfig: Any) -> Optional[str]:
return pytestconfig.getoption("--browser-channel")
@pytest.fixture(scope="session")
def device(pytestconfig: Any) -> Optional[str]:
return pytestconfig.getoption("--device")
def pytest_addoption(parser: Any) -> None:
group = parser.getgroup("playwright", "Playwright")
group.addoption(
"--browser",
action="append",
default=[],
help="Browser engine which should be used",
choices=["chromium", "firefox", "webkit"],
)
group.addoption(
"--headed",
action="store_true",
default=False,
help="Run tests in headed mode.",
)
group.addoption(
"--browser-channel",
action="store",
default=None,
help="Browser channel to be used.",
)
group.addoption(
"--slowmo",
default=0,
type=int,
help="Run tests with slow mo",
)
group.addoption(
"--device", default=None, action="store", help="Device to be emulated."
)
group.addoption(
"--output",
default="test-results",
help="Directory for artifacts produced by tests, defaults to test-results.",
)
group.addoption(
"--tracing",
default="off",
choices=["on", "off", "retain-on-failure"],
help="Whether to record a trace for each test.",
)
group.addoption(
"--video",
default="off",
choices=["on", "off", "retain-on-failure"],
help="Whether to record video for each test.",
)
group.addoption(
"--screenshot",
default="off",
choices=["on", "off", "only-on-failure"],
help="Whether to automatically capture a screenshot after each test.",
)