diff --git a/tests/e2e-test/pages/adminPage.py b/tests/e2e-test/pages/adminPage.py
index e345c5633..ff3efc55d 100644
--- a/tests/e2e-test/pages/adminPage.py
+++ b/tests/e2e-test/pages/adminPage.py
@@ -28,3 +28,8 @@ def __init__(self, page):
     def click_delete_data_tab(self):
         self.page.locator(self.DELETE_DATA_TAB).click()
         self.page.wait_for_timeout(5000)
+
+    def assert_admin_page_title(self, expected_title):
+        """Assert that the admin page heading matches the expected title text."""
+        actual_title = self.page.locator(self.ADMIN_PAGE_TITLE).text_content()
+        assert expected_title == actual_title, f"Expected title: {expected_title}, Found: {actual_title}"
diff --git a/tests/e2e-test/pages/webUserPage.py b/tests/e2e-test/pages/webUserPage.py
index 7abcc8ae6..1faabd089 100644
--- a/tests/e2e-test/pages/webUserPage.py
+++ b/tests/e2e-test/pages/webUserPage.py
@@ -1,7 +1,11 @@
+import logging
+
 from base.base import BasePage
 from playwright.sync_api import expect
+
+logger = logging.getLogger(__name__)
 
 
 class WebUserPage(BasePage):
     WEB_PAGE_TITLE = "//h3[text()='Azure AI']"
     TYPE_QUESTION_TEXT_AREA = "//textarea[contains(@placeholder,'Type a new question')]"
@@ -22,6 +26,9 @@ class WebUserPage(BasePage):
     TOGGLE_CITATIONS_LIST = "[data-testid='toggle-citations-list']"
     CITATIONS_CONTAINER = "[data-testid='citations-container']"
     CITATION_BLOCK = "[data-testid='citation-block']"
+    SHOW_CHAT_HISTORY_BUTTON = "//span[text()='Show Chat History']"
+    HIDE_CHAT_HISTORY_BUTTON = "//span[text()='Hide Chat History']"
+    CHAT_HISTORY_ITEM = "//div[@aria-label='chat history item']"
 
     def __init__(self, page):
         self.page = page
@@ -53,15 +60,23 @@ def click_clear_chat_icon(self):
         self.page.locator(self.CLEAR_CHAT_ICON).click()
 
     def show_chat_history(self):
-        self.page.locator(self.SHOW_CHAT_HISTORY).click()
-        self.page.wait_for_load_state("networkidle")
-        self.page.wait_for_timeout(2000)
-        expect(self.page.locator(self.CHAT_HISTORY_NAME)).to_be_visible()
+        """Click to show chat history if the button is visible."""
+        show_button = self.page.locator(self.SHOW_CHAT_HISTORY_BUTTON)
+        if show_button.is_visible():
+            show_button.click()
+            self.page.wait_for_timeout(2000)
+            expect(self.page.locator(self.CHAT_HISTORY_ITEM)).to_be_visible()
+        else:
+            logger.info("'Show' button not visible. Chat history may already be shown.")
 
     def close_chat_history(self):
-        self.page.locator(self.CHAT_CLOSE_ICON).click()
-        self.page.wait_for_load_state("networkidle")
-        self.page.wait_for_timeout(2000)
+        """Click to close chat history if visible."""
+        hide_button = self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON)
+        if hide_button.is_visible():
+            hide_button.click()
+            self.page.wait_for_timeout(2000)
+        else:
+            logger.info("Hide button not visible. Chat history might already be closed.")
 
     def delete_chat_history(self):
         self.page.locator(self.SHOW_CHAT_HISTORY).click()
@@ -69,8 +84,8 @@ def delete_chat_history(self):
         chat_history = self.page.locator("//span[contains(text(),'No chat history.')]")
         if chat_history.is_visible():
             self.page.wait_for_load_state("networkidle")
-            self.page.wait_for_timeout(2000)
-            self.page.get_by_label("hide button").click()
+            self.page.locator("button[title='Hide']").wait_for(state="visible", timeout=5000)
+            self.page.locator("button[title='Hide']").click()
         else:
             self.page.locator(self.CHAT_HISTORY_OPTIONS).click()
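Note: show_chat_history() and close_chat_history() above share the same visibility-guarded click pattern. A minimal sketch of how that pattern could be factored into a single helper (the click_if_visible name and settle_ms parameter are illustrative, not part of this diff):

    from playwright.sync_api import Page

    def click_if_visible(page: Page, selector: str, settle_ms: int = 2000) -> bool:
        """Click the element if it is currently visible; return True if a click happened."""
        target = page.locator(selector)
        if target.is_visible():
            target.click()
            page.wait_for_timeout(settle_ms)  # give the panel animation time to settle
            return True
        return False

Both methods would then reduce to one call plus their logging, and the same helper could back future toggles such as the citations list.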
diff --git a/tests/e2e-test/pytest.ini b/tests/e2e-test/pytest.ini
index 76eb64fc7..ead15695a 100644
--- a/tests/e2e-test/pytest.ini
+++ b/tests/e2e-test/pytest.ini
@@ -3,4 +3,4 @@ log_cli = true
 log_cli_level = INFO
 log_file = logs/tests.log
 log_file_level = INFO
-addopts = -p no:warnings
+addopts = -p no:warnings --tb=short
diff --git a/tests/e2e-test/requirements.txt b/tests/e2e-test/requirements.txt
index 7aad0cfb2..37159fb19 100644
--- a/tests/e2e-test/requirements.txt
+++ b/tests/e2e-test/requirements.txt
@@ -3,4 +3,5 @@ pytest-reporter-html1
 python-dotenv
 pytest-check
 pytest-html
-py
\ No newline at end of file
+py
+beautifulsoup4
diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py
index 31a3bc295..55ed8f1d7 100644
--- a/tests/e2e-test/tests/conftest.py
+++ b/tests/e2e-test/tests/conftest.py
@@ -1,54 +1,98 @@
-import os
-
 import pytest
-from config.constants import *
+import os
+import io
+import logging
+import atexit
+from bs4 import BeautifulSoup
 from playwright.sync_api import sync_playwright
-from py.xml import html  # type: ignore
+from config.constants import *
 
+log_streams = {}
 
+# ---------- FIXTURE: Login and Logout Setup ----------
 @pytest.fixture(scope="session")
 def login_logout():
-    # perform login and browser close once in a session
     with sync_playwright() as p:
         browser = p.chromium.launch(headless=False, args=["--start-maximized"])
         context = browser.new_context(no_viewport=True)
         context.set_default_timeout(80000)
         page = context.new_page()
-        # Navigate to the login URL
+
+        # Load URL and wait
        page.goto(WEB_URL)
-        # Wait for the login form to appear
         page.wait_for_load_state("networkidle")
         page.wait_for_timeout(5000)
-        # login to web url with username and password
-        # login_page = LoginPage(page)
+
+        # Uncomment if authentication is needed
         # load_dotenv()
+        # login_page = LoginPage(page)
         # login_page.authenticate(os.getenv('user_name'), os.getenv('pass_word'))
+
         yield page
         browser.close()
 
-
+# ---------- HTML Report Title ----------
 @pytest.hookimpl(tryfirst=True)
 def pytest_html_report_title(report):
     report.title = "Test_Automation_Chat_with_your_Data"
 
+# ---------- Logging Setup per Test ----------
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+    stream = io.StringIO()
+    handler = logging.StreamHandler(stream)
+    handler.setLevel(logging.INFO)
+    logger = logging.getLogger()
+    logger.addHandler(handler)
+    log_streams[item.nodeid] = (handler, stream)
+
+# ---------- Logging Teardown per Test ----------
+def pytest_runtest_teardown(item):
+    # Detach the per-test handler so handlers do not accumulate on the root logger.
+    handler, _stream = log_streams.pop(item.nodeid, (None, None))
+    if handler:
+        logging.getLogger().removeHandler(handler)
 
-# Add a column for descriptions
-def pytest_html_results_table_header(cells):
-    cells.insert(1, html.th("Description"))
-
-
-def pytest_html_results_table_row(report, cells):
-    cells.insert(
-        1, html.td(report.description if hasattr(report, "description") else "")
-    )
-
-
-# Add logs and docstring to report
+# ---------- Attach Logs to HTML Report ----------
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
     report = outcome.get_result()
-    report.description = str(item.function.__doc__)
-    os.makedirs("logs", exist_ok=True)
-    extra = getattr(report, "extra", [])
-    report.extra = extra
+
+    if report.when == "call":
+        question_logs = getattr(item, "_question_logs", None)
+        if question_logs:
+            for i, (question, logs) in enumerate(question_logs.items(), start=1):
+                report.sections.append((f"Q{i:02d}: {question}", logs))
+        else:
+            log = getattr(item, "_captured_log", None)
+            if log:
+                report.sections.append(("Captured Log", log))
+
+# ---------- Optional: Clean Up Node IDs for Parametrized Prompts ----------
+def pytest_collection_modifyitems(items):
+    for item in items:
+        if hasattr(item, "callspec") and "prompt" in item.callspec.params:
+            item._nodeid = item.callspec.params["prompt"]
+
+# ---------- Rename Duration Column in HTML Report ----------
+def rename_duration_column():
+    report_path = os.path.abspath("report.html")
+    if not os.path.exists(report_path):
+        print("Report file not found, skipping column rename.")
+        return
+
+    with open(report_path, "r", encoding="utf-8") as f:
+        soup = BeautifulSoup(f, "html.parser")
+
+    headers = soup.select("table#results-table thead th")
+    for th in headers:
+        if th.text.strip() == "Duration":
+            th.string = "Execution Time"
+            break
+    else:
+        print("'Duration' column not found in report.")
+
+    with open(report_path, "w", encoding="utf-8") as f:
+        f.write(str(soup))
+
+atexit.register(rename_duration_column)
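Note: the pytest_runtest_makereport hook above reads two optional attributes off the test item: _question_logs (a dict mapping question to log text, rendered as one "Qnn: <question>" section each) and _captured_log (rendered as a single "Captured Log" section). A hypothetical test body showing the contract the hook expects (the keys and values here are placeholders):

    def test_example(request):
        # Keys become section titles in the HTML report, values become the section body.
        request.node._question_logs = {
            "first placeholder question": "log text captured while answering it",
            "second placeholder question": "log text captured while answering it",
        }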
- logger.info("Step 3: Validate Web page is loaded.") + checkbox_count = page.locator(admin_page.DELETE_CHECK_BOXES).count() + assert checkbox_count >= 1, "No files available to delete" + +def goto_web_page(page, admin_page, home_page): page.goto(WEB_URL) - home_page = WebUserPage(page) - logger.info("Step 5: Validate Chat history has been deleted.") + +def delete_chat_history(page, admin_page, home_page): home_page.delete_chat_history() - failed_questions = [] - logger.info("Step 6: Validate Golden Path prompts response") - - def ask_question_and_check(question, attempt): - home_page.wait_for_load(4000) - home_page.enter_a_question(question) - home_page.click_send_button() - home_page.validate_response_status(question) - - response_text = page.locator(home_page.ANSWER_TEXT) - response_count = response_text.count() - - if home_page.has_reference_link(): - logger.info("Step 6.1: Reference link found. Opening citation.") - home_page.click_reference_link_in_response() - logger.info("Step 6.2: Closing citation.") - home_page.close_citation() - - if response_count == 0: - return False # no response found - - response_text_content = response_text.nth(response_count - 1).text_content() - - if response_text_content == invalid_response: - print(f"[Attempt {attempt}] Invalid response({response_text_content}) for prompt: {question}") - return False - return True - - # First run through all questions - for question in questions: - if not ask_question_and_check(question, attempt=1): - failed_questions.append(question) - - # Retry failed questions once more - if failed_questions: - logger.info("Step 7: Retry failed question one more time.") - for question in failed_questions: - if not ask_question_and_check(question, attempt=2): - home_page.soft_assert( - False, - f"Failed after retry- Invalid response for prompt: {question}", - ) - - logger.info("Step 8: Validate chat history.") +# === Golden Path Step Definitions === + +golden_path_functions = [ + validate_admin_page_loaded, + validate_files_are_uploaded, + goto_web_page, + delete_chat_history, +] + +step_descriptions = [ + "Validate Admin page is loaded", + "Validate files are uploaded", + "Validate Web page is loaded", + "Delete chat history" +] + +golden_path_steps = list(zip(step_descriptions, golden_path_functions)) + +# === Golden Path Test Execution === + +@pytest.mark.parametrize("step_desc, action", golden_path_steps, ids=[desc for desc, _ in golden_path_steps]) +def test_golden_path_steps(login_logout, step_desc, action, request): + request.node._nodeid = step_desc + page = login_logout + admin_page = AdminPage(page) + home_page = WebUserPage(page) + + log_capture = io.StringIO() + handler = logging.StreamHandler(log_capture) + logger.addHandler(handler) + + logger.info(f"🟢 START: {step_desc}") + start = time.time() + + try: + result = action(page, admin_page, home_page) + if isinstance(result, tuple): + for func in result: + if callable(func): + func() + except AssertionError as e: + logger.error(f"❌ FAILED: {step_desc} - {str(e)}") + raise + finally: + duration = time.time() - start + logger.info(f"✅ END: {step_desc} | Execution Time: {duration:.2f}s") + logger.removeHandler(handler) + setattr(request.node, "_captured_log", log_capture.getvalue()) + + +# === Each Question as a Separate Test Case === + +@pytest.mark.parametrize("question", questions, ids=[f"Validate response for prompt : {q}" for q in questions]) +def test_gp_question(login_logout, question, request): + page = login_logout + home_page = WebUserPage(page) + 
request.node._nodeid = f"Validate response for prompt : {question}" + + log_capture = io.StringIO() + handler = logging.StreamHandler(log_capture) + logger.addHandler(handler) + + success = False + start_time = time.time() + + try: + for attempt in range(1, 3): + logger.info(f"[GP] [{question}] Attempt {attempt} - START") + + try: + home_page.wait_for_load(4000) + home_page.enter_a_question(question) + home_page.click_send_button() + home_page.validate_response_status(question) + + response_text = page.locator(home_page.ANSWER_TEXT) + response_count = response_text.count() + + if response_count == 0: + logger.warning(f"[GP] [{question}] No response returned.") + continue + + if home_page.has_reference_link(): + logger.info(f"[GP] [{question}] Reference link found. Opening citation.") + home_page.click_reference_link_in_response() + logger.info(f"[GP] [{question}] Closing citation.") + home_page.close_citation() + + response_content = response_text.nth(response_count - 1).text_content().strip() + + if response_content == invalid_response: + logger.warning(f"[GP] [{question}] Invalid response: {response_content}") + continue + + logger.info(f"[GP] [{question}] Valid response received.") + success = True + break + + except Exception as e: + logger.error(f"[GP] [{question}] Exception: {str(e)}") + + if not success: + pytest.fail(f"[GP] [{question}] Failed after 2 attempts.") + + finally: + duration = time.time() - start_time + logger.info(f"[GP] [{question}] Execution Time: {duration:.2f}s") + logger.removeHandler(handler) + setattr(request.node, "_captured_log", log_capture.getvalue()) + + +# === Chat History Test === + +def test_validate_chat_history(login_logout, request): + request.node._nodeid = "Validate chat history shown and closed" + page = login_logout + home_page = WebUserPage(page) + + logger.info("[FINAL] Showing chat history after all questions executed.") home_page.show_chat_history() - logger.info("Step 9: Validate chat history closed.") + + logger.info("[FINAL] Closing chat history.") home_page.close_chat_history() - home_page.assert_all() diff --git a/tests/e2e-test/tests/test_poc_chat_with_your_data.py b/tests/e2e-test/tests/test_poc_chat_with_your_data.py deleted file mode 100644 index e253d39c4..000000000 --- a/tests/e2e-test/tests/test_poc_chat_with_your_data.py +++ /dev/null @@ -1,71 +0,0 @@ -import logging - -from config.constants import * -from pages.adminPage import AdminPage -from pages.webUserPage import WebUserPage - -logger = logging.getLogger(__name__) - - -def test_golden_path_web_page_demo_script(login_logout): - """Validate Golden path test case for Chat with your Data""" - page = login_logout - page.goto(ADMIN_URL) - logger.info("Step 1: Validate Admin page is loaded.") - admin_page = AdminPage(page) - assert ( - admin_page_title == page.locator(admin_page.ADMIN_PAGE_TITLE).text_content() - ), "page title not found" - logger.info("Step 2: Validate Files are uploaded or not") - admin_page.click_delete_data_tab() - assert ( - page.locator(admin_page.DELETE_CHECK_BOXES).count() >= 1 - ), "Files are not uploaded." 
- logger.info("Step 3: Validate Web page is loaded.") - page.goto(WEB_URL) - home_page = WebUserPage(page) - logger.info("Step 5: Validate Chat history has been deleted.") - home_page.delete_chat_history() - - failed_questions = [] - logger.info("Step 6: Validate Golden Path prompts response") - - def ask_question_and_check(question, attempt): - home_page.wait_for_load(4000) - home_page.enter_a_question(question) - home_page.click_send_button() - home_page.validate_response_status(question) - - response_text = page.locator(home_page.ANSWER_TEXT) - response_count = response_text.count() - - if response_count == 0: - return False # no response found - - response_text_content = response_text.nth(response_count - 1).text_content() - - if response_text_content == invalid_response: - print(f"[Attempt {attempt}] Invalid response for prompt: {question}") - return False - return True - - # First run through all questions - for question in questions: - if not ask_question_and_check(question, attempt=1): - failed_questions.append(question) - - # Retry failed questions once more - if failed_questions: - logger.info("Step 7: Retry failed question one more time.") - for question in failed_questions: - if not ask_question_and_check(question, attempt=2): - home_page.soft_assert( - False, - f"Failed after retry- Invalid response for prompt: {question}", - ) - - logger.info("Step 8: Validate chat history.") - home_page.show_chat_history() - logger.info("Step 9: Validate chat history closed.") - home_page.close_chat_history() - home_page.assert_all()