From 828bdeedcec039d84dfb28f81bb1766305a6cd5f Mon Sep 17 00:00:00 2001
From: supercoder-dev
Date: Wed, 12 Jun 2024 14:28:33 +0530
Subject: [PATCH 1/3] Update smart_scraper_graph.py

---
 scrapegraphai/graphs/smart_scraper_graph.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scrapegraphai/graphs/smart_scraper_graph.py b/scrapegraphai/graphs/smart_scraper_graph.py
index ee230695..6192b437 100644
--- a/scrapegraphai/graphs/smart_scraper_graph.py
+++ b/scrapegraphai/graphs/smart_scraper_graph.py
@@ -65,8 +65,10 @@ def _create_graph(self) -> BaseGraph:
             output=["doc", "link_urls", "img_urls"],
             node_config={
                 "loader_kwargs": self.config.get("loader_kwargs", {}),
+                "headless": self.config.get("headless", True)  # Ensure headless flag is passed
             }
         )
+        logging.info("FetchNode configured with headless: %s", self.config.get("headless", True))
         parse_node = ParseNode(
             input="doc",
             output=["parsed_doc"],
@@ -117,4 +119,4 @@ def run(self) -> str:
 
         inputs = {"user_prompt": self.prompt, self.input_key: self.source}
         self.final_state, self.execution_info = self.graph.execute(inputs)
-        return self.final_state.get("answer", "No answer found.")
\ No newline at end of file
+        return self.final_state.get("answer", "No answer found.")

From 879c94a2b53ff5ad6fffffb9efe213c554a9b78e Mon Sep 17 00:00:00 2001
From: supercoder-dev
Date: Wed, 12 Jun 2024 14:30:02 +0530
Subject: [PATCH 2/3] Update cleanup_html.py

---
 scrapegraphai/utils/cleanup_html.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/scrapegraphai/utils/cleanup_html.py b/scrapegraphai/utils/cleanup_html.py
index d9398c0f..ea008fc5 100644
--- a/scrapegraphai/utils/cleanup_html.py
+++ b/scrapegraphai/utils/cleanup_html.py
@@ -24,6 +24,12 @@ def cleanup_html(html_content: str, base_url: str) -> str:
 
     This function is particularly useful for preparing HTML content for environments where bandwidth usage needs to be minimized.
""" + import logging + logging.basicConfig(level=logging.DEBUG) + + # Add logging to capture the HTML content before parsing + logging.debug(f'HTML content before parsing: {html_content}') + soup = BeautifulSoup(html_content, 'html.parser') # Title Extraction @@ -57,9 +63,9 @@ def cleanup_html(html_content: str, base_url: str) -> str: if body_content: # Minify the HTML within the body tag minimized_body = minify(str(body_content)) - return title, minimized_body, link_urls, image_urls - # return "Title: " + title + ", Body: " + minimized_body + ", Links: " + str(link_urls) + ", Images: " + str(image_urls) - # throw an error if no body content is found - raise ValueError("No HTML body content found, please try setting the 'headless' flag to False in the graph configuration.") \ No newline at end of file + # Add fallback mechanism + else: + logging.error(f'No body content found in HTML: {html_content}') + raise ValueError(f"No HTML body content found, please try setting the 'headless' flag to False in the graph configuration. 
HTML content: {html_content}") From d0e300af7265794beaa23426128c07364b4f76a2 Mon Sep 17 00:00:00 2001 From: supercoder-dev Date: Wed, 12 Jun 2024 14:32:01 +0530 Subject: [PATCH 3/3] Update fetch_node.py --- scrapegraphai/nodes/fetch_node.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scrapegraphai/nodes/fetch_node.py b/scrapegraphai/nodes/fetch_node.py index 5d2b575f..dbdd9925 100644 --- a/scrapegraphai/nodes/fetch_node.py +++ b/scrapegraphai/nodes/fetch_node.py @@ -131,6 +131,9 @@ def execute(self, state): pass elif not source.startswith("http"): + self.logger.info(f"Fetching local HTML content from: {source}") + if not source.strip(): + raise ValueError("No HTML body content found in the local source.") title, minimized_body, link_urls, image_urls = cleanup_html(source, source) parsed_content = f"Title: {title}, Body: {minimized_body}, Links: {link_urls}, Images: {image_urls}" compressed_document = [ @@ -138,8 +141,11 @@ def execute(self, state): ] elif self.useSoup: + self.logger.info(f"Fetching HTML content using requests from: {source}") response = requests.get(source) if response.status_code == 200: + if not response.text.strip(): + raise ValueError("No HTML body content found in the response.") title, minimized_body, link_urls, image_urls = cleanup_html( response.text, source ) @@ -151,6 +157,7 @@ def execute(self, state): ) else: + self.logger.info(f"Fetching HTML content using ChromiumLoader from: {source}") loader_kwargs = {} if self.node_config is not None: @@ -159,6 +166,9 @@ def execute(self, state): loader = ChromiumLoader([source], headless=self.headless, **loader_kwargs) document = loader.load() + if not document or not document[0].page_content.strip(): + raise ValueError("No HTML body content found in the document fetched by ChromiumLoader.") + title, minimized_body, link_urls, image_urls = cleanup_html( str(document[0].page_content), source )