diff --git a/README.md b/README.md
index eb50b8c..19a2c92 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
-
+
@@ -24,6 +24,10 @@
> You can also contact the developer via e-mail: osint.technologies@gmail.com
+***[Download DPULSE stable ZIP archive (with latest stable changes)](https://github.com/OSINT-TECHNOLOGIES/dpulse/archive/refs/heads/main.zip)***
+
+***[Download DPULSE rolling ZIP archive (with latest developer commit)](https://github.com/OSINT-TECHNOLOGIES/dpulse/archive/refs/heads/rolling.zip)***
+
# About DPULSE
@@ -63,36 +67,39 @@ Since DPULSE repository is using Poetry* to manage dependencies, it is higly rec
_* Poetry is a tool for dependency management and packaging in Python. It can be installed anywhere with the `pip install poetry` command; you can find more instructions on the [Poetry official documentation page](https://python-poetry.org/docs/#ci-recommendations)_
-### First way (recommended on every OS, using Poetry)
+### First way (the simplest way)
+
+Just download DPULSE using the fast-access links at the top of this README:
+
+
+
+Then just unpack the downloaded archive, open a terminal in the DPULSE root folder and run `pip install -r requirements.txt` to install the requirements. Then type `python dpulse.py` in the terminal, and the program will start.
+
+If `pip install -r requirements.txt` doesn't work, just use the `poetry install` command instead. After that, start DPULSE with `poetry run python dpulse.py`.
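+
+In short, from the DPULSE root folder:
+
+```
+pip install -r requirements.txt
+python dpulse.py
+```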
+
+### Second way (the most correct way)
-Use this set of commands to use recommended way of DPULSE installation:
+Use this set of commands to install the stable version of DPULSE:
```
git clone https://github.com/OSINT-TECHNOLOGIES/dpulse
cd dpulse
poetry install
```
-Then you simply start DPULSE using `poetry run python dpulse.py`
-### Second way (recommended on Windows systems, without using Poetry)
-
-Simply download zip archive from assets in releases bookmark, just right here:
-
-
-
-Then you just unpack the archive, open terminal in DPULSE root folder and use `pip install -r requirements.txt` command to install requirements. Then type `python dpulse.py` in terminal, and that's where program starts.
-
-You also can use this installation way with some different approach using this set of commands:
+Use this set of commands to install the rolling version of DPULSE:
```
- git clone https://github.com/OSINT-TECHNOLOGIES/dpulse
+ git clone --branch rolling --single-branch https://github.com/OSINT-TECHNOLOGIES/dpulse.git
cd dpulse
- pip install -r requirements.txt
+ poetry install
```
+After installation, simply start DPULSE with `poetry run python dpulse.py`.
+
## _Other ways_
-### Third way (using pip)
+### Third way (using pip manager)
You can also install DPULSE using the pip manager. It'll install DPULSE and all necessary dependencies in one command: `pip install dpulse`. Then just locate the DPULSE root folder and type `python dpulse.py` to start the program.
@@ -100,13 +107,13 @@ You also can install DPULSE using pip manager. It'll install DPULSE and necesser
DPULSE has two pre-written installation scripts, one for Windows (installer.bat) and one for Linux (installer.sh). You can use them to clone the repository and install dependencies, or for dependency installation only. Keep in mind that installer.bat (the Windows installer) requires Git to be installed in order to clone the repository.
-### Windows installer usage
+### Windows installer usage
You can start installer.bat from the terminal by typing `./installer.bat`. Then choose the menu item you want to run.
If you have problems starting installer.bat, try running it in an admin terminal.
-### Linux installer usage
+### Linux installer usage
To start installer.sh on Linux, follow these steps in your terminal:
@@ -122,11 +129,11 @@ If you have problems with starting installer.sh, you should try to use `dos2unix
### You can start DPULSE and see the main menu on the screen by running one of the recommended commands in the DPULSE root folder. Don't forget to install all requirements before starting DPULSE
-
+
### After choosing the first menu item, you will be able to enter the target's URL and a case comment, and then you will see the scanning progress
-
+
### Finally, DPULSE will create a report folder whose name contains the case name (basically the target's URL) and the date and time of the scan. All report folders are stored in the DPULSE root folder
diff --git a/apis/__init__.py b/apis/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/apis/__init__.py
@@ -0,0 +1 @@
+
diff --git a/datagather_modules/data_assembler.py b/datagather_modules/data_assembler.py
index da05385..bf0d4b1 100644
--- a/datagather_modules/data_assembler.py
+++ b/datagather_modules/data_assembler.py
@@ -1,9 +1,10 @@
import sys
sys.path.append('service')
sys.path.append('pagesearch')
+sys.path.append('dorking')
import crawl_processor as cp
-import dorking_processor as dp
+import dorking_handler as dp
import networking_processor as np
from pagesearch_main import normal_search, sitemap_inspection_search
from logs_processing import logging
@@ -20,6 +21,18 @@
print(Fore.RED + "Import error appeared. Reason: {}".format(e) + Style.RESET_ALL)
sys.exit()
+def establishing_dork_db_connection(dorking_flag):
+ if dorking_flag == 'basic':
+ dorking_db_path = 'dorking//basic_dorking.db'
+ table = 'basic_dorks'
+ elif dorking_flag == 'iot':
+ dorking_db_path = 'dorking//iot_dorking.db'
+ table = 'iot_dorks'
+ elif dorking_flag == 'files':
+ dorking_db_path = 'dorking//files_dorking.db'
+ table = 'files_dorks'
+ else:
+ raise ValueError(f"Unsupported dorking flag: {dorking_flag}")
+ return dorking_db_path, table
+
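+# Illustrative sketch (comments only, hypothetical flag value): the helper above
+# is consumed together with dorking_handler like so:
+#   dorking_db_path, table = establishing_dork_db_connection('iot')
+#   queries = dp.get_dorking_query(short_domain, dorking_db_path, table)
+#   dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, queries)
+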
class DataProcessing():
def report_preprocessing(self, short_domain, report_file_type):
report_ctime = datetime.now().strftime('%d-%m-%Y, %H:%M:%S')
@@ -42,7 +55,7 @@ def report_preprocessing(self, short_domain, report_file_type):
os.makedirs(report_folder, exist_ok=True)
return casename, db_casename, db_creation_date, robots_filepath, sitemap_filepath, sitemap_links_filepath, report_file_type, report_folder, files_ctime, report_ctime
- def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, keywords, keywords_flag):
+ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, keywords, keywords_flag, dorking_flag):
casename, db_casename, db_creation_date, robots_filepath, sitemap_filepath, sitemap_links_filepath, report_file_type, report_folder, ctime, report_ctime = self.report_preprocessing(short_domain, report_file_type)
logging.info(f'### THIS LOG PART FOR {casename} CASE, TIME: {ctime} STARTS HERE')
print(Fore.GREEN + "Started scanning domain" + Style.RESET_ALL)
@@ -83,11 +96,6 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
web_servers, cms, programming_languages, web_frameworks, analytics, javascript_frameworks = np.get_technologies(url)
print(Fore.GREEN + 'Processing Shodan InternetDB search' + Style.RESET_ALL)
ports, hostnames, cpes, tags, vulns = np.query_internetdb(ip, report_file_type)
- print(Fore.GREEN + 'Processing Google Dorking' + Style.RESET_ALL)
- if report_file_type == 'pdf' or report_file_type == 'html':
- dorking_status = dp.save_results_to_txt(report_folder, dp.get_dorking_query(short_domain))
- elif report_file_type == 'xlsx':
- dorking_status, dorking_results = dp.transfer_results_to_xlsx(dp.get_dorking_query(short_domain))
common_socials = {key: social_medias.get(key, []) + sd_socials.get(key, []) for key in set(social_medias) | set(sd_socials)}
for key in common_socials:
common_socials[key] = list(set(common_socials[key]))
@@ -100,12 +108,15 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
to_search_array = [subdomains, social_medias, sd_socials]
print(Fore.LIGHTMAGENTA_EX + "\n[EXTENDED SCAN START: PAGESEARCH]\n" + Style.RESET_ALL)
ps_emails_return, accessible_subdomains, emails_amount, files_counter, cookies_counter, api_keys_counter, website_elements_counter, exposed_passwords_counter, keywords_messages_list = normal_search(to_search_array, report_folder, keywords, keywords_flag)
+ if len(keywords_messages_list) == 0:
+ keywords_messages_list = ['No keywords were found']
total_links_counter = accessed_links_counter = 0
print(Fore.LIGHTMAGENTA_EX + "\n[EXTENDED SCAN END: PAGESEARCH]\n" + Style.RESET_ALL)
else:
print(Fore.RED + "Cant start PageSearch because no subdomains were detected")
ps_emails_return = ""
accessible_subdomains = files_counter = cookies_counter = api_keys_counter = website_elements_counter = exposed_passwords_counter = total_links_counter = accessed_links_counter = emails_amount = 'No data was gathered because no subdomains were found'
+ keywords_messages_list = ['No data was gathered because no subdomains were found']
pass
elif pagesearch_flag.lower() == 'si':
print(Fore.LIGHTMAGENTA_EX + "\n[EXTENDED SCAN START: PAGESEARCH SITEMAP INSPECTION]\n" + Style.RESET_ALL)
@@ -117,13 +128,33 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
ps_emails_return = ""
pass
+ if dorking_flag == 'none':
+ dorking_status = 'Google Dorking mode was not selected for this scan'
+ dorking_file_path = 'Google Dorking mode was not selected for this scan'
+ elif dorking_flag in ('basic', 'iot', 'files'):
+ dorking_db_path, table = establishing_dork_db_connection(dorking_flag.lower())
+ print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
+ dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table))
+ print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
+
data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials,
subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records,
robots_txt_result, sitemap_xml_result, sitemap_links_status,
web_servers, cms, programming_languages, web_frameworks, analytics, javascript_frameworks, ports,
- hostnames, cpes, tags, vulns, dorking_status, common_socials, total_socials, ps_emails_return,
+ hostnames, cpes, tags, vulns, common_socials, total_socials, ps_emails_return,
accessible_subdomains, emails_amount, files_counter, cookies_counter, api_keys_counter,
- website_elements_counter, exposed_passwords_counter, total_links_counter, accessed_links_counter, keywords_messages_list]
+ website_elements_counter, exposed_passwords_counter, total_links_counter, accessed_links_counter, keywords_messages_list, dorking_status, dorking_file_path]
elif report_file_type == 'xlsx':
if pagesearch_flag.lower() == 'y':
@@ -148,13 +179,33 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
accessible_subdomains = files_counter = cookies_counter = api_keys_counter = website_elements_counter = exposed_passwords_counter = total_links_counter = accessed_links_counter = emails_amount = 0
pass
+ if dorking_flag == 'none':
+ dorking_status = 'Google Dorking mode was not selected for this scan'
+ dorking_results = 'Google Dorking mode was not selected for this scan'
+ elif dorking_flag in ('basic', 'iot', 'files'):
+ dorking_db_path, table = establishing_dork_db_connection(dorking_flag.lower())
+ print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
+ dorking_status, dorking_results = dp.transfer_results_to_xlsx(table, dp.get_dorking_query(short_domain, dorking_db_path, table))
+ print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
+
data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials,
subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records,
robots_txt_result, sitemap_xml_result, sitemap_links_status,
web_servers, cms, programming_languages, web_frameworks, analytics, javascript_frameworks, ports,
- hostnames, cpes, tags, vulns, dorking_status, common_socials, total_socials, ps_emails_return,
+ hostnames, cpes, tags, vulns, common_socials, total_socials, ps_emails_return,
accessible_subdomains, emails_amount, files_counter, cookies_counter, api_keys_counter,
- website_elements_counter, exposed_passwords_counter, total_links_counter, accessed_links_counter, dorking_results]
+ website_elements_counter, exposed_passwords_counter, total_links_counter, accessed_links_counter, dorking_status, dorking_results]
elif report_file_type == 'html':
if pagesearch_flag.lower() == 'y':
@@ -163,11 +214,14 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
print(Fore.LIGHTMAGENTA_EX + "\n[EXTENDED SCAN START: PAGESEARCH]\n" + Style.RESET_ALL)
ps_emails_return, accessible_subdomains, emails_amount, files_counter, cookies_counter, api_keys_counter, website_elements_counter, exposed_passwords_counter, keywords_messages_list = normal_search(to_search_array, report_folder, keywords, keywords_flag)
total_links_counter = accessed_links_counter = 0
+ if len(keywords_messages_list) == 0:
+ keywords_messages_list = ['No keywords were found']
print(Fore.LIGHTMAGENTA_EX + "\n[EXTENDED SCAN END: PAGESEARCH]\n" + Style.RESET_ALL)
else:
print(Fore.RED + "Cant start PageSearch because no subdomains were detected")
ps_emails_return = ""
accessible_subdomains = files_counter = cookies_counter = api_keys_counter = website_elements_counter = exposed_passwords_counter = total_links_counter = accessed_links_counter = emails_amount = 'No data was gathered because no subdomains were found'
+ keywords_messages_list = ['No data was gathered because no subdomains were found']
pass
elif pagesearch_flag.lower() == 'si':
print(Fore.LIGHTMAGENTA_EX + "\n[EXTENDED SCAN START: PAGESEARCH SITEMAP INSPECTION]\n" + Style.RESET_ALL)
@@ -179,13 +233,33 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k
ps_emails_return = ""
pass
+ if dorking_flag == 'none':
+ dorking_status = 'Google Dorking mode was not selected for this scan'
+ dorking_file_path = 'Google Dorking mode was not selected for this scan'
+ elif dorking_flag in ('basic', 'iot', 'files'):
+ dorking_db_path, table = establishing_dork_db_connection(dorking_flag.lower())
+ print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
+ dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table))
+ print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL)
+
data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials,
subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records,
robots_txt_result, sitemap_xml_result, sitemap_links_status,
web_servers, cms, programming_languages, web_frameworks, analytics, javascript_frameworks, ports,
- hostnames, cpes, tags, vulns, dorking_status, common_socials, total_socials, ps_emails_return,
+ hostnames, cpes, tags, vulns, common_socials, total_socials, ps_emails_return,
accessible_subdomains, emails_amount, files_counter, cookies_counter, api_keys_counter,
- website_elements_counter, exposed_passwords_counter, total_links_counter, accessed_links_counter, keywords_messages_list]
+ website_elements_counter, exposed_passwords_counter, total_links_counter, accessed_links_counter, keywords_messages_list, dorking_status, dorking_file_path]
report_info_array = [casename, db_casename, db_creation_date, report_folder, ctime, report_file_type, report_ctime]
logging.info(f'### THIS LOG PART FOR {casename} CASE, TIME: {ctime} ENDS HERE')
diff --git a/datagather_modules/dorking_processor.py b/datagather_modules/dorking_processor.py
deleted file mode 100644
index 36abd2d..0000000
--- a/datagather_modules/dorking_processor.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import sys
-
-try:
- import requests.exceptions
- from colorama import Fore, Style
- import mechanicalsoup
- import re
- import requests
-except ImportError as e:
- print(Fore.RED + "Import error appeared. Reason: {}".format(e) + Style.RESET_ALL)
- sys.exit()
-
-def get_dorking_query(short_domain):
- print(Fore.GREEN + "Getting dorking query from config file")
- with open('dorkslist.txt', 'r') as cfg_file:
- lines = cfg_file.readlines()
- index = lines.index('[SOLID DORKS]\n')
- lines_after = lines[index + 2:]
- search_query = [line.format(short_domain) for line in lines_after]
- return search_query
-
-def solid_google_dorking(query, pages=100):
- try:
- browser = mechanicalsoup.StatefulBrowser()
- browser.open("https://www.google.com/")
- browser.select_form('form[action="/search"]')
- browser["q"] = str(query)
- browser.submit_selected(btnName="btnG")
- result_query = []
- for page in range(pages):
- for link in browser.links():
- target = link.attrs['href']
- if (target.startswith('/url?') and not
- target.startswith("/url?q=http://webcache.googleusercontent.com")):
- target = re.sub(r"^/url\?q=([^&]*)&.*", r"\1", target)
- result_query.append(target)
- try:
- browser.follow_link(nr=page + 1)
- except mechanicalsoup.LinkNotFoundError:
- break
- del result_query[-2:]
- return result_query
- except requests.exceptions.ConnectionError as e:
- print(Fore.RED + "Error while establishing connection with domain. No results will appear. Reason: {}".format(e) + Style.RESET_ALL)
- return "Google Dorking results file was not created"
-
-def save_results_to_txt(folderpath, queries, pages=10):
- txt_writepath = folderpath + '//04-dorking_results.txt'
- with open(txt_writepath, 'w') as f:
- for i, query in enumerate(queries, start=1):
- f.write(f"QUERY #{i}: {query}\n")
- results = solid_google_dorking(query, pages)
- if not results:
- f.write("=> NO RESULT FOUND\n")
- else:
- for result in results:
- f.write(f"=> {result}\n")
- f.write("\n")
- print(Fore.GREEN + "Google Dorking results successfully saved in TXT file" + Style.RESET_ALL)
- return "File with gathered links was successfully created"
-
-def transfer_results_to_xlsx(queries, pages=10):
- dorking_return_list = []
- for i, query in enumerate(queries, start=1):
- dorking_return_list.append(f"QUERY #{i}: {query}\n")
- results = solid_google_dorking(query, pages)
- if not results:
- dorking_return_list.append("NO RESULT FOUND\n")
- else:
- for result in results:
- dorking_return_list.append(f"{result}\n")
- dorking_return_list.append("\n")
- return "File with gathered links was successfully created", dorking_return_list
diff --git a/dorking/__init__.py b/dorking/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/dorking/__init__.py
@@ -0,0 +1 @@
+
diff --git a/dorking/basic_dorking.db b/dorking/basic_dorking.db
new file mode 100644
index 0000000..0421fb0
Binary files /dev/null and b/dorking/basic_dorking.db differ
diff --git a/dorking/dorking_handler.py b/dorking/dorking_handler.py
new file mode 100644
index 0000000..4c6bbdd
--- /dev/null
+++ b/dorking/dorking_handler.py
@@ -0,0 +1,122 @@
+import sys
+
+try:
+ import requests.exceptions
+ from colorama import Fore, Style
+ import mechanicalsoup
+ import re
+ import requests
+ import sqlite3
+ import os
+except ImportError as e:
+ print(Fore.RED + "Import error appeared. Reason: {}".format(e) + Style.RESET_ALL)
+ sys.exit()
+
+def get_dorking_query(short_domain, dorking_db_path, table):
+ print(Fore.GREEN + "Getting dorking query from database")
+ conn = sqlite3.connect(dorking_db_path)
+ cursor = conn.cursor()
+ cursor.execute(f"SELECT dork FROM {table}")
+ rows = cursor.fetchall()
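+ # each stored dork is a format template such as 'site:{} filetype:pdf'
+ # (cf. the removed dorkslist.txt), so .format() injects the target domain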
+ search_query = [row[0].format(short_domain) for row in rows]
+ conn.close()
+ return search_query
+
+def get_columns_amount(dorking_db_path, table):
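+ # despite its name, this returns the number of rows (dorks) in the table;
+ # it is used for the pre-scan summary in dpulse.py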
+ conn = sqlite3.connect(dorking_db_path)
+ cursor = conn.cursor()
+ cursor.execute(f"SELECT COUNT(*) FROM {table}")
+ row_count = cursor.fetchone()[0]
+ conn.close()
+ return row_count
+
+def solid_google_dorking(query, pages=100):
+ try:
+ browser = mechanicalsoup.StatefulBrowser()
+ browser.open("https://www.google.com/")
+ browser.select_form('form[action="/search"]')
+ browser["q"] = str(query)
+ browser.submit_selected(btnName="btnG")
+ result_query = []
+ for page in range(pages):
+ for link in browser.links():
+ target = link.attrs['href']
+ if (target.startswith('/url?') and not
+ target.startswith("/url?q=http://webcache.googleusercontent.com")):
+ target = re.sub(r"^/url\?q=([^&]*)&.*", r"\1", target)
+ result_query.append(target)
+ try:
+ browser.follow_link(nr=page + 1)
+ except mechanicalsoup.LinkNotFoundError:
+ break
+ del result_query[-2:]
+ return result_query
+ except requests.exceptions.ConnectionError as e:
+ print(Fore.RED + "Error while establishing connection with domain. No results will appear. Reason: {}".format(e) + Style.RESET_ALL)
+
+def save_results_to_txt(folderpath, table, queries, pages=10):
+ try:
+ txt_writepath = folderpath + '//04-dorking_results.txt'
+ total_results = []
+ total_dorks_amount = len(queries)
+ with open(txt_writepath, 'w') as f:
+ print(Fore.GREEN + "Started Google Dorking. Please, be patient, it may take some time")
+ dorked_query_counter = 0
+ for i, query in enumerate(queries, start=1):
+ f.write(f"QUERY #{i}: {query}\n")
+ results = solid_google_dorking(query, pages)
+ if not results:
+ f.write("=> NO RESULT FOUND\n")
+ total_results.append((query, 0))
+ else:
+ total_results.append((query, len(results)))
+ for result in results:
+ f.write(f"=> {result}\n")
+ f.write("\n")
+ dorked_query_counter += 1
+ print(Fore.GREEN + f" Dorking with " + Style.RESET_ALL + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{dorked_query_counter}/{total_dorks_amount}" + Style.RESET_ALL + Fore.GREEN + " dork" + Style.RESET_ALL, end="\r")
+ print(Fore.GREEN + "Google Dorking end. Results successfully saved in TXT file\n" + Style.RESET_ALL)
+ print(Fore.GREEN + f"During Google Dorking with {table.upper()}:")
+ for query, count in total_results:
+ if count == 0:
+ count = 'no results'
+ print(Fore.GREEN + f"[+] Found results for " + Fore.LIGHTCYAN_EX + f'{query}' + Fore.GREEN + ' query: ' + Fore.LIGHTCYAN_EX + f'{count}' + Style.RESET_ALL)
+ return f'Successfully dorked domain with {table.upper()} dorks table', txt_writepath
+ except Exception:
+ print(Fore.RED + 'Error appeared while trying to dork the target. See journal for details' + Style.RESET_ALL)
+ return 'Domain dorking failed. See journal for details', txt_writepath
+
+def transfer_results_to_xlsx(table, queries, pages=10):
+ print(Fore.GREEN + "Started Google Dorking. Please, be patient, it may take some time")
+ dorked_query_counter = 0
+ total_dorks_amount = len(queries)
+ dorking_return_list = []
+ for i, query in enumerate(queries, start=1):
+ dorking_return_list.append(f"QUERY #{i}: {query}\n")
+ results = solid_google_dorking(query, pages)
+ if not results:
+ dorking_return_list.append("NO RESULT FOUND\n")
+ else:
+ for result in results:
+ dorking_return_list.append(f"{result}\n")
+ dorked_query_counter += 1
+ dorking_return_list.append("\n")
+ print(Fore.GREEN + f" Dorking with " + Style.RESET_ALL + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{dorked_query_counter}/{total_dorks_amount}" + Style.RESET_ALL + Fore.GREEN + " dork" + Style.RESET_ALL, end="\r")
+ print(Fore.GREEN + "Google Dorking end. Results successfully saved in XLSX report\n" + Style.RESET_ALL)
+ return f'Successfully dorked domain with {table.upper()} dorks table', dorking_return_list
+
+def dorks_files_check():
+ dorks_path = 'dorking//'
+ dorks_files = ['iot_dorking.db', 'files_dorking.db', 'basic_dorking.db']
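+ # all three dork databases must ship alongside the program; a missing
+ # file aborts startup below via sys.exit()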
+ dorks_files_counter = 0
+ for dork_file in dorks_files:
+ files_path = os.path.join(dorks_path, dork_file)
+ if os.path.isfile(files_path):
+ dorks_files_counter += 1
+ if dorks_files_counter == len(dorks_files):
+ print(Fore.GREEN + "Dorks databases presence: OK" + Style.RESET_ALL)
+ else:
+ print(Fore.RED + "Dorks databases presence: NOT OK\nSome files may not be in folder. Please compare dorking folder with the same folder on the official repository\n" + Style.RESET_ALL)
+ sys.exit()
diff --git a/dorking/files_dorking.db b/dorking/files_dorking.db
new file mode 100644
index 0000000..335ad28
Binary files /dev/null and b/dorking/files_dorking.db differ
diff --git a/dorking/iot_dorking.db b/dorking/iot_dorking.db
new file mode 100644
index 0000000..6328bd0
Binary files /dev/null and b/dorking/iot_dorking.db differ
diff --git a/dorkslist.txt b/dorkslist.txt
deleted file mode 100644
index ff87008..0000000
--- a/dorkslist.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-[SOLID DORKS]
-
-site:{} filetype:pdf
-site:{} filetype:xlsx
-site:{} filetype:docx
-site:{} filetype:ppt
-site:{} filetype:doc
-site:{} filetype:pptx
-site:{} filetype:db
-site:{} filetype:accdb
-site:{} filetype:nsf
-site:{} filetype:fp7
-site:{} filetype:mdf
-site:{} filetype:sqlitedb
-{} site:linkedin.com/in/
-site:{} inurl:login | inurl:logon | inurl:sign-in | inurl:signin | inurl:portal
-site:{} inurl:/signup.aspx
-site:{} intitle:"index of" ".ssh" OR "ssh_config" OR "ssh_known_hosts" OR "authorized_keys" OR "id_rsa" OR "id_dsa"
-site:{} intitle:"index of" "nginx.conf"
diff --git a/dpulse.py b/dpulse.py
index dc22b29..a5cac2d 100644
--- a/dpulse.py
+++ b/dpulse.py
@@ -2,20 +2,38 @@
sys.path.append('datagather_modules')
sys.path.append('service')
sys.path.append('reporting_modules')
+sys.path.append('dorking')
-import pdf_report_creation as pdf_rc
+from colorama import Fore, Style, Back
import cli_init
+from config_processing import create_config, check_cfg_presence, read_config
import db_processing as db
+import os
+from dorking_handler import dorks_files_check, get_columns_amount
+
+db.db_creation('report_storage.db')
+dorks_files_check()
+cfg_presence = check_cfg_presence()
+if cfg_presence is True:
+ print(Fore.GREEN + "Global config file presence: OK" + Style.RESET_ALL)
+else:
+ print(Fore.RED + "Global config file presence: NOT OK")
+ create_config()
+ print(Fore.GREEN + "Successfully generated global config file")
+
+
+import pdf_report_creation as pdf_rc
import xlsx_report_creation as xlsx_rc
import html_report_creation as html_rc
from data_assembler import DataProcessing
+from misc import time_processing, domain_precheck
try:
+ import socket
+ import re
import time
- from colorama import Fore, Style, Back
import webbrowser
import sqlite3
- import os
import itertools
import threading
from time import sleep, time
@@ -23,21 +41,11 @@
print(Fore.RED + "Import error appeared. Reason: {}".format(e) + Style.RESET_ALL)
sys.exit()
-cli = cli_init.Menu()
-cli.welcome_menu()
data_processing = DataProcessing()
+config_values = read_config()
-def time_processing(end):
- if end < 60:
- endtime = round(end)
- endtime_string = f'approximately {endtime} seconds'
- else:
- time_minutes = round(end / 60)
- if time_minutes == 1:
- endtime_string = f'approximately {time_minutes} minute'
- else:
- endtime_string = f'approximately {time_minutes} minutes'
- return endtime_string
+cli = cli_init.Menu()
+cli.welcome_menu()
class ProgressBar(threading.Thread):
def __init__(self):
@@ -51,12 +59,11 @@ def run(self):
print(Fore.LIGHTMAGENTA_EX + Back.WHITE + char + Style.RESET_ALL, end='\r')
sleep(0.1)
-db.db_creation('report_storage.db')
-
def run():
while True:
try:
cli.print_main_menu()
+ domain_pattern = r'^[a-zA-Z0-9-]+\.[a-zA-Z]{2,}$'
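+ # note: this pattern only accepts bare second-level domains like example.com;
+ # multi-label inputs such as sub.example.com will be rejected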
choice = input(Fore.YELLOW + "Enter your choice >> ")
if choice == "1":
while True:
@@ -67,8 +74,16 @@ def run():
else:
if not short_domain:
print(Fore.RED + "\nEmpty domain names are not supported")
+ elif re.match(domain_pattern, short_domain) is None:
+ print(Fore.RED + '\nEntered string does not match the domain pattern' + Style.RESET_ALL)
else:
url = "http://" + short_domain + "/"
+ print(Fore.GREEN + 'Pinging domain...' + Style.RESET_ALL)
+ if domain_precheck(short_domain):
+ print(Fore.GREEN + 'Entered domain is accessible. Continuing' + Style.RESET_ALL)
+ else:
+ print(Fore.RED + "Entered domain is not accessible. Scan is impossible" + Style.RESET_ALL)
+ break
case_comment = input(Fore.YELLOW + "Enter case comment >> ")
report_filetype = input(Fore.YELLOW + "Enter report file extension [xlsx/pdf/html] >> ")
if not report_filetype:
@@ -96,6 +111,15 @@ def run():
keywords_list = None
keywords_flag = 0
if report_filetype.lower() == 'pdf' or report_filetype.lower() == 'xlsx' or report_filetype.lower() == 'html':
+ dorking_flag = input(Fore.YELLOW + "Select Dorking mode [Basic/IoT/Files/None] >> ")
+ #api_flag = input(Fore.YELLOW + "Would you like to use 3rd party API in scan? [Y/N] >> ")
+ #if api_flag.lower() == 'y':
+ #print api db content
+ #write ID which you want to use using comma (ex: 1,3,4)
+ #elif api_flag.lower() == 'n':
+ #pass
+ #else:
+ #print invalid mode
if pagesearch_flag.lower() == 'y' or pagesearch_flag.lower() == 'n' or pagesearch_flag.lower() == 'si':
if pagesearch_flag.lower() == "n":
pagesearch_ui_mark = 'No'
@@ -105,10 +129,26 @@ def run():
pagesearch_ui_mark = 'Yes, in Sitemap Inspection mode'
else:
pagesearch_ui_mark = 'Yes, without keywords search'
+ if dorking_flag.lower() not in ['basic', 'iot', 'none', 'files']:
+ print(Fore.RED + "\nInvalid Dorking mode. Please select mode among Basic, IoT, Files or None")
+ break
+ else:
+ if dorking_flag.lower() == 'basic':
+ row_count = get_columns_amount('dorking//basic_dorking.db', 'basic_dorks')
+ dorking_ui_mark = f'Yes, Basic dorking ({row_count} dorks)'
+ elif dorking_flag.lower() == 'iot':
+ row_count = get_columns_amount('dorking//iot_dorking.db', 'iot_dorks')
+ dorking_ui_mark = f'Yes, IoT dorking ({row_count} dorks)'
+ elif dorking_flag.lower() == 'none':
+ dorking_ui_mark = 'No'
+ elif dorking_flag.lower() == 'files':
+ row_count = get_columns_amount('dorking//files_dorking.db', 'files_dorks')
+ dorking_ui_mark = f'Yes, Files dorking ({row_count} dorks)'
print(Fore.LIGHTMAGENTA_EX + "\n[PRE-SCAN SUMMARY]\n" + Style.RESET_ALL)
print(Fore.GREEN + "Determined target: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + short_domain + Style.RESET_ALL)
print(Fore.GREEN + "Report type: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + report_filetype.lower() + Style.RESET_ALL)
print(Fore.GREEN + "PageSearch conduction: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + pagesearch_ui_mark + Style.RESET_ALL)
+ print(Fore.GREEN + "Dorking conduction: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + dorking_ui_mark + Style.RESET_ALL)
print(Fore.GREEN + "Case comment: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + case_comment + Style.RESET_ALL + "\n")
print(Fore.LIGHTMAGENTA_EX + "[BASIC SCAN START]\n" + Style.RESET_ALL)
spinner_thread = ProgressBar()
@@ -117,15 +157,15 @@ def run():
try:
if pagesearch_flag.lower() == 'y':
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower())
end = time() - start
elif pagesearch_flag.lower() == 'si':
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower())
end = time() - start
else:
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower())
end = time() - start
endtime_string = time_processing(end)
pdf_rc.report_assembling(short_domain, url, case_comment, data_array, report_info_array, pagesearch_ui_mark, pagesearch_flag.lower(), endtime_string)
@@ -136,15 +176,15 @@ def run():
try:
if pagesearch_flag.lower() == 'y':
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower())
end = time() - start
elif pagesearch_flag.lower() == 'si':
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower())
end = time() - start
else:
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower())
end = time() - start
endtime_string = time_processing(end)
xlsx_rc.create_report(short_domain, url, case_comment, data_array, report_info_array, pagesearch_ui_mark, pagesearch_flag.lower(), endtime_string)
@@ -155,15 +195,15 @@ def run():
try:
if pagesearch_flag.lower() == 'y':
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower())
end = time() - start
elif pagesearch_flag.lower() == 'si':
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower())
end = time() - start
else:
start = time()
- data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag)
+ data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower())
end = time() - start
endtime_string = time_processing(end)
html_rc.report_assembling(short_domain, url, case_comment, data_array, report_info_array, pagesearch_ui_mark, pagesearch_flag.lower(), endtime_string)
@@ -174,30 +214,9 @@ def run():
print(Fore.RED + "\nUnsupported PageSearch mode. Please choose between Y, N or SI")
elif choice == "2":
- cli.print_settings_menu()
- choice_settings = input(Fore.YELLOW + "Enter your choice >> ")
- if choice_settings == '1':
- with open('dorkslist.txt', 'r') as cfg_file:
- print(Fore.LIGHTMAGENTA_EX + '\n[START OF CONFIG FILE]' + Style.RESET_ALL)
- print('\n' + Fore.LIGHTBLUE_EX + cfg_file.read() + Style.RESET_ALL)
- print(Fore.LIGHTMAGENTA_EX + '\n[END OF CONFIG FILE]\n' + Style.RESET_ALL)
- continue
- elif choice_settings == '2':
- with open('dorkslist.txt', 'a+') as cfg_file:
- print(Fore.LIGHTMAGENTA_EX + '\n[START OF CONFIG FILE]' + Style.RESET_ALL)
- cfg_file.seek(0)
- print('\n' + Fore.LIGHTBLUE_EX + cfg_file.read() + Style.RESET_ALL)
- print(Fore.LIGHTMAGENTA_EX + '\n[END OF CONFIG FILE]\n' + Style.RESET_ALL)
- new_line = str(input(Fore.YELLOW + "Input new dork >> ") + Style.RESET_ALL)
- print(Fore.GREEN + "New dork successfully added to dorks list" + Style.RESET_ALL)
- cfg_file.write(new_line + '\n')
- continue
- elif choice_settings == '3':
- continue
- else:
- print(Fore.RED + "\nInvalid menu item. Please select between existing menu items")
+ print(Fore.RED + "Sorry, but this menu is deprecated since v1.1.1. It will be back soon")
- elif choice == "3":
+ elif choice == "5":
cli.print_help_menu()
choice_help = input(Fore.YELLOW + "Enter your choice >> ")
if choice_help == '1':
@@ -215,7 +234,7 @@ def run():
else:
print(Fore.RED + "\nInvalid menu item. Please select between existing menu items")
- elif choice == "4":
+ elif choice == "3":
cli.print_db_menu()
print('\n')
db.db_creation('report_storage.db')
@@ -240,7 +259,7 @@ def run():
elif choice_db == "3":
print(Fore.GREEN + "\nDatabase connection is successfully closed")
continue
- elif choice == "5":
+ elif choice == "6":
print(Fore.RED + "Exiting the program." + Style.RESET_ALL)
break
else:
diff --git a/poetry.lock b/poetry.lock
index 82810ec..f2f0bd9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -993,13 +993,13 @@ files = [
[[package]]
name = "pypdf"
-version = "4.3.1"
+version = "5.0.1"
description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "pypdf-4.3.1-py3-none-any.whl", hash = "sha256:64b31da97eda0771ef22edb1bfecd5deee4b72c3d1736b7df2689805076d6418"},
- {file = "pypdf-4.3.1.tar.gz", hash = "sha256:b2f37fe9a3030aa97ca86067a56ba3f9d3565f9a791b305c7355d8392c30d91b"},
+ {file = "pypdf-5.0.1-py3-none-any.whl", hash = "sha256:ff8a32da6c7a63fea9c32fa4dd837cdd0db7966adf6c14f043e3f12592e992db"},
+ {file = "pypdf-5.0.1.tar.gz", hash = "sha256:a361c3c372b4a659f9c8dd438d5ce29a753c79c620dc6e1fd66977651f5547ea"},
]
[package.dependencies]
@@ -1012,17 +1012,6 @@ docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"]
full = ["Pillow (>=8.0.0)", "PyCryptodome", "cryptography"]
image = ["Pillow (>=8.0.0)"]
-[[package]]
-name = "pypng"
-version = "0.20220715.0"
-description = "Pure Python library for saving and loading PNG images"
-optional = false
-python-versions = "*"
-files = [
- {file = "pypng-0.20220715.0-py3-none-any.whl", hash = "sha256:4a43e969b8f5aaafb2a415536c1a8ec7e341cd6a3f957fd5b5f32a4cfeed902c"},
- {file = "pypng-0.20220715.0.tar.gz", hash = "sha256:739c433ba96f078315de54c0db975aee537cbc3e1d0ae4ed9aab0ca1e427e2c1"},
-]
-
[[package]]
name = "python-bidi"
version = "0.6.0"
@@ -1244,36 +1233,32 @@ files = [
[[package]]
name = "qrcode"
-version = "7.4.2"
+version = "8.0"
description = "QR Code image generator"
optional = false
-python-versions = ">=3.7"
+python-versions = "<4.0,>=3.9"
files = [
- {file = "qrcode-7.4.2-py3-none-any.whl", hash = "sha256:581dca7a029bcb2deef5d01068e39093e80ef00b4a61098a2182eac59d01643a"},
- {file = "qrcode-7.4.2.tar.gz", hash = "sha256:9dd969454827e127dbd93696b20747239e6d540e082937c90f14ac95b30f5845"},
+ {file = "qrcode-8.0-py3-none-any.whl", hash = "sha256:9fc05f03305ad27a709eb742cf3097fa19e6f6f93bb9e2f039c0979190f6f1b1"},
+ {file = "qrcode-8.0.tar.gz", hash = "sha256:025ce2b150f7fe4296d116ee9bad455a6643ab4f6e7dce541613a4758cbce347"},
]
[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-pypng = "*"
-typing-extensions = "*"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
[package.extras]
-all = ["pillow (>=9.1.0)", "pytest", "pytest-cov", "tox", "zest.releaser[recommended]"]
-dev = ["pytest", "pytest-cov", "tox"]
-maintainer = ["zest.releaser[recommended]"]
+all = ["pillow (>=9.1.0)", "pypng"]
pil = ["pillow (>=9.1.0)"]
-test = ["coverage", "pytest"]
+png = ["pypng"]
[[package]]
name = "reportlab"
-version = "4.2.2"
+version = "4.2.5"
description = "The Reportlab Toolkit"
optional = false
python-versions = "<4,>=3.7"
files = [
- {file = "reportlab-4.2.2-py3-none-any.whl", hash = "sha256:927616931637e2f13e2ee3b3b6316d7a07803170e258621cff7d138bde17fbb5"},
- {file = "reportlab-4.2.2.tar.gz", hash = "sha256:765eecbdd68491c56947e29c38b8b69b834ee5dbbdd2fb7409f08ebdebf04428"},
+ {file = "reportlab-4.2.5-py3-none-any.whl", hash = "sha256:eb2745525a982d9880babb991619e97ac3f661fae30571b7d50387026ca765ee"},
+ {file = "reportlab-4.2.5.tar.gz", hash = "sha256:5cf35b8fd609b68080ac7bbb0ae1e376104f7d5f7b2d3914c7adc63f2593941f"},
]
[package.dependencies]
@@ -1393,13 +1378,13 @@ files = [
[[package]]
name = "tzdata"
-version = "2024.1"
+version = "2024.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
files = [
- {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"},
- {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"},
+ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
+ {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index af56925..49c530d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "dpulse"
-version = "1.1"
+version = "1.1.1"
description = "Convenient,fast and user-friendly collector of domain information from Open-Sources"
authors = ["OSINT-TECHNOLOGIES