#!/usr/bin/python3
# Author: z3r0day :: https://github.com/SxNade
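"""Mufasa: fetch a target page, follow its href and script links, and print
every HTML, CSS and Javascript comment found along the way."""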
import optparse
import re
import sys
import time

import requests

# ANSI colour codes used for status output
RED = '\033[1;31m'
WHITE = '\033[0m'
GREEN = '\033[1;32m'

sys.path.insert(0, './Modules')
def banner(version):
    print(f"[{GREEN}+{WHITE}] Initializing {RED}Mufasa{WHITE}.....v({RED}{version}{WHITE})")
    time.sleep(1)


banner("1.3-Alpha")
print(f"[{GREEN}+{WHITE}] Importing {RED}Modules{WHITE}.....\n")
try:
    import pyjs
except ImportError:
    print(f"[{RED}!{WHITE}] Error importing {GREEN}Javascript{WHITE} module\n")
    sys.exit(1)
def get_content(url):
    """Fetch a URL and return its body as text."""
    resp = requests.get(url, allow_redirects=True)
    return resp.text
def find_html_comments(resp):
    """Return the unique HTML comments (<!-- ... -->) found in a page."""
    comment_list = re.findall(r"<!--.*?-->", resp, re.DOTALL)
    return list(set(comment_list))
def find_css_js_comments(resp):
    """Return the block comments (/* ... */) found in a CSS or JS file."""
    return re.findall(r"/\*.*?\*/", resp, re.DOTALL)
def print_com(list_com):
    """Print every comment in the list, followed by a blank line."""
    for comm in list_com:
        print(comm)
    print("\n")
def fix_http(target_url, link_pgsrc):
    """Turn a relative href into an absolute URL on the target; pass absolute links through."""
    link_pgsrc = link_pgsrc.lstrip("/")
    if 'http' not in link_pgsrc:
        return f"{target_url}/{link_pgsrc}"
    return link_pgsrc
def make_requests(url):
    """Crawl the main page, its linked resources and its scripts, printing any comments found."""
    print(f"[{GREEN}+{WHITE}] Searching for {RED}comments{WHITE} in the main page....\n")
    webcontent = get_content(url)
    print_com(find_html_comments(webcontent))

    print(f"\n[{GREEN}+{WHITE}] Gathering {RED}href{WHITE} links in Page.....\n")
    for link in depth_search(webcontent):
        final_url = fix_http(url, link)
        print(f"[{GREEN}+{WHITE}] Searching {final_url}....")
        try:
            url_resp = get_content(final_url)
        except KeyboardInterrupt:
            print(f"\n[{GREEN}+{WHITE}] {RED}Exiting{WHITE}......\n")
            sys.exit(0)
        except requests.RequestException:
            continue  # skip unreachable links instead of reusing a stale response
        if '.css' in link:
            print_com(find_css_js_comments(url_resp))
        else:
            print_com(find_html_comments(url_resp))

    print(f"\n[{GREEN}+{WHITE}] Gathering {RED}Javascript{WHITE} Files in Page.....")
    for link in pyjs.js_search(webcontent):
        final_url = fix_http(url, link)
        print(f"[{GREEN}+{WHITE}] Searching {final_url}....")
        try:
            url_resp = get_content(final_url)
        except KeyboardInterrupt:
            print(f"\n[{GREEN}+{WHITE}] {RED}Exiting{WHITE}......\n")
            sys.exit(0)
        except requests.RequestException:
            continue
        print_com(find_css_js_comments(url_resp))
def depth_search(webcontent):
    """Return the unique href targets found in the page source."""
    links_list = re.findall(r'(?i)href=["\']?(.*?)["\'\s>]', webcontent)
    return list(set(links_list))
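# Note: the href and comment regexes above are a lightweight, best-effort extractor,
# not a full HTML parser; unusual markup may be missed.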
parser = optparse.OptionParser("\n./mufasa [-h or --help] [-u or --url]=<target-url>")
parser.add_option("-u", "--url", dest="url_of_target", type='string', help="specify the target url")
(options, args) = parser.parse_args()
if options.url_of_target is None:
    parser.error(f"\n[{RED}!{WHITE}] Insufficient Arguments supplied\n")
target_url = options.url_of_target.rstrip("/")  # normalise away a trailing slash
make_requests(target_url)
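
# Example run (hypothetical target):
#   ./mufasa -u https://example.com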