This is a small web vulnerability scanner I built for my graduation project last year. It targets basic SQL injection, blind SQL injection, and XSS vulnerabilities. I studied the source code of two small tools by a well-known developer on GitHub (reportedly one of the sqlmap authors) and wrote my own version following their approach. Usage instructions and the source code follow.
I. Usage:
1. Runtime environment:
Linux command line + Python 2.7
2. Creating the script:
vim scanner        # create a file named scanner
chmod a+x scanner  # make the file executable
3. Running it:
python scanner     # run the script
If no target URL is supplied, the program prints a help message listing the accepted options.
The options are:
-h, --help      show the help message
--url           target URL to scan
--data          POST data (request body parameters)
--cookie        HTTP Cookie header value
--user-agent    HTTP User-Agent header value
--random-agent  use a randomly chosen browser User-Agent
--referer       HTTP Referer header value (the page that links to the target URL)
--proxy         HTTP proxy address
For example, to scan "http://127.0.0.1/dvwa/vulnerabilities/sqli/?id=&Submit=Submit":
python scanner --url="http://127.0.0.1/dvwa/vulnerabilities/sqli/?id=&Submit=Submit" --cookie="security=low;PHPSESSID=menntb9b2isj7qha739ihg9of1"
The scan output reports:
An XSS vulnerability matching the signature "\">.xss.<\"" from the pattern library, i.e. the payload is reflected outside of any tag.
A SQL injection vulnerability, with the target's backend database identified as MySQL.
A blind SQL injection vulnerability.
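To make the first two findings concrete, here is a minimal self-contained sketch (my own illustration, not part of the scanner): the two regexes are taken from the source below, while the payload and the response fragments are hypothetical stand-ins for what DVWA might return.

import re

# Hypothetical reflection: random prefix + XSS character pool + random suffix,
# echoed back by the page without any HTML encoding
payload = "abcde'\";<>qwert"
reflected = "<pre>First name: %s</pre>" % payload

# The ">.xss.<" signature from XSS_PATTERNS: the payload lands outside of any tag
if re.search(r">[^<]*%(chars)s[^<]*(<|\Z)" % {"chars": re.escape(payload)}, reflected, re.I):
    print "payload reflected outside of tags -> XSS"

# DBMS fingerprinting: one of the MySQL entries from DBMS_ERRORS
error_page = "You have an error in your SQL syntax; check the manual for your MySQL server version"
if re.search(r"SQL syntax.*MySQL", error_page, re.I):
    print "error message matched -> error-based SQLi, backend identified as MySQL"

The blind finding is produced differently: there is no signature to match, so the scanner compares whole responses instead (see the sketch after the listing).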
II. Source code:
The code has been verified to run; I personally recommend testing it against DVWA.
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import difflib, httplib, itertools, optparse, random, re, string, urllib, urllib2

NAME = "Scanner for RXSS and SQLI"
AUTHOR = "Lishuze"

# Building blocks for boolean-based blind SQLi payloads: prefix + test + suffix
PREFIXES = (" ", ") ", "' ", "') ", "\"")
SUFFIXES = ("", "-- -", "#")
BOOLEAN_TESTS = ("AND %d=%d", "OR NOT (%d=%d)")
TAMPER_SQL_CHAR_POOL = ('(', ')', '\'', '"')
TAMPER_XSS_CHAR_POOL = ('\'', '"', '>', '<', ';')

GET, POST = "GET", "POST"
COOKIE, UA, REFERER = "Cookie", "User-Agent", "Referer"
TEXT, HTTPCODE, TITLE, HTML = xrange(4)

_headers = {}

USER_AGENTS = (
    "Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7_0; en-US) AppleWebKit/534.21 (KHTML, like Gecko) Chrome/11.0.678.0 Safari/534.21",
)

# (regex template, human-readable description, regex for content to strip first)
XSS_PATTERNS = (
    (r"<!--[^>]*%(chars)s|%(chars)s[^<]*-->", "\"<!--.'.xss.'.-->\", inside the comment", None),
    (r"(?s)<script[^>]*>[^<]*?'[^<']*%(chars)s|%(chars)s[^<']*'[^<]*</script>", "\"<script>.'.xss.'.</script>\", enclosed by <script> tags, inside single-quotes", None),
    (r'(?s)<script[^>]*>[^<]*?"[^<"]*%(chars)s|%(chars)s[^<"]*"[^<]*</script>', "'<script>.\".xss.\".</script>', enclosed by <script> tags, inside double-quotes", None),
    (r"(?s)<script[^>]*>[^<]*?%(chars)s|%(chars)s[^<]*</script>", "\"<script>.xss.</script>\", enclosed by <script> tags", None),
    (r">[^<]*%(chars)s[^<]*(<|\Z)", "\">.xss.<\", outside of tags", r"(?s)<script.+?</script>|<!--.*?-->"),
    (r"<[^>]*'[^>']*%(chars)s[^>']*'[^>]*>", "\"<.'.xss.'.>\", inside the tag, inside single-quotes", r"(?s)<script.+?</script>|<!--.*?-->"),
    (r'<[^>]*"[^>"]*%(chars)s[^>"]*"[^>]*>', "'<.\".xss.\".>', inside the tag, inside double-quotes", r"(?s)<script.+?</script>|<!--.*?-->"),
    (r"<[^>]*%(chars)s[^>]*>", "\"<.xss.>\", inside the tag, outside of quotes", r"(?s)<script.+?</script>|<!--.*?-->"),
)

# Error messages used to fingerprint the backend DBMS
DBMS_ERRORS = {
    "MySQL": (r"SQL syntax.*MySQL", r"Warning.*mysql_.*", r"valid MySQL result", r"MySqlClient\."),
    "Microsoft SQL Server": (r"Driver.* SQL[\-\_\ ]*Server", r"OLE DB.* SQL Server", r"(\W|\A)SQL Server.*Driver", r"Warning.*mssql_.*", r"(\W|\A)SQL Server.*[0-9a-fA-F]{8}", r"(?s)Exception.*\WSystem\.Data\.SqlClient\.", r"(?s)Exception.*\WRoadhouse\.Cms\."),
    "Microsoft Access": (r"Microsoft Access Driver", r"JET Database Engine", r"Access Database Engine"),
    "Oracle": (r"ORA-[0-9][0-9][0-9][0-9]", r"Oracle error", r"Oracle.*Driver", r"Warning.*\Woci_.*", r"Warning.*\Wora_.*"),
}

def _retrieve_content_xss(url, data=None):
    # Percent-encode spaces in the query string, then fetch the raw response body
    surl = ""
    for i in xrange(len(url)):
        if i > url.find('?'):
            surl += url[i].replace(' ', "%20")
        else:
            surl += url[i]
    try:
        req = urllib2.Request(surl, data, _headers)
        retval = urllib2.urlopen(req, timeout=30).read()
    except Exception, ex:
        retval = getattr(ex, "message", "")
    return retval or ""

def _retrieve_content_sql(url, data=None):
    # Fetch the page and keep the HTTP code, <title>, raw HTML and tag-stripped text
    retval = {HTTPCODE: httplib.OK}
    surl = ""
    for i in xrange(len(url)):
        if i > url.find('?'):
            surl += url[i].replace(' ', "%20")
        else:
            surl += url[i]
    try:
        req = urllib2.Request(surl, data, _headers)
        retval[HTML] = urllib2.urlopen(req, timeout=30).read()
    except Exception, ex:
        retval[HTTPCODE] = getattr(ex, "code", None)
        retval[HTML] = getattr(ex, "message", "")
    match = re.search(r"<title>(?P<result>[^<]+)</title>", retval[HTML], re.I)
    retval[TITLE] = match.group("result") if match else None
    retval[TEXT] = re.sub(r"(?si)<script.+?</script>|<!--.+?-->|<style.+?</style>|<[^>]+>|\s+", " ", retval[HTML])
    return retval

def scan_page_xss(url, data=None):
    print "Start scanning RXSS:\n"
    retval, usable = False, False
    url = re.sub(r"=(&|\Z)", r"=1\g<1>", url) if url else url
    data = re.sub(r"=(&|\Z)", r"=1\g<1>", data) if data else data
    try:
        for phase in (GET, POST):
            current = url if phase is GET else (data or "")
            for match in re.finditer(r"((\A|[?&])(?P<parameter>[\w]+)=)(?P<value>[^&]+)", current):
                found, usable = False, True
                print "Scanning %s parameter '%s'" % (phase, match.group("parameter"))
                prefix = "".join(random.sample(string.ascii_lowercase, 5))
                suffix = "".join(random.sample(string.ascii_lowercase, 5))
                if not found:
                    # Inject prefix + shuffled XSS character pool + suffix into the parameter
                    tampered = current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote("%s%s%s%s" % ("'", prefix, "".join(random.sample(TAMPER_XSS_CHAR_POOL, len(TAMPER_XSS_CHAR_POOL))), suffix))))
                    content = _retrieve_content_xss(tampered, data) if phase is GET else _retrieve_content_xss(url, tampered)
                    # Look for unencoded reflections and classify their HTML context
                    for sample in re.finditer("%s([^ ]+?)%s" % (prefix, suffix), content, re.I):
                        for regex, info, content_removal_regex in XSS_PATTERNS:
                            context = re.search(regex % {"chars": re.escape(sample.group(0))}, re.sub(content_removal_regex or "", "", content), re.I)
                            if context and not found and sample.group(1).strip():
                                print "!!!%s parameter '%s' appears to be XSS vulnerable (%s)" % (phase, match.group("parameter"), info)
                                found = retval = True
        if not usable:
            print " (x) no usable GET/POST parameters found"
    except KeyboardInterrupt:
        print "\r (x) Ctrl-C pressed"
    return retval

def scan_page_sql(url, data=None):
    print "Start scanning SQLI:\n"
    retval, usable = False, False
    url = re.sub(r"=(&|\Z)", r"=1\g<1>", url) if url else url
    data = re.sub(r"=(&|\Z)", r"=1\g<1>", data) if data else data
    try:
        for phase in (GET, POST):
            current = url if phase is GET else (data or "")
            for match in re.finditer(r"((\A|[?&])(?P<parameter>\w+)=)(?P<value>[^&]+)", current):
                vulnerable, usable = False, True
                original = None
                print "Scanning %s parameter '%s'" % (phase, match.group("parameter"))
                # Error-based detection: inject garbage SQL characters, look for DBMS errors
                tampered = current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote("".join(random.sample(TAMPER_SQL_CHAR_POOL, len(TAMPER_SQL_CHAR_POOL))))))
                content = _retrieve_content_sql(tampered, data) if phase is GET else _retrieve_content_sql(url, tampered)
                for (dbms, regex) in ((dbms, regex) for dbms in DBMS_ERRORS for regex in DBMS_ERRORS[dbms]):
                    if not vulnerable and re.search(regex, content[HTML], re.I):
                        print "!!!%s parameter '%s' could be error SQLi vulnerable (%s)" % (phase, match.group("parameter"), dbms)
                        retval = vulnerable = True
                vulnerable = False
                # Boolean-based blind detection: compare TRUE/FALSE payload responses
                original = original or (_retrieve_content_sql(current, data) if phase is GET else _retrieve_content_sql(url, current))
                for prefix, boolean, suffix in itertools.product(PREFIXES, BOOLEAN_TESTS, SUFFIXES):
                    if not vulnerable:
                        template = "%s%s%s" % (prefix, boolean, suffix)
                        # True payload: e.g. AND 1=1; False payload: e.g. AND 2=1
                        payloads = dict((_, current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote(template % (1 if _ else 2, 1), safe='%')))) for _ in (True, False))
                        contents = dict((_, _retrieve_content_sql(payloads[_], data) if phase is GET else _retrieve_content_sql(url, payloads[_])) for _ in (False, True))
                        if all(_[HTTPCODE] for _ in (original, contents[True], contents[False])) and (any(original[_] == contents[True][_] != contents[False][_] for _ in (HTTPCODE, TITLE))):
                            vulnerable = True
                        else:
                            ratios = dict((_, difflib.SequenceMatcher(None, original[TEXT], contents[_][TEXT]).quick_ratio()) for _ in (True, False))
                            vulnerable = all(ratios.values()) and ratios[True] > 0.95 and ratios[False] < 0.95
                        if vulnerable:
                            print "!!!%s parameter '%s' could be error Blind SQLi vulnerable" % (phase, match.group("parameter"))
                            retval = True
        if not usable:
            print " (x) no usable GET/POST parameters found"
    except KeyboardInterrupt:
        print "\r (x) Ctrl-C pressed"
    return retval

def init_options(proxy=None, cookie=None, ua=None, referer=None):
    global _headers
    _headers = dict(filter(lambda _: _[1], ((COOKIE, cookie), (UA, ua or NAME), (REFERER, referer))))
    urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler({'http': proxy})) if proxy else None)

if __name__ == "__main__":
    print "----------------------------------------------------------------------------------"
    print "%s\nBy:%s" % (NAME, AUTHOR)
    print "----------------------------------------------------------------------------------"
    parser = optparse.OptionParser()
    parser.add_option("--url", dest="url", help="Target URL")
    parser.add_option("--data", dest="data", help="POST data")
    parser.add_option("--cookie", dest="cookie", help="HTTP Cookie header value")
    parser.add_option("--user-agent", dest="ua", help="HTTP User-Agent header value")
    parser.add_option("--random-agent", dest="randomAgent", action="store_true", help="Use randomly selected HTTP User-Agent header value")
    parser.add_option("--referer", dest="referer", help="HTTP Referer header value")
    parser.add_option("--proxy", dest="proxy", help="HTTP proxy address")
    options, _ = parser.parse_args()
    if options.url:
        init_options(options.proxy, options.cookie, options.ua if not options.randomAgent else random.choice(USER_AGENTS), options.referer)
        result_xss = scan_page_xss(options.url if options.url.startswith("http") else "http://%s" % options.url, options.data)
        print "\nScan results: %s vulnerabilities found" % ("possible" if result_xss else "no")
        print "----------------------------------------------------------------------------------"
        result_sql = scan_page_sql(options.url if options.url.startswith("http") else "http://%s" % options.url, options.data)
        print "\nScan results: %s vulnerabilities found" % ("possible" if result_sql else "no")
        print "----------------------------------------------------------------------------------"
    else:
        parser.print_help()
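A note on the trickiest part above: the boolean-based blind check has no error message to look for. It requests the page with an always-TRUE payload (e.g. AND 1=1) and an always-FALSE payload (e.g. AND 2=1), then compares both against the original response with difflib. A minimal sketch of that decision rule, using hypothetical page texts:

import difflib

original_text = "Welcome admin. Surname: admin"   # hypothetical response for id=1
true_text = "Welcome admin. Surname: admin"       # id=1 AND 1=1 -> same row returned
false_text = "Welcome"                            # id=1 AND 2=1 -> row filtered out

ratio_true = difflib.SequenceMatcher(None, original_text, true_text).quick_ratio()
ratio_false = difflib.SequenceMatcher(None, original_text, false_text).quick_ratio()

# Same thresholds as the scanner: TRUE page nearly identical, FALSE page clearly not
if ratio_true > 0.95 and ratio_false < 0.95:
    print "parameter behaves like a boolean-blind SQLi"

If the parameter is not injectable, both payloads change the page in the same way (or not at all), the two ratios move together, and the check stays silent.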
That is the Python web vulnerability scanner script. I hope it is useful; if you have any questions, leave a comment and I will reply as soon as I can. Many thanks for supporting 服务器之家!
Original article: http://blog.csdn.net/u013181216/article/details/52675649