From 4e2e5a6dd58d27c9c82c9dfa788e417c567b96b8 Mon Sep 17 00:00:00 2001
From: makefu
Date: Wed, 18 Apr 2012 23:49:16 +0200
Subject: Reaktor: bxfr the bing.com zone lookup

---
 repos/bxfr/bxfr.py | 238 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 238 insertions(+)
 create mode 100644 repos/bxfr/bxfr.py

diff --git a/repos/bxfr/bxfr.py b/repos/bxfr/bxfr.py
new file mode 100644
index 0000000..245ea92
--- /dev/null
+++ b/repos/bxfr/bxfr.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python -tt
+
+# gxfr replicates dns zone transfers by enumerating subdomains using advanced search engine queries and conducting dns lookups.
+# Original code By Tim Tomes (LaNMaSteR53)
+# rewrite for bing.com, csv output by makefu
+# Available for download at http://LaNMaSteR53.com or
+# http://code.google.com/p/gxfr/ and https://github.com/krebscode/painload in Reaktor/repos
+
+import sys, os.path, urllib, urllib2, re, time, socket, random
+
+
+def help():
+    print """ Syntax: %s domain [options]
+
+  -h, --help                this screen
+  -v                        enable verbose mode
+  -t [num of seconds]       set number of seconds to wait between queries (default=15)
+  -q [max num of queries]   restrict to maximum number of queries (default=10)
+  --dns-lookup              enable dns lookups of all subdomains
+  --proxy [file|ip:port|-]  use a proxy or list of open proxies to send queries (@random w/list)
+       - [file] must consist of 1 or more ip:port pairs
+       - replace filename with '-' (dash) to accept stdin
+  --user-agent ['string']   set custom user-agent string
+  --timeout [seconds]       set socket timeout (default=system default)
+  --csv [file]              write found subdomains and their ips to a csv file
+
+  Examples:
+  $ ./gxfr.py foxnews.com --dns-lookup -v
+  $ ./gxfr.py foxnews.com --dns-lookup --proxy open_proxies.txt --timeout 10
+  $ ./gxfr.py foxnews.com --dns-lookup -t 5 -q 5 -v --proxy 127.0.0.1:8080
+  $ curl http://rmccurdy.com/scripts/proxy/good.txt | ./gxfr.py website.com -v -t 3 --proxy -
+  """ % sys.argv[0]
+    sys.exit(2)
+
+if len(sys.argv) < 2:
+    help()
+
+if '-h' in sys.argv or '--help' in sys.argv:
+    help()
+
+# declare vars and process arguments
+query_cnt = 0
+csvname = False
+domain = sys.argv[1]
+sys.argv = sys.argv[2:]
+lookup = False
+encrypt = True
+base_url = 'http://api.bing.net'
+bing_appid = "01CDBCA91C590493EE4E91FAF83E5239FEF6ADFD" #from darkb0t, thanks
+base_uri = '/xml.aspx?AppID=%s&Sources=Web&Version=2.0&Web.Count=50&Web.Options=DisableHostCollapsing+DisableQueryAlterations' %bing_appid
+base_query = 'site:' + domain
+pattern = '>([\.\w-]*)\.%s.+?<' % (domain)
+proxy = False
+user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
+verbose = False
+secs = 10
+max_queries = 10
+# process command line arguments
+if len(sys.argv) > 0:
+    if '--dns-lookup' in sys.argv:
+        lookup = True
+    if '--csv' in sys.argv:
+        csvname = sys.argv[sys.argv.index('--csv') + 1]
+    if '--proxy' in sys.argv:
+        proxy = True
+        filename = sys.argv[sys.argv.index('--proxy') + 1]
+        if filename == '-':
+            proxies = sys.stdin.read().split()
+        elif os.path.exists(filename):
+            content = open(filename).read()
+            proxies = re.findall('\d+\.\d+\.\d+\.\d+:\d+', content)
+        elif re.match(r'^\d+\.\d+\.\d+\.\d+:\d+$', filename):
+            proxies = [filename]
+        else:
+            help()
+    if '--timeout' in sys.argv:
+        timeout = int(sys.argv[sys.argv.index('--timeout') + 1])
+        socket.setdefaulttimeout(timeout)
+    if '--user-agent' in sys.argv:
+        user_agent = sys.argv[sys.argv.index('--user-agent') + 1]
+    if '-v' in sys.argv:
+        verbose = True
+    if '-t' in sys.argv:
+        secs = int(sys.argv[sys.argv.index('-t') + 1])
+    if '-q' in sys.argv:
+        max_queries = int(sys.argv[sys.argv.index('-q') + 1])
+subs = []
+new = True
+page = 0
+
+# --begin--
+print '[-] domain:', domain
+print '[-] user-agent:', user_agent
+# execute search engine queries and scrape results storing subdomains in a list
+print '[-] querying search engine, please wait...'
+# loop until no new subdomains are found
+while new == True:
+    try:
+        query = ''
+        # build query based on previous results
+        for sub in subs:
+            query += ' -site:%s.%s' % (sub, domain)
+        full_query = base_query + query
+        start_param = '&Web.Offset=%s' % (str(page*10))
+        query_param = '&Query=%s' % (urllib.quote_plus(full_query))
+        if len(base_uri) + len(query_param) + len(start_param) < 2048:
+            last_query_param = query_param
+            params = query_param + start_param
+        else:
+            params = last_query_param[:2047-len(start_param)-len(base_uri)] + start_param
+        full_url = base_url + base_uri + params
+        # note: query character limit is passive in mobile, but seems to be ~794
+        # note: query character limit seems to be 852 for desktop queries
+        # note: typical URI max length is 2048 (starts after top level domain)
+        if verbose: print '[+] using query: %s...' % (full_url)
+        # build web request and submit query
+        request = urllib2.Request(full_url)
+        # spoof user-agent string
+        request.add_header('User-Agent', user_agent)
+        # if proxy is enabled, use the correct handler
+        if proxy == True:
+            # validate proxies at runtime
+            while True:
+                try:
+                    # select a proxy from the list at random
+                    num = random.randint(0,len(proxies)-1)
+                    host = proxies[num]
+                    opener = urllib2.build_opener(urllib2.ProxyHandler({'http': host}))
+                    if verbose: print '[+] sending query to', host
+                    # send query to proxy server
+                    result = opener.open(request).read()
+                    # exit while loop if successful
+                    break
+                except Exception as inst:
+                    print '[!] %s failed: %s' % (host, inst)
+                    if len(proxies) == 1:
+                        # exit if no proxy server from the list is valid
+                        print '[-] valid proxy server not found'
+                        sys.exit(2)
+                    else:
+                        # remove host from list of proxies and try again
+                        del proxies[num]
+        else:
+            opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler())
+            # send query to search engine
+            try:
+                result = opener.open(request).read()
+            except Exception as inst:
+                print '[!] {0}'.format(inst)
+                if str(inst).find('503') != -1: print '[!] possible shun: use --proxy or find something else to do for 24 hours :)'
+                sys.exit(2)
+        if not verbose: sys.stdout.write('.'); sys.stdout.flush()
+        #if not verbose: sys.stdout.write('\n'); sys.stdout.flush()
+        # increment query count
+        query_cnt += 1
+        sites = re.findall(pattern, result)
+        # make the list of results unique
+        sites = list(set(sites))
+        new = False
+        # add subdomain to list if not already present
+        for site in sites:
+            if site not in subs:
+                if verbose: print '[!] subdomain found:', site
+                subs.append(site)
+                new = True
+        # exit if maximum number of queries has been made
+        if query_cnt == max_queries:
+            print '[-] maximum number of queries made...'
+            break
+        # start paging through results once the query size is maxed out
+        if new == False:
+            # exit if all subdomains have been found
+            if not 'Next page' in result:
+                #import pdb; pdb.set_trace() # curl to stdin breaks pdb
+                print '[-] all available subdomains found...'
+                break
+            else:
+                page += 1
+                new = True
+                if verbose: print '[+] no new subdomains found on page. jumping to result %d.' % (page*10)
+        # sleep script to avoid lock-out
+        if verbose: print '[+] sleeping to avoid lock-out...'
+        time.sleep(secs)
+    except KeyboardInterrupt:
+        # catch keyboard interrupt and gracefully complete script
+        break
+
+# print list of subdomains
+print '[-] successful queries made:', str(query_cnt)
+if verbose:
+    # rebuild and display final query if in verbose mode
+    #final_query = ''
+    #for sub in subs:
+    #  final_query += '+-site:%s.%s' % (sub, domain)
+    #print '[+] final query string: %sstart=%s&%s%s' % (base_url, str(page*10), base_query, query)
+    print '[+] final query string: %s' % (full_url)
+print ' '
+print '[subdomains] -', str(len(subs))
+csvwriter = False
+try:
+    if csvname:
+        import csv
+        csvwriter = csv.writer(open(csvname,'wb'))
+except:
+    print "[!] Cannot open CSV"
+for sub in subs:
+    dom = '%s.%s' % (sub, domain )
+    hostname,aliases,ips = socket.gethostbyname_ex(dom)
+    #print hostname,aliases,ip
+    print dom,",".join(ips)
+    try:
+        line = [dom] + ips
+        csvwriter.writerow(line)
+    except: pass
+
+
+# conduct dns lookup if argument is present
+if lookup == True:
+    print ' '
+    print '[-] querying dns, please wait...'
+    dict = {}
+    # create a dictionary where the subdomain is the key and a list of all associated ips is the value
+    for sub in subs:
+        sub = '%s.%s' % (sub, domain)
+        if verbose: print '[+] querying dns for %s...' % (sub)
+        # dns query and dictionary assignment
+        try:
+            dict[sub] = list(set([item[4][0] for item in socket.getaddrinfo(sub, 80)]))
+        except socket.gaierror:
+            # dns lookup failure
+            dict[sub] = list(set(['no entry']))
+    # print table of subdomains and ips
+    print ' '
+    print '[ip]'.ljust(16, ' ') + '[subdomain]'
+    for key in dict.keys():
+        for ip in dict[key]:
+            print ip.ljust(16, ' ') + key
+# --end--
--
cgit v1.2.3


From dabeafea30aefde7d4018f31fc6a21f92b1d7ee7 Mon Sep 17 00:00:00 2001
From: makefu
Date: Thu, 19 Apr 2012 00:14:54 +0200
Subject: add bing zone transfer for humans

bxfr no longer uses the api, as it is less reliable
---
 repos/bxfr/bxfr.py     |  30 +++---
 repos/bxfr/bxfr_api.py | 238 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 253 insertions(+), 15 deletions(-)
 create mode 100644 repos/bxfr/bxfr_api.py

diff --git a/repos/bxfr/bxfr.py b/repos/bxfr/bxfr.py
index 245ea92..8e6bd10 100644
--- a/repos/bxfr/bxfr.py
+++ b/repos/bxfr/bxfr.py
@@ -1,16 +1,14 @@
 #!/usr/bin/python -tt
 
 # gxfr replicates dns zone transfers by enumerating subdomains using advanced search engine queries and conducting dns lookups.
-# Original code By Tim Tomes (LaNMaSteR53)
-# rewrite for bing.com, csv output by makefu
-# Available for download at http://LaNMaSteR53.com or
-# http://code.google.com/p/gxfr/ and https://github.com/krebscode/painload in Reaktor/repos
+# By Tim Tomes (LaNMaSteR53)
+# Available for download at http://LaNMaSteR53.com or http://code.google.com/p/gxfr/
 
 import sys, os.path, urllib, urllib2, re, time, socket, random
 
 
 def help():
-    print """ Syntax: %s domain [options]
+    print """ Syntax: ./gxfr.py domain [options]
 
   -h, --help                this screen
   -v                        enable verbose mode
@@ -29,7 +27,7 @@ def help():
   $ ./gxfr.py foxnews.com --dns-lookup --proxy open_proxies.txt --timeout 10
   $ ./gxfr.py foxnews.com --dns-lookup -t 5 -q 5 -v --proxy 127.0.0.1:8080
   $ curl http://rmccurdy.com/scripts/proxy/good.txt | ./gxfr.py website.com -v -t 3 --proxy -
-  """ % sys.argv[0]
+  """
     sys.exit(2)
 
 if len(sys.argv) < 2:
@@ -39,22 +37,22 @@ if '-h' in sys.argv or '--help' in sys.argv:
     help()
 
 # declare vars and process arguments
+#http://www.bing.com/search?q=site%3agoogle.de&qs=n&filt=all&pq=site%3agoogle.d&sc=8-5&sp=-1&sk=&first=1&FORM=PORE
 query_cnt = 0
 csvname = False
 domain = sys.argv[1]
 sys.argv = sys.argv[2:]
 lookup = False
 encrypt = True
-base_url = 'http://api.bing.net'
-bing_appid = "01CDBCA91C590493EE4E91FAF83E5239FEF6ADFD" #from darkb0t, thanks
-base_uri = '/xml.aspx?AppID=%s&Sources=Web&Version=2.0&Web.Count=50&Web.Options=DisableHostCollapsing+DisableQueryAlterations' %bing_appid
+base_url = 'http://www.bing.com'
+base_uri = '/search?qs=n&form=QBRE&sc=0-0&sp=-1&sk='
 base_query = 'site:' + domain
-pattern = '>([\.\w-]*)\.%s.+?<' % (domain)
+pattern = '//([\.\w-]*)\.%s.+?' % (domain)
 proxy = False
 user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
 verbose = False
-secs = 10
-max_queries = 10
+secs = 15
+max_queries = 10 # default = 10 queries
 # process command line arguments
 if len(sys.argv) > 0:
     if '--dns-lookup' in sys.argv:
@@ -101,8 +99,8 @@ while new == True:
         for sub in subs:
             query += ' -site:%s.%s' % (sub, domain)
         full_query = base_query + query
-        start_param = '&Web.Offset=%s' % (str(page*10))
-        query_param = '&Query=%s' % (urllib.quote_plus(full_query))
+        start_param = '&first=%s' % (str(page*10))
+        query_param = '&q=%s&pq=%s' % (urllib.quote_plus(full_query),urllib.quote_plus(full_query))
         if len(base_uri) + len(query_param) + len(start_param) < 2048:
             last_query_param = query_param
             params = query_param + start_param
@@ -205,7 +203,9 @@ except:
     print "[!] Cannot open CSV"
 for sub in subs:
     dom = '%s.%s' % (sub, domain )
-    hostname,aliases,ips = socket.gethostbyname_ex(dom)
+
+    #host resolution makes this computer more visible
+    hostname,aliases,ips = socket.gethostbyname_ex(dom)
     #print hostname,aliases,ip
     print dom,",".join(ips)
     try:
diff --git a/repos/bxfr/bxfr_api.py b/repos/bxfr/bxfr_api.py
new file mode 100644
index 0000000..245ea92
--- /dev/null
+++ b/repos/bxfr/bxfr_api.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python -tt
+
+# gxfr replicates dns zone transfers by enumerating subdomains using advanced search engine queries and conducting dns lookups.
+# Original code By Tim Tomes (LaNMaSteR53)
+# rewrite for bing.com, csv output by makefu
+# Available for download at http://LaNMaSteR53.com or
+# http://code.google.com/p/gxfr/ and https://github.com/krebscode/painload in Reaktor/repos
+
+import sys, os.path, urllib, urllib2, re, time, socket, random
+
+
+def help():
+    print """ Syntax: %s domain [options]
+
+  -h, --help                this screen
+  -v                        enable verbose mode
+  -t [num of seconds]       set number of seconds to wait between queries (default=15)
+  -q [max num of queries]   restrict to maximum number of queries (default=10)
+  --dns-lookup              enable dns lookups of all subdomains
+  --proxy [file|ip:port|-]  use a proxy or list of open proxies to send queries (@random w/list)
+       - [file] must consist of 1 or more ip:port pairs
+       - replace filename with '-' (dash) to accept stdin
+  --user-agent ['string']   set custom user-agent string
+  --timeout [seconds]       set socket timeout (default=system default)
+  --csv [file]              write found subdomains and their ips to a csv file
+
+  Examples:
+  $ ./gxfr.py foxnews.com --dns-lookup -v
+  $ ./gxfr.py foxnews.com --dns-lookup --proxy open_proxies.txt --timeout 10
+  $ ./gxfr.py foxnews.com --dns-lookup -t 5 -q 5 -v --proxy 127.0.0.1:8080
+  $ curl http://rmccurdy.com/scripts/proxy/good.txt | ./gxfr.py website.com -v -t 3 --proxy -
+  """ % sys.argv[0]
+    sys.exit(2)
+
+if len(sys.argv) < 2:
+    help()
+
+if '-h' in sys.argv or '--help' in sys.argv:
+    help()
+
+# declare vars and process arguments
+query_cnt = 0
+csvname = False
+domain = sys.argv[1]
+sys.argv = sys.argv[2:]
+lookup = False
+encrypt = True
+base_url = 'http://api.bing.net'
+bing_appid = "01CDBCA91C590493EE4E91FAF83E5239FEF6ADFD" #from darkb0t, thanks
+base_uri = '/xml.aspx?AppID=%s&Sources=Web&Version=2.0&Web.Count=50&Web.Options=DisableHostCollapsing+DisableQueryAlterations' %bing_appid
+base_query = 'site:' + domain
+pattern = '>([\.\w-]*)\.%s.+?<' % (domain)
+proxy = False
+user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
+verbose = False
+secs = 10
+max_queries = 10
+# process command line arguments
+if len(sys.argv) > 0:
+    if '--dns-lookup' in sys.argv:
+        lookup = True
+    if '--csv' in sys.argv:
+        csvname = sys.argv[sys.argv.index('--csv') + 1]
+    if '--proxy' in sys.argv:
+        proxy = True
+        filename = sys.argv[sys.argv.index('--proxy') + 1]
+        if filename == '-':
+            proxies = sys.stdin.read().split()
+        elif os.path.exists(filename):
+            content = open(filename).read()
+            proxies = re.findall('\d+\.\d+\.\d+\.\d+:\d+', content)
+        elif re.match(r'^\d+\.\d+\.\d+\.\d+:\d+$', filename):
+            proxies = [filename]
+        else:
+            help()
+    if '--timeout' in sys.argv:
+        timeout = int(sys.argv[sys.argv.index('--timeout') + 1])
+        socket.setdefaulttimeout(timeout)
+    if '--user-agent' in sys.argv:
+        user_agent = sys.argv[sys.argv.index('--user-agent') + 1]
+    if '-v' in sys.argv:
+        verbose = True
+    if '-t' in sys.argv:
+        secs = int(sys.argv[sys.argv.index('-t') + 1])
+    if '-q' in sys.argv:
+        max_queries = int(sys.argv[sys.argv.index('-q') + 1])
+subs = []
+new = True
+page = 0
+
+# --begin--
+print '[-] domain:', domain
+print '[-] user-agent:', user_agent
+# execute search engine queries and scrape results storing subdomains in a list
+print '[-] querying search engine, please wait...'
+# loop until no new subdomains are found
+while new == True:
+    try:
+        query = ''
+        # build query based on previous results
+        for sub in subs:
+            query += ' -site:%s.%s' % (sub, domain)
+        full_query = base_query + query
+        start_param = '&Web.Offset=%s' % (str(page*10))
+        query_param = '&Query=%s' % (urllib.quote_plus(full_query))
+        if len(base_uri) + len(query_param) + len(start_param) < 2048:
+            last_query_param = query_param
+            params = query_param + start_param
+        else:
+            params = last_query_param[:2047-len(start_param)-len(base_uri)] + start_param
+        full_url = base_url + base_uri + params
+        # note: query character limit is passive in mobile, but seems to be ~794
+        # note: query character limit seems to be 852 for desktop queries
+        # note: typical URI max length is 2048 (starts after top level domain)
+        if verbose: print '[+] using query: %s...' % (full_url)
+        # build web request and submit query
+        request = urllib2.Request(full_url)
+        # spoof user-agent string
+        request.add_header('User-Agent', user_agent)
+        # if proxy is enabled, use the correct handler
+        if proxy == True:
+            # validate proxies at runtime
+            while True:
+                try:
+                    # select a proxy from the list at random
+                    num = random.randint(0,len(proxies)-1)
+                    host = proxies[num]
+                    opener = urllib2.build_opener(urllib2.ProxyHandler({'http': host}))
+                    if verbose: print '[+] sending query to', host
+                    # send query to proxy server
+                    result = opener.open(request).read()
+                    # exit while loop if successful
+                    break
+                except Exception as inst:
+                    print '[!] %s failed: %s' % (host, inst)
+                    if len(proxies) == 1:
+                        # exit if no proxy server from the list is valid
+                        print '[-] valid proxy server not found'
+                        sys.exit(2)
+                    else:
+                        # remove host from list of proxies and try again
+                        del proxies[num]
+        else:
+            opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler())
+            # send query to search engine
+            try:
+                result = opener.open(request).read()
+            except Exception as inst:
+                print '[!] {0}'.format(inst)
+                if str(inst).find('503') != -1: print '[!] possible shun: use --proxy or find something else to do for 24 hours :)'
+                sys.exit(2)
+        if not verbose: sys.stdout.write('.'); sys.stdout.flush()
+        #if not verbose: sys.stdout.write('\n'); sys.stdout.flush()
+        # increment query count
+        query_cnt += 1
+        sites = re.findall(pattern, result)
+        # make the list of results unique
+        sites = list(set(sites))
+        new = False
+        # add subdomain to list if not already present
+        for site in sites:
+            if site not in subs:
+                if verbose: print '[!] subdomain found:', site
+                subs.append(site)
+                new = True
+        # exit if maximum number of queries has been made
+        if query_cnt == max_queries:
+            print '[-] maximum number of queries made...'
+            break
+        # start paging through results once the query size is maxed out
+        if new == False:
+            # exit if all subdomains have been found
+            if not 'Next page' in result:
+                #import pdb; pdb.set_trace() # curl to stdin breaks pdb
+                print '[-] all available subdomains found...'
+                break
+            else:
+                page += 1
+                new = True
+                if verbose: print '[+] no new subdomains found on page. jumping to result %d.' % (page*10)
+        # sleep script to avoid lock-out
+        if verbose: print '[+] sleeping to avoid lock-out...'
+        time.sleep(secs)
+    except KeyboardInterrupt:
+        # catch keyboard interrupt and gracefully complete script
+        break
+
+# print list of subdomains
+print '[-] successful queries made:', str(query_cnt)
+if verbose:
+    # rebuild and display final query if in verbose mode
+    #final_query = ''
+    #for sub in subs:
+    #  final_query += '+-site:%s.%s' % (sub, domain)
+    #print '[+] final query string: %sstart=%s&%s%s' % (base_url, str(page*10), base_query, query)
+    print '[+] final query string: %s' % (full_url)
+print ' '
+print '[subdomains] -', str(len(subs))
+csvwriter = False
+try:
+    if csvname:
+        import csv
+        csvwriter = csv.writer(open(csvname,'wb'))
+except:
+    print "[!] Cannot open CSV"
+for sub in subs:
+    dom = '%s.%s' % (sub, domain )
+    hostname,aliases,ips = socket.gethostbyname_ex(dom)
+    #print hostname,aliases,ip
+    print dom,",".join(ips)
+    try:
+        line = [dom] + ips
+        csvwriter.writerow(line)
+    except: pass
+
+
+# conduct dns lookup if argument is present
+if lookup == True:
+    print ' '
+    print '[-] querying dns, please wait...'
+    dict = {}
+    # create a dictionary where the subdomain is the key and a list of all associated ips is the value
+    for sub in subs:
+        sub = '%s.%s' % (sub, domain)
+        if verbose: print '[+] querying dns for %s...' % (sub)
+        # dns query and dictionary assignment
+        try:
+            dict[sub] = list(set([item[4][0] for item in socket.getaddrinfo(sub, 80)]))
+        except socket.gaierror:
+            # dns lookup failure
+            dict[sub] = list(set(['no entry']))
+    # print table of subdomains and ips
+    print ' '
+    print '[ip]'.ljust(16, ' ') + '[subdomain]'
+    for key in dict.keys():
+        for ip in dict[key]:
+            print ip.ljust(16, ' ') + key
+# --end--
--
cgit v1.2.3


From 59290ba4e15065149ed61df88e0d7bc72874a092 Mon Sep 17 00:00:00 2001
From: makefu
Date: Thu, 19 Apr 2012 00:18:08 +0200
Subject: add bxfr support

---
 repos/consolidate_dns/index | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/repos/consolidate_dns/index b/repos/consolidate_dns/index
index 2908b9e..6ff51f8 100755
--- a/repos/consolidate_dns/index
+++ b/repos/consolidate_dns/index
@@ -10,6 +10,8 @@ DNSMAP = "../dnsmap/dnsmap"
 silent=open("/dev/null","w")
 gxfr_enabled = False
 GXFR = "../gxfr/gxfr.py"
+bxfr_enabled = False
+BXFR = "../bxfr/bxfr.py"
 domains = {}
 try:
   DOMAIN=sys.argv[1]
@@ -27,6 +29,11 @@ if os.path.isfile(GXFR):
   print (" gxfr.py enabled")
 else:
   print (" gxfr.py not available or not supported")
+if os.path.isfile(BXFR):
+  bxfr_enabled=True
+  print (" bxfr.py enabled")
+else:
+  print (" bxfr.py not available or not supported")
 
 
 if dnsmap_enabled:
@@ -50,6 +57,16 @@ if gxfr_enabled:
     domains[row[0]] = row[1:]
   print ("...finished with [%d] domains" %reader.line_num)
   os.unlink(gxfr_tmp)
+if bxfr_enabled:
+  bxfr_tmp = tempfile.NamedTemporaryFile(delete=False).name
+  print ("Starting bxfr, this may take some time")
+  p = Popen(["/usr/bin/python",BXFR,DOMAIN,"-q","5","--csv",bxfr_tmp],stdout=silent,stderr=silent)
+  p.wait()
+  reader = csv.reader(open(bxfr_tmp))
+  for row in reader:
+    domains[row[0]] = row[1:]
+  print ("...finished with [%d] domains" %reader.line_num)
+  os.unlink(bxfr_tmp)
 print "found %d subdomain(s)" % len(domains)
 num = 1
--
cgit v1.2.3
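
For reference, here is a minimal sketch of the enumeration loop that both bxfr.py and bxfr_api.py implement: issue a 'site:domain' query, exclude every subdomain already found with '-site:' terms, and stop when a query yields nothing new. This is Python 3, standard library only; the bing.com URL parameters and the result pattern are assumptions carried over from the patches above, not a verified scraper (bing's markup has changed many times since 2012), and paging, proxies and csv output are left out.

#!/usr/bin/env python3
# Sketch of the bxfr idea: replicate a dns zone transfer by repeatedly
# narrowing a search engine query until no new subdomains appear.
import re
import sys
import time
import urllib.parse
import urllib.request

def enumerate_subdomains(domain, max_queries=10, pause=15):
    # result pattern assumed from the patch above ('//sub.domain' in links)
    pattern = re.compile(r'//([\.\w-]*)\.%s' % re.escape(domain))
    subs = set()
    for _ in range(max_queries):
        # 'site:example.com -site:www.example.com ...' pushes hosts we have
        # not seen yet into the first results page
        query = 'site:%s' % domain + ''.join(
            ' -site:%s.%s' % (s, domain) for s in sorted(subs))
        url = 'http://www.bing.com/search?q=' + urllib.parse.quote_plus(query)
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        html = urllib.request.urlopen(req).read().decode('utf-8', 'replace')
        found = set(m for m in pattern.findall(html) if m) - subs
        if not found:
            break  # nothing new on this page; give up
        subs |= found
        time.sleep(pause)  # throttle queries to avoid being rate-limited
    return sorted(subs)

if __name__ == '__main__':
    for sub in enumerate_subdomains(sys.argv[1]):
        print('%s.%s' % (sub, sys.argv[1]))

The exclusion trick is the heart of all three scripts: instead of paging through thousands of results, each successive query asks the engine only for hosts not yet discovered, so a handful of queries usually covers the zone.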