idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
14,200 | def from_size ( cls , data , width , height ) : monitor = { "left" : 0 , "top" : 0 , "width" : width , "height" : height } return cls ( data , monitor ) | Instantiate a new class given only screen shot s data and size . |
14,201 | def rgb ( self ) : if not self . __rgb : rgb = bytearray ( self . height * self . width * 3 ) raw = self . raw rgb [ 0 : : 3 ] = raw [ 2 : : 4 ] rgb [ 1 : : 3 ] = raw [ 1 : : 4 ] rgb [ 2 : : 3 ] = raw [ 0 : : 4 ] self . __rgb = bytes ( rgb ) return self . __rgb | Compute RGB values from the BGRA raw pixels . |
14,202 | def pixel ( self , coord_x , coord_y ) : try : return self . pixels [ coord_y ] [ coord_x ] except IndexError : raise ScreenShotError ( "Pixel location ({}, {}) is out of range." . format ( coord_x , coord_y ) ) | Returns the pixel value at a given position . |
14,203 | def main ( args = None ) : cli_args = ArgumentParser ( ) cli_args . add_argument ( "-c" , "--coordinates" , default = "" , type = str , help = "the part of the screen to capture: top, left, width, height" , ) cli_args . add_argument ( "-l" , "--level" , default = 6 , type = int , choices = list ( range ( 10 ) ) , help = "the PNG compression level" , ) cli_args . add_argument ( "-m" , "--monitor" , default = 0 , type = int , help = "the monitor to screen shot" ) cli_args . add_argument ( "-o" , "--output" , default = "monitor-{mon}.png" , help = "the output file name" ) cli_args . add_argument ( "-q" , "--quiet" , default = False , action = "store_true" , help = "do not print created files" , ) cli_args . add_argument ( "-v" , "--version" , action = "version" , version = __version__ ) options = cli_args . parse_args ( args ) kwargs = { "mon" : options . monitor , "output" : options . output } if options . coordinates : try : top , left , width , height = options . coordinates . split ( "," ) except ValueError : print ( "Coordinates syntax: top, left, width, height" ) return 2 kwargs [ "mon" ] = { "top" : int ( top ) , "left" : int ( left ) , "width" : int ( width ) , "height" : int ( height ) , } if options . output == "monitor-{mon}.png" : kwargs [ "output" ] = "sct-{top}x{left}_{width}x{height}.png" try : with mss ( ) as sct : if options . coordinates : output = kwargs [ "output" ] . format ( ** kwargs [ "mon" ] ) sct_img = sct . grab ( kwargs [ "mon" ] ) to_png ( sct_img . rgb , sct_img . size , level = options . level , output = output ) if not options . quiet : print ( os . path . realpath ( output ) ) else : for file_name in sct . save ( ** kwargs ) : if not options . quiet : print ( os . path . realpath ( file_name ) ) return 0 except ScreenShotError : return 1 | Main logic . |
14,204 | def error_handler ( _ , event ) : evt = event . contents ERROR . details = { "type" : evt . type , "serial" : evt . serial , "error_code" : evt . error_code , "request_code" : evt . request_code , "minor_code" : evt . minor_code , } return 0 | Specifies the program s supplied error handler . |
14,205 | def validate ( retval , func , args ) : if retval != 0 and not ERROR . details : return args err = "{}() failed" . format ( func . __name__ ) details = { "retval" : retval , "args" : args } raise ScreenShotError ( err , details = details ) | Validate the returned value of a Xlib or XRANDR function . |
14,206 | def get_error_details ( self ) : details = { } if ERROR . details : details = { "xerror_details" : ERROR . details } ERROR . details = None xserver_error = ctypes . create_string_buffer ( 1024 ) self . xlib . XGetErrorText ( MSS . display , details . get ( "xerror_details" , { } ) . get ( "error_code" , 0 ) , xserver_error , len ( xserver_error ) , ) xerror = xserver_error . value . decode ( "utf-8" ) if xerror != "0" : details [ "xerror" ] = xerror return details | Get more information about the latest X server error . |
14,207 | def on_exists ( fname ) : if os . path . isfile ( fname ) : newfile = fname + ".old" print ( "{} -> {}" . format ( fname , newfile ) ) os . rename ( fname , newfile ) | Callback example when we try to overwrite an existing screenshot . |
14,208 | def save ( self , mon = 0 , output = "monitor-{mon}.png" , callback = None ) : monitors = self . monitors if not monitors : raise ScreenShotError ( "No monitor found." ) if mon == 0 : for idx , monitor in enumerate ( monitors [ 1 : ] , 1 ) : fname = output . format ( mon = idx , date = datetime . now ( ) , ** monitor ) if callable ( callback ) : callback ( fname ) sct = self . grab ( monitor ) to_png ( sct . rgb , sct . size , level = self . compression_level , output = fname ) yield fname else : mon = 0 if mon == - 1 else mon try : monitor = monitors [ mon ] except IndexError : raise ScreenShotError ( "Monitor {!r} does not exist." . format ( mon ) ) output = output . format ( mon = mon , date = datetime . now ( ) , ** monitor ) if callable ( callback ) : callback ( output ) sct = self . grab ( monitor ) to_png ( sct . rgb , sct . size , level = self . compression_level , output = output ) yield output | Grab a screen shot and save it to a file . |
14,209 | def shot ( self , ** kwargs ) : kwargs [ "mon" ] = kwargs . get ( "mon" , 1 ) return next ( self . save ( ** kwargs ) ) | Helper to save the screen shot of the 1st monitor by default . You can pass the same arguments as for save . |
14,210 | def _cfactory ( attr , func , argtypes , restype , errcheck = None ) : meth = getattr ( attr , func ) meth . argtypes = argtypes meth . restype = restype if errcheck : meth . errcheck = errcheck | Factory to create a ctypes function and automatically manage errors . |
14,211 | def _set_dpi_awareness ( self ) : version = sys . getwindowsversion ( ) [ : 2 ] if version >= ( 6 , 3 ) : ctypes . windll . shcore . SetProcessDpiAwareness ( 2 ) elif ( 6 , 0 ) <= version < ( 6 , 3 ) : self . user32 . SetProcessDPIAware ( ) | Set DPI aware to capture full screen on Hi - DPI monitors . |
14,212 | def mss ( ** kwargs ) : os_ = platform . system ( ) . lower ( ) if os_ == "darwin" : from . import darwin return darwin . MSS ( ** kwargs ) if os_ == "linux" : from . import linux return linux . MSS ( ** kwargs ) if os_ == "windows" : from . import windows return windows . MSS ( ** kwargs ) raise ScreenShotError ( "System {!r} not (yet?) implemented." . format ( os_ ) ) | Factory returning a proper MSS class instance . |
14,213 | def hsl2rgb ( hsl ) : h , s , l = [ float ( v ) for v in hsl ] if not ( 0.0 - FLOAT_ERROR <= s <= 1.0 + FLOAT_ERROR ) : raise ValueError ( "Saturation must be between 0 and 1." ) if not ( 0.0 - FLOAT_ERROR <= l <= 1.0 + FLOAT_ERROR ) : raise ValueError ( "Lightness must be between 0 and 1." ) if s == 0 : return l , l , l if l < 0.5 : v2 = l * ( 1.0 + s ) else : v2 = ( l + s ) - ( s * l ) v1 = 2.0 * l - v2 r = _hue2rgb ( v1 , v2 , h + ( 1.0 / 3 ) ) g = _hue2rgb ( v1 , v2 , h ) b = _hue2rgb ( v1 , v2 , h - ( 1.0 / 3 ) ) return r , g , b | Convert HSL representation towards RGB |
14,214 | def rgb2hsl ( rgb ) : r , g , b = [ float ( v ) for v in rgb ] for name , v in { 'Red' : r , 'Green' : g , 'Blue' : b } . items ( ) : if not ( 0 - FLOAT_ERROR <= v <= 1 + FLOAT_ERROR ) : raise ValueError ( "%s must be between 0 and 1. You provided %r." % ( name , v ) ) vmin = min ( r , g , b ) vmax = max ( r , g , b ) diff = vmax - vmin vsum = vmin + vmax l = vsum / 2 if diff < FLOAT_ERROR : return ( 0.0 , 0.0 , l ) if l < 0.5 : s = diff / vsum else : s = diff / ( 2.0 - vsum ) dr = ( ( ( vmax - r ) / 6 ) + ( diff / 2 ) ) / diff dg = ( ( ( vmax - g ) / 6 ) + ( diff / 2 ) ) / diff db = ( ( ( vmax - b ) / 6 ) + ( diff / 2 ) ) / diff if r == vmax : h = db - dg elif g == vmax : h = ( 1.0 / 3 ) + dr - db elif b == vmax : h = ( 2.0 / 3 ) + dg - dr if h < 0 : h += 1 if h > 1 : h -= 1 return ( h , s , l ) | Convert RGB representation towards HSL |
14,215 | def rgb2hex ( rgb , force_long = False ) : hx = '' . join ( [ "%02x" % int ( c * 255 + 0.5 - FLOAT_ERROR ) for c in rgb ] ) if not force_long and hx [ 0 : : 2 ] == hx [ 1 : : 2 ] : hx = '' . join ( hx [ 0 : : 2 ] ) return "#%s" % hx | Transform RGB tuple to hex RGB representation |
14,216 | def hex2rgb ( str_rgb ) : try : rgb = str_rgb [ 1 : ] if len ( rgb ) == 6 : r , g , b = rgb [ 0 : 2 ] , rgb [ 2 : 4 ] , rgb [ 4 : 6 ] elif len ( rgb ) == 3 : r , g , b = rgb [ 0 ] * 2 , rgb [ 1 ] * 2 , rgb [ 2 ] * 2 else : raise ValueError ( ) except : raise ValueError ( "Invalid value %r provided for rgb color." % str_rgb ) return tuple ( [ float ( int ( v , 16 ) ) / 255 for v in ( r , g , b ) ] ) | Transform hex RGB representation to RGB tuple |
14,217 | def hex2web ( hex ) : dec_rgb = tuple ( int ( v * 255 ) for v in hex2rgb ( hex ) ) if dec_rgb in RGB_TO_COLOR_NAMES : color_name = RGB_TO_COLOR_NAMES [ dec_rgb ] [ 0 ] return color_name if len ( re . sub ( r"[^A-Z]" , "" , color_name ) ) > 1 else color_name . lower ( ) if len ( hex ) == 7 : if hex [ 1 ] == hex [ 2 ] and hex [ 3 ] == hex [ 4 ] and hex [ 5 ] == hex [ 6 ] : return '#' + hex [ 1 ] + hex [ 3 ] + hex [ 5 ] return hex | Converts HEX representation to WEB |
14,218 | def web2hex ( web , force_long = False ) : if web . startswith ( '#' ) : if ( LONG_HEX_COLOR . match ( web ) or ( not force_long and SHORT_HEX_COLOR . match ( web ) ) ) : return web . lower ( ) elif SHORT_HEX_COLOR . match ( web ) and force_long : return '#' + '' . join ( [ ( "%s" % ( t , ) ) * 2 for t in web [ 1 : ] ] ) raise AttributeError ( "%r is not in web format. Need 3 or 6 hex digit." % web ) web = web . lower ( ) if web not in COLOR_NAME_TO_RGB : raise ValueError ( "%r is not a recognized color." % web ) return rgb2hex ( [ float ( int ( v ) ) / 255 for v in COLOR_NAME_TO_RGB [ web ] ] , force_long ) | Converts WEB representation to HEX |
14,219 | def color_scale ( begin_hsl , end_hsl , nb ) : if nb < 0 : raise ValueError ( "Unsupported negative number of colors (nb=%r)." % nb ) step = tuple ( [ float ( end_hsl [ i ] - begin_hsl [ i ] ) / nb for i in range ( 0 , 3 ) ] ) if nb > 0 else ( 0 , 0 , 0 ) def mul ( step , value ) : return tuple ( [ v * value for v in step ] ) def add_v ( step , step2 ) : return tuple ( [ v + step2 [ i ] for i , v in enumerate ( step ) ] ) return [ add_v ( begin_hsl , mul ( step , r ) ) for r in range ( 0 , nb + 1 ) ] | Returns a list of nb color HSL tuples between begin_hsl and end_hsl |
14,220 | def RGB_color_picker ( obj ) : digest = hashlib . sha384 ( str ( obj ) . encode ( 'utf-8' ) ) . hexdigest ( ) subsize = int ( len ( digest ) / 3 ) splitted_digest = [ digest [ i * subsize : ( i + 1 ) * subsize ] for i in range ( 3 ) ] max_value = float ( int ( "f" * subsize , 16 ) ) components = ( int ( d , 16 ) / max_value for d in splitted_digest ) return Color ( rgb2hex ( components ) ) | Build a color representation from the string representation of an object |
14,221 | def _strip_marker_elem ( elem_name , elements ) : extra_indexes = [ ] preceding_operators = [ "and" ] if elem_name == "extra" else [ "and" , "or" ] for i , element in enumerate ( elements ) : if isinstance ( element , list ) : cancelled = _strip_marker_elem ( elem_name , element ) if cancelled : extra_indexes . append ( i ) elif isinstance ( element , tuple ) and element [ 0 ] . value == elem_name : extra_indexes . append ( i ) for i in reversed ( extra_indexes ) : del elements [ i ] if i > 0 and elements [ i - 1 ] in preceding_operators : del elements [ i - 1 ] elif elements : del elements [ 0 ] return not elements | Remove the supplied element from the marker . |
14,222 | def _get_stripped_marker ( marker , strip_func ) : if not marker : return None marker = _ensure_marker ( marker ) elements = marker . _markers strip_func ( elements ) if elements : return marker return None | Build a new marker which is cleaned according to strip_func |
14,223 | def get_contained_pyversions ( marker ) : collection = [ ] if not marker : return set ( ) marker = _ensure_marker ( marker ) _markers_collect_pyversions ( marker . _markers , collection ) marker_str = " and " . join ( sorted ( collection ) ) if not marker_str : return set ( ) marker_dict = distlib . markers . parse_marker ( marker_str ) [ 0 ] version_set = set ( ) pyversions , _ = parse_marker_dict ( marker_dict ) if isinstance ( pyversions , set ) : version_set . update ( pyversions ) elif pyversions is not None : version_set . add ( pyversions ) versions = set ( ) if version_set : versions = reduce ( lambda x , y : x & y , version_set ) return versions | Collect all python_version operands from a marker . |
14,224 | def contains_pyversion ( marker ) : if not marker : return False marker = _ensure_marker ( marker ) return _markers_contains_pyversion ( marker . _markers ) | Check whether a marker contains a python_version operand . |
14,225 | def distance ( a , b ) : R = 3963 lat1 , lon1 = math . radians ( a [ 0 ] ) , math . radians ( a [ 1 ] ) lat2 , lon2 = math . radians ( b [ 0 ] ) , math . radians ( b [ 1 ] ) return math . acos ( math . sin ( lat1 ) * math . sin ( lat2 ) + math . cos ( lat1 ) * math . cos ( lat2 ) * math . cos ( lon1 - lon2 ) ) * R | Calculates distance between two latitude - longitude coordinates . |
14,226 | def energy ( self ) : e = 0 for i in range ( len ( self . state ) ) : e += self . distance_matrix [ self . state [ i - 1 ] ] [ self . state [ i ] ] return e | Calculates the length of the route . |
14,227 | def round_figures ( x , n ) : return round ( x , int ( n - math . ceil ( math . log10 ( abs ( x ) ) ) ) ) | Returns x rounded to n significant figures . |
14,228 | def save_state ( self , fname = None ) : if not fname : date = datetime . datetime . now ( ) . strftime ( "%Y-%m-%dT%Hh%Mm%Ss" ) fname = date + "_energy_" + str ( self . energy ( ) ) + ".state" with open ( fname , "wb" ) as fh : pickle . dump ( self . state , fh ) | Saves state to pickle |
14,229 | def load_state ( self , fname = None ) : with open ( fname , 'rb' ) as fh : self . state = pickle . load ( fh ) | Loads state from pickle |
14,230 | def set_schedule ( self , schedule ) : self . Tmax = schedule [ 'tmax' ] self . Tmin = schedule [ 'tmin' ] self . steps = int ( schedule [ 'steps' ] ) self . updates = int ( schedule [ 'updates' ] ) | Takes the output from auto and sets the attributes |
14,231 | def copy_state ( self , state ) : if self . copy_strategy == 'deepcopy' : return copy . deepcopy ( state ) elif self . copy_strategy == 'slice' : return state [ : ] elif self . copy_strategy == 'method' : return state . copy ( ) else : raise RuntimeError ( 'No implementation found for ' + 'the self.copy_strategy "%s"' % self . copy_strategy ) | Returns an exact copy of the provided state Implemented according to self . copy_strategy one of |
14,232 | def default_update ( self , step , T , E , acceptance , improvement ) : elapsed = time . time ( ) - self . start if step == 0 : print ( ' Temperature Energy Accept Improve Elapsed Remaining' , file = sys . stderr ) print ( '\r%12.5f %12.2f %s ' % ( T , E , time_string ( elapsed ) ) , file = sys . stderr , end = "\r" ) sys . stderr . flush ( ) else : remain = ( self . steps - step ) * ( elapsed / step ) print ( '\r%12.5f %12.2f %7.2f%% %7.2f%% %s %s\r' % ( T , E , 100.0 * acceptance , 100.0 * improvement , time_string ( elapsed ) , time_string ( remain ) ) , file = sys . stderr , end = "\r" ) sys . stderr . flush ( ) | Default update outputs to stderr . |
14,233 | def anneal ( self ) : step = 0 self . start = time . time ( ) if self . Tmin <= 0.0 : raise Exception ( 'Exponential cooling requires a minimum "\ "temperature greater than zero.' ) Tfactor = - math . log ( self . Tmax / self . Tmin ) T = self . Tmax E = self . energy ( ) prevState = self . copy_state ( self . state ) prevEnergy = E self . best_state = self . copy_state ( self . state ) self . best_energy = E trials , accepts , improves = 0 , 0 , 0 if self . updates > 0 : updateWavelength = self . steps / self . updates self . update ( step , T , E , None , None ) while step < self . steps and not self . user_exit : step += 1 T = self . Tmax * math . exp ( Tfactor * step / self . steps ) self . move ( ) E = self . energy ( ) dE = E - prevEnergy trials += 1 if dE > 0.0 and math . exp ( - dE / T ) < random . random ( ) : self . state = self . copy_state ( prevState ) E = prevEnergy else : accepts += 1 if dE < 0.0 : improves += 1 prevState = self . copy_state ( self . state ) prevEnergy = E if E < self . best_energy : self . best_state = self . copy_state ( self . state ) self . best_energy = E if self . updates > 1 : if ( step // updateWavelength ) > ( ( step - 1 ) // updateWavelength ) : self . update ( step , T , E , accepts / trials , improves / trials ) trials , accepts , improves = 0 , 0 , 0 self . state = self . copy_state ( self . best_state ) if self . save_state_on_exit : self . save_state ( ) return self . best_state , self . best_energy | Minimizes the energy of a system by simulated annealing . |
14,234 | def auto ( self , minutes , steps = 2000 ) : def run ( T , steps ) : E = self . energy ( ) prevState = self . copy_state ( self . state ) prevEnergy = E accepts , improves = 0 , 0 for _ in range ( steps ) : self . move ( ) E = self . energy ( ) dE = E - prevEnergy if dE > 0.0 and math . exp ( - dE / T ) < random . random ( ) : self . state = self . copy_state ( prevState ) E = prevEnergy else : accepts += 1 if dE < 0.0 : improves += 1 prevState = self . copy_state ( self . state ) prevEnergy = E return E , float ( accepts ) / steps , float ( improves ) / steps step = 0 self . start = time . time ( ) T = 0.0 E = self . energy ( ) self . update ( step , T , E , None , None ) while T == 0.0 : step += 1 self . move ( ) T = abs ( self . energy ( ) - E ) E , acceptance , improvement = run ( T , steps ) step += steps while acceptance > 0.98 : T = round_figures ( T / 1.5 , 2 ) E , acceptance , improvement = run ( T , steps ) step += steps self . update ( step , T , E , acceptance , improvement ) while acceptance < 0.98 : T = round_figures ( T * 1.5 , 2 ) E , acceptance , improvement = run ( T , steps ) step += steps self . update ( step , T , E , acceptance , improvement ) Tmax = T while improvement > 0.0 : T = round_figures ( T / 1.5 , 2 ) E , acceptance , improvement = run ( T , steps ) step += steps self . update ( step , T , E , acceptance , improvement ) Tmin = T elapsed = time . time ( ) - self . start duration = round_figures ( int ( 60.0 * minutes * step / elapsed ) , 2 ) return { 'tmax' : Tmax , 'tmin' : Tmin , 'steps' : duration , 'updates' : self . updates } | Explores the annealing landscape and estimates optimal temperature settings . |
14,235 | def load ( self , shapefile = None ) : if shapefile : ( shapeName , ext ) = os . path . splitext ( shapefile ) self . shapeName = shapeName try : self . shp = open ( "%s.shp" % shapeName , "rb" ) except IOError : raise ShapefileException ( "Unable to open %s.shp" % shapeName ) try : self . shx = open ( "%s.shx" % shapeName , "rb" ) except IOError : raise ShapefileException ( "Unable to open %s.shx" % shapeName ) try : self . dbf = open ( "%s.dbf" % shapeName , "rb" ) except IOError : raise ShapefileException ( "Unable to open %s.dbf" % shapeName ) if self . shp : self . __shpHeader ( ) if self . dbf : self . __dbfHeader ( ) | Opens a shapefile from a filename or file - like object . Normally this method would be called by the constructor with the file object or file name as an argument . |
14,236 | def shapes ( self ) : shp = self . __getFileObj ( self . shp ) shp . seek ( 100 ) shapes = [ ] while shp . tell ( ) < self . shpLength : shapes . append ( self . __shape ( ) ) return shapes | Returns all shapes in a shapefile . |
14,237 | def __dbfHeaderLength ( self ) : if not self . __dbfHdrLength : if not self . dbf : raise ShapefileException ( "Shapefile Reader requires a shapefile or file-like object. (no dbf file found)" ) dbf = self . dbf ( self . numRecords , self . __dbfHdrLength ) = unpack ( "<xxxxLH22x" , dbf . read ( 32 ) ) return self . __dbfHdrLength | Retrieves the header length of a dbf file header . |
14,238 | def __recordFmt ( self ) : if not self . numRecords : self . __dbfHeader ( ) fmt = '' . join ( [ '%ds' % fieldinfo [ 2 ] for fieldinfo in self . fields ] ) fmtSize = calcsize ( fmt ) return ( fmt , fmtSize ) | Calculates the size of a . shp geometry record . |
14,239 | def records ( self ) : if not self . numRecords : self . __dbfHeader ( ) records = [ ] f = self . __getFileObj ( self . dbf ) f . seek ( self . __dbfHeaderLength ( ) ) for i in range ( self . numRecords ) : r = self . __record ( ) if r : records . append ( r ) return records | Returns all records in a dbf file . |
14,240 | def point ( self , x , y , z = 0 , m = 0 ) : pointShape = _Shape ( self . shapeType ) pointShape . points . append ( [ x , y , z , m ] ) self . _shapes . append ( pointShape ) | Creates a point shape . |
14,241 | def saveShp ( self , target ) : if not hasattr ( target , "write" ) : target = os . path . splitext ( target ) [ 0 ] + '.shp' if not self . shapeType : self . shapeType = self . _shapes [ 0 ] . shapeType self . shp = self . __getFileObj ( target ) self . __shapefileHeader ( self . shp , headerType = 'shp' ) self . __shpRecords ( ) | Save an shp file . |
14,242 | def saveShx ( self , target ) : if not hasattr ( target , "write" ) : target = os . path . splitext ( target ) [ 0 ] + '.shx' if not self . shapeType : self . shapeType = self . _shapes [ 0 ] . shapeType self . shx = self . __getFileObj ( target ) self . __shapefileHeader ( self . shx , headerType = 'shx' ) self . __shxRecords ( ) | Save an shx file . |
14,243 | def saveDbf ( self , target ) : if not hasattr ( target , "write" ) : target = os . path . splitext ( target ) [ 0 ] + '.dbf' self . dbf = self . __getFileObj ( target ) self . __dbfHeader ( ) self . __dbfRecords ( ) | Save a dbf file . |
14,244 | def save ( self , target = None , shp = None , shx = None , dbf = None ) : if shp : self . saveShp ( shp ) if shx : self . saveShx ( shx ) if dbf : self . saveDbf ( dbf ) elif target : self . saveShp ( target ) self . shp . close ( ) self . saveShx ( target ) self . shx . close ( ) self . saveDbf ( target ) self . dbf . close ( ) | Save the shapefile data to three files or three file - like objects . SHP and DBF files can also be written exclusively using saveShp saveShx and saveDbf respectively . |
14,245 | def delete ( self , shape = None , part = None , point = None ) : if shape and part and point : del self . _shapes [ shape ] [ part ] [ point ] elif shape and part and not point : del self . _shapes [ shape ] [ part ] elif shape and not part and not point : del self . _shapes [ shape ] elif not shape and not part and point : for s in self . _shapes : if s . shapeType == 1 : del self . _shapes [ point ] else : for part in s . parts : del s [ part ] [ point ] elif not shape and part and point : for s in self . _shapes : del s [ part ] [ point ] elif not shape and part and not point : for s in self . _shapes : del s [ part ] | Deletes the specified part of any shape by specifying a shape number part number or point number . |
14,246 | def balance ( self ) : if len ( self . records ) > len ( self . _shapes ) : self . null ( ) elif len ( self . records ) < len ( self . _shapes ) : self . record ( ) | Adds a corresponding empty attribute or null geometry record depending on which type of record was created to make sure all three files are in synch . |
14,247 | def __fieldNorm ( self , fieldName ) : if len ( fieldName ) > 11 : fieldName = fieldName [ : 11 ] fieldName = fieldName . upper ( ) fieldName . replace ( ' ' , '_' ) | Normalizes a dbf field name to fit within the spec and the expectations of certain ESRI software . |
14,248 | def diff_main ( self , text1 , text2 , checklines = True , deadline = None ) : if deadline == None : if self . Diff_Timeout <= 0 : deadline = sys . maxsize else : deadline = time . time ( ) + self . Diff_Timeout if text1 == None or text2 == None : raise ValueError ( "Null inputs. (diff_main)" ) if text1 == text2 : if text1 : return [ ( self . DIFF_EQUAL , text1 ) ] return [ ] commonlength = self . diff_commonPrefix ( text1 , text2 ) commonprefix = text1 [ : commonlength ] text1 = text1 [ commonlength : ] text2 = text2 [ commonlength : ] commonlength = self . diff_commonSuffix ( text1 , text2 ) if commonlength == 0 : commonsuffix = '' else : commonsuffix = text1 [ - commonlength : ] text1 = text1 [ : - commonlength ] text2 = text2 [ : - commonlength ] diffs = self . diff_compute ( text1 , text2 , checklines , deadline ) if commonprefix : diffs [ : 0 ] = [ ( self . DIFF_EQUAL , commonprefix ) ] if commonsuffix : diffs . append ( ( self . DIFF_EQUAL , commonsuffix ) ) self . diff_cleanupMerge ( diffs ) return diffs | Find the differences between two texts . Simplifies the problem by stripping any common prefix or suffix off the texts before diffing . |
14,249 | def diff_compute ( self , text1 , text2 , checklines , deadline ) : if not text1 : return [ ( self . DIFF_INSERT , text2 ) ] if not text2 : return [ ( self . DIFF_DELETE , text1 ) ] if len ( text1 ) > len ( text2 ) : ( longtext , shorttext ) = ( text1 , text2 ) else : ( shorttext , longtext ) = ( text1 , text2 ) i = longtext . find ( shorttext ) if i != - 1 : diffs = [ ( self . DIFF_INSERT , longtext [ : i ] ) , ( self . DIFF_EQUAL , shorttext ) , ( self . DIFF_INSERT , longtext [ i + len ( shorttext ) : ] ) ] if len ( text1 ) > len ( text2 ) : diffs [ 0 ] = ( self . DIFF_DELETE , diffs [ 0 ] [ 1 ] ) diffs [ 2 ] = ( self . DIFF_DELETE , diffs [ 2 ] [ 1 ] ) return diffs if len ( shorttext ) == 1 : return [ ( self . DIFF_DELETE , text1 ) , ( self . DIFF_INSERT , text2 ) ] hm = self . diff_halfMatch ( text1 , text2 ) if hm : ( text1_a , text1_b , text2_a , text2_b , mid_common ) = hm diffs_a = self . diff_main ( text1_a , text2_a , checklines , deadline ) diffs_b = self . diff_main ( text1_b , text2_b , checklines , deadline ) return diffs_a + [ ( self . DIFF_EQUAL , mid_common ) ] + diffs_b if checklines and len ( text1 ) > 100 and len ( text2 ) > 100 : return self . diff_lineMode ( text1 , text2 , deadline ) return self . diff_bisect ( text1 , text2 , deadline ) | Find the differences between two texts . Assumes that the texts do not have any common prefix or suffix . |
14,250 | def diff_lineMode ( self , text1 , text2 , deadline ) : ( text1 , text2 , linearray ) = self . diff_linesToChars ( text1 , text2 ) diffs = self . diff_main ( text1 , text2 , False , deadline ) self . diff_charsToLines ( diffs , linearray ) self . diff_cleanupSemantic ( diffs ) diffs . append ( ( self . DIFF_EQUAL , '' ) ) pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while pointer < len ( diffs ) : if diffs [ pointer ] [ 0 ] == self . DIFF_INSERT : count_insert += 1 text_insert += diffs [ pointer ] [ 1 ] elif diffs [ pointer ] [ 0 ] == self . DIFF_DELETE : count_delete += 1 text_delete += diffs [ pointer ] [ 1 ] elif diffs [ pointer ] [ 0 ] == self . DIFF_EQUAL : if count_delete >= 1 and count_insert >= 1 : subDiff = self . diff_main ( text_delete , text_insert , False , deadline ) diffs [ pointer - count_delete - count_insert : pointer ] = subDiff pointer = pointer - count_delete - count_insert + len ( subDiff ) count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' pointer += 1 diffs . pop ( ) return diffs | Do a quick line - level diff on both strings then rediff the parts for greater accuracy . This speedup can produce non - minimal diffs . |
14,251 | def diff_bisectSplit ( self , text1 , text2 , x , y , deadline ) : text1a = text1 [ : x ] text2a = text2 [ : y ] text1b = text1 [ x : ] text2b = text2 [ y : ] diffs = self . diff_main ( text1a , text2a , False , deadline ) diffsb = self . diff_main ( text1b , text2b , False , deadline ) return diffs + diffsb | Given the location of the middle snake split the diff in two parts and recurse . |
14,252 | def diff_linesToChars ( self , text1 , text2 ) : lineArray = [ ] lineHash = { } lineArray . append ( '' ) def diff_linesToCharsMunge ( text ) : chars = [ ] lineStart = 0 lineEnd = - 1 while lineEnd < len ( text ) - 1 : lineEnd = text . find ( '\n' , lineStart ) if lineEnd == - 1 : lineEnd = len ( text ) - 1 line = text [ lineStart : lineEnd + 1 ] if line in lineHash : chars . append ( chr ( lineHash [ line ] ) ) else : if len ( lineArray ) == maxLines : line = text [ lineStart : ] lineEnd = len ( text ) lineArray . append ( line ) lineHash [ line ] = len ( lineArray ) - 1 chars . append ( chr ( len ( lineArray ) - 1 ) ) lineStart = lineEnd + 1 return "" . join ( chars ) maxLines = 666666 chars1 = diff_linesToCharsMunge ( text1 ) maxLines = 1114111 chars2 = diff_linesToCharsMunge ( text2 ) return ( chars1 , chars2 , lineArray ) | Split two texts into an array of strings . Reduce the texts to a string of hashes where each Unicode character represents one line . |
14,253 | def diff_charsToLines ( self , diffs , lineArray ) : for i in range ( len ( diffs ) ) : text = [ ] for char in diffs [ i ] [ 1 ] : text . append ( lineArray [ ord ( char ) ] ) diffs [ i ] = ( diffs [ i ] [ 0 ] , "" . join ( text ) ) | Rehydrate the text in a diff from a string of line hashes to real lines of text . |
14,254 | def diff_commonPrefix ( self , text1 , text2 ) : if not text1 or not text2 or text1 [ 0 ] != text2 [ 0 ] : return 0 pointermin = 0 pointermax = min ( len ( text1 ) , len ( text2 ) ) pointermid = pointermax pointerstart = 0 while pointermin < pointermid : if text1 [ pointerstart : pointermid ] == text2 [ pointerstart : pointermid ] : pointermin = pointermid pointerstart = pointermin else : pointermax = pointermid pointermid = ( pointermax - pointermin ) // 2 + pointermin return pointermid | Determine the common prefix of two strings . |
14,255 | def diff_commonSuffix ( self , text1 , text2 ) : if not text1 or not text2 or text1 [ - 1 ] != text2 [ - 1 ] : return 0 pointermin = 0 pointermax = min ( len ( text1 ) , len ( text2 ) ) pointermid = pointermax pointerend = 0 while pointermin < pointermid : if ( text1 [ - pointermid : len ( text1 ) - pointerend ] == text2 [ - pointermid : len ( text2 ) - pointerend ] ) : pointermin = pointermid pointerend = pointermin else : pointermax = pointermid pointermid = ( pointermax - pointermin ) // 2 + pointermin return pointermid | Determine the common suffix of two strings . |
14,256 | def diff_commonOverlap ( self , text1 , text2 ) : text1_length = len ( text1 ) text2_length = len ( text2 ) if text1_length == 0 or text2_length == 0 : return 0 if text1_length > text2_length : text1 = text1 [ - text2_length : ] elif text1_length < text2_length : text2 = text2 [ : text1_length ] text_length = min ( text1_length , text2_length ) if text1 == text2 : return text_length best = 0 length = 1 while True : pattern = text1 [ - length : ] found = text2 . find ( pattern ) if found == - 1 : return best length += found if found == 0 or text1 [ - length : ] == text2 [ : length ] : best = length length += 1 | Determine if the suffix of one string is the prefix of another . |
14,257 | def diff_halfMatch ( self , text1 , text2 ) : if self . Diff_Timeout <= 0 : return None if len ( text1 ) > len ( text2 ) : ( longtext , shorttext ) = ( text1 , text2 ) else : ( shorttext , longtext ) = ( text1 , text2 ) if len ( longtext ) < 4 or len ( shorttext ) * 2 < len ( longtext ) : return None def diff_halfMatchI ( longtext , shorttext , i ) : seed = longtext [ i : i + len ( longtext ) // 4 ] best_common = '' j = shorttext . find ( seed ) while j != - 1 : prefixLength = self . diff_commonPrefix ( longtext [ i : ] , shorttext [ j : ] ) suffixLength = self . diff_commonSuffix ( longtext [ : i ] , shorttext [ : j ] ) if len ( best_common ) < suffixLength + prefixLength : best_common = ( shorttext [ j - suffixLength : j ] + shorttext [ j : j + prefixLength ] ) best_longtext_a = longtext [ : i - suffixLength ] best_longtext_b = longtext [ i + prefixLength : ] best_shorttext_a = shorttext [ : j - suffixLength ] best_shorttext_b = shorttext [ j + prefixLength : ] j = shorttext . find ( seed , j + 1 ) if len ( best_common ) * 2 >= len ( longtext ) : return ( best_longtext_a , best_longtext_b , best_shorttext_a , best_shorttext_b , best_common ) else : return None hm1 = diff_halfMatchI ( longtext , shorttext , ( len ( longtext ) + 3 ) // 4 ) hm2 = diff_halfMatchI ( longtext , shorttext , ( len ( longtext ) + 1 ) // 2 ) if not hm1 and not hm2 : return None elif not hm2 : hm = hm1 elif not hm1 : hm = hm2 else : if len ( hm1 [ 4 ] ) > len ( hm2 [ 4 ] ) : hm = hm1 else : hm = hm2 if len ( text1 ) > len ( text2 ) : ( text1_a , text1_b , text2_a , text2_b , mid_common ) = hm else : ( text2_a , text2_b , text1_a , text1_b , mid_common ) = hm return ( text1_a , text1_b , text2_a , text2_b , mid_common ) | Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non - minimal diffs . |
14,258 | def diff_cleanupEfficiency ( self , diffs ) : changes = False equalities = [ ] lastEquality = None pointer = 0 pre_ins = False pre_del = False post_ins = False post_del = False while pointer < len ( diffs ) : if diffs [ pointer ] [ 0 ] == self . DIFF_EQUAL : if ( len ( diffs [ pointer ] [ 1 ] ) < self . Diff_EditCost and ( post_ins or post_del ) ) : equalities . append ( pointer ) pre_ins = post_ins pre_del = post_del lastEquality = diffs [ pointer ] [ 1 ] else : equalities = [ ] lastEquality = None post_ins = post_del = False else : if diffs [ pointer ] [ 0 ] == self . DIFF_DELETE : post_del = True else : post_ins = True if lastEquality and ( ( pre_ins and pre_del and post_ins and post_del ) or ( ( len ( lastEquality ) < self . Diff_EditCost / 2 ) and ( pre_ins + pre_del + post_ins + post_del ) == 3 ) ) : diffs . insert ( equalities [ - 1 ] , ( self . DIFF_DELETE , lastEquality ) ) diffs [ equalities [ - 1 ] + 1 ] = ( self . DIFF_INSERT , diffs [ equalities [ - 1 ] + 1 ] [ 1 ] ) equalities . pop ( ) lastEquality = None if pre_ins and pre_del : post_ins = post_del = True equalities = [ ] else : if len ( equalities ) : equalities . pop ( ) if len ( equalities ) : pointer = equalities [ - 1 ] else : pointer = - 1 post_ins = post_del = False changes = True pointer += 1 if changes : self . diff_cleanupMerge ( diffs ) | Reduce the number of edits by eliminating operationally trivial equalities . |
14,259 | def diff_prettyHtml ( self , diffs ) : html = [ ] for ( op , data ) in diffs : text = ( data . replace ( "&" , "&" ) . replace ( "<" , "<" ) . replace ( ">" , ">" ) . replace ( "\n" , "¶<br>" ) ) if op == self . DIFF_INSERT : html . append ( "<ins style=\"background:#e6ffe6;\">%s</ins>" % text ) elif op == self . DIFF_DELETE : html . append ( "<del style=\"background:#ffe6e6;\">%s</del>" % text ) elif op == self . DIFF_EQUAL : html . append ( "<span>%s</span>" % text ) return "" . join ( html ) | Convert a diff array into a pretty HTML report . |
14,260 | def diff_levenshtein ( self , diffs ) : levenshtein = 0 insertions = 0 deletions = 0 for ( op , data ) in diffs : if op == self . DIFF_INSERT : insertions += len ( data ) elif op == self . DIFF_DELETE : deletions += len ( data ) elif op == self . DIFF_EQUAL : levenshtein += max ( insertions , deletions ) insertions = 0 deletions = 0 levenshtein += max ( insertions , deletions ) return levenshtein | Compute the Levenshtein distance ; the number of inserted deleted or substituted characters . |
14,261 | def diff_fromDelta ( self , text1 , delta ) : diffs = [ ] pointer = 0 tokens = delta . split ( "\t" ) for token in tokens : if token == "" : continue param = token [ 1 : ] if token [ 0 ] == "+" : param = urllib . parse . unquote ( param ) diffs . append ( ( self . DIFF_INSERT , param ) ) elif token [ 0 ] == "-" or token [ 0 ] == "=" : try : n = int ( param ) except ValueError : raise ValueError ( "Invalid number in diff_fromDelta: " + param ) if n < 0 : raise ValueError ( "Negative number in diff_fromDelta: " + param ) text = text1 [ pointer : pointer + n ] pointer += n if token [ 0 ] == "=" : diffs . append ( ( self . DIFF_EQUAL , text ) ) else : diffs . append ( ( self . DIFF_DELETE , text ) ) else : raise ValueError ( "Invalid diff operation in diff_fromDelta: " + token [ 0 ] ) if pointer != len ( text1 ) : raise ValueError ( "Delta length (%d) does not equal source text length (%d)." % ( pointer , len ( text1 ) ) ) return diffs | Given the original text1 and an encoded string which describes the operations required to transform text1 into text2 compute the full diff . |
14,262 | def match_main ( self , text , pattern , loc ) : if text == None or pattern == None : raise ValueError ( "Null inputs. (match_main)" ) loc = max ( 0 , min ( loc , len ( text ) ) ) if text == pattern : return 0 elif not text : return - 1 elif text [ loc : loc + len ( pattern ) ] == pattern : return loc else : match = self . match_bitap ( text , pattern , loc ) return match | Locate the best instance of pattern in text near loc . |
14,263 | def match_bitap ( self , text , pattern , loc ) : s = self . match_alphabet ( pattern ) def match_bitapScore ( e , x ) : accuracy = float ( e ) / len ( pattern ) proximity = abs ( loc - x ) if not self . Match_Distance : return proximity and 1.0 or accuracy return accuracy + ( proximity / float ( self . Match_Distance ) ) score_threshold = self . Match_Threshold best_loc = text . find ( pattern , loc ) if best_loc != - 1 : score_threshold = min ( match_bitapScore ( 0 , best_loc ) , score_threshold ) best_loc = text . rfind ( pattern , loc + len ( pattern ) ) if best_loc != - 1 : score_threshold = min ( match_bitapScore ( 0 , best_loc ) , score_threshold ) matchmask = 1 << ( len ( pattern ) - 1 ) best_loc = - 1 bin_max = len ( pattern ) + len ( text ) last_rd = None for d in range ( len ( pattern ) ) : bin_min = 0 bin_mid = bin_max while bin_min < bin_mid : if match_bitapScore ( d , loc + bin_mid ) <= score_threshold : bin_min = bin_mid else : bin_max = bin_mid bin_mid = ( bin_max - bin_min ) // 2 + bin_min bin_max = bin_mid start = max ( 1 , loc - bin_mid + 1 ) finish = min ( loc + bin_mid , len ( text ) ) + len ( pattern ) rd = [ 0 ] * ( finish + 2 ) rd [ finish + 1 ] = ( 1 << d ) - 1 for j in range ( finish , start - 1 , - 1 ) : if len ( text ) <= j - 1 : charMatch = 0 else : charMatch = s . get ( text [ j - 1 ] , 0 ) if d == 0 : rd [ j ] = ( ( rd [ j + 1 ] << 1 ) | 1 ) & charMatch else : rd [ j ] = ( ( ( rd [ j + 1 ] << 1 ) | 1 ) & charMatch ) | ( ( ( last_rd [ j + 1 ] | last_rd [ j ] ) << 1 ) | 1 ) | last_rd [ j + 1 ] if rd [ j ] & matchmask : score = match_bitapScore ( d , j - 1 ) if score <= score_threshold : score_threshold = score best_loc = j - 1 if best_loc > loc : start = max ( 1 , 2 * loc - best_loc ) else : break if match_bitapScore ( d + 1 , loc ) > score_threshold : break last_rd = rd return best_loc | Locate the best instance of pattern in text near loc using the Bitap algorithm . |
14,264 | def patch_addContext ( self , patch , text ) : if len ( text ) == 0 : return pattern = text [ patch . start2 : patch . start2 + patch . length1 ] padding = 0 while ( text . find ( pattern ) != text . rfind ( pattern ) and ( self . Match_MaxBits == 0 or len ( pattern ) < self . Match_MaxBits - self . Patch_Margin - self . Patch_Margin ) ) : padding += self . Patch_Margin pattern = text [ max ( 0 , patch . start2 - padding ) : patch . start2 + patch . length1 + padding ] padding += self . Patch_Margin prefix = text [ max ( 0 , patch . start2 - padding ) : patch . start2 ] if prefix : patch . diffs [ : 0 ] = [ ( self . DIFF_EQUAL , prefix ) ] suffix = text [ patch . start2 + patch . length1 : patch . start2 + patch . length1 + padding ] if suffix : patch . diffs . append ( ( self . DIFF_EQUAL , suffix ) ) patch . start1 -= len ( prefix ) patch . start2 -= len ( prefix ) patch . length1 += len ( prefix ) + len ( suffix ) patch . length2 += len ( prefix ) + len ( suffix ) | Increase the context until it is unique but don t let the pattern expand beyond Match_MaxBits . |
14,265 | def patch_deepCopy ( self , patches ) : patchesCopy = [ ] for patch in patches : patchCopy = patch_obj ( ) patchCopy . diffs = patch . diffs [ : ] patchCopy . start1 = patch . start1 patchCopy . start2 = patch . start2 patchCopy . length1 = patch . length1 patchCopy . length2 = patch . length2 patchesCopy . append ( patchCopy ) return patchesCopy | Given an array of patches return another array that is identical . |
14,266 | def patch_addPadding ( self , patches ) : paddingLength = self . Patch_Margin nullPadding = "" for x in range ( 1 , paddingLength + 1 ) : nullPadding += chr ( x ) for patch in patches : patch . start1 += paddingLength patch . start2 += paddingLength patch = patches [ 0 ] diffs = patch . diffs if not diffs or diffs [ 0 ] [ 0 ] != self . DIFF_EQUAL : diffs . insert ( 0 , ( self . DIFF_EQUAL , nullPadding ) ) patch . start1 -= paddingLength patch . start2 -= paddingLength patch . length1 += paddingLength patch . length2 += paddingLength elif paddingLength > len ( diffs [ 0 ] [ 1 ] ) : extraLength = paddingLength - len ( diffs [ 0 ] [ 1 ] ) newText = nullPadding [ len ( diffs [ 0 ] [ 1 ] ) : ] + diffs [ 0 ] [ 1 ] diffs [ 0 ] = ( diffs [ 0 ] [ 0 ] , newText ) patch . start1 -= extraLength patch . start2 -= extraLength patch . length1 += extraLength patch . length2 += extraLength patch = patches [ - 1 ] diffs = patch . diffs if not diffs or diffs [ - 1 ] [ 0 ] != self . DIFF_EQUAL : diffs . append ( ( self . DIFF_EQUAL , nullPadding ) ) patch . length1 += paddingLength patch . length2 += paddingLength elif paddingLength > len ( diffs [ - 1 ] [ 1 ] ) : extraLength = paddingLength - len ( diffs [ - 1 ] [ 1 ] ) newText = diffs [ - 1 ] [ 1 ] + nullPadding [ : extraLength ] diffs [ - 1 ] = ( diffs [ - 1 ] [ 0 ] , newText ) patch . length1 += extraLength patch . length2 += extraLength return nullPadding | Add some padding on text start and end so that edges can match something . Intended to be called only from within patch_apply . |
14,267 | def patch_toText ( self , patches ) : text = [ ] for patch in patches : text . append ( str ( patch ) ) return "" . join ( text ) | Take a list of patches and return a textual representation . |
14,268 | def diff_toDelta ( self , diffs ) : text = [ ] for ( op , data ) in diffs : if op == self . DIFF_INSERT : data = data . encode ( "utf-8" ) text . append ( "+" + urllib . quote ( data , "!~*'();/?:@&=+$,# " ) ) elif op == self . DIFF_DELETE : text . append ( "-%d" % len ( data ) ) elif op == self . DIFF_EQUAL : text . append ( "=%d" % len ( data ) ) return "\t" . join ( text ) | Crush the diff into an encoded string which describes the operations required to transform text1 into text2 . E . g . = 3 \ t - 2 \ t + ing - > Keep 3 chars delete 2 chars insert ing . Operations are tab - separated . Inserted text is escaped using %xx notation . |
14,269 | def patch_fromText ( self , textline ) : if type ( textline ) == unicode : textline = textline . encode ( "ascii" ) patches = [ ] if not textline : return patches text = textline . split ( '\n' ) while len ( text ) != 0 : m = re . match ( "^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$" , text [ 0 ] ) if not m : raise ValueError ( "Invalid patch string: " + text [ 0 ] ) patch = patch_obj ( ) patches . append ( patch ) patch . start1 = int ( m . group ( 1 ) ) if m . group ( 2 ) == '' : patch . start1 -= 1 patch . length1 = 1 elif m . group ( 2 ) == '0' : patch . length1 = 0 else : patch . start1 -= 1 patch . length1 = int ( m . group ( 2 ) ) patch . start2 = int ( m . group ( 3 ) ) if m . group ( 4 ) == '' : patch . start2 -= 1 patch . length2 = 1 elif m . group ( 4 ) == '0' : patch . length2 = 0 else : patch . start2 -= 1 patch . length2 = int ( m . group ( 4 ) ) del text [ 0 ] while len ( text ) != 0 : if text [ 0 ] : sign = text [ 0 ] [ 0 ] else : sign = '' line = urllib . unquote ( text [ 0 ] [ 1 : ] ) line = line . decode ( "utf-8" ) if sign == '+' : patch . diffs . append ( ( self . DIFF_INSERT , line ) ) elif sign == '-' : patch . diffs . append ( ( self . DIFF_DELETE , line ) ) elif sign == ' ' : patch . diffs . append ( ( self . DIFF_EQUAL , line ) ) elif sign == '@' : break elif sign == '' : pass else : raise ValueError ( "Invalid patch mode: '%s'\n%s" % ( sign , line ) ) del text [ 0 ] return patches | Parse a textual representation of patches and return a list of patch objects . |
14,270 | def diff_trees ( left , right , diff_options = None , formatter = None ) : if formatter is not None : formatter . prepare ( left , right ) if diff_options is None : diff_options = { } differ = diff . Differ ( ** diff_options ) diffs = differ . diff ( left , right ) if formatter is None : return list ( diffs ) return formatter . format ( diffs , left ) | Takes two lxml root elements or element trees |
14,271 | def diff_texts ( left , right , diff_options = None , formatter = None ) : return _diff ( etree . fromstring , left , right , diff_options = diff_options , formatter = formatter ) | Takes two Unicode strings containing XML |
14,272 | def diff_files ( left , right , diff_options = None , formatter = None ) : return _diff ( etree . parse , left , right , diff_options = diff_options , formatter = formatter ) | Takes two filenames or streams and diffs the XML in those files |
14,273 | def patch_tree ( actions , tree ) : patcher = patch . Patcher ( ) return patcher . patch ( actions , tree ) | Takes an lxml root element or element tree and a list of actions |
14,274 | def patch_text ( actions , tree ) : tree = etree . fromstring ( tree ) actions = patch . DiffParser ( ) . parse ( actions ) tree = patch_tree ( actions , tree ) return etree . tounicode ( tree ) | Takes a string with XML and a string with actions |
14,275 | def patch_file ( actions , tree ) : tree = etree . parse ( tree ) if isinstance ( actions , six . string_types ) : with open ( actions ) as f : actions = f . read ( ) else : actions = actions . read ( ) actions = patch . DiffParser ( ) . parse ( actions ) tree = patch_tree ( actions , tree ) return etree . tounicode ( tree ) | Takes two filenames or streams one with XML the other a diff |
14,276 | def url_to_text ( self , url ) : path , headers = urllib . request . urlretrieve ( url ) return self . path_to_text ( path ) | Download PDF file and transform its document to string . |
14,277 | def path_to_text ( self , path ) : rsrcmgr = PDFResourceManager ( ) retstr = StringIO ( ) codec = 'utf-8' laparams = LAParams ( ) device = TextConverter ( rsrcmgr , retstr , codec = codec , laparams = laparams ) fp = open ( path , 'rb' ) interpreter = PDFPageInterpreter ( rsrcmgr , device ) password = "" maxpages = 0 caching = True pagenos = set ( ) pages_data = PDFPage . get_pages ( fp , pagenos , maxpages = maxpages , password = password , caching = caching , check_extractable = True ) for page in pages_data : interpreter . process_page ( page ) text = retstr . getvalue ( ) text = text . replace ( "\n" , "" ) fp . close ( ) device . close ( ) retstr . close ( ) return text | Transform local PDF file to string . |
14,278 | def listup_sentence ( self , data , counter = 0 ) : delimiter = self . delimiter_list [ counter ] sentence_list = [ ] [ sentence_list . append ( sentence + delimiter ) for sentence in data . split ( delimiter ) if sentence != "" ] if counter + 1 < len ( self . delimiter_list ) : sentence_list_r = [ ] [ sentence_list_r . extend ( self . listup_sentence ( sentence , counter + 1 ) ) for sentence in sentence_list ] sentence_list = sentence_list_r return sentence_list | Divide string into sentence list . |
14,279 | def observe ( self , success , failure ) : if isinstance ( success , int ) is False : if isinstance ( success , float ) is False : raise TypeError ( ) if isinstance ( failure , int ) is False : if isinstance ( failure , float ) is False : raise TypeError ( ) if success <= 0 : raise ValueError ( ) if failure <= 0 : raise ValueError ( ) self . __success += success self . __failure += failure | Observation data . |
14,280 | def likelihood ( self ) : try : likelihood = self . __success / ( self . __success + self . __failure ) except ZeroDivisionError : likelihood = 0.0 return likelihood | Compute likelihood . |
14,281 | def expected_value ( self ) : alpha = self . __success + self . __default_alpha beta = self . __failure + self . __default_beta try : expected_value = alpha / ( alpha + beta ) except ZeroDivisionError : expected_value = 0.0 return expected_value | Compute expected value . |
14,282 | def variance ( self ) : alpha = self . __success + self . __default_alpha beta = self . __failure + self . __default_beta try : variance = alpha * beta / ( ( alpha + beta ) ** 2 ) * ( alpha + beta + 1 ) except ZeroDivisionError : variance = 0.0 return variance | Compute variance . |
14,283 | def __move ( self , current_pos ) : if self . __move_range is not None : next_pos = np . random . randint ( current_pos - self . __move_range , current_pos + self . __move_range ) if next_pos < 0 : next_pos = 0 elif next_pos >= self . var_arr . shape [ 0 ] - 1 : next_pos = self . var_arr . shape [ 0 ] - 1 return next_pos else : next_pos = np . random . randint ( self . var_arr . shape [ 0 ] - 1 ) return next_pos | Move in the feature map . |
14,284 | def summarize ( self , document , Abstractor , similarity_filter = None ) : if isinstance ( document , str ) is False : raise TypeError ( "The type of document must be str." ) if isinstance ( Abstractor , AbstractableDoc ) is False : raise TypeError ( "The type of Abstractor must be AbstractableDoc." ) if isinstance ( similarity_filter , SimilarityFilter ) is False and similarity_filter is not None : raise TypeError ( "The type of similarity_filter must be SimilarityFilter." ) normalized_sentences = self . listup_sentence ( document ) if similarity_filter is not None : normalized_sentences = similarity_filter . similar_filter_r ( normalized_sentences ) self . tokenize ( document ) words = self . token fdist = nltk . FreqDist ( words ) top_n_words = [ w [ 0 ] for w in fdist . items ( ) ] [ : self . target_n ] scored_list = self . __closely_associated_score ( normalized_sentences , top_n_words ) filtered_list = Abstractor . filter ( scored_list ) result_list = [ normalized_sentences [ idx ] for ( idx , score ) in filtered_list ] result_dict = { "summarize_result" : result_list , "scoring_data" : filtered_list } return result_dict | Execute summarization . |
14,285 | def __closely_associated_score ( self , normalized_sentences , top_n_words ) : scores_list = [ ] sentence_idx = - 1 for sentence in normalized_sentences : self . tokenize ( sentence ) sentence = self . token sentence_idx += 1 word_idx = [ ] for w in top_n_words : try : word_idx . append ( sentence . index ( w ) ) except ValueError : pass word_idx . sort ( ) if len ( word_idx ) == 0 : continue clusters = [ ] cluster = [ word_idx [ 0 ] ] i = 1 while i < len ( word_idx ) : if word_idx [ i ] - word_idx [ i - 1 ] < self . cluster_threshold : cluster . append ( word_idx [ i ] ) else : clusters . append ( cluster [ : ] ) cluster = [ word_idx [ i ] ] i += 1 clusters . append ( cluster ) max_cluster_score = 0 for c in clusters : significant_words_in_cluster = len ( c ) total_words_in_cluster = c [ - 1 ] - c [ 0 ] + 1 score = 1.0 * significant_words_in_cluster * significant_words_in_cluster / total_words_in_cluster if score > max_cluster_score : max_cluster_score = score scores_list . append ( ( sentence_idx , score ) ) return scores_list | Scoring the sentence with closely associations . |
14,286 | def learn ( self , initial_state_key , limit = 1000 , game_n = 1 ) : end_flag = False state_key_list = [ None ] * len ( self . q_learning_list ) action_key_list = [ None ] * len ( self . q_learning_list ) next_action_key_list = [ None ] * len ( self . q_learning_list ) for game in range ( game_n ) : state_key = initial_state_key self . t = 1 while self . t <= limit : for i in range ( len ( self . q_learning_list ) ) : state_key_list [ i ] = state_key if game + 1 == game_n : self . state_key_list . append ( tuple ( i , state_key_list ) ) self . q_learning_list [ i ] . t = self . t next_action_list = self . q_learning_list [ i ] . extract_possible_actions ( tuple ( i , state_key_list ) ) if len ( next_action_list ) : action_key = self . q_learning_list [ i ] . select_action ( state_key = tuple ( i , state_key_list ) , next_action_list = next_action_list ) action_key_list [ i ] = action_key reward_value = self . q_learning_list [ i ] . observe_reward_value ( tuple ( i , state_key_list ) , tuple ( i , action_key_list ) ) if self . q_learning_list [ i ] . check_the_end_flag ( tuple ( i , state_key_list ) ) is True : end_flag = True next_next_action_list = self . q_learning_list [ i ] . extract_possible_actions ( tuple ( i , action_key_list ) ) if len ( next_next_action_list ) : next_action_key = self . q_learning_list [ i ] . predict_next_action ( tuple ( i , action_key_list ) , next_next_action_list ) next_action_key_list [ i ] = next_action_key next_max_q = self . q_learning_list [ i ] . extract_q_df ( tuple ( i , action_key_list ) , next_action_key ) self . q_learning_list [ i ] . update_q ( state_key = tuple ( i , state_key_list ) , action_key = tuple ( i , action_key_list ) , reward_value = reward_value , next_max_q = next_max_q ) state_key = self . q_learning_list [ i ] . update_state ( state_key = tuple ( i , state_key_list ) , action_key = tuple ( i , action_key_list ) ) state_key_list [ i ] = state_key self . t += 1 self . q_learning_list [ i ] . t = self . t if end_flag is True : break | Multi - Agent Learning . |
14,287 | def get_model ( self ) : class Model ( object ) : def __init__ ( self , cnn ) : self . cnn = cnn return Model ( self . __cnn ) | object of model as a function approximator which has cnn whose type is pydbm . cnn . pydbm . cnn . convolutional_neural_network . ConvolutionalNeuralNetwork . |
14,288 | def update_state ( self , state_arr , action_arr ) : x , y = np . where ( action_arr [ - 1 ] == 1 ) self . __agent_pos = ( x [ 0 ] , y [ 0 ] ) self . __route_memory_list . append ( ( x [ 0 ] , y [ 0 ] ) ) self . __route_long_memory_list . append ( ( x [ 0 ] , y [ 0 ] ) ) self . __route_long_memory_list = list ( set ( self . __route_long_memory_list ) ) while len ( self . __route_memory_list ) > self . __memory_num : self . __route_memory_list = self . __route_memory_list [ 1 : ] return self . extract_now_state ( ) | Update state . Override . |
14,289 | def initialize ( self , map_arr , start_point_label = "S" , end_point_label = "G" , wall_label = "#" , agent_label = "@" ) : np . set_printoptions ( threshold = np . inf ) self . __agent_label = agent_label self . __map_arr = map_arr self . __start_point_label = start_point_label start_arr_tuple = np . where ( self . __map_arr == self . __start_point_label ) x_arr , y_arr = start_arr_tuple self . __start_point_tuple = ( x_arr [ 0 ] , y_arr [ 0 ] ) end_arr_tuple = np . where ( self . __map_arr == self . __end_point_label ) x_arr , y_arr = end_arr_tuple self . __end_point_tuple = ( x_arr [ 0 ] , y_arr [ 0 ] ) self . __wall_label = wall_label for x in range ( self . __map_arr . shape [ 1 ] ) : for y in range ( self . __map_arr . shape [ 0 ] ) : if ( x , y ) == self . __start_point_tuple or ( x , y ) == self . __end_point_tuple : continue arr_value = self . __map_arr [ y ] [ x ] if arr_value == self . __wall_label : continue self . save_r_df ( ( x , y ) , float ( arr_value ) ) | Initialize map of maze and setup reward value . |
14,290 | def visualize_learning_result ( self , state_key ) : x , y = state_key map_arr = copy . deepcopy ( self . __map_arr ) goal_point_tuple = np . where ( map_arr == self . __end_point_label ) goal_x , goal_y = goal_point_tuple map_arr [ y ] [ x ] = "@" self . __map_arr_list . append ( map_arr ) if goal_x == x and goal_y == y : for i in range ( 10 ) : key = len ( self . __map_arr_list ) - ( 10 - i ) print ( "Number of searches: " + str ( key ) ) print ( self . __map_arr_list [ key ] ) print ( "Total number of searches: " + str ( self . t ) ) print ( self . __map_arr_list [ - 1 ] ) print ( "Goal !!" ) | Visualize learning result . |
14,291 | def normalize_r_value ( self ) : if self . r_df is not None and self . r_df . shape [ 0 ] : self . r_df . r_value = ( self . r_df . r_value - self . r_df . r_value . mean ( ) ) / self . r_df . r_value . std ( ) | Normalize r - value . |
14,292 | def get_alpha_value ( self ) : if isinstance ( self . __alpha_value , float ) is False : raise TypeError ( "The type of __alpha_value must be float." ) return self . __alpha_value | getter Learning rate . |
14,293 | def set_alpha_value ( self , value ) : if isinstance ( value , float ) is False : raise TypeError ( "The type of __alpha_value must be float." ) self . __alpha_value = value | setter Learning rate . |
14,294 | def get_gamma_value ( self ) : if isinstance ( self . __gamma_value , float ) is False : raise TypeError ( "The type of __gamma_value must be float." ) return self . __gamma_value | getter Gamma value . |
14,295 | def set_gamma_value ( self , value ) : if isinstance ( value , float ) is False : raise TypeError ( "The type of __gamma_value must be float." ) self . __gamma_value = value | setter Gamma value . |
14,296 | def filter ( self , scored_list ) : top_n_key = - 1 * self . top_n top_n_list = sorted ( scored_list , key = lambda x : x [ 1 ] ) [ top_n_key : ] result_list = sorted ( top_n_list , key = lambda x : x [ 0 ] ) return result_list | Filtering with top - n ranking . |
14,297 | def tokenize ( self , data ) : super ( ) . tokenize ( data ) token_tuple_zip = self . n_gram . generate_tuple_zip ( self . token , self . n ) token_list = [ ] self . token = [ "" . join ( list ( token_tuple ) ) for token_tuple in token_tuple_zip ] | Tokenize sentence . |
14,298 | def extract_q_df ( self , state_key , action_key ) : q = 0.0 if self . q_df is None : self . save_q_df ( state_key , action_key , q ) return q q_df = self . q_df [ self . q_df . state_key == state_key ] q_df = q_df [ q_df . action_key == action_key ] if q_df . shape [ 0 ] : q = float ( q_df [ "q_value" ] ) else : self . save_q_df ( state_key , action_key , q ) return q | Extract Q - Value from self . q_df . |
14,299 | def save_q_df ( self , state_key , action_key , q_value ) : if isinstance ( q_value , float ) is False : raise TypeError ( "The type of q_value must be float." ) new_q_df = pd . DataFrame ( [ ( state_key , action_key , q_value ) ] , columns = [ "state_key" , "action_key" , "q_value" ] ) if self . q_df is not None : self . q_df = pd . concat ( [ new_q_df , self . q_df ] ) self . q_df = self . q_df . drop_duplicates ( [ "state_key" , "action_key" ] ) else : self . q_df = new_q_df | Insert or update Q - Value in self . q_df . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.