| idx (int64, 0–63k) | question (stringlengths 61–4.03k) | target (stringlengths 6–1.23k) |
|---|---|---|
7,600
|
def set_result ( self , msg , valid = True , overwrite = False ) : if self . has_result and not overwrite : log . warn ( LOG_CHECK , "Double result %r (previous %r) for %s" , msg , self . result , self ) else : self . has_result = True if not isinstance ( msg , unicode ) : log . warn ( LOG_CHECK , "Non-unicode result for %s: %r" , self , msg ) elif not msg : log . warn ( LOG_CHECK , "Empty result for %s" , self ) self . result = msg self . valid = valid self . data = None
|
Set result string and validity .
|
7,601
|
def get_title ( self ) : if self . title is None : url = u"" if self . base_url : url = self . base_url elif self . url : url = self . url self . title = url if "/" in url : title = url . rsplit ( "/" , 1 ) [ 1 ] if title : self . title = title return self . title
|
Return the title of the page the URL refers to. By default this is the filename or the URL.
|
7,602
|
def _is_ctype ( self , ctype ) : if not self . valid : return False mime = self . content_type return self . ContentMimetypes . get ( mime ) == ctype
|
Return True iff content is valid and of the given type .
|
7,603
|
def add_warning ( self , s , tag = None ) : item = ( tag , s ) if item not in self . warnings and tag not in self . aggregate . config [ "ignorewarnings" ] : self . warnings . append ( item )
|
Add a warning string .
|
7,604
|
def add_info ( self , s ) : if s not in self . info : self . info . append ( s )
|
Add an info string .
|
7,605
|
def set_cache_url ( self ) : self . cache_url = urlutil . urlunsplit ( self . urlparts [ : 4 ] + [ u'' ] ) if self . cache_url is not None : assert isinstance ( self . cache_url , unicode ) , repr ( self . cache_url )
|
Set the URL to be used for caching .
|
7,606
|
def check_url_warnings ( self ) : effectiveurl = urlutil . urlunsplit ( self . urlparts ) if self . url != effectiveurl : self . add_warning ( _ ( "Effective URL %(url)r." ) % { "url" : effectiveurl } , tag = WARN_URL_EFFECTIVE_URL ) self . url = effectiveurl if len ( self . url ) > URL_MAX_LENGTH and self . scheme != u"data" : args = dict ( len = len ( self . url ) , max = URL_MAX_LENGTH ) self . add_warning ( _ ( "URL length %(len)d is longer than %(max)d." ) % args , tag = WARN_URL_TOO_LONG )
|
Check URL name and length .
|
7,607
|
def build_url ( self ) : base_url , is_idn = url_norm ( self . base_url , self . encoding ) if self . base_ref : if ":" not in self . base_ref : self . base_ref = urljoin ( self . parent_url , self . base_ref ) self . url = urljoin ( self . base_ref , base_url ) elif self . parent_url : urlparts = list ( urlparse . urlsplit ( self . parent_url ) ) urlparts [ 4 ] = "" parent_url = urlutil . urlunsplit ( urlparts ) self . url = urljoin ( parent_url , base_url ) else : self . url = base_url urlparts = list ( urlparse . urlsplit ( self . url ) ) if urlparts [ 2 ] : urlparts [ 2 ] = urlutil . collapse_segments ( urlparts [ 2 ] ) self . url = urlutil . urlunsplit ( urlparts ) self . urlparts = strformat . url_unicode_split ( self . url ) self . build_url_parts ( ) self . url = urlutil . urlunsplit ( self . urlparts )
|
Construct self.url and self.urlparts out of the given base URL information: self.base_url, self.parent_url and self.base_ref.
|
7,608
|
def build_url_parts ( self ) : self . userinfo , host = urllib . splituser ( self . urlparts [ 1 ] ) port = urlutil . default_ports . get ( self . scheme , 0 ) host , port = urlutil . splitport ( host , port = port ) if port is None : raise LinkCheckerError ( _ ( "URL host %(host)r has invalid port" ) % { "host" : host } ) self . port = port self . host = host . lower ( ) if self . scheme in scheme_requires_host : if not self . host : raise LinkCheckerError ( _ ( "URL has empty hostname" ) ) self . check_obfuscated_ip ( ) if not self . port or self . port == urlutil . default_ports . get ( self . scheme ) : host = self . host else : host = "%s:%d" % ( self . host , self . port ) if self . userinfo : self . urlparts [ 1 ] = "%s@%s" % ( self . userinfo , host ) else : self . urlparts [ 1 ] = host self . anchor = self . urlparts [ 4 ] if self . anchor is not None : assert isinstance ( self . anchor , unicode ) , repr ( self . anchor )
|
Set userinfo, host, port and anchor from self.urlparts. Also check for obfuscated IP addresses.
|
7,609
|
def check_obfuscated_ip ( self ) : if iputil . is_obfuscated_ip ( self . host ) : ips = iputil . resolve_host ( self . host ) if ips : self . host = ips [ 0 ] self . add_warning ( _ ( "URL %(url)s has obfuscated IP address %(ip)s" ) % { "url" : self . base_url , "ip" : ips [ 0 ] } , tag = WARN_URL_OBFUSCATED_IP )
|
Warn if the host of this URL is an obfuscated IP address.
|
7,610
|
def check ( self ) : if self . aggregate . config [ "trace" ] : trace . trace_on ( ) try : self . local_check ( ) except ( socket . error , select . error ) : etype , value = sys . exc_info ( ) [ : 2 ] if etype == errno . EINTR : raise KeyboardInterrupt ( value ) else : raise
|
Main check function for checking this URL .
|
7,611
|
def local_check ( self ) : log . debug ( LOG_CHECK , "Checking %s" , unicode ( self ) ) assert not self . extern [ 1 ] , 'checking strict extern URL' log . debug ( LOG_CHECK , "checking connection" ) try : self . check_connection ( ) self . set_content_type ( ) self . add_size_info ( ) self . aggregate . plugin_manager . run_connection_plugins ( self ) except tuple ( ExcList ) as exc : value = self . handle_exception ( ) if isinstance ( exc , socket . error ) and exc . args [ 0 ] == - 2 : value = _ ( 'Hostname not found' ) elif isinstance ( exc , UnicodeError ) : value = _ ( 'Bad hostname %(host)r: %(msg)s' ) % { 'host' : self . host , 'msg' : str ( value ) } self . set_result ( unicode_safe ( value ) , valid = False )
|
Local check function can be overridden in subclasses .
|
7,612
|
def check_content ( self ) : if self . do_check_content and self . valid : try : if self . can_get_content ( ) : self . aggregate . plugin_manager . run_content_plugins ( self ) if self . allows_recursion ( ) : return True except tuple ( ExcList ) : value = self . handle_exception ( ) self . add_warning ( _ ( "could not get content: %(msg)s" ) % { "msg" : str ( value ) } , tag = WARN_URL_ERROR_GETTING_CONTENT ) return False
|
Check content of URL .
|
7,613
|
def close_connection ( self ) : if self . url_connection is None : return try : self . url_connection . close ( ) except Exception : pass self . url_connection = None
|
Close an opened url connection .
|
7,614
|
def handle_exception ( self ) : etype , evalue = sys . exc_info ( ) [ : 2 ] log . debug ( LOG_CHECK , "Error in %s: %s %s" , self . url , etype , evalue , exception = True ) if ( etype in ExcNoCacheList ) or ( etype == socket . error and evalue . args [ 0 ] == errno . EBADF ) or not evalue : self . caching = False errmsg = unicode ( etype . __name__ ) uvalue = strformat . unicode_safe ( evalue ) if uvalue : errmsg += u": %s" % uvalue return strformat . limit ( errmsg , length = 240 )
|
An exception occurred . Log it and set the cache flag .
|
7,615
|
def allows_simple_recursion ( self ) : rec_level = self . aggregate . config [ "recursionlevel" ] if rec_level >= 0 and self . recursion_level >= rec_level : log . debug ( LOG_CHECK , "... no, maximum recursion level reached." ) return False if self . extern [ 0 ] : log . debug ( LOG_CHECK , "... no, extern." ) return False return True
|
Check recursion level and extern status .
|
7,616
|
def allows_recursion ( self ) : log . debug ( LOG_CHECK , "checking recursion of %r ..." , self . url ) if not self . valid : log . debug ( LOG_CHECK , "... no, invalid." ) return False if not self . can_get_content ( ) : log . debug ( LOG_CHECK , "... no, cannot get content." ) return False if not self . allows_simple_recursion ( ) : return False if self . size > self . aggregate . config [ "maxfilesizeparse" ] : log . debug ( LOG_CHECK , "... no, maximum parse size." ) return False if not self . is_parseable ( ) : log . debug ( LOG_CHECK , "... no, not parseable." ) return False if not self . content_allows_robots ( ) : log . debug ( LOG_CHECK , "... no, robots." ) return False log . debug ( LOG_CHECK , "... yes, recursion." ) return True
|
Return True iff we can recurse into the url's content.
|
7,617
|
def read_content ( self ) : buf = StringIO ( ) data = self . read_content_chunk ( ) while data : if buf . tell ( ) + len ( data ) > self . aggregate . config [ "maxfilesizedownload" ] : raise LinkCheckerError ( _ ( "File size too large" ) ) buf . write ( data ) data = self . read_content_chunk ( ) return buf . getvalue ( )
|
Return data for this URL . Can be overridden in subclasses .
|
7,618
|
def add_url ( self , url , line = 0 , column = 0 , page = 0 , name = u"" , base = None ) : if base : base_ref = urlutil . url_norm ( base ) [ 0 ] else : base_ref = None url_data = get_url_from ( url , self . recursion_level + 1 , self . aggregate , parent_url = self . url , base_ref = base_ref , line = line , column = column , page = page , name = name , parent_content_type = self . content_type ) self . aggregate . urlqueue . put ( url_data )
|
Add new URL to queue .
|
7,619
|
def serialized ( self , sep = os . linesep ) : return unicode_safe ( sep ) . join ( [ u"%s link" % self . scheme , u"base_url=%r" % self . base_url , u"parent_url=%r" % self . parent_url , u"base_ref=%r" % self . base_ref , u"recursion_level=%d" % self . recursion_level , u"url_connection=%s" % self . url_connection , u"line=%d" % self . line , u"column=%d" % self . column , u"page=%d" % self . page , u"name=%r" % self . name , u"anchor=%r" % self . anchor , u"cache_url=%s" % self . cache_url , ] )
|
Return serialized url check data as unicode string .
|
7,620
|
def add_intern_pattern ( self , url = None ) : try : pat = self . get_intern_pattern ( url = url ) if pat : log . debug ( LOG_CHECK , "Add intern pattern %r" , pat ) self . aggregate . config [ 'internlinks' ] . append ( get_link_pat ( pat ) ) except UnicodeError as msg : res = _ ( "URL has unparsable domain name: %(domain)s" ) % { "domain" : msg } self . set_result ( res , valid = False )
|
Add intern URL regex to config .
|
7,621
|
def to_wire_dict ( self ) : return dict ( valid = self . valid , extern = self . extern [ 0 ] , result = self . result , warnings = self . warnings [ : ] , name = self . name or u"" , title = self . get_title ( ) , parent_url = self . parent_url or u"" , base_ref = self . base_ref or u"" , base_url = self . base_url or u"" , url = self . url or u"" , domain = ( self . urlparts [ 1 ] if self . urlparts else u"" ) , checktime = self . checktime , dltime = self . dltime , size = self . size , info = self . info , line = self . line , column = self . column , page = self . page , cache_url = self . cache_url , content_type = self . content_type , level = self . recursion_level , modified = self . modified , )
|
Return a simplified transport object for logging and caching .
|
7,622
|
def comment ( self , s , ** args ) : self . write ( u"<!-- " ) self . write ( s , ** args ) self . writeln ( u" -->" )
|
Write XML comment .
|
7,623
|
def xml_starttag ( self , name , attrs = None ) : self . write ( self . indent * self . level ) self . write ( u"<%s" % xmlquote ( name ) ) if attrs : for name , value in attrs . items ( ) : args = ( xmlquote ( name ) , xmlquoteattr ( value ) ) self . write ( u' %s="%s"' % args ) self . writeln ( u">" ) self . level += 1
|
Write XML start tag .
|
7,624
|
def xml_tag ( self , name , content , attrs = None ) : self . write ( self . indent * self . level ) self . write ( u"<%s" % xmlquote ( name ) ) if attrs : for aname , avalue in attrs . items ( ) : args = ( xmlquote ( aname ) , xmlquoteattr ( avalue ) ) self . write ( u' %s="%s"' % args ) self . writeln ( u">%s</%s>" % ( xmlquote ( content ) , xmlquote ( name ) ) )
|
Write XML tag with content .
|
7,625
|
def _escapify ( qstring ) : text = '' for c in qstring : if c in __escaped : text += '\\' + c elif ord ( c ) >= 0x20 and ord ( c ) < 0x7F : text += c else : text += '\\%03d' % ord ( c ) return text
|
Escape the characters in a quoted string which need it .
|
7,626
|
def _truncate_bitmap ( what ) : for i in xrange ( len ( what ) - 1 , - 1 , - 1 ) : if what [ i ] != '\x00' : break return '' . join ( what [ 0 : i + 1 ] )
|
Determine the index of the greatest byte that isn't all zeros, and return the bitmap that contains all the bytes less than that index.
|
7,627
|
def from_text ( rdclass , rdtype , tok , origin = None , relativize = True ) : if isinstance ( tok , str ) : tok = dns . tokenizer . Tokenizer ( tok ) cls = get_rdata_class ( rdclass , rdtype ) if cls != GenericRdata : token = tok . get ( ) tok . unget ( token ) if token . is_identifier ( ) and token . value == r'\#' : rdata = GenericRdata . from_text ( rdclass , rdtype , tok , origin , relativize ) return from_wire ( rdclass , rdtype , rdata . data , 0 , len ( rdata . data ) , origin ) return cls . from_text ( rdclass , rdtype , tok , origin , relativize )
|
Build an rdata object from text format .
|
7,628
|
def from_wire ( rdclass , rdtype , wire , current , rdlen , origin = None ) : wire = dns . wiredata . maybe_wrap ( wire ) cls = get_rdata_class ( rdclass , rdtype ) return cls . from_wire ( rdclass , rdtype , wire , current , rdlen , origin )
|
Build an rdata object from wire format
|
7,629
|
def do_print ( self , url_data ) : if self . verbose : return True if self . warnings and url_data . warnings : return True return not url_data . valid
|
Determine if URL entry should be logged or not .
|
7,630
|
def log_url ( self , url_data ) : self . check_active_loggers ( ) do_print = self . do_print ( url_data ) for log in self . loggers : log . log_filter_url ( url_data , do_print )
|
Send new url to all configured loggers .
|
7,631
|
def quote_attrval ( s ) : res = [ ] for c in s : if ord ( c ) <= 127 : if c == u'&' : res . append ( u"&amp;" ) elif c == u'"' : res . append ( u"&quot;" ) else : res . append ( c ) else : res . append ( u"&#%d;" % ord ( c ) ) return u"" . join ( res )
|
Quote a HTML attribute to be able to wrap it in double quotes .
|
7,632
|
def comment ( self , data ) : data = data . encode ( self . encoding , "ignore" ) self . fd . write ( "<!--%s-->" % data )
|
Print HTML comment .
|
7,633
|
def _start_element ( self , tag , attrs , end ) : tag = tag . encode ( self . encoding , "ignore" ) self . fd . write ( "<%s" % tag . replace ( "/" , "" ) ) for key , val in attrs . items ( ) : key = key . encode ( self . encoding , "ignore" ) if val is None : self . fd . write ( " %s" % key ) else : val = val . encode ( self . encoding , "ignore" ) self . fd . write ( ' %s="%s"' % ( key , quote_attrval ( val ) ) ) self . fd . write ( end )
|
Print HTML element with end string .
|
7,634
|
def end_element ( self , tag ) : tag = tag . encode ( self . encoding , "ignore" ) self . fd . write ( "</%s>" % tag )
|
Print HTML end element .
|
7,635
|
def doctype ( self , data ) : data = data . encode ( self . encoding , "ignore" ) self . fd . write ( "<!DOCTYPE%s>" % data )
|
Print HTML document type .
|
7,636
|
def pi ( self , data ) : data = data . encode ( self . encoding , "ignore" ) self . fd . write ( "<?%s?>" % data )
|
Print HTML pi .
|
7,637
|
def cdata ( self , data ) : data = data . encode ( self . encoding , "ignore" ) self . fd . write ( "<![CDATA[%s]]>" % data )
|
Print HTML cdata .
|
7,638
|
def characters ( self , data ) : data = data . encode ( self . encoding , "ignore" ) self . fd . write ( data )
|
Print characters .
|
7,639
|
def canonical_clamav_conf ( ) : if os . name == 'posix' : clamavconf = "/etc/clamav/clamd.conf" elif os . name == 'nt' : clamavconf = r"c:\clamav-devel\etc\clamd.conf" else : clamavconf = "clamd.conf" return clamavconf
|
Default clamav configs for various platforms .
|
7,640
|
def get_clamav_conf ( filename ) : if os . path . isfile ( filename ) : return ClamavConfig ( filename ) log . warn ( LOG_PLUGIN , "No ClamAV config file found at %r." , filename )
|
Initialize clamav configuration .
|
7,641
|
def get_sockinfo ( host , port = None ) : family , socktype = socket . AF_INET , socket . SOCK_STREAM return socket . getaddrinfo ( host , port , family , socktype )
|
Return socket . getaddrinfo for given host and port .
|
7,642
|
def scan ( data , clamconf ) : try : scanner = ClamdScanner ( clamconf ) except socket . error : errmsg = _ ( "Could not connect to ClamAV daemon." ) return ( [ ] , [ errmsg ] ) try : scanner . scan ( data ) finally : scanner . close ( ) return scanner . infected , scanner . errors
|
Scan data for viruses .
|
7,643
|
def new_scansock ( self ) : port = None try : self . sock . sendall ( "STREAM" ) port = None for dummy in range ( 60 ) : data = self . sock . recv ( self . sock_rcvbuf ) i = data . find ( "PORT" ) if i != - 1 : port = int ( data [ i + 5 : ] ) break except socket . error : self . sock . close ( ) raise if port is None : raise ClamavError ( _ ( "clamd is not ready for stream scanning" ) ) sockinfo = get_sockinfo ( self . host , port = port ) wsock = create_socket ( socket . AF_INET , socket . SOCK_STREAM ) try : wsock . connect ( sockinfo [ 0 ] [ 4 ] ) except socket . error : wsock . close ( ) raise return wsock
|
Return a connected socket for sending scan data to it .
|
7,644
|
def close ( self ) : self . wsock . close ( ) data = self . sock . recv ( self . sock_rcvbuf ) while data : if "FOUND\n" in data : self . infected . append ( data ) if "ERROR\n" in data : self . errors . append ( data ) data = self . sock . recv ( self . sock_rcvbuf ) self . sock . close ( )
|
Get results and close clamd daemon sockets .
|
7,645
|
def parseconf ( self , filename ) : with open ( filename ) as fd : for line in fd : line = line . strip ( ) if not line or line . startswith ( "#" ) : continue split = line . split ( None , 1 ) if len ( split ) == 1 : self [ split [ 0 ] ] = True else : self [ split [ 0 ] ] = split [ 1 ]
|
Parse clamav configuration from given file .
|
7,646
|
def new_connection ( self ) : if self . get ( 'LocalSocket' ) : host = 'localhost' sock = self . create_local_socket ( ) elif self . get ( 'TCPSocket' ) : host = self . get ( 'TCPAddr' , 'localhost' ) sock = self . create_tcp_socket ( host ) else : raise ClamavError ( _ ( "one of TCPSocket or LocalSocket must be enabled" ) ) return sock , host
|
Connect to clamd for stream scanning .
|
7,647
|
def create_local_socket ( self ) : sock = create_socket ( socket . AF_UNIX , socket . SOCK_STREAM ) addr = self [ 'LocalSocket' ] try : sock . connect ( addr ) except socket . error : sock . close ( ) raise return sock
|
Create a local socket, connect to it and return the socket object.
|
7,648
|
def create_tcp_socket ( self , host ) : port = int ( self [ 'TCPSocket' ] ) sockinfo = get_sockinfo ( host , port = port ) sock = create_socket ( socket . AF_INET , socket . SOCK_STREAM ) try : sock . connect ( sockinfo [ 0 ] [ 4 ] ) except socket . error : sock . close ( ) raise return sock
|
Create a TCP socket, connect to it and return the socket object.
|
7,649
|
def zonalstats ( features , raster , all_touched , band , categorical , indent , info , nodata , prefix , stats , sequence , use_rs ) : if info : logging . basicConfig ( level = logging . INFO ) if stats is not None : stats = stats . split ( " " ) if 'all' in [ x . lower ( ) for x in stats ] : stats = "ALL" zonal_results = gen_zonal_stats ( features , raster , all_touched = all_touched , band = band , categorical = categorical , nodata = nodata , stats = stats , prefix = prefix , geojson_out = True ) if sequence : for feature in zonal_results : if use_rs : click . echo ( b'\x1e' , nl = False ) click . echo ( json . dumps ( feature ) ) else : click . echo ( json . dumps ( { 'type' : 'FeatureCollection' , 'features' : list ( zonal_results ) } ) )
|
zonalstats generates summary statistics of geospatial raster datasets based on vector features .
|
7,650
|
def pointquery ( features , raster , band , indent , nodata , interpolate , property_name , sequence , use_rs ) : results = gen_point_query ( features , raster , band = band , nodata = nodata , interpolate = interpolate , property_name = property_name , geojson_out = True ) if sequence : for feature in results : if use_rs : click . echo ( b'\x1e' , nl = False ) click . echo ( json . dumps ( feature ) ) else : click . echo ( json . dumps ( { 'type' : 'FeatureCollection' , 'features' : list ( results ) } ) )
|
Queries the raster values at the points of the input GeoJSON Features. The raster values are added to the features' properties and output as a GeoJSON Feature Collection.
|
7,651
|
def point_window_unitxy ( x , y , affine ) : fcol , frow = ~ affine * ( x , y ) r , c = int ( round ( frow ) ) , int ( round ( fcol ) ) new_win = ( ( r - 1 , r + 1 ) , ( c - 1 , c + 1 ) ) unitxy = ( 0.5 - ( c - fcol ) , 0.5 + ( r - frow ) ) return new_win , unitxy
|
Given an x, y and a geotransform, returns a rasterio window representing the 2x2 window whose center points encompass the point, and the cartesian x, y coordinates of the point on the unit square defined by the array center points.
|
7,652
|
def geom_xys ( geom ) : if geom . has_z : geom = wkt . loads ( geom . to_wkt ( ) ) assert not geom . has_z if hasattr ( geom , "geoms" ) : geoms = geom . geoms else : geoms = [ geom ] for g in geoms : arr = g . array_interface_base [ 'data' ] for pair in zip ( arr [ : : 2 ] , arr [ 1 : : 2 ] ) : yield pair
|
Given a shapely geometry, generate a flattened series of 2D points as (x, y) tuples.
|
7,653
|
def gen_point_query ( vectors , raster , band = 1 , layer = 0 , nodata = None , affine = None , interpolate = 'bilinear' , property_name = 'value' , geojson_out = False ) : if interpolate not in [ 'nearest' , 'bilinear' ] : raise ValueError ( "interpolate must be nearest or bilinear" ) features_iter = read_features ( vectors , layer ) with Raster ( raster , nodata = nodata , affine = affine , band = band ) as rast : for feat in features_iter : geom = shape ( feat [ 'geometry' ] ) vals = [ ] for x , y in geom_xys ( geom ) : if interpolate == 'nearest' : r , c = rast . index ( x , y ) window = ( ( int ( r ) , int ( r + 1 ) ) , ( int ( c ) , int ( c + 1 ) ) ) src_array = rast . read ( window = window , masked = True ) . array val = src_array [ 0 , 0 ] if val is masked : vals . append ( None ) else : vals . append ( asscalar ( val ) ) elif interpolate == 'bilinear' : window , unitxy = point_window_unitxy ( x , y , rast . affine ) src_array = rast . read ( window = window , masked = True ) . array vals . append ( bilinear ( src_array , * unitxy ) ) if len ( vals ) == 1 : vals = vals [ 0 ] if geojson_out : if 'properties' not in feat : feat [ 'properties' ] = { } feat [ 'properties' ] [ property_name ] = vals yield feat else : yield vals
|
Given a set of vector features and a raster, generate raster values at each vertex of the geometry.
|
7,654
|
def key_assoc_val ( d , func , exclude = None ) : vs = list ( d . values ( ) ) ks = list ( d . keys ( ) ) key = ks [ vs . index ( func ( vs ) ) ] return key
|
Return the key associated with the value returned by func.
|
7,655
|
def boxify_points ( geom , rast ) : if 'Point' not in geom . type : raise ValueError ( "Points or multipoints only" ) buff = - 0.01 * abs ( min ( rast . affine . a , rast . affine . e ) ) if geom . type == 'Point' : pts = [ geom ] elif geom . type == "MultiPoint" : pts = geom . geoms geoms = [ ] for pt in pts : row , col = rast . index ( pt . x , pt . y ) win = ( ( row , row + 1 ) , ( col , col + 1 ) ) geoms . append ( box ( * window_bounds ( win , rast . affine ) ) . buffer ( buff ) ) return MultiPolygon ( geoms )
|
Point and MultiPoint don't play well with GDALRasterize; convert them into box polygons of 99% cell size, centered on the raster cell.
|
7,656
|
def parse_feature ( obj ) : if hasattr ( obj , '__geo_interface__' ) : gi = obj . __geo_interface__ if gi [ 'type' ] in geom_types : return wrap_geom ( gi ) elif gi [ 'type' ] == 'Feature' : return gi try : shape = wkt . loads ( obj ) return wrap_geom ( shape . __geo_interface__ ) except ( ReadingError , TypeError , AttributeError ) : pass try : shape = wkb . loads ( obj ) return wrap_geom ( shape . __geo_interface__ ) except ( ReadingError , TypeError ) : pass try : if obj [ 'type' ] in geom_types : return wrap_geom ( obj ) elif obj [ 'type' ] == 'Feature' : return obj except ( AssertionError , TypeError ) : pass raise ValueError ( "Can't parse %s as a geojson Feature object" % obj )
|
Given a python object, attempt to construct a GeoJSON-like Feature from it.
|
7,657
|
def bounds_window ( bounds , affine ) : w , s , e , n = bounds row_start , col_start = rowcol ( w , n , affine ) row_stop , col_stop = rowcol ( e , s , affine , op = math . ceil ) return ( row_start , row_stop ) , ( col_start , col_stop )
|
Create a full-cover rasterio-style window.
|
7,658
|
def read ( self , bounds = None , window = None , masked = False ) : if bounds and window : raise ValueError ( "Specify either bounds or window" ) if bounds : win = bounds_window ( bounds , self . affine ) elif window : win = window else : raise ValueError ( "Specify either bounds or window" ) c , _ , _ , f = window_bounds ( win , self . affine ) a , b , _ , d , e , _ , _ , _ , _ = tuple ( self . affine ) new_affine = Affine ( a , b , c , d , e , f ) nodata = self . nodata if nodata is None : nodata = - 999 warnings . warn ( "Setting nodata to -999; specify nodata explicitly" ) if self . array is not None : new_array = boundless_array ( self . array , window = win , nodata = nodata , masked = masked ) elif self . src : new_array = self . src . read ( self . band , window = win , boundless = True , masked = masked ) return Raster ( new_array , new_affine , nodata )
|
Performs a boundless read against the underlying array source
|
7,659
|
def chunks ( data , n ) : for i in range ( 0 , len ( data ) , n ) : yield data [ i : i + n ]
|
Yield successive n-sized chunks from a sliceable iterable.
|
7,660
|
def halt ( self ) : buf = [ ] buf . append ( self . act_end ) buf . append ( 0 ) crc = self . calculate_crc ( buf ) self . clear_bitmask ( 0x08 , 0x80 ) self . card_write ( self . mode_transrec , buf ) self . clear_bitmask ( 0x08 , 0x08 ) self . authed = False
|
Switch state to HALT
|
7,661
|
def write ( self , block_address , data ) : buf = [ ] buf . append ( self . act_write ) buf . append ( block_address ) crc = self . calculate_crc ( buf ) buf . append ( crc [ 0 ] ) buf . append ( crc [ 1 ] ) ( error , back_data , back_length ) = self . card_write ( self . mode_transrec , buf ) if not ( back_length == 4 ) or not ( ( back_data [ 0 ] & 0x0F ) == 0x0A ) : error = True if not error : buf_w = [ ] for i in range ( 16 ) : buf_w . append ( data [ i ] ) crc = self . calculate_crc ( buf_w ) buf_w . append ( crc [ 0 ] ) buf_w . append ( crc [ 1 ] ) ( error , back_data , back_length ) = self . card_write ( self . mode_transrec , buf_w ) if not ( back_length == 4 ) or not ( ( back_data [ 0 ] & 0x0F ) == 0x0A ) : error = True return error
|
Writes data to block . You should be authenticated before calling write . Returns error state .
|
7,662
|
def auth ( self , auth_method , key ) : self . method = auth_method self . key = key if self . debug : print ( "Changing used auth key to " + str ( key ) + " using method " + ( "A" if auth_method == self . rfid . auth_a else "B" ) )
|
Sets authentication info for current tag
|
7,663
|
def write_trailer ( self , sector , key_a = ( 0xFF , 0xFF , 0xFF , 0xFF , 0xFF , 0xFF ) , auth_bits = ( 0xFF , 0x07 , 0x80 ) , user_data = 0x69 , key_b = ( 0xFF , 0xFF , 0xFF , 0xFF , 0xFF , 0xFF ) ) : addr = self . block_addr ( sector , 3 ) return self . rewrite ( addr , key_a [ : 6 ] + auth_bits [ : 3 ] + ( user_data , ) + key_b [ : 6 ] )
|
Writes sector trailer of specified sector. Tag and auth must be set; does auth. If a value is None, the current value of that byte is kept. Returns error state.
|
7,664
|
def rewrite ( self , block_address , new_bytes ) : if not self . is_tag_set_auth ( ) : return True error = self . do_auth ( block_address ) if not error : ( error , data ) = self . rfid . read ( block_address ) if not error : for i in range ( len ( new_bytes ) ) : if new_bytes [ i ] != None : if self . debug : print ( "Changing pos " + str ( i ) + " with current value " + str ( data [ i ] ) + " to " + str ( new_bytes [ i ] ) ) data [ i ] = new_bytes [ i ] error = self . rfid . write ( block_address , data ) if self . debug : print ( "Writing " + str ( data ) + " to " + self . sector_string ( block_address ) ) return error
|
Rewrites block with new bytes, keeping the old ones where None is passed. Tag and auth must be set; does auth. Returns error state.
|
7,665
|
def get_access_bits ( self , c1 , c2 , c3 ) : byte_6 = ( ( ~ c2 [ 3 ] & 1 ) << 7 ) + ( ( ~ c2 [ 2 ] & 1 ) << 6 ) + ( ( ~ c2 [ 1 ] & 1 ) << 5 ) + ( ( ~ c2 [ 0 ] & 1 ) << 4 ) + ( ( ~ c1 [ 3 ] & 1 ) << 3 ) + ( ( ~ c1 [ 2 ] & 1 ) << 2 ) + ( ( ~ c1 [ 1 ] & 1 ) << 1 ) + ( ~ c1 [ 0 ] & 1 ) byte_7 = ( ( c1 [ 3 ] & 1 ) << 7 ) + ( ( c1 [ 2 ] & 1 ) << 6 ) + ( ( c1 [ 1 ] & 1 ) << 5 ) + ( ( c1 [ 0 ] & 1 ) << 4 ) + ( ( ~ c3 [ 3 ] & 1 ) << 3 ) + ( ( ~ c3 [ 2 ] & 1 ) << 2 ) + ( ( ~ c3 [ 1 ] & 1 ) << 1 ) + ( ~ c3 [ 0 ] & 1 ) byte_8 = ( ( c3 [ 3 ] & 1 ) << 7 ) + ( ( c3 [ 2 ] & 1 ) << 6 ) + ( ( c3 [ 1 ] & 1 ) << 5 ) + ( ( c3 [ 0 ] & 1 ) << 4 ) + ( ( c2 [ 3 ] & 1 ) << 3 ) + ( ( c2 [ 2 ] & 1 ) << 2 ) + ( ( c2 [ 1 ] & 1 ) << 1 ) + ( c2 [ 0 ] & 1 ) return byte_6 , byte_7 , byte_8
|
Calculates the access bits for a sector trailer based on their access conditions. c1, c2 and c3 are 4-item tuples containing the values for each block; returns the 3 bytes for the sector trailer.
|
7,666
|
def get_data ( n_samples = 100 ) : X , y = make_classification ( n_samples = n_samples , n_features = N_FEATURES , n_classes = N_CLASSES , random_state = 0 , ) X = X . astype ( np . float32 ) return X , y
|
Get synthetic classification data with n_samples samples .
|
7,667
|
def get_model ( with_pipeline = False ) : model = NeuralNetClassifier ( MLPClassifier ) if with_pipeline : model = Pipeline ( [ ( 'scale' , FeatureUnion ( [ ( 'minmax' , MinMaxScaler ( ) ) , ( 'normalize' , Normalizer ( ) ) , ] ) ) , ( 'select' , SelectKBest ( k = N_FEATURES ) ) , ( 'net' , model ) , ] ) return model
|
Get a multi - layer perceptron model .
|
7,668
|
def save_model ( model , output_file ) : if not output_file : return with open ( output_file , 'wb' ) as f : pickle . dump ( model , f ) print ( "Saved model to file '{}'." . format ( output_file ) )
|
Save model to output_file if given
|
7,669
|
def net ( n_samples = 100 , output_file = None , ** kwargs ) : model = get_model ( with_pipeline = False ) parsed = parse_args ( kwargs , defaults = DEFAULTS_NET ) model = parsed ( model ) X , y = get_data ( n_samples = n_samples ) print ( "Training MLP classifier" ) model . fit ( X , y ) save_model ( model , output_file )
|
Train an MLP classifier on synthetic data .
|
7,670
|
def pipeline ( n_samples = 100 , output_file = None , ** kwargs ) : model = get_model ( with_pipeline = True ) parsed = parse_args ( kwargs , defaults = DEFAULTS_PIPE ) model = parsed ( model ) X , y = get_data ( n_samples = n_samples ) print ( "Training MLP classifier in a pipeline" ) model . fit ( X , y ) save_model ( model , output_file )
|
Train an MLP classifier in a pipeline on synthetic data .
|
7,671
|
def predict ( self , X ) : y_preds = [ ] for yp in self . forward_iter ( X , training = False ) : yp = yp [ 0 ] if isinstance ( yp , tuple ) else yp y_preds . append ( to_numpy ( yp . max ( - 1 ) [ - 1 ] ) ) y_pred = np . concatenate ( y_preds , 0 ) return y_pred
|
Where applicable return class labels for samples in X .
|
7,672
|
def fit ( self , X , y , ** fit_params ) : return super ( ) . fit ( X , y , ** fit_params )
|
See NeuralNet . fit .
|
7,673
|
def predict_proba ( self , X ) : y_probas = [ ] bce_logits_loss = isinstance ( self . criterion_ , torch . nn . BCEWithLogitsLoss ) for yp in self . forward_iter ( X , training = False ) : yp = yp [ 0 ] if isinstance ( yp , tuple ) else yp if bce_logits_loss : yp = torch . sigmoid ( yp ) y_probas . append ( to_numpy ( yp ) ) y_proba = np . concatenate ( y_probas , 0 ) return y_proba
|
Where applicable return probability estimates for samples .
|
7,674
|
def _apply_to_data ( data , func , unpack_dict = False ) : apply_ = partial ( _apply_to_data , func = func , unpack_dict = unpack_dict ) if isinstance ( data , dict ) : if unpack_dict : return [ apply_ ( v ) for v in data . values ( ) ] return { k : apply_ ( v ) for k , v in data . items ( ) } if isinstance ( data , ( list , tuple ) ) : try : return [ apply_ ( x ) for x in data ] except TypeError : return func ( data ) return func ( data )
|
Apply a function to data trying to unpack different data types .
|
7,675
|
def uses_placeholder_y ( ds ) : if isinstance ( ds , torch . utils . data . Subset ) : return uses_placeholder_y ( ds . dataset ) return isinstance ( ds , Dataset ) and hasattr ( ds , "y" ) and ds . y is None
|
If ds is a skorch.dataset.Dataset, or a skorch.dataset.Dataset nested inside a torch.utils.data.Subset, and uses y as a placeholder, return True.
|
7,676
|
def unpack_data ( data ) : try : X , y = data return X , y except ValueError : if not isinstance ( data , ( tuple , list ) ) or len ( data ) < 2 : raise ValueError ( ERROR_MSG_1_ITEM ) raise ValueError ( ERROR_MSG_MORE_THAN_2_ITEMS . format ( len ( data ) ) )
|
Unpack data returned by the net's iterator into a 2-tuple.
|
7,677
|
def transform ( self , X , y ) : y = torch . Tensor ( [ 0 ] ) if y is None else y if sparse . issparse ( X ) : X = X . toarray ( ) . squeeze ( 0 ) return X , y
|
Additional transformations on X and y .
|
7,678
|
def check_cv ( self , y ) : y_arr = None if self . stratified : try : y_arr = to_numpy ( y ) except ( AttributeError , TypeError ) : y_arr = y if self . _is_float ( self . cv ) : return self . _check_cv_float ( ) return self . _check_cv_non_float ( y_arr )
|
Resolve which cross validation strategy is used .
|
7,679
|
def _get_span ( s , pattern ) : i , j = - 1 , - 1 match = pattern . match ( s ) if not match : return i , j for group_name in pattern . groupindex : i , j = match . span ( group_name ) if ( i , j ) != ( - 1 , - 1 ) : return i , j return i , j
|
Return the span of the first group that matches the pattern .
|
7,680
|
def _substitute_default ( s , new_value ) : if new_value is None : return s i , j = _get_span ( s , pattern = P_DEFAULTS ) if ( i , j ) == ( - 1 , - 1 ) : return s return '{}{}{}' . format ( s [ : i ] , new_value , s [ j : ] )
|
Replaces the default value in a parameter docstring by a new value .
|
7,681
|
def _resolve_dotted_name ( dotted_name ) : if not isinstance ( dotted_name , str ) : return dotted_name if '.' not in dotted_name : return dotted_name args = None params = None match = P_PARAMS . match ( dotted_name ) if match : dotted_name = match . group ( 'name' ) params = match . group ( 'params' ) module , name = dotted_name . rsplit ( '.' , 1 ) attr = import_module ( module ) attr = getattr ( attr , name ) if params : args , kwargs = _parse_args_kwargs ( params [ 1 : - 1 ] ) attr = attr ( * args , ** kwargs ) return attr
|
Returns objects resolved from dotted-name strings.
|
7,682
|
def parse_net_kwargs ( kwargs ) : if not kwargs : return kwargs resolved = { } for k , v in kwargs . items ( ) : resolved [ k ] = _resolve_dotted_name ( v ) return resolved
|
Parse arguments for the estimator .
|
7,683
|
def _yield_estimators ( model ) : yield from _yield_preproc_steps ( model ) net_prefixes = [ ] module_prefixes = [ ] if isinstance ( model , Pipeline ) : name = model . steps [ - 1 ] [ 0 ] net_prefixes . append ( name ) module_prefixes . append ( name ) net = model . steps [ - 1 ] [ 1 ] else : net = model yield '__' . join ( net_prefixes ) , net module = net . module module_prefixes . append ( 'module' ) yield '__' . join ( module_prefixes ) , module
|
Yield estimator and its prefix from the model .
|
7,684
|
def _get_help_for_estimator ( prefix , estimator , defaults = None ) : from numpydoc . docscrape import ClassDoc defaults = defaults or { } estimator = _extract_estimator_cls ( estimator ) yield "<{}> options:" . format ( estimator . __name__ ) doc = ClassDoc ( estimator ) yield from _get_help_for_params ( doc [ 'Parameters' ] , prefix = prefix , defaults = defaults , ) yield ''
|
Yield help lines for the given estimator and prefix .
|
7,685
|
def print_help ( model , defaults = None ) : defaults = defaults or { } print ( "This is the help for the model-specific parameters." ) print ( "To invoke help for the remaining options, run:" ) print ( "python {} -- --help" . format ( sys . argv [ 0 ] ) ) print ( ) lines = ( _get_help_for_estimator ( prefix , estimator , defaults = defaults ) for prefix , estimator in _yield_estimators ( model ) ) print ( '\n' . join ( chain ( * lines ) ) )
|
Print help for the command line arguments of the given model .
|
7,686
|
def parse_args ( kwargs , defaults = None ) : try : import fire except ImportError : raise ImportError ( "Using skorch cli helpers requires the fire library," " you can install it with pip: pip install fire." ) try : import numpydoc . docscrape except ImportError : raise ImportError ( "Using skorch cli helpers requires the numpydoc library," " you can install it with pip: pip install numpydoc." ) defaults = defaults or { } def print_help_and_exit ( estimator ) : print_help ( estimator , defaults = defaults ) sys . exit ( ) def set_params ( estimator ) : estimator . set_params ( ** defaults ) return estimator . set_params ( ** parse_net_kwargs ( kwargs ) ) if kwargs . get ( 'help' ) : return print_help_and_exit return set_params
|
Apply command line arguments or show help .
|
7,687
|
def filter_requires_grad ( pgroups ) : warnings . warn ( "For filtering gradients, please use skorch.callbacks.Freezer." , DeprecationWarning ) for pgroup in pgroups : output = { k : v for k , v in pgroup . items ( ) if k != 'params' } output [ 'params' ] = ( p for p in pgroup [ 'params' ] if p . requires_grad ) yield output
|
Returns parameter groups where parameters that don't require a gradient are filtered out.
|
7,688
|
def convert_cell_to_img ( t , padding = 16 ) : std = torch . Tensor ( [ 0.229 , 0.224 , 0.225 ] ) . reshape ( - 1 , 1 , 1 ) mu = torch . Tensor ( [ 0.485 , 0.456 , 0.406 ] ) . reshape ( - 1 , 1 , 1 ) output = t . mul ( std ) output . add_ ( mu ) img = to_pil_image ( output ) w , h = img . size return img . crop ( ( padding , padding , w - padding , h - padding ) )
|
Converts a pytorch tensor into a Pillow Image. The padding will be removed from the resulting image.
|
7,689
|
def plot_mask_cells ( mask_cells , padding = 16 ) : fig , axes = plt . subplots ( len ( mask_cells ) , 3 , figsize = ( 12 , 10 ) ) for idx , ( axes , mask_cell ) in enumerate ( zip ( axes , mask_cells ) , 1 ) : ax1 , ax2 , ax3 = axes true_mask , predicted_mask , cell = mask_cell plot_mask_cell ( true_mask , predicted_mask , cell , 'Type {}' . format ( idx ) , ax1 , ax2 , ax3 , padding = padding ) fig . tight_layout ( ) return fig , axes
|
Plots cells with their true mask and predicted mask.
|
7,690
|
def plot_mask_cell ( true_mask , predicted_mask , cell , suffix , ax1 , ax2 , ax3 , padding = 16 ) : for ax in [ ax1 , ax2 , ax3 ] : ax . grid ( False ) ax . set_xticks ( [ ] ) ax . set_yticks ( [ ] ) ax1 . imshow ( true_mask [ padding : - padding , padding : - padding ] , cmap = 'viridis' ) ax1 . set_title ( 'True Mask - {}' . format ( suffix ) ) ax2 . imshow ( predicted_mask [ padding : - padding , padding : - padding ] , cmap = 'viridis' ) ax2 . set_title ( 'Predicted Mask - {}' . format ( suffix ) ) ax3 . imshow ( convert_cell_to_img ( cell , padding = padding ) ) ax3 . set_title ( 'Image - {}' . format ( suffix ) ) return ax1 , ax2 , ax3
|
Plots a single cell with its true mask and predicted mask.
|
7,691
|
def plot_masks ( mask_1 , mask_2 , mask_3 ) : fig , ( ( ax1 , ax2 , ax3 ) ) = plt . subplots ( 1 , 3 , figsize = ( 12 , 5 ) ) for ax in [ ax1 , ax2 , ax3 ] : ax . grid ( False ) ax . set_xticks ( [ ] ) ax . set_yticks ( [ ] ) ax1 . set_title ( "Type 1" ) ax1 . imshow ( mask_1 , cmap = 'viridis' ) ax2 . set_title ( "Type 2" ) ax2 . imshow ( mask_2 , cmap = 'viridis' ) ax3 . set_title ( "Type 3" ) ax3 . imshow ( mask_3 , cmap = 'viridis' ) return ax1 , ax2 , ax3
|
Plots three masks
|
7,692
|
def plot_cells ( cell_1 , cell_2 , cell_3 ) : fig , ( ( ax1 , ax2 , ax3 ) ) = plt . subplots ( 1 , 3 , figsize = ( 12 , 5 ) ) for ax in [ ax1 , ax2 , ax3 ] : ax . grid ( False ) ax . set_xticks ( [ ] ) ax . set_yticks ( [ ] ) ax1 . set_title ( "Type 1" ) ax1 . imshow ( cell_1 ) ax2 . set_title ( "Type 2" ) ax2 . imshow ( cell_2 ) ax3 . set_title ( "Type 3" ) ax3 . imshow ( cell_3 ) return ax1 , ax2 , ax3
|
Plots three cells
|
7,693
|
def _sorted_keys ( self , keys ) : sorted_keys = [ ] if ( 'epoch' in keys ) and ( 'epoch' not in self . keys_ignored_ ) : sorted_keys . append ( 'epoch' ) for key in sorted ( keys ) : if not ( ( key in ( 'epoch' , 'dur' ) ) or ( key in self . keys_ignored_ ) or key . endswith ( '_best' ) or key . startswith ( 'event_' ) ) : sorted_keys . append ( key ) for key in sorted ( keys ) : if key . startswith ( 'event_' ) and ( key not in self . keys_ignored_ ) : sorted_keys . append ( key ) if ( 'dur' in keys ) and ( 'dur' not in self . keys_ignored_ ) : sorted_keys . append ( 'dur' ) return sorted_keys
|
Sort keys dropping the ones that should be ignored .
|
7,694
|
def to_tensor ( X , device , accept_sparse = False ) : to_tensor_ = partial ( to_tensor , device = device ) if is_torch_data_type ( X ) : return X . to ( device ) if isinstance ( X , dict ) : return { key : to_tensor_ ( val ) for key , val in X . items ( ) } if isinstance ( X , ( list , tuple ) ) : return [ to_tensor_ ( x ) for x in X ] if np . isscalar ( X ) : return torch . as_tensor ( X , device = device ) if isinstance ( X , Sequence ) : return torch . as_tensor ( np . array ( X ) , device = device ) if isinstance ( X , np . ndarray ) : return torch . as_tensor ( X , device = device ) if sparse . issparse ( X ) : if accept_sparse : return torch . sparse_coo_tensor ( X . nonzero ( ) , X . data , size = X . shape ) . to ( device ) raise TypeError ( "Sparse matrices are not supported. Set " "accept_sparse=True to allow sparse matrices." ) raise TypeError ( "Cannot convert this data type to a torch tensor." )
|
Turn input data to torch tensor .
|
7,695
|
def to_numpy ( X ) : if isinstance ( X , np . ndarray ) : return X if is_pandas_ndframe ( X ) : return X . values if not is_torch_data_type ( X ) : raise TypeError ( "Cannot convert this data type to a numpy array." ) if X . is_cuda : X = X . cpu ( ) if X . requires_grad : X = X . detach ( ) return X . numpy ( )
|
Generic function to convert a pytorch tensor to numpy .
|
7,696
|
def _normalize_numpy_indices ( i ) : if isinstance ( i , np . ndarray ) : if i . dtype == bool : i = tuple ( j . tolist ( ) for j in i . nonzero ( ) ) elif i . dtype == int : i = i . tolist ( ) return i
|
Normalize the index in case it is a numpy integer or boolean array .
|
7,697
|
def multi_indexing ( data , i , indexing = None ) : i = _normalize_numpy_indices ( i ) if indexing is not None : return indexing ( data , i ) return check_indexing ( data ) ( data , i )
|
Perform indexing on multiple data structures .
|
7,698
|
def duplicate_items ( * collections ) : duplicates = set ( ) seen = set ( ) for item in flatten ( collections ) : if item in seen : duplicates . add ( item ) else : seen . add ( item ) return duplicates
|
Search for duplicate items in all collections .
|
7,699
|
def params_for ( prefix , kwargs ) : if not prefix . endswith ( '__' ) : prefix += '__' return { key [ len ( prefix ) : ] : val for key , val in kwargs . items ( ) if key . startswith ( prefix ) }
|
Extract parameters that belong to a given sklearn module prefix from kwargs . This is useful to obtain parameters that belong to a submodule .
|