idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
def clean_whitespace(string, compact=False):
    """Return string with compressed whitespace.

    Normalize line endings to '\\n', collapse blank lines and tabs, and
    squeeze runs of spaces.  If *compact* is true, additionally join all
    lines into one and squeeze remaining double spaces.
    """
    # NOTE: the two-space patterns below were collapsed to single spaces in
    # the corrupted source, turning these replace() calls into no-ops; the
    # intended '  ' -> ' ' pairs are restored here.
    for a, b in (
        ('\r\n', '\n'),
        ('\r', '\n'),
        ('\n\n', '\n'),
        ('\t', ' '),
        ('  ', ' '),
    ):
        string = string.replace(a, b)
    if compact:
        for a, b in (
            ('\n', ' '),
            ('[ ', '['),
            ('  ', ' '),
            ('  ', ' '),
            ('  ', ' '),
        ):
            string = string.replace(a, b)
    return string.strip()
def pformat_xml(xml):
    """Return pretty formatted XML string.

    Use lxml if available; otherwise fall back to inserting newlines
    between adjacent tags.  Accepts str or bytes input.
    """
    try:
        from lxml import etree  # delayed import; optional dependency
        if not isinstance(xml, bytes):
            xml = xml.encode('utf-8')
        xml = etree.parse(io.BytesIO(xml))
        xml = etree.tostring(xml, pretty_print=True, xml_declaration=True,
                             encoding=xml.docinfo.encoding)
        xml = bytes2str(xml)
    except Exception:
        # lxml missing or parse failure: crude fallback formatting
        if isinstance(xml, bytes):
            xml = bytes2str(xml)
        xml = xml.replace('><', '>\n<')
    # squeeze double spaces and tabs; the '  ' pattern was collapsed to a
    # single space (a no-op) in the corrupted source and is restored here
    return xml.replace('  ', ' ').replace('\t', ' ')
def pformat(arg, width=79, height=24, compact=True):
    """Return pretty formatted representation of object as string.

    Whitespace might be altered.  XML strings are pretty printed, bytes
    are hex-dumped if not printable, and other objects go through
    pprint/numpy formatting.  Output is clipped to *width* columns and
    *height* lines ('...' marks elided middle lines).
    """
    if height is None or height < 1:
        height = 1024
    if width is None or width < 1:
        width = 256
    # save and restore global numpy print options around formatting
    npopt = numpy.get_printoptions()
    numpy.set_printoptions(threshold=100, linewidth=width)
    if isinstance(arg, basestring):
        if arg[:5].lower() in ('<?xml', b'<?xml'):
            if isinstance(arg, bytes):
                arg = bytes2str(arg)
            if height == 1:
                arg = arg[:4 * width]
            else:
                arg = pformat_xml(arg)
        elif isinstance(arg, bytes):
            if isprintable(arg):
                arg = bytes2str(arg)
                arg = clean_whitespace(arg)
            else:
                # binary data: restore print options before early return
                numpy.set_printoptions(**npopt)
                return hexdump(arg, width=width, height=height, modulo=1)
        arg = arg.rstrip()
    elif isinstance(arg, numpy.record):
        arg = arg.pprint()
    else:
        import pprint
        # pprint.pformat gained 'compact' in Python 3.4; omit on Python 2
        compact = {} if sys.version_info[0] == 2 else dict(compact=compact)
        arg = pprint.pformat(arg, width=width, **compact)
    numpy.set_printoptions(**npopt)
    if height == 1:
        arg = clean_whitespace(arg, compact=True)
        return arg[:width]
    argl = list(arg.splitlines())
    if len(argl) > height:
        # keep first and last height//2 lines, elide the middle
        arg = '\n'.join(argl[:height // 2] + ['...'] + argl[-height // 2:])
    return arg
def snipstr(string, width=79, snipat=None, ellipsis='...'):
    """Return string cut to specified length.

    Each line longer than *width* is shortened by inserting *ellipsis* at
    the relative position *snipat* (fraction in (0, 1), absolute index, or
    None for the middle).  If *ellipsis* is None, use '...' for bytes and
    the Unicode ellipsis for str.
    """
    if snipat is None:
        snipat = 0.5
    if ellipsis is None:
        if isinstance(string, bytes):
            ellipsis = b'...'
        else:
            ellipsis = u'\u2026'
    esize = len(ellipsis)

    splitlines = string.splitlines()
    result = []
    for line in splitlines:
        if line is None:  # defensive; splitlines() never yields None
            result.append(ellipsis)
            continue
        linelen = len(line)
        if linelen <= width:
            # FIX: appended the whole input 'string' instead of the current
            # 'line', which duplicated the entire text for multiline input
            result.append(line)
            continue

        # resolve snip position to an absolute index into the line
        split = snipat
        if split is None or split == 1:
            split = linelen
        elif 0 < abs(split) < 1:
            split = int(math.floor(linelen * split))
        if split < 0:
            split += linelen
            if split < 0:
                split = 0

        if esize == 0 or width < esize + 1:
            # no room for the ellipsis: plain truncation
            if split <= 0:
                result.append(line[-width:])
            else:
                result.append(line[:width])
        elif split <= 0:
            result.append(ellipsis + line[esize - width:])
        elif split >= linelen or width < esize + 4:
            result.append(line[:width - esize] + ellipsis)
        else:
            # snip the middle, keeping both ends
            splitlen = linelen - width + esize
            end1 = split - splitlen // 2
            end2 = end1 + splitlen
            result.append(line[:end1] + ellipsis + line[end2:])

    if isinstance(string, bytes):
        return b'\n'.join(result)
    return '\n'.join(result)
def enumarg(enum, arg):
    """Return enum member from its name or value.

    Try *arg* as a value first, then as an (upper-cased) member name.
    Raise ValueError if neither matches.
    """
    try:
        return enum(arg)
    except Exception:
        pass
    try:
        return enum[arg.upper()]
    except Exception:
        raise ValueError('invalid argument %s' % arg)
def parse_kwargs(kwargs, *keys, **keyvalues):
    """Return dict with keys from keys|keyvals and values from kwargs|keyvals.

    Matched keys are removed from *kwargs*; *keyvalues* supply defaults
    for keys missing from *kwargs*.
    """
    result = {}
    for key in keys:
        if key in kwargs:
            result[key] = kwargs.pop(key)
    for key, default in keyvalues.items():
        result[key] = kwargs.pop(key, default)
    return result
def update_kwargs(kwargs, **keyvalues):
    """Update dict with keys and values if keys do not already exist.

    Mutates *kwargs* in place; existing entries are never overwritten.
    """
    for key, value in keyvalues.items():
        kwargs.setdefault(key, value)
def validate_jhove(filename, jhove=None, ignore=None):
    """Validate TIFF file using jhove -m TIFF-hul.

    Run the external 'jhove' tool on *filename* and raise ValueError on
    the first reported error message that does not contain any of the
    *ignore* substrings.  *jhove* is the executable path (default
    'jhove').
    """
    import subprocess
    if ignore is None:
        ignore = ['More than 50 IFDs']
    if jhove is None:
        jhove = 'jhove'
    out = subprocess.check_output([jhove, filename, '-m', 'TIFF-hul'])
    if b'ErrorMessage: ' in out:
        for line in out.splitlines():
            line = line.strip()
            if line.startswith(b'ErrorMessage: '):
                error = line[14:].decode('utf8')
                for i in ignore:
                    if i in error:
                        break
                else:
                    # no ignore pattern matched: report the error
                    raise ValueError(error)
                break  # only the first error message is examined
def _write_image_description(self):
    """Write metadata to ImageDescription tag.

    Overwrite the reserved ImageDescription tag value and its length
    field in the file, then reset the bookkeeping offsets.  No-op if no
    data was written, only one page exists, or no space was reserved.
    """
    if (not self._datashape or self._datashape[0] == 1 or
            self._descriptionoffset <= 0):
        return
    colormapped = self._colormap is not None
    if self._imagej:
        isrgb = self._shape[-1] in (3, 4)
        description = imagej_description(
            self._datashape, isrgb, colormapped, **self._metadata)
    else:
        description = json_description(self._datashape, **self._metadata)
    # truncate to the reserved space (keep one byte for the terminator)
    description = description.encode('utf-8')
    description = description[:self._descriptionlen - 1]
    pos = self._fh.tell()  # remember position to restore afterwards
    self._fh.seek(self._descriptionoffset)
    self._fh.write(description)
    self._fh.seek(self._descriptionlenoffset)
    self._fh.write(struct.pack(self._byteorder + self._offsetformat,
                               len(description) + 1))
    self._fh.seek(pos)
    self._descriptionoffset = 0
    self._descriptionlenoffset = 0
    self._descriptionlen = 0
def close(self):
    """Write remaining pages and close file handle."""
    if not self._truncate:
        self._write_remaining_pages()
    self._write_image_description()
    self._fh.close()
def series(self):
    """Return related pages as TiffPageSeries.

    Try format-specific series detection in priority order, falling back
    to generic grouping.  Empty series are dropped and indices assigned.
    """
    if not self.pages:
        return []
    # remember page-cache state and restore it after detection
    useframes = self.pages.useframes
    keyframe = self.pages.keyframe.index
    series = []
    for name in ('lsm', 'ome', 'imagej', 'shaped', 'fluoview', 'sis',
                 'uniform', 'mdgel'):
        if getattr(self, 'is_' + name, False):
            series = getattr(self, '_series_' + name)()
            break
    self.pages.useframes = useframes
    self.pages.keyframe = keyframe
    if not series:
        series = self._series_generic()
    # remove empty series, e.g. in MD Gel files
    series = [s for s in series if product(s.shape) > 0]
    for i, s in enumerate(series):
        s.index = i
    return series
def _series_generic(self):
    """Return image series in file.

    Group pages by hash; pages with identical hash form one series,
    stacked along a new 'I' axis when there is more than one page.
    """
    pages = self.pages
    pages._clear(False)
    pages.useframes = False
    if pages.cache:
        pages._load()
    result = []
    keys = []  # preserves first-seen order of hashes
    series = {}
    for page in pages:
        if not page.shape or product(page.shape) == 0:
            continue
        key = page.hash
        if key in series:
            series[key].append(page)
        else:
            keys.append(key)
            series[key] = [page]
    for key in keys:
        pages = series[key]
        page = pages[0]
        shape = page.shape
        axes = page.axes
        if len(pages) > 1:
            shape = (len(pages),) + shape
            axes = 'I' + axes
        result.append(TiffPageSeries(pages, shape, page.dtype, axes,
                                     kind='Generic'))
    self.is_uniform = len(result) == 1
    return result
def _series_uniform(self):
    """Return all images in file as single series.

    Page validation is skipped for ScanImage and NIH files, which also
    get a dedicated series kind.
    """
    page = self.pages[0]
    shape = page.shape
    axes = page.axes
    dtype = page.dtype
    validate = not (page.is_scanimage or page.is_nih)
    pages = self.pages._getlist(validate=validate)
    lenpages = len(pages)
    if lenpages > 1:
        shape = (lenpages,) + shape
        axes = 'I' + axes
    if page.is_scanimage:
        kind = 'ScanImage'
    elif page.is_nih:
        kind = 'NIHImage'
    else:
        kind = 'Uniform'
    return [TiffPageSeries(pages, shape, dtype, axes, kind=kind)]
def _series_shaped(self):
    """Return image series in shaped file.

    Parse the JSON/shape metadata stored in ImageDescription tags and
    group consecutive pages into series accordingly.  Return None on
    invalid metadata or corrupted files.
    """
    pages = self.pages
    pages.useframes = True
    lenpages = len(pages)

    def append_series(series, pages, axes, shape, reshape, name, truncated):
        # wrap a run of pages into one TiffPageSeries, reshaping to the
        # metadata shape when possible
        page = pages[0]
        if not axes:
            shape = page.shape
            axes = page.axes
            if len(pages) > 1:
                shape = (len(pages),) + shape
                axes = 'Q' + axes
        size = product(shape)
        resize = product(reshape)
        if page.is_contiguous and resize > size and resize % size == 0:
            # truncated file: single page represents multiple planes
            if truncated is None:
                truncated = True
            axes = 'Q' + axes
            shape = (resize // size,) + shape
        try:
            axes = reshape_axes(axes, shape, reshape)
            shape = reshape
        except ValueError as exc:
            log.warning('Shaped series: %s: %s',
                        exc.__class__.__name__, exc)
        series.append(
            TiffPageSeries(pages, shape, page.dtype, axes, name=name,
                           kind='Shaped', truncated=truncated))

    keyframe = axes = shape = reshape = name = None
    series = []
    index = 0
    while True:
        if index >= lenpages:
            break
        # new keyframe; start of a new series
        pages.keyframe = index
        keyframe = pages.keyframe
        if not keyframe.is_shaped:
            log.warning('Shaped series: invalid metadata or corrupted file')
            return None
        axes = None
        shape = None
        metadata = json_description_metadata(keyframe.is_shaped)
        name = metadata.get('name', '')
        reshape = metadata['shape']
        truncated = metadata.get('truncated', None)
        if 'axes' in metadata:
            axes = metadata['axes']
            if len(axes) == len(reshape):
                shape = reshape
            else:
                axes = ''
                log.warning('Shaped series: axes do not match shape')
        # skip pages if possible
        spages = [keyframe]
        size = product(reshape)
        npages, mod = divmod(size, product(keyframe.shape))
        if mod:
            log.warning(
                'Shaped series: series shape does not match page shape')
            return None
        if 1 < npages <= lenpages - index:
            size *= keyframe._dtype.itemsize
            if truncated:
                npages = 1
            elif (keyframe.is_final and
                  keyframe.offset + size < pages[index + 1].offset):
                truncated = False
            else:
                truncated = False
        # use the keyframe for all other pages of the series
        for j in range(index + 1, index + npages):
            page = pages[j]
            page.keyframe = keyframe
            spages.append(page)
        append_series(series, spages, axes, shape, reshape, name, truncated)
        index += npages
    self.is_uniform = len(series) == 1
    return series
def _series_imagej(self):
    """Return image series in ImageJ file.

    ImageJ hyperstacks store all images contiguously after the first
    page; dimensions come from the ImageJ metadata (frames/slices/
    channels).  Return None on invalid metadata or corrupted files.
    """
    pages = self.pages
    pages.useframes = True
    pages.keyframe = 0
    page = pages[0]
    ij = self.imagej_metadata

    def is_hyperstack():
        # detect contiguous hyperstack layout; raise ValueError when the
        # metadata contradicts the file layout
        if not page.is_final:
            return False
        images = ij.get('images', 0)
        if images <= 1:
            return False
        offset, count = page.is_contiguous
        if (count != product(page.shape) * page.bitspersample // 8 or
                offset + count * images > self.filehandle.size):
            raise ValueError()
        if len(pages) > 1 and offset + count * images > pages[1].offset:
            return False
        return True

    try:
        hyperstack = is_hyperstack()
    except ValueError:
        log.warning('ImageJ series: invalid metadata or corrupted file')
        return None
    if hyperstack:
        # no need to read other pages
        pages = [page]
    else:
        pages = pages[:]
    shape = []
    axes = []
    if 'frames' in ij:
        shape.append(ij['frames'])
        axes.append('T')
    if 'slices' in ij:
        shape.append(ij['slices'])
        axes.append('Z')
    if 'channels' in ij and not (page.photometric == 2 and
                                 not ij.get('hyperstack', False)):
        shape.append(ij['channels'])
        axes.append('C')
    remain = ij.get('images', len(pages)) // (product(shape)
                                              if shape else 1)
    if remain > 1:
        shape.append(remain)
        axes.append('I')
    if page.axes[0] == 'I':
        # contiguous multiple images
        shape.extend(page.shape[1:])
        axes.extend(page.axes[1:])
    elif page.axes[:2] == 'SI':
        # color-mapped contiguous multiple images
        shape = page.shape[0:1] + tuple(shape) + page.shape[2:]
        axes = list(page.axes[0]) + axes + list(page.axes[2:])
    else:
        shape.extend(page.shape)
        axes.extend(page.axes)
    truncated = (
        hyperstack and len(self.pages) == 1 and
        page.is_contiguous[1] != product(shape) * page.bitspersample // 8)
    self.is_uniform = True
    return [TiffPageSeries(pages, shape, page.dtype, axes, kind='ImageJ',
                           truncated=truncated)]
def _series_fluoview(self):
    """Return image series in FluoView file.

    Dimensions are read (reversed) from the MM_Header metadata; axes of
    length one are dropped.
    """
    pages = self.pages._getlist(validate=False)
    mm = self.fluoview_metadata
    mmhd = list(reversed(mm['Dimensions']))
    axes = ''.join(TIFF.MM_DIMENSIONS.get(i[0].upper(), 'Q')
                   for i in mmhd if i[1] > 1)
    shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
    self.is_uniform = True
    return [TiffPageSeries(pages, shape, pages[0].dtype, axes,
                           name=mm['ImageName'], kind='FluoView')]
def _series_mdgel(self):
    """Return image series in MD Gel file.

    Only the first page is used; pixel values are scaled according to
    metadata from the second page (FileTag 2 stores square-root data).
    """
    self.pages.useframes = False
    self.pages.keyframe = 0
    md = self.mdgel_metadata
    if md['FileTag'] in (2, 128):
        dtype = numpy.dtype('float32')
        scale = md['ScalePixel']
        scale = scale[0] / scale[1]  # rational number
        if md['FileTag'] == 2:
            # square-root encoded data
            def transform(a):
                return a.astype('float32')**2 * scale
        else:
            def transform(a):
                return a.astype('float32') * scale
    else:
        # NOTE(review): 'dtype' stays unbound on this path and the return
        # below would raise NameError — presumably FileTag is always
        # 2 or 128 in practice; confirm against format spec
        transform = None
    page = self.pages[0]
    self.is_uniform = False
    return [TiffPageSeries([page], page.shape, dtype, page.axes,
                           transform=transform, kind='MDGel')]
def _series_sis(self):
    """Return image series in Olympus SIS file.

    Use shape/axes from SIS metadata when present; otherwise stack all
    pages along a new 'I' axis.
    """
    pages = self.pages._getlist(validate=False)
    page = pages[0]
    lenpages = len(pages)
    md = self.sis_metadata
    if 'shape' in md and 'axes' in md:
        shape = md['shape'] + page.shape
        axes = md['axes'] + page.axes
    elif lenpages == 1:
        shape = page.shape
        axes = page.axes
    else:
        shape = (lenpages,) + page.shape
        axes = 'I' + page.axes
    self.is_uniform = True
    return [TiffPageSeries(pages, shape, page.dtype, axes, kind='SIS')]
def _series_lsm(self):
    """Return main and thumbnail series in LSM file.

    LSM files interleave image pages (even indices) and reduced
    thumbnail pages (odd indices); dimensions come from CZ_LSMINFO.
    """
    lsmi = self.lsm_metadata
    axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi['ScanType']]
    if self.pages[0].photometric == 2:  # RGB: channels are the last axis
        axes = axes.replace('C', '').replace('XY', 'XYC')
    if lsmi.get('DimensionP', 0) > 1:
        axes += 'P'
    if lsmi.get('DimensionM', 0) > 1:
        axes += 'M'
    axes = axes[::-1]
    shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes)
    name = lsmi.get('Name', '')
    # main series: every second page starting at 0
    pages = self.pages._getlist(slice(0, None, 2), validate=False)
    dtype = pages[0].dtype
    series = [TiffPageSeries(pages, shape, dtype, axes, name=name,
                             kind='LSM')]
    if self.pages[1].is_reduced:
        # thumbnail series: every second page starting at 1
        pages = self.pages._getlist(slice(1, None, 2), validate=False)
        dtype = pages[0].dtype
        # keep leading axes that still multiply to the page count
        cp = 1
        i = 0
        while cp < len(pages) and i < len(shape) - 2:
            cp *= shape[i]
            i += 1
        shape = shape[:i] + pages[0].shape
        axes = axes[:i] + 'CYX'
        series.append(TiffPageSeries(pages, shape, dtype, axes, name=name,
                                     kind='LSMreduced'))
    self.is_uniform = False
    return series
def _lsm_load_pages(self):
    """Load and fix all pages from LSM file.

    Cache all pages, correct strip offsets/bytecounts, and assign the
    first two pages as keyframes for the image and thumbnail series.
    """
    # cache all pages so corrected values are preserved
    pages = self.pages
    pages.cache = True
    pages.useframes = True
    # use first and second page as keyframes
    pages.keyframe = 1
    pages.keyframe = 0
    # load remaining pages as frames
    pages._load(keyframe=None)
    # fix offsets and bytecounts first
    self._lsm_fix_strip_offsets()
    self._lsm_fix_strip_bytecounts()
    # assign keyframes for data and thumbnail series
    keyframe = pages[0]
    for page in pages[::2]:
        page.keyframe = keyframe
    keyframe = pages[1]
    for page in pages[1::2]:
        page.keyframe = keyframe
def _lsm_fix_strip_offsets(self):
    """Unwrap strip offsets for LSM files greater than 4 GB.

    LSM stores 32-bit strip offsets; in files larger than 4 GB they wrap
    around.  Walk pages in on-disk order and add 2**32 whenever an
    offset decreases.
    """
    if self.filehandle.size < 2**32:
        return
    pages = self.pages
    npages = len(pages)
    series = self.series[0]
    axes = series.axes
    # find positions (P/M mosaic axes)
    positions = 1
    for i in 0, 1:
        if series.axes[i] in 'PM':
            positions *= series.shape[i]
    # make time axis first
    if positions > 1:
        ntimes = 0
        for i in 1, 2:
            if axes[i] == 'T':
                ntimes = series.shape[i]
                break
        if ntimes:
            div, mod = divmod(npages, 2 * positions * ntimes)
            assert mod == 0
            shape = (positions, ntimes, div, 2)
            indices = numpy.arange(product(shape)).reshape(shape)
            indices = numpy.moveaxis(indices, 1, 0)
    else:
        indices = numpy.arange(npages).reshape(-1, 2)
    # images of reduced page might be stored first
    if pages[0]._offsetscounts[0][0] > pages[1]._offsetscounts[0][0]:
        indices = indices[..., ::-1]
    # unwrap offsets
    wrap = 0
    previousoffset = 0
    for i in indices.flat:
        page = pages[int(i)]
        dataoffsets = []
        for currentoffset in page._offsetscounts[0]:
            if currentoffset < previousoffset:
                wrap += 2**32  # 32-bit offset wrapped around
            dataoffsets.append(currentoffset + wrap)
            previousoffset = currentoffset
        page._offsetscounts = dataoffsets, page._offsetscounts[1]
def _lsm_fix_strip_bytecounts(self):
    """Set databytecounts to size of compressed data.

    The StripByteCounts tag in LSM files contains the sizes of the
    uncompressed data; recompute them from consecutive strip offsets.
    """
    pages = self.pages
    if pages[0].compression == 1:
        return  # data not compressed; counts are already correct
    # sort pages by first strip offset
    pages = sorted(pages, key=lambda p: p._offsetscounts[0][0])
    npages = len(pages) - 1
    for i, page in enumerate(pages):
        if page.index % 2:
            continue  # skip thumbnail pages
        offsets, bytecounts = page._offsetscounts
        if i < npages:
            lastoffset = pages[i + 1]._offsetscounts[0][0]
        else:
            # last page; estimate from file size
            lastoffset = min(offsets[-1] + 2 * bytecounts[-1],
                             self._fh.size)
        for j in range(len(bytecounts) - 1):
            bytecounts[j] = offsets[j + 1] - offsets[j]
        bytecounts[-1] = lastoffset - offsets[-1]
def is_mdgel(self):
    """File has MD Gel format.

    Checks the first two pages; as a side effect marks the file as
    non-uniform when MD Gel is detected.
    """
    try:
        ismdgel = self.pages[0].is_mdgel or self.pages[1].is_mdgel
        if ismdgel:
            self.is_uniform = False
        return ismdgel
    except IndexError:
        # fewer than two pages
        return False
def is_uniform(self):
    """Return if file contains a uniform series of pages.

    Compare the hash of the first page against a sample of other pages
    (indices 1, 7, and last).  ScanImage and NIH files are assumed
    uniform.
    """
    pages = self.pages
    page = pages[0]
    if page.is_scanimage or page.is_nih:
        return True
    try:
        useframes = pages.useframes
        pages.useframes = False
        h = page.hash
        for i in (1, 7, -1):
            if pages[i].aspage().hash != h:
                return False
    except IndexError:
        return False  # fewer pages than sampled indices
    finally:
        pages.useframes = useframes
    return True
def is_appendable(self):
    """Return if pages can be appended to file without corrupting.

    Files in formats that rely on a fixed page layout (LSM, STK, ImageJ,
    FluoView, MicroManager) must not be appended to.
    """
    return not any(
        getattr(self, flag)
        for flag in ('is_lsm', 'is_stk', 'is_imagej', 'is_fluoview',
                     'is_micromanager'))
def shaped_metadata(self):
    """Return tifffile metadata from JSON descriptions as dicts.

    One dict per 'Shaped' series, parsed from the first page's
    ImageDescription; None if the file is not shaped.
    """
    if not self.is_shaped:
        return None
    return tuple(json_description_metadata(s.pages[0].is_shaped)
                 for s in self.series if s.kind.lower() == 'shaped')
def stk_metadata(self):
    """Return STK metadata from UIC tags as dict.

    Consolidate UIC1-UIC4 tag values and derive datetime64 arrays from
    the Julian date/time fields.
    """
    if not self.is_stk:
        return None
    page = self.pages[0]
    tags = page.tags
    result = {}
    result['NumberPlanes'] = tags['UIC2tag'].count
    if page.description:
        result['PlaneDescriptions'] = page.description.split('\0')
    if 'UIC1tag' in tags:
        result.update(tags['UIC1tag'].value)
    if 'UIC3tag' in tags:
        result.update(tags['UIC3tag'].value)
    if 'UIC4tag' in tags:
        result.update(tags['UIC4tag'].value)
    uic2tag = tags['UIC2tag'].value
    result['ZDistance'] = uic2tag['ZDistance']
    result['TimeCreated'] = uic2tag['TimeCreated']
    result['TimeModified'] = uic2tag['TimeModified']
    try:
        # convert Julian date/time pairs to numpy datetime64
        result['DatetimeCreated'] = numpy.array(
            [julian_datetime(*dt) for dt in
             zip(uic2tag['DateCreated'], uic2tag['TimeCreated'])],
            dtype='datetime64[ns]')
        result['DatetimeModified'] = numpy.array(
            [julian_datetime(*dt) for dt in
             zip(uic2tag['DateModified'], uic2tag['TimeModified'])],
            dtype='datetime64[ns]')
    except ValueError as exc:
        log.warning('STK metadata: %s: %s', exc.__class__.__name__, exc)
    return result
def imagej_metadata(self):
    """Return consolidated ImageJ metadata as dict.

    Parse the ImageJ description of the first page and merge in the
    IJMetadata tag value if present (best effort).
    """
    if not self.is_imagej:
        return None
    page = self.pages[0]
    result = imagej_description_metadata(page.is_imagej)
    if 'IJMetadata' in page.tags:
        try:
            result.update(page.tags['IJMetadata'].value)
        except Exception:
            pass  # best effort; malformed IJMetadata is ignored
    return result
def fluoview_metadata(self):
    """Return consolidated FluoView metadata as dict.

    Merge the MM_Header tag value and the first page's MM_Stamp.
    """
    if not self.is_fluoview:
        return None
    result = {}
    page = self.pages[0]
    result.update(page.tags['MM_Header'].value)
    result['Stamp'] = page.tags['MM_Stamp'].value
    return result
def fei_metadata(self):
    """Return FEI metadata from SFEG or HELIOS tags as dict.

    The SFEG tag takes precedence; None if neither tag is present or
    the file is not FEI.
    """
    if not self.is_fei:
        return None
    tags = self.pages[0].tags
    for tagname in ('FEI_SFEG', 'FEI_HELIOS'):
        if tagname in tags:
            return tags[tagname].value
    return None
def sis_metadata(self):
    """Return Olympus SIS metadata from SIS and INI tags as dict.

    Values from the SIS tag override INI values; missing or malformed
    tags are silently skipped.
    """
    if not self.is_sis:
        return None
    tags = self.pages[0].tags
    result = {}
    for tagname in ('OlympusINI', 'OlympusSIS'):
        try:
            result.update(tags[tagname].value)
        except Exception:
            pass  # tag missing or value not a mapping
    return result
def mdgel_metadata(self):
    """Return consolidated metadata from MD GEL tags as dict.

    Look for MD tags (codes 33445-33452) on the first two pages; the
    'MD' prefix is stripped from result keys.
    """
    for page in self.pages[:2]:
        if 'MDFileTag' in page.tags:
            tags = page.tags
            break
    else:
        return None
    result = {}
    for code in range(33445, 33453):
        name = TIFF.TAGS[code]
        if name not in tags:
            continue
        result[name[2:]] = tags[name].value  # strip 'MD' prefix
    return result
def micromanager_metadata(self):
    """Return consolidated MicroManager metadata as dict.

    Combine metadata from the file header with the first page's
    MicroManagerMetadata tag (tag values take precedence).
    """
    if not self.is_micromanager:
        return None
    result = read_micromanager_metadata(self._fh)
    result.update(self.pages[0].tags['MicroManagerMetadata'].value)
    return result
def scanimage_metadata(self):
    """Return ScanImage non-varying frame and ROI metadata as dict.

    Frame/ROI data is read from the file header (best effort); the
    first page's description is parsed separately into 'Description'.
    """
    if not self.is_scanimage:
        return None
    result = {}
    try:
        framedata, roidata = read_scanimage_metadata(self._fh)
        result['FrameData'] = framedata
        result.update(roidata)
    except ValueError:
        pass  # header metadata not available
    try:
        result['Description'] = scanimage_description_metadata(
            self.pages[0].description)
    except Exception as exc:
        log.warning('ScanImage metadata: %s: %s',
                    exc.__class__.__name__, exc)
    return result
def keyframe(self, index):
    """Set current keyframe. Load TiffPage from file if necessary.

    A cached TiffPage at *index* is reused; a cached TiffFrame is
    replaced by a freshly loaded TiffPage.
    """
    index = int(index)
    if index < 0:
        index %= len(self)
    if self._keyframe.index == index:
        return
    if index == 0:
        self._keyframe = self.pages[0]
        return
    if self._indexed or index < len(self.pages):
        page = self.pages[index]
        if isinstance(page, TiffPage):
            self._keyframe = page
            return
        if isinstance(page, TiffFrame):
            # drop the frame; it will be reloaded as a TiffPage below
            self.pages[index] = page.offset
    # load TiffPage from file, temporarily forcing the page type
    tiffpage = self._tiffpage
    self._tiffpage = TiffPage
    try:
        self._keyframe = self._getitem(index)
    finally:
        self._tiffpage = tiffpage
    self.pages[index] = self._keyframe
def _load(self, keyframe=True):
    """Read all remaining pages from file.

    Cached integer entries in self.pages are offsets; replace them with
    loaded page objects.  If *keyframe* is not None, the current
    keyframe is used for all loaded frames.
    """
    if self._cached:
        return
    pages = self.pages
    if not pages:
        return
    if not self._indexed:
        self._seek(-1)  # index all page offsets first
    if not self._cache:
        return
    fh = self.parent.filehandle
    if keyframe is not None:
        keyframe = self._keyframe
    for i, page in enumerate(pages):
        if isinstance(page, inttypes):
            # entry is still an offset; load the page/frame
            fh.seek(page)
            page = self._tiffpage(self.parent, index=i, keyframe=keyframe)
            pages[i] = page
    self._cached = True
def _load_virtual_frames(self):
    """Calculate virtual TiffFrames.

    For files whose pages are stored at equidistant offsets with
    contiguous data, synthesize TiffFrame objects without reading them
    from file.  Falls back silently (with a warning) when the layout
    does not qualify.
    """
    pages = self.pages
    try:
        if sys.version_info[0] == 2:
            raise ValueError('not supported on Python 2')
        if len(pages) > 1:
            raise ValueError('pages already loaded')
        page = pages[0]
        bytecounts = page._offsetscounts[1]
        if len(bytecounts) != 1:
            raise ValueError('data not contiguous')
        self._seek(4)  # index offsets of first few pages
        delta = pages[2] - pages[1]
        if pages[3] - pages[2] != delta or pages[4] - pages[3] != delta:
            raise ValueError('page offsets not equidistant')
        page1 = self._getitem(1, validate=page.hash)
        offsetoffset = page1._offsetscounts[0][0] - page1.offset
        if offsetoffset < 0 or offsetoffset > delta:
            raise ValueError('page offsets not equidistant')
        pages = [page, page1]
        filesize = self.parent.filehandle.size - delta
        for index, offset in enumerate(range(page1.offset + delta,
                                             filesize, delta)):
            offsets = [offset + offsetoffset]
            # only keep page offsets that fit in 32 bits
            offset = offset if offset < 2**31 else None
            pages.append(TiffFrame(parent=page.parent, index=index + 2,
                                   offset=None, offsets=offsets,
                                   bytecounts=bytecounts, keyframe=page))
    except Exception as exc:
        log.warning('TiffPages: failed to load virtual frames: %s',
                    str(exc))
    assert pages[1]
    self.pages = pages
    self._cache = True
    self._cached = True
    self._indexed = True
def _clear(self, fully=True):
    """Delete all but first page from cache. Set keyframe to first page.

    If *fully* is false, only TiffFrame instances are replaced by their
    offsets; TiffPages are kept.
    """
    pages = self.pages
    if not pages:
        return
    self._keyframe = pages[0]
    if fully:
        # replace all cached pages (except the first) by their offsets
        for i, page in enumerate(pages[1:]):
            if not isinstance(page, inttypes) and page.offset is not None:
                pages[i + 1] = page.offset
    elif TiffFrame is not TiffPage:
        # replace only TiffFrames by their offsets
        for i, page in enumerate(pages):
            if isinstance(page, TiffFrame) and page.offset is not None:
                pages[i] = page.offset
    self._cached = False
def _seek(self, index, maxpages=None):
    """Seek file to offset of page specified by index.

    Follow the IFD chain, appending newly discovered page offsets to
    self.pages, until *index* is reached or the chain ends.  Raise
    IndexError for out-of-range indices and TiffFileError on corrupted
    or circular IFD chains.
    """
    pages = self.pages
    lenpages = len(pages)
    if lenpages == 0:
        raise IndexError('index out of range')
    fh = self.parent.filehandle
    if fh.closed:
        raise ValueError('seek of closed file')
    if self._indexed or 0 <= index < lenpages:
        page = pages[index]
        offset = page if isinstance(page, inttypes) else page.offset
        fh.seek(offset)
        return
    # walk the IFD chain starting from the last known page
    tiff = self.parent.tiff
    offsetformat = tiff.ifdoffsetformat
    offsetsize = tiff.ifdoffsetsize
    tagnoformat = tiff.tagnoformat
    tagnosize = tiff.tagnosize
    tagsize = tiff.tagsize
    unpack = struct.unpack
    page = pages[-1]
    offset = page if isinstance(page, inttypes) else page.offset
    if maxpages is None:
        maxpages = 2**22
    while lenpages < maxpages:
        # read number of tags; skip over the tag entries
        fh.seek(offset)
        try:
            tagno = unpack(tagnoformat, fh.read(tagnosize))[0]
            if tagno > 4096:
                raise TiffFileError(
                    'suspicious number of tags: %i' % tagno)
        except Exception:
            log.warning('TiffPages: corrupted tag list of page %i @ %i',
                        lenpages, offset)
            del pages[-1]
            lenpages -= 1
            self._indexed = True
            break
        self._nextpageoffset = offset + tagnosize + tagno * tagsize
        fh.seek(self._nextpageoffset)
        # read offset of next IFD
        offset = unpack(offsetformat, fh.read(offsetsize))[0]
        if offset == 0:
            self._indexed = True  # end of IFD chain
            break
        if offset >= fh.size:
            log.warning('TiffPages: invalid page offset (%i)', offset)
            self._indexed = True
            break
        pages.append(offset)
        lenpages += 1
        if 0 <= index < lenpages:
            break
        # detect some circular references
        if lenpages == 100:
            for p in pages[:-1]:
                if offset == (p if isinstance(p, inttypes) else p.offset):
                    raise TiffFileError('invalid circular IFD reference')
    if index >= lenpages:
        raise IndexError('index out of range')
    page = pages[index]
    fh.seek(page if isinstance(page, inttypes) else page.offset)
def _getlist(self, key=None, useframes=True, validate=True):
    """Return specified pages as list of TiffPages or TiffFrames.

    *key* may be None (all pages), an iterable of indices, a slice, or
    a single integer.  When *validate* is true, pages must match the
    first page's hash.  The first page of the selection is used as the
    keyframe; useframes state is restored afterwards.
    """
    getitem = self._getitem
    _useframes = self.useframes
    if key is None:
        key = iter(range(len(self)))
    elif isinstance(key, Iterable):
        key = iter(key)
    elif isinstance(key, slice):
        start, stop, _ = key.indices(2**31 - 1)
        if not self._indexed and max(stop, start) > len(self.pages):
            self._seek(-1)  # index all pages to resolve the slice
        key = iter(range(*key.indices(len(self.pages))))
    elif isinstance(key, inttypes):
        # single page: return as one-element list
        self.useframes = False
        if key == 0:
            return [self.pages[key]]
        try:
            return [getitem(key)]
        finally:
            self.useframes = _useframes
    else:
        raise TypeError('key must be an integer, slice, or iterable')
    # use the first requested page as keyframe
    keyframe = self._keyframe
    self.keyframe = next(key)
    if validate:
        validate = self._keyframe.hash
    if useframes:
        self.useframes = True
    try:
        pages = [getitem(i, validate) for i in key]
        pages.insert(0, self._keyframe)
    finally:
        # restore state
        self._keyframe = keyframe
        if useframes:
            self.useframes = _useframes
    return pages
def _getitem(self, key, validate=False):
    """Return specified page from cache or file.

    *validate*, if truthy, is a hash that the returned page must match
    (RuntimeError otherwise).
    """
    key = int(key)
    pages = self.pages
    if key < 0:
        key %= len(self)
    elif self._indexed and key >= len(pages):
        raise IndexError('index out of range')
    if key < len(pages):
        page = pages[key]
        if self._cache:
            if not isinstance(page, inttypes):
                # cached page object
                if validate and validate != page.hash:
                    raise RuntimeError('page hash mismatch')
                return page
        elif isinstance(page, (TiffPage, self._tiffpage)):
            if validate and validate != page.hash:
                raise RuntimeError('page hash mismatch')
            return page
    # load page from file
    self._seek(key)
    page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe)
    if validate and validate != page.hash:
        raise RuntimeError('page hash mismatch')
    if self._cache:
        pages[key] = page
    return page
def hash(self):
    """Return checksum to identify pages in same series.

    Combines shape and the tag values that determine how image data is
    stored and decoded.
    """
    # NOTE: this is a method; in the enclosing class body the name
    # 'hash' does not shadow the builtin called here
    return hash(self._shape + (
        self.tilewidth, self.tilelength, self.tiledepth,
        self.bitspersample, self.fillorder, self.predictor,
        self.extrasamples, self.photometric, self.compression,
        self.planarconfig))
def _offsetscounts(self):
    """Return simplified offsets and bytecounts.

    Contiguous data collapses to single-element lists; tiled data is
    returned as-is; otherwise offsets/bytecounts are cleaned up.
    """
    if self.is_contiguous:
        offset, bytecount = self.is_contiguous
        return [offset], [bytecount]
    if self.is_tiled:
        return self.dataoffsets, self.databytecounts
    return clean_offsetscounts(self.dataoffsets, self.databytecounts)
def is_final(self):
    """Return if page's image data are stored in final form.

    True when the data are contiguous and need no fill-order,
    predictor, or chroma-subsampling processing.  Preserves the
    short-circuit value of 'is_contiguous' when it is falsy.
    """
    result = self.is_contiguous
    if result:
        result = (self.fillorder == 1 and self.predictor == 1 and
                  not self.is_subsampled)
    return result
def is_memmappable(self):
    """Return if page's image data in file can be memory-mapped.

    Requires a real file, final-form data, and a data offset aligned to
    the dtype's item size.  Preserves short-circuit return values.
    """
    result = self.parent.filehandle.is_file
    if result:
        result = self.is_final
    if result:
        # offset must be aligned to the item size for memmapping
        result = self.is_contiguous[0] % self.dtype.itemsize == 0
    return result
def flags(self):
    """Return set of flags (lower-case names of the file-format
    predicates that are true for this object)."""
    return {name.lower()
            for name in sorted(TIFF.FILE_FLAGS)
            if getattr(self, 'is_' + name)}
def andor_tags(self):
    """Return consolidated metadata from Andor tags as dict.

    Andor tags (codes 4865-5030) are removed from self.tags after
    being collected; the 'Andor' name prefix is stripped.
    """
    if not self.is_andor:
        return None
    tags = self.tags
    result = {'Id': tags['AndorId'].value}
    # iterate over a snapshot because tags are deleted while looping
    for tag in list(self.tags.values()):
        code = tag.code
        if not 4864 < code < 5031:
            continue
        value = tag.value
        name = tag.name[5:] if len(tag.name) > 5 else tag.name
        result[name] = value
        del tags[tag.name]
    return result
def epics_tags(self):
    """Return consolidated metadata from EPICS areaDetector tags as dict.

    Tags with codes 65000-65499 are consumed (removed from self.tags);
    the first four codes have fixed meanings, the rest are 'key:value'
    strings.
    """
    if not self.is_epics:
        return None
    result = {}
    tags = self.tags
    # iterate over a snapshot because tags are deleted while looping
    for tag in list(self.tags.values()):
        code = tag.code
        if not 65000 <= code < 65500:
            continue
        value = tag.value
        if code == 65000:
            result['timeStamp'] = datetime.datetime.fromtimestamp(
                float(value))
        elif code == 65001:
            result['uniqueID'] = int(value)
        elif code == 65002:
            result['epicsTSSec'] = int(value)
        elif code == 65003:
            result['epicsTSNsec'] = int(value)
        else:
            # generic 'key:value' entry
            key, value = value.split(':', 1)
            result[key] = astype(value)
        del tags[tag.name]
    return result
52,748 | def ndpi_tags ( self ) : if not self . is_ndpi : return None tags = self . tags result = { } for name in ( 'Make' , 'Model' , 'Software' ) : result [ name ] = tags [ name ] . value for code , name in TIFF . NDPI_TAGS . items ( ) : code = str ( code ) if code in tags : result [ name ] = tags [ code ] . value return result | Return consolidated metadata from Hamamatsu NDPI as dict . |
52,749 | def is_imagej ( self ) : for description in ( self . description , self . description1 ) : if not description : return None if description [ : 7 ] == 'ImageJ=' : return description return None | Return ImageJ description if exists else None . |
52,750 | def is_shaped ( self ) : for description in ( self . description , self . description1 ) : if not description : return None if description [ : 1 ] == '{' and '"shape":' in description : return description if description [ : 6 ] == 'shape=' : return description return None | Return description containing array shape if exists else None . |
52,751 | def is_metaseries ( self ) : if self . index > 1 or self . software != 'MetaSeries' : return False d = self . description return d . startswith ( '<MetaData>' ) and d . endswith ( '</MetaData>' ) | Page contains MDS MetaSeries metadata in ImageDescription tag . |
52,752 | def is_ome ( self ) : if self . index > 1 or not self . description : return False d = self . description return d [ : 14 ] == '<?xml version=' and d [ - 6 : ] == '</OME>' | Page contains OME - XML in ImageDescription tag . |
52,753 | def is_scn ( self ) : if self . index > 1 or not self . description : return False d = self . description return d [ : 14 ] == '<?xml version=' and d [ - 6 : ] == '</scn>' | Page contains Leica SCN XML in ImageDescription tag . |
52,754 | def aspage ( self ) : if self . offset is None : raise ValueError ( 'cannot return virtual frame as page.' ) self . parent . filehandle . seek ( self . offset ) return TiffPage ( self . parent , index = self . index ) | Return TiffPage from file . |
52,755 | def asrgb ( self , * args , ** kwargs ) : if self . _keyframe is None : raise RuntimeError ( 'keyframe not set' ) kwargs [ 'validate' ] = False return TiffPage . asrgb ( self , * args , ** kwargs ) | Read image data from file and return RGB image as numpy array . |
52,756 | def keyframe ( self , keyframe ) : if self . _keyframe == keyframe : return if self . _keyframe is not None : raise RuntimeError ( 'cannot reset keyframe' ) if len ( self . _offsetscounts [ 0 ] ) != len ( keyframe . dataoffsets ) : raise RuntimeError ( 'incompatible keyframe' ) if keyframe . is_tiled : pass if keyframe . is_contiguous : self . _offsetscounts = ( [ self . _offsetscounts [ 0 ] [ 0 ] ] , [ keyframe . is_contiguous [ 1 ] ] ) else : self . _offsetscounts = clean_offsetscounts ( * self . _offsetscounts ) self . _keyframe = keyframe | Set keyframe . |
52,757 | def name ( self ) : try : return TIFF . TAGS [ self . code ] except KeyError : return str ( self . code ) | Return name of tag from TIFF . TAGS registry . |
52,758 | def _fix_lsm_bitspersample ( self , parent ) : if self . code != 258 or self . count != 2 : return log . warning ( 'TiffTag %i: correcting LSM bitspersample tag' , self . code ) value = struct . pack ( '<HH' , * self . value ) self . valueoffset = struct . unpack ( '<I' , value ) [ 0 ] parent . filehandle . seek ( self . valueoffset ) self . value = struct . unpack ( '<HH' , parent . filehandle . read ( 4 ) ) | Correct LSM bitspersample tag . |
52,759 | def asarray ( self , out = None ) : if self . parent : result = self . parent . asarray ( series = self , out = out ) if self . transform is not None : result = self . transform ( result ) return result return None | Return image data from series of TIFF pages as numpy array . |
52,760 | def offset ( self ) : if not self . _pages : return None pos = 0 for page in self . _pages : if page is None : return None if not page . is_final : return None if not pos : pos = page . is_contiguous [ 0 ] + page . is_contiguous [ 1 ] continue if pos != page . is_contiguous [ 0 ] : return None pos += page . is_contiguous [ 1 ] page = self . _pages [ 0 ] offset = page . is_contiguous [ 0 ] if ( page . is_imagej or page . is_shaped ) and len ( self . _pages ) == 1 : return offset if pos == offset + product ( self . shape ) * self . dtype . itemsize : return offset return None | Return offset to series data in file if any . |
52,761 | def _getitem ( self , key ) : key = int ( key ) if key < 0 : key %= self . _len if len ( self . _pages ) == 1 and 0 < key < self . _len : index = self . _pages [ 0 ] . index return self . parent . pages . _getitem ( index + key ) return self . _pages [ key ] | Return specified page of series from cache or file . |
52,762 | def asarray ( self , file = None , out = None , ** kwargs ) : if file is not None : if isinstance ( file , int ) : return self . imread ( self . files [ file ] , ** kwargs ) return self . imread ( file , ** kwargs ) im = self . imread ( self . files [ 0 ] , ** kwargs ) shape = self . shape + im . shape result = create_output ( out , shape , dtype = im . dtype ) result = result . reshape ( - 1 , * im . shape ) for index , fname in zip ( self . _indices , self . files ) : index = [ i - j for i , j in zip ( index , self . _startindex ) ] index = numpy . ravel_multi_index ( index , self . shape ) im = self . imread ( fname , ** kwargs ) result [ index ] = im result . shape = shape return result | Read image data from files and return as numpy array . |
52,763 | def _parse ( self ) : if not self . pattern : raise TiffSequence . ParseError ( 'invalid pattern' ) pattern = re . compile ( self . pattern , re . IGNORECASE | re . VERBOSE ) matches = pattern . findall ( os . path . split ( self . files [ 0 ] ) [ - 1 ] ) if not matches : raise TiffSequence . ParseError ( 'pattern does not match file names' ) matches = matches [ - 1 ] if len ( matches ) % 2 : raise TiffSequence . ParseError ( 'pattern does not match axis name and index' ) axes = '' . join ( m for m in matches [ : : 2 ] if m ) if not axes : raise TiffSequence . ParseError ( 'pattern does not match file names' ) indices = [ ] for fname in self . files : fname = os . path . split ( fname ) [ - 1 ] matches = pattern . findall ( fname ) [ - 1 ] if axes != '' . join ( m for m in matches [ : : 2 ] if m ) : raise ValueError ( 'axes do not match within image sequence' ) indices . append ( [ int ( m ) for m in matches [ 1 : : 2 ] if m ] ) shape = tuple ( numpy . max ( indices , axis = 0 ) ) startindex = tuple ( numpy . min ( indices , axis = 0 ) ) shape = tuple ( i - j + 1 for i , j in zip ( shape , startindex ) ) if product ( shape ) != len ( self . files ) : log . warning ( 'TiffSequence: files are missing. Missing data are zeroed' ) self . axes = axes . upper ( ) self . shape = shape self . _indices = indices self . _startindex = startindex | Get axes and shape from file names . |
52,764 | def open ( self ) : if self . _fh : return if isinstance ( self . _file , pathlib . Path ) : self . _file = str ( self . _file ) if isinstance ( self . _file , basestring ) : self . _file = os . path . realpath ( self . _file ) self . _dir , self . _name = os . path . split ( self . _file ) self . _fh = open ( self . _file , self . _mode ) self . _close = True if self . _offset is None : self . _offset = 0 elif isinstance ( self . _file , FileHandle ) : self . _fh = self . _file . _fh if self . _offset is None : self . _offset = 0 self . _offset += self . _file . _offset self . _close = False if not self . _name : if self . _offset : name , ext = os . path . splitext ( self . _file . _name ) self . _name = '%s@%i%s' % ( name , self . _offset , ext ) else : self . _name = self . _file . _name if self . _mode and self . _mode != self . _file . _mode : raise ValueError ( 'FileHandle has wrong mode' ) self . _mode = self . _file . _mode self . _dir = self . _file . _dir elif hasattr ( self . _file , 'seek' ) : try : self . _file . tell ( ) except Exception : raise ValueError ( 'binary stream is not seekable' ) self . _fh = self . _file if self . _offset is None : self . _offset = self . _file . tell ( ) self . _close = False if not self . _name : try : self . _dir , self . _name = os . path . split ( self . _fh . name ) except AttributeError : self . _name = 'Unnamed binary stream' try : self . _mode = self . _fh . mode except AttributeError : pass else : raise ValueError ( 'The first parameter must be a file name, ' 'seekable binary stream, or FileHandle' ) if self . _offset : self . _fh . seek ( self . _offset ) if self . _size is None : pos = self . _fh . tell ( ) self . _fh . seek ( self . _offset , 2 ) self . _size = self . _fh . tell ( ) self . _fh . seek ( pos ) try : self . _fh . fileno ( ) self . is_file = True except Exception : self . is_file = False | Open or re - open file . |
52,765 | def read ( self , size = - 1 ) : if size < 0 and self . _offset : size = self . _size return self . _fh . read ( size ) | Read size bytes from file or until EOF is reached . |
52,766 | def memmap_array ( self , dtype , shape , offset = 0 , mode = 'r' , order = 'C' ) : if not self . is_file : raise ValueError ( 'Cannot memory-map file without fileno' ) return numpy . memmap ( self . _fh , dtype = dtype , mode = mode , offset = self . _offset + offset , shape = shape , order = order ) | Return numpy . memmap of data stored in file . |
52,767 | def read_array ( self , dtype , count = - 1 , out = None ) : fh = self . _fh dtype = numpy . dtype ( dtype ) if count < 0 : size = self . _size if out is None else out . nbytes count = size // dtype . itemsize else : size = count * dtype . itemsize result = numpy . empty ( count , dtype ) if out is None else out if result . nbytes != size : raise ValueError ( 'size mismatch' ) n = fh . readinto ( result ) if n != size : raise ValueError ( 'failed to read %i bytes' % size ) if not result . dtype . isnative : if not dtype . isnative : result . byteswap ( True ) result = result . newbyteorder ( ) elif result . dtype . isnative != dtype . isnative : result . byteswap ( True ) if out is not None : if hasattr ( out , 'flush' ) : out . flush ( ) return result | Return numpy array from file in native byte order . |
52,768 | def read_record ( self , dtype , shape = 1 , byteorder = None ) : rec = numpy . rec try : record = rec . fromfile ( self . _fh , dtype , shape , byteorder = byteorder ) except Exception : dtype = numpy . dtype ( dtype ) if shape is None : shape = self . _size // dtype . itemsize size = product ( sequence ( shape ) ) * dtype . itemsize data = self . _fh . read ( size ) record = rec . fromstring ( data , dtype , shape , byteorder = byteorder ) return record [ 0 ] if shape == 1 else record | Return numpy record from file . |
52,769 | def write_empty ( self , size ) : if size < 1 : return self . _fh . seek ( size - 1 , 1 ) self . _fh . write ( b'\x00' ) | Append size bytes to file . Position must be at end of file . |
52,770 | def write_array ( self , data ) : try : data . tofile ( self . _fh ) except Exception : self . _fh . write ( data . tostring ( ) ) | Write numpy array to binary file . |
52,771 | def seek ( self , offset , whence = 0 ) : if self . _offset : if whence == 0 : self . _fh . seek ( self . _offset + offset , whence ) return if whence == 2 and self . _size > 0 : self . _fh . seek ( self . _offset + self . _size + offset , 0 ) return self . _fh . seek ( offset , whence ) | Set file s current position . |
52,772 | def close ( self ) : if self . _close and self . _fh : self . _fh . close ( ) self . _fh = None | Close file . |
52,773 | def open ( self , filehandle ) : with self . lock : if filehandle in self . files : self . files [ filehandle ] += 1 elif filehandle . closed : filehandle . open ( ) self . files [ filehandle ] = 1 self . past . append ( filehandle ) | Re - open file if necessary . |
52,774 | def close ( self , filehandle ) : with self . lock : if filehandle in self . files : self . files [ filehandle ] -= 1 index = 0 size = len ( self . past ) while size > self . size and index < size : filehandle = self . past [ index ] if self . files [ filehandle ] == 0 : filehandle . close ( ) del self . files [ filehandle ] del self . past [ index ] size -= 1 else : index += 1 | Close openend file if no longer used . |
52,775 | def clear ( self ) : with self . lock : for filehandle , refcount in list ( self . files . items ( ) ) : if refcount == 0 : filehandle . close ( ) del self . files [ filehandle ] del self . past [ self . past . index ( filehandle ) ] | Close all opened files if not in use . |
52,776 | def grab_literal ( template , l_del ) : global _CURRENT_LINE try : literal , template = template . split ( l_del , 1 ) _CURRENT_LINE += literal . count ( '\n' ) return ( literal , template ) except ValueError : return ( template , '' ) | Parse a literal from the template |
52,777 | def l_sa_check ( template , literal , is_standalone ) : if literal . find ( '\n' ) != - 1 or is_standalone : padding = literal . split ( '\n' ) [ - 1 ] if padding . isspace ( ) or padding == '' : return True else : return False | Do a preliminary check to see if a tag could be a standalone |
52,778 | def r_sa_check ( template , tag_type , is_standalone ) : if is_standalone and tag_type not in [ 'variable' , 'no escape' ] : on_newline = template . split ( '\n' , 1 ) if on_newline [ 0 ] . isspace ( ) or not on_newline [ 0 ] : return True else : return False else : return False | Do a final checkto see if a tag could be a standalone |
52,779 | def parse_tag ( template , l_del , r_del ) : global _CURRENT_LINE global _LAST_TAG_LINE tag_types = { '!' : 'comment' , '#' : 'section' , '^' : 'inverted section' , '/' : 'end' , '>' : 'partial' , '=' : 'set delimiter?' , '{' : 'no escape?' , '&' : 'no escape' } try : tag , template = template . split ( r_del , 1 ) except ValueError : raise ChevronError ( 'unclosed tag ' 'at line {0}' . format ( _CURRENT_LINE ) ) tag_type = tag_types . get ( tag [ 0 ] , 'variable' ) if tag_type != 'variable' : tag = tag [ 1 : ] if tag_type == 'set delimiter?' : if tag . endswith ( '=' ) : tag_type = 'set delimiter' tag = tag [ : - 1 ] else : raise ChevronError ( 'unclosed set delimiter tag\n' 'at line {0}' . format ( _CURRENT_LINE ) ) elif tag_type == 'no escape?' : if l_del == '{{' and r_del == '}}' and template . startswith ( '}' ) : template = template [ 1 : ] tag_type = 'no escape' return ( ( tag_type , tag . strip ( ) ) , template ) | Parse a tag from a template |
52,780 | def tokenize ( template , def_ldel = '{{' , def_rdel = '}}' ) : global _CURRENT_LINE , _LAST_TAG_LINE _CURRENT_LINE = 1 _LAST_TAG_LINE = None try : template = template . read ( ) except AttributeError : pass is_standalone = True open_sections = [ ] l_del = def_ldel r_del = def_rdel while template : literal , template = grab_literal ( template , l_del ) if not template : yield ( 'literal' , literal ) break is_standalone = l_sa_check ( template , literal , is_standalone ) tag , template = parse_tag ( template , l_del , r_del ) tag_type , tag_key = tag if tag_type == 'set delimiter' : dels = tag_key . strip ( ) . split ( ' ' ) l_del , r_del = dels [ 0 ] , dels [ - 1 ] elif tag_type in [ 'section' , 'inverted section' ] : open_sections . append ( tag_key ) _LAST_TAG_LINE = _CURRENT_LINE elif tag_type == 'end' : try : last_section = open_sections . pop ( ) except IndexError : raise ChevronError ( 'Trying to close tag "{0}"\n' 'Looks like it was not opened.\n' 'line {1}' . format ( tag_key , _CURRENT_LINE + 1 ) ) if tag_key != last_section : raise ChevronError ( 'Trying to close tag "{0}"\n' 'last open tag is "{1}"\n' 'line {2}' . format ( tag_key , last_section , _CURRENT_LINE + 1 ) ) is_standalone = r_sa_check ( template , tag_type , is_standalone ) if is_standalone : template = template . split ( '\n' , 1 ) [ - 1 ] if tag_type != 'partial' : literal = literal . rstrip ( ' ' ) if literal != '' : yield ( 'literal' , literal ) if tag_type not in [ 'comment' , 'set delimiter?' ] : yield ( tag_type , tag_key ) if open_sections : raise ChevronError ( 'Unexpected EOF\n' 'the tag "{0}" was never closed\n' 'was opened at line {1}' . format ( open_sections [ - 1 ] , _LAST_TAG_LINE ) ) | Tokenize a mustache template |
52,781 | def cli_main ( ) : import argparse import os def is_file_or_pipe ( arg ) : if not os . path . exists ( arg ) or os . path . isdir ( arg ) : parser . error ( 'The file {0} does not exist!' . format ( arg ) ) else : return arg def is_dir ( arg ) : if not os . path . isdir ( arg ) : parser . error ( 'The directory {0} does not exist!' . format ( arg ) ) else : return arg parser = argparse . ArgumentParser ( description = __doc__ ) parser . add_argument ( '-v' , '--version' , action = 'version' , version = version ) parser . add_argument ( 'template' , help = 'The mustache file' , type = is_file_or_pipe ) parser . add_argument ( '-d' , '--data' , dest = 'data' , help = 'The json data file' , type = is_file_or_pipe , default = { } ) parser . add_argument ( '-p' , '--path' , dest = 'partials_path' , help = 'The directory where your partials reside' , type = is_dir , default = '.' ) parser . add_argument ( '-e' , '--ext' , dest = 'partials_ext' , help = 'The extension for your mustache\ partials, \'mustache\' by default' , default = 'mustache' ) parser . add_argument ( '-l' , '--left-delimiter' , dest = 'def_ldel' , help = 'The default left delimiter, "{{" by default.' , default = '{{' ) parser . add_argument ( '-r' , '--right-delimiter' , dest = 'def_rdel' , help = 'The default right delimiter, "}}" by default.' , default = '}}' ) args = vars ( parser . parse_args ( ) ) try : sys . stdout . write ( main ( ** args ) ) sys . stdout . flush ( ) except SyntaxError as e : print ( 'Chevron: syntax error' ) print ( ' ' + '\n ' . join ( e . args [ 0 ] . split ( '\n' ) ) ) exit ( 1 ) | Render mustache templates using json files |
52,782 | def _get_key ( key , scopes ) : if key == '.' : return scopes [ 0 ] for scope in scopes : try : for child in key . split ( '.' ) : try : scope = scope [ child ] except ( TypeError , AttributeError ) : try : scope = scope . __dict__ [ child ] except ( TypeError , AttributeError ) : scope = scope [ int ( child ) ] if scope is 0 : return 0 if scope is False : return False try : if scope . _CHEVRON_return_scope_when_falsy : return scope except AttributeError : return scope or '' except ( AttributeError , KeyError , IndexError , ValueError ) : pass return '' | Get a key from the current scope |
52,783 | def _get_partial ( name , partials_dict , partials_path , partials_ext ) : try : return partials_dict [ name ] except KeyError : try : path_ext = ( '.' + partials_ext if partials_ext else '' ) path = partials_path + '/' + name + path_ext with io . open ( path , 'r' , encoding = 'utf-8' ) as partial : return partial . read ( ) except IOError : return '' | Load a partial |
52,784 | def commit ( self ) : url = self . api_endpoints [ 'send' ] payload = { 'actions' : self . _bulk_query , } payload . update ( self . _payload ) self . _bulk_query = [ ] return self . _make_request ( url , json . dumps ( payload ) , headers = { 'content-type' : 'application/json' } , ) | This method executes the bulk query flushes stored queries and returns the response |
52,785 | def get_request_token ( cls , consumer_key , redirect_uri = 'http://example.com/' , state = None ) : headers = { 'X-Accept' : 'application/json' , } url = 'https://getpocket.com/v3/oauth/request' payload = { 'consumer_key' : consumer_key , 'redirect_uri' : redirect_uri , } if state : payload [ 'state' ] = state return cls . _make_request ( url , payload , headers ) [ 0 ] [ 'code' ] | Returns the request token that can be used to fetch the access token |
52,786 | def get_credentials ( cls , consumer_key , code ) : headers = { 'X-Accept' : 'application/json' , } url = 'https://getpocket.com/v3/oauth/authorize' payload = { 'consumer_key' : consumer_key , 'code' : code , } return cls . _make_request ( url , payload , headers ) [ 0 ] | Fetches access token from using the request token and consumer key |
52,787 | def remove ( self , obj ) : relationship_table = self . params [ 'relationship_table' ] with self . obj . backend . transaction ( implicit = True ) : condition = and_ ( relationship_table . c [ self . params [ 'related_pk_field_name' ] ] == obj . pk , relationship_table . c [ self . params [ 'pk_field_name' ] ] == self . obj . pk ) self . obj . backend . connection . execute ( delete ( relationship_table ) . where ( condition ) ) self . _queryset = None | Remove an object from the relation |
52,788 | def begin ( self ) : if self . in_transaction : if self . _auto_transaction : self . _auto_transaction = False return self . commit ( ) self . in_transaction = True for collection , store in self . stores . items ( ) : store . begin ( ) indexes = self . indexes [ collection ] for index in indexes . values ( ) : index . begin ( ) | Start a new transaction . |
52,789 | def rollback ( self , transaction = None ) : if not self . in_transaction : raise NotInTransaction for collection , store in self . stores . items ( ) : store . rollback ( ) indexes = self . indexes [ collection ] indexes_to_rebuild = [ ] for key , index in indexes . items ( ) : try : index . rollback ( ) except NotInTransaction : indexes_to_rebuild . append ( key ) if indexes_to_rebuild : self . rebuild_indexes ( collection , indexes_to_rebuild ) self . in_transaction = False | Roll back a transaction . |
52,790 | def commit ( self , transaction = None ) : for collection in self . collections : store = self . get_collection_store ( collection ) store . commit ( ) indexes = self . get_collection_indexes ( collection ) for index in indexes . values ( ) : index . commit ( ) self . in_transaction = False self . begin ( ) | Commit all pending transactions to the database . |
52,791 | def get_pk_index ( self , collection ) : cls = self . collections [ collection ] if not cls . get_pk_name ( ) in self . indexes [ collection ] : self . create_index ( cls . get_pk_name ( ) , collection ) return self . indexes [ collection ] [ cls . get_pk_name ( ) ] | Return the primary key index for a given collection . |
52,792 | def register ( self , cls , parameters = None , overwrite = False ) : if cls in self . deprecated_classes and not overwrite : return False if parameters is None : parameters = { } if 'collection' in parameters : collection_name = parameters [ 'collection' ] elif hasattr ( cls . Meta , 'collection' ) : collection_name = cls . Meta . collection else : collection_name = cls . __name__ . lower ( ) delete_list = [ ] def register_class ( collection_name , cls ) : self . collections [ collection_name ] = cls self . classes [ cls ] = parameters . copy ( ) self . classes [ cls ] [ 'collection' ] = collection_name if collection_name in self . collections : old_cls = self . collections [ collection_name ] if ( issubclass ( cls , old_cls ) and not ( cls is old_cls ) ) or overwrite : logger . warning ( "Replacing class %s with %s for collection %s" % ( old_cls , cls , collection_name ) ) self . deprecated_classes [ old_cls ] = self . classes [ old_cls ] del self . classes [ old_cls ] register_class ( collection_name , cls ) return True else : logger . debug ( "Registering class %s under collection %s" % ( cls , collection_name ) ) register_class ( collection_name , cls ) return True return False | Explicitly register a new document class for use in the backend . |
52,793 | def autoregister ( self , cls ) : params = self . get_meta_attributes ( cls ) return self . register ( cls , params ) | Autoregister a class that is encountered for the first time . |
52,794 | def create_instance ( self , collection_or_class , attributes , lazy = False , call_hook = True , deserialize = True , db_loader = None ) : creation_args = { 'backend' : self , 'autoload' : self . _autoload_embedded , 'lazy' : lazy , 'db_loader' : db_loader } if collection_or_class in self . classes : cls = collection_or_class elif collection_or_class in self . collections : cls = self . collections [ collection_or_class ] else : raise AttributeError ( "Unknown collection or class: %s!" % str ( collection_or_class ) ) if deserialize : deserialized_attributes = self . deserialize ( attributes , create_instance = False ) else : deserialized_attributes = attributes if 'constructor' in self . classes [ cls ] : obj = self . classes [ cls ] [ 'constructor' ] ( deserialized_attributes , ** creation_args ) else : obj = cls ( deserialized_attributes , ** creation_args ) if call_hook : self . call_hook ( 'after_load' , obj ) return obj | Creates an instance of a Document class corresponding to the given collection name or class . |
52,795 | def transaction ( self , implicit = False ) : class TransactionManager ( object ) : def __init__ ( self , backend , implicit = False ) : self . backend = backend self . implicit = implicit def __enter__ ( self ) : self . within_transaction = True if self . backend . current_transaction else False self . transaction = self . backend . begin ( ) def __exit__ ( self , exc_type , exc_value , traceback_obj ) : if exc_type : self . backend . rollback ( self . transaction ) return False else : self . backend . commit ( self . transaction ) return TransactionManager ( self , implicit = implicit ) | This returns a context guard which will automatically open and close a transaction |
52,796 | def get_cls_for_collection ( self , collection ) : for cls , params in self . classes . items ( ) : if params [ 'collection' ] == collection : return cls raise AttributeError ( "Unknown collection: %s" % collection ) | Return the class for a given collection name . |
52,797 | def clear ( self ) : self . _index = defaultdict ( list ) self . _reverse_index = defaultdict ( list ) self . _undefined_keys = { } | Clear index . |
52,798 | def get_value ( self , attributes , key = None ) : value = attributes if key is None : key = self . _splitted_key for i , elem in enumerate ( key ) : if isinstance ( value , ( list , tuple ) ) : return [ self . get_value ( v , key [ i : ] ) for v in value ] else : value = value [ elem ] return value | Get value to be indexed from document attributes . |
52,799 | def save_to_store ( self ) : if not self . _store : raise AttributeError ( 'No datastore defined!' ) saved_data = self . save_to_data ( in_place = True ) data = Serializer . serialize ( saved_data ) self . _store . store_blob ( data , 'all_keys_with_undefined' ) | Save index to store . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.