idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
53,700 | def has_axis ( self , axis ) : if self . type != EventType . POINTER_AXIS : raise AttributeError ( _wrong_meth . format ( self . type ) ) return self . _libinput . libinput_event_pointer_has_axis ( self . _handle , axis ) | Check if the event has a valid value for the given axis . |
53,701 | def get_axis_value ( self , axis ) : if self . type != EventType . POINTER_AXIS : raise AttributeError ( _wrong_meth . format ( self . type ) ) return self . _libinput . libinput_event_pointer_get_axis_value ( self . _handle , axis ) | Return the axis value of the given axis . |
53,702 | def axis_source ( self ) : if self . type != EventType . POINTER_AXIS : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_pointer_get_axis_source ( self . _handle ) | The source for a given axis event . |
53,703 | def get_axis_value_discrete ( self , axis ) : if self . type != EventType . POINTER_AXIS : raise AttributeError ( _wrong_meth . format ( self . type ) ) return self . _libinput . libinput_event_pointer_get_axis_value_discrete ( self . _handle , axis ) | Return the axis value in discrete steps for a given axis event . |
53,704 | def slot ( self ) : if self . type == EventType . TOUCH_FRAME : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_touch_get_slot ( self . _handle ) | The slot of this touch event . |
53,705 | def seat_slot ( self ) : if self . type == EventType . TOUCH_FRAME : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_touch_get_seat_slot ( self . _handle ) | The seat slot of the touch event . |
53,706 | def coords ( self ) : if self . type not in { EventType . TOUCH_DOWN , EventType . TOUCH_MOTION } : raise AttributeError ( _wrong_prop . format ( self . type ) ) x = self . _libinput . libinput_event_touch_get_x ( self . _handle ) y = self . _libinput . libinput_event_touch_get_y ( self . _handle ) return x , y | The current absolute coordinates of the touch event in mm from the top left corner of the device . |
53,707 | def transform_coords ( self , width , height ) : if self . type not in { EventType . TOUCH_DOWN , EventType . TOUCH_MOTION } : raise AttributeError ( _wrong_meth . format ( self . type ) ) x = self . _libinput . libinput_event_touch_get_x_transformed ( self . _handle , width ) y = self . _libinput . libinput_event_touch_get_y_transformed ( self . _handle , height ) return x , y | Return the current absolute coordinates of the touch event transformed to screen coordinates . |
53,708 | def cancelled ( self ) : if self . type not in { EventType . GESTURE_SWIPE_END , EventType . GESTURE_PINCH_END } : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_gesture_get_cancelled ( self . _handle ) | Return if the gesture ended normally or if it was cancelled . |
53,709 | def scale ( self ) : if self . type not in { EventType . GESTURE_PINCH_BEGIN , EventType . GESTURE_PINCH_UPDATE , EventType . GESTURE_PINCH_END } : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_gesture_get_scale ( self . _handle ) | The absolute scale of a pinch gesture the scale is the division of the current distance between the fingers and the distance at the start of the gesture . |
53,710 | def rotation ( self ) : rotation = self . _libinput . libinput_event_tablet_tool_get_rotation ( self . _handle ) changed = self . _libinput . libinput_event_tablet_tool_rotation_has_changed ( self . _handle ) return rotation , changed | The current Z rotation of the tool in degrees clockwise from the tool s logical neutral position and whether it has changed in this event . |
53,711 | def wheel_delta ( self ) : delta = self . _libinput . libinput_event_tablet_tool_get_wheel_delta ( self . _handle ) changed = self . _libinput . libinput_event_tablet_tool_wheel_has_changed ( self . _handle ) return delta , changed | The delta for the wheel in degrees and whether it has changed in this event . |
53,712 | def tool ( self ) : htablettool = self . _libinput . libinput_event_tablet_tool_get_tool ( self . _handle ) return TabletTool ( htablettool , self . _libinput ) | The tool that was in use during this event . |
53,713 | def seat_button_count ( self ) : if self . type != EventType . TABLET_TOOL_BUTTON : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_tool_get_seat_button_count ( self . _handle ) | The total number of buttons pressed on all devices on the associated seat after the the event was triggered . |
53,714 | def ring_position ( self ) : if self . type != EventType . TABLET_PAD_RING : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_pad_get_ring_position ( self . _handle ) | The current position of the ring in degrees counterclockwise from the northern - most point of the ring in the tablet s current logical orientation . |
53,715 | def ring_number ( self ) : if self . type != EventType . TABLET_PAD_RING : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_pad_get_ring_number ( self . _handle ) | The number of the ring that has changed state with 0 being the first ring . |
53,716 | def ring_source ( self ) : if self . type != EventType . TABLET_PAD_RING : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_pad_get_ring_source ( self . _handle ) | The source of the interaction with the ring . |
53,717 | def strip_number ( self ) : if self . type != EventType . TABLET_PAD_STRIP : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_pad_get_strip_number ( self . _handle ) | The number of the strip that has changed state with 0 being the first strip . |
53,718 | def strip_source ( self ) : if self . type != EventType . TABLET_PAD_STRIP : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_pad_get_strip_source ( self . _handle ) | The source of the interaction with the strip . |
53,719 | def button_number ( self ) : if self . type != EventType . TABLET_PAD_BUTTON : raise AttributeError ( _wrong_prop . format ( self . type ) ) return self . _libinput . libinput_event_tablet_pad_get_button_number ( self . _handle ) | The button number that triggered this event starting at 0 . |
53,720 | def mode_group ( self ) : hmodegroup = self . _libinput . libinput_event_tablet_pad_get_mode_group ( self . _handle ) return TabletPadModeGroup ( hmodegroup , self . _libinput ) | The mode group that the button ring or strip that triggered this event is considered in . |
53,721 | def get_long_description ( ) : description = [ ] with open ( "README.rst" ) as file : for line in file : if ".. code:: python" in line and len ( description ) >= 2 : blockLine = description [ - 2 ] if re . search ( r":$" , blockLine ) and not re . search ( r"::$" , blockLine ) : description [ - 2 ] = "::" . join ( blockLine . rsplit ( ":" , 1 ) ) continue description . append ( line ) return "" . join ( description ) | Returns the Package long description . |
53,722 | def ftp_listing_paths ( ftpconn : FTP , root : str ) -> Iterable [ str ] : for current_path , dirs , files in ftp_walk ( ftpconn , root ) : yield from ( os . path . join ( current_path , file ) for file in files ) | Generate the full file paths from a root path . |
53,723 | def ftp_walk ( ftpconn : FTP , rootpath = '' ) : current_directory = rootpath try : directories , files = directory_listing ( ftpconn , current_directory ) except ftplib . error_perm : return yield current_directory , directories , files for name in directories : new_path = os . path . join ( current_directory , name ) for entry in ftp_walk ( ftpconn , rootpath = new_path ) : yield entry else : return | Recursively traverse an ftp directory to discovery directory listing . |
53,724 | def directory_listing ( conn : FTP , path : str ) -> Tuple [ List , List ] : entries = deque ( ) conn . dir ( path , entries . append ) entries = map ( parse_line , entries ) grouped_entries = defaultdict ( list ) for key , value in entries : grouped_entries [ key ] . append ( value ) directories = grouped_entries [ ListingType . directory ] files = grouped_entries [ ListingType . file ] return directories , files | Return the directories and files for single FTP listing . |
53,725 | def download_ftp_url ( source_url , target_uri , buffer_size = 8192 ) : ensure_file_directory ( target_uri ) with urllib . request . urlopen ( source_url ) as source_file : with open ( target_uri , 'wb' ) as target_file : shutil . copyfileobj ( source_file , target_file , buffer_size ) | Uses urllib . thread safe? |
53,726 | def devices ( self ) : eax = self . attributes . get ( 'devices' ) if eax is None : eax = self . _all_devices if not isinstance ( eax , list ) : eax = [ eax ] return [ str ( dev ) for dev in eax ] | List of devices to test |
53,727 | def exceptions ( error_is_fatal = True , error_messages = None ) : def exception_decorator ( func ) : nonlocal error_messages @ functools . wraps ( func ) def exc_wrapper ( * args , ** kwargs ) : nonlocal error_messages try : result = func ( * args , ** kwargs ) except sa . exc . SQLAlchemyError as err : result = None details = None err_type = err . __class__ if error_messages and err_type in error_messages : details = error_messages [ err_type ] if details : LOG . error ( details ) LOG . error ( "For developers: (%s) %s" , err . __class__ , str ( err ) ) if error_is_fatal : sys . exit ( "Abort, SQL operation failed." ) if not ui . ask ( "I can continue at your own risk, do you want that?" ) : raise err return result return exc_wrapper return exception_decorator | Handle SQLAlchemy exceptions in a sane way . |
53,728 | def get_version_data ( ) : connect_str = str ( settings . CFG [ "db" ] [ "connect_string" ] ) repo_url = path . template_path ( "../db/" ) return ( connect_str , repo_url ) | Retreive migration information . |
53,729 | def enforce_versioning ( force = False ) : connect_str , repo_url = get_version_data ( ) LOG . warning ( "Your database uses an unversioned benchbuild schema." ) if not force and not ui . ask ( "Should I enforce version control on your schema?" ) : LOG . error ( "User declined schema versioning." ) return None repo_version = migrate . version ( repo_url , url = connect_str ) migrate . version_control ( connect_str , repo_url , version = repo_version ) return repo_version | Install versioning on the db . |
53,730 | def init_functions ( connection ) : if settings . CFG [ "db" ] [ "create_functions" ] : print ( "Refreshing SQL functions..." ) for file in path . template_files ( "../sql/" , exts = [ ".sql" ] ) : func = sa . DDL ( path . template_str ( file ) ) LOG . info ( "Loading: '%s' into database" , file ) connection . execute ( func ) connection . commit ( ) | Initialize all SQL functions in the database . |
53,731 | def connect_engine ( self ) : try : self . connection = self . engine . connect ( ) return True except sa . exc . OperationalError as opex : LOG . fatal ( "Could not connect to the database. The error was: '%s'" , str ( opex ) ) return False | Establish a connection to the database . |
53,732 | def configure_engine ( self ) : try : self . connection . execution_options ( isolation_level = "SERIALIZABLE" ) except sa . exc . ArgumentError : LOG . debug ( "Unable to set isolation level to SERIALIZABLE" ) return True | Configure the databse connection . |
53,733 | def parse_date ( date_str : str , pattern = _RE_DATE ) -> dt . date : groups = re . match ( pattern , date_str ) return dt . date ( * _date_to_tuple ( groups . groupdict ( ) ) ) | Parse datetime . date from YYYY - MM - DD format . |
53,734 | def _datetime_to_tuple ( dt_dict ) : year , month , day = _date_to_tuple ( dt_dict ) hour , minute , second , microsecond = _time_to_tuple ( dt_dict ) return year , month , day , hour , minute , second , microsecond | datetime . datetime components from dictionary to tuple . |
53,735 | def convert_2_utc ( self , datetime_ , timezone ) : datetime_ = self . tz_mapper [ timezone ] . localize ( datetime_ ) return datetime_ . astimezone ( pytz . UTC ) | convert to datetime to UTC offset . |
53,736 | def esearch ( database , query , userhistory = True , webenv = False , query_key = False , retstart = False , retmax = False , api_key = False , email = False , ** kwargs ) -> Optional [ EsearchResult ] : cleaned_query = urllib . parse . quote_plus ( query , safe = '/+' ) url = BASE_URL + f'esearch.fcgi?db={database}&term={cleaned_query}&retmode=json' url = check_userhistory ( userhistory , url ) url = check_webenv ( webenv , url ) url = check_query_key ( query_key , url ) url = check_retstart ( retstart , url ) url = check_retmax ( retmax , url ) url = check_api_key ( api_key , url ) url = check_email ( email , url ) time . sleep ( PAUSE ) resp = requests . get ( url ) if resp . status_code != 200 : print ( 'There was a server error' ) return text = resp . json ( ) time . sleep ( .5 ) return EsearchResult ( text [ 'esearchresult' ] . get ( 'idlist' , [ ] ) , make_number ( text [ 'esearchresult' ] . get ( 'count' , '' ) , int ) , text [ 'esearchresult' ] . get ( 'webenv' , '' ) , text [ 'esearchresult' ] . get ( 'querykey' , '' ) ) | Search for a query using the Entrez ESearch API . |
53,737 | def epost ( database , ids : List [ str ] , webenv = False , api_key = False , email = False , ** kwargs ) -> Optional [ EpostResult ] : url = BASE_URL + f'epost.fcgi' id = ',' . join ( ids ) url_params = f'db={database}&id={id}' url_params = check_webenv ( webenv , url_params ) url_params = check_api_key ( api_key , url_params ) url_params = check_email ( email , url_params ) resp = entrez_try_put_multiple_times ( url , url_params , num_tries = 3 ) time . sleep ( .5 ) return parse_epost ( resp . text ) | Post IDs using the Entrez ESearch API . |
53,738 | def efetch ( database , ids = False , webenv = False , query_key = False , count = False , retstart = False , retmax = False , rettype = 'full' , retmode = 'xml' , api_key = False , email = False , ** kwargs ) -> str : url = BASE_URL + f'efetch.fcgi?db={database}&retmode={retmode}&rettype={rettype}' url = check_webenv ( webenv , url ) url = check_query_key ( query_key , url ) url = check_api_key ( api_key , url ) url = check_email ( email , url ) if ids : if isinstance ( ids , str ) : id = ids else : id = ',' . join ( ids ) url += f'&id={id}' count = len ( id . split ( ',' ) ) for resp in entrez_sets_of_results ( url , retstart , retmax , count ) : yield resp . text | Get documents using the Entrez ESearch API . gg |
53,739 | def entrez_sets_of_results ( url , retstart = False , retmax = False , count = False ) -> Optional [ List [ requests . Response ] ] : if not retstart : retstart = 0 if not retmax : retmax = 500 if not count : count = retmax retmax = 500 while retstart < count : diff = count - retstart if diff < 500 : retmax = diff _url = url + f'&retstart={retstart}&retmax={retmax}' resp = entrez_try_get_multiple_times ( _url ) if resp is None : return retstart += retmax yield resp | Gets sets of results back from Entrez . |
53,740 | def print_runs ( query ) : if query is None : return for tup in query : print ( ( "{0} @ {1} - {2} id: {3} group: {4}" . format ( tup . end , tup . experiment_name , tup . project_name , tup . experiment_group , tup . run_group ) ) ) | Print all rows in this result query . |
53,741 | def print_logs ( query , types = None ) : if query is None : return for run , log in query : print ( ( "{0} @ {1} - {2} id: {3} group: {4} status: {5}" . format ( run . end , run . experiment_name , run . project_name , run . experiment_group , run . run_group , log . status ) ) ) print ( ( "command: {0}" . format ( run . command ) ) ) if "stderr" in types : print ( "StdErr:" ) print ( ( log . stderr ) ) if "stdout" in types : print ( "StdOut:" ) print ( ( log . stdout ) ) print ( ) | Print status logs . |
53,742 | def get_svg_layers ( svg_sources ) : layers = [ ] width , height = None , None def extract_length ( attr ) : 'Extract length in pixels.' match = CRE_MM_LENGTH . match ( attr ) if match : return INKSCAPE_PPmm . magnitude * float ( match . group ( 'length' ) ) else : return float ( attr ) for svg_source_i in svg_sources : xml_root = etree . parse ( svg_source_i ) svg_root = xml_root . xpath ( '/svg:svg' , namespaces = INKSCAPE_NSMAP ) [ 0 ] width = max ( extract_length ( svg_root . attrib [ 'width' ] ) , width ) height = max ( extract_length ( svg_root . attrib [ 'height' ] ) , height ) layers += svg_root . xpath ( '//svg:g[@inkscape:groupmode="layer"]' , namespaces = INKSCAPE_NSMAP ) for i , layer_i in enumerate ( layers ) : layer_i . attrib [ 'id' ] = 'layer%d' % ( i + 1 ) return ( width , height ) , layers | Collect layers from input svg sources . |
53,743 | def merge_svg_layers ( svg_sources , share_transform = True ) : ( width , height ) , layers = get_svg_layers ( svg_sources ) if share_transform : transforms = [ layer_i . attrib [ 'transform' ] for layer_i in layers if 'transform' in layer_i . attrib ] if len ( transforms ) > 1 : raise ValueError ( 'Transform can only be shared if *exactly one* ' 'layer has a transform ({} layers have ' '`transform` attributes)' . format ( len ( transforms ) ) ) elif transforms : for layer_i in layers : layer_i . attrib [ 'transform' ] = transforms [ 0 ] dwg = svgwrite . Drawing ( profile = 'tiny' , debug = False , size = ( width , height ) ) output_svg_root = etree . fromstring ( dwg . tostring ( ) ) output_svg_root . extend ( layers ) output = StringIO . StringIO ( ) output . write ( etree . tostring ( output_svg_root ) ) output . seek ( 0 ) return output | Merge layers from input svg sources into a single XML document . |
53,744 | def main ( argv = None ) : arguments = cli_common ( __doc__ , argv = argv ) campaign_file = arguments [ 'CAMPAIGN_FILE' ] if arguments [ '-g' ] : if osp . exists ( campaign_file ) : raise Exception ( 'Campaign file already exists' ) with open ( campaign_file , 'w' ) as ostr : Generator ( ) . write ( ostr ) else : node = arguments . get ( '-n' ) output_dir = arguments . get ( '--output-dir' ) exclude_nodes = arguments . get ( '--exclude-nodes' ) srun_tag = arguments . get ( '--srun' ) driver = CampaignDriver ( campaign_file , node = node , output_dir = output_dir , srun = srun_tag , exclude_nodes = exclude_nodes , ) driver ( ) if argv is not None : return driver campaign_fd = int ( arguments . get ( '--campaign-path-fd' ) or 1 ) message = ( osp . abspath ( driver . campaign_path ) + '\n' ) . encode ( ) os . write ( campaign_fd , message ) | ben - sh entry point |
53,745 | def main ( argv = None ) : arguments = cli_common ( __doc__ , argv = argv ) driver = CampaignDriver ( arguments [ 'CAMPAIGN-DIR' ] , expandcampvars = False ) driver ( no_exec = True ) if argv is not None : return driver | ben - umb entry point |
53,746 | def count_by ( records : Sequence [ Dict ] , field_name : str ) -> defaultdict : counter = defaultdict ( int ) for record in records : name = record [ field_name ] counter [ name ] += 1 return counter | Frequency each value occurs in a record sequence for a given field name . |
53,747 | def generate ( self ) : exp_name = self . exp_name ( ) fname = os . path . basename ( self . out_path ) fname = "{exp}_{prefix}_{name}{ending}" . format ( exp = exp_name , prefix = os . path . splitext ( fname ) [ 0 ] , ending = os . path . splitext ( fname ) [ - 1 ] , name = "full" ) first = True for chunk in self . report ( ) : print ( "Writing chunk to :'{0}'" . format ( fname ) ) chunk . to_csv ( fname , header = first , mode = 'a' ) first = False | Fetch all rows associated with this experiment . |
53,748 | def main ( argv = None ) : arguments = cli_common ( __doc__ , argv = argv ) campaign_path = arguments [ 'CAMPAIGN-DIR' ] driver = CampaignDriver ( campaign_path , expandcampvars = False ) with pushd ( campaign_path ) : render ( template = arguments [ '--template' ] , ostr = arguments [ '--output' ] , campaign = driver , ) if argv is not None : return driver | ben - doc entry point |
53,749 | def class_to_str ( obj ) : mod_str = obj . __module__ name_str = obj . __name__ if mod_str == '__main__' : return name_str else : return '.' . join ( [ mod_str , name_str ] ) | get class string from object |
53,750 | def get_module_path ( module ) : return pathlib . Path ( os . path . dirname ( os . path . abspath ( inspect . getfile ( module ) ) ) ) | return a directory path to a module |
53,751 | def get_data_path ( data , module , check_exists = True ) : basepath = os . path . dirname ( os . path . abspath ( inspect . getfile ( module ) ) ) if isinstance ( data , basestring ) : data = [ data ] dirpath = os . path . join ( basepath , * data ) if check_exists : assert os . path . exists ( dirpath ) , '{0} does not exist' . format ( dirpath ) return pathlib . Path ( dirpath ) | return a directory path to data within a module |
53,752 | def load_memit ( ) : from IPython . core . magic import Magics , line_magic , magics_class from memory_profiler import memory_usage as _mu try : ip = get_ipython ( ) except NameError as err : raise Exception ( 'not in ipython/jupyter kernel:\n {}' . format ( err ) ) @ magics_class class MemMagics ( Magics ) : @ line_magic def memit ( self , line = '' , setup = 'pass' ) : opts , stmt = self . parse_options ( line , 'r:t:i' , posix = False , strict = False ) repeat = int ( getattr ( opts , 'r' , 3 ) ) if repeat < 1 : repeat == 1 timeout = int ( getattr ( opts , 't' , 0 ) ) if timeout <= 0 : timeout = None run_in_place = hasattr ( opts , 'i' ) try : import multiprocessing as pr from multiprocessing . queues import SimpleQueue q = SimpleQueue ( ) except ImportError : class ListWithPut ( list ) : def put ( self , x ) : self . append ( x ) q = ListWithPut ( ) print ( 'WARNING: cannot import module `multiprocessing`. Forcing ' 'the `-i` option.' ) run_in_place = True ns = self . shell . user_ns def _get_usage ( q , stmt , setup = 'pass' , ns = { } ) : try : exec ( setup ) in ns _mu0 = _mu ( ) [ 0 ] exec ( stmt ) in ns _mu1 = _mu ( ) [ 0 ] q . put ( _mu1 - _mu0 ) except Exception as e : q . put ( float ( '-inf' ) ) raise e if run_in_place : for _ in range ( repeat ) : _get_usage ( q , stmt , ns = ns ) else : at_least_one_worked = False for _ in range ( repeat ) : p = pr . Process ( target = _get_usage , args = ( q , stmt , 'pass' , ns ) ) p . start ( ) p . join ( timeout = timeout ) if p . exitcode == 0 : at_least_one_worked = True else : p . terminate ( ) if p . exitcode is None : print ( 'Subprocess timed out.' ) else : print ( 'Subprocess exited with code %d.' % p . exitcode ) q . put ( float ( '-inf' ) ) if not at_least_one_worked : print ( 'ERROR: all subprocesses exited unsuccessfully. Try ' 'again with the `-i` option.' ) usages = [ q . get ( ) for _ in range ( repeat ) ] usage = max ( usages ) print ( "maximum of %d: %f MB per loop" % ( repeat , usage ) ) ip . register_magics ( MemMagics ) | load memory usage ipython magic require memory_profiler package to be installed |
53,753 | def parse_tree_from_dict ( node , locs ) : d = dict ( ) for n , l in locs . items ( ) : try : if l [ 1 ] == 'text' : d [ n ] = node . find ( l [ 0 ] ) . text elif l [ 1 ] == 'child' : child = node . find ( l [ 0 ] ) . getchildren ( ) if len ( child ) > 1 : raise AmbiguousElementException ( 'There are too many elements' ) elif l [ 2 ] == 'text' : d [ n ] = child [ 0 ] . text elif l [ 2 ] == 'tag' : d [ n ] = child [ 0 ] . tag else : d [ n ] = node . find ( l [ 0 ] ) . get ( l [ 1 ] ) except : pass return d | Processes key locations . |
53,754 | def xml_to_root ( xml : Union [ str , IO ] ) -> ElementTree . Element : if isinstance ( xml , str ) : if '<' in xml : return ElementTree . fromstring ( xml ) else : with open ( xml ) as fh : xml_to_root ( fh ) tree = ElementTree . parse ( xml ) return tree . getroot ( ) | Parse XML into an ElemeTree object . |
53,755 | def mergeAllLayers ( self ) : start = time . time ( ) while ( len ( self . layers ) > 1 ) : self . mergeBottomLayers ( ) print ( 'merge time:' + str ( time . time ( ) - start ) ) return self . layers [ 0 ] | Merge all the layers together . |
53,756 | def synonyms ( self ) : syns = { } for k , v in self . _declared_terms . items ( ) : k = k . strip ( ) if v . get ( 'synonym' ) : syns [ k . lower ( ) ] = v [ 'synonym' ] if not '.' in k : syns [ ROOT_TERM + '.' + k . lower ( ) ] = v [ 'synonym' ] return syns | Return a dict of term synonyms |
53,757 | def super_terms ( self ) : if self . doc and self . doc . super_terms : return self . doc . super_terms return { k . lower ( ) : v [ 'inheritsfrom' ] . lower ( ) for k , v in self . _declared_terms . items ( ) if 'inheritsfrom' in v } | Return a dictionary mapping term names to their super terms |
53,758 | def declare_dict ( self ) : if not self . root : for _ in self : pass return { 'sections' : self . _declared_sections , 'terms' : self . _declared_terms , 'synonyms' : self . synonyms } | Return declared sections terms and synonyms as a dict |
53,759 | def errors_as_dict ( self ) : errors = [ ] for e in self . errors : errors . append ( { 'file' : e . term . file_name , 'row' : e . term . row if e . term else '<unknown>' , 'col' : e . term . col if e . term else '<unknown>' , 'term' : e . term . join if e . term else '<unknown>' , 'error' : str ( e ) } ) return errors | Return parse errors as a dict |
53,760 | def inherited_children ( self , t ) : if not t . get ( 'inheritsfrom' ) : return if not 'section' in t : raise DeclarationError ( "DeclareTerm for '{}' must specify a section to use InheritsFrom" . format ( t [ 'term' ] ) ) t_p , t_r = Term . split_term ( t [ 'term' ] ) ih_p , ih_r = Term . split_term ( t [ 'inheritsfrom' ] ) section_terms = self . _declared_sections [ t [ 'section' ] . lower ( ) ] [ 'terms' ] for st_name in section_terms : if st_name . lower ( ) . startswith ( ih_r . lower ( ) + '.' ) : st_p , st_r = Term . split_term ( st_name ) subtype_name = t_r + '.' + st_r subtype_d = dict ( self . _declared_terms [ st_name . lower ( ) ] . items ( ) ) subtype_d [ 'inheritsfrom' ] = '' subtype_d [ 'term' ] = subtype_name yield subtype_d | Generate inherited children based on a terms InhertsFrom property . The input term must have both an InheritsFrom property and a defined Section |
53,761 | def read_levels ( text ) : x = [ ] for i in range ( 0 , len ( NAUTILUSRESOLVER . getMetadata ( text ) . citation ) ) : x . append ( NAUTILUSRESOLVER . getReffs ( text , level = i ) ) return x | Read text and get there reffs |
53,762 | def FlaskNautilusManager ( resolver , flask_nautilus ) : global NAUTILUSRESOLVER NAUTILUSRESOLVER = resolver @ click . group ( ) @ click . option ( '--verbose' , default = False ) def CLI ( verbose ) : click . echo ( "Command Line Interface of Flask" ) resolver . logger . disabled = not verbose @ CLI . command ( ) def flush_resolver ( ) : if resolver . clear ( ) is True : click . echo ( "Caching of Resolver Cleared" ) @ CLI . command ( ) def flush_http_cache ( ) : flask_nautilus . flaskcache . clear ( ) @ CLI . command ( ) def flush_both ( ) : if resolver . cache . clear ( ) is True : click . echo ( "Caching of Resolver Cleared" ) if flask_nautilus . flaskcache . clear ( ) is True : click . echo ( "Caching of HTTP Cleared" ) @ CLI . command ( ) def parse ( ) : ret = resolver . parse ( ) click . echo ( "Preprocessed %s texts" % len ( ret . readableDescendants ) ) @ CLI . command ( ) @ click . option ( '--threads' , default = 0 , type = int ) def process_reffs ( threads ) : if threads < 1 : threads = THREADS texts = list ( resolver . getMetadata ( ) . readableDescendants ) click . echo ( "Using {} processes to parse references of {} texts" . format ( threads , len ( texts ) ) ) with Pool ( processes = threads ) as executor : for future in executor . imap_unordered ( read_levels , [ t . id for t in texts ] ) : del future click . echo ( "References parsed" ) return CLI | Provides a manager for flask scripts to perform specific maintenance operations |
53,763 | def is_iter_non_string ( obj ) : if isinstance ( obj , list ) or isinstance ( obj , tuple ) : return True return False | test if object is a list or tuple |
53,764 | def is_dict_like ( obj , attr = ( 'keys' , 'items' ) ) : for a in attr : if not hasattr ( obj , a ) : return False return True | test if object is dict like |
53,765 | def is_list_of_dict_like ( obj , attr = ( 'keys' , 'items' ) ) : try : if len ( obj ) == 0 : return False return all ( [ is_dict_like ( i , attr ) for i in obj ] ) except Exception : return False | test if object is a list only containing dict like items |
53,766 | def is_path_like ( obj , attr = ( 'name' , 'is_file' , 'is_dir' , 'iterdir' ) ) : for a in attr : if not hasattr ( obj , a ) : return False return True | test if object is pathlib . Path like |
53,767 | def convert_type ( d , intype , outtype , convert_list = True , in_place = True ) : if not in_place : out_dict = copy . deepcopy ( d ) else : out_dict = d def _convert ( obj ) : if isinstance ( obj , intype ) : try : obj = outtype ( obj ) except Exception : pass elif isinstance ( obj , list ) and convert_list : obj = _traverse_iter ( obj ) elif isinstance ( obj , tuple ) and convert_list : obj = tuple ( _traverse_iter ( obj ) ) return obj def _traverse_dict ( dic ) : for key in dic . keys ( ) : if is_dict_like ( dic [ key ] ) : _traverse_dict ( dic [ key ] ) else : dic [ key ] = _convert ( dic [ key ] ) def _traverse_iter ( iter ) : new_iter = [ ] for key in iter : if is_dict_like ( key ) : _traverse_dict ( key ) new_iter . append ( key ) else : new_iter . append ( _convert ( key ) ) return new_iter if is_dict_like ( out_dict ) : _traverse_dict ( out_dict ) else : _convert ( out_dict ) return out_dict | convert all values of one type to another |
53,768 | def extract ( d , path = None ) : path = [ ] if path is None else path d_new = copy . deepcopy ( d ) d_sub = d_new for key in path [ : - 1 ] : d_sub = d_sub [ key ] key = path [ - 1 ] d_extract = { key : d_sub [ key ] } d_sub . pop ( key ) return d_new , d_extract | extract section of dictionary |
53,769 | def indexes ( dic , keys = None ) : keys = [ ] if keys is None else keys assert hasattr ( dic , 'keys' ) new = dic . copy ( ) old_key = None for key in keys : if not hasattr ( new , 'keys' ) : raise KeyError ( 'No indexes after: {}' . format ( old_key ) ) old_key = key new = new [ key ] return new | index dictionary by multiple keys |
53,770 | def unflatten ( d , key_as_tuple = True , delim = '.' , list_of_dicts = None , deepcopy = True ) : r if not d : return d if deepcopy : try : d = copy . deepcopy ( d ) except Exception : warnings . warn ( 'error in deepcopy, so using references to input dict' ) if key_as_tuple : result = d . pop ( ( ) ) if ( ) in d else { } else : result = d . pop ( '' ) if '' in d else { } for key , value in d . items ( ) : if not isinstance ( key , tuple ) and key_as_tuple : raise ValueError ( 'key not tuple and key_as_tuple set to True: {}' . format ( key ) ) elif not isinstance ( key , basestring ) and not key_as_tuple : raise ValueError ( 'key not string and key_as_tuple set to False: {}' . format ( key ) ) elif isinstance ( key , basestring ) and not key_as_tuple : parts = key . split ( delim ) else : parts = key d = result for part in parts [ : - 1 ] : if part not in d : d [ part ] = { } d = d [ part ] if not is_dict_like ( d ) : v1 , v2 = sorted ( [ str ( d ) , str ( { parts [ - 1 ] : value } ) ] ) raise KeyError ( "child conflict for path: " "{0}; {1} and {2}" . format ( parts [ : - 1 ] , v1 , v2 ) ) elif parts [ - 1 ] in d : try : value = merge ( [ d [ parts [ - 1 ] ] , value ] ) except Exception : v1 , v2 = sorted ( [ str ( value ) , str ( d [ parts [ - 1 ] ] ) ] ) raise KeyError ( "child conflict for path: " "{0}; {1} and {2}" . format ( parts , v1 , v2 ) ) d [ parts [ - 1 ] ] = value if list_of_dicts is not None : result = _recreate_lists ( result , list_of_dicts ) return result | r unflatten dictionary with keys as tuples or delimited strings |
53,771 | def remove_keys ( d , keys = None , use_wildcards = True , list_of_dicts = False , deepcopy = True ) : keys = [ ] if keys is None else keys list_of_dicts = '__list__' if list_of_dicts else None def is_in ( a , bs ) : if use_wildcards : for b in bs : try : if a == b : return True if fnmatch ( a , b ) : return True except Exception : pass return False else : try : return a in bs except Exception : return False if not hasattr ( d , 'items' ) : return d else : dic = flatten ( d , list_of_dicts = list_of_dicts ) new_dic = { } for key , value in dic . items ( ) : new_key = tuple ( [ i for i in key if not is_in ( i , keys ) ] ) if not new_key : continue try : if new_key [ - 1 ] . startswith ( list_of_dicts ) : continue except Exception : pass new_dic [ new_key ] = value return unflatten ( new_dic , list_of_dicts = list_of_dicts , deepcopy = deepcopy ) | remove certain keys from nested dict retaining preceeding paths |
53,772 | def remove_paths ( d , keys , list_of_dicts = False , deepcopy = True ) : keys = [ ( key , ) if not isinstance ( key , tuple ) else key for key in keys ] list_of_dicts = '__list__' if list_of_dicts else None def contains ( path ) : for k in keys : if set ( k ) . issubset ( path ) : return True return False flatd = flatten ( d , list_of_dicts = list_of_dicts ) flatd = { path : v for path , v in flatd . items ( ) if not contains ( path ) } return unflatten ( flatd , list_of_dicts = list_of_dicts , deepcopy = deepcopy ) | remove paths containing certain keys from dict |
53,773 | def filter_values ( d , vals = None , list_of_dicts = False , deepcopy = True ) : vals = [ ] if vals is None else vals list_of_dicts = '__list__' if list_of_dicts else None flatd = flatten ( d , list_of_dicts = list_of_dicts ) def is_in ( a , b ) : try : return a in b except Exception : return False flatd = { k : v for k , v in flatd . items ( ) if is_in ( v , vals ) } return unflatten ( flatd , list_of_dicts = list_of_dicts , deepcopy = deepcopy ) | filters leaf nodes of nested dictionary |
53,774 | def filter_keys ( d , keys , use_wildcards = False , list_of_dicts = False , deepcopy = True ) : list_of_dicts = '__list__' if list_of_dicts else None flatd = flatten ( d , list_of_dicts = list_of_dicts ) def is_in ( a , bs ) : if use_wildcards : for b in bs : try : if a == b : return True if fnmatch ( b , a ) : return True except Exception : pass return False else : try : return a in bs except Exception : return False flatd = { paths : v for paths , v in flatd . items ( ) if any ( [ is_in ( k , paths ) for k in keys ] ) } return unflatten ( flatd , list_of_dicts = list_of_dicts , deepcopy = deepcopy ) | filter dict by certain keys |
53,775 | def filter_paths ( d , paths , list_of_dicts = False , deepcopy = True ) : list_of_dicts = '__list__' if list_of_dicts else None all_keys = [ x for y in paths if isinstance ( y , tuple ) for x in y ] all_keys += [ x for x in paths if not isinstance ( x , tuple ) ] new_d = filter_keys ( d , all_keys , list_of_dicts = list_of_dicts ) new_d = flatten ( d , list_of_dicts = list_of_dicts ) for key in list ( new_d . keys ( ) ) : if not any ( [ set ( key ) . issuperset ( path if isinstance ( path , tuple ) else [ path ] ) for path in paths ] ) : new_d . pop ( key ) return unflatten ( new_d , list_of_dicts = list_of_dicts , deepcopy = deepcopy ) | filter dict by certain paths containing key sets |
53,776 | def rename_keys ( d , keymap = None , list_of_dicts = False , deepcopy = True ) : list_of_dicts = '__list__' if list_of_dicts else None keymap = { } if keymap is None else keymap flatd = flatten ( d , list_of_dicts = list_of_dicts ) flatd = { tuple ( [ keymap . get ( k , k ) for k in path ] ) : v for path , v in flatd . items ( ) } return unflatten ( flatd , list_of_dicts = list_of_dicts , deepcopy = deepcopy ) | rename keys in dict |
53,777 | def combine_lists ( d , keys = None , deepcopy = True ) : if isinstance ( d , list ) : init_list = True d = { 'dummy_key843' : d } else : init_list = False flattened = flatten ( d , list_of_dicts = None ) for key , value in list ( flattened . items ( ) ) : if keys is not None : try : if not key [ - 1 ] in keys : continue except Exception : continue if not isinstance ( value , list ) : continue if not all ( [ is_dict_like ( d ) for d in value ] ) : continue newd = { } for subdic in value : for subk , subv in subdic . items ( ) : if subk not in newd : newd [ subk ] = [ ] newd [ subk ] . append ( subv ) flattened [ key ] = newd final = unflatten ( flattened , list_of_dicts = None , deepcopy = deepcopy ) if init_list : return list ( final . values ( ) ) [ 0 ] else : return final | combine lists of dicts |
53,778 | def list_to_dict ( lst , key = None , remove_key = True ) : assert all ( [ is_dict_like ( d ) for d in lst ] ) if key is not None : assert all ( [ key in d for d in lst ] ) new_dict = { } for i , d in enumerate ( lst ) : d = unflatten ( flatten ( d ) ) if key is None : new_dict [ str ( i ) ] = d else : if remove_key : k = d . pop ( key ) else : k = d [ key ] new_dict [ k ] = d return new_dict | convert a list of dicts to a dict with root keys |
53,779 | def diff ( new_dict , old_dict , iter_prefix = '__iter__' , np_allclose = False , ** kwargs ) : if np_allclose : try : import numpy except ImportError : raise ValueError ( "to use np_allclose, numpy must be installed" ) dct1_flat = flatten ( new_dict , all_iters = iter_prefix ) dct2_flat = flatten ( old_dict , all_iters = iter_prefix ) outcome = { 'insertions' : [ ] , 'deletions' : [ ] , 'changes' : [ ] , 'uncomparable' : [ ] } for path , val in dct1_flat . items ( ) : if path not in dct2_flat : outcome [ 'insertions' ] . append ( ( path , val ) ) continue other_val = dct2_flat . pop ( path ) if np_allclose : try : if numpy . allclose ( val , other_val , ** kwargs ) : continue except Exception : pass try : if val != other_val : outcome [ 'changes' ] . append ( ( path , ( val , other_val ) ) ) except Exception : outcome [ 'uncomparable' ] . append ( ( path , ( val , other_val ) ) ) for path2 , val2 in dct2_flat . items ( ) : outcome [ 'deletions' ] . append ( ( path2 , val2 ) ) for key in list ( outcome . keys ( ) ) : if not outcome [ key ] : outcome . pop ( key ) try : outcome [ key ] = sorted ( outcome [ key ] ) except Exception : pass return outcome | return the difference between two dict_like objects |
53,780 | def copy ( src , dst , only_update = False , copystat = True , cwd = None , dest_is_dir = False , create_dest_dirs = False , logger = None ) : if cwd : if not os . path . isabs ( src ) : src = os . path . join ( cwd , src ) if not os . path . isabs ( dst ) : dst = os . path . join ( cwd , dst ) if not os . path . exists ( src ) : msg = "Source: `{}` does not exist" . format ( src ) raise FileNotFoundError ( msg ) if dest_is_dir : if not dst [ - 1 ] == '/' : dst = dst + '/' else : if os . path . exists ( dst ) and os . path . isdir ( dst ) : dest_is_dir = True if dest_is_dir : dest_dir = dst dest_fname = os . path . basename ( src ) dst = os . path . join ( dest_dir , dest_fname ) else : dest_dir = os . path . dirname ( dst ) dest_fname = os . path . basename ( dst ) if not os . path . exists ( dest_dir ) : if create_dest_dirs : make_dirs ( dest_dir , logger = logger ) else : msg = "You must create directory first." raise FileNotFoundError ( msg ) if only_update : if not missing_or_other_newer ( dst , src ) : if logger : logger . debug ( "Did not copy {} to {} (source not newer)" . format ( src , dst ) ) return if os . path . islink ( dst ) : if os . path . abspath ( os . path . realpath ( dst ) ) == os . path . abspath ( dst ) : pass else : if logger : logger . debug ( "Copying {} to {}" . format ( src , dst ) ) shutil . copy ( src , dst ) if copystat : shutil . copystat ( src , dst ) return dst | Augmented shutil . copy with extra options and slightly modified behaviour |
53,781 | def md5_of_file ( path , nblocks = 128 ) : md = md5 ( ) with open ( path , 'rb' ) as f : for chunk in iter ( lambda : f . read ( nblocks * md . block_size ) , b'' ) : md . update ( chunk ) return md | Computes the md5 hash of a file . |
53,782 | def missing_or_other_newer ( path , other_path , cwd = None ) : cwd = cwd or '.' path = get_abspath ( path , cwd = cwd ) other_path = get_abspath ( other_path , cwd = cwd ) if not os . path . exists ( path ) : return True if os . path . getmtime ( other_path ) - 1e-6 >= os . path . getmtime ( path ) : return True return False | Investigate if path is non - existant or older than provided reference path . |
53,783 | def find_binary_of_command ( candidates ) : from distutils . spawn import find_executable for c in candidates : binary_path = find_executable ( c ) if c and binary_path : return c , binary_path raise RuntimeError ( 'No binary located for candidates: {}' . format ( candidates ) ) | Calls find_executable from distuils for provided candidates and returns first hit . If no candidate mathces a RuntimeError is raised |
53,784 | def get_from_metadata_file ( cls , dirpath , key ) : fullpath = os . path . join ( dirpath , cls . metadata_filename ) if os . path . exists ( fullpath ) : d = pickle . load ( open ( fullpath , 'rb' ) ) return d [ key ] else : raise FileNotFoundError ( "No such file: {0}" . format ( fullpath ) ) | Get value of key in metadata file dict . |
53,785 | def view_plugins ( category = None ) : if category is not None : if category == 'parsers' : return { name : { "descript" : klass . plugin_descript , "regex" : klass . file_regex } for name , klass in _all_plugins [ category ] . items ( ) } return { name : klass . plugin_descript for name , klass in _all_plugins [ category ] . items ( ) } else : return { cat : { name : klass . plugin_descript for name , klass in plugins . items ( ) } for cat , plugins in _all_plugins . items ( ) } | return a view of the loaded plugin names and descriptions |
53,786 | def unload_plugin ( name , category = None ) : if category is not None : _all_plugins [ category ] . pop ( name ) else : for cat in _all_plugins : if name in _all_plugins [ cat ] : _all_plugins [ cat ] . pop ( name ) | remove single plugin |
53,787 | def load_plugin_classes ( classes , category = None , overwrite = False ) : load_errors = [ ] for klass in classes : for pcat , pinterface in _plugins_interface . items ( ) : if category is not None and not pcat == category : continue if all ( [ hasattr ( klass , attr ) for attr in pinterface ] ) : if klass . plugin_name in _all_plugins [ pcat ] and not overwrite : err = '{0} is already set for {1}' . format ( klass . plugin_name , pcat ) load_errors . append ( ( klass . __name__ , '{}' . format ( err ) ) ) continue _all_plugins [ pcat ] [ klass . plugin_name ] = klass ( ) else : load_errors . append ( ( klass . __name__ , 'does not match {} interface: {}' . format ( pcat , pinterface ) ) ) return load_errors | load plugins from class objects |
53,788 | def load_plugins_dir ( path , category = None , overwrite = False ) : if hasattr ( path , 'glob' ) : pypaths = path . glob ( '*.py' ) else : pypaths = glob . glob ( os . path . join ( path , '*.py' ) ) load_errors = [ ] for pypath in pypaths : mod_name = str ( uuid . uuid4 ( ) ) try : if hasattr ( pypath , 'resolve' ) : pypath = pypath . resolve ( ) with warnings . catch_warnings ( record = True ) : warnings . filterwarnings ( "ignore" , category = ImportWarning ) if hasattr ( pypath , 'maketemp' ) : with pypath . maketemp ( ) as f : module = load_source ( mod_name , f . name ) else : module = load_source ( mod_name , str ( pypath ) ) except Exception as err : load_errors . append ( ( str ( pypath ) , 'Load Error: {}' . format ( err ) ) ) continue class_members = inspect . getmembers ( module , inspect . isclass ) classes = [ klass for klass_name , klass in class_members if klass . __module__ == mod_name ] load_errors += load_plugin_classes ( classes , category , overwrite ) return load_errors | load plugins from a directory |
53,789 | def load_builtin_plugins ( category = None , overwrite = False ) : load_errors = [ ] for cat , path in _plugins_builtin . items ( ) : if cat != category and category is not None : continue load_errors += load_plugins_dir ( path , cat , overwrite = overwrite ) return load_errors | load plugins from builtin directories |
53,790 | def encode ( obj , outtype = 'json' , raise_error = False ) : for encoder in get_plugins ( 'encoders' ) . values ( ) : if ( isinstance ( obj , encoder . objclass ) and hasattr ( encoder , 'to_{}' . format ( outtype ) ) ) : return getattr ( encoder , 'to_{}' . format ( outtype ) ) ( obj ) break if raise_error : raise ValueError ( "No JSON serializer is available for" "{0} (of type {1})" . format ( obj , type ( obj ) ) ) else : return obj | encode objects via encoder plugins to new types |
53,791 | def decode ( dct , intype = 'json' , raise_error = False ) : for decoder in get_plugins ( 'decoders' ) . values ( ) : if ( set ( list ( decoder . dict_signature ) ) . issubset ( dct . keys ( ) ) and hasattr ( decoder , 'from_{}' . format ( intype ) ) and getattr ( decoder , 'allow_other_keys' , False ) ) : return getattr ( decoder , 'from_{}' . format ( intype ) ) ( dct ) break elif ( sorted ( list ( decoder . dict_signature ) ) == sorted ( dct . keys ( ) ) and hasattr ( decoder , 'from_{}' . format ( intype ) ) ) : return getattr ( decoder , 'from_{}' . format ( intype ) ) ( dct ) break if raise_error : raise ValueError ( 'no suitable plugin found for: {}' . format ( dct ) ) else : return dct | decode dict objects via decoder plugins to new type |
53,792 | def parser_available ( fpath ) : if isinstance ( fpath , basestring ) : fname = fpath elif hasattr ( fpath , 'open' ) and hasattr ( fpath , 'name' ) : fname = fpath . name elif hasattr ( fpath , 'readline' ) and hasattr ( fpath , 'name' ) : fname = fpath . name else : raise ValueError ( 'fpath should be a str or file_like object: {}' . format ( fpath ) ) for parser in get_plugins ( 'parsers' ) . values ( ) : if fnmatch ( fname , parser . file_regex ) : return True return False | test if parser plugin available for fpath |
53,793 | def from_zipfile ( cls , path , filename , encoding , dialect , fields , converters ) : stream = ZipReader ( path , filename ) . readlines ( encoding ) return cls ( stream , dialect , fields , converters ) | Read delimited text from zipfile . |
53,794 | def from_file ( cls , path , encoding , dialect , fields , converters , field_index ) : return cls ( open ( path , 'r' , encoding = encoding ) , dialect , fields , converters , field_index ) | Read delimited text from a text file . |
53,795 | def read_bytes ( self ) : with ZipFile ( self . path , mode = 'r' ) as archive : return archive . read ( self . filename ) | Read content into byte string . |
53,796 | def readlines_bytes ( self ) : with open_zipfile_archive ( self . path , self . filename ) as file : for line in file : yield line . rstrip ( b'\r\n' ) | Read content into byte str line iterator . |
53,797 | def xmlparse ( self , file ) : if self . CACHE_FULL_TEI is True : return self . get_or ( _cache_key ( "Nautilus" , self . name , "File" , "Tree" , file . name ) , super ( ProtoNautilusCtsResolver , self ) . xmlparse , file ) return super ( ProtoNautilusCtsResolver , self ) . xmlparse ( file ) | Parse a XML file |
53,798 | def get_or ( self , cache_key , callback , * args , ** kwargs ) : cached = self . cache . get ( cache_key ) if cached is not None : return cached else : try : output = callback ( * args , ** kwargs ) except MyCapytain . errors . UnknownCollection as E : raise UnknownCollection ( str ( E ) ) except Exception as E : raise E self . cache . set ( cache_key , output , self . TIMEOUT ) return output | Get or set the cache using callback and arguments |
53,799 | def read ( self , identifier , path = None ) : if self . CACHE_FULL_TEI is True : o = self . cache . get ( _cache_key ( self . texts_parsed_cache_key , identifier ) ) if o is not None : return o else : with open ( path ) as f : o = Text ( urn = identifier , resource = self . xmlparse ( f ) ) self . cache . set ( _cache_key ( self . texts_parsed_cache_key , identifier ) , o ) else : with open ( path ) as f : o = Text ( urn = identifier , resource = self . xmlparse ( f ) ) return o | Read a text object given an identifier and a path |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.