idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
57,200 | async def index_page ( self , request ) : context = { "initial_state" : self . schema . to_json ( ) } return render_template ( self . template , request , context , app_key = TEMPLATE_APP_KEY , ) | Return index page with initial state for admin |
57,201 | async def logout ( self , request ) : if "Authorization" not in request . headers : msg = "Auth header is not present, can not destroy token" raise JsonValidaitonError ( msg ) response = json_response ( ) await forget ( request , response ) return response | Simple handler for logout |
57,202 | def validate_query_structure ( query ) : query_dict = dict ( query ) filters = query_dict . pop ( '_filters' , None ) if filters : try : f = json . loads ( filters ) except ValueError : msg = '_filters field can not be serialized' raise JsonValidaitonError ( msg ) else : query_dict [ '_filters' ] = f try : q = ListQuery ( query_dict ) except t . DataError as exc : msg = '_filters query invalid' raise JsonValidaitonError ( msg , ** as_dict ( exc ) ) return q | Validate query arguments in list request . |
57,203 | def to_json ( self ) : endpoints = [ ] for endpoint in self . endpoints : list_fields = endpoint . fields resource_type = endpoint . Meta . resource_type table = endpoint . Meta . table data = endpoint . to_dict ( ) data [ 'fields' ] = resource_type . get_type_of_fields ( list_fields , table , ) endpoints . append ( data ) data = { 'title' : self . title , 'endpoints' : sorted ( endpoints , key = lambda x : x [ 'name' ] ) , } return json . dumps ( data ) | Prepare data for the initial state of the admin - on - rest |
57,204 | def resources ( self ) : resources = [ ] for endpoint in self . endpoints : resource_type = endpoint . Meta . resource_type table = endpoint . Meta . table url = endpoint . name resources . append ( ( resource_type , { 'table' : table , 'url' : url } ) ) return resources | Return list of all registered resources . |
57,205 | def get_type_of_fields ( fields , table ) : if not fields : fields = table . primary_key actual_fields = [ field for field in table . c . items ( ) if field [ 0 ] in fields ] data_type_fields = { name : FIELD_TYPES . get ( type ( field_type . type ) , rc . TEXT_FIELD . value ) for name , field_type in actual_fields } return data_type_fields | Return data types of fields that are in table . If a given parameter is empty return primary key . |
57,206 | def get_type_for_inputs ( table ) : return [ dict ( type = INPUT_TYPES . get ( type ( field_type . type ) , rc . TEXT_INPUT . value ) , name = name , isPrimaryKey = ( name in table . primary_key ) , props = None , ) for name , field_type in table . c . items ( ) ] | Return information about table s fields in dictionary type . |
57,207 | def _setup ( app , * , schema , title = None , app_key = APP_KEY , db = None ) : admin = web . Application ( loop = app . loop ) app [ app_key ] = admin loader = jinja2 . FileSystemLoader ( [ TEMPLATES_ROOT , ] ) aiohttp_jinja2 . setup ( admin , loader = loader , app_key = TEMPLATE_APP_KEY ) if title : schema . title = title resources = [ init ( db , info [ 'table' ] , url = info [ 'url' ] ) for init , info in schema . resources ] admin_handler = AdminOnRestHandler ( admin , resources = resources , loop = app . loop , schema = schema , ) admin [ 'admin_handler' ] = admin_handler setup_admin_on_rest_handlers ( admin , admin_handler ) return admin | Initialize the admin - on - rest admin |
57,208 | def to_dict ( self ) : data = { "name" : self . name , "canEdit" : self . can_edit , "canCreate" : self . can_create , "canDelete" : self . can_delete , "perPage" : self . per_page , "showPage" : self . generate_data_for_show_page ( ) , "editPage" : self . generate_data_for_edit_page ( ) , "createPage" : self . generate_data_for_create_page ( ) , } return data | Return dict with the all base information about the instance . |
57,209 | def generate_data_for_edit_page ( self ) : if not self . can_edit : return { } if self . edit_form : return self . edit_form . to_dict ( ) return self . generate_simple_data_page ( ) | Generate a custom representation of table s fields in dictionary type if exist edit form else use default representation . |
57,210 | def generate_data_for_create_page ( self ) : if not self . can_create : return { } if self . create_form : return self . create_form . to_dict ( ) return self . generate_simple_data_page ( ) | Generate a custom representation of table s fields in dictionary type if exist create form else use default representation . |
57,211 | async def register ( self , request ) : session = await get_session ( request ) user_id = session . get ( 'user_id' ) if user_id : return redirect ( request , 'timeline' ) error = None form = None if request . method == 'POST' : form = await request . post ( ) user_id = await db . get_user_id ( self . mongo . user , form [ 'username' ] ) if not form [ 'username' ] : error = 'You have to enter a username' elif not form [ 'email' ] or '@' not in form [ 'email' ] : error = 'You have to enter a valid email address' elif not form [ 'password' ] : error = 'You have to enter a password' elif form [ 'password' ] != form [ 'password2' ] : error = 'The two passwords do not match' elif user_id is not None : error = 'The username is already taken' else : await self . mongo . user . insert ( { 'username' : form [ 'username' ] , 'email' : form [ 'email' ] , 'pw_hash' : generate_password_hash ( form [ 'password' ] ) } ) return redirect ( request , 'login' ) return { "error" : error , "form" : form } | Registers the user . |
57,212 | async def follow_user ( self , request ) : username = request . match_info [ 'username' ] session = await get_session ( request ) user_id = session . get ( 'user_id' ) if not user_id : raise web . HTTPNotAuthorized ( ) whom_id = await db . get_user_id ( self . mongo . user , username ) if whom_id is None : raise web . HTTPFound ( ) await self . mongo . follower . update ( { 'who_id' : ObjectId ( user_id ) } , { '$push' : { 'whom_id' : whom_id } } , upsert = True ) return redirect ( request , 'user_timeline' , parts = { "username" : username } ) | Adds the current user as follower of the given user . |
57,213 | async def add_message ( self , request ) : session = await get_session ( request ) user_id = session . get ( 'user_id' ) if not user_id : raise web . HTTPNotAuthorized ( ) form = await request . post ( ) if form . get ( 'text' ) : user = await self . mongo . user . find_one ( { '_id' : ObjectId ( session [ 'user_id' ] ) } , { 'email' : 1 , 'username' : 1 } ) await self . mongo . message . insert ( { 'author_id' : ObjectId ( user_id ) , 'email' : user [ 'email' ] , 'username' : user [ 'username' ] , 'text' : form [ 'text' ] , 'pub_date' : datetime . datetime . utcnow ( ) } ) return redirect ( request , 'timeline' ) | Registers a new message for the user . |
57,214 | def robo_avatar_url ( user_data , size = 80 ) : hash = md5 ( str ( user_data ) . strip ( ) . lower ( ) . encode ( 'utf-8' ) ) . hexdigest ( ) url = "https://robohash.org/{hash}.png?size={size}x{size}" . format ( hash = hash , size = size ) return url | Return the gravatar image for the given email address . |
57,215 | def waitgrab ( self , timeout = 60 , autocrop = True , cb_imgcheck = None ) : t = 0 sleep_time = 0.3 repeat_time = 1 while 1 : log . debug ( 'sleeping %s secs' % str ( sleep_time ) ) time . sleep ( sleep_time ) t += sleep_time img = self . grab ( autocrop = autocrop ) if img : if not cb_imgcheck : break if cb_imgcheck ( img ) : break sleep_time = repeat_time repeat_time += 1 if t > timeout : msg = 'Timeout! elapsed time:%s timeout:%s ' % ( t , timeout ) raise DisplayTimeoutError ( msg ) break log . debug ( 'screenshot is empty, next try..' ) assert img return img | start process and create screenshot . Repeat screenshot until it is not empty and cb_imgcheck callback function returns True for current screenshot . |
57,216 | def _setup_xauth ( self ) : handle , filename = tempfile . mkstemp ( prefix = 'PyVirtualDisplay.' , suffix = '.Xauthority' ) self . _xauth_filename = filename os . close ( handle ) self . _old_xauth = { } self . _old_xauth [ 'AUTHFILE' ] = os . getenv ( 'AUTHFILE' ) self . _old_xauth [ 'XAUTHORITY' ] = os . getenv ( 'XAUTHORITY' ) os . environ [ 'AUTHFILE' ] = os . environ [ 'XAUTHORITY' ] = filename cookie = xauth . generate_mcookie ( ) xauth . call ( 'add' , self . new_display_var , '.' , cookie ) | Set up the Xauthority file and the XAUTHORITY environment variable . |
57,217 | def _clear_xauth ( self ) : os . remove ( self . _xauth_filename ) for varname in [ 'AUTHFILE' , 'XAUTHORITY' ] : if self . _old_xauth [ varname ] is None : del os . environ [ varname ] else : os . environ [ varname ] = self . _old_xauth [ varname ] self . _old_xauth = None | Clear the Xauthority file and restore the environment variables . |
57,218 | def GetCookies ( self ) : sectoken = self . GetSecurityToken ( self . Username , self . Password ) url = self . share_point_site + '/_forms/default.aspx?wa=wsignin1.0' response = requests . post ( url , data = sectoken ) return response . cookies | Grabs the cookies form your Office Sharepoint site and uses it as Authentication for the rest of the calls |
57,219 | def DeleteList ( self , listName ) : soap_request = soap ( 'DeleteList' ) soap_request . add_parameter ( 'listName' , listName ) self . last_request = str ( soap_request ) response = self . _session . post ( url = self . _url ( 'Lists' ) , headers = self . _headers ( 'DeleteList' ) , data = str ( soap_request ) , verify = self . _verify_ssl , timeout = self . timeout ) if response == 200 : return response . text else : return response | Delete a List with given name |
57,220 | def GetListCollection ( self ) : soap_request = soap ( 'GetListCollection' ) self . last_request = str ( soap_request ) response = self . _session . post ( url = self . _url ( 'SiteData' ) , headers = self . _headers ( 'GetListCollection' ) , data = str ( soap_request ) , verify = self . _verify_ssl , timeout = self . timeout ) if response . status_code == 200 : envelope = etree . fromstring ( response . text . encode ( 'utf-8' ) , parser = etree . XMLParser ( huge_tree = self . huge_tree ) ) result = envelope [ 0 ] [ 0 ] [ 0 ] . text lists = envelope [ 0 ] [ 0 ] [ 1 ] data = [ ] for _list in lists : _list_data = { } for item in _list : key = item . tag . replace ( '{http://schemas.microsoft.com/sharepoint/soap/}' , '' ) value = item . text _list_data [ key ] = value data . append ( _list_data ) return data else : return response | Returns List information for current Site |
57,221 | def _convert_to_internal ( self , data ) : for _dict in data : keys = list ( _dict . keys ( ) ) [ : ] for key in keys : if key not in self . _disp_cols : raise Exception ( key + ' not a column in current List.' ) _dict [ self . _disp_cols [ key ] [ 'name' ] ] = self . _sp_type ( key , _dict . pop ( key ) ) | From Column Title to Column_x0020_Title |
57,222 | def _convert_to_display ( self , data ) : for _dict in data : keys = list ( _dict . keys ( ) ) [ : ] for key in keys : if key not in self . _sp_cols : raise Exception ( key + ' not a column in current List.' ) _dict [ self . _sp_cols [ key ] [ 'name' ] ] = self . _python_type ( key , _dict . pop ( key ) ) | From Column_x0020_Title to Column Title |
57,223 | def GetView ( self , viewname ) : soap_request = soap ( 'GetView' ) soap_request . add_parameter ( 'listName' , self . listName ) if viewname == None : views = self . GetViewCollection ( ) for view in views : if 'DefaultView' in view : if views [ view ] [ 'DefaultView' ] == 'TRUE' : viewname = view break if self . listName not in [ 'UserInfo' , 'User Information List' ] : soap_request . add_parameter ( 'viewName' , self . views [ viewname ] [ 'Name' ] [ 1 : - 1 ] ) else : soap_request . add_parameter ( 'viewName' , viewname ) self . last_request = str ( soap_request ) response = self . _session . post ( url = self . _url ( 'Views' ) , headers = self . _headers ( 'GetView' ) , data = str ( soap_request ) , verify = self . _verify_ssl , timeout = self . timeout ) if response . status_code == 200 : envelope = etree . fromstring ( response . text . encode ( 'utf-8' ) , parser = etree . XMLParser ( huge_tree = self . huge_tree ) ) view = envelope [ 0 ] [ 0 ] [ 0 ] [ 0 ] info = { key : value for ( key , value ) in view . items ( ) } fields = [ x . items ( ) [ 0 ] [ 1 ] for x in view [ 1 ] ] return { 'info' : info , 'fields' : fields } else : raise Exception ( "ERROR:" , response . status_code , response . text ) | Get Info on View Name |
57,224 | def UpdateListItems ( self , data , kind ) : if type ( data ) != list : raise Exception ( 'data must be a list of dictionaries' ) soap_request = soap ( 'UpdateListItems' ) soap_request . add_parameter ( 'listName' , self . listName ) if kind != 'Delete' : self . _convert_to_internal ( data ) soap_request . add_actions ( data , kind ) self . last_request = str ( soap_request ) response = self . _session . post ( url = self . _url ( 'Lists' ) , headers = self . _headers ( 'UpdateListItems' ) , data = str ( soap_request ) , verify = self . _verify_ssl , timeout = self . timeout ) if response . status_code == 200 : envelope = etree . fromstring ( response . text . encode ( 'utf-8' ) , parser = etree . XMLParser ( huge_tree = self . huge_tree ) ) results = envelope [ 0 ] [ 0 ] [ 0 ] [ 0 ] data = { } for result in results : if result . text != '0x00000000' and result [ 0 ] . text != '0x00000000' : data [ result . attrib [ 'ID' ] ] = ( result [ 0 ] . text , result [ 1 ] . text ) else : data [ result . attrib [ 'ID' ] ] = result [ 0 ] . text return data else : return response | Update List Items kind = New Update or Delete |
57,225 | def GetAttachmentCollection ( self , _id ) : soap_request = soap ( 'GetAttachmentCollection' ) soap_request . add_parameter ( 'listName' , self . listName ) soap_request . add_parameter ( 'listItemID' , _id ) self . last_request = str ( soap_request ) response = self . _session . post ( url = self . _url ( 'Lists' ) , headers = self . _headers ( 'GetAttachmentCollection' ) , data = str ( soap_request ) , verify = False , timeout = self . timeout ) if response . status_code == 200 : envelope = etree . fromstring ( response . text . encode ( 'utf-8' ) , parser = etree . XMLParser ( huge_tree = self . huge_tree ) ) attaches = envelope [ 0 ] [ 0 ] [ 0 ] [ 0 ] attachments = [ ] for attachment in attaches . getchildren ( ) : attachments . append ( attachment . text ) return attachments else : return response | Get Attachments for given List Item ID |
57,226 | def changes ( new_cmp_dict , old_cmp_dict , id_column , columns ) : update_ldict = [ ] same_keys = set ( new_cmp_dict ) . intersection ( set ( old_cmp_dict ) ) for same_key in same_keys : old_dict = old_cmp_dict [ same_key ] new_dict = new_cmp_dict [ same_key ] dict_keys = set ( old_dict ) . intersection ( set ( new_dict ) ) update_dict = { } for dict_key in columns : old_val = old_dict . get ( dict_key , 'NaN' ) new_val = new_dict . get ( dict_key , 'NaN' ) if old_val != new_val and new_val != 'NaN' : if id_column != None : try : update_dict [ id_column ] = old_dict [ id_column ] except KeyError : print ( "Input Dictionary 'old_cmp_dict' must have ID column" ) update_dict [ dict_key ] = new_val if update_dict : update_ldict . append ( update_dict ) return update_ldict | Return a list dict of the changes of the rows that exist in both dictionaries User must provide an ID column for old_cmp_dict |
57,227 | def unique ( new_cmp_dict , old_cmp_dict ) : newkeys = set ( new_cmp_dict ) oldkeys = set ( old_cmp_dict ) unique = newkeys - oldkeys unique_ldict = [ ] for key in unique : unique_ldict . append ( new_cmp_dict [ key ] ) return unique_ldict | Return a list dict of the unique keys in new_cmp_dict |
57,228 | def traceplot ( trace : sample_types , labels : List [ Union [ str , Tuple [ str , str ] ] ] = None , ax : Any = None , x0 : int = 0 ) -> Any : if labels is None : labels = list ( trace . keys ( ) ) if ax is None : _ , ax = plt . subplots ( len ( labels ) , 1 , squeeze = False ) for index , label in enumerate ( labels ) : data = [ sample for sample in trace [ label ] ] ax [ index ] [ 0 ] . set_title ( label ) ax [ index ] [ 0 ] . plot ( __integer_xaxis ( ax [ index ] [ 0 ] , x0 , len ( data ) ) , data ) __pause_for_crude_animation ( ) return ax | Plot samples values . |
57,229 | def read_file_snippets ( file , snippet_store ) : start_reg = re . compile ( "(.*%%SNIPPET_START%% )([a-zA-Z0-9]+)" ) end_reg = re . compile ( "(.*%%SNIPPET_END%% )([a-zA-Z0-9]+)" ) open_snippets = { } with open ( file , encoding = "utf-8" ) as w : lines = w . readlines ( ) for line in lines : printd ( "Got Line: {}" . format ( line ) ) m = start_reg . match ( line ) if m : printd ( "Opened Snippet {}" . format ( m . group ( 2 ) ) ) if m . group ( 2 ) in snippet_store : record_error ( "Repeat definition of Snippet {}" . format ( m . group ( 2 ) ) ) elif m . group ( 2 ) in open_snippets : record_error ( "Snippet already opened {}" . format ( m . group ( 2 ) ) ) else : printd ( "Added {} to open snippets list" . format ( m . group ( 2 ) ) ) open_snippets [ m . group ( 2 ) ] = [ ] continue m = end_reg . match ( line ) if m : printd ( "Found end of Snippet {}" . format ( m . group ( 2 ) ) ) if m . group ( 2 ) not in open_snippets : record_error ( "Reached Snippet End but no start" ) elif m . group ( 2 ) in snippet_store : record_error ( "Repeat definition of Snippet {}" . format ( m . group ( 2 ) ) ) else : snippet_store [ m . group ( 2 ) ] = open_snippets [ m . group ( 2 ) ] del open_snippets [ m . group ( 2 ) ] continue for snippet in open_snippets . values ( ) : printd ( "Adding Line to snippet" ) snippet . append ( line ) for opened in open_snippets : record_error ( "Snippet {} left open - ignoring" . format ( opened ) ) | Parse a file and add all snippets to the snippet_store dictionary |
57,230 | def strip_block_whitespace ( string_list ) : min_ws = min ( [ ( len ( x ) - len ( x . lstrip ( ) ) ) for x in string_list if x != '\n' ] ) return [ x [ min_ws : ] if x != '\n' else x for x in string_list ] | Treats a list of strings as a code block and strips whitespace so that the min whitespace line sits at char 0 of line . |
57,231 | async def prepare ( self , request ) : if request . method != 'GET' : raise HTTPMethodNotAllowed ( request . method , [ 'GET' ] ) if not self . prepared : writer = await super ( ) . prepare ( request ) self . _loop = request . app . loop self . _ping_task = self . _loop . create_task ( self . _ping ( ) ) self . enable_chunked_encoding ( ) return writer else : if request . protocol . transport is None : raise asyncio . CancelledError ( ) | Prepare for streaming and send HTTP headers . |
57,232 | async def send ( self , data , id = None , event = None , retry = None ) : buffer = io . StringIO ( ) if id is not None : buffer . write ( self . LINE_SEP_EXPR . sub ( '' , 'id: {}' . format ( id ) ) ) buffer . write ( self . _sep ) if event is not None : buffer . write ( self . LINE_SEP_EXPR . sub ( '' , 'event: {}' . format ( event ) ) ) buffer . write ( self . _sep ) for chunk in self . LINE_SEP_EXPR . split ( data ) : buffer . write ( 'data: {}' . format ( chunk ) ) buffer . write ( self . _sep ) if retry is not None : if not isinstance ( retry , int ) : raise TypeError ( 'retry argument must be int' ) buffer . write ( 'retry: {}' . format ( retry ) ) buffer . write ( self . _sep ) buffer . write ( self . _sep ) await self . write ( buffer . getvalue ( ) . encode ( 'utf-8' ) ) | Send data using EventSource protocol |
57,233 | async def wait ( self ) : if self . _ping_task is None : raise RuntimeError ( 'Response is not started' ) with contextlib . suppress ( asyncio . CancelledError ) : await self . _ping_task | EventSourceResponse object is used for streaming data to the client this method returns future so we can wain until connection will be closed or other task explicitly call stop_streaming method . |
57,234 | def ping_interval ( self , value ) : if not isinstance ( value , int ) : raise TypeError ( "ping interval must be int" ) if value < 0 : raise ValueError ( "ping interval must be greater then 0" ) self . _ping_interval = value | Setter for ping_interval property . |
57,235 | def get_parser ( segmenter , ** options ) : if segmenter == 'nlapi' : return NLAPIParser ( ** options ) elif segmenter == 'mecab' : return MecabParser ( ) elif segmenter == 'tinysegmenter' : return TinysegmenterParser ( ) else : raise ValueError ( 'Segmenter {} is not supported.' . format ( segmenter ) ) | Gets a parser . |
57,236 | def preprocess ( source ) : doc = html5lib . parseFragment ( source ) source = ET . tostring ( doc , encoding = 'utf-8' , method = 'text' ) . decode ( 'utf-8' ) source = source . replace ( u'\n' , u'' ) . strip ( ) source = re . sub ( r'\s\s+' , u' ' , source ) return source | Removes unnecessary break lines and white spaces . |
57,237 | def main ( ) : args = docopt ( __doc__ ) if args [ '--version' ] : print ( __version__ ) sys . exit ( ) result = parse ( args [ '<source>' ] , segmenter = args [ '--segmenter' ] , language = args [ '--language' ] , classname = args [ '--classname' ] ) print ( result [ 'html_code' ] ) sys . exit ( ) | Budou main method for the command line tool . |
57,238 | def parse ( source , segmenter = 'nlapi' , language = None , max_length = None , classname = None , attributes = None , ** kwargs ) : parser = get_parser ( segmenter , ** kwargs ) return parser . parse ( source , language = language , max_length = max_length , classname = classname , attributes = attributes ) | Parses input source . |
57,239 | def authenticate ( json_path = None ) : msg = ( 'budou.authentication() is deprecated. ' 'Please use budou.get_parser() to obtain a parser instead.' ) warnings . warn ( msg , DeprecationWarning ) parser = get_parser ( 'nlapi' , credentials_path = json_path ) return parser | Gets a Natural Language API parser by authenticating the API . |
57,240 | def _memorize ( func ) : def _wrapper ( self , * args , ** kwargs ) : if self . use_cache : cache = load_cache ( self . cache_filename ) original_key = ':' . join ( [ self . __class__ . __name__ , func . __name__ , '_' . join ( [ str ( a ) for a in args ] ) , '_' . join ( [ str ( w ) for w in kwargs . values ( ) ] ) ] ) cache_key = hashlib . md5 ( original_key . encode ( 'utf-8' ) ) . hexdigest ( ) cached_val = cache . get ( cache_key ) if cached_val : return cached_val val = func ( self , * args , ** kwargs ) if self . use_cache : cache . set ( cache_key , val ) return val return _wrapper | Decorator to cache the given function s output . |
57,241 | def _get_source_chunks ( self , input_text , language = None ) : chunks = ChunkList ( ) seek = 0 result = self . _get_annotations ( input_text , language = language ) tokens = result [ 'tokens' ] language = result [ 'language' ] for i , token in enumerate ( tokens ) : word = token [ 'text' ] [ 'content' ] begin_offset = token [ 'text' ] [ 'beginOffset' ] label = token [ 'dependencyEdge' ] [ 'label' ] pos = token [ 'partOfSpeech' ] [ 'tag' ] if begin_offset > seek : chunks . append ( Chunk . space ( ) ) seek = begin_offset chunk = Chunk ( word , pos , label ) if chunk . label in _DEPENDENT_LABEL : chunk . dependency = i < token [ 'dependencyEdge' ] [ 'headTokenIndex' ] if chunk . is_punct ( ) : chunk . dependency = chunk . is_open_punct ( ) chunks . append ( chunk ) seek += len ( word ) return chunks , language | Returns a chunk list retrieved from Syntax Analysis results . |
57,242 | def _group_chunks_by_entities ( self , chunks , entities ) : for entity in entities : chunks_to_concat = chunks . get_overlaps ( entity [ 'beginOffset' ] , len ( entity [ 'content' ] ) ) if not chunks_to_concat : continue new_chunk_word = u'' . join ( [ chunk . word for chunk in chunks_to_concat ] ) new_chunk = Chunk ( new_chunk_word ) chunks . swap ( chunks_to_concat , new_chunk ) return chunks | Groups chunks by entities retrieved from NL API Entity Analysis . |
57,243 | def _get_annotations ( self , text , language = '' ) : body = { 'document' : { 'type' : 'PLAIN_TEXT' , 'content' : text , } , 'features' : { 'extract_syntax' : True , } , 'encodingType' : 'UTF32' , } if language : body [ 'document' ] [ 'language' ] = language request = self . service . documents ( ) . annotateText ( body = body ) response = request . execute ( ) tokens = response . get ( 'tokens' , [ ] ) language = response . get ( 'language' ) return { 'tokens' : tokens , 'language' : language } | Returns the list of annotations retrieved from the given text . |
57,244 | def _get_entities ( self , text , language = '' ) : body = { 'document' : { 'type' : 'PLAIN_TEXT' , 'content' : text , } , 'encodingType' : 'UTF32' , } if language : body [ 'document' ] [ 'language' ] = language request = self . service . documents ( ) . analyzeEntities ( body = body ) response = request . execute ( ) result = [ ] for entity in response . get ( 'entities' , [ ] ) : mentions = entity . get ( 'mentions' , [ ] ) if not mentions : continue entity_text = mentions [ 0 ] [ 'text' ] offset = entity_text [ 'beginOffset' ] for word in entity_text [ 'content' ] . split ( ) : result . append ( { 'content' : word , 'beginOffset' : offset } ) offset += len ( word ) return result | Returns the list of entities retrieved from the given text . |
57,245 | def get ( self , key ) : self . _create_file_if_none_exists ( ) with open ( self . filename , 'rb' ) as file_object : cache_pickle = pickle . load ( file_object ) val = cache_pickle . get ( key , None ) return val | Gets a value by a key . |
57,246 | def set ( self , key , val ) : self . _create_file_if_none_exists ( ) with open ( self . filename , 'r+b' ) as file_object : cache_pickle = pickle . load ( file_object ) cache_pickle [ key ] = val file_object . seek ( 0 ) pickle . dump ( cache_pickle , file_object ) | Sets a value in a key . |
57,247 | def serialize ( self ) : return { 'word' : self . word , 'pos' : self . pos , 'label' : self . label , 'dependency' : self . dependency , 'has_cjk' : self . has_cjk ( ) , } | Returns serialized chunk data in dictionary . |
57,248 | def has_cjk ( self ) : cjk_codepoint_ranges = [ ( 4352 , 4607 ) , ( 11904 , 42191 ) , ( 43072 , 43135 ) , ( 44032 , 55215 ) , ( 63744 , 64255 ) , ( 65072 , 65103 ) , ( 65381 , 65500 ) , ( 131072 , 196607 ) ] for char in self . word : if any ( [ start <= ord ( char ) <= end for start , end in cjk_codepoint_ranges ] ) : return True return False | Checks if the word of the chunk contains CJK characters . |
57,249 | def get_overlaps ( self , offset , length ) : if '' . join ( [ chunk . word for chunk in self ] ) [ offset ] == ' ' : offset += 1 index = 0 result = ChunkList ( ) for chunk in self : if offset < index + len ( chunk . word ) and index < offset + length : result . append ( chunk ) index += len ( chunk . word ) return result | Returns chunks overlapped with the given range . |
57,250 | def swap ( self , old_chunks , new_chunk ) : indexes = [ self . index ( chunk ) for chunk in old_chunks ] del self [ indexes [ 0 ] : indexes [ - 1 ] + 1 ] self . insert ( indexes [ 0 ] , new_chunk ) | Swaps old consecutive chunks with new chunk . |
57,251 | def resolve_dependencies ( self ) : self . _concatenate_inner ( True ) self . _concatenate_inner ( False ) self . _insert_breaklines ( ) | Resolves chunk dependency by concatenating them . |
57,252 | def _concatenate_inner ( self , direction ) : tmp_bucket = [ ] source_chunks = self if direction else self [ : : - 1 ] target_chunks = ChunkList ( ) for chunk in source_chunks : if ( chunk . dependency == direction or ( direction is False and chunk . is_space ( ) ) ) : tmp_bucket . append ( chunk ) continue tmp_bucket . append ( chunk ) if not direction : tmp_bucket = tmp_bucket [ : : - 1 ] new_word = '' . join ( [ tmp_chunk . word for tmp_chunk in tmp_bucket ] ) new_chunk = Chunk ( new_word , pos = chunk . pos , label = chunk . label , dependency = chunk . dependency ) target_chunks . append ( new_chunk ) tmp_bucket = ChunkList ( ) if tmp_bucket : target_chunks += tmp_bucket if not direction : target_chunks = target_chunks [ : : - 1 ] self . list = target_chunks | Concatenates chunks based on each chunk s dependency . |
57,253 | def _insert_breaklines ( self ) : target_chunks = ChunkList ( ) for chunk in self : if chunk . word [ - 1 ] == ' ' and chunk . has_cjk ( ) : chunk . word = chunk . word [ : - 1 ] target_chunks . append ( chunk ) target_chunks . append ( chunk . breakline ( ) ) else : target_chunks . append ( chunk ) self . list = target_chunks | Inserts a breakline instead of a trailing space if the chunk is in CJK . |
57,254 | def html_serialize ( self , attributes , max_length = None ) : doc = ET . Element ( 'span' ) for chunk in self : if ( chunk . has_cjk ( ) and not ( max_length and len ( chunk . word ) > max_length ) ) : ele = ET . Element ( 'span' ) ele . text = chunk . word for key , val in attributes . items ( ) : ele . attrib [ key ] = val doc . append ( ele ) else : if doc . getchildren ( ) : if doc . getchildren ( ) [ - 1 ] . tail is None : doc . getchildren ( ) [ - 1 ] . tail = chunk . word else : doc . getchildren ( ) [ - 1 ] . tail += chunk . word else : if doc . text is None : doc . text = chunk . word else : doc . text += chunk . word result = ET . tostring ( doc , encoding = 'utf-8' ) . decode ( 'utf-8' ) result = html5lib . serialize ( html5lib . parseFragment ( result ) , sanitize = True , quote_attr_values = 'always' ) return result | Returns concatenated HTML code with SPAN tag . |
57,255 | def _etextno_to_uri_subdirectory ( etextno ) : str_etextno = str ( etextno ) . zfill ( 2 ) all_but_last_digit = list ( str_etextno [ : - 1 ] ) subdir_part = "/" . join ( all_but_last_digit ) subdir = "{}/{}" . format ( subdir_part , etextno ) return subdir | Returns the subdirectory that an etextno will be found in a gutenberg mirror . Generally one finds the subdirectory by separating out each digit of the etext number and uses it for a directory . The exception here is for etext numbers less than 10 which are prepended with a 0 for the directory traversal . |
57,256 | def _format_download_uri_for_extension ( etextno , extension , mirror = None ) : mirror = mirror or _GUTENBERG_MIRROR root = mirror . strip ( ) . rstrip ( '/' ) path = _etextno_to_uri_subdirectory ( etextno ) uri = '{root}/{path}/{etextno}{extension}' . format ( root = root , path = path , etextno = etextno , extension = extension ) return uri | Returns the download location on the Project Gutenberg servers for a given text and extension . The list of available extensions for a given text can be found via the formaturi metadata extractor . |
57,257 | def _format_download_uri ( etextno , mirror = None , prefer_ascii = False ) : mirror = mirror or _GUTENBERG_MIRROR if not _does_mirror_exist ( mirror ) : raise UnknownDownloadUriException ( 'Could not reach Gutenberg mirror "{:s}". Try setting a ' 'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for ' '--mirror flag or GUTENBERG_MIRROR environment variable.' . format ( mirror ) ) ascii_first = ( '.txt' , '-0.txt' , '-8.txt' ) utf8_first = ( '-0.txt' , '-8.txt' , '.txt' ) extensions = ascii_first if prefer_ascii else utf8_first for extension in extensions : uri = _format_download_uri_for_extension ( etextno , extension , mirror ) if _does_uri_exist ( uri ) : return uri raise UnknownDownloadUriException ( 'Failed to find a textual download candidate for {} on {}. ' 'Either the book does not exist or it is only available in ' 'non-textual formats.' . format ( etextno , mirror ) ) | Returns the download location on the Project Gutenberg servers for a given text . |
57,258 | def load_etext ( etextno , refresh_cache = False , mirror = None , prefer_ascii = False ) : etextno = validate_etextno ( etextno ) cached = os . path . join ( _TEXT_CACHE , '{}.txt.gz' . format ( etextno ) ) if refresh_cache : remove ( cached ) if not os . path . exists ( cached ) : makedirs ( os . path . dirname ( cached ) ) download_uri = _format_download_uri ( etextno , mirror , prefer_ascii ) response = requests . get ( download_uri ) if response . encoding != response . apparent_encoding : response . encoding = response . apparent_encoding text = response . text with closing ( gzip . open ( cached , 'w' ) ) as cache : cache . write ( text . encode ( 'utf-8' ) ) with closing ( gzip . open ( cached , 'r' ) ) as cache : text = cache . read ( ) . decode ( 'utf-8' ) return text | Returns a unicode representation of the full body of a Project Gutenberg text . After making an initial remote call to Project Gutenberg s servers the text is persisted locally . |
def disable_logging(logger=None):
    """Temporarily suppress all logging for *logger* (root logger by default).

    Generator intended for use with contextlib.contextmanager.  The previous
    ``disabled`` state is restored in a ``finally`` block, so it survives
    exceptions raised by the managed body (the original leaked
    ``disabled=True`` in that case).
    """
    logger = logger or logging.getLogger()
    previously_disabled = logger.disabled
    logger.disabled = True
    try:
        yield
    finally:
        logger.disabled = previously_disabled
def makedirs(*args, **kwargs):
    """os.makedirs wrapper that tolerates already-existing directories."""
    try:
        os.makedirs(*args, **kwargs)
    except OSError as error:
        # Swallow only "already exists"; everything else is a real failure.
        if error.errno != errno.EEXIST:
            raise
def remove(path):
    """Delete *path*, dispatching to shutil.rmtree for directories and
    os.remove for files; missing paths are silently ignored."""
    if not os.path.exists(path):
        return None
    if os.path.isdir(path):
        deleter = shutil.rmtree
    elif os.path.isfile(path):
        deleter = os.remove
    else:
        return None
    return deleter(path)
def determine_encoding(path, default=None):
    """Determine the encoding of a file based on its byte order mark.

    Returns *default* when the file cannot be read or has no recognised BOM.
    """
    # UTF-32 must be tested before UTF-16: the UTF-32-LE BOM (FF FE 00 00)
    # starts with the UTF-16-LE BOM (FF FE), so checking the shorter mark
    # first misdetects UTF-32 files as UTF-16 (the original's bug).
    byte_order_marks = (
        ('utf-8-sig', (codecs.BOM_UTF8,)),
        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
    )
    try:
        with open(path, 'rb') as infile:
            raw = infile.read(4)
    except IOError:
        return default
    for encoding, boms in byte_order_marks:
        # str.startswith accepts a tuple of candidate prefixes.
        if raw.startswith(boms):
            return encoding
    return default
def reopen_encoded(fileobj, mode='r', fallback_encoding=None):
    """Close *fileobj* and reopen it with an encoding detected from its BOM,
    falling back to *fallback_encoding* when none is found."""
    detected = determine_encoding(fileobj.name, fallback_encoding)
    fileobj.close()
    return open(fileobj.name, mode, encoding=detected)
def get_metadata(feature_name, etextno):
    """Look up the values of a meta-data feature for a given text.

    Returns an immutable (frozen) set of all matching values.
    """
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_metadata(etextno))
def get_etexts(feature_name, value):
    """Look up all the texts whose meta-data matches the given criterion.

    Returns an immutable (frozen) set of matching text identifiers.
    """
    extractor = MetadataExtractor.get(feature_name)
    return frozenset(extractor.get_etexts(value))
def _uri_to_etext(cls, uri_ref):
    """Convert a meta-data RDF graph URI into an integer text identifier.

    Returns None when the URI does not name a valid e-text id.
    """
    try:
        candidate = int(os.path.basename(uri_ref.toPython()))
        return validate_etextno(candidate)
    except InvalidEtextIdException:
        return None
def _implementations(cls):
    """Return a mapping of feature name -> concrete MetadataExtractor
    subclass, computed once and memoized on the class."""
    if cls.__implementations:
        return cls.__implementations
    registry = {}
    for subclass in all_subclasses(MetadataExtractor):
        try:
            registry[subclass.feature_name()] = subclass
        except NotImplementedError:
            # Abstract intermediates declare no feature name; skip them.
            pass
    cls.__implementations = registry
    return cls.__implementations
def get(feature_name):
    """Return the MetadataExtractor registered for *feature_name*.

    Raises UnsupportedFeatureException (listing the supported features)
    when no extractor matches.
    """
    implementations = MetadataExtractor._implementations()
    if feature_name in implementations:
        return implementations[feature_name]
    raise UnsupportedFeatureException(
        'no MetadataExtractor registered for feature "{feature_name}" '
        '(try any of the following: {supported_features})'
        .format(feature_name=feature_name,
                supported_features=', '.join(sorted(implementations))))
def set_metadata_cache(cache):
    """Install *cache* as the process-wide metadata cache.

    Any currently open cache is closed first.
    """
    global _METADATA_CACHE
    previous = _METADATA_CACHE
    if previous and previous.is_open:
        previous.close()
    _METADATA_CACHE = cache
def _create_metadata_cache(cache_location):
    """Create the metadata cache backend best suited for this platform.

    Prefers a remote Fuseki server when GUTENBERG_FUSEKI_URL is set, then
    BSD-DB, then SQLite as a last resort.
    """
    fuseki_url = os.getenv('GUTENBERG_FUSEKI_URL')
    if fuseki_url:
        return FusekiMetadataCache(cache_location, fuseki_url)
    try:
        return SleepycatMetadataCache(cache_location)
    except InvalidCacheException:
        logging.warning('Unable to create cache based on BSD-DB. '
                        'Falling back to SQLite backend. '
                        'Performance may be degraded significantly.')
        return SqliteMetadataCache(cache_location)
def open(self):
    """Open an existing cache.

    Raises InvalidCacheException when the cache is missing or corrupt.
    """
    try:
        self.graph.open(self.cache_uri, create=False)
        self._add_namespaces(self.graph)
        # Only mark the cache open once both steps above succeeded.
        self.is_open = True
    except Exception:
        raise InvalidCacheException('The cache is invalid or not created')
def populate(self):
    """Populate a new cache from the Project Gutenberg catalog dump.

    Raises CacheAlreadyExistsException when a cache already exists.
    """
    if self.exists:
        raise CacheAlreadyExistsException('location: %s' % self.cache_uri)
    self._populate_setup()
    # Stream every triple from the downloaded catalog into the graph,
    # closing the graph when done.
    with closing(self.graph), self._download_metadata_archive() as archive:
        for fact in self._iter_metadata_triples(archive):
            self._add_to_graph(fact)
def refresh(self):
    """Rebuild the cache from scratch: drop any existing data, repopulate
    from the catalog, then open the fresh cache."""
    if self.exists:
        self.delete()
    self.populate()
    self.open()
def _download_metadata_archive(self):
    """Download the complete Project Gutenberg RDF meta-data catalog.

    Generator context manager yielding the path of a temporary file that
    holds the catalog.  The temporary file is removed in a ``finally``
    block so it is cleaned up even when the managed body raises (the
    original leaked the file in that case).
    """
    with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
        shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
    try:
        yield metadata_archive.name
    finally:
        remove(metadata_archive.name)
def _metadata_is_invalid(cls, fact):
    """Report whether an RDF triple is malformed.

    A fact is invalid when any of its tokens is a URIRef containing a space.
    """
    for token in fact:
        if isinstance(token, URIRef) and ' ' in token:
            return True
    return False
def _iter_metadata_triples(cls, metadata_archive_path):
    """Yield all valid meta-data triples of Project Gutenberg texts
    contained in the catalog tarball, skipping malformed ones."""
    pg_rdf_regex = re.compile(r'pg\d+.rdf$')
    with closing(tarfile.open(metadata_archive_path)) as metadata_archive:
        for item in metadata_archive:
            if not pg_rdf_regex.search(item.name):
                continue
            # rdflib logs noisily while parsing; silence it for the parse.
            with disable_logging():
                extracted = metadata_archive.extractfile(item)
                graph = Graph().parse(extracted)
            for fact in graph:
                if cls._metadata_is_invalid(fact):
                    logging.info('skipping invalid triple %s', fact)
                else:
                    yield fact
def _populate_setup(self):
    """Create a local marker file recording the remote cache URI.

    The actual database already lives on the Fuseki server, so local setup
    is just the marker plus opening the graph.
    """
    makedirs(os.path.dirname(self._cache_marker))
    with codecs.open(self._cache_marker, 'w', encoding='utf-8') as marker:
        marker.write(self.cache_uri)
    self.graph.open(self.cache_uri)
def delete(self):
    """Delete the local marker file and clear all data on the Fuseki server."""
    MetadataCache.delete(self)
    try:
        # The marker alone is local state; clear the remote dataset too.
        self.graph.query('DELETE WHERE { ?s ?p ?o . }')
    except ResultException:
        logging.exception('error when deleting graph')
def _metadata_is_invalid(cls, fact):
    """Extend the base validity check to also reject triples containing
    blank nodes, which SPARQLUpdateStore cannot persist."""
    if MetadataCache._metadata_is_invalid(fact):
        return True
    return any(isinstance(token, BNode) for token in fact)
def all_subclasses(cls):
    """Recursively collect every direct and indirect subclass of *cls*."""
    collected = set()
    for subclass in cls.__subclasses__():
        collected.add(subclass)
        collected |= all_subclasses(subclass)
    return collected
def _collapse_cursor(self, parts):
    """Act on CursorMoveUp commands by deleting the preceding tokens they
    overwrite; empty tokens are dropped."""
    collapsed = []
    for token in parts:
        if not token:
            continue
        if token == CursorMoveUp:
            # Drop the token being overwritten, then everything back to
            # (but not including) the start of its line.
            if collapsed:
                collapsed.pop()
            while collapsed and '\n' not in collapsed[-1]:
                collapsed.pop()
        else:
            collapsed.append(token)
    return collapsed
def prepare(self, ansi='', ensure_trailing_newline=False):
    """Convert *ansi* into body text plus styles and cache the result on
    ``self._attrs``, which is also returned."""
    body, styles = self.apply_regex(ansi)
    if ensure_trailing_newline and _needs_extra_newline(body):
        body += '\n'
    attrs = {
        'dark_bg': self.dark_bg,
        'line_wrap': self.line_wrap,
        'font_size': self.font_size,
        'body': body,
        'styles': styles,
    }
    self._attrs = attrs
    return attrs
def run(self):
    """Run the build_rust sub-command (when Rust extensions are present)
    before delegating to the normal build_ext logic."""
    if self.has_rust_extensions():
        log.info("running build_rust")
        build_rust_cmd = self.get_finalized_command("build_rust")
        build_rust_cmd.inplace = self.inplace
        build_rust_cmd.run()
    _build_ext.run(self)
def get_lib_name(self):
    """Parse Cargo.toml to get the name of the shared library.

    Prefers [lib] name over [package] name, and normalises separators so
    the result is a valid module name.
    """
    import toml
    manifest = toml.load(self.path)
    name = manifest.get("lib", {}).get("name")
    if name is None:
        name = manifest.get("package", {}).get("name")
    if name is None:
        raise Exception(
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for 'name' key "
            "in both the [package] section and the [lib] section")
    return re.sub(r"[./\\-]", "_", name)
def find_rust_extensions(*directories, **kwargs):
    """Walk *directories* (default: the cwd) and build a RustExtension for
    every directory containing a ``lib.rs`` (name overridable via the
    ``libfile`` keyword, which is also forwarded to RustExtension)."""
    libfile = kwargs.get("libfile", "lib.rs")
    search_roots = directories or [os.getcwd()]
    extensions = []
    for root in search_roots:
        for base, _dirs, files in os.walk(root):
            if libfile not in files:
                continue
            dotted_name = os.path.relpath(base).replace(os.path.sep, ".")
            manifest = os.path.join(base, "Cargo.toml")
            extension = RustExtension(dotted_name, manifest, **kwargs)
            extension.libfile = os.path.join(base, libfile)
            extensions.append(extension)
    return extensions
def register(self, event, fn):
    """Register *fn* as a handler for *event*.

    Registration is idempotent: the same handler is stored at most once
    per event.
    """
    handlers = self._handler_dict.setdefault(event, [])
    if fn not in handlers:
        handlers.append(fn)
def apply(self, event, document, *args, **kwargs):
    """Invoke every middleware function registered for *event*, in
    registration order, passing *document* as the primary argument."""
    for handler in self._handler_dict.get(event, []):
        handler(document, *args, **kwargs)
def deregister(self, event, fn):
    """Remove *fn* from *event*'s handler list; silently a no-op when the
    event or the handler is not registered."""
    handlers = self._handler_dict.get(event)
    if handlers and fn in handlers:
        handlers.remove(fn)
def unpack_scope(cls, scope):
    """Normalise a scope function's return value.

    Accepts a bare query dict, or a tuple of up to three elements
    (query[, projection[, options]]); missing parts default to ``{}``.
    Raises ValueError for anything else.
    """
    if isinstance(scope, dict):
        return scope, {}, {}
    if isinstance(scope, tuple) and len(scope) <= 3:
        # Pad with empty dicts so the unpacking is uniform.
        padded = tuple(scope) + ({},) * (3 - len(scope))
        return padded[0], padded[1], padded[2]
    raise ValueError("Invalid scope")
def register_fn(cls, f):
    """Register scope function *f* on this builder class.

    The generated method merges f's (query, projection, options) into
    deep copies of the builder's current state and returns a new
    ScopeBuilder, keeping scope chaining immutable.
    """
    def inner(self, *args, **kwargs):
        try:
            query, projection, options = cls.unpack_scope(f(*args, **kwargs))
            new_query = deepcopy(self.query)
            new_projection = deepcopy(self.projection)
            new_options = deepcopy(self.options)
            deep_merge(query, new_query)
            new_projection.update(projection)
            new_options.update(options)
            return ScopeBuilder(self.model, self.fns, new_query,
                                new_projection, new_options)
        except ValueError:
            # Fixed: the original message had a mangled escape
            # (`\"{}\ returns`) that left the quote unclosed.
            raise ValueError(
                "Scope function \"{}\" returns an invalid scope".format(
                    f.__name__))
    setattr(cls, f.__name__, inner)
def cursor(self):
    """Return a cursor for the currently assembled query, creating and
    memoizing it on first access."""
    existing = self._active_cursor
    if not existing:
        # An empty projection is passed to find() as None.
        existing = self.model.find(self.query,
                                   self.projection or None,
                                   **self.options)
        self._active_cursor = existing
    return existing
def _ensure_object_id(cls, id):
    """Wrap *id* in an ObjectId when it is a well-formed ObjectId string;
    values that are already ObjectIds (or anything else) pass through
    unchanged."""
    if isinstance(id, ObjectId):
        return id
    looks_like_oid = isinstance(id, basestring) and OBJECTIDEXPR.match(id)
    return ObjectId(id) if looks_like_oid else id
def apply_defaults(self):
    """Apply schema defaults to this document, bracketing the operation
    with will/did lifecycle events."""
    self.emit('will_apply_defaults')
    self.schema.apply_defaults(self)
    self.emit('did_apply_defaults')
def reload(self):
    """Reload this model's data from the underlying database record,
    updating it in place and bracketing with will/did events."""
    self.emit('will_reload')
    spec = type(self)._id_spec(self['_id'])
    self.populate(self.collection.find_one(spec))
    self.emit('did_reload')
def on(cls, event, handler_func=None):
    """Register a handler for *event* on this model class.

    Usable directly (``Model.on("save", fn)``) or as a decorator factory
    (``@Model.on("save")``), in which case the decorated function is
    returned unchanged.
    """
    if handler_func:
        cls.handler_registrar().register(event, handler_func)
        return None

    def register(fn):
        cls.handler_registrar().register(event, fn)
        return fn
    return register
57,296 | def _emit ( self , event , document , * args , ** kwargs ) : self . handler_registrar ( ) . apply ( event , document , * args , ** kwargs ) | Inner version of emit which passes the given document as the primary argument to handler functions . |
def emit(self, event, *args, **kwargs):
    """Emit *event* from this document.

    The document itself is forwarded as the handlers' primary argument.
    """
    self._emit(event, self, *args, **kwargs)
def static_method(cls, f):
    """Attach *f* to *cls* as a static method; returns *f* unchanged so it
    can be used as a decorator."""
    wrapped = staticmethod(f)
    setattr(cls, f.__name__, wrapped)
    return f
def class_method(cls, f):
    """Attach *f* to *cls* as a classmethod; returns *f* unchanged so it
    can be used as a decorator."""
    wrapped = classmethod(f)
    setattr(cls, f.__name__, wrapped)
    return f
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.