idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
53,900
async def monitor_tasks(self, interval: float = 1.0) -> None:
    """Monitor all known tasks for run state.

    Ensure that enabled tasks are running and that disabled tasks are
    stopped.  Runs until cancelled, or — when ``terminate_on_finish`` is
    set — until every running task has completed, at which point the
    event loop is asked to terminate.
    """
    Log.debug('monitor running')
    while True:
        try:
            await asyncio.sleep(interval)
            for name, task in self.all_tasks.items():
                if self.terminate_on_finish:
                    # shutting down: stop anything still running
                    if task in self.running_tasks and task.running:
                        await task.stop()
                elif task.enabled:
                    if task not in self.running_tasks:
                        Log.debug('task %s enabled, restarting', task.name)
                        await self.insert(task)
                else:
                    if task in self.running_tasks:
                        Log.debug('task %s disabled, stopping', task.name)
                        await task.stop()
            if self.terminate_on_finish and not self.running_tasks:
                Log.debug('all tasks completed, terminating')
                break
        except CancelledError:
            Log.debug('monitor cancelled')
            break
        except Exception:
            # keep the monitor alive even if a single pass fails
            Log.exception('monitoring exception')
    self.monitor = None
    # schedule termination on the loop rather than calling it inline
    self.loop.call_later(0, self.terminate)
Monitor all known tasks for run state . Ensure that enabled tasks are running and that disabled tasks are stopped .
53,901
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
    """Log unhandled exceptions raised anywhere in the event loop."""
    message = context['message']
    Log.error('unhandled exception: %s', message)
    Log.error('%s', context)
    if 'exception' in context:
        Log.error(' %s', context['exception'])
Log unhandled exceptions from anywhere in the event loop .
53,902
def sigint(self) -> None:
    """Handle Ctrl-C: stop tasks gracefully on the first press, then
    forcefully, and finally halt the event loop on a third press."""
    attempts = self.stop_attempts
    if attempts < 1:
        Log.info('gracefully stopping tasks')
        self.stop_attempts += 1
        self.terminate()
    elif attempts < 2:
        Log.info('forcefully cancelling tasks')
        self.stop_attempts += 1
        self.terminate(force=True)
    else:
        Log.info('forcefully stopping event loop')
        self.loop.stop()
Handle the user pressing Ctrl - C by stopping tasks nicely at first then forcibly upon further presses .
53,903
def sigterm(self) -> None:
    """Handle SIGTERM by stopping tasks gracefully; repeated signals are
    ignored while waiting for tasks to finish."""
    if self.stop_attempts >= 1:
        Log.info('received SIGTERM, bravely waiting for tasks')
        return
    Log.info('received SIGTERM, gracefully stopping tasks')
    self.stop_attempts += 1
    self.terminate()
Handle SIGTERM from the system by stopping tasks gracefully . Repeated signals will be ignored while waiting for tasks to finish .
53,904
def parse(cls, parser, text, pos):
    """Check whether the terminal token is a keyword after lower-casing it.

    Returns (remaining_text, keyword_instance) on success, otherwise
    (text, SyntaxError).
    """
    match = cls.regex.match(text)
    if not match:
        return text, SyntaxError("expecting " + repr(cls.__name__))
    token = match.group(0)
    if token.lower() not in cls.grammar:
        return text, SyntaxError(
            repr(token) + " is not a member of " + repr(cls.grammar))
    return text[len(token):], cls(token)
Checks if terminal token is a keyword after lower - casing it .
53,905
def parse(cls, parser, text, pos):
    """Parse an InspireKeyword, raising the texkey flag on the parser
    when the keyword is 'texkey'."""
    try:
        remaining_text, keyword = parser.parse(text, cls.grammar)
        if keyword.lower() == 'texkey':
            parser._parsing_texkey_expression = True
        return remaining_text, InspireKeyword(keyword)
    except SyntaxError as error:
        parser._parsing_texkey_expression = False
        return text, error
Parse InspireKeyword .
53,906
def parse_terminal_token(cls, parser, text):
    """Parse a terminal token that contains neither parentheses nor a
    colon symbol.

    Returns (remaining_text, matched_token) on success, otherwise
    (text, SyntaxError).
    """
    token_regex = cls.token_regex
    # texkey values need a more permissive regex; the flag is one-shot,
    # so it is reset as soon as it is consumed.
    if parser._parsing_texkey_expression:
        token_regex = cls.texkey_token_regex
        parser._parsing_texkey_expression = False
    match = token_regex.match(text)
    if match:
        matched_token = match.group(0)
        # DSL keywords are not plain terminals, unless we are inside a
        # parenthesized terminal.
        if not parser._parsing_parenthesized_terminal and matched_token.lower() in Keyword.table:
            return text, SyntaxError("found DSL keyword: " + matched_token)
        remaining_text = text[len(matched_token):]
        # A following colon means this token is actually a keyword.
        if cls.starts_with_colon.match(remaining_text):
            return text, SyntaxError("parsing a keyword (token followed by \":\"): \"" + repr(matched_token) + "\"")
        # Full (non-shortened) INSPIRE keywords are not terminals either.
        if not parser._parsing_parenthesized_simple_values_expression and matched_token in INSPIRE_KEYWORDS_SET:
            return text, SyntaxError("parsing a keyword (non shortened INSPIRE keyword)")
        result = remaining_text, matched_token
    else:
        result = text, SyntaxError("expecting match on " + repr(cls.token_regex.pattern))
    return result
Parses a terminal token that doesn't contain parentheses or a colon symbol .
53,907
def parse(cls, parser, text, pos):
    """Parse a SimpleValueUnit: a date specifier, an arXiv identifier,
    a plain terminal token, or a parenthesized token — in that order.

    Returns (remaining_text, SimpleValueUnit) on success, otherwise
    (text, SyntaxError).
    """
    found = False
    # 1. date specifier
    match = cls.date_specifiers_regex.match(text)
    if match:
        remaining_text, token, found = text[len(match.group(0)):], match.group(0), True
    else:
        # 2. arXiv identifier (group(2) drops the prefix)
        match = cls.arxiv_token_regex.match(text)
        if match:
            remaining_text, token, found = text[len(match.group()):], match.group(2), True
        else:
            # 3. plain terminal token
            remaining_text, token = SimpleValueUnit.parse_terminal_token(parser, text)
            if type(token) != SyntaxError:
                found = True
            else:
                # 4. parenthesized token, with the parser flag raised so
                # terminal parsing behaves accordingly inside parentheses
                try:
                    parser._parsing_parenthesized_terminal = True
                    remaining_text, token = parser.parse(text, cls.parenthesized_token_grammar, pos)
                    found = True
                except SyntaxError:
                    pass
                except GrammarValueError:
                    # genuine grammar misconfiguration: propagate
                    raise
                except ValueError:
                    pass
                finally:
                    parser._parsing_parenthesized_terminal = False
    if found:
        result = remaining_text, SimpleValueUnit(token)
    else:
        result = text, SyntaxError("expecting match on " + cls.__name__)
    return result
Imitates parsing a list grammar .
53,908
def unconsume_and_reconstruct_input(remaining_text, recognized_tokens, complex_value_idx):
    """Reconstruct the input in case of consuming a keyword query or a
    value query with ComplexValue as value.

    Un-consumes either the (keyword, complex value) pair or only the
    complex value token, splicing its text back onto the remaining
    input so it can be re-parsed.
    """
    # Assume the token two places before the complex value is an INSPIRE
    # keyword; fall back to un-consuming only the complex value if not.
    slicing_start_idx = 2
    if not INSPIRE_PARSER_KEYWORDS.get(recognized_tokens[complex_value_idx - slicing_start_idx].value, None):
        slicing_start_idx = 1
    reconstructed_terminals = recognized_tokens[:complex_value_idx - slicing_start_idx]
    # NOTE(review): un-consumed token values are joined with no separator
    # before being prepended — presumably tokens carry their own spacing;
    # confirm against the tokenizer.
    reconstructed_text = '{} {}'.format(
        ''.join([token.value for token in recognized_tokens[complex_value_idx - slicing_start_idx:]]),
        remaining_text)
    return reconstructed_text, reconstructed_terminals
Reconstruct input in case of consuming a keyword query or a value query with ComplexValue as value .
53,909
def parse(cls, parser, text, pos):
    """Parse with the parenthesized-simple-values flag raised so that
    terminal parsing knows it is inside such an expression."""
    parser._parsing_parenthesized_simple_values_expression = True
    try:
        return parser.parse(text, cls.grammar)
    except SyntaxError as e:
        return text, e
    finally:
        parser._parsing_parenthesized_simple_values_expression = False
Using our own parse to enable the flag below .
53,910
def _generate_fieldnames_if_bai_query(self, node_value, bai_field_variation, query_bai_field_if_dots_in_name):
    """Generate new fieldnames in case of a BAI query.

    Returns the list of fieldnames to query, or None when the value
    does not look like a BAI-style author query.  Raises ValueError for
    an unsupported field variation.
    """
    if bai_field_variation not in (FieldVariations.search, FieldVariations.raw):
        raise ValueError('Non supported field variation "{}".'.format(bai_field_variation))
    normalized_author_name = normalize_name(node_value).strip('.')
    if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and ElasticSearchVisitor.BAI_REGEX.match(node_value):
        # The value itself is a BAI: query only the BAI field.
        return [ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.' + bai_field_variation]
    elif not whitespace.search(normalized_author_name) and query_bai_field_if_dots_in_name and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and '.' in normalized_author_name:
        # Single dotted token (e.g. "J.Smith"): query the BAI field in
        # addition to the regular author field.
        return [ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.' + bai_field_variation] + force_list(ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'])
    else:
        return None
Generates new fieldnames in case of BAI query .
53,911
def _generate_author_query(self, author_name):
    """Generate a query handling specifically authors.

    Filters on the author name-variations field and matches the full
    name against the author field, wrapped in a nested query.
    """
    name_variations = [name_variation.lower()
                       for name_variation in generate_minimal_name_variations(author_name)]
    if author_name_contains_fullnames(author_name):
        # Full names present: require agreement between every pair of
        # (name-variation, name-variation) across both fields.
        specialized_author_filter = [
            {
                'bool': {
                    'must': [
                        {'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]}},
                        generate_match_query(
                            ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'],
                            names_variation[1],
                            with_operator_and=True)
                    ]
                }
            } for names_variation in product(name_variations, name_variations)
        ]
    else:
        # Otherwise a single term query per variation suffices.
        specialized_author_filter = [
            {'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}}
            for name_variation in name_variations
        ]
    query = {
        'bool': {
            'filter': {'bool': {'should': specialized_author_filter}},
            'must': {'match': {ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']: author_name}}
        }
    }
    return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
Generates a query handling specifically authors .
53,912
def _generate_exact_author_query(self, author_name_or_bai):
    """Generate a term query handling both author names and BAIs."""
    if ElasticSearchVisitor.BAI_REGEX.match(author_name_or_bai):
        # BAI values go against the dedicated BAI search field.
        fieldname = '.'.join((ElasticSearchVisitor.AUTHORS_BAI_FIELD, FieldVariations.search))
        query = self._generate_term_query(fieldname, author_name_or_bai.lower())
    else:
        normalized = normalize('NFKC', normalize_name(author_name_or_bai)).lower()
        query = self._generate_term_query(
            ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'], normalized)
    return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
Generates a term query handling authors and BAIs .
53,913
def _generate_date_with_wildcard_query(self, date_value):
    """Generate a date keyword query for a value containing a wildcard.

    Returns an empty dict (ignored by ES) for malformed values.
    """
    if not date_value.endswith(ast.GenericValue.WILDCARD_TOKEN):
        return {}
    try:
        truncated_date = _truncate_wildcard_from_date(date_value)
    except ValueError:
        # malformed date: drop the query silently
        return {}
    return self._generate_range_queries(
        self.KEYWORD_TO_ES_FIELDNAME['date'], {ES_RANGE_EQ_OPERATOR: truncated_date})
Helper for generating a date keyword query containing a wildcard .
53,914
def _generate_queries_for_title_symbols(title_field, query_value):
    """Generate queries for symbol-containing tokens of the title against
    the whitespace-tokenized field of titles."""
    indicator_chars = ElasticSearchVisitor.TITLE_SYMBOL_INDICATING_CHARACTER
    symbol_queries = [
        generate_match_query(
            '.'.join([title_field, FieldVariations.search]),
            token,
            with_operator_and=False)
        for token in query_value.split()
        if any(character in token for character in indicator_chars)
    ]
    return wrap_queries_in_bool_clauses_if_more_than_one(symbol_queries, use_must_clause=True)
Generate queries for any symbols in the title against the whitespace tokenized field of titles .
53,915
def _generate_type_code_query(self, value):
    """Generate a type-code query, preferring an exact mapping and falling
    back to matching both document and publication types."""
    mapped = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None)
    if mapped:
        return generate_match_query(*mapped, with_operator_and=True)
    return {
        'bool': {
            'minimum_should_match': 1,
            'should': [
                generate_match_query('document_type', value, with_operator_and=True),
                generate_match_query('publication_type', value, with_operator_and=True),
            ],
        }
    }
Generate type - code queries .
53,916
def _generate_range_queries(self, fieldnames, operator_value_pairs):
    """Generate ElasticSearch range queries for the given fieldnames.

    Date fields get their values normalized per field and may be
    wrapped in a nested query; other fields get plain range queries.
    """
    if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
        range_queries = []
        for fieldname in fieldnames:
            updated_operator_value_pairs = update_date_value_in_operator_value_pairs_for_fieldname(fieldname, operator_value_pairs)
            if not updated_operator_value_pairs:
                # a malformed date aborts the whole date query
                break
            else:
                range_query = {'range': {fieldname: updated_operator_value_pairs}}
                # nested date fields need the nested query wrapper
                range_queries.append(
                    generate_nested_query(ElasticSearchVisitor.DATE_NESTED_QUERY_PATH, range_query)
                    if fieldname in ElasticSearchVisitor.DATE_NESTED_FIELDS
                    else range_query)
    else:
        range_queries = [{'range': {fieldname: operator_value_pairs}} for fieldname in fieldnames]
    return wrap_queries_in_bool_clauses_if_more_than_one(range_queries, use_must_clause=False)
Generates ElasticSearch range queries .
53,917
def _generate_malformed_query(data):
    """Generate a query on the ``_all`` field carrying the full query content."""
    if isinstance(data, six.text_type):
        query_str = data.replace(':', ' ')
    else:
        words = (word.strip(':') for word in data.children)
        query_str = ' '.join(words)
    return {'simple_query_string': {'fields': ['_all'], 'query': query_str}}
Generates a query on the _all field with all the query content .
53,918
def visit_partial_match_value(self, node, fieldnames=None):
    """Generate a query looking for a substring of the node's value in
    the given fieldnames, with special handling for date, exact-author,
    type-code, journal and BAI author queries.
    """
    if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['date'] == fieldnames:
        # partial dates become range queries, honouring wildcards
        if node.contains_wildcard:
            return self._generate_date_with_wildcard_query(node.value)
        return self._generate_range_queries(force_list(fieldnames), {ES_RANGE_EQ_OPERATOR: node.value})
    if ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['exact-author'] == fieldnames:
        return self._generate_exact_author_query(node.value)
    elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['type-code'] == fieldnames:
        return self._generate_type_code_query(node.value)
    elif ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['journal'] == fieldnames:
        return self._generate_journal_nested_queries(node.value)
    # surround the value with wildcards unless already present, to get
    # substring ("partial match") semantics
    value = ('' if node.value.startswith(ast.GenericValue.WILDCARD_TOKEN) else '*') + node.value + ('' if node.value.endswith(ast.GenericValue.WILDCARD_TOKEN) else '*')
    bai_fieldnames = self._generate_fieldnames_if_bai_query(
        node.value,
        bai_field_variation=FieldVariations.search,
        query_bai_field_if_dots_in_name=True)
    query = self._generate_query_string_query(value, fieldnames=bai_fieldnames or fieldnames, analyze_wildcard=True)
    # author queries live under a nested path
    if (bai_fieldnames and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in bai_fieldnames) or (fieldnames and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] in fieldnames):
        return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
    return query
Generates a query which looks for a substring of the node s value in the given fieldname .
53,919
def run(self, schedule_id, **kwargs):
    """Synchronise the schedule with ID ``schedule_id`` to the scheduler
    service, creating it there when it does not yet exist.
    """
    log = self.get_logger(**kwargs)
    try:
        schedule = Schedule.objects.get(id=schedule_id)
    except Schedule.DoesNotExist:
        log.error("Missing Schedule %s", schedule_id, exc_info=True)
        # BUG FIX: without this return, execution fell through and
        # raised a NameError because ``schedule`` was never bound.
        return
    if schedule.scheduler_schedule_id is None:
        result = self.scheduler.create_schedule(schedule.scheduler_format)
        schedule.scheduler_schedule_id = result["id"]
        # Disconnect the post_save handler while saving, so this task
        # is not re-queued recursively.
        post_save.disconnect(schedule_saved, sender=Schedule)
        schedule.save(update_fields=("scheduler_schedule_id",))
        post_save.connect(schedule_saved, sender=Schedule)
        log.info(
            "Created schedule %s on scheduler for schedule %s",
            schedule.scheduler_schedule_id,
            schedule.id,
        )
    else:
        result = self.scheduler.update_schedule(
            str(schedule.scheduler_schedule_id), schedule.scheduler_format)
        log.info(
            "Updated schedule %s on scheduler for schedule %s",
            schedule.scheduler_schedule_id,
            schedule.id,
        )
Synchronises the schedule specified by the ID schedule_id to the scheduler service .
53,920
def run(self, scheduler_schedule_id, **kwargs):
    """Deactivate the schedule identified by ``scheduler_schedule_id``
    in the scheduler service."""
    log = self.get_logger(**kwargs)
    self.scheduler.update_schedule(scheduler_schedule_id, {"active": False})
    log.info("Deactivated schedule %s in the scheduler service", scheduler_schedule_id)
Deactivates the schedule specified by the ID scheduler_schedule_id in the scheduler service .
53,921
def get_for_queryset(self, obj_queryset):
    """Return all tags attached to any object in ``obj_queryset``."""
    qs = Tag.objects.language(get_language())
    # ``exists()`` avoids a full COUNT(*) just to test for emptiness.
    if not obj_queryset.exists():
        return qs.none()
    qs = qs.filter(
        tagged_items__object_id__in=[obj.id for obj in obj_queryset],
        tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj_queryset[0]),
    )
    return qs.distinct()
Returns all tags for a whole queryset of objects .
53,922
def post_send_process(context):
    """Task to ensure a subscription is bumped or converted after a send.

    Marks the subscription complete (and rolls over to the next message
    set, if any) once the last message has been sent; otherwise just
    increments the sequence number.
    """
    if "error" in context:
        # upstream failure: propagate the context untouched
        return context
    # single-element unpacking of the deserialized object streams
    [deserialized_subscription] = serializers.deserialize("json", context["subscription"])
    subscription = deserialized_subscription.object
    [messageset] = serializers.deserialize("json", context["messageset"])
    messageset = messageset.object
    # number of messages in this set for the subscriber's language
    set_max = messageset.messages.filter(lang=subscription.lang).count()
    logger.debug("set_max calculated - %s" % set_max)
    if subscription.next_sequence_number == set_max:
        # last message of the set has been sent
        with transaction.atomic():
            logger.debug("marking current subscription as complete")
            subscription.completed = True
            subscription.active = False
            subscription.process_status = 2  # NOTE(review): presumably the "completed" status code — confirm
            deserialized_subscription.save(update_fields=("completed", "active", "process_status"))
            if messageset.next_set:
                logger.info("Creating new subscription for next set")
                newsub = Subscription.objects.create(
                    identity=subscription.identity,
                    lang=subscription.lang,
                    messageset=messageset.next_set,
                    schedule=messageset.next_set.default_schedule,
                )
                logger.debug("Created Subscription <%s>" % newsub.id)
    else:
        logger.debug("incrementing next_sequence_number")
        # F() expression: increment atomically on the database side
        subscription.next_sequence_number = F("next_sequence_number") + 1
        logger.debug("setting process status back to 0")
        subscription.process_status = 0
        logger.debug("saving subscription")
        deserialized_subscription.save(update_fields=("next_sequence_number", "process_status"))
    return "Subscription for %s updated" % str(subscription.id)
Task to ensure subscription is bumped or converted
53,923
def calculate_subscription_lifecycle(subscription_id):
    """Calculate the expected lifecycle position of the subscription with
    the given ID, and record a BehindSubscription entry when it lags."""
    subscription = Subscription.objects.select_related("messageset", "schedule").get(
        id=subscription_id)
    behind = subscription.messages_behind()
    if behind == 0:
        return
    # capture the current position before fast-forwarding
    current_messageset = subscription.messageset
    current_sequence_number = subscription.next_sequence_number
    end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]
    BehindSubscription.objects.create(
        subscription=subscription,
        messages_behind=behind,
        current_messageset=current_messageset,
        current_sequence_number=current_sequence_number,
        expected_messageset=end_subscription.messageset,
        expected_sequence_number=end_subscription.next_sequence_number,
    )
Calculates the expected lifecycle position the subscription in subscription_ids and creates a BehindSubscription entry for them .
53,924
def find_behind_subscriptions():
    """Find active subscriptions that may be behind where they should be
    and queue a lifecycle calculation for each of them."""
    subscription_ids = Subscription.objects.filter(
        active=True, completed=False, process_status=0
    ).values_list("id", flat=True)
    for subscription_id in subscription_ids.iterator():
        calculate_subscription_lifecycle.delay(str(subscription_id))
Finds any subscriptions that are behind according to where they should be and creates a BehindSubscription entry for them .
53,925
def send(self, request, pk=None):
    """Queue sending of all the subscriptions for the specified schedule."""
    schedule = self.get_object()
    queue_subscription_send.delay(str(schedule.id))
    return Response({}, status=status.HTTP_202_ACCEPTED)
Sends all the subscriptions for the specified schedule
53,926
def get(self, key: Any, default: Any = None) -> Any:
    """Return the configured value for ``key``, or ``default`` when no
    value is available or the key is invalid."""
    value = self.data.get(key, default)
    return value
Return the configured value for the given key name or default if no value is available or key is invalid .
53,927
def task_config(self, task: Task) -> Any:
    """Return the configuration section specific to ``task``'s class."""
    class_name = task.__class__.__name__
    return self.get(class_name)
Return the task - specific configuration .
53,928
async def init(self) -> None:
    """Load configuration in JSON format from either a raw data string
    or a file path, preferring data that is already loaded."""
    if self.data:
        return
    if self.json_data:
        try:
            self.data = json.loads(self.json_data)
        except Exception:
            # typo fix: was "Falied"
            Log.exception('Failed to load raw configuration')
            # fall back to an empty config, consistent with the file branch
            self.data = {}
    else:
        try:
            with open(self.json_path, 'r') as f:
                self.data = json.load(f)
        except Exception:
            Log.exception('Failed to load configuration from %s', self.json_path)
            self.data = {}
Load configuration in JSON format from either a file or a raw data string .
53,929
def reset(self) -> None:
    """Reset task execution to DELAY seconds from now."""
    # BUG FIX: the '%s' placeholder had no argument, so the log line
    # would fail to format; log the task itself.
    Log.debug('resetting timer task %s', self)
    self.target = self.time() + self.DELAY
Reset task execution to DELAY seconds from now .
53,930
def internal_only(view_func):
    """View decorator blocking requests that arrived through the load
    balancer, detected by a multi-hop X-Forwarded-For header."""
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        header = request.META.get("HTTP_X_FORWARDED_FOR", "")
        hops = header.split(",")
        if len(hops) > 1:
            raise PermissionDenied()
        return view_func(request, *args, **kwargs)
    return wrapper
A view decorator which blocks access for requests coming through the load balancer .
53,931
def post(self, request, *args, **kwargs):
    """Validate subscription data before creating a Subscription.

    Returns 201 with {"accepted": True} on success, or 400 with the
    validation errors / a missing-field message otherwise.
    """
    if "data" in request.data:
        # default optional fields before validation
        if "metadata" not in request.data["data"]:
            request.data["data"]["metadata"] = {}
        if "initial_sequence_number" not in request.data["data"]:
            request.data["data"]["initial_sequence_number"] = request.data["data"].get("next_sequence_number")
        subscription = SubscriptionSerializer(data=request.data["data"])
        if subscription.is_valid():
            subscription.save()
            status = 201
            accepted = {"accepted": True}
            return Response(accepted, status=status)
        else:
            status = 400
            return Response(subscription.errors, status=status)
    else:
        status = 400
        message = {"data": ["This field is required."]}
        return Response(message, status=status)
Validates subscription data before creating Subscription message
53,932
def find_behind_subscriptions(self, request):
    """Start a celery task that scans active subscriptions for any that
    are behind where they should be, recording a BehindSubscription for
    each one found."""
    task_id = find_behind_subscriptions.delay()
    return Response(
        {"accepted": True, "task_id": str(task_id)},
        status=status.HTTP_202_ACCEPTED,
    )
Starts a celery task that looks through active subscriptions to find and subscriptions that are behind where they should be and adds a BehindSubscription for them .
53,933
def repl():
    """Read-Eval-Print loop: read a query, print it and its parse tree.

    Python 2 only (uses ``raw_input`` and byte-string decoding).
    """
    # hoisted out of the loop: the import and the encoding are invariant
    import locale
    encoding = sys.stdin.encoding or locale.getpreferredencoding(True)
    while True:
        try:
            sys.stdout.write("Type in next query: \n> ")
            query_str = raw_input().decode(encoding)
        except KeyboardInterrupt:
            break
        if u'quit' in query_str:
            break
        print_query_and_parse_tree(query_str)
Read - Eval - Print - Loop for reading the query printing it and its parse tree .
53,934
def schedule_saved(sender, instance, **kwargs):
    """Fire off the celery task ensuring this schedule exists in the scheduler."""
    from contentstore.tasks import sync_schedule
    schedule_id = str(instance.id)
    sync_schedule.delay(schedule_id)
Fires off the celery task to ensure that this schedule is in the scheduler
53,935
def schedule_deleted(sender, instance, **kwargs):
    """Fire off the celery task ensuring this schedule is deactivated."""
    from contentstore.tasks import deactivate_schedule
    remote_id = str(instance.scheduler_schedule_id)
    deactivate_schedule.delay(remote_id)
Fires off the celery task to ensure that this schedule is deactivated
53,936
def group_by(keys, values=None, reduction=None, axis=0):
    """Construct a grouping object on the given keys, optionally splitting
    ``values`` per group and applying ``reduction`` to each group."""
    grouping = GroupBy(keys, axis)
    if values is None:
        return grouping
    groups = grouping.split(values)
    if reduction is None:
        return grouping.unique, groups
    return [(key, reduction(group)) for key, group in zip(grouping.unique, groups)]
construct a grouping object on the given keys optionally performing the given reduction on the given values
53,937
def split_iterable_as_iterable(self, values):
    """Group an iterable into iterables, in the order of the keys.

    Values are pulled lazily; out-of-order items are cached until their
    group is reached.
    """
    values = iter(enumerate(values))
    cache = dict()

    def get_value(ti):
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt
        # and masked real errors; only a cache miss should fall through.
        try:
            return cache.pop(ti)
        except KeyError:
            while True:
                i, v = next(values)
                if i == ti:
                    return v
                cache[i] = v

    s = iter(self.index.sorter)
    for c in self.count:
        yield (get_value(i) for i in itertools.islice(s, int(c)))
Group iterable into iterables in the order of the keys
53,938
def split_iterable_as_unordered_iterable(self, values):
    """Group an iterable into iterables without regard for key order;
    each (key, group) tuple is yielded as soon as it is complete."""
    from collections import defaultdict
    pending = defaultdict(list)
    counts = self.count
    unique = self.unique
    if isinstance(unique, np.ndarray):
        key_of = lambda i: unique[i]
    else:
        key_of = lambda i: tuple(column[i] for column in unique)
    for group_idx, value in zip(self.inverse, values):
        bucket = pending[group_idx]
        bucket.append(value)
        if len(bucket) == counts[group_idx]:
            yield key_of(group_idx), pending.pop(group_idx)
Group iterable into iterables without regard for the ordering of self . index . unique key - group tuples are yielded as soon as they are complete
53,939
def split_sequence_as_iterable(self, values):
    """Group a sequence into iterables, in the order of the keys."""
    # BUG FIX: removed a leftover ``print(self.count)`` debug statement
    # that polluted stdout on every call.
    s = iter(self.index.sorter)
    for c in self.count:
        yield (values[i] for i in itertools.islice(s, int(c)))
Group sequence into iterables
53,940
def split_array_as_array(self, values):
    """Group an ndarray into an ndarray by means of reshaping; requires
    every group to have the same size."""
    if not self.index.uniform:
        raise ValueError("Array can only be split as array if all groups have the same size")
    ordered = np.asarray(values)[self.index.sorter]
    return ordered.reshape(self.groups, -1, *ordered.shape[1:])
Group ndarray into ndarray by means of reshaping
53,941
def split_array_as_list(self, values):
    """Group values as a list of arrays (a jagged array)."""
    ordered = np.asarray(values)[self.index.sorter]
    return np.split(ordered, self.index.slices[1:-1], axis=0)
Group values as a list of arrays or a jagged - array
53,942
def reduce(self, values, operator=np.add, axis=0, dtype=None):
    """Reduce the values over identical key groups, using the given ufunc.

    Reduction is over the first axis, whose elements correspond to the
    keys; all other axes are treated independently.
    """
    ordered = np.take(values, self.index.sorter, axis=axis)
    return operator.reduceat(ordered, self.index.start, axis=axis, dtype=dtype)
Reduce the values over identical key groups using the given ufunc reduction is over the first axis which should have elements corresponding to the keys all other axes are treated indepenently for the sake of this reduction
53,943
def sum(self, values, axis=0, dtype=None):
    """Compute the sum over each group."""
    array = np.asarray(values)
    totals = self.reduce(array, axis=axis, dtype=dtype)
    return self.unique, totals
compute the sum over each group
53,944
def prod(self, values, axis=0, dtype=None):
    """Compute the product over each group."""
    array = np.asarray(values)
    products = self.reduce(array, axis=axis, dtype=dtype, operator=np.multiply)
    return self.unique, products
compute the product over each group
53,945
def mean(self, values, axis=0, weights=None, dtype=None):
    """Compute the (optionally weighted) mean over each group."""
    values = np.asarray(values)
    if weights is None:
        # unweighted: sum per group, then divide by the group sizes,
        # broadcast along the reduction axis
        result = self.reduce(values, axis=axis, dtype=dtype)
        shape = [1] * values.ndim
        shape[axis] = self.groups
        weights = self.count.reshape(shape)
    else:
        # weighted: sum of w*x divided by the sum of w, per group
        weights = np.asarray(weights)
        result = self.reduce(values * weights, axis=axis, dtype=dtype)
        weights = self.reduce(weights, axis=axis, dtype=dtype)
    return self.unique, result / weights
compute the mean over each group
53,946
def var(self, values, axis=0, weights=None, dtype=None):
    """Compute the (optionally weighted) variance over each group.

    Uses the biased estimator: squared deviations from the group mean
    are averaged over the group weight with no ddof correction.
    """
    values = np.asarray(values)
    unique, mean = self.mean(values, axis, weights, dtype)
    # deviation of every element from its own group's mean
    err = values - mean.take(self.inverse, axis)
    if weights is None:
        shape = [1] * values.ndim
        shape[axis] = self.groups
        group_weights = self.count.reshape(shape)
        var = self.reduce(err ** 2, axis=axis, dtype=dtype)
    else:
        weights = np.asarray(weights)
        group_weights = self.reduce(weights, axis=axis, dtype=dtype)
        var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
    return unique, var / group_weights
compute the variance over each group
53,947
def std(self, values, axis=0, weights=None, dtype=None):
    """Compute the standard deviation over each group."""
    unique, variance = self.var(values, axis, weights, dtype)
    return unique, np.sqrt(variance)
standard deviation over each group
53,948
def median(self, values, axis=0, average=True):
    """Compute the median value over each group.

    When ``average`` is true, even-sized groups yield the midpoint of
    the two central values.
    """
    # indices of the low/high median element of each group, in the
    # fully sorted (by group, then by value) ordering
    mid_2 = self.index.start + self.index.stop
    hi = (mid_2) // 2
    lo = (mid_2 - 1) // 2
    sorted_group_rank_per_key = self.index.sorted_group_rank_per_key

    def median1d(slc):
        # sort the values within each contiguous group
        slc = slc[self.index.sorter]
        sorter = np.lexsort((slc, sorted_group_rank_per_key))
        slc = slc[sorter]
        return (slc[lo] + slc[hi]) / 2 if average else slc[hi]

    values = np.asarray(values)
    if values.ndim > 1:
        # process each 1d slice along the axis independently
        values = np.apply_along_axis(median1d, axis, values)
    else:
        values = median1d(values)
    return self.unique, values
compute the median value over each group .
53,949
def mode(self, values, weights=None):
    """Compute the mode (most frequent value) within each group.

    Optional ``weights`` make the vote per occurrence non-uniform.
    """
    if weights is None:
        # count occurrences of each (group, value) pair
        unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
    else:
        unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
    # pick, per group, the value with the heaviest weight
    x, bin = npi.group_by(unique[0]).argmax(weights)
    return x, unique[1][bin]
compute the mode within each group .
53,950
def min(self, values, axis=0):
    """Return the minimum within each group."""
    array = np.asarray(values)
    return self.unique, self.reduce(array, np.minimum, axis)
return the minimum within each group
53,951
def max(self, values, axis=0):
    """Return the maximum within each group."""
    array = np.asarray(values)
    return self.unique, self.reduce(array, np.maximum, axis)
return the maximum within each group
53,952
def first(self, values, axis=0):
    """Return the value at the first occurrence of each key."""
    array = np.asarray(values)
    first_positions = self.index.sorter[self.index.start]
    return self.unique, np.take(array, first_positions, axis)
return values at first occurrence of its associated key
53,953
def last(self, values, axis=0):
    """Return the value at the last occurrence of each key."""
    array = np.asarray(values)
    last_positions = self.index.sorter[self.index.stop - 1]
    return self.unique, np.take(array, last_positions, axis)
return values at last occurrence of its associated key
53,954
def any(self, values, axis=0):
    """Compute whether any item evaluates to true in each group."""
    values = np.asarray(values)
    # BUG FIX: ``np.bool`` was deprecated in numpy 1.20 and removed in
    # 1.24; use the proper ``np.bool_`` scalar dtype instead.
    if not values.dtype == np.bool_:
        values = values != 0
    return self.unique, self.reduce(values, axis=axis) > 0
compute if any item evaluates to true in each group
53,955
def all(self, values, axis=0):
    """Compute whether all items evaluate to true in each group."""
    array = np.asarray(values)
    return self.unique, self.reduce(array, axis=axis, operator=np.multiply) != 0
compute if all items evaluates to true in each group
53,956
def argmin(self, values):
    """Return the index into values corresponding to the minimum value
    of each group."""
    keys, minima = self.min(values)
    # broadcast each group's minimum back onto its elements
    minima = minima[self.inverse]
    # group by (group, is-minimum); the is-minimum=True sub-groups sort
    # last, so the start of the trailing ``self.groups`` runs points at
    # a minimal element of each group
    index = as_index((self.inverse, values == minima))
    return keys, index.sorter[index.start[-self.groups:]]
return the index into values corresponding to the minimum value of the group
53,957
def argmax(self, values):
    """Return the index into values corresponding to the maximum value
    of each group."""
    keys, maxima = self.max(values)
    # broadcast each group's maximum back onto its elements
    maxima = maxima[self.inverse]
    # group by (group, is-maximum); the is-maximum=True sub-groups sort
    # last, so the start of the trailing ``self.groups`` runs points at
    # a maximal element of each group
    index = as_index((self.inverse, values == maxima))
    return keys, index.sorter[index.start[-self.groups:]]
return the index into values corresponding to the maximum value of the group
53,958
def as_index(keys, axis=semantics.axis_default, base=False, stable=True, lex_as_struct=False):
    """Casting rules for a keys object to an index object.

    Accepts an existing Index (returned as-is or unwrapped), a tuple of
    key arrays (lexicographic indexing), or anything array-like.
    """
    if isinstance(keys, Index):
        if type(keys) is BaseIndex and base == False:
            # unwrap so the keys are re-indexed with the full Index type
            keys = keys.keys
        else:
            return keys
    if isinstance(keys, tuple):
        if lex_as_struct:
            keys = as_struct_array(*keys)
        else:
            return LexIndex(keys, stable)
    try:
        keys = np.asarray(keys)
    # BUG FIX: a bare ``except:`` also caught KeyboardInterrupt/SystemExit
    except Exception:
        raise TypeError('Given object does not form a valid set of keys')
    if axis is None:
        keys = keys.flatten()
    if keys.ndim == 1:
        if base:
            return BaseIndex(keys)
        else:
            return Index(keys, stable=stable)
    else:
        return ObjectIndex(keys, axis, stable=stable)
casting rules for a keys object to an index object
53,959
def unique(self):
    """Return a tuple of unique key columns, one per key in the
    lexicographic index."""
    return tuple(
        # multi-dimensional key columns are viewed with their original
        # dtype before picking the first row of each group
        (array_as_typed(s, k.dtype, k.shape) if k.ndim > 1 else s)[self.start]
        for s, k in zip(self.sorted, self._keys)
    )
returns a tuple of unique key columns
53,960
def as_struct_array(*columns):
    """Pack a sequence of columns into a recarray."""
    columns = [np.asarray(column) for column in columns]
    n_rows = len(columns[0])
    names = ['f' + str(i) for i in range(len(columns))]
    dtype = [(names[i], column.dtype, column.shape[1:]) for i, column in enumerate(columns)]
    packed = np.empty(n_rows, dtype)
    for name, column in zip(names, columns):
        packed[name] = column
    return packed
pack a sequence of columns into a recarray
53,961
def axis_as_object(arr, axis=-1):
    """Cast the given axis of an array to a void object.

    If the axis to be cast is contiguous a view is returned, otherwise a
    copy is made; useful for efficiently sorting by the content of an axis.
    """
    shape = arr.shape
    rolled = np.ascontiguousarray(np.rollaxis(arr, axis, arr.ndim))
    nbytes = arr.dtype.itemsize * shape[axis]
    void_dtype = np.dtype((np.void, nbytes))
    return rolled.view(void_dtype).reshape(np.delete(shape, axis))
cast the given axis of an array to a void object if the axis to be cast is contiguous a view is returned otherwise a copy is made this is useful for efficiently sorting by the content of an axis for instance
53,962
def object_as_axis(arr, dtype, axis=-1):
    """Invert axis_as_object: reinterpret void elements as a typed axis."""
    typed = arr.view(dtype)
    typed = typed.reshape(arr.shape + (-1,))
    # move the reconstructed axis back into position
    return np.rollaxis(typed, -1, axis)
cast an array of void objects to a typed axis
53,963
def binning(keys, start, end, count, axes=None):
    """Perform binning over the given axes of the keys.

    NOTE(review): this function appears truncated in this source -- after
    computing the bin indices it falls through and implicitly returns None,
    and n_keys/idx are never used. Confirm against the complete original.
    """
    if isinstance(keys, tuple):
        n_keys = len(keys)
    else:
        n_keys = 1
    # count+1 evenly spaced bin edges spanning [start, end]
    bins = np.linspace(start, end, count + 1, endpoint=True)
    # for each key, the index of the first bin edge not below it
    idx = np.searchsorted(bins, keys)
    if axes is None:
        axes = [-1]
Perform binning over the given axes of the keys
53,964
def multiplicity(keys, axis=semantics.axis_default):
    """Return, for every key, how often that key occurs in the whole set."""
    idx = as_index(keys, axis)
    # broadcast per-group counts back onto the original key positions
    return idx.count[idx.inverse]
return the multiplicity of each key or how often it occurs in the set
53,965
def rank(keys, axis=semantics.axis_default):
    """Return where each item sits in the pecking (sorted) order."""
    return as_index(keys, axis).rank
returns the position of each item in the pecking (sorted) order .
53,966
def mode(keys, axis=semantics.axis_default, weights=None, return_indices=False):
    """Compute the mode, i.e. the most frequently occurring key in a set.

    With `weights`, the key with the greatest summed weight wins instead.
    If return_indices is True, also return the positions of that key.
    """
    index = as_index(keys, axis)
    if weights is None:
        unique, weights = count(index)
    else:
        unique, weights = group_by(index).sum(weights)
    # renamed from `bin`, which shadowed the builtin
    heaviest = np.argmax(weights)
    _mode = unique[heaviest]
    if not return_indices:
        return _mode
    indices = index.sorter[index.start[heaviest]: index.stop[heaviest]]
    return _mode, indices
compute the mode , or most frequently occurring key , in a set
53,967
def incidence(boundary):
    """Given an N x m matrix of boundary info between simplices, compute incidence info.

    Not very reusable; probably should not live in this library.
    """
    labels = np.arange(boundary.size) // boundary.shape[1]
    return GroupBy(boundary).split(labels)
given an Nxm matrix containing boundary info between simplices , compute incidence info matrix . not very reusable ; should probably not be in this lib
53,968
def all_unique(keys, axis=semantics.axis_default):
    """Return True if every key occurs exactly once (group count == key count)."""
    idx = as_index(keys, axis)
    return idx.groups == idx.size
Returns true if all keys are unique
53,969
def any_unique(keys, axis=semantics.axis_default):
    """Return True if at least one key occurs exactly once."""
    idx = as_index(keys, axis)
    return np.any(idx.count == 1)
returns true if any of the keys is unique
53,970
def all_equal(keys, axis=semantics.axis_default):
    """Return True if all keys are identical, i.e. there is exactly one group."""
    return as_index(keys, axis).groups == 1
returns true if all keys are equal
53,971
def is_uniform(keys, axis=semantics.axis_default):
    """Return True if every key has the same multiplicity."""
    return as_index(keys, axis).uniform
returns true if all keys have equal multiplicity
53,972
def unique(self, values):
    """Place each entry of `values` in the table, asserting every slot is filled exactly once."""
    _, counts = self.count()
    if not np.array_equiv(counts, 1):
        raise ValueError("Not every entry in the table is assigned a unique value")
    # with all counts equal to one, the group-sum is just the placed values
    return self.sum(values)
Place each entry in a table while asserting that each entry occurs once
53,973
def unique(keys, axis=semantics.axis_default, return_index=False, return_inverse=False, return_count=False):
    """Compute the set of unique keys, optionally with index/inverse/count arrays.

    Returns the unique keys alone, or a tuple starting with them when any of
    the return_* flags are set.
    """
    # index/inverse reconstruction requires a stable sort; otherwise a cheap BaseIndex suffices
    stable = return_index or return_inverse
    index = as_index(keys, axis, base=not stable, stable=stable)
    parts = [index.unique]
    if return_index:
        parts.append(index.index)
    if return_inverse:
        parts.append(index.inverse)
    if return_count:
        parts.append(index.count)
    if len(parts) == 1:
        return parts[0]
    return tuple(parts)
compute the set of unique keys
53,974
def contains(this, that, axis=semantics.axis_default):
    """Return a bool per element of `that`, indicating whether it occurs in `this`.

    Works by marking, on the sorted order of `that`, the half-open ranges
    matched by each element of `this`, then reading the marks back out in the
    original order of `that`.
    """
    this = as_index(this, axis=axis, lex_as_struct=True, base=True)
    that = as_index(that, axis=axis, lex_as_struct=True)
    left = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='left')
    right = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='right')
    # np.int / np.bool aliases were removed in NumPy 1.24; use the builtins
    flags = np.zeros(that.size + 1, dtype=int)
    np.add.at(flags, left, 1)
    np.add.at(flags, right, -1)
    return np.cumsum(flags)[:-1].astype(bool)[that.rank]
Returns bool for each element of that indicating if it is contained in this
53,975
def in_(this, that, axis=semantics.axis_default):
    """Return a bool per element of `this`, indicating whether it is present in `that`."""
    this = as_index(this, axis=axis, lex_as_struct=True, base=True)
    that = as_index(that, axis=axis, lex_as_struct=True)
    # an element is present iff its left/right insertion points differ
    lo = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='left')
    hi = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='right')
    return lo != hi
Returns bool for each element of this indicating if it is present in that
53,976
def _set_preprocess(sets, **kwargs):
    """Upcast a sequence of indexable objects to their unique keys.

    NOTE(review): the original had two byte-identical branches on
    `assume_unique`, so the flag had no effect. The dead duplication is
    collapsed here without changing behavior; `assume_unique` is still
    accepted for API compatibility.
    """
    axis = kwargs.get('axis', semantics.axis_default)
    assume_unique = kwargs.get('assume_unique', False)  # currently unused; see note
    return [as_index(s, axis=axis).unique for s in sets]
upcasts a sequence of indexable objects to Index objects according to the given kwargs
53,977
def _set_concatenate ( sets ) : def con ( set ) : try : return np . concatenate ( [ s for s in sets if len ( s ) ] ) except ValueError : return set [ 0 ] if any ( not isinstance ( s , tuple ) for s in sets ) : return con ( sets ) else : return tuple ( con ( s ) for s in zip ( * sets ) )
concatenate indexable objects .
53,978
def _set_count(sets, n, **kwargs):
    """Return the elements occurring exactly n times over the sequence of sets."""
    uniques = _set_preprocess(sets, **kwargs)
    index = as_index(_set_concatenate(uniques), axis=0, base=True)
    return index.unique[index.count == n]
return the elements which occur n times over the sequence of sets
53,979
def union(*sets, **kwargs):
    """Return all unique items occurring in at least one of the sets."""
    uniques = _set_preprocess(sets, **kwargs)
    joined = _set_concatenate(uniques)
    return as_index(joined, axis=0, base=True).unique
all unique items which occur in any one of the sets
53,980
def difference(*sets, **kwargs):
    """Subtract all tail sets from the head set."""
    head = sets[0]
    tail = sets[1:]
    idx = as_index(head, **kwargs)
    # items of head shared with any tail set are removed via exclusive()
    overlaps = [intersection(idx, s, **kwargs) for s in tail]
    return exclusive(idx.unique, *overlaps, axis=0, assume_unique=True)
subtracts all tail sets from the head set
53,981
def _generate_div_id_chart ( prefix = "chart_id" , digits = 8 ) : choices = ( random . randrange ( 0 , 52 ) for _ in range ( digits ) ) return prefix + "" . join ( ( string . ascii_letters [ x ] for x in choices ) )
Generate a random id for div chart .
53,982
def get_additional_params(self, **params):
    """Filter **params down to the subset recognized by the polling endpoint."""
    polling_params = [
        'locationschema', 'carrierschema', 'sorttype', 'sortorder',
        'originairports', 'destinationairports', 'stops',
        'outbounddeparttime', 'outbounddepartstarttime', 'outbounddepartendtime',
        'inbounddeparttime', 'inbounddepartstarttime', 'inbounddepartendtime',
        'duration', 'includecarriers', 'excludecarriers',
    ]
    return {key: value for key, value in params.items() if key in polling_params}
Filter to get the additional params needed for polling
53,983
def get_result(self, errors=GRACEFUL, **params):
    """Create a session, then poll it for all results (no filtering)."""
    extra = self.get_additional_params(**params)
    session = self.create_session(**params)
    return self.poll_session(session, errors=errors, **extra)
Get all results no filtering etc . by creating and polling the session .
53,984
def make_request(self, service_url, method='get', headers=None, data=None, callback=None, errors=GRACEFUL, **params):
    """Reusable method for performing requests.

    :param service_url: URL to request
    :param method: HTTP verb name, resolved as an attribute of `requests`
    :param headers: optional dict of HTTP headers
    :param data: optional request body
    :param callback: applied to a successful response; defaults to self._default_resp_callback
    :param errors: one of STRICT, GRACEFUL, IGNORE (case-insensitive); None falls back to GRACEFUL
    :param params: query parameters for the request
    """
    error_modes = (STRICT, GRACEFUL, IGNORE)
    error_mode = errors or GRACEFUL
    if error_mode.lower() not in error_modes:
        raise ValueError('Possible values for errors argument are: %s' % ', '.join(error_modes))
    if callback is None:
        callback = self._default_resp_callback
    # append the API key unless the URL already carries one
    if 'apikey' not in service_url.lower():
        params.update({'apiKey': self.api_key})
    request = getattr(requests, method.lower())
    log.debug('* Request URL: %s' % service_url)
    log.debug('* Request method: %s' % method)
    log.debug('* Request query params: %s' % params)
    log.debug('* Request headers: %s' % headers)
    r = request(service_url, headers=headers, data=data, params=params)
    try:
        r.raise_for_status()
        return callback(r)
    except Exception as e:
        # delegate failures to the configured error-handling mode
        return self._with_error_handling(r, e, error_mode, self.response_format)
Reusable method for performing requests .
53,985
def _construct_params ( params , required_keys , opt_keys = None ) : try : params_list = [ params . pop ( key ) for key in required_keys ] except KeyError as e : raise MissingParameter ( 'Missing expected request parameter: %s' % e ) if opt_keys : params_list . extend ( [ params . pop ( key ) for key in opt_keys if key in params ] ) return '/' . join ( str ( p ) for p in params_list )
Construct params list in order of given keys .
53,986
def get_queryset(self):
    """Return the QuerySet used to look up the object.

    Prefers self.queryset, falling back to the model's default manager.
    Called by the default get_object implementation; may not be called if
    get_object is overridden.
    """
    if self.queryset is not None:
        return self.queryset.all()
    if self.model:
        return self.model._default_manager.all()
    raise ImproperlyConfigured(
        "%(cls)s is missing a QuerySet. Define "
        "%(cls)s.model, %(cls)s.queryset, or override "
        "%(cls)s.get_queryset()." % {'cls': self.__class__.__name__}
    )
Return the QuerySet that will be used to look up the object . Note that this method is called by the default implementation of get_object and may not be called if get_object is overridden .
53,987
def login_required(view_func):
    """Decorator for command views requiring an authenticated chat.

    When the chat is not authenticated, the login command view is invoked
    instead, with a `link` kwarg pointing at the bot's auth page.
    """
    @wraps(view_func)
    def wrapper(bot, update, **kwargs):
        chat = Chat.objects.get(id=update.message.chat.id)
        if chat.is_authenticated():
            return view_func(bot, update, **kwargs)
        # imported inside the function (presumably to avoid a circular import)
        from telegrambot.bot_views.login import LoginBotView
        login_view = LoginBotView.as_command_view()
        bot_model = Bot.objects.get(token=bot.token)
        kwargs['link'] = reverse('telegrambot:auth', kwargs={'bot': bot_model.user_api.username})
        return login_view(bot, update, **kwargs)
    return wrapper
Decorator for command views that checks that the chat is authenticated sends message with link for authenticated if necessary .
53,988
def pipe_util(func):
    """Decorator that handles X objects and partial application for pipe-utils."""
    @wraps(func)
    def pipe_util_wrapper(function, *args, **kwargs):
        # an XObject placeholder is materialized into a real callable via ~
        if isinstance(function, XObject):
            function = ~function
        original_function = function
        # extra args mean partial application of the wrapped function
        if args or kwargs:
            function = xpartial(function, *args, **kwargs)
        # lazy display name: "<util>(<function>, <args>)", built only on demand
        name = lambda: '%s(%s)' % (get_name(func), ', '.join(filter(None, (get_name(original_function), repr_args(*args, **kwargs)))))
        f = func(function)
        result = pipe | set_name(name, f)
        # propagate auxiliary attributes (e.g. 'descending' set by sort_by)
        attrs = getattr(f, 'attrs', {})
        for k, v in dict_items(attrs):
            setattr(result, k, v)
        return result
    return pipe_util_wrapper
Decorator that handles X objects and partial application for pipe - utils .
53,989
def auto_string_formatter(func):
    """Decorator that turns a plain string argument into a StringFormatter."""
    @wraps(func)
    def wrapper(function, *args, **kwargs):
        if isinstance(function, string_types):
            function = StringFormatter(function)
        return func(function, *args, **kwargs)
    return wrapper
Decorator that handles automatic string formatting .
53,990
def data_structure_builder(func):
    """Decorator that wraps the argument in a DSBuilder when it describes a data structure."""
    @wraps(func)
    def wrapper(function, *args, **kwargs):
        try:
            function = DSBuilder(function)
        except NoBuilder:
            # not a data-structure spec; use the argument as-is
            pass
        return func(function, *args, **kwargs)
    return wrapper
Decorator to handle automatic data structure creation for pipe - utils .
53,991
def regex_condition(func):
    """Decorator: a string condition becomes a regex-matching function."""
    @wraps(func)
    def wrapper(condition, *args, **kwargs):
        if isinstance(condition, string_types):
            # maybe | ... guards against None inputs to re.match
            condition = maybe | partial(re.match, condition)
        return func(condition, *args, **kwargs)
    return wrapper
If a condition is given as string instead of a function it is turned into a regex - matching function .
53,992
def sort_by(function):
    """Sort an incoming sequence using `function` as key; exposes a .descending variant via attrs."""
    ascending = partial(sorted, key=function)
    ascending.attrs = {'descending': _descending_sort_by(function)}
    return ascending
Sorts an incoming sequence by using the given function as key .
53,993
def drop_first(count):
    """Return a pipe that drops the first `count` items of an iterable."""
    def _drop_first(iterable):
        # counter generator yielding exactly `count` values (all truthy, starting at 1)
        g = (x for x in range(1, count + 1))
        # while g still yields, the predicate is truthy and items are dropped;
        # once g is exhausted, unless() swallows StopIteration and returns
        # None (falsy), so dropping stops from that item on
        return dropwhile(lambda i: unless(StopIteration, lambda: next(g))(), iterable)
    return pipe | set_name('drop_first(%s)' % count, _drop_first)
Assumes an iterable on the input returns an iterable with identical items except for the first count .
53,994
def unless(exception_class_or_tuple, func, *args, **kwargs):
    """Wrap func so that exception_class_or_tuple is caught and None returned instead.

    The stacked decorators give the wrapper the usual pipe-util conveniences:
    X objects, automatic string formatting, data-structure building, and
    partial application via *args/**kwargs.
    """
    @pipe_util
    @auto_string_formatter
    @data_structure_builder
    def construct_unless(function):
        # `function` has already been converted/partially applied by the decorators
        def _unless(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except exception_class_or_tuple:
                pass
        return _unless
    # lazy, human-readable name for debugging pipelines
    name = lambda: 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(filter(None, (get_name(func), repr_args(*args, **kwargs)))))
    return set_name(name, construct_unless(func, *args, **kwargs))
When exception_class_or_tuple occurs while executing func it will be caught and None will be returned .
53,995
def group_by(function):
    """Return a function that groups a sequence into lists keyed by function(item)."""
    def _group_by(seq):
        groups = {}
        for element in seq:
            key = function(element)
            if key not in groups:
                groups[key] = []
            groups[key].append(element)
        return dict_items(groups)
    return _group_by
Groups input sequence by function .
53,996
def gmst(utc_time):
    """Greenwich mean sidereal time at utc_time, in radians.

    Uses the standard GMST polynomial in Julian centuries since J2000
    (Vallado / Astronomical Almanac); theta is in seconds of time, converted
    to degrees at 240 s/deg and wrapped into [0, 2*pi).
    """
    ut1 = jdays2000(utc_time) / 36525.0
    # BUG FIX: the cubic coefficient was written `6.2 * 10e-6` (= 6.2e-5);
    # the reference polynomial uses 6.2e-6.
    theta = 67310.54841 + ut1 * (876600 * 3600 + 8640184.812866 + ut1 * (0.093104 - ut1 * 6.2e-6))
    return np.deg2rad(theta / 240.0) % (2 * np.pi)
Greenwich mean sidereal time at utc_time , in radians .
53,997
def sun_earth_distance_correction(utc_time):
    """Calculate the sun-earth distance correction relative to 1 AU."""
    year_days = 365.256363004  # sidereal year length in days
    # annual ~3.34% cosine modulation; the -2 day offset presumably aligns
    # the phase with perihelion -- confirm against the source of the formula
    return 1 - 0.0334 * np.cos(2 * np.pi * (jdays2000(utc_time) - 2) / year_days)
Calculate the sun earth distance correction relative to 1 AU .
53,998
def observer_position(time, lon, lat, alt):
    """Calculate observer ECI position and velocity.

    lon/lat are in degrees; alt presumably in the same length unit as the
    constant A -- confirm against the module's constants.
    Returns ((x, y, z), (vx, vy, vz)).
    """
    lon = np.deg2rad(lon)
    lat = np.deg2rad(lat)
    # local sidereal angle of the observer
    theta = (gmst(time) + lon) % (2 * np.pi)
    # oblate-earth correction factors built from the flattening F
    c = 1 / np.sqrt(1 + F * (F - 2) * np.sin(lat) ** 2)
    sq = c * (1 - F) ** 2
    achcp = (A * c + alt) * np.cos(lat)
    x = achcp * np.cos(theta)
    y = achcp * np.sin(theta)
    z = (A * sq + alt) * np.sin(lat)
    # velocity comes purely from earth rotation (MFACTOR), so vz is zero
    return (x, y, z), (-MFACTOR * y, MFACTOR * x, 0)
Calculate observer ECI position .
53,999
def get_last_an_time(self, utc_time):
    """Calculate the time of the last ascending node relative to utc_time.

    Walks backwards in 10-minute steps until the satellite z-coordinate
    changes sign (positive at the later time, negative at the earlier one),
    then bisects that bracket until |z| < 1.

    NOTE(review): the unit of the |z| < 1 tolerance depends on what
    get_position(..., normalize=False) returns -- confirm before tightening.
    """
    # coarse search: step back 10 minutes at a time
    dt = np.timedelta64(10, 'm')
    t_old = utc_time
    t_new = t_old - dt
    pos0, vel0 = self.get_position(t_old, normalize=False)
    pos1, vel1 = self.get_position(t_new, normalize=False)
    # find a bracket [t_new, t_old] with z < 0 at t_new and z > 0 at t_old
    while not (pos0[2] > 0 and pos1[2] < 0):
        pos0 = pos1
        t_old = t_new
        t_new = t_old - dt
        pos1, vel1 = self.get_position(t_new, normalize=False)
    # either endpoint may already lie within tolerance of the node
    if np.abs(pos0[2]) < 1:
        return t_old
    elif np.abs(pos1[2]) < 1:
        return t_new
    # bisect the bracket until the midpoint z is within tolerance
    while np.abs(pos1[2]) > 1:
        dt = (t_old - t_new) / 2
        t_mid = t_old - dt
        pos1, vel1 = self.get_position(t_mid, normalize=False)
        if pos1[2] > 0:
            t_old = t_mid
        else:
            t_new = t_mid
    return t_mid
Calculate time of last ascending node relative to the specified time