idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
59,200
def netflix ( es , ps , e0 , l = .0001 ) : m = len ( es ) n = len ( ps [ 0 ] ) X = np . stack ( ps ) . T pTy = .5 * ( n * e0 ** 2 + ( X ** 2 ) . sum ( axis = 0 ) - n * np . array ( es ) ** 2 ) w = np . linalg . pinv ( X . T . dot ( X ) + l * n * np . eye ( m ) ) . dot ( pTy ) return X . dot ( w ) , w
Combine predictions with the optimal weights to minimize RMSE .
59,201
def save_data(X, y, path):
    """Save (X, y) to *path*, picking the writer from the file extension.

    Supported extensions: .csv, .sps (LibSVM), .h5 (HDF5).  When *y* is
    None, a zero vector of matching length is written instead.
    """
    writers = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5}
    extension = os.path.splitext(path)[1]
    writer = writers[extension]
    if y is None:
        y = np.zeros((X.shape[0],))
    writer(X, y, path)
Save data as a CSV LibSVM or HDF5 file based on the file extension .
59,202
def save_csv(X, y, path):
    """Write *y* as the first column followed by *X*, comma-separated.

    Sparse feature matrices are densified before writing.
    """
    if sparse.issparse(X):
        X = X.todense()
    labels = y.reshape((-1, 1))
    np.savetxt(path, np.hstack((labels, X)), delimiter=',')
Save data as a CSV file .
59,203
def save_libsvm(X, y, path):
    """Dump (X, y) to *path* in LibSVM text format, using 1-based feature indices."""
    dump_svmlight_file(X, y, path, zero_based=False)
Save data as a LibSVM file .
59,204
def save_hdf5(X, y, path):
    """Persist (X, y) to an HDF5 file.

    A sparse X is stored as its CSR components (shape/data/indices/indptr)
    with an 'issparse' flag so load_hdf5 can reconstruct it; a dense X is
    stored whole under 'data'.
    """
    with h5py.File(path, 'w') as f:
        is_sparse = 1 if sparse.issparse(X) else 0
        f['issparse'] = is_sparse
        f['target'] = y
        if is_sparse:
            csr = X if sparse.isspmatrix_csr(X) else X.tocsr()
            f['shape'] = np.array(csr.shape)
            f['data'] = csr.data
            f['indices'] = csr.indices
            f['indptr'] = csr.indptr
        else:
            f['data'] = X
Save data as a HDF5 file .
59,205
def load_data(path, dense=False):
    """Load (X, y) from *path*, picking the reader from the file extension.

    Supported extensions: .csv, .sps (LibSVM), .h5 (HDF5).  When *dense*
    is true, a sparse X is densified before returning.
    """
    readers = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}
    extension = os.path.splitext(path)[1]
    X, y = readers[extension](path)
    if dense and sparse.issparse(X):
        X = X.todense()
    return X, y
Load data from a CSV LibSVM or HDF5 file based on the file extension .
59,206
def load_csv(path):
    """Load a CSV whose first column is the target.

    A header row is skipped when the first cell of the first line is not
    numeric (as judged by is_number).
    """
    with open(path) as f:
        first_line = f.readline().strip()
    has_header = not is_number(first_line.split(',')[0])
    X = np.loadtxt(path, delimiter=',', skiprows=1 if has_header else 0)
    y = np.array(X[:, 0]).flatten()
    return X[:, 1:], y
Load data from a CSV file .
59,207
def load_hdf5(path):
    """Load (X, y) from an HDF5 file written by save_hdf5.

    Reconstructs a CSR matrix from its stored components when the
    'issparse' flag is set; otherwise reads the dense 'data' array.
    """
    with h5py.File(path, 'r') as f:
        y = f['target'][...]
        if f['issparse'][...]:
            X = sparse.csr_matrix(
                (f['data'][...], f['indices'][...], f['indptr'][...]),
                shape=tuple(f['shape'][...]))
        else:
            X = f['data'][...]
    return X, y
Load data from a HDF5 file .
59,208
def read_sps(path):
    """Read a LibSVM file line by line, yielding (features, label) pairs.

    Each yielded pair is (list of 'index:value' tokens, int label).

    Fix: the original iterated ``open(path)`` directly and never closed the
    file handle; a ``with`` block ties its lifetime to the generator.
    """
    with open(path) as sps_file:
        for line in sps_file:
            tokens = line.rstrip().split(' ')
            yield tokens[1:], int(tokens[0])
Read a LibSVM file line-by-line.
59,209
def gini(y, p):
    """Normalized Gini coefficient of predictions *p* against labels *y*.

    Computed as the Gini sum of y ordered by p, divided by the Gini sum of
    y in its own (perfect) ordering; 1.0 means a perfect ranking.
    """
    assert y.shape == p.shape
    n_samples = y.shape[0]
    paired = np.array([y, p]).transpose()
    # Labels re-ordered by descending label, and by descending prediction.
    true_order = paired[paired[:, 0].argsort()][::-1, 0]
    pred_order = paired[paired[:, 1].argsort()][::-1, 0]
    lorenz_true = np.cumsum(true_order) / np.sum(true_order)
    lorenz_pred = np.cumsum(pred_order) / np.sum(pred_order)
    diagonal = np.linspace(1 / n_samples, 1, n_samples)
    gini_true = np.sum(diagonal - lorenz_true)
    gini_pred = np.sum(diagonal - lorenz_pred)
    return gini_pred / gini_true
Normalized Gini Coefficient .
59,210
def logloss(y, p):
    """Bounded log loss: probabilities are clipped to [EPS, 1 - EPS].

    Fix: the original bounded *p* in place (``p[p < EPS] = EPS``), silently
    mutating the caller's array; ``np.clip`` returns a bounded copy instead.
    """
    p = np.clip(p, EPS, 1 - EPS)
    return log_loss(y, p)
Bounded log loss error .
59,211
def convert(input_file_name, **kwargs):
    """Convert a CSV file into an HTML table and return it with JS inlined.

    kwargs carry CLI options: 'delimiter', 'quotechar', 'no_header', plus
    everything render_template reads.  Empty rows are dropped; when there is
    no header row, synthetic "Column N" headers are generated.
    """
    delimiter = kwargs["delimiter"] or ","
    quotechar = kwargs["quotechar"] or "|"
    if six.PY2:
        # Python 2 csv wants byte-string dialect characters.
        delimiter = delimiter.encode("utf-8")
        quotechar = quotechar.encode("utf-8")
    with open(input_file_name, "rb") as input_file:
        # NOTE(review): csv.reader taking an ``encoding`` kwarg implies this
        # is unicodecsv (stdlib csv has no such parameter) — confirm import.
        reader = csv.reader(input_file, encoding="utf-8",
                            delimiter=delimiter, quotechar=quotechar)
        csv_headers = []
        if not kwargs.get("no_header"):
            csv_headers = next(reader)
        csv_rows = [row for row in reader if row]
        if not csv_headers and len(csv_rows) > 0:
            # Fabricate headers "Column 1".."Column k" from the first row.
            end = len(csv_rows[0]) + 1
            csv_headers = ["Column {}".format(n) for n in range(1, end)]
    html = render_template(csv_headers, csv_rows, **kwargs)
    return freeze_js(html)
Convert CSV file to HTML table
59,212
def save(file_name, content):
    """Write *content* to *file_name* as UTF-8 and return the file's name."""
    with open(file_name, "w", encoding="utf-8") as out:
        out.write(content)
        return out.name
Save content to a file
59,213
def serve(content):
    """Write *content* to a temp HTML file, open it in the browser, and block.

    The process sleeps until Ctrl-C, at which point the temp file is removed.
    """
    file_name = tempfile.gettempprefix() + str(uuid.uuid4()) + ".html"
    file_path = os.path.join(tempfile.gettempdir(), file_name)
    save(file_path, content)
    webbrowser.open("file://{}".format(file_path))
    try:
        # Keep the process alive so the served file stays on disk.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        os.remove(file_path)
Write content to a temp file and serve it in browser
59,214
def render_template(table_headers, table_items, **options):
    """Render the Jinja2 DataTables template for the given headers and rows.

    Options read: caption, display_length, height, pagination,
    virtual_scroll (row limit; -1 disables, 0 always enables), export,
    export_options.  Returns the rendered HTML string.
    """
    caption = options.get("caption") or "Table"
    display_length = options.get("display_length") or -1
    height = options.get("height") or "70vh"
    default_length_menu = [-1, 10, 25, 50]
    pagination = options.get("pagination")
    virtual_scroll_limit = options.get("virtual_scroll")
    # Percent heights are translated to viewport-height units.
    height = height.replace("%", "vh")
    columns = []
    for header in table_headers:
        columns.append({"title": header})
    # Base DataTables config; keys use the library's legacy Hungarian names.
    datatable_options = {
        "columns": columns,
        "data": table_items,
        "iDisplayLength": display_length,
        "sScrollX": "100%",
        "sScrollXInner": "100%"
    }
    is_paging = pagination
    virtual_scroll = False
    scroll_y = height
    if virtual_scroll_limit:
        # Enable scroller mode only when the row count exceeds the limit.
        if virtual_scroll_limit != -1 and len(table_items) > virtual_scroll_limit:
            virtual_scroll = True
            display_length = -1
            fmt = ("\nVirtual scroll is enabled since number of rows exceeds {limit}."
                   " You can set custom row limit by setting flag -vs, --virtual-scroll."
                   " Virtual scroll can be disabled by setting the value to -1 and set it to 0 to always enable.")
            logger.warn(fmt.format(limit=virtual_scroll_limit))
            if not is_paging:
                # Scroller requires paging; force it on and tell the user.
                fmt = "\nPagination can not be disabled in virtual scroll mode."
                logger.warn(fmt)
                is_paging = True
    if is_paging and not virtual_scroll:
        # Build the page-length menu, inserting any custom display length.
        length_menu = []
        if display_length != -1:
            length_menu = sorted(default_length_menu + [display_length])
        else:
            length_menu = default_length_menu
        length_menu_label = [str("All") if i == -1 else i for i in length_menu]
        datatable_options["lengthMenu"] = [length_menu, length_menu_label]
        datatable_options["iDisplayLength"] = display_length
    if is_paging:
        datatable_options["paging"] = True
    else:
        datatable_options["paging"] = False
    if scroll_y:
        datatable_options["scrollY"] = scroll_y
    if virtual_scroll:
        # Scroller extension settings: no classic pagination, deferred render.
        datatable_options["scroller"] = True
        datatable_options["bPaginate"] = False
        datatable_options["deferRender"] = True
        datatable_options["bLengthChange"] = False
    enable_export = options.get("export")
    if enable_export:
        if options["export_options"]:
            allowed = list(options["export_options"])
        else:
            allowed = ["copy", "csv", "json", "print"]
        # "B" in the dom string activates the Buttons extension.
        datatable_options["dom"] = "Bfrtip"
        datatable_options["buttons"] = allowed
    datatable_options_json = json.dumps(datatable_options, separators=(",", ":"))
    return template.render(title=caption or "Table", caption=caption,
                           datatable_options=datatable_options_json,
                           virtual_scroll=virtual_scroll,
                           enable_export=enable_export)
Render Jinja2 template
59,215
def freeze_js(html):
    """Inline every referenced JS asset into the rendered HTML itself.

    Fix: ``finditer`` returns an iterator, which is always truthy, so the
    original ``if not matches`` guard could never fire.  The matches are
    materialized into a tuple first so the emptiness check works.
    """
    matches = tuple(js_src_pattern.finditer(html))
    if not matches:
        return html
    # Replace from the last match backwards so earlier offsets stay valid.
    for match in reversed(matches):
        file_name = match.group(1)
        file_path = os.path.join(js_files_path, file_name)
        with open(file_path, "r", encoding="utf-8") as f:
            file_content = f.read()
        fmt = '<script type="text/javascript">{}</script>'
        js_content = fmt.format(file_content)
        html = html[:match.start()] + js_content + html[match.end():]
    return html
Freeze all JS assets to the rendered html itself .
59,216
def cli(*args, **kwargs):
    """CSVtoTable command-line entry point.

    Converts the input CSV, then either serves the result in a browser or
    writes it to the output file (prompting before overwriting).
    """
    content = convert.convert(kwargs["input_file"], **kwargs)
    if kwargs["serve"]:
        convert.serve(content)
        return
    output_file = kwargs["output_file"]
    if not output_file:
        raise click.BadOptionUsage("Missing argument \"output_file\".")
    if not kwargs["overwrite"] and not prompt_overwrite(output_file):
        raise click.Abort()
    convert.save(output_file, content)
    click.secho("File converted successfully: {}".format(output_file), fg="green")
CSVtoTable commandline utility .
59,217
def activate_retry(request, activation_key, template_name='userena/activate_retry_success.html', extra_context=None):
    """Reissue a fresh activation key for a user whose key has expired.

    Renders the success template when a new key was issued; in every other
    case (feature disabled, key unknown, key not expired, reissue failed)
    redirects to the normal activation view.
    """
    activate_url = reverse('userena_activate', args=(activation_key,))
    if not userena_settings.USERENA_ACTIVATION_RETRY:
        return redirect(activate_url)
    try:
        expired = UserenaSignup.objects.check_expired_activation(activation_key)
        if expired and UserenaSignup.objects.reissue_activation(activation_key):
            if not extra_context:
                extra_context = dict()
            return ExtraContextTemplateView.as_view(
                template_name=template_name,
                extra_context=extra_context)(request)
    except UserenaSignup.DoesNotExist:
        pass
    return redirect(activate_url)
Reissue a new activation_key for the user with the expired activation_key .
59,218
def disabled_account(request, username, template_name, extra_context=None):
    """Render the disabled-account template for an inactive user.

    Raises Http404 when the user does not exist or is actually active.
    """
    user = get_object_or_404(get_user_model(), username__iexact=username)
    if user.is_active:
        raise Http404
    context = extra_context if extra_context else dict()
    context['viewed_user'] = user
    context['profile'] = get_user_profile(user=user)
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=context)(request)
Checks if the account is disabled if so returns the disabled account template .
59,219
def profile_list(request, page=1, template_name='userena/profile_list.html', paginate_by=50, extra_context=None, **kwargs):
    """Deprecated function view listing all publicly visible profiles.

    Delegates to ProfileListView; raises Http404 when the profile list is
    disabled for non-staff users.
    """
    warnings.warn("views.profile_list is deprecated. Use ProfileListView instead",
                  DeprecationWarning, stacklevel=2)
    try:
        page = int(request.GET.get('page', None))
    except (TypeError, ValueError):
        # Fall back to the *page* argument when the query value is unusable.
        page = page
    if userena_settings.USERENA_DISABLE_PROFILE_LIST and not request.user.is_staff:
        raise Http404
    queryset = get_profile_model().objects.get_visible_profiles(request.user)
    if not extra_context:
        extra_context = dict()
    return ProfileListView.as_view(queryset=queryset,
                                   paginate_by=paginate_by,
                                   page=page,
                                   template_name=template_name,
                                   extra_context=extra_context,
                                   **kwargs)(request)
Returns a list of all profiles that are public .
59,220
def get_or_create(self, um_from_user, um_to_user, message):
    """Return the Contact linking the two users, creating one if absent.

    Lookup is direction-agnostic; creation uses the given direction and
    *message* as the latest message.  Returns (contact, created).
    """
    either_direction = (Q(um_from_user=um_from_user, um_to_user=um_to_user) |
                        Q(um_from_user=um_to_user, um_to_user=um_from_user))
    try:
        return self.get(either_direction), False
    except self.model.DoesNotExist:
        contact = self.create(um_from_user=um_from_user,
                              um_to_user=um_to_user,
                              latest_message=message)
        return contact, True
Get or create a Contact
59,221
def update_contact(self, um_from_user, um_to_user, message):
    """Get or create the contact between the users and refresh its latest message."""
    contact, created = self.get_or_create(um_from_user, um_to_user, message)
    if created:
        # A freshly created contact already carries *message*.
        return contact
    contact.latest_message = message
    contact.save()
    return contact
Get or update a contact's information.
59,222
def get_contacts_for(self, user):
    """Return every contact in which *user* is either endpoint."""
    return self.filter(Q(um_from_user=user) | Q(um_to_user=user))
Returns the contacts for this user .
59,223
def send_message(self, sender, um_to_user_list, body):
    """Create and persist a message from *sender* to each listed recipient.

    Also updates the sender's contacts and emits the email_sent signal.
    Returns the saved message.
    """
    message = self.model(sender=sender, body=body)
    message.save()
    message.save_recipients(um_to_user_list)
    message.update_contacts(um_to_user_list)
    signals.email_sent.send(sender=None, msg=message)
    return message
Send a message from a user to a user .
59,224
def get_conversation_between(self, um_from_user, um_to_user):
    """Return the messages exchanged between the two users.

    Each direction is filtered by a deletion flag so removed messages are
    excluded.
    """
    # NOTE(review): the two branches use different deletion fields —
    # sender_deleted_at for one direction, messagerecipient__deleted_at for
    # the other — confirm this asymmetry is intended.
    messages = self.filter(Q(sender=um_from_user, recipients=um_to_user,
                             sender_deleted_at__isnull=True) |
                           Q(sender=um_to_user, recipients=um_from_user,
                             messagerecipient__deleted_at__isnull=True))
    return messages
Returns a conversation between two users
59,225
def count_unread_messages_for(self, user):
    """Return how many undeleted, unread messages *user* has."""
    return self.filter(user=user,
                       read_at__isnull=True,
                       deleted_at__isnull=True).count()
Returns the amount of unread messages for this user
59,226
def count_unread_messages_between(self, um_to_user, um_from_user):
    """Return how many undeleted, unread messages *um_from_user* sent to *um_to_user*."""
    return self.filter(message__sender=um_from_user,
                       user=um_to_user,
                       read_at__isnull=True,
                       deleted_at__isnull=True).count()
Returns the amount of unread messages between two users
59,227
def reissue_activation(self, activation_key):
    """Issue and mail a fresh activation key for an expired one.

    Returns True on success, False when the key is unknown or when any step
    of the reissue pipeline fails.
    """
    try:
        userena = self.get(activation_key=activation_key)
    except self.model.DoesNotExist:
        return False
    try:
        salt, new_activation_key = generate_sha1(userena.user.username)
        userena.activation_key = new_activation_key
        userena.save(using=self._db)
        # Resetting date_joined restarts the activation timeframe.
        userena.user.date_joined = get_datetime_now()
        userena.user.save(using=self._db)
        userena.send_activation_email()
        return True
    except Exception:
        # NOTE(review): this broad catch maps *any* failure (including
        # programming errors) to False — consider logging the exception.
        return False
Creates a new activation_key resetting activation timeframe when users let the previous key expire .
59,228
def check_expired_activation(self, activation_key):
    """Return whether *activation_key* has expired.

    Raises the model's DoesNotExist for keys that are not SHA1-shaped.
    """
    if not SHA1_RE.search(activation_key):
        raise self.model.DoesNotExist
    userena = self.get(activation_key=activation_key)
    return userena.activation_key_expired()
Check if activation_key is still valid .
59,229
def check_permissions(self):
    """Verify and repair the permission setup for all users.

    First ensures every permission in ASSIGNED_PERMISSIONS exists for its
    content type, then assigns any missing object permissions per user.
    Returns (created_permission_names, users_changed, warning_messages).
    """
    changed_permissions = []
    changed_users = []
    warnings = []
    # Pass 1: make sure each configured Permission row exists.
    for model, perms in ASSIGNED_PERMISSIONS.items():
        if model == 'profile':
            model_obj = get_profile_model()
        else:
            model_obj = get_user_model()
        model_content_type = ContentType.objects.get_for_model(model_obj)
        for perm in perms:
            # perm is a (codename, name) pair.
            try:
                Permission.objects.get(codename=perm[0],
                                       content_type=model_content_type)
            except Permission.DoesNotExist:
                changed_permissions.append(perm[1])
                Permission.objects.create(name=perm[1],
                                          codename=perm[0],
                                          content_type=model_content_type)
    # Pass 2: assign missing object permissions to every real user.
    for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):
        try:
            user_profile = get_user_profile(user=user)
        except ObjectDoesNotExist:
            warnings.append(_("No profile found for %(username)s")
                            % {'username': user.username})
        else:
            all_permissions = get_perms(user, user_profile) + get_perms(user, user)
            for model, perms in ASSIGNED_PERMISSIONS.items():
                if model == 'profile':
                    perm_object = get_user_profile(user=user)
                else:
                    perm_object = user
                for perm in perms:
                    if perm[0] not in all_permissions:
                        assign_perm(perm[0], user, perm_object)
                        changed_users.append(user)
    return (changed_permissions, changed_users, warnings)
Checks that all permissions are set correctly for the users .
59,230
def get_unread_message_count_for(parser, token):
    """Template tag: {% get_unread_message_count_for user as var %}.

    Parses the tag arguments and returns a MessageCount node; raises
    TemplateSyntaxError on malformed usage.
    """
    try:
        tag_name, arg = token.contents.split(None, 1)
    except ValueError:
        raise template.TemplateSyntaxError("%s tag requires arguments"
                                           % token.contents.split()[0])
    match = re.search(r'(.*?) as (\w+)', arg)
    if match is None:
        raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
    user, var_name = match.groups()
    return MessageCount(user, var_name)
Returns the unread message count for a user .
59,231
def get_unread_message_count_between(parser, token):
    """Template tag: {% get_unread_message_count_between a and b as var %}.

    Parses the tag arguments and returns a MessageCount node; raises
    TemplateSyntaxError on malformed usage.
    """
    try:
        tag_name, arg = token.contents.split(None, 1)
    except ValueError:
        raise template.TemplateSyntaxError("%s tag requires arguments"
                                           % token.contents.split()[0])
    match = re.search(r'(.*?) and (.*?) as (\w+)', arg)
    if match is None:
        raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
    um_from_user, um_to_user, var_name = match.groups()
    return MessageCount(um_from_user, var_name, um_to_user)
Returns the unread message count between two users .
59,232
def upload_to_mugshot(instance, filename):
    """Build the mugshot upload path for *instance*.

    The file is renamed to a 10-character hash of the primary key so the
    mugshot directory cannot simply be browsed for user images.
    """
    extension = filename.split('.')[-1].lower()
    salt, hash = generate_sha1(instance.pk)
    path = userena_settings.USERENA_MUGSHOT_PATH % {
        'username': instance.user.username,
        'id': instance.user.id,
        'date': instance.user.date_joined,
        'date_now': get_datetime_now().date(),
    }
    return '{0}{1}.{2}'.format(path, hash[:10], extension)
Uploads a mugshot for a user to the USERENA_MUGSHOT_PATH, saving it under a unique hash for the image. This is for privacy reasons, so others can't just browse through the mugshot directory.
59,233
def message_compose(request, recipients=None, compose_form=ComposeForm, success_url=None, template_name="umessages/message_form.html", recipient_filter=None, extra_context=None):
    """Compose and send a new private message.

    *recipients* is an optional '+'-separated username string used to
    pre-fill the "to" field.  On a valid POST the message is saved and the
    user is redirected; precedence for the redirect target is: the
    REDIRECT_FIELD_NAME value, then *success_url*, then the single
    recipient's detail page, then the message list.
    """
    initial_data = dict()
    if recipients:
        # Resolve the '+'-separated usernames to user objects.
        username_list = [r.strip() for r in recipients.split("+")]
        recipients = [u for u in get_user_model().objects.filter(username__in=username_list)]
        initial_data["to"] = recipients
    form = compose_form(initial=initial_data)
    if request.method == "POST":
        form = compose_form(request.POST)
        if form.is_valid():
            # The redirect override may arrive via GET or POST.
            requested_redirect = request.GET.get(REDIRECT_FIELD_NAME,
                                                 request.POST.get(REDIRECT_FIELD_NAME, False))
            message = form.save(request.user)
            recipients = form.cleaned_data['to']
            if userena_settings.USERENA_USE_MESSAGES:
                messages.success(request, _('Message is sent.'), fail_silently=True)
            requested_redirect = request.GET.get(REDIRECT_FIELD_NAME,
                                                 request.POST.get(REDIRECT_FIELD_NAME, False)) if False else requested_redirect
            redirect_to = reverse('userena_umessages_list')
            if requested_redirect:
                redirect_to = requested_redirect
            elif success_url:
                redirect_to = success_url
            elif len(recipients) == 1:
                redirect_to = reverse('userena_umessages_detail',
                                      kwargs={'username': recipients[0].username})
            return redirect(redirect_to)
    if not extra_context:
        extra_context = dict()
    extra_context["form"] = form
    extra_context["recipients"] = recipients
    return render(request, template_name, extra_context)
Compose a new message
59,234
def message_remove(request, undo=False):
    """Handle a POST removing (or, with *undo*, restoring) messages.

    Reads 'message_pks' from the POST body; for each message the current
    user touches either the sender-side or recipient-side deletion
    timestamp.  Redirects to REDIRECT_FIELD_NAME when given, else to the
    message list.
    """
    message_pks = request.POST.getlist('message_pks')
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME,
                                  request.POST.get(REDIRECT_FIELD_NAME, False))
    if message_pks:
        # Keep only pks that parse as integers; silently drop the rest.
        valid_message_pk_list = set()
        for pk in message_pks:
            try:
                valid_pk = int(pk)
            except (TypeError, ValueError):
                pass
            else:
                valid_message_pk_list.add(valid_pk)
        now = get_datetime_now()
        changed_message_list = set()
        for pk in valid_message_pk_list:
            message = get_object_or_404(Message, pk=pk)
            # Sender side: stamp (or clear) sender_deleted_at.
            if message.sender == request.user:
                if undo:
                    message.sender_deleted_at = None
                else:
                    message.sender_deleted_at = now
                message.save()
                changed_message_list.add(message.pk)
            # Recipient side: stamp (or clear) the per-recipient deleted_at.
            if request.user in message.recipients.all():
                mr = message.messagerecipient_set.get(user=request.user,
                                                      message=message)
                if undo:
                    mr.deleted_at = None
                else:
                    mr.deleted_at = now
                mr.save()
                changed_message_list.add(message.pk)
        if (len(changed_message_list) > 0) and userena_settings.USERENA_USE_MESSAGES:
            if undo:
                message = ungettext('Message is succesfully restored.',
                                    'Messages are succesfully restored.',
                                    len(changed_message_list))
            else:
                message = ungettext('Message is successfully removed.',
                                    'Messages are successfully removed.',
                                    len(changed_message_list))
            messages.success(request, message, fail_silently=True)
    if redirect_to:
        return redirect(redirect_to)
    else:
        return redirect(reverse('userena_umessages_list'))
A POST to remove messages .
59,235
def save(self):
    """Save the signup via the parent form, then store first and last name on the user."""
    user = super(SignupFormExtra, self).save()
    user.first_name = self.cleaned_data['first_name']
    user.last_name = self.cleaned_data['last_name']
    user.save()
    return user
Override the save method to save the first and last name to the user field .
59,236
def save(self, sender):
    """Send the composed message from *sender* to the cleaned recipients."""
    recipients = self.cleaned_data['to']
    body = self.cleaned_data['body']
    return Message.objects.send_message(sender, recipients, body)
Save the message and send it out into the wide world .
59,237
def clean_username(self):
    """Reject usernames that are already taken (confirmed or pending) or forbidden."""
    username = self.cleaned_data['username']
    try:
        get_user_model().objects.get(username__iexact=username)
    except get_user_model().DoesNotExist:
        pass
    else:
        # Name exists: distinguish an unconfirmed signup from a taken name.
        if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(
                user__username__iexact=username).exclude(
                activation_key=userena_settings.USERENA_ACTIVATED):
            raise forms.ValidationError(_('This username is already taken but not confirmed. Please check your email for verification steps.'))
        raise forms.ValidationError(_('This username is already taken.'))
    if username.lower() in userena_settings.USERENA_FORBIDDEN_USERNAMES:
        raise forms.ValidationError(_('This username is not allowed.'))
    return username
Validate that the username is alphanumeric and is not already in use . Also validates that the username is not listed in USERENA_FORBIDDEN_USERNAMES list .
59,238
def save(self):
    """Generate an unused 5-character random username, then delegate to the parent save."""
    while True:
        candidate = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
        try:
            get_user_model().objects.get(username__iexact=candidate)
        except get_user_model().DoesNotExist:
            break
    self.cleaned_data['username'] = candidate
    return super(SignupFormOnlyEmail, self).save()
Generate a random username before falling back to parent signup form
59,239
def parse_file(self, sourcepath):
    """Parse a file of one JSON object per line into a log data dict.

    Each line is handed to self.parse_line; when self.tzone is set it is
    stamped onto every entry.  Returns {'entries': [...]}.
    """
    with open(sourcepath, 'r') as logfile:
        raw_lines = logfile.readlines()
    data = {'entries': [self.parse_line(line) for line in raw_lines]}
    if self.tzone:
        # A forced timezone overrides whatever each entry carried.
        for entry in data['entries']:
            entry['tzone'] = self.tzone
    return data
Parse an object - per - line JSON file into a log data dict
59,240
def parse_file(self, sourcepath):
    """Parse a file containing a single JSON document into a log data dict.

    The document itself becomes the 'entries' value; when self.tzone is
    set it is stamped onto every entry.
    """
    with open(sourcepath, 'r') as logfile:
        data = {'entries': json.loads(logfile.read())}
    if self.tzone:
        for entry in data['entries']:
            entry['tzone'] = self.tzone
    return data
Parse single JSON object into a LogData object
59,241
def run_job(self):
    """Execute a full logdissect job: load modules, parse, merge, filter, output.

    Ctrl-C at any stage exits with status 1.
    """
    try:
        self.load_parsers()
        self.load_filters()
        self.load_outputs()
        self.config_args()
        if self.args.list_parsers:
            # list_parsers prints the available parsers and exits.
            self.list_parsers()
        if self.args.verbosemode:
            print('Loading input files')
        self.load_inputs()
        if self.args.verbosemode:
            print('Running parsers')
        self.run_parse()
        if self.args.verbosemode:
            print('Merging data')
        # Combine the per-file results into one chronologically sorted log.
        self.data_set['finalized_data'] = logdissect.utils.merge_logs(
            self.data_set['data_set'], sort=True)
        if self.args.verbosemode:
            print('Running filters')
        self.run_filters()
        if self.args.verbosemode:
            print('Running output')
        self.run_output()
    except KeyboardInterrupt:
        sys.exit(1)
Execute a logdissect job
59,242
def run_parse(self):
    """Run the selected parser over every input file.

    Stores one parsed result per file under self.data_set['data_set'].
    """
    parsed = {'data_set': []}
    for log in self.input_files:
        parsemodule = self.parse_modules[self.args.parser]
        try:
            if self.args.tzone:
                parsemodule.tzone = self.args.tzone
        except NameError:
            # No tzone flag configured; leave the parser's default.
            pass
        parsed['data_set'].append(parsemodule.parse_file(log))
    self.data_set = parsed
    del parsed
Parse one or more log files
59,243
def run_output(self):
    """Run every output module over the finalized data, then optionally echo it.

    Unless silent mode is set, the raw text of each entry is printed to
    stdout as well.
    """
    for output_format in logdissect.output.__formats__:
        module = self.output_modules[output_format]
        module.write_output(self.data_set['finalized_data'], args=self.args)
        del module
    if not self.args.silentmode:
        if self.args.verbosemode:
            print('\n==== ++++ ==== Output: ==== ++++ ====\n')
        for line in self.data_set['finalized_data']['entries']:
            print(line['raw_text'])
Output finalized data
59,244
def config_args(self):
    """Register all command-line options and parse them into self.args.

    Registration order is preserved because it controls --help output.
    Filter and output modules contribute their own argument groups.
    """
    self.arg_parser.add_argument('--version', action='version',
                                 version='%(prog)s ' + str(__version__))
    self.arg_parser.add_argument('--verbose', action='store_true',
                                 dest='verbosemode',
                                 help=_('set verbose terminal output'))
    self.arg_parser.add_argument('-s', action='store_true',
                                 dest='silentmode',
                                 help=_('silence terminal output'))
    self.arg_parser.add_argument('--list-parsers', action='store_true',
                                 dest='list_parsers',
                                 help=_('return a list of available parsers'))
    self.arg_parser.add_argument('-p', action='store', dest='parser',
                                 default='syslog',
                                 help=_('select a parser (default: syslog)'))
    self.arg_parser.add_argument('-z', '--unzip', action='store_true',
                                 dest='unzip',
                                 help=_('include files compressed with gzip'))
    self.arg_parser.add_argument('-t', action='store', dest='tzone',
                                 help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
    self.arg_parser.add_argument('files', metavar='file', nargs='*',
                                 help=_('specify input files'))
    # Groups populated by the loaded filter and output modules.
    self.arg_parser.add_argument_group(self.filter_args)
    self.arg_parser.add_argument_group(self.output_args)
    self.args = self.arg_parser.parse_args()
Set config options
59,245
def load_inputs(self):
    """Collect absolute paths of the CLI file arguments into self.input_files.

    Gzip files are accepted only when --unzip is set; bzip2/zip are
    unsupported.  Returns 0 when an unsupported compressed file stops the
    scan, 1 when a named file does not exist, None on full success.
    """
    for f in self.args.files:
        if os.path.isfile(f):
            fparts = str(f).split('.')
            if fparts[-1] == 'gz':
                if self.args.unzip:
                    fullpath = os.path.abspath(str(f))
                    self.input_files.append(fullpath)
                else:
                    # NOTE(review): returning here aborts the whole scan and
                    # silently drops all remaining files — confirm intended.
                    return 0
            elif fparts[-1] == 'bz2' or fparts[-1] == 'zip':
                # Unsupported compression; same early-abort caveat as above.
                return 0
            else:
                fullpath = os.path.abspath(str(f))
                self.input_files.append(fullpath)
        else:
            print('File ' + f + ' not found')
            return 1
Load the specified inputs
59,246
def list_parsers(self, *args):
    """Print the available parsing modules with their descriptions and exit."""
    print('==== Available parsing modules: ====\n')
    for parser_name in sorted(self.parse_modules):
        module = self.parse_modules[parser_name]
        print(module.name.ljust(16) + ': ' + module.desc)
    sys.exit(0)
Return a list of available parsing modules
59,247
def get_utc_date(entry):
    """Add 'numeric_date_stamp_utc' to *entry* by applying its 'tzone' offset.

    The stamp '0' is passed through unchanged.  The tzone string looks like
    '+0500'/'-0230'; fractional-second stamps are handled too.
    """
    if entry['numeric_date_stamp'] == '0':
        entry['numeric_date_stamp_utc'] = '0'
        return entry
    stamp = entry['numeric_date_stamp']
    fmt = '%Y%m%d%H%M%S.%f' if '.' in stamp else '%Y%m%d%H%M%S'
    local_time = datetime.strptime(stamp, fmt)
    offset = timedelta(hours=int(entry['tzone'][1:3]),
                       minutes=int(entry['tzone'][3:5]))
    # A '-HHMM' zone lies behind UTC, so the offset is added; '+' subtracts.
    if entry['tzone'][0] == '-':
        utc_time = local_time + offset
    else:
        utc_time = local_time - offset
    entry['numeric_date_stamp_utc'] = utc_time.strftime('%Y%m%d%H%M%S.%f')
    return entry
Return datestamp converted to UTC
59,248
def get_local_tzone ( ) : if localtime ( ) . tm_isdst : if altzone < 0 : tzone = '+' + str ( int ( float ( altzone ) / 60 // 60 ) ) . rjust ( 2 , '0' ) + str ( int ( float ( altzone ) / 60 % 60 ) ) . ljust ( 2 , '0' ) else : tzone = '-' + str ( int ( float ( altzone ) / 60 // 60 ) ) . rjust ( 2 , '0' ) + str ( int ( float ( altzone ) / 60 % 60 ) ) . ljust ( 2 , '0' ) else : if altzone < 0 : tzone = '+' + str ( int ( float ( timezone ) / 60 // 60 ) ) . rjust ( 2 , '0' ) + str ( int ( float ( timezone ) / 60 % 60 ) ) . ljust ( 2 , '0' ) else : tzone = '-' + str ( int ( float ( timezone ) / 60 // 60 ) ) . rjust ( 2 , '0' ) + str ( int ( float ( timezone ) / 60 % 60 ) ) . ljust ( 2 , '0' ) return tzone
Get the current time zone on the local host
59,249
def merge_logs(dataset, sort=True):
    """Merge the 'entries' lists of several log dicts into one log dict.

    When *sort* is true the merged entries are ordered by their
    'numeric_date_stamp_utc' value.
    """
    merged = {'entries': []}
    for log in dataset:
        merged['entries'].extend(log['entries'])
    if sort:
        merged['entries'].sort(key=lambda entry: entry['numeric_date_stamp_utc'])
    return merged
Merge log dictionaries together into one log dictionary
59,250
def write_output(self, data, args=None, filename=None, label=None):
    """Write raw log lines to a file, optionally labeling source-file boundaries.

    With args.label set to 'fname' or 'fpath', a '======== ... >>>>' header
    is emitted whenever the source path changes between entries.  Returns 0
    without writing when args is given but args.outlog is unset.
    """
    if args:
        if not args.outlog:
            return 0
        if not filename:
            filename = args.outlog
    previous_path = ''
    with open(str(filename), 'w') as output_file:
        for entry in data['entries']:
            if args.label:
                if entry['source_path'] == previous_path:
                    output_file.write(entry['raw_text'] + '\n')
                elif args.label == 'fname':
                    output_file.write('======== ' + entry['source_path'].split('/')[-1] + ' >>>>\n' + entry['raw_text'] + '\n')
                elif args.label == 'fpath':
                    output_file.write('======== ' + entry['source_path'] + ' >>>>\n' + entry['raw_text'] + '\n')
            else:
                output_file.write(entry['raw_text'] + '\n')
            previous_path = entry['source_path']
Write log data to a log file
59,251
def write_output(self, data, args=None, filename=None, pretty=False):
    """Serialize data['entries'] as a single JSON document on disk.

    With args given, args.sojson supplies the default filename and
    args.pretty toggles indentation; returns 0 without writing when
    args.sojson is unset.
    """
    if args:
        if not args.sojson:
            return 0
        pretty = args.pretty
        if not filename:
            filename = args.sojson
    if pretty:
        logstring = json.dumps(data['entries'], indent=2, sort_keys=True,
                               separators=(',', ': '))
    else:
        logstring = json.dumps(data['entries'], sort_keys=True)
    with open(str(filename), 'w') as output_file:
        output_file.write(logstring)
Write log data to a single JSON object
59,252
def write_output(self, data, filename=None, args=None):
    """Write data['entries'] to a file with one JSON object per line.

    With args given, args.linejson supplies the default filename; returns 0
    without writing when args.linejson is unset.
    """
    if args:
        if not args.linejson:
            return 0
        if not filename:
            filename = args.linejson
    serialized = [json.dumps(entry, sort_keys=True) for entry in data['entries']]
    with open(str(filename), 'w') as output_file:
        output_file.write('\n'.join(serialized))
Write log data to a file with one JSON object per line
59,253
def parse_file(self, sourcepath):
    """Parse a log file into a dict of parsed entries.

    Reads the file at sourcepath (gzip supported via a .gz suffix),
    parses each line with self.parse_line, reconstructs missing years
    for 'standard' datestamps, and returns a dict with keys 'entries',
    'parser', 'source_path', 'source_file', 'source_file_mtime' and
    'source_file_year'.
    """
    # compile the primary (and optional backup) datestamp regexes
    self.date_regex = re.compile(r'{}'.format(self.format_regex))
    if self.backup_format_regex:
        self.backup_date_regex = re.compile(r'{}'.format(self.backup_format_regex))
    data = {}
    data['entries'] = []
    data['parser'] = self.name
    data['source_path'] = sourcepath
    data['source_file'] = sourcepath.split('/')[-1]
    data['source_file_mtime'] = os.path.getmtime(data['source_path'])
    timestamp = datetime.fromtimestamp(data['source_file_mtime'])
    data['source_file_year'] = timestamp.year
    # entries carry no year: start from the file's mtime year and walk
    # backwards whenever the month number jumps while reading the file
    # in reverse order
    entryyear = timestamp.year
    currentmonth = '99'
    if self.datestamp_type == 'nodate':
        # seed state for parsers whose lines carry only a time of day
        self.datedata = {}
        self.datedata['timestamp'] = timestamp
        self.datedata['entry_time'] = int(timestamp.strftime('%H%M%S'))
    if not self.tzone:
        # no timezone configured; fall back to the local one
        self.backuptzone = logdissect.utils.get_local_tzone()
    fparts = sourcepath.split('.')
    if fparts[-1] == 'gz':
        with gzip.open(sourcepath, 'r') as logfile:
            loglines = reversed(logfile.readlines())
    else:
        with open(str(sourcepath), 'r') as logfile:
            loglines = reversed(logfile.readlines())
    for line in loglines:
        ourline = line.rstrip()
        entry = self.parse_line(ourline)
        if entry:
            if 'date_stamp' in self.fields:
                if self.datestamp_type == 'standard':
                    # month increased while moving backwards in time,
                    # so we crossed a year boundary
                    if int(entry['month']) > int(currentmonth):
                        entryyear = entryyear - 1
                    currentmonth = entry['month']
                    entry['numeric_date_stamp'] = str(entryyear) + entry['month'] + entry['day'] + entry['tstamp']
                    entry['year'] = str(entryyear)
                if self.tzone:
                    entry['tzone'] = self.tzone
                else:
                    entry['tzone'] = self.backuptzone
                entry = logdissect.utils.get_utc_date(entry)
            entry['raw_text'] = ourline
            entry['source_path'] = data['source_path']
            data['entries'].append(entry)
        else:
            continue
    # the file was parsed in reverse; restore original order
    data['entries'].reverse()
    return data
Parse a file into a LogData object
59,254
def parse_line(self, line):
    """Parse a single log line into a dict of fields, or return None.

    Matches the line against the primary date regex (falling back to
    the backup regex when one is configured), zips the captured groups
    with the parser's field names, normalizes the datestamp according
    to self.datestamp_type, and finally runs self.post_parse_action.
    """
    match = re.findall(self.date_regex, line)
    if match:
        fields = self.fields
    elif self.backup_format_regex and not match:
        match = re.findall(self.backup_date_regex, line)
        fields = self.backup_fields
    if match:
        entry = {}
        entry['raw_text'] = line
        entry['parser'] = self.name
        matchlist = list(zip(fields, match[0]))
        for f, v in matchlist:
            entry[f] = v
        if 'date_stamp' in entry.keys():
            # normalize the raw datestamp into structured date fields
            if self.datestamp_type == 'standard':
                entry = logdissect.utils.convert_standard_datestamp(entry)
            elif self.datestamp_type == 'iso':
                entry = logdissect.utils.convert_iso_datestamp(entry)
            elif self.datestamp_type == 'webaccess':
                entry = logdissect.utils.convert_webaccess_datestamp(entry)
            elif self.datestamp_type == 'nodate':
                entry, self.datedata = logdissect.utils.convert_nodate_datestamp(entry, self.datedata)
            elif self.datestamp_type == 'unix':
                entry = logdissect.utils.convert_unix_datestamp(entry)
        if self.datestamp_type == 'now':
            # 'now' stamps apply even when no date_stamp field was captured
            entry = logdissect.utils.convert_now_datestamp(entry)
        entry = self.post_parse_action(entry)
        return entry
    else:
        return None
Parse a line into a dictionary
59,255
def post_parse_action(self, entry):
    """Split combined host.port values into separate host/port fields.

    For each of 'source_host' and 'dest_host' matching
    self.ip_port_regex, the field is rewritten to the first four
    dot-separated components and the trailing component is stored in
    'source_port'/'dest_port' respectively.
    """
    for prefix in ('source', 'dest'):
        key = prefix + '_host'
        if key in entry.keys():
            found = self.ip_port_regex.findall(entry[key])
            if found:
                parts = found[0].split('.')
                entry[key] = '.'.join(parts[:4])
                entry[prefix + '_port'] = parts[-1]
    return entry
separate hosts and ports after entry is parsed
59,256
def find_partition_multiplex(graphs, partition_type, **kwargs):
    """Detect communities for multiplex graphs.

    Creates one partition of partition_type per layer (all layers
    weighted equally), optimises them jointly, and returns the
    membership of the first partition together with the improvement
    reported by the optimiser.
    """
    partitions = [partition_type(graph, **kwargs) for graph in graphs]
    layer_weights = [1] * len(graphs)
    optimiser = Optimiser()
    improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
    return partitions[0].membership, improvement
Detect communities for multiplex graphs .
59,257
def find_partition_temporal(graphs, partition_type, interslice_weight=1,
                            slice_attr='slice', vertex_id_attr='id',
                            edge_type_attr='type', weight_attr='weight',
                            **kwargs):
    """Detect communities for temporal graphs.

    Converts the time slices into coupled layers, optimises one
    partition per layer together with an interslice coupling layer,
    and returns (membership per time slice, improvement).
    """
    G_layers, G_interslice, G = time_slices_to_layers(graphs, interslice_weight, slice_attr=slice_attr, vertex_id_attr=vertex_id_attr, edge_type_attr=edge_type_attr, weight_attr=weight_attr)
    # only pass node_sizes/weights if the partition type accepts them
    arg_dict = {}
    if 'node_sizes' in partition_type.__init__.__code__.co_varnames:
        arg_dict['node_sizes'] = 'node_size'
    if 'weights' in partition_type.__init__.__code__.co_varnames:
        arg_dict['weights'] = 'weight'
    arg_dict.update(kwargs)
    partitions = []
    for H in G_layers:
        arg_dict['graph'] = H
        partitions.append(partition_type(**arg_dict))
    # resolution 0 so the interslice layer only rewards keeping copies
    # of the same vertex together across slices
    partition_interslice = CPMVertexPartition(G_interslice, resolution_parameter=0, node_sizes='node_size', weights=weight_attr)
    optimiser = Optimiser()
    improvement = optimiser.optimise_partition_multiplex(partitions + [partition_interslice])
    # map (slice index, vertex id) back to a community id
    membership = {(v[slice_attr], v[vertex_id_attr]): m for v, m in zip(G.vs, partitions[0].membership)}
    membership_time_slices = []
    for slice_idx, H in enumerate(graphs):
        membership_slice = [membership[(slice_idx, v[vertex_id_attr])] for v in H.vs]
        membership_time_slices.append(list(membership_slice))
    return membership_time_slices, improvement
Detect communities for temporal graphs .
59,258
def build_ext(self):
    """Return a drop-in replacement for the distutils/setuptools
    build_ext command that makes sure the igraph C core is available
    before building the louvain C extension.

    Detection order: pkg-config, an already-built core in
    "igraphcore", download-and-compile (unix-like only), and finally
    an educated guess at standard install locations.
    """
    try:
        from setuptools.command.build_ext import build_ext
    except ImportError:
        from distutils.command.build_ext import build_ext
    buildcfg = self  # captured by the closure below

    class custom_build_ext(build_ext):
        def run(self):
            if buildcfg.use_pkgconfig:
                detected = buildcfg.detect_from_pkgconfig()
            else:
                detected = False
            # a previously built core takes precedence
            if os.path.exists("igraphcore"):
                buildcfg.use_built_igraph()
                detected = True
            if not detected:
                if buildcfg.download_igraph_if_needed and is_unix_like():
                    detected = buildcfg.download_and_compile_igraph()
                if detected:
                    buildcfg.use_built_igraph()
            if not detected:
                buildcfg.use_educated_guess()
            if buildcfg.static_extension:
                # keep libm dynamic even when linking statically
                buildcfg.replace_static_libraries(exclusions=["m"])
            buildcfg.print_build_info()
            ext = first(extension for extension in self.extensions if extension.name == "louvain._c_louvain")
            buildcfg.configure(ext)
            build_ext.run(self)
    return custom_build_ext
Returns a class that can be used as a replacement for the build_ext command in distutils and that will download and compile the C core of igraph if needed .
59,259
def Bipartite(graph, resolution_parameter_01, resolution_parameter_0=0,
              resolution_parameter_1=0, degree_as_node_size=False,
              types='type', **kwargs):
    """Create three layers for bipartite partitions.

    Returns (partition_01, partition_0, partition_1): a CPM partition
    on the full graph plus one partition per vertex class built on an
    edgeless copy of the graph, with resolution parameters offset so
    that jointly optimising the three layers implements bipartite CPM.
    """
    if types is not None:
        if isinstance(types, str):
            # a string names the vertex attribute holding the classes
            types = graph.vs[types]
        else:
            types = list(types)
    if set(types) != set([0, 1]):
        # remap arbitrary class labels onto 0/1
        new_type = _ig.UniqueIdGenerator()
        types = [new_type[t] for t in types]
    if set(types) != set([0, 1]):
        # still more than two classes after remapping
        raise ValueError("More than one type specified.")
    if degree_as_node_size:
        if (graph.is_directed()):
            raise ValueError("This method is not suitable for directed graphs " +
                             "when using degree as node sizes.")
        node_sizes = graph.degree()
    else:
        node_sizes = [1] * graph.vcount()
    partition_01 = CPMVertexPartition(graph, node_sizes=node_sizes,
                                      resolution_parameter=resolution_parameter_01, **kwargs)
    # H_0/H_1 are edgeless copies of the graph, one per vertex class;
    # vertices of the other class get node size 0
    H_0 = graph.subgraph_edges([], delete_vertices=False)
    partition_0 = CPMVertexPartition(H_0, weights=None,
                                     node_sizes=[s if t == 0 else 0 for v, s, t in zip(graph.vs, node_sizes, types)],
                                     resolution_parameter=resolution_parameter_01 - resolution_parameter_0, **kwargs)
    H_1 = graph.subgraph_edges([], delete_vertices=False)
    partition_1 = CPMVertexPartition(H_1, weights=None,
                                     node_sizes=[s if t == 1 else 0 for v, s, t in zip(graph.vs, node_sizes, types)],
                                     resolution_parameter=resolution_parameter_01 - resolution_parameter_1, **kwargs)
    return partition_01, partition_0, partition_1
Create three layers for bipartite partitions .
59,260
def spacing_file(path):
    """Perform paranoid text spacing on the file at *path*.

    Reads the whole file and returns the result of spacing_text.
    """
    full_path = os.path.abspath(path)
    with open(full_path) as source:
        content = source.read()
    return spacing_text(content)
Perform paranoid text spacing from file .
59,261
def compute(self, text, lang="eng"):
    """Compute the list of events most similar to the given text.

    Enqueues the request, then polls once per second (up to 10 times)
    for the result; returns None when no result arrives in time.
    """
    params = {"lang": lang, "text": text,
              "topClustersCount": self._nrOfEventsToReturn}
    queued = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params)
    request_id = queued["requestId"]
    for _ in range(10):
        time.sleep(1)
        result = self._er.jsonRequest("/json/getEventForText/testRequest",
                                      {"requestId": request_id})
        if isinstance(result, list) and len(result) > 0:
            return result
    return None
compute the list of most similar events for the given text
59,262
def annotate(self, text, lang=None, customParams=None):
    """Identify the entities and non-entities mentioned in the text.

    customParams, when given, is merged in last and may override the
    default parameters.
    """
    payload = {"lang": lang, "text": text}
    if customParams:
        payload.update(customParams)
    return self._er.jsonRequestAnalytics("/api/v1/annotate", payload)
identify the list of entities and non-entities mentioned in the text
59,263
def sentiment(self, text, method="vocabulary"):
    """Determine the sentiment of the provided English text.

    method: "vocabulary" or "rnn"; selects the service endpoint.
    Raises AssertionError for any other method value.
    """
    assert method == "vocabulary" or method == "rnn"
    # conditional expression replaces the fragile `cond and a or b` idiom
    endpoint = "sentiment" if method == "vocabulary" else "sentimentRNN"
    return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, {"text": text})
determine the sentiment of the provided text in English language
59,264
def semanticSimilarity(self, text1, text2, distanceMeasure="cosine"):
    """Determine the semantic similarity of the two documents.

    distanceMeasure is passed through to the service unchanged.
    """
    payload = {"text1": text1,
               "text2": text2,
               "distanceMeasure": distanceMeasure}
    return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", payload)
determine the semantic similarity of the two provided documents
59,265
def extractArticleInfo(self, url, proxyUrl=None, headers=None, cookies=None):
    """Extract available information about the article at the given URL.

    dict-valued headers/cookies are JSON-encoded before sending; an
    optional proxyUrl is forwarded as-is.
    """
    params = {"url": url}
    if proxyUrl:
        params["proxyUrl"] = proxyUrl
    for name, value in (("headers", headers), ("cookies", cookies)):
        if value:
            if isinstance(value, dict):
                value = json.dumps(value)
            params[name] = value
    return self._er.jsonRequestAnalytics("/api/v1/extractArticleInfo", params)
extract all available information about an article available at the given url . Returned information will include article title body authors links in the article ...
59,266
def trainTopicOnTweets(self, twitterQuery, useTweetText=True,
                       useIdfNormalization=True, normalization="linear",
                       maxTweets=2000, maxUsedLinks=500,
                       ignoreConceptTypes=None, maxConcepts=20,
                       maxCategories=10, notifyEmailAddress=None):
    """Create a new topic and train it using tweets matching twitterQuery.

    Raises AssertionError when maxTweets exceeds 5000. An optional
    notification email and concept-type ignore list are forwarded only
    when provided.
    """
    # the message says "at most 5000" — the original used `<` and
    # wrongly rejected exactly 5000
    assert maxTweets <= 5000, "we can analyze at most 5000 tweets"
    # avoid a mutable default argument (was `ignoreConceptTypes=[]`)
    if ignoreConceptTypes is None:
        ignoreConceptTypes = []
    params = {"twitterQuery": twitterQuery,
              "useTweetText": useTweetText,
              "useIdfNormalization": useIdfNormalization,
              "normalization": normalization,
              "maxTweets": maxTweets,
              "maxUsedLinks": maxUsedLinks,
              "maxConcepts": maxConcepts,
              "maxCategories": maxCategories}
    if notifyEmailAddress:
        params["notifyEmailAddress"] = notifyEmailAddress
    if len(ignoreConceptTypes) > 0:
        params["ignoreConceptTypes"] = ignoreConceptTypes
    return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params)
create a new topic and train it using the tweets that match the twitterQuery
59,267
def trainTopicGetTrainedTopic(self, uri, maxConcepts=20, maxCategories=10,
                              ignoreConceptTypes=[], idfNormalization=True):
    """Retrieve the trained topic once training has finished.

    NOTE(review): ignoreConceptTypes is accepted but never forwarded
    to the service — confirm whether that is intended.
    """
    payload = {
        "action": "getTrainedTopic",
        "uri": uri,
        "maxConcepts": maxConcepts,
        "maxCategories": maxCategories,
        "idfNormalization": idfNormalization,
    }
    return self._er.jsonRequestAnalytics("/api/v1/trainTopic", payload)
retrieve the trained topic for which you have already finished training
59,268
def createTopicPage1():
    """Example: create a topic page directly and query it.

    Relies on a module-level `er` (EventRegistry instance) and the
    TopicPage class.
    """
    topic = TopicPage(er)
    # weighted keywords/concepts/categories define the topic
    topic.addKeyword("renewable energy", 30)
    topic.addConcept(er.getConceptUri("biofuel"), 50)
    topic.addConcept(er.getConceptUri("solar energy"), 50)
    topic.addCategory(er.getCategoryUri("renewable"), 50)
    # skip duplicate articles and articles without an associated event
    topic.articleHasDuplicateFilter("skipHasDuplicates")
    topic.articleHasEventFilter("skipArticlesWithoutEvent")
    arts1 = topic.getArticles(page=1, sortBy="rel")
    arts2 = topic.getArticles(page=2, sortBy="rel")
    events1 = topic.getEvents(page=1)
create a topic page directly
59,269
def createTopicPage2():
    """Example: create a topic page, tune thresholds and filters, and
    print the matching articles.

    Relies on module-level `er`, TopicPage, ReturnInfo and
    ArticleInfoFlags.
    """
    topic = TopicPage(er)
    topic.addCategory(er.getCategoryUri("renewable"), 50)
    topic.addKeyword("renewable energy", 30)
    topic.addConcept(er.getConceptUri("biofuel"), 50)
    topic.addConcept(er.getConceptUri("solar energy"), 50)
    # presumably limits matches to the listed concepts/keywords — confirm
    topic.restrictToSetConceptsAndKeywords(True)
    topic.setLanguages(["eng", "deu", "spa"])
    topic.setMaxDaysBack(3)
    topic.setArticleThreshold(30)
    arts1 = topic.getArticles(page=1, sortBy="date",
                              returnInfo=ReturnInfo(articleInfo=ArticleInfoFlags(concepts=True, categories=True)))
    for art in arts1.get("articles", {}).get("results", []):
        print(art)
create a topic page directly set the article threshold restrict results to set concepts and keywords
59,270
def count(self, eventRegistry):
    """Return the number of articles that match the criteria.

    Prints the error message when the service reports one and then
    returns the (possibly 0) total.
    """
    self.setRequestedResult(RequestEventArticles(**self.queryParams))
    res = eventRegistry.execQuery(self)
    if "error" in res:
        print(res["error"])
    event_uri = self.queryParams["eventUri"]
    return res.get(event_uri, {}).get("articles", {}).get("totalResults", 0)
return the number of articles that match the criteria
59,271
def initWithComplexQuery(query):
    """Create a QueryArticles instance from a complex article query.

    Accepts a ComplexArticleQuery, a JSON string, or a plain dict;
    anything else triggers an assertion failure.
    """
    q = QueryArticles()
    if isinstance(query, ComplexArticleQuery):
        q._setVal("query", json.dumps(query.getQuery()))
    elif isinstance(query, six.string_types):
        # parse only to validate that the string is well-formed JSON
        foo = json.loads(query)
        q._setVal("query", query)
    elif isinstance(query, dict):
        q._setVal("query", json.dumps(query))
    else:
        assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
    return q
create a query using a complex article query
59,272
def _getNextArticleBatch(self):
    """Download the next page of articles and append it to the list.

    Increments the page counter, returns early once past the last
    known page, and extends self._articleList with the new results.
    """
    self._articlePage += 1
    # stop once we know the total page count and have passed it
    if self._totalPages != None and self._articlePage > self._totalPages:
        return
    self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,
                                                sortBy=self._sortBy,
                                                sortByAsc=self._sortByAsc,
                                                returnInfo=self._returnInfo))
    if self._er._verboseOutput:
        print("Downloading article page %d..." % (self._articlePage))
    res = self._er.execQuery(self)
    if "error" in res:
        print("Error while obtaining a list of articles: " + res["error"])
    else:
        self._totalPages = res.get("articles", {}).get("pages", 0)
    results = res.get("articles", {}).get("results", [])
    self._articleList.extend(results)
download next batch of articles based on the article uris in the uri list
59,273
def initWithComplexQuery(query):
    """Create a QueryEvents instance from a complex event query.

    Accepts a ComplexEventQuery, a JSON string, or a plain dict;
    anything else triggers an assertion failure.
    """
    q = QueryEvents()
    if isinstance(query, ComplexEventQuery):
        q._setVal("query", json.dumps(query.getQuery()))
    elif isinstance(query, six.string_types):
        # parse only to validate that the string is well-formed JSON
        foo = json.loads(query)
        q._setVal("query", query)
    elif isinstance(query, dict):
        q._setVal("query", json.dumps(query))
    else:
        assert False, "The instance of query parameter was not a ComplexEventQuery, a string or a python dict"
    return q
create a query using a complex event query
59,274
def count(self, eventRegistry):
    """Return the number of events that match the criteria.

    Prints the error message when the service reports one and then
    returns the (possibly 0) total.
    """
    self.setRequestedResult(RequestEventsInfo())
    res = eventRegistry.execQuery(self)
    if "error" in res:
        print(res["error"])
    return res.get("events", {}).get("totalResults", 0)
return the number of events that match the criteria
59,275
def _setFlag ( self , name , val , defVal ) : if not hasattr ( self , "flags" ) : self . flags = { } if val != defVal : self . flags [ name ] = val
set the flag name to val in the flags dictionary in case val is not the same as the default value defVal
59,276
def _setVal ( self , name , val , defVal = None ) : if val == defVal : return if not hasattr ( self , "vals" ) : self . vals = { } self . vals [ name ] = val
set value of name to val in case the val ! = defVal
59,277
def _getVals ( self , prefix = "" ) : if not hasattr ( self , "vals" ) : self . vals = { } dict = { } for key in list ( self . vals . keys ( ) ) : if prefix == "" : newkey = key [ : 1 ] . lower ( ) + key [ 1 : ] if key else "" dict [ newkey ] = self . vals [ key ] else : newkey = key [ : 1 ] . upper ( ) + key [ 1 : ] if key else "" dict [ prefix + newkey ] = self . vals [ key ] return dict
return the values in the vals dict ; in case prefix is empty change the first letter of each name to lowercase otherwise use prefix + name ( with the first letter uppercased ) as the new name
59,278
def loadFromFile(fileName):
    """Load a ReturnInfo configuration from a JSON file.

    Raises AssertionError when fileName does not exist. Each top-level
    section of the JSON maps to the matching *InfoFlags constructor.
    """
    assert os.path.exists(fileName), "File " + fileName + " does not exist"
    # close the file deterministically — the original leaked the handle
    # via json.load(open(fileName))
    with open(fileName) as f:
        conf = json.load(f)
    return ReturnInfo(
        articleInfo=ArticleInfoFlags(**conf.get("articleInfo", {})),
        eventInfo=EventInfoFlags(**conf.get("eventInfo", {})),
        sourceInfo=SourceInfoFlags(**conf.get("sourceInfo", {})),
        categoryInfo=CategoryInfoFlags(**conf.get("categoryInfo", {})),
        conceptInfo=ConceptInfoFlags(**conf.get("conceptInfo", {})),
        locationInfo=LocationInfoFlags(**conf.get("locationInfo", {})),
        storyInfo=StoryInfoFlags(**conf.get("storyInfo", {})),
        conceptClassInfo=ConceptClassInfoFlags(**conf.get("conceptClassInfo", {})),
        conceptFolderInfo=ConceptFolderInfoFlags(**conf.get("conceptFolderInfo", {})))
load the configuration for the ReturnInfo from a fileName
59,279
def loadTopicPageFromER(self, uri):
    """Load an existing topic page from Event Registry by its URI.

    Starts from an empty topic page and overlays the definition
    returned by the service.
    """
    params = {
        "action": "getTopicPageJson",
        "includeConceptDescription": True,
        "includeTopicPageDefinition": True,
        "includeTopicPageOwner": True,
        "uri": uri,
    }
    self.topicPage = self._createEmptyTopicPage()
    self.concept = self.eventRegistry.jsonRequest("/json/topicPage", params)
    self.topicPage.update(self.concept.get("topicPage", {}))
load an existing topic page from Event Registry based on the topic page URI
59,280
def loadTopicPageFromFile(self, fname):
    """Load a topic page definition from a JSON file.

    Raises AssertionError when fname does not exist.
    """
    assert os.path.exists(fname)
    # context manager closes the file; the original left it open
    with open(fname, "r", encoding="utf-8") as f:
        self.topicPage = json.load(f)
load topic page from an existing file
59,281
def saveTopicPageDefinitionToFile(self, fname):
    """Save the topic page definition to fname as pretty-printed JSON."""
    # context manager flushes and closes the file; the original used
    # open(...).write(...) and never closed the handle
    with open(fname, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.topicPage, indent=4, sort_keys=True))
save the topic page definition to a file
59,282
def setArticleThreshold(self, value):
    """Set the minimum total weight an article must reach to be
    included in the results.

    `value` must be a non-negative int; stored as 'articleTreshWgt'.
    """
    assert isinstance(value, int)
    assert value >= 0
    self.topicPage["articleTreshWgt"] = value
what is the minimum total weight that an article has to have in order to get it among the results?
59,283
def setEventThreshold(self, value):
    """Set the minimum total weight an event must reach to be
    included in the results.

    `value` must be a non-negative int; stored as 'eventTreshWgt'.
    """
    assert isinstance(value, int)
    assert value >= 0
    self.topicPage["eventTreshWgt"] = value
what is the minimum total weight that an event has to have in order to get it among the results?
59,284
def setMaxDaysBack(self, maxDaysBack):
    """Limit results to at most `maxDaysBack` days of history.

    `maxDaysBack` must be an int >= 1.
    """
    assert isinstance(maxDaysBack, int), "maxDaysBack value has to be a positive integer"
    assert maxDaysBack >= 1
    self.topicPage["maxDaysBack"] = maxDaysBack
what is the maximum allowed age of the results?
59,285
def addConcept(self, conceptUri, weight, label=None, conceptType=None):
    """Add a relevant concept with the given weight to the topic page.

    Optional label and conceptType are attached when provided.
    """
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    concept = {"uri": conceptUri, "wgt": weight}
    # identity comparison (`is not None`) replaces the `!= None` anti-idiom
    if label is not None:
        concept["label"] = label
    if conceptType is not None:
        concept["type"] = conceptType
    self.topicPage["concepts"].append(concept)
add a relevant concept to the topic page
59,286
def addKeyword(self, keyword, weight):
    """Add a relevant keyword with the given weight to the topic page."""
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    item = {"keyword": keyword, "wgt": weight}
    self.topicPage["keywords"].append(item)
add a relevant keyword to the topic page
59,287
def addCategory(self, categoryUri, weight):
    """Add a relevant category with the given weight to the topic page."""
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": categoryUri, "wgt": weight}
    self.topicPage["categories"].append(entry)
add a relevant category to the topic page
59,288
def addSource(self, sourceUri, weight):
    """Add a news source with the given weight to the topic page."""
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": sourceUri, "wgt": weight}
    self.topicPage["sources"].append(entry)
add a news source to the topic page
59,289
def addSourceLocation(self, sourceLocationUri, weight):
    """Add relevant sources identified by their geographic location."""
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": sourceLocationUri, "wgt": weight}
    self.topicPage["sourceLocations"].append(entry)
add a list of relevant sources by identifying them by their geographic location
59,290
def addSourceGroup(self, sourceGroupUri, weight):
    """Add a whole source group with the given weight to the topic page."""
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": sourceGroupUri, "wgt": weight}
    self.topicPage["sourceGroups"].append(entry)
add a list of relevant sources by specifying a whole source group to the topic page
59,291
def addLocation(self, locationUri, weight):
    """Add a relevant location with the given weight to the topic page."""
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": locationUri, "wgt": weight}
    self.topicPage["locations"].append(entry)
add relevant location to the topic page
59,292
def setLanguages(self, languages):
    """Restrict the results to the given language(s).

    Accepts a single ISO3 language code or a list of codes; each code
    must be exactly three characters long.
    """
    if isinstance(languages, six.string_types):
        languages = [languages]
    for code in languages:
        assert len(code) == 3, "Expected to get language in ISO3 code"
    self.topicPage["langs"] = languages
restrict the results to the list of specified languages
59,293
def getArticles(self, page=1, count=100, sortBy="rel", sortByAsc=False,
                returnInfo=ReturnInfo()):
    """Return one page of articles matching the topic page.

    page must be >= 1 and count at most 100.
    NOTE(review): the ReturnInfo() default is evaluated once at import
    time and shared across calls — confirm it is treated as immutable.
    """
    assert page >= 1
    assert count <= 100
    params = {
        "action": "getArticlesForTopicPage",
        "resultType": "articles",
        "dataType": self.topicPage["dataType"],
        "articlesCount": count,
        "articlesSortBy": sortBy,
        "articlesSortByAsc": sortByAsc,
        "page": page,
        "topicPage": json.dumps(self.topicPage),
    }
    params.update(returnInfo.getParams("articles"))
    return self.eventRegistry.jsonRequest("/json/article", params)
return a list of articles that match the topic page
59,294
def AND(queryArr, exclude=None):
    """Create a combined query whose items are joined with AND.

    queryArr: non-empty list of CombinedQuery/BaseQuery instances.
    exclude: optional query whose matches are excluded via $not.
    Raises AssertionError on invalid argument types.
    """
    assert isinstance(queryArr, list), "provided argument as not a list"
    assert len(queryArr) > 0, "queryArr had an empty list"
    q = CombinedQuery()
    q.setQueryParam("$and", [])
    for item in queryArr:
        assert isinstance(item, (CombinedQuery, BaseQuery)), "item in the list was not a CombinedQuery or BaseQuery instance"
        q.getQuery()["$and"].append(item.getQuery())
    # identity comparison replaces the `!= None` anti-idiom
    if exclude is not None:
        assert isinstance(exclude, (CombinedQuery, BaseQuery)), "exclude parameter was not a CombinedQuery or BaseQuery instance"
        q.setQueryParam("$not", exclude.getQuery())
    return q
create a combined query with multiple items on which to perform an AND operation
59,295
async def start_pairing(self):
    """Start the pairing procedure.

    Initializes SRP, sends the first crypto-pairing message and stores
    the device's salt and public key for the later pairing steps.
    Raises when the device answers with a back-off interval.
    """
    self.srp.initialize()
    msg = messages.crypto_pairing({tlv8.TLV_METHOD: b'\x00',
                                   tlv8.TLV_SEQ_NO: b'\x01'})
    resp = await self.protocol.send_and_receive(msg, generate_identifier=False)
    pairing_data = _get_pairing_data(resp)
    if tlv8.TLV_BACK_OFF in pairing_data:
        # device asked us to slow down; report how long to wait
        time = int.from_bytes(pairing_data[tlv8.TLV_BACK_OFF], byteorder='big')
        raise Exception('back off {0}s'.format(time))
    self._atv_salt = pairing_data[tlv8.TLV_SALT]
    self._atv_pub_key = pairing_data[tlv8.TLV_PUBLIC_KEY]
Start pairing procedure .
59,296
async def finish_pairing(self, pin):
    """Finish the pairing process using the provided PIN.

    Runs the SRP proof exchange against the salt and public key stored
    by start_pairing and returns the result of the final SRP step.
    """
    self.srp.step1(pin)
    pub_key, proof = self.srp.step2(self._atv_pub_key, self._atv_salt)
    msg = messages.crypto_pairing({tlv8.TLV_SEQ_NO: b'\x03',
                                   tlv8.TLV_PUBLIC_KEY: pub_key,
                                   tlv8.TLV_PROOF: proof})
    resp = await self.protocol.send_and_receive(msg, generate_identifier=False)
    pairing_data = _get_pairing_data(resp)
    atv_proof = pairing_data[tlv8.TLV_PROOF]
    log_binary(_LOGGER, 'Device', Proof=atv_proof)
    encrypted_data = self.srp.step3()
    msg = messages.crypto_pairing({tlv8.TLV_SEQ_NO: b'\x05',
                                   tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
    resp = await self.protocol.send_and_receive(msg, generate_identifier=False)
    pairing_data = _get_pairing_data(resp)
    encrypted_data = pairing_data[tlv8.TLV_ENCRYPTED_DATA]
    return self.srp.step4(encrypted_data)
Finish pairing process .
59,297
async def verify_credentials(self):
    """Verify stored credentials with the device.

    Performs the two-message verification exchange and stores the
    derived output/input session keys on success.
    """
    _, public_key = self.srp.initialize()
    msg = messages.crypto_pairing({tlv8.TLV_SEQ_NO: b'\x01',
                                   tlv8.TLV_PUBLIC_KEY: public_key})
    resp = await self.protocol.send_and_receive(msg, generate_identifier=False)
    resp = _get_pairing_data(resp)
    session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
    encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
    log_binary(_LOGGER, 'Device', Public=self.credentials.ltpk, Encrypted=encrypted)
    encrypted_data = self.srp.verify1(self.credentials, session_pub_key, encrypted)
    msg = messages.crypto_pairing({tlv8.TLV_SEQ_NO: b'\x03',
                                   tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
    resp = await self.protocol.send_and_receive(msg, generate_identifier=False)
    self._output_key, self._input_key = self.srp.verify2()
Verify credentials with device .
59,298
def lookup_tag(name):
    """Look up a tag based on its key. Returns a DmapTag.

    Unknown names yield a generic 'unknown tag' entry.
    """
    # direct dict lookup replaces the original linear scan over keys
    # (next((_TAGS[t] for t in _TAGS if t == name), ...)), which was
    # O(n) for no benefit
    return _TAGS.get(name, DmapTag(_read_unknown, 'unknown tag'))
Look up a tag based on its key . Returns a DmapTag .
59,299
def connect_to_apple_tv(details, loop, protocol=None, session=None):
    """Connect and log in to an Apple TV.

    Picks the service to use (optionally forced via `protocol`),
    creates a ClientSession when none is given, sets up AirPlay, and
    returns a DmapAppleTV or MrpAppleTV depending on the service
    protocol.
    """
    service = _get_service_used_to_connect(details, protocol)
    # create a default session only when the caller did not supply one
    if session is None:
        session = ClientSession(loop=loop)
    airplay = _setup_airplay(loop, session, details)
    if service.protocol == PROTOCOL_DMAP:
        return DmapAppleTV(loop, session, details, airplay)
    return MrpAppleTV(loop, session, details, airplay)
Connect and log in to an Apple TV .