idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
59,700 | def genl_ctrl_resolve_grp ( sk , family_name , grp_name ) : family = genl_ctrl_probe_by_name ( sk , family_name ) if family is None : return - NLE_OBJ_NOTFOUND return genl_ctrl_grp_by_name ( family , grp_name ) | Resolve Generic Netlink family group name . |
59,701 | def _safe_read ( path , length ) : if not os . path . exists ( os . path . join ( HERE , path ) ) : return '' file_handle = codecs . open ( os . path . join ( HERE , path ) , encoding = 'utf-8' ) contents = file_handle . read ( length ) file_handle . close ( ) return contents | Read file contents . |
59,702 | def error_handler ( _ , err , arg ) : arg . value = err . error return libnl . handlers . NL_STOP | Update the mutable integer arg with the error code . |
59,703 | def callback_trigger ( msg , arg ) : gnlh = genlmsghdr ( nlmsg_data ( nlmsg_hdr ( msg ) ) ) if gnlh . cmd == nl80211 . NL80211_CMD_SCAN_ABORTED : arg . value = 1 elif gnlh . cmd == nl80211 . NL80211_CMD_NEW_SCAN_RESULTS : arg . value = 0 return libnl . handlers . NL_SKIP | Called when the kernel is done scanning . Only signals if it was successful or if it failed . No other data . |
59,704 | def callback_dump ( msg , results ) : bss = dict ( ) gnlh = genlmsghdr ( nlmsg_data ( nlmsg_hdr ( msg ) ) ) tb = dict ( ( i , None ) for i in range ( nl80211 . NL80211_ATTR_MAX + 1 ) ) nla_parse ( tb , nl80211 . NL80211_ATTR_MAX , genlmsg_attrdata ( gnlh , 0 ) , genlmsg_attrlen ( gnlh , 0 ) , None ) if not tb [ nl80211 . NL80211_ATTR_BSS ] : print ( 'WARNING: BSS info missing for an access point.' ) return libnl . handlers . NL_SKIP if nla_parse_nested ( bss , nl80211 . NL80211_BSS_MAX , tb [ nl80211 . NL80211_ATTR_BSS ] , bss_policy ) : print ( 'WARNING: Failed to parse nested attributes for an access point!' ) return libnl . handlers . NL_SKIP if not bss [ nl80211 . NL80211_BSS_BSSID ] : print ( 'WARNING: No BSSID detected for an access point!' ) return libnl . handlers . NL_SKIP if not bss [ nl80211 . NL80211_BSS_INFORMATION_ELEMENTS ] : print ( 'WARNING: No additional information available for an access point!' ) return libnl . handlers . NL_SKIP bss_parsed = parse_bss ( bss ) results [ bss_parsed [ 'bssid' ] ] = bss_parsed return libnl . handlers . NL_SKIP | Here is where SSIDs and their data is decoded from the binary data sent by the kernel . |
59,705 | def do_scan_trigger ( sk , if_index , driver_id , mcid ) : _LOGGER . debug ( 'Joining group %d.' , mcid ) ret = nl_socket_add_membership ( sk , mcid ) if ret < 0 : return ret msg = nlmsg_alloc ( ) genlmsg_put ( msg , 0 , 0 , driver_id , 0 , 0 , nl80211 . NL80211_CMD_TRIGGER_SCAN , 0 ) nla_put_u32 ( msg , nl80211 . NL80211_ATTR_IFINDEX , if_index ) ssids_to_scan = nlmsg_alloc ( ) nla_put ( ssids_to_scan , 1 , 0 , b'' ) nla_put_nested ( msg , nl80211 . NL80211_ATTR_SCAN_SSIDS , ssids_to_scan ) err = ctypes . c_int ( 1 ) results = ctypes . c_int ( - 1 ) cb = libnl . handlers . nl_cb_alloc ( libnl . handlers . NL_CB_DEFAULT ) libnl . handlers . nl_cb_set ( cb , libnl . handlers . NL_CB_VALID , libnl . handlers . NL_CB_CUSTOM , callback_trigger , results ) libnl . handlers . nl_cb_err ( cb , libnl . handlers . NL_CB_CUSTOM , error_handler , err ) libnl . handlers . nl_cb_set ( cb , libnl . handlers . NL_CB_ACK , libnl . handlers . NL_CB_CUSTOM , ack_handler , err ) libnl . handlers . nl_cb_set ( cb , libnl . handlers . NL_CB_SEQ_CHECK , libnl . handlers . NL_CB_CUSTOM , lambda * _ : libnl . handlers . NL_OK , None ) _LOGGER . debug ( 'Sending NL80211_CMD_TRIGGER_SCAN...' ) ret = nl_send_auto ( sk , msg ) if ret < 0 : return ret while err . value > 0 : _LOGGER . debug ( 'Retrieving NL80211_CMD_TRIGGER_SCAN acknowledgement...' ) ret = nl_recvmsgs ( sk , cb ) if ret < 0 : return ret if err . value < 0 : error ( 'Unknown error {0} ({1})' . format ( err . value , errmsg [ abs ( err . value ) ] ) ) while results . value < 0 : _LOGGER . debug ( 'Retrieving NL80211_CMD_TRIGGER_SCAN final response...' ) ret = nl_recvmsgs ( sk , cb ) if ret < 0 : return ret if results . value > 0 : error ( 'The kernel aborted the scan.' ) _LOGGER . debug ( 'Leaving group %d.' , mcid ) return nl_socket_drop_membership ( sk , mcid ) | Issue a scan request to the kernel and wait for it to reply with a signal . |
59,706 | def eta_letters ( seconds ) : final_days , final_hours , final_minutes , final_seconds = 0 , 0 , 0 , seconds if final_seconds >= 86400 : final_days = int ( final_seconds / 86400.0 ) final_seconds -= final_days * 86400 if final_seconds >= 3600 : final_hours = int ( final_seconds / 3600.0 ) final_seconds -= final_hours * 3600 if final_seconds >= 60 : final_minutes = int ( final_seconds / 60.0 ) final_seconds -= final_minutes * 60 final_seconds = int ( math . ceil ( final_seconds ) ) if final_days : template = '{1:d}d {2:d}h {3:02d}m {4:02d}s' elif final_hours : template = '{2:d}h {3:02d}m {4:02d}s' elif final_minutes : template = '{3:02d}m {4:02d}s' else : template = '{4:02d}s' return template . format ( final_days , final_hours , final_minutes , final_seconds ) | Convert seconds remaining into human readable strings . |
59,707 | def print_table ( data ) : table = AsciiTable ( [ COLUMNS ] ) table . justify_columns [ 2 ] = 'right' table . justify_columns [ 3 ] = 'right' table . justify_columns [ 4 ] = 'right' table_data = list ( ) for row_in in data : row_out = [ str ( row_in . get ( 'ssid' , '' ) ) . replace ( '\0' , '' ) , str ( row_in . get ( 'security' , '' ) ) , str ( row_in . get ( 'channel' , '' ) ) , str ( row_in . get ( 'frequency' , '' ) ) , str ( row_in . get ( 'signal' , '' ) ) , str ( row_in . get ( 'bssid' , '' ) ) , ] if row_out [ 3 ] : row_out [ 3 ] += ' MHz' if row_out [ 4 ] : row_out [ 4 ] += ' dBm' table_data . append ( row_out ) sort_by_column = [ c . lower ( ) for c in COLUMNS ] . index ( OPTIONS [ '--key' ] . lower ( ) ) table_data . sort ( key = lambda c : c [ sort_by_column ] , reverse = OPTIONS [ '--reverse' ] ) table . table_data . extend ( table_data ) print ( table . table ) | Print the table of detected SSIDs and their data to screen . |
59,708 | def generateObject ( self , sObjectType ) : obj = self . _sforce . factory . create ( 'ens:sObject' ) obj . type = sObjectType return obj | Generate a Salesforce object such as a Lead or Contact |
59,709 | def _marshallSObjects ( self , sObjects , tag = 'sObjects' ) : if not isinstance ( sObjects , ( tuple , list ) ) : sObjects = ( sObjects , ) if sObjects [ 0 ] . type in [ 'LeadConvert' , 'SingleEmailMessage' , 'MassEmailMessage' ] : nsPrefix = 'tns:' else : nsPrefix = 'ens:' li = [ ] for obj in sObjects : el = Element ( tag ) el . set ( 'xsi:type' , nsPrefix + obj . type ) for k , v in obj : if k == 'type' : continue if v == None : tmp = Element ( k ) tmp . set ( 'xsi:nil' , 'true' ) el . append ( tmp ) elif isinstance ( v , ( list , tuple ) ) : for value in v : el . append ( Element ( k ) . setText ( value ) ) elif isinstance ( v , suds . sudsobject . Object ) : el . append ( self . _marshallSObjects ( v , k ) ) else : el . append ( Element ( k ) . setText ( v ) ) li . append ( el ) return li | Marshall generic sObjects into a list of SAX elements |
59,710 | def _setHeaders ( self , call = None , ** kwargs ) : headers = { 'SessionHeader' : self . _sessionHeader } if 'debug_categories' in kwargs : debug_categories = kwargs [ 'debug_categories' ] headers [ 'DebuggingHeader' ] = { 'categories' : debug_categories } if call in ( 'convertLead' , 'create' , 'merge' , 'process' , 'undelete' , 'update' , 'upsert' ) : if self . _allowFieldTruncationHeader is not None : headers [ 'AllowFieldTruncationHeader' ] = self . _allowFieldTruncationHeader if call in ( 'create' , 'merge' , 'update' , 'upsert' ) : if self . _assignmentRuleHeader is not None : headers [ 'AssignmentRuleHeader' ] = self . _assignmentRuleHeader if self . _callOptions is not None : if call in ( 'create' , 'merge' , 'queryAll' , 'query' , 'queryMore' , 'retrieve' , 'search' , 'update' , 'upsert' , 'convertLead' , 'login' , 'delete' , 'describeGlobal' , 'describeLayout' , 'describeTabs' , 'describeSObject' , 'describeSObjects' , 'getDeleted' , 'getUpdated' , 'process' , 'undelete' , 'getServerTimestamp' , 'getUserInfo' , 'setPassword' , 'resetPassword' ) : headers [ 'CallOptions' ] = self . _callOptions if call in ( 'create' , 'delete' , 'resetPassword' , 'update' , 'upsert' ) : if self . _emailHeader is not None : headers [ 'EmailHeader' ] = self . _emailHeader if call in ( 'describeSObject' , 'describeSObjects' ) : if self . _localeOptions is not None : headers [ 'LocaleOptions' ] = self . _localeOptions if call == 'login' : if self . _loginScopeHeader is not None : headers [ 'LoginScopeHeader' ] = self . _loginScopeHeader if call in ( 'create' , 'merge' , 'query' , 'retrieve' , 'update' , 'upsert' ) : if self . _mruHeader is not None : headers [ 'MruHeader' ] = self . _mruHeader if call in ( 'convertLead' , 'create' , 'delete' , 'describeGlobal' , 'describeLayout' , 'describeSObject' , 'describeSObjects' , 'describeTabs' , 'merge' , 'process' , 'query' , 'retrieve' , 'search' , 'undelete' , 'update' , 'upsert' ) : if self . _packageVersionHeader is not None : headers [ 'PackageVersionHeader' ] = self . _packageVersionHeader if call in ( 'query' , 'queryAll' , 'queryMore' , 'retrieve' ) : if self . _queryOptions is not None : headers [ 'QueryOptions' ] = self . _queryOptions if call == 'delete' : if self . _userTerritoryDeleteHeader is not None : headers [ 'UserTerritoryDeleteHeader' ] = self . _userTerritoryDeleteHeader self . _sforce . set_options ( soapheaders = headers ) | Attach particular SOAP headers to the request depending on the method call made |
59,711 | def invalidateSessions ( self , sessionIds ) : self . _setHeaders ( 'invalidateSessions' ) return self . _handleResultTyping ( self . _sforce . service . invalidateSessions ( sessionIds ) ) | Invalidate a Salesforce session |
59,712 | def query ( self , queryString ) : self . _setHeaders ( 'query' ) return self . _sforce . service . query ( queryString ) | Executes a query against the specified object and returns data that matches the specified criteria . |
59,713 | def queryAll ( self , queryString ) : self . _setHeaders ( 'queryAll' ) return self . _sforce . service . queryAll ( queryString ) | Retrieves data from specified objects whether or not they have been deleted . |
59,714 | def queryMore ( self , queryLocator ) : self . _setHeaders ( 'queryMore' ) return self . _sforce . service . queryMore ( queryLocator ) | Retrieves the next batch of objects from a query . |
59,715 | def resetPassword ( self , userId ) : self . _setHeaders ( 'resetPassword' ) return self . _sforce . service . resetPassword ( userId ) | Changes a user s password to a system - generated value . |
59,716 | def setPassword ( self , userId , password ) : self . _setHeaders ( 'setPassword' ) return self . _sforce . service . setPassword ( userId , password ) | Sets the specified user s password to the specified value . |
59,717 | def nl_msgtype_lookup ( ops , msgtype ) : for i in ops . co_msgtypes : if i . mt_id == msgtype : return i return None | Lookup message type cache association . |
59,718 | def nl_cache_mngt_register ( ops ) : global cache_ops if not ops . co_name or not ops . co_obj_ops : return - NLE_INVAL with cache_ops_lock : if _nl_cache_ops_lookup ( ops . co_name ) : return - NLE_EXIST ops . co_refcnt = 0 ops . co_next = cache_ops cache_ops = ops _LOGGER . debug ( 'Registered cache operations {0}' . format ( ops . co_name ) ) return 0 | Register a set of cache operations . |
59,719 | def nl_connect ( sk , protocol ) : flags = getattr ( socket , 'SOCK_CLOEXEC' , 0 ) if sk . s_fd != - 1 : return - NLE_BAD_SOCK try : sk . socket_instance = socket . socket ( getattr ( socket , 'AF_NETLINK' , - 1 ) , socket . SOCK_RAW | flags , protocol ) except OSError as exc : return - nl_syserr2nlerr ( exc . errno ) if not sk . s_flags & NL_SOCK_BUFSIZE_SET : err = nl_socket_set_buffer_size ( sk , 0 , 0 ) if err < 0 : sk . socket_instance . close ( ) return err try : sk . socket_instance . bind ( ( sk . s_local . nl_pid , sk . s_local . nl_groups ) ) except OSError as exc : sk . socket_instance . close ( ) return - nl_syserr2nlerr ( exc . errno ) sk . s_local . nl_pid = sk . socket_instance . getsockname ( ) [ 0 ] if sk . s_local . nl_family != socket . AF_NETLINK : sk . socket_instance . close ( ) return - NLE_AF_NOSUPPORT sk . s_proto = protocol return 0 | Create file descriptor and bind socket . |
59,720 | def nl_complete_msg ( sk , msg ) : nlh = msg . nm_nlh if nlh . nlmsg_pid == NL_AUTO_PORT : nlh . nlmsg_pid = nl_socket_get_local_port ( sk ) if nlh . nlmsg_seq == NL_AUTO_SEQ : nlh . nlmsg_seq = sk . s_seq_next sk . s_seq_next += 1 if msg . nm_protocol == - 1 : msg . nm_protocol = sk . s_proto nlh . nlmsg_flags |= NLM_F_REQUEST if not sk . s_flags & NL_NO_AUTO_ACK : nlh . nlmsg_flags |= NLM_F_ACK | Finalize Netlink message . |
59,721 | def nl_send_simple ( sk , type_ , flags , buf = None , size = 0 ) : msg = nlmsg_alloc_simple ( type_ , flags ) if buf is not None and size : err = nlmsg_append ( msg , buf , size , NLMSG_ALIGNTO ) if err < 0 : return err return nl_send_auto ( sk , msg ) | Construct and transmit a Netlink message . |
59,722 | def nl_recv ( sk , nla , buf , creds = None ) : flags = 0 page_size = resource . getpagesize ( ) * 4 if sk . s_flags & NL_MSG_PEEK : flags |= socket . MSG_PEEK | socket . MSG_TRUNC iov_len = sk . s_bufsize or page_size if creds and sk . s_flags & NL_SOCK_PASSCRED : raise NotImplementedError while True : try : if hasattr ( sk . socket_instance , 'recvmsg' ) : iov , _ , msg_flags , address = sk . socket_instance . recvmsg ( iov_len , 0 , flags ) else : iov , address = sk . socket_instance . recvfrom ( iov_len , flags ) msg_flags = 0 except OSError as exc : if exc . errno == errno . EINTR : continue return - nl_syserr2nlerr ( exc . errno ) nla . nl_family = sk . socket_instance . family if not iov : return 0 if msg_flags & socket . MSG_CTRUNC : raise NotImplementedError if iov_len < len ( iov ) or msg_flags & socket . MSG_TRUNC : iov_len = len ( iov ) continue if flags : flags = 0 continue nla . nl_pid = address [ 0 ] nla . nl_groups = address [ 1 ] if creds and sk . s_flags * NL_SOCK_PASSCRED : raise NotImplementedError if iov : buf += iov return len ( buf ) | Receive data from Netlink socket . |
59,723 | def nl_recvmsgs_report ( sk , cb ) : if cb . cb_recvmsgs_ow : return int ( cb . cb_recvmsgs_ow ( sk , cb ) ) return int ( recvmsgs ( sk , cb ) ) | Receive a set of messages from a Netlink socket and report parsed messages . |
59,724 | def nl_recvmsgs ( sk , cb ) : err = nl_recvmsgs_report ( sk , cb ) if err > 0 : return 0 return int ( err ) | Receive a set of messages from a Netlink socket . |
59,725 | def nl_wait_for_ack ( sk ) : cb = nl_cb_clone ( sk . s_cb ) nl_cb_set ( cb , NL_CB_ACK , NL_CB_CUSTOM , lambda * _ : NL_STOP , None ) return int ( nl_recvmsgs ( sk , cb ) ) | Wait for ACK . |
59,726 | def get_plugin_client_settings ( self ) : settings = { } user_path = self . get_plugin_settings_path ( "User" ) def_path = self . get_plugin_settings_path ( "MavensMate" ) if def_path == None : if 'ATOM' in self . plugin_client : file_name = 'atom' elif 'SUBLIME_TEXT' in self . plugin_client : file_name = 'st3' elif 'BRACKETS' in self . plugin_client : file_name = 'brackets' settings [ 'default' ] = util . parse_json_from_file ( config . base_path + "/" + config . support_dir + "/config/" + file_name + ".json" ) if config . plugin_client_settings != None : settings [ 'user' ] = config . plugin_client_settings else : workspace = self . params . get ( 'workspace' , None ) if self . project_name != None and workspace != None : try : settings [ 'project' ] = util . parse_json_from_file ( os . path . join ( workspace , self . project_name , self . project_name + '.sublime-settings' ) ) except : debug ( 'Project settings could not be loaded' ) if not user_path == None : try : settings [ 'user' ] = util . parse_json_from_file ( user_path ) except : debug ( 'User settings could not be loaded' ) if not def_path == None : try : settings [ 'default' ] = util . parse_json_from_file ( def_path ) except : raise MMException ( 'Could not load default MavensMate settings.' ) if settings == { } : raise MMException ( 'Could not load MavensMate settings. Please ensure they contain valid JSON' ) return settings | if the default path for settings is none we re either dealing with a bad client setup or a new client like Atom . io . Let s load the settings from the default cache and optionally allow them to pipe settings in via STDIN |
59,727 | def nlmsg_for_each_attr ( nlh , hdrlen , rem ) : return nla_for_each_attr ( nlmsg_attrdata ( nlh , hdrlen ) , nlmsg_attrlen ( nlh , hdrlen ) , rem ) | Iterate over a stream of attributes in a message . |
59,728 | def nlmsg_attrdata ( nlh , hdrlen ) : data = nlmsg_data ( nlh ) return libnl . linux_private . netlink . nlattr ( bytearray_ptr ( data , libnl . linux_private . netlink . NLMSG_ALIGN ( hdrlen ) ) ) | Head of attributes data . |
59,729 | def nlmsg_attrlen ( nlh , hdrlen ) : return max ( nlmsg_len ( nlh ) - libnl . linux_private . netlink . NLMSG_ALIGN ( hdrlen ) , 0 ) | Length of attributes data . |
59,730 | def nlmsg_ok ( nlh , remaining ) : sizeof = libnl . linux_private . netlink . nlmsghdr . SIZEOF return remaining . value >= sizeof and sizeof <= nlh . nlmsg_len <= remaining . value | Check if the Netlink message fits into the remaining bytes . |
59,731 | def nlmsg_next ( nlh , remaining ) : totlen = libnl . linux_private . netlink . NLMSG_ALIGN ( nlh . nlmsg_len ) remaining . value -= totlen return libnl . linux_private . netlink . nlmsghdr ( bytearray_ptr ( nlh . bytearray , totlen ) ) | Next Netlink message in message stream . |
59,732 | def nlmsg_parse ( nlh , hdrlen , tb , maxtype , policy ) : if not nlmsg_valid_hdr ( nlh , hdrlen ) : return - NLE_MSG_TOOSHORT return nla_parse ( tb , maxtype , nlmsg_attrdata ( nlh , hdrlen ) , nlmsg_attrlen ( nlh , hdrlen ) , policy ) | Parse attributes of a Netlink message . |
59,733 | def nlmsg_find_attr ( nlh , hdrlen , attrtype ) : return nla_find ( nlmsg_attrdata ( nlh , hdrlen ) , nlmsg_attrlen ( nlh , hdrlen ) , attrtype ) | Find a specific attribute in a Netlink message . |
59,734 | def nlmsg_alloc ( len_ = default_msg_size ) : len_ = max ( libnl . linux_private . netlink . nlmsghdr . SIZEOF , len_ ) nm = nl_msg ( ) nm . nm_refcnt = 1 nm . nm_nlh = libnl . linux_private . netlink . nlmsghdr ( bytearray ( b'\0' ) * len_ ) nm . nm_protocol = - 1 nm . nm_size = len_ nm . nm_nlh . nlmsg_len = nlmsg_total_size ( 0 ) _LOGGER . debug ( 'msg 0x%x: Allocated new message, maxlen=%d' , id ( nm ) , len_ ) return nm | Allocate a new Netlink message with maximum payload size specified . |
59,735 | def nlmsg_inherit ( hdr = None ) : nm = nlmsg_alloc ( ) if hdr : new = nm . nm_nlh new . nlmsg_type = hdr . nlmsg_type new . nlmsg_flags = hdr . nlmsg_flags new . nlmsg_seq = hdr . nlmsg_seq new . nlmsg_pid = hdr . nlmsg_pid return nm | Allocate a new Netlink message and inherit Netlink message header . |
59,736 | def nlmsg_alloc_simple ( nlmsgtype , flags ) : nlh = libnl . linux_private . netlink . nlmsghdr ( nlmsg_type = nlmsgtype , nlmsg_flags = flags ) msg = nlmsg_inherit ( nlh ) _LOGGER . debug ( 'msg 0x%x: Allocated new simple message' , id ( msg ) ) return msg | Allocate a new Netlink message . |
59,737 | def nlmsg_convert ( hdr ) : nm = nlmsg_alloc ( hdr . nlmsg_len ) if not nm : return None nm . nm_nlh . bytearray = hdr . bytearray . copy ( ) [ : hdr . nlmsg_len ] return nm | Convert a Netlink message received from a Netlink socket to an nl_msg . |
59,738 | def nlmsg_reserve ( n , len_ , pad ) : nlmsg_len_ = n . nm_nlh . nlmsg_len tlen = len_ if not pad else ( ( len_ + ( pad - 1 ) ) & ~ ( pad - 1 ) ) if tlen + nlmsg_len_ > n . nm_size : return None buf = bytearray_ptr ( n . nm_nlh . bytearray , nlmsg_len_ ) n . nm_nlh . nlmsg_len += tlen if tlen > len_ : bytearray_ptr ( buf , len_ , tlen ) [ : ] = bytearray ( b'\0' ) * ( tlen - len_ ) _LOGGER . debug ( 'msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d' , id ( n ) , tlen , len_ , pad , n . nm_nlh . nlmsg_len ) return buf | Reserve room for additional data in a Netlink message . |
59,739 | def nlmsg_append ( n , data , len_ , pad ) : tmp = nlmsg_reserve ( n , len_ , pad ) if tmp is None : return - NLE_NOMEM tmp [ : len_ ] = data . bytearray [ : len_ ] _LOGGER . debug ( 'msg 0x%x: Appended %d bytes with padding %d' , id ( n ) , len_ , pad ) return 0 | Append data to tail of a Netlink message . |
59,740 | def nlmsg_put ( n , pid , seq , type_ , payload , flags ) : if n . nm_nlh . nlmsg_len < libnl . linux_private . netlink . NLMSG_HDRLEN : raise BUG nlh = n . nm_nlh nlh . nlmsg_type = type_ nlh . nlmsg_flags = flags nlh . nlmsg_pid = pid nlh . nlmsg_seq = seq _LOGGER . debug ( 'msg 0x%x: Added netlink header type=%d, flags=%d, pid=%d, seq=%d' , id ( n ) , type_ , flags , pid , seq ) if payload > 0 and nlmsg_reserve ( n , payload , libnl . linux_private . netlink . NLMSG_ALIGNTO ) is None : return None return nlh | Add a Netlink message header to a Netlink message . |
59,741 | def nl_nlmsg_flags2str ( flags , buf , _ = None ) : del buf [ : ] all_flags = ( ( 'REQUEST' , libnl . linux_private . netlink . NLM_F_REQUEST ) , ( 'MULTI' , libnl . linux_private . netlink . NLM_F_MULTI ) , ( 'ACK' , libnl . linux_private . netlink . NLM_F_ACK ) , ( 'ECHO' , libnl . linux_private . netlink . NLM_F_ECHO ) , ( 'ROOT' , libnl . linux_private . netlink . NLM_F_ROOT ) , ( 'MATCH' , libnl . linux_private . netlink . NLM_F_MATCH ) , ( 'ATOMIC' , libnl . linux_private . netlink . NLM_F_ATOMIC ) , ( 'REPLACE' , libnl . linux_private . netlink . NLM_F_REPLACE ) , ( 'EXCL' , libnl . linux_private . netlink . NLM_F_EXCL ) , ( 'CREATE' , libnl . linux_private . netlink . NLM_F_CREATE ) , ( 'APPEND' , libnl . linux_private . netlink . NLM_F_APPEND ) , ) print_flags = [ ] for k , v in all_flags : if not flags & v : continue flags &= ~ v print_flags . append ( k ) if flags : print_flags . append ( '0x{0:x}' . format ( flags ) ) buf . extend ( ',' . join ( print_flags ) . encode ( 'ascii' ) ) return buf | Netlink Message Flags Translations . |
59,742 | def dump_hex ( ofd , start , len_ , prefix = 0 ) : prefix_whitespaces = ' ' * prefix limit = 16 - ( prefix * 2 ) start_ = start [ : len_ ] for line in ( start_ [ i : i + limit ] for i in range ( 0 , len ( start_ ) , limit ) ) : hex_lines , ascii_lines = list ( ) , list ( ) for c in line : hex_lines . append ( '{0:02x}' . format ( c if hasattr ( c , 'real' ) else ord ( c ) ) ) c2 = chr ( c ) if hasattr ( c , 'real' ) else c ascii_lines . append ( c2 if c2 in string . printable [ : 95 ] else '.' ) hex_line = ' ' . join ( hex_lines ) . ljust ( limit * 3 ) ascii_line = '' . join ( ascii_lines ) ofd ( ' %s%s%s' , prefix_whitespaces , hex_line , ascii_line ) | Convert start to hex and logs it 16 bytes per log statement . |
59,743 | def nl_msg_dump ( msg , ofd = _LOGGER . debug ) : hdr = nlmsg_hdr ( msg ) ofd ( '-------------------------- BEGIN NETLINK MESSAGE ---------------------------' ) ofd ( ' [NETLINK HEADER] %d octets' , hdr . SIZEOF ) print_hdr ( ofd , msg ) if hdr . nlmsg_type == libnl . linux_private . netlink . NLMSG_ERROR : dump_error_msg ( msg , ofd ) elif nlmsg_len ( hdr ) > 0 : print_msg ( msg , ofd , hdr ) ofd ( '--------------------------- END NETLINK MESSAGE ---------------------------' ) | Dump message in human readable format to callable . |
59,744 | def nl_object_alloc ( ops ) : new = nl_object ( ) nl_init_list_head ( new . ce_list ) new . ce_ops = ops if ops . oo_constructor : ops . oo_constructor ( new ) _LOGGER . debug ( 'Allocated new object 0x%x' , id ( new ) ) return new | Allocate a new object of kind specified by the operations handle . |
59,745 | def genl_register_family ( ops ) : if not ops . o_name or ( ops . o_cmds and ops . o_ncmds <= 0 ) : return - NLE_INVAL if ops . o_id and lookup_family ( ops . o_id ) : return - NLE_EXIST if lookup_family_by_name ( ops . o_name ) : return - NLE_EXIST nl_list_add_tail ( ops . o_list , genl_ops_list ) return 0 | Register Generic Netlink family and associated commands . |
59,746 | def genl_register ( ops ) : if ops . co_protocol != NETLINK_GENERIC : return - NLE_PROTO_MISMATCH if ops . co_hdrsize < GENL_HDRSIZE ( 0 ) : return - NLE_INVAL if ops . co_genl is None : return - NLE_INVAL ops . co_genl . o_cache_ops = ops ops . co_genl . o_hdrsize = ops . co_hdrsize - GENL_HDRLEN ops . co_genl . o_name = ops . co_msgtypes [ 0 ] . mt_name ops . co_genl . o_id = ops . co_msgtypes [ 0 ] . mt_id ops . co_msg_parser = genl_msg_parser err = genl_register_family ( ops . co_genl ) if err < 0 : return err return nl_cache_mngt_register ( ops ) | Register Generic Netlink family backed cache . |
59,747 | def __setup_connection ( self ) : if self . payload != None and type ( self . payload ) is dict and 'settings' in self . payload : config . plugin_client_settings = self . payload [ 'settings' ] config . offline = self . args . offline config . connection = PluginConnection ( client = self . args . client or 'SUBLIME_TEXT_3' , ui = self . args . ui_switch , args = self . args , params = self . payload , operation = self . operation , verbose = self . args . verbose ) config . project = MavensMateProject ( params = self . payload , ui = self . args . ui_switch ) config . sfdc_client = config . project . sfdc_client | each operation requested represents a session the session holds information about the plugin running it and establishes a project object |
59,748 | def execute ( self ) : try : self . __setup_connection ( ) if self . args . ui_switch == True : config . logger . debug ( 'UI operation requested, attempting to launch MavensMate UI' ) tmp_html_file = util . generate_ui ( self . operation , self . payload , self . args ) if config . connection . plugin_client == 'ATOM' : self . __printr ( util . generate_success_response ( tmp_html_file ) ) else : util . launch_ui ( tmp_html_file ) self . __printr ( util . generate_success_response ( 'UI Generated Successfully' ) ) else : commands = get_available_commands ( ) try : command_clazz = commands [ self . operation ] ( params = self . payload , args = self . args ) except KeyError : raise MMUnsupportedOperationException ( 'Could not find the operation you requested. Be sure the command is located in mm.commands, inherits from Command (found in basecommand.py) and includes an execute method.' ) except NotImplementedError : raise MMException ( "This command is not properly implemented. Be sure it contains an 'execute' method." ) self . __printr ( command_clazz . execute ( ) ) except Exception , e : self . __printr ( e , is_exception = True ) | Executes requested command |
59,749 | def get_alert ( self , alert ) : if alert > self . alerts_count ( ) or self . alerts_count ( ) is None : return None else : return self . get ( ) [ alert - 1 ] | Recieves a day as an argument and returns the prediction for that alert if is available . If not function will return None . |
59,750 | def get_forecast ( self , latitude , longitude ) : reply = self . http_get ( self . url_builder ( latitude , longitude ) ) self . forecast = json . loads ( reply ) for item in self . forecast . keys ( ) : setattr ( self , item , self . forecast [ item ] ) | Gets the weather data from darksky api and stores it in the respective dictionaries if available . This function should be used to fetch weather information . |
59,751 | def get_forecast_fromstr ( self , reply ) : self . forecast = json . loads ( reply ) for item in self . forecast . keys ( ) : setattr ( self , item , self . forecast [ item ] ) | Gets the weather data from a darksky api response string and stores it in the respective dictionaries if available . This function should be used to fetch weather information . |
59,752 | def url_builder ( self , latitude , longitude ) : try : float ( latitude ) float ( longitude ) except TypeError : raise TypeError ( 'Latitude (%s) and Longitude (%s) must be a float number' % ( latitude , longitude ) ) url = self . _darksky_url + self . forecast_io_api_key + '/' url += str ( latitude ) . strip ( ) + ',' + str ( longitude ) . strip ( ) if self . time_url and not self . time_url . isspace ( ) : url += ',' + self . time_url . strip ( ) url += '?units=' + self . units_url . strip ( ) url += '&lang=' + self . lang_url . strip ( ) if self . exclude_url is not None : excludes = '' if self . exclude_url in self . _allowed_excludes_extends : excludes += self . exclude_url + ',' else : for item in self . exclude_url : if item in self . _allowed_excludes_extends : excludes += item + ',' if len ( excludes ) > 0 : url += '&exclude=' + excludes . rstrip ( ',' ) if self . extend_url is not None : extends = '' if self . extend_url in self . _allowed_excludes_extends : extends += self . extend_url + ',' else : for item in self . extend_url : if item in self . _allowed_excludes_extends : extends += item + ',' if len ( extends ) > 0 : url += '&extend=' + extends . rstrip ( ',' ) return url | This function is used to build the correct url to make the request to the forecast . io api . Recieves the latitude and the longitude . Return a string with the url . |
59,753 | def http_get ( self , request_url ) : try : headers = { 'Accept-Encoding' : 'gzip, deflate' } response = requests . get ( request_url , headers = headers ) except requests . exceptions . Timeout as ext : log . error ( 'Error: Timeout' , ext ) except requests . exceptions . TooManyRedirects as extmr : log . error ( 'Error: TooManyRedirects' , extmr ) except requests . exceptions . RequestException as ex : log . error ( 'Error: RequestException' , ex ) sys . exit ( 1 ) try : self . cache_control = response . headers [ 'Cache-Control' ] except KeyError as kerr : log . warning ( 'Warning: Could not get headers. %s' % kerr ) self . cache_control = None try : self . expires = response . headers [ 'Expires' ] except KeyError as kerr : log . warning ( 'Warning: Could not get headers. %s' % kerr ) self . extend_url = None try : self . x_forecast_api_calls = response . headers [ 'X-Forecast-API-Calls' ] except KeyError as kerr : log . warning ( 'Warning: Could not get headers. %s' % kerr ) self . x_forecast_api_calls = None try : self . x_responde_time = response . headers [ 'X-Response-Time' ] except KeyError as kerr : log . warning ( 'Warning: Could not get headers. %s' % kerr ) self . x_responde_time = None if response . status_code is not 200 : raise requests . exceptions . HTTPError ( 'Bad response, status code: %x' % ( response . status_code ) ) self . raw_response = response . text return self . raw_response | This function recieves the request url and it is used internally to get the information via http . Returns the response content . Raises Timeout TooManyRedirects RequestException . Raises KeyError if headers are not present . Raises HTTPError if responde code is not 200 . |
59,754 | def _map_or_starmap ( function , iterable , args , kwargs , map_or_starmap ) : arg_newarg = ( ( "parallel" , "pm_parallel" ) , ( "chunksize" , "pm_chunksize" ) , ( "pool" , "pm_pool" ) , ( "processes" , "pm_processes" ) , ( "parmap_progress" , "pm_pbar" ) ) kwargs = _deprecated_kwargs ( kwargs , arg_newarg ) chunksize = kwargs . pop ( "pm_chunksize" , None ) progress = kwargs . pop ( "pm_pbar" , False ) progress = progress and HAVE_TQDM parallel , pool , close_pool = _create_pool ( kwargs ) if parallel : func_star = _get_helper_func ( map_or_starmap ) try : if progress and close_pool : try : num_tasks = len ( iterable ) chunksize = _get_default_chunksize ( chunksize , pool , num_tasks ) result = pool . map_async ( func_star , izip ( repeat ( function ) , iterable , repeat ( list ( args ) ) , repeat ( kwargs ) ) , chunksize ) finally : pool . close ( ) try : _do_pbar ( result , num_tasks , chunksize ) finally : output = result . get ( ) else : result = pool . map_async ( func_star , izip ( repeat ( function ) , iterable , repeat ( list ( args ) ) , repeat ( kwargs ) ) , chunksize ) output = result . get ( ) finally : if close_pool : if not progress : pool . close ( ) pool . join ( ) else : output = _serial_map_or_starmap ( function , iterable , args , kwargs , progress , map_or_starmap ) return output | Shared function between parmap . map and parmap . starmap . Refer to those functions for details . |
59,755 | def _map_or_starmap_async ( function , iterable , args , kwargs , map_or_starmap ) : arg_newarg = ( ( "parallel" , "pm_parallel" ) , ( "chunksize" , "pm_chunksize" ) , ( "pool" , "pm_pool" ) , ( "processes" , "pm_processes" ) , ( "callback" , "pm_callback" ) , ( "error_callback" , "pm_error_callback" ) ) kwargs = _deprecated_kwargs ( kwargs , arg_newarg ) chunksize = kwargs . pop ( "pm_chunksize" , None ) callback = kwargs . pop ( "pm_callback" , None ) error_callback = kwargs . pop ( "pm_error_callback" , None ) parallel , pool , close_pool = _create_pool ( kwargs ) if parallel : func_star = _get_helper_func ( map_or_starmap ) try : if sys . version_info [ 0 ] == 2 : result = pool . map_async ( func_star , izip ( repeat ( function ) , iterable , repeat ( list ( args ) ) , repeat ( kwargs ) ) , chunksize , callback ) else : result = pool . map_async ( func_star , izip ( repeat ( function ) , iterable , repeat ( list ( args ) ) , repeat ( kwargs ) ) , chunksize , callback , error_callback ) finally : if close_pool : pool . close ( ) result = _ParallelAsyncResult ( result , pool ) else : result = _ParallelAsyncResult ( result ) else : values = _serial_map_or_starmap ( function , iterable , args , kwargs , False , map_or_starmap ) result = _DummyAsyncResult ( values ) return result | Shared function between parmap . map_async and parmap . starmap_async . Refer to those functions for details . |
59,756 | def map_async ( function , iterable , * args , ** kwargs ) : return _map_or_starmap_async ( function , iterable , args , kwargs , "map" ) | This function is the multiprocessing . Pool . map_async version that supports multiple arguments . |
59,757 | def starmap_async ( function , iterables , * args , ** kwargs ) : return _map_or_starmap_async ( function , iterables , args , kwargs , "starmap" ) | This function is the multiprocessing . Pool . starmap_async version that supports multiple arguments . |
59,758 | def lookup_domain ( domain , nameservers = [ ] , rtype = "A" , exclude_nameservers = [ ] , timeout = 2 ) : dns_exp = DNSQuery ( domains = [ domain ] , nameservers = nameservers , rtype = rtype , exclude_nameservers = exclude_nameservers , timeout = timeout ) return dns_exp . lookup_domain ( domain ) | Wrapper for DNSQuery method |
59,759 | def parse_out_ips ( message ) : ips = [ ] for entry in message . answer : for rdata in entry . items : ips . append ( rdata . to_text ( ) ) return ips | Given a message parse out the ips in the answer |
59,760 | def send_chaos_queries ( self ) : names = [ "HOSTNAME.BIND" , "VERSION.BIND" , "ID.SERVER" ] self . results = { 'exp-name' : "chaos-queries" } for name in names : self . results [ name ] = { } for nameserver in self . nameservers : sock = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM ) sock . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 ) sock . settimeout ( self . timeout ) query = dns . message . make_query ( name , dns . rdatatype . from_text ( "TXT" ) , dns . rdataclass . from_text ( "CH" ) ) sock . sendto ( query . to_wire ( ) , ( nameserver , 53 ) ) reads , _ , _ = select . select ( [ sock ] , [ ] , [ ] , self . timeout ) if len ( reads ) == 0 : self . results [ name ] [ nameserver ] = None else : response = reads [ 0 ] . recvfrom ( 4096 ) [ 0 ] self . results [ name ] [ nameserver ] = b64encode ( response ) return self . results | Send chaos queries to identify the DNS server and its manufacturer |
59,761 | def lookup_domains ( self ) : thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len ( self . domains ) for domain in self . domains : for nameserver in self . nameservers : wait_time = 0 while threading . active_count ( ) > self . max_threads : time . sleep ( 1 ) wait_time += 1 if wait_time > thread_wait_timeout : thread_error = True break if thread_error : self . results [ "error" ] = "Threads took too long to finish." break log_prefix = "%d/%d: " % ( ind , total_item_count ) thread = threading . Thread ( target = self . lookup_domain , args = ( domain , nameserver , log_prefix ) ) thread . setDaemon ( 1 ) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY : try : thread . start ( ) self . threads . append ( thread ) thread_open_success = True except : retries += 1 time . sleep ( THREAD_START_DELAY ) logging . error ( "%sThread start failed for %s, retrying... (%d/%d)" % ( log_prefix , domain , retries , MAX_THREAD_START_RETRY ) ) if retries == MAX_THREAD_START_RETRY : logging . error ( "%sCan't start a new thread for %s after %d retries." % ( log_prefix , domain , retries ) ) if thread_error : break ind += 1 for thread in self . threads : thread . join ( self . timeout * 3 ) return self . results | More complex DNS primitive that looks up domains concurrently |
59,762 | def start ( self , timeout = None ) : self . thread . start ( ) start_time = time . time ( ) if not timeout : timeout = self . timeout while start_time + timeout > time . time ( ) : self . thread . join ( 1 ) if self . started : return True if self . error : return False return False | Start running the command |
59,763 | def stop ( self , timeout = None ) : if not timeout : timeout = self . timeout self . kill_switch ( ) self . process . kill ( ) self . thread . join ( timeout ) try : os . killpg ( os . getpgid ( self . process . pid ) , signal . SIGTERM ) except : pass if self . stopped : return True else : return False | Stop the given command |
59,764 | def traceroute_batch ( input_list , results = { } , method = "udp" , cmd_arguments = None , delay_time = 0.1 , max_threads = 100 ) : threads = [ ] thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len ( input_list ) for domain in input_list : wait_time = 0 while threading . active_count ( ) > max_threads : time . sleep ( 1 ) wait_time += 1 if wait_time > thread_wait_timeout : thread_error = True break if thread_error : results [ "error" ] = "Threads took too long to finish." break time . sleep ( delay_time ) log_prefix = "%d/%d: " % ( ind , total_item_count ) thread = threading . Thread ( target = traceroute , args = ( domain , method , cmd_arguments , results , log_prefix ) ) ind += 1 thread . setDaemon ( 1 ) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY : try : thread . start ( ) threads . append ( thread ) thread_open_success = True except : retries += 1 time . sleep ( THREAD_START_DELAY ) logging . error ( "%sThread start failed for %s, retrying... (%d/%d)" % ( log_prefix , domain , retries , MAX_THREAD_START_RETRY ) ) if retries == MAX_THREAD_START_RETRY : logging . error ( "%sCan't start a new thread for %s after %d retries." % ( log_prefix , domain , retries ) ) for thread in threads : thread . join ( thread_wait_timeout ) return results | This is a parallel version of the traceroute primitive . |
59,765 | def _traceroute_callback ( self , line , kill_switch ) : line = line . lower ( ) if "traceroute to" in line : self . started = True if "enough privileges" in line : self . error = True self . kill_switch ( ) self . stopped = True if "service not known" in line : self . error = True self . kill_switch ( ) self . stopped = True | Callback function to handle traceroute . |
59,766 | def output_callback ( self , line , kill_switch ) : self . notifications += line + "\n" if "Initialization Sequence Completed" in line : self . started = True if "ERROR:" in line or "Cannot resolve host address:" in line : self . error = True if "process exiting" in line : self . stopped = True | Set status of openvpn according to what we process |
59,767 | def load_experiments ( self ) : logging . debug ( "Loading experiments." ) exp_dir = self . config [ 'dirs' ] [ 'experiments_dir' ] for path in glob . glob ( os . path . join ( exp_dir , '[!_]*.py' ) ) : name , ext = os . path . splitext ( os . path . basename ( path ) ) try : if name in loaded_modules : continue imp . load_source ( name , path ) loaded_modules . add ( name ) logging . debug ( "Loaded experiment \"%s(%s)\"." % ( name , path ) ) except Exception as exception : logging . exception ( "Failed to load experiment %s: %s" % ( name , exception ) ) logging . debug ( "Finished loading experiments." ) return ExperimentList . experiments | This function will return the list of experiments . |
59,768 | def _tcpdump_callback ( self , line , kill_switch ) : line = line . lower ( ) if ( "listening" in line ) or ( "reading" in line ) : self . started = True if ( "no suitable device" in line ) : self . error = True self . kill_switch ( ) if "by kernel" in line : self . stopped = True | Callback function to handle tcpdump |
59,769 | def _run ( ) : args = parse_args ( ) custom_meta = None if args . custom_meta : print "Adding custom parameters:" custom_meta = { } try : for item in args . custom_meta . split ( ',' ) : key , value = item . split ( ':' ) custom_meta [ key ] = value print 'key: %s, value: %s' % ( key , value ) except Exception as e : sys . stderr . write ( "ERROR: Can not parse custom meta tags! %s\n" % ( str ( e ) ) ) configuration = centinel . config . Configuration ( ) if args . config : configuration . parse_config ( args . config ) else : new_configuration = None if os . path . exists ( DEFAULT_CONFIG_FILE ) : configuration . parse_config ( DEFAULT_CONFIG_FILE ) else : print 'Configuration file does not exist. Creating a new one.' new_configuration = centinel . config . Configuration ( ) if not ( 'version' in configuration . params and configuration . params [ 'version' ] [ 'version' ] == centinel . __version__ ) : if not args . update_config : print ( 'WARNING: configuration file is from ' 'a different version (%s) of ' 'Centinel. Run with --update-config to update ' 'it.' % ( configuration . params [ 'version' ] [ 'version' ] ) ) else : new_configuration = centinel . config . Configuration ( ) backup_path = DEFAULT_CONFIG_FILE + ".old" new_configuration . update ( configuration , backup_path ) if new_configuration is not None : configuration = new_configuration configuration . write_out_config ( DEFAULT_CONFIG_FILE ) print 'New configuration written to %s' % ( DEFAULT_CONFIG_FILE ) if args . update_config : sys . exit ( 0 ) if args . verbose : if 'log' not in configuration . params : configuration . params [ 'log' ] = dict ( ) configuration . params [ 'log' ] [ 'log_level' ] = logging . DEBUG if custom_meta is not None : if 'custom_meta' in configuration . params : configuration . params [ 'custom_meta' ] . update ( custom_meta ) else : configuration . params [ 'custom_meta' ] = custom_meta centinel . conf = configuration . params client = centinel . client . Client ( configuration . params ) client . setup_logging ( ) if args . no_verify : configuration . params [ 'server' ] [ 'verify' ] = False user = centinel . backend . User ( configuration . params ) if args . sync : centinel . backend . sync ( configuration . params ) elif args . consent : user . informed_consent ( ) elif args . daemonize : if not os . path . exists ( args . binary ) : print "Error: no binary found to daemonize" exit ( 1 ) centinel . daemonize . daemonize ( args . auto_update , args . binary , args . user ) else : client . run ( ) | Entry point for package and cli uses |
59,770 | def get_fingerprint_batch ( input_list , results = { } , default_port = 443 , delay_time = 0.5 , max_threads = 100 ) : threads = [ ] thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len ( input_list ) for row in input_list : if len ( row . split ( ":" ) ) == 2 : host , port = row . split ( ":" ) elif len ( row . split ( ":" ) ) == 1 : host = row port = default_port else : continue port = int ( port ) wait_time = 0 while threading . active_count ( ) > max_threads : time . sleep ( 1 ) wait_time += 1 if wait_time > thread_wait_timeout : thread_error = True break if thread_error : results [ "error" ] = "Threads took too long to finish." break time . sleep ( delay_time ) log_prefix = "%d/%d: " % ( ind , total_item_count ) thread = threading . Thread ( target = get_fingerprint , args = ( host , port , results , log_prefix ) ) ind += 1 thread . setDaemon ( 1 ) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY : try : thread . start ( ) threads . append ( thread ) thread_open_success = True except : retries += 1 time . sleep ( THREAD_START_DELAY ) logging . error ( "%sThread start failed for %s, retrying... (%d/%d)" % ( log_prefix , host , retries , MAX_THREAD_START_RETRY ) ) if retries == MAX_THREAD_START_RETRY : logging . error ( "%sCan't start a new thread for %s after %d retries." % ( log_prefix , host , retries ) ) for thread in threads : thread . join ( thread_wait_timeout ) return results | This is a parallel version of the TLS fingerprint primitive . |
59,771 | def meta_redirect ( content ) : decoded = content . decode ( "utf-8" , errors = "replace" ) try : soup = BeautifulSoup . BeautifulSoup ( decoded ) except Exception as e : return None result = soup . find ( "meta" , attrs = { "http-equiv" : re . compile ( "^refresh$" , re . I ) } ) if result : try : wait , text = result [ "content" ] . split ( ";" ) text = text . strip ( ) if text . lower ( ) . startswith ( "url=" ) : url = text [ 4 : ] return url except : pass return None | Returns redirecting URL if there is a HTML refresh meta tag returns None otherwise |
59,772 | def _get_http_request ( netloc , path = "/" , headers = None , ssl = False ) : if ssl : port = 443 else : port = 80 host = netloc if len ( netloc . split ( ":" ) ) == 2 : host , port = netloc . split ( ":" ) request = { "host" : host , "port" : port , "path" : path , "ssl" : ssl , "method" : "GET" } if headers : request [ "headers" ] = headers response = { } try : conn = ICHTTPConnection ( host = host , port = port , timeout = 10 ) conn . request ( path , headers , ssl , timeout = 10 ) response [ "status" ] = conn . status response [ "reason" ] = conn . reason response [ "headers" ] = conn . headers body = conn . body try : response [ "body" ] = body . encode ( 'utf-8' ) except UnicodeDecodeError : response [ "body.b64" ] = body . encode ( 'base64' ) except Exception as err : response [ "failure" ] = str ( err ) result = { "response" : response , "request" : request } return result | Actually gets the http . Moved this to it s own private method since it is called several times for following redirects |
59,773 | def get_requests_batch ( input_list , results = { } , delay_time = 0.5 , max_threads = 100 ) : threads = [ ] thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len ( input_list ) user_agent = random . choice ( user_agent_pool ) for row in input_list : headers = { } path = "/" ssl = False theme = "http" if type ( row ) is dict : if "host" not in row : continue host = row [ "host" ] if "path" in row : path = row [ "path" ] if "headers" in row : if type ( row [ "headers" ] ) is dict : headers = row [ "headers" ] if "ssl" in row : ssl = row [ "ssl" ] theme = "https" if "url" in row : url = row [ "url" ] else : url = "%s://%s%s" % ( theme , host , path ) else : host = row url = "%s://%s%s" % ( theme , host , path ) wait_time = 0 while threading . active_count ( ) > max_threads : time . sleep ( 1 ) wait_time += 1 if wait_time > thread_wait_timeout : thread_error = True break if thread_error : results [ "error" ] = "Threads took too long to finish." break if "User-Agent" not in headers : headers [ "User-Agent" ] = user_agent time . sleep ( delay_time ) log_prefix = "%d/%d: " % ( ind , total_item_count ) thread = threading . Thread ( target = get_request , args = ( host , path , headers , ssl , results , url , log_prefix ) ) ind += 1 thread . setDaemon ( 1 ) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY : try : thread . start ( ) threads . append ( thread ) thread_open_success = True except : retries += 1 time . sleep ( THREAD_START_DELAY ) logging . error ( "%sThread start failed for %s, retrying... (%d/%d)" % ( log_prefix , url , retries , MAX_THREAD_START_RETRY ) ) if retries == MAX_THREAD_START_RETRY : logging . error ( "%sCan't start a new thread for %s after %d retries." % ( log_prefix , url , retries ) ) for thread in threads : thread . join ( thread_wait_timeout ) return results | This is a parallel version of the HTTP GET primitive . |
59,774 | def create_script_for_location ( content , destination ) : temp = tempfile . NamedTemporaryFile ( mode = 'w' , delete = False ) temp . write ( content ) temp . close ( ) shutil . move ( temp . name , destination ) cur_perms = os . stat ( destination ) . st_mode set_perms = cur_perms | stat . S_IXOTH | stat . S_IXGRP | stat . S_IXUSR os . chmod ( destination , set_perms ) | Create a script with the given content mv it to the destination and make it executable |
59,775 | def daemonize ( package , bin_loc , user ) : path = "/etc/cron.hourly/centinel-" + user if user != "root" : hourly = "" . join ( [ "#!/bin/bash\n" , "# cron job for centinel\n" , "su " , user , " -c '" , bin_loc , " --sync'\n" , "su " , user , " -c '" , bin_loc , "'\n" , "su " , user , " -c '" , bin_loc , " --sync'\n" ] ) else : hourly = "" . join ( [ "#!/bin/bash\n" , "# cron job for centinel\n" , bin_loc , " --sync\n" , bin_loc , "\n" , bin_loc , " --sync\n" ] ) create_script_for_location ( hourly , path ) if package is None : return updater = "" . join ( [ "#!/bin/bash\n" , "# autoupdater for centinel\n" "sudo pip install --upgrade " , package , "\n" ] ) create_script_for_location ( updater , "/etc/cron.daily/centinel-autoupdate" ) print "Successfully created cron jobs for user " + user | Create crontab entries to run centinel every hour and autoupdate every day |
59,776 | def create_config_files ( directory ) : template_url = ( "https://securenetconnection.com/vpnconfig/" "openvpn-template.ovpn" ) resp = requests . get ( template_url ) resp . raise_for_status ( ) template = resp . content server_url = ( "https://securenetconnection.com/vpnconfig/" "servers-cli.php" ) resp = requests . get ( server_url ) resp . raise_for_status ( ) servers = resp . content . split ( "\n" ) if not os . path . exists ( directory ) : os . makedirs ( directory ) with open ( os . path . join ( directory , "servers.txt" ) , 'w' ) as f : f . write ( resp . content ) for server_line in servers : if server_line . strip ( ) == "" : continue server_line = server_line . split ( "|" ) try : ip , desc , country , udp_sup , tcp_sup = server_line except ValueError : ip , desc , country , udp_sup , tcp_sup , no_rand = server_line with open ( os . path . join ( directory , ip + ".ovpn" ) , 'w' ) as file_o : file_o . write ( template ) tcp_sup = tcp_sup . strip ( ) if tcp_sup : port , proto = 443 , "tcp" else : port , proto = 53 , "udp" file_o . write ( "remote {0} {1}\n" . format ( ip , port ) ) file_o . write ( "proto {0}\n" . format ( proto ) ) file_o . write ( "up /etc/openvpn/update-resolv-conf\n" ) file_o . write ( "down /etc/openvpn/update-resolv-conf\n" ) | Create all available VPN configuration files in the given directory |
59,777 | def sync_scheduler ( self ) : url = "%s/%s/%s" % ( self . config [ 'server' ] [ 'server_url' ] , "experiments" , "scheduler.info" ) try : req = requests . get ( url , proxies = self . config [ 'proxy' ] [ 'proxy' ] , auth = self . auth , verify = self . verify ) req . raise_for_status ( ) except Exception as exp : logging . exception ( "Error trying to download scheduler.info: %s" % exp ) raise exp try : server_sched = json . loads ( req . content ) except Exception as exp : logging . exception ( "Error parsing server scheduler: %s" % exp ) raise exp sched_filename = os . path . join ( self . config [ 'dirs' ] [ 'experiments_dir' ] , 'scheduler.info' ) if not os . path . exists ( sched_filename ) : with open ( sched_filename , 'w' ) as file_p : json . dump ( server_sched , file_p , indent = 2 , separators = ( ',' , ': ' ) ) return client_sched = { } try : with open ( sched_filename , 'r' ) as file_p : client_sched = json . load ( file_p ) except Exception as exp : client_sched = { } logging . exception ( "Error loading scheduler file: %s" % exp ) logging . info ( "Making an empty scheduler" ) client_exp_keys = client_sched . keys ( ) for exp in client_exp_keys : if exp not in server_sched : del client_sched [ exp ] for exp in server_sched : if exp in client_sched : client_sched [ exp ] [ 'frequency' ] = server_sched [ exp ] [ 'frequency' ] else : client_sched [ exp ] = server_sched [ exp ] with open ( sched_filename , 'w' ) as file_p : json . dump ( client_sched , file_p , indent = 2 , separators = ( ',' , ': ' ) ) | Download the scheduler . info file and perform a smart comparison with what we currently have so that we don t overwrite the last_run timestamp |
59,778 | def informed_consent ( self ) : if self . typeable_handle is None : consent_url = [ self . config [ 'server' ] [ 'server_url' ] , "/get_initial_consent?username=" ] consent_url . append ( urlsafe_b64encode ( self . username ) ) consent_url . append ( "&password=" ) consent_url . append ( urlsafe_b64encode ( self . password ) ) else : consent_url = [ self . config [ 'server' ] [ 'server_url' ] , "/consent/" ] consent_url . append ( self . typeable_handle ) consent_url = "" . join ( consent_url ) print "Please go to %s to give your consent." % ( consent_url ) return consent_url | Create a URL for the user to give their consent through |
59,779 | def return_abs_path ( directory , path ) : if directory is None or path is None : return directory = os . path . expanduser ( directory ) return os . path . abspath ( os . path . join ( directory , path ) ) | Unfortunately Python is not smart enough to return an absolute path with tilde expansion so I writing functionality to do this |
59,780 | def _run ( ) : args = parse_args ( ) signal . signal ( signal . SIGTERM , signal_handler ) signal . signal ( signal . SIGINT , signal_handler ) log_formatter = logging . Formatter ( "%(asctime)s %(filename)s(line %(lineno)d) " "%(levelname)s: %(message)s" ) root_logger = logging . getLogger ( ) root_logger . setLevel ( logging . INFO ) console_handler = logging . StreamHandler ( ) console_handler . setFormatter ( log_formatter ) root_logger . addHandler ( console_handler ) if args . log_file : file_handler = logging . FileHandler ( args . log_file ) file_handler . setFormatter ( log_formatter ) root_logger . addHandler ( file_handler ) if args . vm_num < 1 : print "vm_num value cannot be negative!" return if args . vm_index < 1 or args . vm_index > args . vm_num : print "vm_index value cannot be negative or greater than vm_num!" return if args . create_conf_dir : if args . create_HMA : hma_dir = return_abs_path ( args . create_conf_dir , 'vpns' ) hma . create_config_files ( hma_dir ) elif args . create_IPVANISH : ipvanish_dir = return_abs_path ( args . create_conf_dir , 'vpns' ) ipvanish . create_config_files ( ipvanish_dir ) elif args . create_PUREVPN : purevpn_dir = return_abs_path ( args . create_conf_dir , 'vpns' ) purevpn . create_config_files ( purevpn_dir ) elif args . create_VPNGATE : vpngate_dir = return_abs_path ( args . create_conf_dir , 'vpns' ) vpngate . create_config_files ( vpngate_dir ) create_config_files ( args . create_conf_dir ) else : if ( args . tls_auth is not None and args . key_direction is None ) or ( args . tls_auth is None and args . key_direction is not None ) : logging . error ( "tls_auth and key_direction must be specified " "together!" ) return scan_vpns ( directory = args . directory , auth_file = args . auth_file , crt_file = args . crt_file , tls_auth = args . tls_auth , key_direction = args . key_direction , exclude_list = args . exclude_list , shuffle_lists = args . shuffle_lists , vm_num = args . vm_num , vm_index = args . vm_index , reduce_vp = args . reduce_vp ) | Entry point for all uses of centinel |
59,781 | def parse_config ( self , config_file ) : with open ( config_file , 'r' ) as f : config = json . load ( f ) self . params = config if self . params [ 'proxy' ] [ 'proxy_type' ] : self . params [ 'proxy' ] = { self . params [ 'proxy' ] [ 'proxy_type' ] : self . params [ 'proxy' ] [ 'proxy_url' ] } | Given a configuration file read in and interpret the results |
59,782 | def update ( self , old , backup_path = None ) : for category in old . params . keys ( ) : for parameter in old . params [ category ] . keys ( ) : if ( category in self . params and parameter in self . params [ category ] and ( old . params [ category ] [ parameter ] != self . params [ category ] [ parameter ] ) and ( category != "version" ) ) : print ( "Config value '%s.%s' " "in old configuration is different " "from the new version\n" "[old value] = %s\n" "[new value] = %s" "" % ( category , parameter , old . params [ category ] [ parameter ] , self . params [ category ] [ parameter ] ) ) answer = raw_input ( "Do you want to overwrite? ([y]/n) " ) while answer . lower ( ) not in [ 'y' , 'yes' , 'n' , 'no' ] : answer = raw_input ( "Answer not recongnized. Enter 'y' or 'n'. " ) if answer in [ 'n' , 'no' ] : old_value = old . params [ category ] [ parameter ] self . params [ category ] [ parameter ] = old_value elif not ( category in self . params and parameter in self . params [ category ] ) : print ( "Deprecated config option '%s.%s' has " "been removed." % ( category , parameter ) ) if backup_path is not None : old . write_out_config ( backup_path ) print "Backup saved in %s." % backup_path | Update the old configuration file with new values . |
59,783 | def write_out_config ( self , config_file ) : with open ( config_file , 'w' ) as f : json . dump ( self . params , f , indent = 2 , separators = ( ',' , ': ' ) ) | Write out the configuration file |
59,784 | def divide_url ( self , url ) : if 'https://' in url : host = url [ 8 : ] . split ( '/' ) [ 0 ] path = url [ 8 + len ( host ) : ] elif 'http://' in url : host = url [ 7 : ] . split ( '/' ) [ 0 ] path = url [ 7 + len ( host ) : ] else : host = url . split ( '/' ) [ 0 ] path = url [ len ( host ) : ] return host , path | divide url into host and path two parts |
59,785 | def hash_folder ( folder , regex = '[!_]*' ) : file_hashes = { } for path in glob . glob ( os . path . join ( folder , regex ) ) : if not os . path . isfile ( path ) : continue with open ( path , 'r' ) as fileP : md5_hash = hashlib . md5 ( fileP . read ( ) ) . digest ( ) file_name = os . path . basename ( path ) file_hashes [ file_name ] = urlsafe_b64encode ( md5_hash ) return file_hashes | Get the md5 sum of each file in the folder and return to the user |
59,786 | def compute_files_to_download ( client_hashes , server_hashes ) : to_dload , to_delete = [ ] , [ ] for filename in server_hashes : if filename not in client_hashes : to_dload . append ( filename ) continue if client_hashes [ filename ] != server_hashes [ filename ] : to_dload . append ( filename ) for filename in client_hashes : if filename not in server_hashes : to_delete . append ( filename ) return [ to_dload , to_delete ] | Given a dictionary of file hashes from the client and the server specify which files should be downloaded from the server |
59,787 | def spinner ( beep = False , disable = False , force = False ) : return Spinner ( beep , disable , force ) | This function creates a context manager that is used to display a spinner on stdout as long as the context has not exited . |
59,788 | def verifier ( self , url ) : webbrowser . open ( url ) print ( 'A browser should have opened up with a link to allow us to access' ) print ( 'your account, follow the instructions on the link and paste the verifier' ) print ( 'Code into here to give us access, if the browser didn\'t open, the link is:' ) print ( url ) print ( ) return input ( 'Verifier: ' ) . lstrip ( " " ) . rstrip ( " " ) | Will ask user to click link to accept app and write code |
59,789 | def write_config ( self ) : if not os . path . exists ( os . path . dirname ( self . config_file ) ) : os . makedirs ( os . path . dirname ( self . config_file ) ) with open ( self . config_file , 'w' ) as f : f . write ( json . dumps ( self . config ) ) f . close ( ) | Write config to file |
59,790 | def read_config ( self ) : try : with open ( self . config_file , 'r' ) as f : self . config = json . loads ( f . read ( ) ) f . close ( ) except IOError : return False return True | Read config from file |
59,791 | def post_note ( self ) : if self . args . note_title : note_title = self . args . note_title else : note_title = None note_content = self . args . note_content mynote = self . pump . Note ( display_name = note_title , content = note_content ) mynote . to = self . pump . me . followers mynote . cc = self . pump . Public mynote . send ( ) return mynote . id or None | Post note and return the URL of the posted note |
59,792 | def get_obj_id ( self , item ) : if item is not None : if isinstance ( item , six . string_types ) : return item elif hasattr ( item , 'id' ) : return item . id | Get the id of a PumpObject . |
59,793 | def get_page ( self , url ) : if url : data = self . feed . _request ( url , offset = self . _offset , since = self . _since , before = self . _before ) self . _before = False if self . _before is not None else None self . _since = False if self . _since is not None else None if getattr ( self . feed , 'issue65' , False ) : self . _offset = False if self . _since is not None : return reversed ( data [ 'items' ] ) else : return data [ 'items' ] else : return [ ] | Get a page of items from API |
59,794 | def done ( self ) : if self . _done : return self . _done if self . _limit is None : self . _done = False elif self . itemcount >= self . _limit : self . _done = True return self . _done | Check if we should stop returning objects |
59,795 | def _build_cache ( self ) : self . cache = [ ] if self . done : return for i in ( self . get_cached ( ) if self . _cached else self . get_page ( self . url ) ) : if not self . _cached : if not i . get ( "objectType" ) : i [ "objectType" ] = self . feed . object_types [ 0 ] obj = Mapper ( pypump = self . feed . _pump ) . get_object ( i ) else : obj = i self . cache . append ( obj ) if len ( self . cache ) <= 0 : self . _done = True if getattr ( self . feed , 'issue65' , False ) : if self . _offset is None : self . _offset = 0 self . _offset += 20 elif self . _since is not None : if self . feed . links . get ( 'prev' ) : self . url = self . feed . links [ 'prev' ] del self . feed . links [ 'prev' ] else : if self . feed . links . get ( 'next' ) : self . url = self . feed . links [ 'next' ] del self . feed . links [ 'next' ] else : self . url = None | Build a list of objects from feed s cached items or API page |
59,796 | def items ( self , offset = None , limit = 20 , since = None , before = None , * args , ** kwargs ) : return ItemList ( self , offset = offset , limit = limit , since = since , before = before , cached = self . is_cached ) | Get a feed s items . |
59,797 | def direct ( self ) : url = self . _subfeed ( "direct" ) if "direct" in self . url or "major" in self . url or "minor" in self . url : return self if self . _direct is None : self . _direct = self . __class__ ( url , pypump = self . _pump ) return self . _direct | Direct inbox feed contains activities addressed directly to the owner of the inbox . |
59,798 | def major ( self ) : url = self . _subfeed ( "major" ) if "major" in self . url or "minor" in self . url : return self if self . _major is None : self . _major = self . __class__ ( url , pypump = self . _pump ) return self . _major | Major inbox feed contains major activities such as notes and images . |
59,799 | def minor ( self ) : url = self . _subfeed ( "minor" ) if "minor" in self . url or "major" in self . url : return self if self . _minor is None : self . _minor = self . __class__ ( url , pypump = self . _pump ) return self . _minor | Minor inbox feed contains minor activities such as likes shares and follows . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.