| idx (int64, 0 to 63k) | question (string, 61 to 4.03k chars) | target (string, 6 to 1.23k chars) |
|---|---|---|
| 3,800 | `def edit_message(current): current.output = {'status': 'OK', 'code': 200} in_msg = current.input['message'] try: msg = Message(current).objects.get(sender_id=current.user_id, key=in_msg['key']) msg.body = in_msg['body'] msg.save() except ObjectDoesNotExist: raise HTTPError(404, "")` | Edit a message the user owns. |
| 3,801 | `def flag_message(current): current.output = {'status': 'Created', 'code': 201} FlaggedMessage.objects.get_or_create(user_id=current.user_id, message_id=current.input['key'])` | Flag an inappropriate message. |
| 3,802 | `def unflag_message(current): current.output = {'status': 'OK', 'code': 200} FlaggedMessage(current).objects.filter(user_id=current.user_id, message_id=current.input['key']).delete()` | Remove the flag from a message. |
| 3,803 | `def get_message_actions(current): current.output = {'status': 'OK', 'code': 200, 'actions': Message.objects.get(current.input['key']).get_actions_for(current.user)}` | Returns the actions the current user can take on the given message key. |
| 3,804 | `def add_to_favorites(current): msg = Message.objects.get(current.input['key']) current.output = {'status': 'Created', 'code': 201} fav, new = Favorite.objects.get_or_create(user_id=current.user_id, message=msg) current.output['favorite_key'] = fav.key` | Favorite a message. |
| 3,805 | `def remove_from_favorites(current): try: current.output = {'status': 'OK', 'code': 200} Favorite(current).objects.get(user_id=current.user_id, key=current.input['key']).delete() except ObjectDoesNotExist: raise HTTPError(404, "")` | Remove a message from favorites. |
| 3,806 | `def list_favorites(current): current.output = {'status': 'OK', 'code': 200, 'favorites': []} query_set = Favorite(current).objects.filter(user_id=current.user_id) if current.input['channel_key']: query_set = query_set.filter(channel_id=current.input['channel_key']) current.output['favorites'] = [{'key': fav.key, 'channel_key': fav.channel.key, 'message_key': fav.message.key, 'message_summary': fav.summary, 'channel_name': fav.channel_name} for fav in query_set]` | List the user's favorites. If channel_key is given, only favorites belonging to that channel are returned. |
| 3,807 | `def get_or_create_direct_channel(cls, initiator_key, receiver_key): existing = cls.objects.OR().filter(code_name='%s_%s' % (initiator_key, receiver_key)).filter(code_name='%s_%s' % (receiver_key, initiator_key)) receiver_name = UserModel.objects.get(receiver_key).full_name if existing: channel = existing[0] else: channel_name = '%s_%s' % (initiator_key, receiver_key) channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save() with BlockSave(Subscriber): Subscriber.objects.get_or_create(channel=channel, user_id=initiator_key, name=receiver_name) Subscriber.objects.get_or_create(channel=channel, user_id=receiver_key, name=UserModel.objects.get(initiator_key).full_name) return channel, receiver_name` | Creates a direct messaging channel between two users. |
| 3,808 | `def create_exchange(self): mq_channel = self._connect_mq() mq_channel.exchange_declare(exchange=self.code_name, exchange_type='fanout', durable=True)` | Creates the MQ exchange for this channel. Needs to be done only once. |
| 3,809 | `def delete_exchange(self): mq_channel = self._connect_mq() mq_channel.exchange_delete(exchange=self.code_name)` | Deletes the MQ exchange for this channel. Needs to be done only once. |
| 3,810 | `def get_channel_listing(self): return {'name': self.name, 'key': self.channel.key, 'type': self.channel.typ, 'read_only': self.read_only, 'is_online': self.is_online(), 'actions': self.get_actions(), 'unread': self.unread_count()}` | Serialized form for the channel listing. |
| 3,811 | `def create_exchange(self): channel = self._connect_mq() channel.exchange_declare(exchange=self.user.prv_exchange, exchange_type='fanout', durable=True)` | Creates the user's private exchange. |
| 3,812 | `def serialize(self, user=None): return {'content': self.body, 'type': self.typ, 'updated_at': self.updated_at, 'timestamp': self.updated_at, 'is_update': not hasattr(self, 'unsaved'), 'attachments': [attachment.serialize() for attachment in self.attachment_set], 'title': self.msg_title, 'url': self.url, 'sender_name': self.sender.full_name, 'sender_key': self.sender.key, 'channel_key': self.channel.key, 'cmd': 'message', 'avatar_url': self.sender.avatar, 'key': self.key}` | Serializes the message for the given user. |
| 3,813 | `def _republish(self): mq_channel = self.channel._connect_mq() mq_channel.basic_publish(exchange=self.channel.key, routing_key='', body=json.dumps(self.serialize()))` | Re-publishes an updated message. |
| 3,814 | `def defaultCrawlId(): timestamp = datetime.now().isoformat().replace(':', '_') user = getuser() return '_'.join(('crawl', user, timestamp))` | Provide a reasonable default crawl name using the user name and date. |
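Reflowed into runnable form, the snippet above needs nothing beyond the standard library (a minimal sketch; the printed value is illustrative):

```python
from datetime import datetime
from getpass import getuser

def defaultCrawlId():
    # Colons in the ISO timestamp are awkward in file and job names.
    timestamp = datetime.now().isoformat().replace(':', '_')
    user = getuser()
    return '_'.join(('crawl', user, timestamp))

print(defaultCrawlId())  # e.g. crawl_alice_2024-05-01T12_00_00.123456
```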
| 3,815 | `def main(argv=None): global Verbose, Mock if argv is None: argv = sys.argv if len(argv) < 5: die('Bad args') try: opts, argv = getopt.getopt(argv[1:], 'hs:p:mv', ['help', 'server=', 'port=', 'mock', 'verbose']) except getopt.GetoptError as err: print(err) die() serverEndpoint = DefaultServerEndpoint for opt, val in opts: if opt in ('-h', '--help'): echo2(USAGE) sys.exit() elif opt in ('-s', '--server'): serverEndpoint = val elif opt in ('-p', '--port'): serverEndpoint = 'http://localhost:%s' % val elif opt in ('-m', '--mock'): Mock = 1 elif opt in ('-v', '--verbose'): Verbose = 1 else: die(USAGE) cmd = argv[0] crawlId = argv[1] confId = argv[2] urlDir = argv[3] args = {} if len(argv) > 4: args = eval(argv[4]) nt = Nutch(crawlId, confId, serverEndpoint, urlDir) nt.Jobs().create(cmd, **args)` | Run a Nutch command using the REST API. |
| 3,816 | `def call(self, verb, servicePath, data=None, headers=None, forceText=False, sendJson=True): default_data = {} if sendJson else "" data = data if data else default_data headers = headers if headers else JsonAcceptHeader.copy() if not sendJson: headers.update(TextSendHeader) if verb not in RequestVerbs: die('Server call verb must be one of %s' % str(RequestVerbs.keys())) if Verbose: echo2("%s Endpoint:" % verb.upper(), servicePath) echo2("%s Request data:" % verb.upper(), data) echo2("%s Request headers:" % verb.upper(), headers) verbFn = RequestVerbs[verb] if sendJson: resp = verbFn(self.serverEndpoint + servicePath, json=data, headers=headers) else: resp = verbFn(self.serverEndpoint + servicePath, data=data, headers=headers) if Verbose: echo2("Response headers:", resp.headers) echo2("Response status:", resp.status_code) if resp.status_code != 200: if self.raiseErrors: error = NutchException("Unexpected server response: %d" % resp.status_code) error.status_code = resp.status_code raise error else: warn('Nutch server returned status:', resp.status_code) if forceText or 'content-type' not in resp.headers or resp.headers['content-type'] == 'text/plain': if Verbose: echo2("Response text:", resp.text) return resp.text content_type = resp.headers['content-type'] if content_type == 'application/json' and not forceText: if Verbose: echo2("Response JSON:", resp.json()) return resp.json() else: die('Did not understand server response: %s' % resp.headers)` | Call the Nutch server, do some error checking, and return the response. |
| 3,817 | `def list(self, allJobs=False): jobs = self.server.call('get', '/job') return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]` | Return the list of jobs at this endpoint. |
| 3,818 | `def _nextJob(self, job, nextRound=True): jobInfo = job.info() assert jobInfo['state'] == 'FINISHED' roundEnd = False if jobInfo['type'] == 'INJECT': nextCommand = 'GENERATE' elif jobInfo['type'] == 'GENERATE': nextCommand = 'FETCH' elif jobInfo['type'] == 'FETCH': nextCommand = 'PARSE' elif jobInfo['type'] == 'PARSE': nextCommand = 'UPDATEDB' elif jobInfo['type'] == 'UPDATEDB': nextCommand = 'INVERTLINKS' elif jobInfo['type'] == 'INVERTLINKS': nextCommand = 'DEDUP' elif jobInfo['type'] == 'DEDUP': if self.enable_index: nextCommand = 'INDEX' else: roundEnd = True elif jobInfo['type'] == 'INDEX': roundEnd = True else: raise NutchException("Unrecognized job type {}".format(jobInfo['type'])) if roundEnd: if nextRound and self.currentRound < self.totalRounds: nextCommand = 'GENERATE' self.currentRound += 1 else: return None return self.jobClient.create(nextCommand)` | Given a completed job, start the next job in the round, or return None. |
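The if/elif ladder above is a linear state machine over Nutch job types; a transition table makes that explicit. A minimal sketch using the job-type names from the row above (the helper function and its signature are illustrative, not Nutch's API):

```python
# Maps a finished job type to the next command in a Nutch crawl round.
NEXT_COMMAND = {
    'INJECT': 'GENERATE',
    'GENERATE': 'FETCH',
    'FETCH': 'PARSE',
    'PARSE': 'UPDATEDB',
    'UPDATEDB': 'INVERTLINKS',
    'INVERTLINKS': 'DEDUP',
}

def next_command(job_type, enable_index):
    if job_type in NEXT_COMMAND:
        return NEXT_COMMAND[job_type]
    if job_type == 'DEDUP':
        # DEDUP either hands off to INDEX or ends the round.
        return 'INDEX' if enable_index else None
    if job_type == 'INDEX':
        return None  # None marks the end of a round
    raise ValueError('Unrecognized job type {}'.format(job_type))

print(next_command('FETCH', enable_index=False))  # PARSE
```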
| 3,819 | `def progress(self, nextRound=True): currentJob = self.currentJob if currentJob is None: return currentJob jobInfo = currentJob.info() if jobInfo['state'] == 'RUNNING': return currentJob elif jobInfo['state'] == 'FINISHED': nextJob = self._nextJob(currentJob, nextRound) self.currentJob = nextJob return nextJob else: error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state'])) error.current_job = currentJob raise error` | Check the status of the current job, activate the next job if it is finished, and return the active job. |
| 3,820 | `def nextRound(self): finishedJobs = [] if self.currentJob is None: self.currentJob = self.jobClient.create('GENERATE') activeJob = self.progress(nextRound=False) while activeJob: oldJob = activeJob activeJob = self.progress(nextRound=False) if oldJob and oldJob != activeJob: finishedJobs.append(oldJob) sleep(self.sleepTime) self.currentRound += 1 return finishedJobs` | Execute all jobs in the current round and return when they have finished. |
| 3,821 | `def waitAll(self): finishedRounds = [self.nextRound()] while self.currentRound < self.totalRounds: finishedRounds.append(self.nextRound()) return finishedRounds` | Execute all queued rounds and return when they have finished. |
| 3,822 | `def Jobs(self, crawlId=None): crawlId = crawlId if crawlId else defaultCrawlId() return JobClient(self.server, crawlId, self.confId)` | Create a JobClient for listing and creating jobs. The JobClient inherits the confId from the Nutch client. |
| 3,823 | `def get(self, attr, default=None): attrs = self.body.get('attributes') or {} return attrs.get(attr, default)` | Get an attribute defined by this session. |
| 3,824 | `def get_all(self, cat): return self._get_from_local_cache(cat) or self._get_from_cache(cat) or self._get_from_db(cat)` | If the data can't be found in the cache, it is fetched from the DB, parsed, and stored in the cache for each lang_code. |
| 3,825 | `def _fill_get_item_cache(self, catalog, key): lang = self._get_lang() keylist = self.get_all(catalog) self.ITEM_CACHE[lang][catalog] = dict([(i['value'], i['name']) for i in keylist]) return self.ITEM_CACHE[lang][catalog].get(key)` | Fill the local item cache from the Redis cache, then return the requested key. |
| 3,826 | `def run(self, host, port, debug=True, validate_requests=True): if debug: logging.basicConfig(level=logging.DEBUG) app = self.create_wsgi_app(validate_requests) run_simple(host, port, app, use_reloader=debug, use_debugger=debug)` | Utility method to quickly get a server up and running. |
| 3,827 | `def dispatch_request(self, body): req_type = body.get('request', {}).get('type') session_obj = body.get('session') session = Session(session_obj) if session_obj else None if req_type == 'LaunchRequest': return self.launch_fn(session) elif req_type == 'IntentRequest': intent = body['request']['intent']['name'] intent_fn = self.intent_map.get(intent, self.unknown_intent_fn) slots = {slot['name']: slot.get('value') for _, slot in body['request']['intent'].get('slots', {}).items()} arity = intent_fn.__code__.co_argcount if arity == 2: return intent_fn(slots, session) return intent_fn() elif req_type == 'SessionEndedRequest': return self.session_end_fn() log.error('invalid request type: %s', req_type) raise ValueError('bad request: %s' % body)` | Given a parsed JSON request object, call the correct Intent, Launch, or SessionEnded function. |
| 3,828 | `def intent(self, intent_name): def _decorator(func): arity = func.__code__.co_argcount if arity not in [0, 2]: raise ValueError("expected 0 or 2 argument function") self.intent_map[intent_name] = func return func return _decorator` | Decorator to register a handler for the given intent. |
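A self-contained sketch of the decorator-registry pattern this row uses; the `Skill` class and handler are stand-ins, and only the arity check mirrors the snippet above:

```python
class Skill:
    def __init__(self):
        self.intent_map = {}

    def intent(self, intent_name):
        def _decorator(func):
            # Handlers must take either no arguments or (slots, session).
            arity = func.__code__.co_argcount
            if arity not in (0, 2):
                raise ValueError("expected 0 or 2 argument function")
            self.intent_map[intent_name] = func
            return func
        return _decorator

skill = Skill()

@skill.intent('HelloIntent')
def hello(slots, session):
    return 'hello'

print(skill.intent_map)  # {'HelloIntent': <function hello ...>}
```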
| 3,829 | `def encrypt_password(self): if self.password and not self.password.startswith('$pbkdf2'): self.set_password(self.password)` | Encrypt the password if it is not already encrypted. |
| 3,830 | `def send_client_cmd(self, data, cmd=None, via_queue=None): mq_channel = self._connect_mq() if cmd: data['cmd'] = cmd if via_queue: mq_channel.basic_publish(exchange='', routing_key=via_queue, body=json.dumps(data)) else: mq_channel.basic_publish(exchange=self.prv_exchange, routing_key='', body=json.dumps(data))` | Send an arbitrary cmd and data to the client. |
| 3,831 | `def assign_yourself(self): task_invitation = TaskInvitation.objects.get(self.task_invitation_key) wfi = task_invitation.instance if not wfi.current_actor.exist: wfi.current_actor = self.current.role wfi.save() [inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if not inv == task_invitation] title = _(u"Successful") msg = _(u"You have successfully assigned the job to yourself.") else: title = _(u"Unsuccessful") msg = _(u"Unfortunately, this job is already taken by someone else.") self.current.msg_box(title=title, msg=msg)` | Assigns the workflow to the user. The selected job is checked for an assigned role: if none is assigned, the user takes the job and a success message is shown; if a role is already assigned, nothing is done and a message says so. |
| 3,832 | `def send_workflow(self): task_invitation = TaskInvitation.objects.get(self.task_invitation_key) wfi = task_invitation.instance select_role = self.input['form']['select_role'] if wfi.current_actor == self.current.role: task_invitation.role = RoleModel.objects.get(select_role) wfi.current_actor = RoleModel.objects.get(select_role) wfi.save() task_invitation.save() [inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if not inv == task_invitation] title = _(u"Successful") msg = _(u"The workflow was assigned to someone else with success.") else: title = _(u"Unsuccessful") msg = _(u"This workflow does not belong to you, you cannot assign it to someone else.") self.current.msg_box(title=title, msg=msg)` | Assigns the workflow instance and the task invitation to a selected role; other invitations for the same workflow are deleted. |
| 3,833 | `def save_date(self): task_invitation = TaskInvitation.objects.get(self.task_invitation_key) wfi = task_invitation.instance if wfi.current_actor.exist and wfi.current_actor == self.current.role: dt_start = datetime.strptime(self.input['form']['start_date'], "%d.%m.%Y") dt_finish = datetime.strptime(self.input['form']['finish_date'], "%d.%m.%Y") task_invitation.start_date = dt_start task_invitation.finish_date = dt_finish task_invitation.save() wfi.start_date = dt_start wfi.finish_date = dt_finish wfi.save() title = _(u"Successful") msg = _(u"You've extended the workflow time.") else: title = _(u"Unsuccessful") msg = _(u"This workflow does not belong to you.") self.current.msg_box(title=title, msg=msg)` | Extends the workflow time by saving new start and finish dates on both the task invitation and the workflow instance. |
| 3,834 | `def suspend(self): task_invitation = TaskInvitation.objects.get(self.task_invitation_key) wfi = task_invitation.instance if wfi.current_actor.exist and wfi.current_actor == self.current.role: for m in RoleModel.objects.filter(abstract_role=self.current.role.abstract_role, unit=self.current.role.unit): if m != self.current.role: task_invitation.key = '' task_invitation.role = m task_invitation.save() wfi.current_actor = RoleModel() wfi.save() title = _(u"Successful") msg = _(u"You left the workflow.") else: title = _(u"Unsuccessful") msg = _(u"Unfortunately, this workflow does not belong to you or is already idle.") self.current.msg_box(title=title, msg=msg)` | If a role is assigned to the workflow and it matches the user's role, the user can drop the workflow; otherwise nothing is done. |
| 3,835 | `def on_home_row(self, location=None): location = location or self.location return (self.color == color.white and location.rank == 1) or (self.color == color.black and location.rank == 6)` | Finds out if the piece is on its home row. |
| 3,836 | `def would_move_be_promotion(self, location=None): location = location or self.location return (location.rank == 1 and self.color == color.black) or (location.rank == 6 and self.color == color.white)` | Finds out if a move from the current location would result in promotion. |
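The rank checks in the last two rows are mirror images of each other. A self-contained sketch, assuming a 0-indexed board; the `Color`, `Location`, and `Pawn` classes here are stand-ins, not the library's:

```python
from collections import namedtuple
from enum import Enum

class Color(Enum):
    WHITE = 0
    BLACK = 1

Location = namedtuple('Location', ['rank', 'file'])

class Pawn:
    def __init__(self, color, location):
        self.color = color
        self.location = location

    def on_home_row(self, location=None):
        location = location or self.location
        # White pawns start on rank 1, black pawns on rank 6.
        return (self.color is Color.WHITE and location.rank == 1) or \
               (self.color is Color.BLACK and location.rank == 6)

    def would_move_be_promotion(self, location=None):
        location = location or self.location
        # The promotion checks mirror the home rows.
        return (location.rank == 1 and self.color is Color.BLACK) or \
               (location.rank == 6 and self.color is Color.WHITE)

pawn = Pawn(Color.WHITE, Location(rank=1, file=0))
print(pawn.on_home_row(), pawn.would_move_be_promotion(Location(6, 0)))  # True True
```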
| 3,837 | `def square_in_front(self, location=None): location = location or self.location return location.shift_up() if self.color == color.white else location.shift_down()` | Finds the square directly in front of the pawn. |
| 3,838 | `def forward_moves(self, position): if position.is_square_empty(self.square_in_front(self.location)): if self.would_move_be_promotion(): for move in self.create_promotion_moves(notation_const.PROMOTE): yield move else: yield self.create_move(end_loc=self.square_in_front(self.location), status=notation_const.MOVEMENT) if self.on_home_row() and position.is_square_empty(self.two_squares_in_front(self.location)): yield self.create_move(end_loc=self.square_in_front(self.square_in_front(self.location)), status=notation_const.MOVEMENT)` | Finds possible moves one and two squares in front of the pawn. |
| 3,839 | `def _one_diagonal_capture_square(self, capture_square, position): if self.contains_opposite_color_piece(capture_square, position): if self.would_move_be_promotion(): for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE, location=capture_square): yield move else: yield self.create_move(end_loc=capture_square, status=notation_const.CAPTURE)` | Yields the specified diagonal as a capture move if it is one. |
| 3,840 | `def capture_moves(self, position): try: right_diagonal = self.square_in_front(self.location.shift_right()) for move in self._one_diagonal_capture_square(right_diagonal, position): yield move except IndexError: pass try: left_diagonal = self.square_in_front(self.location.shift_left()) for move in self._one_diagonal_capture_square(left_diagonal, position): yield move except IndexError: pass` | Finds all possible capture moves. |
| 3,841 | `def on_en_passant_valid_location(self): return (self.color == color.white and self.location.rank == 4) or (self.color == color.black and self.location.rank == 3)` | Finds out if the pawn is on the enemy's center rank, where en passant is possible. |
| 3,842 | `def _is_en_passant_valid(self, opponent_pawn_location, position): try: pawn = position.piece_at_square(opponent_pawn_location) return pawn is not None and isinstance(pawn, Pawn) and pawn.color != self.color and position.piece_at_square(opponent_pawn_location).just_moved_two_steps except IndexError: return False` | Finds out if the opponent's pawn next to this pawn just moved two squares. |
| 3,843 | `def add_one_en_passant_move(self, direction, position): try: if self._is_en_passant_valid(direction(self.location), position): yield self.create_move(end_loc=self.square_in_front(direction(self.location)), status=notation_const.EN_PASSANT) except IndexError: pass` | Yields the en passant move in the given direction if it is legal. |
| 3,844 | `def en_passant_moves(self, position): if self.on_en_passant_valid_location(): for move in itertools.chain(self.add_one_en_passant_move(lambda x: x.shift_right(), position), self.add_one_en_passant_move(lambda x: x.shift_left(), position)): yield move` | Finds possible en passant moves. |
| 3,845 | `def respond(text=None, ssml=None, attributes=None, reprompt_text=None, reprompt_ssml=None, end_session=True): obj = {'version': '1.0', 'response': {'outputSpeech': {'type': 'PlainText', 'text': ''}, 'shouldEndSession': end_session}, 'sessionAttributes': attributes or {}} if text: obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text} elif ssml: obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml} reprompt_output = None if reprompt_text: reprompt_output = {'type': 'PlainText', 'text': reprompt_text} elif reprompt_ssml: reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml} if reprompt_output: obj['response']['reprompt'] = {'outputSpeech': reprompt_output} return obj` | Build a dict containing a valid response to an Alexa request. |
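Reflowed into runnable form with a usage example (a sketch that assumes only the response shape shown above):

```python
def respond(text=None, ssml=None, attributes=None,
            reprompt_text=None, reprompt_ssml=None, end_session=True):
    # Skeleton response; outputSpeech is overwritten below if text/ssml is given.
    obj = {
        'version': '1.0',
        'response': {
            'outputSpeech': {'type': 'PlainText', 'text': ''},
            'shouldEndSession': end_session,
        },
        'sessionAttributes': attributes or {},
    }
    if text:
        obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
    elif ssml:
        obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}
    reprompt_output = None
    if reprompt_text:
        reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
    elif reprompt_ssml:
        reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}
    if reprompt_output:
        obj['response']['reprompt'] = {'outputSpeech': reprompt_output}
    return obj

print(respond(text='Hello!', end_session=False)['response']['outputSpeech'])
```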
| 3,846 | `def validate_request_timestamp(req_body, max_diff=150): time_str = req_body.get('request', {}).get('timestamp') if not time_str: log.error('timestamp not present %s', req_body) return False req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ") diff = (datetime.utcnow() - req_ts).total_seconds() if abs(diff) > max_diff: log.error('timestamp difference too high: %d sec', diff) return False return True` | Ensure the request's timestamp doesn't fall outside the app's specified tolerance. |
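Reflowed into runnable form (a sketch; the module-level `log` is assumed to be a standard logger):

```python
import logging
from datetime import datetime

log = logging.getLogger(__name__)

def validate_request_timestamp(req_body, max_diff=150):
    # Reject requests whose timestamp is more than max_diff seconds off.
    time_str = req_body.get('request', {}).get('timestamp')
    if not time_str:
        log.error('timestamp not present %s', req_body)
        return False
    req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
    diff = (datetime.utcnow() - req_ts).total_seconds()
    if abs(diff) > max_diff:
        log.error('timestamp difference too high: %d sec', diff)
        return False
    return True

body = {'request': {'timestamp': datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")}}
print(validate_request_timestamp(body))  # True
```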
| 3,847 | `def validate_request_certificate(headers, data): if 'SignatureCertChainUrl' not in headers or 'Signature' not in headers: log.error('invalid request headers') return False cert_url = headers['SignatureCertChainUrl'] sig = base64.b64decode(headers['Signature']) cert = _get_certificate(cert_url) if not cert: return False try: crypto.verify(cert, sig, data, 'sha1') return True except: log.error('invalid request signature') return False` | Ensure that the certificate and signature specified in the request headers are truly from Amazon and verify correctly. |
| 3,848 | `def _get_certificate(cert_url): global _cache if cert_url in _cache: cert = _cache[cert_url] if cert.has_expired(): _cache = {} else: return cert url = urlparse(cert_url) host = url.netloc.lower() path = posixpath.normpath(url.path) if url.scheme != 'https' or host not in ['s3.amazonaws.com', 's3.amazonaws.com:443'] or not path.startswith('/echo.api/'): log.error('invalid cert location %s', cert_url) return resp = urlopen(cert_url) if resp.getcode() != 200: log.error('failed to download certificate') return cert = crypto.load_certificate(crypto.FILETYPE_PEM, resp.read()) if cert.has_expired() or cert.get_subject().CN != 'echo-api.amazon.com': log.error('certificate expired or invalid') return _cache[cert_url] = cert return cert` | Download and validate a specified Amazon PEM file. |
| 3,849 | `def is_processed(self, db_versions): return self.number in (v.number for v in db_versions if v.date_done)` | Check if the version is already applied in the database. |
| 3,850 | `def is_noop(self): has_operations = [mode.pre_operations or mode.post_operations for mode in self._version_modes.values()] has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons for mode in self._version_modes.values()] noop = not any((has_upgrade_addons, has_operations)) return noop` | Check if the version is a no-operation version. |
| 3,851 | `def _get_version_mode(self, mode=None): version_mode = self._version_modes.get(mode) if not version_mode: version_mode = self._version_modes[mode] = VersionMode(name=mode) return version_mode` | Return a VersionMode for a mode name. |
| 3,852 | `def add_operation(self, operation_type, operation, mode=None): version_mode = self._get_version_mode(mode=mode) if operation_type == 'pre': version_mode.add_pre(operation) elif operation_type == 'post': version_mode.add_post(operation) else: raise ConfigurationError(u"Type of operation must be 'pre' or 'post', got %s" % (operation_type,))` | Add an operation to the version. |
| 3,853 | `def add_backup_operation(self, backup, mode=None): try: if self.options.backup: self.options.backup.ignore_if_operation().execute() except OperationError: self.backup = backup` | Add a backup operation to the version. |
| 3,854 | `def pre_operations(self, mode=None): version_mode = self._get_version_mode(mode=mode) return version_mode.pre_operations` | Return pre-operations, but only for the requested mode. |
| 3,855 | `def post_operations(self, mode=None): version_mode = self._get_version_mode(mode=mode) return version_mode.post_operations` | Return post-operations, but only for the requested mode. |
| 3,856 | `def upgrade_addons_operation(self, addons_state, mode=None): installed = set(a.name for a in addons_state if a.state in ('installed', 'to upgrade')) base_mode = self._get_version_mode() addons_list = base_mode.upgrade_addons.copy() if mode: add_mode = self._get_version_mode(mode=mode) addons_list \|= add_mode.upgrade_addons to_install = addons_list - installed to_upgrade = installed & addons_list return UpgradeAddonsOperation(self.options, to_install, to_upgrade)` | Return the merged set of the main addons and the mode's addons. |
| 3,857 | `def copy(self): return type(self)(reagents=[x.copy() for x in self.__reagents], meta=self.__meta.copy(), products=[x.copy() for x in self.__products], reactants=[x.copy() for x in self.__reactants])` | Get a copy of the object. |
| 3,858 | `def implicify_hydrogens(self): total = 0 for ml in (self.__reagents, self.__reactants, self.__products): for m in ml: if hasattr(m, 'implicify_hydrogens'): total += m.implicify_hydrogens() if total: self.flush_cache() return total` | Remove explicit hydrogens if possible. |
| 3,859 | `def compose(self): rr = self.__reagents + self.__reactants if rr: if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr): raise TypeError('Queries not composable') r = reduce(or_, rr) else: r = MoleculeContainer() if self.__products: if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products): raise TypeError('Queries not composable') p = reduce(or_, self.__products) else: p = MoleculeContainer() return r ^ p` | Get the CGR of the reaction. |
| 3,860 | `def fix_positions(self): shift_x = 0 for m in self.__reactants: max_x = self.__fix_positions(m, shift_x, 0) shift_x = max_x + 1 arrow_min = shift_x if self.__reagents: for m in self.__reagents: max_x = self.__fix_positions(m, shift_x, 1.5) shift_x = max_x + 1 else: shift_x += 3 arrow_max = shift_x - 1 for m in self.__products: max_x = self.__fix_positions(m, shift_x, 0) shift_x = max_x + 1 self._arrow = (arrow_min, arrow_max) self.flush_cache()` | Fix the coordinates of the molecules in the reaction. |
| 3,861 | `def get_permissions(self): user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role return user_role.get_permissions()` | Permissions of the user. |
| 3,862 | `def add_permission_by_name(self, code, save=False): if not save: return ["%s \| %s" % (p.name, p.code) for p in Permission.objects.filter(code__contains=code)] for p in Permission.objects.filter(code__contains=code): if p not in self.Permissions: self.Permissions(permission=p) if p: self.save()` | Adds permissions matching the given code name. |
| 3,863 | `def send_notification(self, title, message, typ=1, url=None, sender=None): self.user.send_notification(title=title, message=message, typ=typ, url=url, sender=sender)` | Sends a message to the private MQ exchange of this role's user. |
| 3,864 | `def would_move_be_promotion(self): return (self._end_loc.rank == 0 and not self.color) or (self._end_loc.rank == 7 and self.color)` | Finds out if a move from the current location would be a promotion. |
| 3,865 | `def connect(self, receiver, sender=None, weak=True, dispatch_uid=None): if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) if weak: ref = weakref.ref receiver_object = receiver if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'): ref = WeakMethod receiver_object = receiver.__self__ if six.PY3: receiver = ref(receiver) weakref.finalize(receiver_object, self._remove_receiver) else: receiver = ref(receiver, self._remove_receiver) with self.lock: self._clear_dead_receivers() for r_key, _ in self.receivers: if r_key == lookup_key: break else: self.receivers.append((lookup_key, receiver)) self.sender_receivers_cache.clear()` | Connect a receiver to a sender for this signal. |
| 3,866 | `def disconnect(self, receiver=None, sender=None, dispatch_uid=None): if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) disconnected = False with self.lock: self._clear_dead_receivers() for index in range(len(self.receivers)): (r_key, _) = self.receivers[index] if r_key == lookup_key: disconnected = True del self.receivers[index] break self.sender_receivers_cache.clear() return disconnected` | Disconnect a receiver from a sender for this signal. |
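The connect/disconnect pair in the last two rows maintains a list of `(lookup_key, receiver)` tuples. A minimal, self-contained sketch of that registry, with the weak references, locking, and caching omitted:

```python
class Signal:
    def __init__(self):
        self.receivers = []  # list of (lookup_key, receiver) pairs

    @staticmethod
    def _make_id(target):
        return id(target) if target is not None else None

    def connect(self, receiver, sender=None, dispatch_uid=None):
        key = (dispatch_uid or self._make_id(receiver), self._make_id(sender))
        # Keep at most one receiver per lookup key.
        if all(k != key for k, _ in self.receivers):
            self.receivers.append((key, receiver))

    def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
        key = (dispatch_uid or self._make_id(receiver), self._make_id(sender))
        for index, (k, _) in enumerate(self.receivers):
            if k == key:
                del self.receivers[index]
                return True
        return False

sig = Signal()
handler = lambda instance: print('saved', instance)
sig.connect(handler)
print(sig.disconnect(handler))  # True
```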
| 3,867 | `def migrate(config): webapp = WebApp(config.web_host, config.web_port, custom_maintenance_file=config.web_custom_html) webserver = WebServer(webapp) webserver.daemon = True webserver.start() migration_parser = YamlParser.parse_from_file(config.migration_file) migration = migration_parser.parse() database = Database(config) with database.connect() as lock_connection: application_lock = ApplicationLock(lock_connection) application_lock.start() while not application_lock.acquired: time.sleep(0.5) else: if application_lock.replica: application_lock.stop = True application_lock.join() try: table = MigrationTable(database) runner = Runner(config, migration, database, table) runner.perform() finally: application_lock.stop = True application_lock.join()` | Perform a migration according to the config. |
| 3,868 | `def get_permissions(cls): perms = [] for kls_name, kls in cls.registry.items(): for method_name in cls.__dict__.keys(): if method_name.endswith('_view'): perms.append("%s.%s" % (kls_name, method_name)) return perms` | Generates permissions for all CrudView-based class methods. |
| 3,869 | `def _get_object_menu_models(): from pyoko.conf import settings enabled_models = [] for entry in settings.OBJECT_MENU.values(): for mdl in entry: if 'wf' not in mdl: enabled_models.append(mdl['name']) return enabled_models` | We need to create basic permissions only for CRUD-enabled models. |
| 3,870 | `def add(cls, code_name, name='', description=''): if code_name not in cls.registry: cls.registry[code_name] = (code_name, name or code_name, description) return code_name` | Create a custom permission. |
| 3,871 | `def get_mapping(self, other): m = next(self._matcher(other).isomorphisms_iter(), None) if m: return {v: k for k, v in m.items()}` | Get the self-to-other mapping. |
| 3,872 | `def get_substructure_mapping(self, other, limit=1): i = self._matcher(other).subgraph_isomorphisms_iter() if limit == 1: m = next(i, None) if m: return {v: k for k, v in m.items()} return elif limit == 0: return ({v: k for k, v in m.items()} for m in i) return [{v: k for k, v in m.items()} for m in islice(i, limit)]` | Get the self-to-other substructure mapping. |
| 3,873 | `def shift(self, direction): try: if direction == Direction.UP: return self.shift_up() elif direction == Direction.DOWN: return self.shift_down() elif direction == Direction.RIGHT: return self.shift_right() elif direction == Direction.LEFT: return self.shift_left() else: raise IndexError("Invalid direction {}".format(direction)) except IndexError as e: raise IndexError(e)` | Shifts in the direction provided by the Direction enum. |
| 3,874 | `def shift_up(self, times=1): try: return Location(self._rank + times, self._file) except IndexError as e: raise IndexError(e)` | Finds the Location shifted up by the given number of squares. |
| 3,875 | `def shift_down(self, times=1): try: return Location(self._rank - times, self._file) except IndexError as e: raise IndexError(e)` | Finds the Location shifted down by the given number of squares. |
| 3,876 | `def shift_right(self, times=1): try: return Location(self._rank, self._file + times) except IndexError as e: raise IndexError(e)` | Finds the Location shifted right by the given number of squares. |
| 3,877 | `def shift_left(self, times=1): try: return Location(self._rank, self._file - times) except IndexError as e: raise IndexError(e)` | Finds the Location shifted left by the given number of squares. |
| 3,878 | `def shift_up_right(self, times=1): try: return Location(self._rank + times, self._file + times) except IndexError as e: raise IndexError(e)` | Finds the Location shifted up and right by the given number of squares. |
| 3,879 | `def shift_up_left(self, times=1): try: return Location(self._rank + times, self._file - times) except IndexError as e: raise IndexError(e)` | Finds the Location shifted up and left by the given number of squares. |
| 3,880 | `def shift_down_right(self, times=1): try: return Location(self._rank - times, self._file + times) except IndexError as e: raise IndexError(e)` | Finds the Location shifted down and right by the given number of squares. |
| 3,881 | `def shift_down_left(self, times=1): try: return Location(self._rank - times, self._file - times) except IndexError as e: raise IndexError(e)` | Finds the Location shifted down and left by the given number of squares. |
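The eight shift methods differ only in their (rank, file) delta; a self-contained sketch that collapses them into one table (the `Location` class here is a stand-in, and the bounds check is an assumption):

```python
class Location:
    # (rank, file) deltas for the eight shift directions in the rows above.
    _DELTAS = {
        'up': (1, 0), 'down': (-1, 0), 'right': (0, 1), 'left': (0, -1),
        'up_right': (1, 1), 'up_left': (1, -1),
        'down_right': (-1, 1), 'down_left': (-1, -1),
    }

    def __init__(self, rank, file):
        if not (0 <= rank < 8 and 0 <= file < 8):
            raise IndexError('off the board: {}, {}'.format(rank, file))
        self.rank, self.file = rank, file

    def shift(self, direction, times=1):
        dr, df = self._DELTAS[direction]
        return Location(self.rank + dr * times, self.file + df * times)

print(vars(Location(0, 0).shift('up_right', 2)))  # {'rank': 2, 'file': 2}
```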
| 3,882 | `def standardize(self): self.reset_query_marks() seen = set() total = 0 for n, atom in self.atoms(): if n in seen: continue for k, center in central.items(): if center != atom: continue shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items()) for shell_query, shell_patch, atom_patch in query_patch[k]: if shell_query != shell: continue total += 1 for attr_name, attr_value in atom_patch.items(): setattr(atom, attr_name, attr_value) for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell): bond.update(bond_patch) for attr_name, attr_value in atom_patch.items(): setattr(atom, attr_name, attr_value) seen.add(n) seen.update(self._adj[n]) break else: continue break if total: self.flush_cache() return total` | Standardize functional groups. |
| 3,883 | `def get_staged_files(): proc = subprocess.Popen(('git', 'status', '--porcelain'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, _ = proc.communicate() staged_files = modified_re.findall(out) return staged_files` | Get all files staged for the current commit. |
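A runnable sketch of the snippet above. It relies on a module-level `modified_re` that the row does not show, so the pattern below is a guess at a porcelain-status matcher, not the project's actual regex:

```python
import re
import subprocess

# Assumed definition: match staged (added/modified) paths in the
# output of `git status --porcelain`.
modified_re = re.compile(rb'^[MA]+\s+(?P<name>.+)$', re.MULTILINE)

def get_staged_files():
    proc = subprocess.Popen(('git', 'status', '--porcelain'),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return modified_re.findall(out)

print(get_staged_files())  # e.g. [b'setup.py'] when run inside a repo
```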
| 3,884 | `def runserver(host=None, port=None): host = host or os.getenv('HTTP_HOST', '0.0.0.0') port = port or os.getenv('HTTP_PORT', '9001') zioloop = ioloop.IOLoop.instance() pc = QueueManager(zioloop) app.pc = pc pc.connect() app.listen(port, host) zioloop.start()` | Run the Tornado server. |
| 3,885 | `def open(self): sess_id = self._get_sess_id() if sess_id: self.application.pc.websockets[self._get_sess_id()] = self self.write_message(json.dumps({"cmd": "status", "status": "open"})) else: self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))` | Called on a new websocket connection. |
| 3,886 | `def on_message(self, message): log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message)) self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request)` | Called on a new websocket message. |
| 3,887 | `def _remove_redundancy(self, log): for key in log: if key in log and key in log['data']: log[key] = log['data'].pop(key) return log` | Removes duplicate entries from the 'data' dict inside the log and moves them to the top level. |
| 3,888 | `def _scan_fpatterns(self, state): for f in self.fpaths: fpattern, formatter = (a.split('=')[1] for a in f.split(':', 1)) self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter) fpaths = glob.glob(fpattern) fpaths = list(set(fpaths) - set(state.files_tracked)) for fpath in fpaths: try: formatter_fn = self.formatters.get(formatter, load_formatter_fn(formatter)) self.log.info('found_formatter_fn', fn=formatter) self.formatters[formatter] = formatter_fn except (SystemExit, KeyboardInterrupt): raise except (ImportError, AttributeError): self.log.exception('formatter_fn_not_found', fn=formatter) sys.exit(-1) self.log.info('found_log_file', log_file=fpath) log_f = dict(fpath=fpath, fpattern=fpattern, formatter=formatter, formatter_fn=formatter_fn) log_key = (fpath, fpattern, formatter) if log_key not in self.log_reader_threads: self.log.info('starting_collect_log_lines_thread', log_key=log_key) log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,)) self.log_reader_threads[log_key] = log_reader_thread state.files_tracked.append(fpath) time.sleep(self.SCAN_FPATTERNS_INTERVAL)` | For each of the given fpatterns, starts a thread that collects log lines from the matching files. |
| 3,889 | `def get_links(self, **kw): links = [a for a in dir(self) if isinstance(getattr(self, a), Model) and not a.startswith('_model')] return [{'field': l, 'mdl': getattr(self, l).__class__} for l in links]` | Prepare the links of the form by mimicking the result of pyoko's get_links method. |
| 3,890 | `def set_data(self, data): for name in self._fields: setattr(self, name, data.get(name)) return self` | Fills the form with data. |
| 3,891 | `def get_input(source, files, threads=4, readtype="1D", combine="simple", names=None, barcoded=False): proc_functions = {'fastq': ex.process_fastq_plain, 'fasta': ex.process_fasta, 'bam': ex.process_bam, 'summary': ex.process_summary, 'fastq_rich': ex.process_fastq_rich, 'fastq_minimal': ex.process_fastq_minimal, 'cram': ex.process_cram, 'ubam': ex.process_ubam} filethreads = min(len(files), threads) threadsleft = threads - filethreads with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor: extraction_function = partial(proc_functions[source], threads=threadsleft, readtype=readtype, barcoded=barcoded) datadf = combine_dfs(dfs=[out for out in executor.map(extraction_function, files)], names=names or files, method=combine) if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any(): datadf.drop("readIDs", axis='columns', inplace=True) datadf = calculate_start_time(datadf) logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf))) if len(datadf) == 0: logging.critical("Nanoget: no reads retrieved.") sys.exit("Fatal: No reads found in input.") else: return datadf` | Get the input and process it according to its source type. |
| 3,892 | `def combine_dfs(dfs, names, method): if method == "track": res = list() for df, identifier in zip(dfs, names): df["dataset"] = identifier res.append(df) return pd.concat(res, ignore_index=True) elif method == "simple": return pd.concat(dfs, ignore_index=True)` | Combine dataframes. |
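Reflowed into runnable form with toy frames (a sketch; only pandas is assumed):

```python
import pandas as pd

def combine_dfs(dfs, names, method):
    if method == "track":
        # Tag each frame with its origin before concatenating.
        res = []
        for df, identifier in zip(dfs, names):
            df["dataset"] = identifier
            res.append(df)
        return pd.concat(res, ignore_index=True)
    elif method == "simple":
        return pd.concat(dfs, ignore_index=True)

a = pd.DataFrame({'len': [1, 2]})
b = pd.DataFrame({'len': [3]})
print(combine_dfs([a, b], names=['run1', 'run2'], method='track'))
```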
| 3,893 | `def calculate_start_time(df): if "time" in df: df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]') elif "timestamp" in df: df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]") else: return df if "dataset" in df: for dset in df["dataset"].unique(): time_zero = df.loc[df["dataset"] == dset, "time_arr"].min() df.loc[df["dataset"] == dset, "start_time"] = df.loc[df["dataset"] == dset, "time_arr"] - time_zero else: df["start_time"] = df["time_arr"] - df["time_arr"].min() return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")` | Calculate the start_time per read. |
| 3,894 | `def parser_from_buffer(cls, fp): yaml = YAML(typ="safe") return cls(yaml.load(fp))` | Construct a YamlParser from a file pointer. |
| 3,895 | `def check_dict_expected_keys(self, expected_keys, current, dict_name): if not isinstance(current, dict): raise ParseError(u"'{}' key must be a dict".format(dict_name), YAML_EXAMPLE) expected_keys = set(expected_keys) current_keys = {key for key in current} extra_keys = current_keys - expected_keys if extra_keys: message = u"{}: the keys {} are unexpected. (allowed keys: {})" raise ParseError(message.format(dict_name, list(extra_keys), list(expected_keys)), YAML_EXAMPLE)` | Check that we don't have unknown keys in a dictionary. |
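A self-contained sketch of the same check as a free function; the project's `ParseError` takes a second YAML_EXAMPLE argument that is omitted here, and the class below is a stand-in:

```python
class ParseError(Exception):
    """Stand-in for the parser's ParseError."""

def check_dict_expected_keys(expected_keys, current, dict_name):
    # Reject non-dicts and any keys outside the allowed set.
    if not isinstance(current, dict):
        raise ParseError("'{}' key must be a dict".format(dict_name))
    extra_keys = set(current) - set(expected_keys)
    if extra_keys:
        raise ParseError("{}: the keys {} are unexpected. (allowed keys: {})"
                         .format(dict_name, list(extra_keys), list(expected_keys)))

check_dict_expected_keys(['version', 'operations'], {'version': 1}, 'migration')
print('ok')  # raises ParseError if an unknown key slips in
```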
| 3,896 | `def set_message(self, title, msg, typ, url=None): return self.user.send_notification(title=title, message=msg, typ=typ, url=url)` | Sets a user notification message. |
| 3,897 | `def is_auth(self): if self.user_id is None: self.user_id = self.session.get('user_id') return bool(self.user_id)` | A property that indicates whether the current user is logged in. |
| 3,898 | `def msg_box(self, msg, title=None, typ='info'): self.output['msgbox'] = {'type': typ, "title": title or msg[:20], "msg": msg}` | Create a message box. |
| 3,899 | `def _update_task(self, task): self.task = task self.task.data.update(self.task_data) self.task_type = task.task_spec.__class__.__name__ self.spec = task.task_spec self.task_name = task.get_name() self.activity = getattr(self.spec, 'service_class', '') self._set_lane_data()` | Assigns the current task step to self.task, then updates the task's data with self.task_data. |