idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
57,300
def scope ( cls , f ) : if not hasattr ( cls , "scopes" ) : cls . scopes = copy ( STANDARD_SCOPES ) cls . scopes . append ( f ) def create_builder ( self , * args , ** kwargs ) : bldr = ScopeBuilder ( cls , cls . scopes ) return getattr ( bldr , f . __name__ ) ( * args , ** kwargs ) setattr ( cls , f . __name__ , classmethod ( create_builder ) ) return f
Decorator which can dynamically attach a query scope to the model .
57,301
def _module_name_from_previous_frame ( num_frames_back ) : frm = inspect . stack ( ) [ num_frames_back + 1 ] return inspect . getmodule ( frm [ 0 ] ) . __name__
Returns the module name associated with a frame num_frames_back in the call stack . This function adds 1 to account for itself so num_frames_back should be given relative to the caller .
57,302
def create_model ( schema , collection , class_name = None ) : if not class_name : class_name = camelize ( str ( collection . name ) ) model_class = type ( class_name , ( Model , ) , dict ( schema = schema , _collection_factory = staticmethod ( lambda : collection ) ) ) model_class . __module__ = _module_name_from_previous_frame ( 1 ) return model_class
Main entry point to creating a new mongothon model . Both schema and Pymongo collection objects must be provided .
57,303
def create_model_offline ( schema , collection_factory , class_name ) : model_class = type ( class_name , ( Model , ) , dict ( schema = schema , _collection_factory = staticmethod ( collection_factory ) ) ) model_class . __module__ = _module_name_from_previous_frame ( 1 ) return model_class
Entry point for creating a new Mongothon model without instantiating a database connection . The collection is instead provided through a closure that is resolved upon the model's first database access .
57,304
def wrap ( value ) : if isinstance ( value , Document ) or isinstance ( value , DocumentList ) : return value elif isinstance ( value , dict ) : return Document ( value ) elif isinstance ( value , list ) : return DocumentList ( value ) else : return value
Wraps the given value in a Document or DocumentList as applicable .
57,305
def unwrap ( value ) : if isinstance ( value , Document ) : return value . to_dict ( ) elif isinstance ( value , DocumentList ) : return value . to_list ( ) else : return value
Unwraps the given Document or DocumentList as applicable .
57,306
def note_change ( self , key , value ) : if value != self . _instance [ key ] and key not in self . _previous and key not in self . _added : self . _previous [ key ] = self . _instance [ key ] if key in self . _previous and value == self . _previous [ key ] : del self . _previous [ key ]
Updates change state to reflect a change to a field . Takes care of ignoring no-op reversions and takes appropriate steps if the field was previously deleted or added to ensure the change state purely reflects the diff since last reset .
57,307
def note_addition ( self , key , value ) : if key in self . _deleted : if value != self . _deleted [ key ] : self . _previous [ key ] = self . _deleted [ key ] del self . _deleted [ key ] else : self . _added . append ( key )
Updates the change state to reflect the addition of a field . Detects previous changes and deletions of the field and acts accordingly .
57,308
def note_deletion ( self , key ) : if key in self . _added : self . _added . remove ( key ) else : if key in self . _previous : self . _deleted [ key ] = self . _previous [ key ] del self . _previous [ key ] else : self . _deleted [ key ] = self . _instance [ key ]
Notes the deletion of a field .
57,309
def changes ( self ) : return { key : ( self . _previous [ key ] , self . _instance [ key ] ) for key in self . _previous }
Returns a dict containing just the fields which have changed on this Document since it was created or last saved together with both their previous and current values
57,310
def reset_all_changes ( self ) : self . reset_changes ( ) for value in self . values ( ) : if isinstance ( value , Document ) or isinstance ( value , DocumentList ) : value . reset_all_changes ( )
Resets change tracking in this document recursing into child Documents and DocumentLists .
57,311
def populate ( self , other ) : self . clear ( ) self . update ( other ) self . reset_all_changes ( )
Like update but clears the contents first .
57,312
def parse ( self , buffer ) : log . debug ( "parsing a %d byte packet" % len ( buffer ) ) ( opcode , ) = struct . unpack ( str ( "!H" ) , buffer [ : 2 ] ) log . debug ( "opcode is %d" % opcode ) packet = self . __create ( opcode ) packet . buffer = buffer return packet . decode ( )
This method is used to parse an existing datagram into its corresponding TftpPacket object . The buffer is the raw bytes off of the network .
57,313
def __create ( self , opcode ) : tftpassert ( opcode in self . classes , "Unsupported opcode: %d" % opcode ) packet = self . classes [ opcode ] ( ) return packet
This method returns the appropriate class object corresponding to the passed opcode .
57,314
def add_dup ( self , pkt ) : log . debug ( "Recording a dup of %s" , pkt ) s = str ( pkt ) if s in self . dups : self . dups [ s ] += 1 else : self . dups [ s ] = 1 tftpassert ( self . dups [ s ] < MAX_DUPS , "Max duplicates reached" )
This method adds a dup for a packet to the metrics .
57,315
def checkTimeout ( self , now ) : log . debug ( "checking for timeout on session %s" , self ) if now - self . last_update > self . timeout : raise TftpTimeout ( "Timeout waiting for traffic" )
Compare current time with last_update time and raise an exception if we're over the timeout time .
57,316
def end ( self , close_fileobj = True ) : log . debug ( "in TftpContext.end - closing socket" ) self . sock . close ( ) if close_fileobj and self . fileobj is not None and not self . fileobj . closed : log . debug ( "self.fileobj is open - closing" ) self . fileobj . close ( )
Perform session cleanup . Since the end method should always be called explicitly by the calling code , this works better than the destructor . Set close_fileobj to False so fileobj can be returned open .
57,317
def sethost ( self , host ) : self . __host = host self . address = socket . gethostbyname ( host )
Setter method that also sets the address property as a result of the host that is set .
57,318
def cycle ( self ) : try : ( buffer , ( raddress , rport ) ) = self . sock . recvfrom ( MAX_BLKSIZE ) except socket . timeout : log . warning ( "Timeout waiting for traffic, retrying..." ) raise TftpTimeout ( "Timed-out waiting for traffic" ) log . debug ( "Received %d bytes from %s:%s" , len ( buffer ) , raddress , rport ) self . last_update = time . time ( ) recvpkt = self . factory . parse ( buffer ) if raddress != self . address : log . warning ( "Received traffic from %s, expected host %s. Discarding" % ( raddress , self . host ) ) if self . tidport and self . tidport != rport : log . warning ( "Received traffic from %s:%s but we're " "connected to %s:%s. Discarding." % ( raddress , rport , self . host , self . tidport ) ) if self . packethook : self . packethook ( recvpkt ) self . state = self . state . handle ( recvpkt , raddress , rport ) self . retry_count = 0
Here we wait for a response from the server after sending it something and dispatch appropriate action to that response .
57,319
def start ( self ) : log . info ( "Sending tftp download request to %s" % self . host ) log . info ( " filename -> %s" % self . file_to_transfer ) log . info ( " options -> %s" % self . options ) self . metrics . start_time = time . time ( ) log . debug ( "Set metrics.start_time to %s" % self . metrics . start_time ) pkt = TftpPacketRRQ ( ) pkt . filename = self . file_to_transfer pkt . mode = "octet" pkt . options = self . options self . sock . sendto ( pkt . encode ( ) . buffer , ( self . host , self . port ) ) self . next_block = 1 self . last_pkt = pkt self . state = TftpStateSentRRQ ( self ) while self . state : try : log . debug ( "State is %s" % self . state ) self . cycle ( ) except TftpTimeout as err : log . error ( str ( err ) ) self . retry_count += 1 if self . retry_count >= TIMEOUT_RETRIES : log . debug ( "hit max retries, giving up" ) raise else : log . warning ( "resending last packet" ) self . state . resendLast ( ) except TftpFileNotFoundError as err : log . error ( "Received File not found error" ) if self . fileobj is not None and not self . filelike_fileobj : if os . path . exists ( self . fileobj . name ) : log . debug ( "unlinking output file of %s" , self . fileobj . name ) os . unlink ( self . fileobj . name ) raise
Initiate the download .
57,320
def end ( self ) : TftpContext . end ( self , not self . filelike_fileobj ) self . metrics . end_time = time . time ( ) log . debug ( "Set metrics.end_time to %s" % self . metrics . end_time ) self . metrics . compute ( )
Finish up the context .
57,321
def download ( self , filename , output , packethook = None , timeout = SOCK_TIMEOUT ) : log . debug ( "Creating download context with the following params:" ) log . debug ( "host = %s, port = %s, filename = %s" % ( self . host , self . iport , filename ) ) log . debug ( "options = %s, packethook = %s, timeout = %s" % ( self . options , packethook , timeout ) ) self . context = TftpContextClientDownload ( self . host , self . iport , filename , output , self . options , packethook , timeout , localip = self . localip ) self . context . start ( ) self . context . end ( ) metrics = self . context . metrics log . info ( '' ) log . info ( "Download complete." ) if metrics . duration == 0 : log . info ( "Duration too short, rate undetermined" ) else : log . info ( "Downloaded %.2f bytes in %.2f seconds" % ( metrics . bytes , metrics . duration ) ) log . info ( "Average rate: %.2f kbps" % metrics . kbps ) log . info ( "%.2f bytes in resent data" % metrics . resent_bytes ) log . info ( "Received %d duplicate packets" % metrics . dupcount )
This method initiates a tftp download from the configured remote host requesting the filename passed . It writes the file to output which can be a file - like object or a path to a local file . If a packethook is provided it must be a function that takes a single parameter which will be a copy of each DAT packet received in the form of a TftpPacketDAT object . The timeout parameter may be used to override the default SOCK_TIMEOUT setting which is the amount of time that the client will wait for a receive packet to arrive .
57,322
def upload ( self , filename , input , packethook = None , timeout = SOCK_TIMEOUT ) : self . context = TftpContextClientUpload ( self . host , self . iport , filename , input , self . options , packethook , timeout , localip = self . localip ) self . context . start ( ) self . context . end ( ) metrics = self . context . metrics log . info ( '' ) log . info ( "Upload complete." ) if metrics . duration == 0 : log . info ( "Duration too short, rate undetermined" ) else : log . info ( "Uploaded %d bytes in %.2f seconds" % ( metrics . bytes , metrics . duration ) ) log . info ( "Average rate: %.2f kbps" % metrics . kbps ) log . info ( "%.2f bytes in resent data" % metrics . resent_bytes ) log . info ( "Resent %d packets" % metrics . dupcount )
This method initiates a tftp upload to the configured remote host uploading the filename passed . It reads the file from input which can be a file - like object or a path to a local file . If a packethook is provided it must be a function that takes a single parameter which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object . The timeout parameter may be used to override the default SOCK_TIMEOUT setting which is the amount of time that the client will wait for a DAT packet to be ACKd by the server .
57,323
def decode_options ( self , buffer ) : fmt = b"!" options = { } log . debug ( "decode_options: buffer is: %s" , repr ( buffer ) ) log . debug ( "size of buffer is %d bytes" , len ( buffer ) ) if len ( buffer ) == 0 : log . debug ( "size of buffer is zero, returning empty hash" ) return { } log . debug ( "about to iterate options buffer counting nulls" ) length = 0 for i in range ( len ( buffer ) ) : if ord ( buffer [ i : i + 1 ] ) == 0 : log . debug ( "found a null at length %d" , length ) if length > 0 : fmt += b"%dsx" % length length = - 1 else : raise TftpException ( "Invalid options in buffer" ) length += 1 log . debug ( "about to unpack, fmt is: %s" , fmt ) mystruct = struct . unpack ( fmt , buffer ) tftpassert ( len ( mystruct ) % 2 == 0 , "packet with odd number of option/value pairs" ) for i in range ( 0 , len ( mystruct ) , 2 ) : key = mystruct [ i ] . decode ( 'ascii' ) val = mystruct [ i + 1 ] . decode ( 'ascii' ) log . debug ( "setting option %s to %s" , key , val ) log . debug ( "types are %s and %s" , type ( key ) , type ( val ) ) options [ key ] = val return options
This method decodes the section of the buffer that contains an unknown number of options . It returns a dictionary of option names and values .
57,324
def encode ( self ) : tftpassert ( self . filename , "filename required in initial packet" ) tftpassert ( self . mode , "mode required in initial packet" ) filename = self . filename mode = self . mode if not isinstance ( filename , bytes ) : filename = filename . encode ( 'ascii' ) if not isinstance ( self . mode , bytes ) : mode = mode . encode ( 'ascii' ) ptype = None if self . opcode == 1 : ptype = "RRQ" else : ptype = "WRQ" log . debug ( "Encoding %s packet, filename = %s, mode = %s" , ptype , filename , mode ) for key in self . options : log . debug ( " Option %s = %s" , key , self . options [ key ] ) fmt = b"!H" fmt += b"%dsx" % len ( filename ) if mode == b"octet" : fmt += b"5sx" else : raise AssertionError ( "Unsupported mode: %s" % mode ) options_list = [ ] if len ( list ( self . options . keys ( ) ) ) > 0 : log . debug ( "there are options to encode" ) for key in self . options : name = key if not isinstance ( name , bytes ) : name = name . encode ( 'ascii' ) options_list . append ( name ) fmt += b"%dsx" % len ( name ) value = self . options [ key ] if isinstance ( value , int ) : value = str ( value ) if not isinstance ( value , bytes ) : value = value . encode ( 'ascii' ) options_list . append ( value ) fmt += b"%dsx" % len ( value ) log . debug ( "fmt is %s" , fmt ) log . debug ( "options_list is %s" , options_list ) log . debug ( "size of struct is %d" , struct . calcsize ( fmt ) ) self . buffer = struct . pack ( fmt , self . opcode , filename , mode , * options_list ) log . debug ( "buffer is %s" , repr ( self . buffer ) ) return self
Encode the packet's buffer from the instance variables .
57,325
def encode ( self ) : if len ( self . data ) == 0 : log . debug ( "Encoding an empty DAT packet" ) data = self . data if not isinstance ( self . data , bytes ) : data = self . data . encode ( 'ascii' ) fmt = b"!HH%ds" % len ( data ) self . buffer = struct . pack ( fmt , self . opcode , self . blocknumber , data ) return self
Encode the DAT packet . This method populates self . buffer and returns self for easy method chaining .
57,326
def decode ( self ) : ( self . blocknumber , ) = struct . unpack ( str ( "!H" ) , self . buffer [ 2 : 4 ] ) log . debug ( "decoding DAT packet, block number %d" , self . blocknumber ) log . debug ( "should be %d bytes in the packet total" , len ( self . buffer ) ) self . data = self . buffer [ 4 : ] log . debug ( "found %d bytes of data" , len ( self . data ) ) return self
Decode self . buffer into instance variables . It returns self for easy method chaining .
57,327
def encode ( self ) : fmt = b"!HH%dsx" % len ( self . errmsgs [ self . errorcode ] ) log . debug ( "encoding ERR packet with fmt %s" , fmt ) self . buffer = struct . pack ( fmt , self . opcode , self . errorcode , self . errmsgs [ self . errorcode ] ) return self
Encode the ERR packet based on instance variables populating self . buffer returning self .
57,328
def decode ( self ) : "Decode self.buffer, populating instance variables and return self." buflen = len ( self . buffer ) tftpassert ( buflen >= 4 , "malformed ERR packet, too short" ) log . debug ( "Decoding ERR packet, length %s bytes" , buflen ) if buflen == 4 : log . debug ( "Allowing this affront to the RFC of a 4-byte packet" ) fmt = b"!HH" log . debug ( "Decoding ERR packet with fmt: %s" , fmt ) self . opcode , self . errorcode = struct . unpack ( fmt , self . buffer ) else : log . debug ( "Good ERR packet > 4 bytes" ) fmt = b"!HH%dsx" % ( len ( self . buffer ) - 5 ) log . debug ( "Decoding ERR packet with fmt: %s" , fmt ) self . opcode , self . errorcode , self . errmsg = struct . unpack ( fmt , self . buffer ) log . error ( "ERR packet - errorcode: %d, message: %s" % ( self . errorcode , self . errmsg ) ) return self
Decode self . buffer populating instance variables and return self .
57,329
def match_options ( self , options ) : for name in self . options : if name in options : if name == 'blksize' : size = int ( self . options [ name ] ) if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE : log . debug ( "negotiated blksize of %d bytes" , size ) options [ 'blksize' ] = size else : raise TftpException ( "blksize %s option outside allowed range" % size ) elif name == 'tsize' : size = int ( self . options [ name ] ) if size < 0 : raise TftpException ( "Negative file sizes not supported" ) else : raise TftpException ( "Unsupported option: %s" % name ) return True
This method takes a set of options and tries to match them with its own . It can accept some changes in those options from the server as part of a negotiation . Changed or unchanged it will return a dict of the options so that the session can update itself to the negotiated options .
57,330
def handleOACK ( self , pkt ) : if len ( pkt . options . keys ( ) ) > 0 : if pkt . match_options ( self . context . options ) : log . info ( "Successful negotiation of options" ) self . context . options = pkt . options for key in self . context . options : log . info ( " %s = %s" % ( key , self . context . options [ key ] ) ) else : log . error ( "Failed to negotiate options" ) raise TftpException ( "Failed to negotiate options" ) else : raise TftpException ( "No options found in OACK" )
This method handles an OACK from the server syncing any accepted options .
57,331
def returnSupportedOptions ( self , options ) : accepted_options = { } for option in options : if option == 'blksize' : if int ( options [ option ] ) > MAX_BLKSIZE : log . info ( "Client requested blksize greater than %d " "setting to maximum" % MAX_BLKSIZE ) accepted_options [ option ] = MAX_BLKSIZE elif int ( options [ option ] ) < MIN_BLKSIZE : log . info ( "Client requested blksize less than %d " "setting to minimum" % MIN_BLKSIZE ) accepted_options [ option ] = MIN_BLKSIZE else : accepted_options [ option ] = options [ option ] elif option == 'tsize' : log . debug ( "tsize option is set" ) accepted_options [ 'tsize' ] = 0 else : log . info ( "Dropping unsupported option '%s'" % option ) log . debug ( "Returning these accepted options: %s" , accepted_options ) return accepted_options
This method takes a requested options list from a client and returns the ones that are supported .
57,332
def sendDAT ( self ) : finished = False blocknumber = self . context . next_block if DELAY_BLOCK and DELAY_BLOCK == blocknumber : import time log . debug ( "Deliberately delaying 10 seconds..." ) time . sleep ( 10 ) dat = None blksize = self . context . getBlocksize ( ) buffer = self . context . fileobj . read ( blksize ) log . debug ( "Read %d bytes into buffer" , len ( buffer ) ) if len ( buffer ) < blksize : log . info ( "Reached EOF on file %s" % self . context . file_to_transfer ) finished = True dat = TftpPacketDAT ( ) dat . data = buffer dat . blocknumber = blocknumber self . context . metrics . bytes += len ( dat . data ) log . debug ( "Sending DAT packet %d" , dat . blocknumber ) self . context . sock . sendto ( dat . encode ( ) . buffer , ( self . context . host , self . context . tidport ) ) if self . context . packethook : self . context . packethook ( dat ) self . context . last_pkt = dat return finished
This method sends the next DAT packet based on the data in the context . It returns a boolean indicating whether the transfer is finished .
57,333
def sendACK ( self , blocknumber = None ) : log . debug ( "In sendACK, passed blocknumber is %s" , blocknumber ) if blocknumber is None : blocknumber = self . context . next_block log . info ( "Sending ack to block %d" % blocknumber ) ackpkt = TftpPacketACK ( ) ackpkt . blocknumber = blocknumber self . context . sock . sendto ( ackpkt . encode ( ) . buffer , ( self . context . host , self . context . tidport ) ) self . context . last_pkt = ackpkt
This method sends an ack packet to the block number specified . If none is specified it defaults to the next_block property in the parent context .
57,334
def sendError ( self , errorcode ) : log . debug ( "In sendError, being asked to send error %d" , errorcode ) errpkt = TftpPacketERR ( ) errpkt . errorcode = errorcode if self . context . tidport == None : log . debug ( "Error packet received outside session. Discarding" ) else : self . context . sock . sendto ( errpkt . encode ( ) . buffer , ( self . context . host , self . context . tidport ) ) self . context . last_pkt = errpkt
This method uses the socket passed and uses the errorcode to compose and send an error packet .
57,335
def sendOACK ( self ) : log . debug ( "In sendOACK with options %s" , self . context . options ) pkt = TftpPacketOACK ( ) pkt . options = self . context . options self . context . sock . sendto ( pkt . encode ( ) . buffer , ( self . context . host , self . context . tidport ) ) self . context . last_pkt = pkt
This method sends an OACK packet with the options from the current context .
57,336
def resendLast ( self ) : "Resend the last sent packet due to a timeout." log . warning ( "Resending packet %s on sessions %s" % ( self . context . last_pkt , self ) ) self . context . metrics . resent_bytes += len ( self . context . last_pkt . buffer ) self . context . metrics . add_dup ( self . context . last_pkt ) sendto_port = self . context . tidport if not sendto_port : sendto_port = self . context . port self . context . sock . sendto ( self . context . last_pkt . encode ( ) . buffer , ( self . context . host , sendto_port ) ) if self . context . packethook : self . context . packethook ( self . context . last_pkt )
Resend the last sent packet due to a timeout .
57,337
def handleDat ( self , pkt ) : log . info ( "Handling DAT packet - block %d" % pkt . blocknumber ) log . debug ( "Expecting block %s" , self . context . next_block ) if pkt . blocknumber == self . context . next_block : log . debug ( "Good, received block %d in sequence" , pkt . blocknumber ) self . sendACK ( ) self . context . next_block += 1 log . debug ( "Writing %d bytes to output file" , len ( pkt . data ) ) self . context . fileobj . write ( pkt . data ) self . context . metrics . bytes += len ( pkt . data ) if len ( pkt . data ) < self . context . getBlocksize ( ) : log . info ( "End of file detected" ) return None elif pkt . blocknumber < self . context . next_block : if pkt . blocknumber == 0 : log . warning ( "There is no block zero!" ) self . sendError ( TftpErrors . IllegalTftpOp ) raise TftpException ( "There is no block zero!" ) log . warning ( "Dropping duplicate block %d" % pkt . blocknumber ) self . context . metrics . add_dup ( pkt ) log . debug ( "ACKing block %d again, just in case" , pkt . blocknumber ) self . sendACK ( pkt . blocknumber ) else : msg = "Whoa! Received future block %d but expected %d" % ( pkt . blocknumber , self . context . next_block ) log . error ( msg ) raise TftpException ( msg ) return TftpStateExpectDAT ( self . context )
This method handles a DAT packet during a client download or a server upload .
57,338
def serverInitial ( self , pkt , raddress , rport ) : options = pkt . options sendoack = False if not self . context . tidport : self . context . tidport = rport log . info ( "Setting tidport to %s" % rport ) log . debug ( "Setting default options, blksize" ) self . context . options = { 'blksize' : DEF_BLKSIZE } if options : log . debug ( "Options requested: %s" , options ) supported_options = self . returnSupportedOptions ( options ) self . context . options . update ( supported_options ) sendoack = True if pkt . mode != 'octet' : log . warning ( "Received non-octet mode request. I'll reply with binary data." ) if self . context . host != raddress or self . context . port != rport : self . sendError ( TftpErrors . UnknownTID ) log . error ( "Expected traffic from %s:%s but received it " "from %s:%s instead." % ( self . context . host , self . context . port , raddress , rport ) ) return self log . debug ( "Requested filename is %s" , pkt . filename ) if pkt . filename . startswith ( self . context . root ) : full_path = pkt . filename else : full_path = os . path . join ( self . context . root , pkt . filename . lstrip ( '/' ) ) self . full_path = os . path . abspath ( full_path ) log . debug ( "full_path is %s" , full_path ) if self . full_path . startswith ( self . context . root ) : log . info ( "requested file is in the server root - good" ) else : log . warning ( "requested file is not within the server root - bad" ) self . sendError ( TftpErrors . IllegalTftpOp ) raise TftpException ( "bad file path" ) self . context . file_to_transfer = pkt . filename return sendoack
This method performs initial setup for a server context transfer put here to refactor code out of the TftpStateServerRecvRRQ and TftpStateServerRecvWRQ classes since their initial setup is identical . The method returns a boolean sendoack to indicate whether it is required to send an OACK to the client .
57,339
def handle ( self , pkt , raddress , rport ) : "Handle an initial RRQ packet as a server." log . debug ( "In TftpStateServerRecvRRQ.handle" ) sendoack = self . serverInitial ( pkt , raddress , rport ) path = self . full_path log . info ( "Opening file %s for reading" % path ) if os . path . exists ( path ) : self . context . fileobj = open ( path , "rb" ) elif self . context . dyn_file_func : log . debug ( "No such file %s but using dyn_file_func" , path ) self . context . fileobj = self . context . dyn_file_func ( self . context . file_to_transfer , raddress = raddress , rport = rport ) if self . context . fileobj is None : log . debug ( "dyn_file_func returned 'None', treating as " "FileNotFound" ) self . sendError ( TftpErrors . FileNotFound ) raise TftpException ( "File not found: %s" % path ) else : log . warn ( "File not found: %s" , path ) self . sendError ( TftpErrors . FileNotFound ) raise TftpException ( "File not found: {}" . format ( path ) ) if sendoack and 'tsize' in self . context . options : self . context . fileobj . seek ( 0 , os . SEEK_END ) tsize = str ( self . context . fileobj . tell ( ) ) self . context . fileobj . seek ( 0 , 0 ) self . context . options [ 'tsize' ] = tsize if sendoack : self . sendOACK ( ) else : self . context . next_block = 1 log . debug ( "No requested options, starting send..." ) self . context . pending_complete = self . sendDAT ( ) return TftpStateExpectACK ( self . context )
Handle an initial RRQ packet as a server .
57,340
def make_subdirs ( self ) : subpath = self . full_path [ len ( self . context . root ) : ] log . debug ( "make_subdirs: subpath is %s" , subpath ) dirs = subpath . split ( os . sep ) [ : - 1 ] log . debug ( "dirs is %s" , dirs ) current = self . context . root for dir in dirs : if dir : current = os . path . join ( current , dir ) if os . path . isdir ( current ) : log . debug ( "%s is already an existing directory" , current ) else : os . mkdir ( current , 0o700 )
The purpose of this method is to if necessary create all of the subdirectories leading up to the file to be written .
57,341
def handle ( self , pkt , raddress , rport ) : "Handle an initial WRQ packet as a server." log . debug ( "In TftpStateServerRecvWRQ.handle" ) sendoack = self . serverInitial ( pkt , raddress , rport ) path = self . full_path if self . context . upload_open : f = self . context . upload_open ( path , self . context ) if f is None : self . sendError ( TftpErrors . AccessViolation ) raise TftpException ( "Dynamic path %s not permitted" % path ) else : self . context . fileobj = f else : log . info ( "Opening file %s for writing" % path ) if os . path . exists ( path ) : log . warning ( "File %s exists already, overwriting..." % ( self . context . file_to_transfer ) ) self . make_subdirs ( ) self . context . fileobj = open ( path , "wb" ) if sendoack : log . debug ( "Sending OACK to client" ) self . sendOACK ( ) else : log . debug ( "No requested options, expecting transfer to begin..." ) self . sendACK ( ) self . context . next_block = 1 return TftpStateExpectDAT ( self . context )
Handle an initial WRQ packet as a server .
57,342
def handle ( self , pkt , raddress , rport ) : "Handle a packet, hopefully an ACK since we just sent a DAT." if isinstance ( pkt , TftpPacketACK ) : log . debug ( "Received ACK for packet %d" % pkt . blocknumber ) if self . context . next_block == pkt . blocknumber : if self . context . pending_complete : log . info ( "Received ACK to final DAT, we're done." ) return None else : log . debug ( "Good ACK, sending next DAT" ) self . context . next_block += 1 log . debug ( "Incremented next_block to %d" , self . context . next_block ) self . context . pending_complete = self . sendDAT ( ) elif pkt . blocknumber < self . context . next_block : log . warning ( "Received duplicate ACK for block %d" % pkt . blocknumber ) self . context . metrics . add_dup ( pkt ) else : log . warning ( "Oooh, time warp. Received ACK to packet we " "didn't send yet. Discarding." ) self . context . metrics . errors += 1 return self elif isinstance ( pkt , TftpPacketERR ) : log . error ( "Received ERR packet from peer: %s" % str ( pkt ) ) raise TftpException ( "Received ERR packet from peer: %s" % str ( pkt ) ) else : log . warning ( "Discarding unsupported packet: %s" % str ( pkt ) ) return self
Handle a packet hopefully an ACK since we just sent a DAT .
57,343
def handle ( self , pkt , raddress , rport ) : if isinstance ( pkt , TftpPacketDAT ) : return self . handleDat ( pkt ) elif isinstance ( pkt , TftpPacketACK ) : self . sendError ( TftpErrors . IllegalTftpOp ) raise TftpException ( "Received ACK from peer when expecting DAT" ) elif isinstance ( pkt , TftpPacketWRQ ) : self . sendError ( TftpErrors . IllegalTftpOp ) raise TftpException ( "Received WRQ from peer when expecting DAT" ) elif isinstance ( pkt , TftpPacketERR ) : self . sendError ( TftpErrors . IllegalTftpOp ) raise TftpException ( "Received ERR from peer: " + str ( pkt ) ) else : self . sendError ( TftpErrors . IllegalTftpOp ) raise TftpException ( "Received unknown packet type from peer: " + str ( pkt ) )
Handle the packet in response to an ACK which should be a DAT .
57,344
def handle(self, pkt, raddress, rport):
    """Handle the packet in response to an RRQ to the server.

    Expected replies are an OACK (option negotiation) or the first DAT.
    Anything else sends an ERR to the server and raises ``TftpException``
    (``TftpFileNotFoundError`` for a file-not-found ERR).
    """
    # Latch the server's transfer-id port from the first reply.
    if not self.context.tidport:
        self.context.tidport = rport
        log.info("Set remote port for session to %s" % rport)
    if isinstance(pkt, TftpPacketOACK):
        log.info("Received OACK from server")
        try:
            self.handleOACK(pkt)
        except TftpException as err:
            log.error("Failed to negotiate options: %s" % str(err))
            self.sendError(TftpErrors.FailedNegotiation)
            raise
        else:
            # ACK block 0 acknowledges the option set; then expect DATs.
            log.debug("Sending ACK to OACK")
            self.sendACK(blocknumber=0)
            log.debug("Changing state to TftpStateExpectDAT")
            return TftpStateExpectDAT(self.context)
    elif isinstance(pkt, TftpPacketDAT):
        log.info("Received DAT from server")
        if self.context.options:
            # A DAT (not OACK) as first reply means the server ignored
            # our requested options.
            log.info("Server ignored options, falling back to defaults")
            self.context.options = {'blksize': DEF_BLKSIZE}
        return self.handleDat(pkt)
    # Every other packet type is illegal during a download.
    elif isinstance(pkt, TftpPacketACK):
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received ACK from server while in download")
    elif isinstance(pkt, TftpPacketWRQ):
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received WRQ from server while in download")
    elif isinstance(pkt, TftpPacketERR):
        self.sendError(TftpErrors.IllegalTftpOp)
        log.debug("Received ERR packet: %s", pkt)
        if pkt.errorcode == TftpErrors.FileNotFound:
            raise TftpFileNotFoundError("File not found")
        else:
            raise TftpException("Received ERR from server: {}".format(pkt))
    else:
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received unknown packet type from server: %s" % pkt)
    return self
Handle the packet in response to an RRQ to the server .
57,345
def stop(self, now=False):
    """Request that the server stop.

    With ``now`` set, everything is dropped on the next pass through the
    serve loop; otherwise no new transfers are accepted but existing ones
    run to completion.  Either way this only raises a flag -- the select
    loop notices it on ready data or on its next timeout (SOCK_TIMEOUT).
    """
    flag = 'shutdown_immediately' if now else 'shutdown_gracefully'
    setattr(self, flag, True)
Stop the server gracefully. Do not take any new transfers, but complete the existing ones. If now is True, drop everything and stop immediately. Note: stopping immediately will not interrupt the select loop; it takes effect when the server returns on ready data or a timeout, i.e. SOCK_TIMEOUT.
57,346
def _fetch(url, ssl_verify=True):
    """Fetch the body of ``url`` and return it decoded as UTF-8 text.

    Args:
        url: URL to fetch.
        ssl_verify: when False, certificate validation is disabled --
            only use this against endpoints you trust.

    Returns:
        The response body as a string.
    """
    req = Request(url)
    if ssl_verify:
        page = urlopen(req)
    else:
        # Explicitly disable hostname/certificate checks.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        page = urlopen(req, context=ctx)
    # try/finally so the connection is closed even if read()/decode() raises
    # (the original leaked the handle on error).
    try:
        return page.read().decode('utf-8')
    finally:
        page.close()
Helper function to fetch content from a given url.
57,347
def _dict(content):
    """Convert a text GET response into a list of record dicts.

    Uses pandas when available; otherwise falls back to plain json and
    extracts the first top-level key listed in ``c.response_data``
    (presumably the payload key of the FRED response -- TODO confirm).
    """
    if _has_pandas:
        data = _data_frame(content).to_dict(orient='records')
    else:
        response = loads(content)
        # pick the response key that actually holds the data records
        key = [x for x in response.keys() if x in c.response_data][0]
        data = response[key]
    return data
Helper function that converts a text-based GET response to a python dictionary for additional manipulation.
57,348
def _data_frame(content):
    """Convert a text GET response into a pandas DataFrame.

    The payload key is the first top-level response key found in
    ``c.response_data``; ``_convert`` post-processes column types
    (semantics defined elsewhere in this module).
    """
    response = loads(content)
    # pick the response key that actually holds the data records
    key = [x for x in response.keys() if x in c.response_data][0]
    frame = DataFrame(response[key])
    final_frame = _convert(frame)
    return final_frame
Helper function that converts a text-based GET response to a pandas dataframe for additional manipulation.
57,349
def _tab(content):
    """Convert a text GET response to tab-separated values (no index)."""
    frame = _data_frame(content)
    return frame.to_csv(index=False, sep='\t')
Helper function that converts a text-based GET response to tab-separated values for additional manipulation.
57,350
def _pipe(content):
    """Convert a text GET response to pipe-separated values (no index)."""
    frame = _data_frame(content)
    return frame.to_csv(index=False, sep='|')
Helper function that converts a text-based GET response to pipe-separated values for additional manipulation.
57,351
def _get_request(url_root, api_key, path, response_type, params, ssl_verify):
    """Issue a GET request to FRED and return it in the requested format.

    ``response_type`` selects the converter via ``_dispatch`` (dict, frame,
    tab, pipe, ...).
    """
    full_url = _url_builder(url_root, api_key, path, params)
    raw_content = _fetch(full_url, ssl_verify)
    convert = _dispatch(response_type)
    return convert(raw_content)
Helper function that requests a GET response from FRED and converts it to the requested format.
57,352
def parse_atom_file(filename: str) -> AtomFeed:
    """Parse an Atom feed from a local XML file."""
    document = parse_xml(filename)
    return _parse_atom(document.getroot())
Parse an Atom feed from a local XML file .
57,353
def parse_atom_bytes(data: bytes) -> AtomFeed:
    """Parse an Atom feed from a byte-string containing XML data."""
    document = parse_xml(BytesIO(data))
    return _parse_atom(document.getroot())
Parse an Atom feed from a byte - string containing XML data .
57,354
def _get_link(element: Element) -> Optional[str]:
    """Attempt to retrieve the item link.

    Prefers an explicit <link>; falls back to a <guid> marked as a
    permalink; returns None when neither is present.
    """
    direct_link = get_text(element, 'link')
    if direct_link is not None:
        return direct_link
    guid_element = get_child(element, 'guid')
    if guid_element is not None and guid_element.attrib.get('isPermaLink') == 'true':
        return get_text(element, 'guid')
    return None
Attempt to retrieve item link .
57,355
def parse_rss_file(filename: str) -> RSSChannel:
    """Parse an RSS feed from a local XML file."""
    document = parse_xml(filename)
    return _parse_rss(document.getroot())
Parse an RSS feed from a local XML file .
57,356
def parse_rss_bytes(data: bytes) -> RSSChannel:
    """Parse an RSS feed from a byte-string containing XML data."""
    document = parse_xml(BytesIO(data))
    return _parse_rss(document.getroot())
Parse an RSS feed from a byte - string containing XML data .
57,357
def parse_json_feed_file(filename: str) -> JSONFeed:
    """Parse a JSON feed from a local json file.

    Raises:
        FeedJSONError: if the file does not contain valid JSON.
    """
    # JSON Feed documents are UTF-8; don't depend on the locale default
    # encoding that a bare open() would use.
    with open(filename, encoding='utf-8') as f:
        try:
            root = json.load(f)
        except json.JSONDecodeError:
            raise FeedJSONError('Not a valid JSON document')
    return parse_json_feed(root)
Parse a JSON feed from a local json file .
57,358
def parse_json_feed_bytes(data: bytes) -> JSONFeed:
    """Parse a JSON feed from a byte-string containing JSON data.

    Raises:
        FeedJSONError: if the data is not valid JSON.
    """
    try:
        document = json.loads(data)
    except json.decoder.JSONDecodeError:
        raise FeedJSONError('Not a valid JSON document')
    return parse_json_feed(document)
Parse a JSON feed from a byte - string containing JSON data .
57,359
def parse_opml_file(filename: str) -> OPML:
    """Parse an OPML document from a local XML file."""
    document = parse_xml(filename)
    return _parse_opml(document.getroot())
Parse an OPML document from a local XML file .
57,360
def parse_opml_bytes(data: bytes) -> OPML:
    """Parse an OPML document from a byte-string containing XML data."""
    document = parse_xml(BytesIO(data))
    return _parse_opml(document.getroot())
Parse an OPML document from a byte - string containing XML data .
57,361
def get_feed_list ( opml_obj : OPML ) -> List [ str ] : rv = list ( ) def collect ( obj ) : for outline in obj . outlines : if outline . type == 'rss' and outline . xml_url : rv . append ( outline . xml_url ) if outline . outlines : collect ( outline ) collect ( opml_obj ) return rv
Walk an OPML document to extract the list of feed it contains .
57,362
def simple_parse_file(filename: str) -> Feed:
    """Parse an Atom, RSS or JSON feed from a local file.

    Each format's parser is tried in turn and the first successful result
    is adapted to the common Feed type.
    """
    parser_adapter_pairs = (
        (rss.parse_rss_file, _adapt_rss_channel),
        (atom.parse_atom_file, _adapt_atom_feed),
        (json_feed.parse_json_feed_file, _adapt_json_feed),
    )
    return _simple_parse(parser_adapter_pairs, filename)
Parse an Atom, RSS or JSON feed from a local file.
57,363
def simple_parse_bytes(data: bytes) -> Feed:
    """Parse an Atom, RSS or JSON feed from a byte-string.

    Each format's parser is tried in turn and the first successful result
    is adapted to the common Feed type.
    """
    parser_adapter_pairs = (
        (rss.parse_rss_bytes, _adapt_rss_channel),
        (atom.parse_atom_bytes, _adapt_atom_feed),
        (json_feed.parse_json_feed_bytes, _adapt_json_feed),
    )
    return _simple_parse(parser_adapter_pairs, data)
Parse an Atom, RSS or JSON feed from a byte-string containing data.
57,364
def get_shear_distance(a):
    """Returns the distance a volume has moved during simple shear.

    Considers either Lees-Edwards boundary conditions (``shear_dx`` in
    ``a.info``, cell must be fully orthorhombic) or a sheared cell (the
    x/y components of the third cell vector give the shear distance).

    Returns:
        (dx, dy) shear displacement components.
    """
    cx, cy, cz = a.cell
    if 'shear_dx' in a.info:
        # Lees-Edwards: the cell itself must be strictly orthorhombic.
        assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
        assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
        # Bug fix: these two messages previously mislabelled cy as cx.
        assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
        assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
        assert abs(cz[0]) < 1e-12, 'cz[0] = {0}'.format(cz[0])
        assert abs(cz[1]) < 1e-12, 'cz[1] = {0}'.format(cz[1])
        dx, dy, dz = a.info['shear_dx']
    else:
        # Sheared cell: cx, cy orthorhombic; cz carries the shear tilt.
        assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
        assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
        assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
        assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
        dx, dy, sz = cz
    return dx, dy
Returns the distance a volume has moved during simple shear . Considers either Lees - Edwards boundary conditions or sheared cells .
57,365
def array_inverse(A):
    """Compute the inverse of each matrix in a stack of matrices.

    This is faster than calling ``numpy.linalg.inv`` once per matrix in a
    Python loop: modern NumPy's ``inv`` is batched over leading dimensions.
    The original implementation called the private
    ``np.linalg.lapack_lite.dgesv``, which is not part of NumPy's public
    API and breaks on current releases.

    Args:
        A: array-like of shape (n, m, m).

    Returns:
        Array of shape (n, m, m) with the inverse of each matrix.

    Raises:
        numpy.linalg.LinAlgError: if any matrix in the stack is singular.
    """
    A = np.ascontiguousarray(A, dtype=float)
    return np.linalg.inv(A)
Compute inverse for each matrix in a list of matrices . This is faster than calling numpy . linalg . inv for each matrix .
57,366
def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old):
    """Calculate delta_ij + epsilon_ij, i.e. the deformation gradient matrix.

    Relies on the sibling helpers ``get_XIJ``/``get_YIJ`` to build the
    per-atom correlation matrices from current and reference bond vectors,
    then combines them with the inverse of YIJ (semantics of XIJ/YIJ are
    defined by those helpers -- see Falk & Langer's D^2_min formulation).
    """
    XIJ = get_XIJ(nat, i_now, dr_now, dr_old)
    YIJ = get_YIJ(nat, i_now, dr_old)
    YIJ_invert = array_inverse(YIJ)
    # Per-atom matrix product of XIJ with YIJ^-1, done via broadcasting
    # over the (nat, 3, 3) stacks instead of a Python loop.
    epsilon = np.sum(XIJ.reshape(-1, 3, 1, 3) * YIJ_invert.reshape(-1, 1, 3, 3), axis=3)
    return epsilon
Calculate delta_ij + epsilon_ij i . e . the deformation gradient matrix
57,367
def get_D_square_min(atoms_now, atoms_old, i_now, j_now, delta_plus_epsilon=None):
    """Calculate the D^2_min norm of Falk and Langer.

    Args:
        atoms_now: current configuration (must have same length as atoms_old).
        atoms_old: reference configuration.
        i_now, j_now: neighbour pair index arrays (i_now is the "center"
            atom of each pair).
        delta_plus_epsilon: optional precomputed per-atom deformation
            gradients; computed from the pair vectors when omitted.

    Returns:
        (delta_plus_epsilon, d_sq) where d_sq is the per-atom D^2 value.
    """
    nat = len(atoms_now)
    assert len(atoms_now) == len(atoms_old)
    pos_now = atoms_now.positions
    pos_old = atoms_old.positions
    # Bond vectors for each pair, wrapped by the minimum image convention.
    dr_now = mic(pos_now[i_now] - pos_now[j_now], atoms_now.cell)
    dr_old = mic(pos_old[i_now] - pos_old[j_now], atoms_old.cell)
    assert dr_now.shape == dr_old.shape
    if delta_plus_epsilon is None:
        # Get the best-fit affine deformation for each atom's neighbourhood.
        delta_plus_epsilon = get_delta_plus_epsilon(nat, i_now, dr_now, dr_old)
    delta_plus_epsilon_n = delta_plus_epsilon[i_now]
    # Residual of each bond after applying the affine transformation,
    # squared and summed over the Cartesian components...
    d_sq_n = np.sum(
        (dr_now - np.sum(delta_plus_epsilon_n.reshape(-1, 3, 3) * dr_old.reshape(-1, 1, 3), axis=2)) ** 2,
        axis=1)
    # ...then accumulated per center atom.
    d_sq = np.bincount(i_now, weights=d_sq_n)
    return delta_plus_epsilon, d_sq
Calculate the D^2_min norm of Falk and Langer
57,368
def dhms(secs):
    """Split a duration in seconds into [days, hours, minutes, seconds].

    Seconds are rounded to the nearest integer; the other fields are
    truncated.
    """
    days = int(secs // 86400)
    hours = int((secs % 86400) // 3600)
    minutes = int((secs % 3600) // 60)
    seconds = int(secs % 60 + .5)
    return [days, hours, minutes, seconds]
return days hours minutes and seconds
57,369
def hms(secs):
    """Split a duration in seconds into [hours, minutes, seconds].

    Seconds are rounded to the nearest integer; the other fields are
    truncated.
    """
    hours = int(secs // 3600)
    minutes = int((secs % 3600) // 60)
    seconds = int(secs % 60 + .5)
    return [hours, minutes, seconds]
return hours minutes and seconds
57,370
def get_enclosing_orthorhombic_box(cell):
    """Return lower and upper bounds of the orthorhombic box that encloses
    the parallelepiped spanned by the three cell vectors of `cell`.
    """
    cx, cy, cz = cell
    # All eight corners of the parallelepiped: every 0/1 combination of
    # the three cell vectors.
    corners = np.array([i * cx + j * cy + k * cz
                        for i in (0, 1) for j in (0, 1) for k in (0, 1)])
    return np.min(corners, axis=0), np.max(corners, axis=0)
Return lower and upper bounds of the orthorhombic box that encloses the parallelepiped spanned by the three cell vectors of cell .
57,371
def stress_invariants(s):
    """Receives a list of stress tensors and returns the three invariants.

    Accepts a single Voigt vector (6,), a single full tensor (3, 3), a
    list of Voigt vectors (n, 6) or a list of full tensors (n, 3, 3).

    Returns:
        (hydrostatic pressure, octahedral shear stress, J3), each of
        length n.
    """
    s = np.asarray(s)
    if s.shape == (6,):
        s = s.reshape(1, -1)
    elif s.shape == (3, 3):
        # Bug fix: reshape accepts at most one -1; the old (1, -1, -1)
        # raised ValueError for single full-tensor input.
        s = s.reshape(1, 3, 3)
    if len(s.shape) == 3:
        # Full tensors -> Voigt notation, symmetrising the off-diagonals.
        s = np.transpose([s[:, 0, 0], s[:, 1, 1], s[:, 2, 2],
                          (s[:, 0, 1] + s[:, 1, 0]) / 2,
                          (s[:, 1, 2] + s[:, 2, 1]) / 2,
                          (s[:, 2, 0] + s[:, 0, 2]) / 2])
    # Principal invariants of the stress tensor.
    I1 = s[:, 0] + s[:, 1] + s[:, 2]
    I2 = (s[:, 0] * s[:, 1] + s[:, 1] * s[:, 2] + s[:, 2] * s[:, 0]
          - s[:, 3] ** 2 - s[:, 4] ** 2 - s[:, 5] ** 2)
    I3 = (s[:, 0] * s[:, 1] * s[:, 2] + 2 * s[:, 3] * s[:, 4] * s[:, 5]
          - s[:, 3] ** 2 * s[:, 2] - s[:, 4] ** 2 * s[:, 0] - s[:, 5] ** 2 * s[:, 1])
    # Invariants of the deviatoric part.
    J2 = I1 ** 2 / 3 - I2
    J3 = 2 * I1 ** 3 / 27 - I1 * I2 / 3 + I3
    return -I1 / 3, np.sqrt(2 * J2 / 3), J3
Receives a list of stress tensors and returns the three invariants . Return hydrostatic pressure octahedral shear stress and J3
57,372
def scanmeta(f):
    """Scan file headers for '@meta ... @endmeta' comment sections.

    ``f`` may be a filename or an open file-like object.  Comment lines
    start with '!'; the meta section may span several comment lines.
    Entries are whitespace-separated 'key:value' or bare 'key' tokens.

    Returns:
        dict mapping key -> value (value is None for bare keys);
        {} when the file contains no meta section.

    Raises:
        RuntimeError: on a malformed meta entry.
    """
    # (removed leftover debug print of the file handle)
    if isinstance(f, str):
        f = io.open(f, mode='r', encoding='latin-1')
    done = False
    l = f.readline()
    s = None
    # Phase 1: find the '@meta' marker inside a '!' comment.
    while l and s is None:
        i = l.find('!')
        if i >= 0:
            l = l[i + 1:]
            i = l.find('@meta')
            if i >= 0:
                l = l[i + 5:]
                i = l.find('@endmeta')
                if i >= 0:
                    # Whole section on one line.
                    s = l[:i]
                    done = True
                else:
                    s = l
        l = f.readline()
    if not done and not l:
        # Reached EOF without ever seeing '@meta'.
        return {}
    # Phase 2: keep collecting comment lines until '@endmeta'.
    while l and not done:
        i = l.find('!')
        if i >= 0:
            l = l[i + 1:]
            i = l.find('@endmeta')
            if i >= 0:
                s += ' ' + l[:i]
                done = True
            else:
                s += ' ' + l
        l = f.readline()
    # Parse 'key:value' / 'key' tokens into a dict.
    s = map(lambda x: x.split(':'), s.split())
    d = {}
    for x in s:
        if len(x) > 2 or len(x) == 0:
            raise RuntimeError('Syntax error in meta information.')
        elif len(x) == 2:
            d[x[0]] = x[1]
        else:
            d[x[0]] = None
    return d
Scan file headers for meta information enclosed between '@meta' and '@endmeta' markers in comment lines.
57,373
def mic(dr, cell, pbc=None):
    """Apply the minimum image convention to an array of distance vectors.

    Args:
        dr: distance vector(s), shape (..., 3).
        cell: 3x3 cell matrix (rows are cell vectors).
        pbc: optional per-direction periodicity flags; non-periodic
            directions are left unwrapped.
    """
    reciprocal = np.linalg.inv(cell)
    if pbc is not None:
        # Zero out reciprocal rows of non-periodic directions so no
        # wrapping occurs along them.
        reciprocal *= np.array(pbc, dtype=int).reshape(3, 1)
    shifts = np.round(np.dot(dr, reciprocal))
    return dr - np.dot(shifts, cell)
Apply minimum image convention to an array of distance vectors .
57,374
def s_from_dhms(time):
    """Convert a duration string like '1d2h30m15s' to seconds.

    Units are d/h/m/s (case-insensitive); bare numbers count as seconds.

    Raises:
        RuntimeError: when a token is not digits plus a known unit.
    """
    dhms_s = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    time = time.lower()
    # raw string: '\d' in a plain literal is a SyntaxWarning on modern Python
    word_list = re.findall(r'\d*[^\d]*', time)
    seconds = 0
    for word in word_list:
        if word != '':
            sec = 1
            for t in list(dhms_s.keys()):
                nw = word.replace(t, '')
                if nw != word:
                    sec = dhms_s[t]
                    word = nw
                    break
            try:
                seconds += int(word) * sec
            except ValueError:
                # was a bare `except:`, which also swallowed KeyboardInterrupt
                raise RuntimeError('unknown format in timestring ' + time)
    return seconds
return seconds from dhms
57,375
def get_stress(self, a):
    """Calculate the stress tensor as the sum over all sub-calculators.

    Returns a length-6 float array (Voigt notation, as produced by the
    individual calculators).
    """
    total = np.zeros(6, dtype=float)
    for calc in self.calcs:
        total = total + calc.get_stress(a)
    return total
Calculate stress tensor .
57,376
def set_atoms(self, a):
    """Assign an atoms object, forwarding it to every sub-calculator
    that supports ``set_atoms``.
    """
    for calc in self.calcs:
        if hasattr(calc, "set_atoms"):
            calc.set_atoms(a)
Assign an atoms object .
57,377
def rename_edges(self, old_task_name, new_task_name, graph=None):
    """Change references to a task in existing edges.

    Renames the node key itself and replaces the old name in every edge
    set.  Operates on ``self.graph`` unless a graph is passed in.
    """
    if not graph:
        graph = self.graph
    # Snapshot the items: the loop inserts and deletes keys, and mutating
    # a dict while iterating over it is unsafe in Python 3.
    for node, edges in list(graph.items()):
        if node == old_task_name:
            graph[new_task_name] = copy(edges)
            del graph[old_task_name]
        elif old_task_name in edges:
            edges.remove(old_task_name)
            edges.add(new_task_name)
Change references to a task in existing edges .
57,378
def predecessors(self, node, graph=None):
    """Return a list of all predecessors of the given node, i.e. every
    node whose edge set contains it.  Uses ``self.graph`` unless a graph
    is passed in.
    """
    if graph is None:
        graph = self.graph
    return [parent for parent, children in graph.items() if node in children]
Returns a list of all predecessors of the given node
57,379
def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):
    r"""Creates a tensor variable of which initial values are `value` and shape is `shape`.

    Args:
      name: The name of the new variable.
      shape: A tuple/list of integers or an integer.
      value: Initial value for every element (default 0).
      dtype: The data type (default floatx).
      summary: If True, add a parameter summary for this variable.
      regularizer: A (Tensor -> Tensor or None) function applied to the
        variable; result is added to the regularization losses.
      trainable: If True, add this variable to the trainable collection.

    Returns:
      A `Variable`.
    """
    shape = shape if isinstance(shape, (tuple, list)) else [shape]
    x = tf.get_variable(name, shape, dtype=dtype,
                        initializer=tf.constant_initializer(value),
                        regularizer=regularizer, trainable=trainable)
    # add summary
    if summary:
        tf.sg_summary_param(x)
    return x
r Creates a tensor variable of which initial values are value and shape is shape .
57,380
def sg_producer_func(func):
    r"""Decorates a function `func` as a sg_producer function.

    The wrapped function feeds its results into a FIFO queue via
    background queue-runner threads and returns the dequeue op.
    """
    @wraps(func)
    def wrapper(**kwargs):
        r"""Manages arguments of `tf.sg_opt` and builds the queue pipeline.

        Args:
          **kwargs:
            source: A source queue list to enqueue (mandatory).
            dtypes: Input data types of each tensor.
            out_dtypes: Output data types of each tensor (defaults to dtypes).
            capacity: Queue capacity (default 32).
            num_threads: Number of enqueue threads (default 1).
        """
        # default option
        opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1)
        # source queue list check
        assert opt.source is not None, 'source is mandatory.'
        if type(opt.source) is not list and type(opt.source) is not tuple:
            opt.source = [opt.source]
        if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple:
            opt.dtypes = [opt.dtypes]
        # default out_dtypes to input dtypes
        if opt.out_dtypes is None:
            opt.out_dtypes = opt.dtypes
        if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple:
            opt.out_dtypes = [opt.out_dtypes]
        assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.'

        # enqueue function: run the user func on dequeued source data and
        # feed the result into the output queue through placeholders.
        def enqueue_func(sess, op):
            # read data from source queue
            data = func(sess.run(opt.source))
            # create feeder dict
            feed_dict = {}
            for ph, col in zip(placeholders, data):
                feed_dict[ph] = col
            # run session
            sess.run(op, feed_dict=feed_dict)

        # create placeholders for enqueueing
        placeholders = []
        for dtype in opt.dtypes:
            placeholders.append(tf.placeholder(dtype=dtype))
        # create FIFO queue
        queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes)
        # enqueue operation
        enqueue_op = queue.enqueue(placeholders)
        # create queue runner with num_threads enqueue threads
        runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads)
        # register to global collection
        tf.train.add_queue_runner(runner)
        # return dequeue operation
        return queue.dequeue()

    return wrapper
r Decorates a function func as sg_producer_func .
57,381
def sg_transpose(tensor, opt):
    r"""Permutes the dimensions according to `opt.perm`.

    See `tf.transpose()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        perm: A permutation of the dimensions of `tensor` (mandatory).
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.perm is not None, 'perm is mandatory'
    return tf.transpose(tensor, opt.perm, name=opt.name)
r Permutes the dimensions according to opt . perm .
57,382
def sg_argmin(tensor, opt):
    r"""Returns the indices of the minimum values along the specified axis.

    See `tf.argmin()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        axis: Target axis (default: the last dimension).
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # default axis: the last dimension
    opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
    return tf.argmin(tensor, opt.axis, opt.name)
r Returns the indices of the minimum values along the specified axis .
57,383
def sg_concat(tensor, opt):
    r"""Concatenates tensors along an axis.

    See `tf.concat()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        target: A `Tensor` or list of tensors to concatenate with (mandatory).
        axis: Target axis (default: the last dimension).
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.target is not None, 'target is mandatory.'
    # default axis: the last dimension
    opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
    # normalize target to a list
    target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target]
    return tf.concat([tensor] + target, opt.axis, name=opt.name)
r Concatenates tensors along a axis .
57,384
def sg_log(tensor, opt):
    r"""Log transform a dense tensor.

    A small epsilon (`tf.sg_eps`) is added for numerical stability at zero.

    Args:
      tensor: A `Tensor`.
      opt:
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.log(tensor + tf.sg_eps, name=opt.name)
r Log transform a dense tensor
57,385
def sg_prod(tensor, opt):
    r"""Computes the product of elements across axis of a tensor.

    See `tf.reduce_prod()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        axis: Axis (or axes) to reduce; None reduces all dimensions.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r Computes the product of elements across axis of a tensor .
57,386
def sg_min(tensor, opt):
    r"""Computes the minimum of elements across axis of a tensor.

    See `tf.reduce_min()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        axis: Axis (or axes) to reduce; None reduces all dimensions.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r Computes the minimum of elements across axis of a tensor .
57,387
def sg_max(tensor, opt):
    r"""Computes the maximum of elements across axis of a tensor.

    See `tf.reduce_max()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        axis: Axis (or axes) to reduce; None reduces all dimensions.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r Computes the maximum of elements across axis of a tensor .
57,388
def sg_any(tensor, opt):
    r"""Computes the "logical or" of elements across axis of a tensor.

    See `tf.reduce_any()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        axis: Axis (or axes) to reduce; None reduces all dimensions.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r Computes the logical or of elements across axis of a tensor .
57,389
def sg_lookup(tensor, opt):
    r"""Looks up the `tensor`, treated as ids, in the embedding matrix `opt.emb`.

    See `tf.nn.embedding_lookup()` in tensorflow.

    Args:
      tensor: A `Tensor` of ids.
      opt:
        emb: The embedding matrix (mandatory).
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
r Looks up the tensor which is the embedding matrix .
57,390
def sg_reverse_seq(tensor, opt):
    r"""Reverses variable length slices.

    Before reversing, the effective sequence length of each row is computed
    as the number of non-zero entries along the axis (i.e. zero is assumed
    to be the padding value).

    See `tf.reverse_sequence()` in tensorflow.

    Args:
      tensor: A `Tensor`.
      opt:
        axis: Axis to reverse along (default: 1, the time axis).
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # default: reverse along the time axis
    opt += tf.sg_opt(axis=1)
    # non-padding count per row = sequence length
    seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)
    return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name)
r Reverses variable length slices .
57,391
def sg_gpus():
    r"""Gets current available GPU count (always at least 1).

    The device list is queried once and cached in the module-level `_gpus`.

    Returns:
      An integer: the number of GPUs, or 1 when none are found.
    """
    global _gpus
    if _gpus is None:
        # cache the (expensive) device enumeration
        local_device_protos = device_lib.list_local_devices()
        _gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])
    return max(_gpus, 1)
r Gets current available GPU nums
57,392
def sg_context(**kwargs):
    r"""Context helper for computational graph building.

    Makes all elements within the `with` block share the given options
    (pushed onto the module-level `_context` stack).  A `name` option is
    translated into a tensorflow variable scope.
    """
    global _context
    # push current context options
    context_now = tf.sg_opt(kwargs)
    _context += [context_now]
    if context_now.name:
        # In this case, `name` means the variable scope name.
        context_now.scope_name = context_now.name
        context_now.name = None
        with tf.variable_scope(context_now.scope_name):
            yield
    else:
        yield
    # pop context
    del _context[-1]
r Context helper for computational graph building . Makes all elements within the with Block share the parameters .
57,393
def sg_get_context():
    r"""Get current context information.

    Returns:
      A `tf.sg_opt` merging all active contexts; later (inner) contexts
      take precedence over earlier (outer) ones.
    """
    global _context
    # merge the context stack
    res = tf.sg_opt()
    for c in _context:
        res += c
    return res
r Get current context information
57,394
def sg_sugar_func(func):
    r"""Decorates a function `func` so that it can be a sugar function.

    Sugar functions can be used in a chainable manner; the returned tensor
    records enough node information (`_sugar`) to rebuild the graph later
    via `sg_reuse`.
    """
    @wraps(func)
    def wrapper(tensor, **kwargs):
        # call the wrapped function
        out = func(tensor, tf.sg_opt(kwargs))
        # save node info for reuse (function, merged args, previous node)
        out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(), prev=tensor)
        # inject a bound reuse method on the output tensor
        out.sg_reuse = types.MethodType(sg_reuse, out)
        return out

    return wrapper
r Decorates a function func so that it can be a sugar function . Sugar function can be used in a chainable manner .
57,395
def sg_reuse(tensor, **opt):
    r"""Reconstruct the computational graph of `tensor` so that all the
    parameters can be reused, replacing its original input with `opt.input`.

    Args:
      tensor: A sugar-built `Tensor` (must carry `_sugar` node info).
      **opt:
        input: The new input tensor (mandatory).

    Returns:
      The output tensor of the rebuilt graph.
    """
    opt = tf.sg_opt(opt)
    assert hasattr(tensor, '_sugar'), 'cannot reuse this node.'
    assert opt.input is not None, 'input is mandatory.'
    # Walk back through the chain to collect all nodes, oldest first.
    nodes, prev = [tensor], tensor._sugar.prev
    while prev is not None:
        nodes = [prev] + nodes
        prev = prev._sugar.prev if hasattr(prev, '_sugar') else None
    # Re-apply every operation (skipping the original input node) to the
    # new input, with variable reuse enabled for layers.
    out = opt.input
    for node in nodes[1:]:
        if node._sugar.is_layer:
            fn = tf.sg_layer_func(node._sugar.func)
            if node._sugar.arg.scope_name:
                with tf.variable_scope(node._sugar.arg.scope_name):
                    out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
            else:
                out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
        else:
            # plain sugar function: no variables, just re-apply
            out = node._sugar.func(out, node._sugar.arg)
    return out
r Reconstruct computational graph of tensor so all the parameters can be reused and replace its input tensor with opt . input .
57,396
def sg_input(shape=None, dtype=sg_floatx, name=None):
    r"""Creates a placeholder.

    Args:
      shape: A tuple/list of integers (per-sample shape, without the batch
        dimension), a single integer, or None for a fully unknown shape.
      dtype: A data type (default floatx).
      name: A name for the placeholder.

    Returns:
      A wrapped placeholder `Tensor` with a leading `None` batch dimension
      (unless `shape` is None).
    """
    if shape is None:
        return tf.placeholder(dtype, shape=None, name=name)
    else:
        if not isinstance(shape, (list, tuple)):
            shape = [shape]
        # prepend the (unknown) batch dimension
        return tf.placeholder(dtype, shape=[None] + list(shape), name=name)
r Creates a placeholder .
57,397
def sg_inject(path, mod_name):
    r"""Converts all functions in the given Python module to sugar functions
    so that they can be used in a chainable manner.

    Args:
      path: A directory to add to `sys.path` so the module can be imported.
      mod_name: The name of the module to inject.
    """
    # import the module from the given path
    import sys
    if path not in list(sys.path):
        sys.path.append(path)
    globals()[mod_name] = importlib.import_module(mod_name)
    # find public functions and attach them to tf.Variable / tf.Tensor
    for func_name in dir(globals()[mod_name]):
        if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):
            if not func_name.startswith('_'):
                # NOTE(review): exec-based monkey patching -- only call this
                # with trusted module names.
                exec('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name))
                exec('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name))
r Converts all functions in the given Python module to sugar functions so that they can be used in a chainable manner .
57,398
def sg_queue_context(sess=None):
    r"""Context helper for queue routines.

    Starts the queue runner threads on entry and stops/joins them on exit,
    even if the body raises.

    Args:
      sess: A session to open queues in (default: the current default session).
    """
    # default session
    sess = tf.get_default_session() if sess is None else sess
    # thread coordinator
    coord = tf.train.Coordinator()
    try:
        # start queue threads
        threads = tf.train.start_queue_runners(sess, coord)
        yield
    finally:
        # stop queue threads and wait for them to exit
        coord.request_stop()
        coord.join(threads)
r Context helper for queue routines .
57,399
def sg_arg():
    r"""Gets current command line options.

    Returns:
      A `tf.sg_opt` wrapping the parsed tensorflow FLAGS.
    """
    # parse command line options lazily, only once
    if not tf.app.flags.FLAGS.__dict__['__parsed']:
        tf.app.flags.FLAGS._parse_flags()
    return tf.sg_opt(tf.app.flags.FLAGS.__dict__['__flags'])
r Gets current command line options