Dataset columns: idx (int64, 0 to 63k), question (string, 53 to 5.28k characters), target (string, 5 to 805 characters).
800
def _q_iteration ( self , Q , Bpp_solver , Vm , Va , pq ) : dVm = - Bpp_solver . solve ( Q ) Vm [ pq ] = Vm [ pq ] + dVm V = Vm * exp ( 1j * Va ) return V , Vm , Va
Performs a Q iteration and updates Vm.
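In equation form, the update performed above (the reactive-power half-iteration of a fast-decoupled power flow, judging by the B'' solver) is:

    $\Delta V_m = -(B'')^{-1} Q, \qquad V_m^{pq} \leftarrow V_m^{pq} + \Delta V_m, \qquad V = V_m e^{jV_a}$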
801
def fmsin ( N , fnormin = 0.05 , fnormax = 0.45 , period = None , t0 = None , fnorm0 = 0.25 , pm1 = 1 ) : if period == None : period = N if t0 == None : t0 = N / 2 pm1 = nx . sign ( pm1 ) fnormid = 0.5 * ( fnormax + fnormin ) delta = 0.5 * ( fnormax - fnormin ) phi = - pm1 * nx . arccos ( ( fnorm0 - fnormid ) / delta ) time = nx . arange ( 1 , N ) - t0 phase = 2 * nx . pi * fnormid * time + delta * period * ( nx . sin ( 2 * nx . pi * time / period + phi ) - nx . sin ( phi ) ) y = nx . exp ( 1j * phase ) iflaw = fnormid + delta * nx . cos ( 2 * nx . pi * time / period + phi ) return y , iflaw
Signal with sinusoidal frequency modulation .
802
def _parse_rdf ( self , file ) : store = Graph ( ) store . parse ( file ) print len ( store )
Parses RDF data from the given file and prints the number of statements in the resulting store.
803
def load_plugins ( group = 'metrics.plugin.10' ) : file_processors = [ ] build_processors = [ ] for ep in pkg_resources . iter_entry_points ( group , name = None ) : log . debug ( 'loading \'%s\'' , ep ) plugin = ep . load ( ) if hasattr ( plugin , 'get_file_processors' ) : file_processors . extend ( plugin . get_file_processors ( ) ) if hasattr ( plugin , 'get_build_processors' ) : build_processors . extend ( plugin . get_build_processors ( ) ) return file_processors , build_processors
Loads installed metrics plugins.
804
def read_case ( input , format = None ) : format_map = { "matpower" : MATPOWERReader , "psse" : PSSEReader , "pickle" : PickleReader } if format_map . has_key ( format ) : reader_klass = format_map [ format ] reader = reader_klass ( ) case = reader . read ( input ) else : for reader_klass in format_map . values ( ) : reader = reader_klass ( ) try : case = reader . read ( input ) if case is not None : break except : pass else : case = None return case
Returns a case object from the given input file object . The data format may be optionally specified .
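A minimal usage sketch, assuming read_case is importable from the module above; the file names are hypothetical:

    # Read a MATPOWER case with an explicit format, then let the format be auto-detected.
    with open("case30.m") as fd:
        case = read_case(fd, format="matpower")
    with open("mystery_case.dat") as fd:
        case = read_case(fd)  # format=None: each reader is tried in turn until one succeeds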
805
def detect_data_file ( input , file_name = "" ) : _ , ext = os . path . splitext ( file_name ) if ext == ".m" : line = input . readline ( ) if line . startswith ( "function" ) : type = "matpower" logger . info ( "Recognised MATPOWER data file." ) elif line . startswith ( "Bus.con" ) or line . startswith ( "%" ) : type = "psat" logger . info ( "Recognised PSAT data file." ) else : type = "unrecognised" input . seek ( 0 ) elif ( ext == ".raw" ) or ( ext == ".psse" ) : type = "psse" logger . info ( "Recognised PSS/E data file." ) elif ( ext == ".pkl" ) or ( ext == ".pickle" ) : type = "pickle" logger . info ( "Recognised pickled case." ) else : type = None return type
Detects the format of a network data file according to the file extension and the header .
806
def write ( self , file_or_filename , prog = None , format = 'xdot' ) : if prog is None : file = super ( DotWriter , self ) . write ( file_or_filename ) else : buf = StringIO . StringIO ( ) super ( DotWriter , self ) . write ( buf ) buf . seek ( 0 ) data = self . create ( buf . getvalue ( ) , prog , format ) if isinstance ( file_or_filename , basestring ) : file = None try : file = open ( file_or_filename , "wb" ) except : logger . error ( "Error opening %s." % file_or_filename ) finally : if file is not None : file . write ( data ) file . close ( ) else : file = file_or_filename file . write ( data ) return file
Writes the case data in Graphviz DOT language .
807
def write_bus_data ( self , file , padding = " " ) : for bus in self . case . buses : attrs = [ '%s="%s"' % ( k , v ) for k , v in self . bus_attr . iteritems ( ) ] attr_str = ", " . join ( attrs ) file . write ( "%s%s [%s];\n" % ( padding , bus . name , attr_str ) )
Writes bus data to file .
808
def write_branch_data ( self , file , padding = " " ) : attrs = [ '%s="%s"' % ( k , v ) for k , v in self . branch_attr . iteritems ( ) ] attr_str = ", " . join ( attrs ) for br in self . case . branches : file . write ( "%s%s -> %s [%s];\n" % ( padding , br . from_bus . name , br . to_bus . name , attr_str ) )
Writes branch data in Graphviz DOT language .
809
def write_generator_data ( self , file , padding = " " ) : attrs = [ '%s="%s"' % ( k , v ) for k , v in self . gen_attr . iteritems ( ) ] attr_str = ", " . join ( attrs ) edge_attrs = [ '%s="%s"' % ( k , v ) for k , v in { } . iteritems ( ) ] edge_attr_str = ", " . join ( edge_attrs ) for g in self . case . generators : file . write ( "%s%s [%s];\n" % ( padding , g . name , attr_str ) ) file . write ( "%s%s -> %s [%s];\n" % ( padding , g . name , g . bus . name , edge_attr_str ) )
Write generator data in Graphviz DOT language .
810
def create ( self , dotdata , prog = "dot" , format = "xdot" ) : import os , tempfile from dot2tex . dotparsing import find_graphviz progs = find_graphviz ( ) if progs is None : logger . warning ( "GraphViz executables not found." ) return None if not progs . has_key ( prog ) : logger . warning ( 'Invalid program [%s]. Available programs are: %s' % ( prog , progs . keys ( ) ) ) return None tmp_fd , tmp_name = tempfile . mkstemp ( ) os . close ( tmp_fd ) dot_fd = file ( tmp_name , "w+b" ) dot_fd . write ( dotdata ) dot_fd . close ( ) tmp_dir = os . path . dirname ( tmp_name ) p = subprocess . Popen ( ( progs [ prog ] , '-T' + format , tmp_name ) , cwd = tmp_dir , stderr = subprocess . PIPE , stdout = subprocess . PIPE ) stderr = p . stderr stdout = p . stdout stdout_output = list ( ) while True : data = stdout . read ( ) if not data : break stdout_output . append ( data ) stdout . close ( ) if stdout_output : stdout_output = '' . join ( stdout_output ) if not stderr . closed : stderr_output = list ( ) while True : data = stderr . read ( ) if not data : break stderr_output . append ( data ) stderr . close ( ) if stderr_output : stderr_output = '' . join ( stderr_output ) status = p . wait ( ) if status != 0 : logger . error ( "Program [%s] terminated with status: %d. stderr " "follows: %s" % ( prog , status , stderr_output ) ) elif stderr_output : logger . error ( "%s" , stderr_output ) os . unlink ( tmp_name ) return stdout_output
Creates and returns a representation of the graph using the Graphviz layout program given by prog according to the given format .
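A hypothetical call, assuming the Graphviz executables are installed and that writer is an instance of the DotWriter class this method belongs to:

    # Lay out a trivial DOT graph with the external 'dot' program and return xdot output.
    xdot_data = writer.create("digraph G { a -> b; }", prog="dot", format="xdot")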
811
def format ( file_metrics , build_metrics ) : def indent ( elem , level = 0 ) : i = "\n" + level * " " if len ( elem ) : if not elem . text or not elem . text . strip ( ) : elem . text = i + " " if not elem . tail or not elem . tail . strip ( ) : elem . tail = i for elem in elem : indent ( elem , level + 1 ) if not elem . tail or not elem . tail . strip ( ) : elem . tail = i else : if level and ( not elem . tail or not elem . tail . strip ( ) ) : elem . tail = i root = ET . Element ( 'metrics' ) files = ET . Element ( 'files' ) root . append ( files ) for key in file_metrics . keys ( ) : tmp_file = ET . SubElement ( files , "file" , { 'name' : key , 'language' : file_metrics [ key ] [ 'language' ] } ) for name in file_metrics [ key ] . keys ( ) : if name == 'language' : continue tmp_metric = ET . SubElement ( tmp_file , "metric" , { 'name' : name , 'value' : str ( file_metrics [ key ] [ name ] ) } ) if build_metrics : build = ET . Element ( 'build' ) root . append ( build ) indent ( root ) if PY3 : body = ET . tostring ( root , encoding = 'unicode' ) else : body = ET . tostring ( root ) return body
Computes output in XML format.
812
def ask ( message = 'Are you sure? [y/N]' ) : agree = False answer = raw_input ( message ) . lower ( ) if answer . startswith ( 'y' ) : agree = True return agree
Asks the user for confirmation.
813
def main ( prog_args = None ) : if prog_args is None : prog_args = sys . argv parser = optparse . OptionParser ( ) parser . usage = parser . add_option ( "-t" , "--test-program" , dest = "test_program" , default = "nose" , help = "specifies the test-program to use. Valid values" " include `nose` (or `nosetests`), `django`, `py` (for `py.test`), " '`symfony`, `jelix` `phpunit` and `tox`' ) parser . add_option ( "-d" , "--debug" , dest = "debug" , action = "store_true" , default = False ) parser . add_option ( '-s' , '--size-max' , dest = 'size_max' , default = 25 , type = "int" , help = "Sets the maximum size (in MB) of files." ) parser . add_option ( '--custom-args' , dest = 'custom_args' , default = '' , type = "str" , help = "Defines custom arguments to pass after the test program command" ) parser . add_option ( '--ignore-dirs' , dest = 'ignore_dirs' , default = '' , type = "str" , help = "Defines directories to ignore. Use a comma-separated list." ) parser . add_option ( '-y' , '--quiet' , dest = 'quiet' , action = "store_true" , default = False , help = "Don't ask for any input." ) opt , args = parser . parse_args ( prog_args ) if args [ 1 : ] : path = args [ 1 ] else : path = '.' try : watcher = Watcher ( path , opt . test_program , opt . debug , opt . custom_args , opt . ignore_dirs , opt . quiet ) watcher_file_size = watcher . file_sizes ( ) if watcher_file_size > opt . size_max : message = "It looks like the total file size (%dMb) is larger than the `max size` option (%dMb).\nThis may slow down the file comparison process, and thus the daemon performances.\nDo you wish to continue? [y/N] " % ( watcher_file_size , opt . size_max ) if not opt . quiet and not ask ( message ) : raise CancelDueToUserRequest ( 'Ok, thx, bye...' ) print "Ready to watch file changes..." watcher . loop ( ) except ( KeyboardInterrupt , SystemExit ) : pass except Exception , msg : print msg print "Bye"
Parses command-line options, builds a Watcher for the given path and runs its main loop.
814
def check_configuration ( self , file_path , test_program , custom_args ) : if not os . path . isdir ( file_path ) : raise InvalidFilePath ( "INVALID CONFIGURATION: file path %s is not a directory" % os . path . abspath ( file_path ) ) if not test_program in IMPLEMENTED_TEST_PROGRAMS : raise InvalidTestProgram ( 'The `%s` is unknown, or not yet implemented. Please chose another one.' % test_program ) if custom_args : if not self . quiet and not ask ( "WARNING!!!\nYou are about to run the following command\n\n $ %s\n\nAre you sure you still want to proceed [y/N]? " % self . get_cmd ( ) ) : raise CancelDueToUserRequest ( 'Test cancelled...' )
Checks if configuration is ok .
815
def check_dependencies ( self ) : "Checks if the test program is available in the python environnement" if self . test_program == 'nose' : try : import nose except ImportError : sys . exit ( 'Nosetests is not available on your system. Please install it and try to run it again' ) if self . test_program == 'py' : try : import py except : sys . exit ( 'py.test is not available on your system. Please install it and try to run it again' ) if self . test_program == 'django' : try : import django except : sys . exit ( 'django is not available on your system. Please install it and try to run it again' ) if self . test_program == 'phpunit' : try : process = subprocess . check_call ( [ 'phpunit' , '--version' ] ) except : sys . exit ( 'phpunit is not available on your system. Please install it and try to run it again' ) if self . test_program == 'tox' : try : import tox except ImportError : sys . exit ( 'tox is not available on your system. Please install it and try to run it again' )
Checks if the test program is available in the Python environment.
816
def get_cmd ( self ) : cmd = None if self . test_program in ( 'nose' , 'nosetests' ) : cmd = "nosetests %s" % self . file_path elif self . test_program == 'django' : executable = "%s/manage.py" % self . file_path if os . path . exists ( executable ) : cmd = "python %s/manage.py test" % self . file_path else : cmd = "django-admin.py test" elif self . test_program == 'py' : cmd = 'py.test %s' % self . file_path elif self . test_program == 'symfony' : cmd = 'symfony test-all' elif self . test_program == 'jelix' : cmd = 'php tests.php' elif self . test_program == 'phpunit' : cmd = 'phpunit' elif self . test_program == 'sphinx' : cmd = 'make html' elif self . test_program == 'tox' : cmd = 'tox' if not cmd : raise InvalidTestProgram ( "The test program %s is unknown. Valid options are: `nose`, `django` and `py`" % self . test_program ) if self . custom_args : cmd = '%s %s' % ( cmd , self . custom_args ) return cmd
Returns the full command to be executed at runtime
817
def include ( self , path ) : for extension in IGNORE_EXTENSIONS : if path . endswith ( extension ) : return False parts = path . split ( os . path . sep ) for part in parts : if part in self . ignore_dirs : return False return True
Returns True if the file is not ignored
818
def diff_list ( self , list1 , list2 ) : for key in list1 : if key in list2 and list2 [ key ] != list1 [ key ] : print key elif key not in list2 : print key
Extracts differences between lists . For debug purposes
819
def run ( self , cmd ) : print datetime . datetime . now ( ) output = subprocess . Popen ( cmd , shell = True ) output = output . communicate ( ) [ 0 ] print output
Runs the appropriate command
820
def loop ( self ) : while True : sleep ( 1 ) new_file_list = self . walk ( self . file_path , { } ) if new_file_list != self . file_list : if self . debug : self . diff_list ( new_file_list , self . file_list ) self . run_tests ( ) self . file_list = new_file_list
Main loop daemon .
821
def format ( file_metrics , build_metrics ) : metrics = { 'files' : file_metrics } if build_metrics : metrics [ 'build' ] = build_metrics body = json . dumps ( metrics , sort_keys = True , indent = 4 ) + '\n' return body
Computes output in JSON format.
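A minimal sketch, assuming this module-level format() and its json import are in scope; the file name and metric values are made up:

    file_metrics = {"pkg/mod.py": {"language": "Python", "sloc": 120}}  # hypothetical metrics
    body = format(file_metrics, build_metrics=None)
    # body is indented JSON of the form {"files": {"pkg/mod.py": {"language": "Python", "sloc": 120}}}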
822
def split_linear_constraints ( A , l , u ) : ieq = [ ] igt = [ ] ilt = [ ] ibx = [ ] for i in range ( len ( l ) ) : if abs ( u [ i ] - l [ i ] ) <= EPS : ieq . append ( i ) elif ( u [ i ] > 1e10 ) and ( l [ i ] > - 1e10 ) : igt . append ( i ) elif ( l [ i ] <= - 1e10 ) and ( u [ i ] < 1e10 ) : ilt . append ( i ) elif ( abs ( u [ i ] - l [ i ] ) > EPS ) and ( u [ i ] < 1e10 ) and ( l [ i ] > - 1e10 ) : ibx . append ( i ) else : raise ValueError Ae = A [ ieq , : ] Ai = sparse ( [ A [ ilt , : ] , - A [ igt , : ] , A [ ibx , : ] , - A [ ibx , : ] ] ) be = u [ ieq , : ] bi = matrix ( [ u [ ilt ] , - l [ igt ] , u [ ibx ] , - l [ ibx ] ] ) return Ae , be , Ai , bi
Returns the linear equality and inequality constraints .
823
def dSbus_dV ( Y , V ) : I = Y * V diagV = spdiag ( V ) diagIbus = spdiag ( I ) diagVnorm = spdiag ( div ( V , abs ( V ) ) ) dS_dVm = diagV * conj ( Y * diagVnorm ) + conj ( diagIbus ) * diagVnorm dS_dVa = 1j * diagV * conj ( diagIbus - Y * diagV ) return dS_dVm , dS_dVa
Computes the partial derivative of power injection w . r . t . voltage .
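For reference, the expressions implemented above can be written (with an overbar denoting complex conjugation, $I = YV$ and $V_{\mathrm{norm}} = V/|V|$) as:

    $\dfrac{\partial S}{\partial V_m} = \operatorname{diag}(V)\,\overline{Y\operatorname{diag}(V_{\mathrm{norm}})} + \overline{\operatorname{diag}(I)}\,\operatorname{diag}(V_{\mathrm{norm}}), \qquad \dfrac{\partial S}{\partial V_a} = j\,\operatorname{diag}(V)\,\overline{\operatorname{diag}(I) - Y\operatorname{diag}(V)}$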
824
def dIbr_dV ( Yf , Yt , V ) : Vnorm = div ( V , abs ( V ) ) diagV = spdiag ( V ) diagVnorm = spdiag ( Vnorm ) dIf_dVa = Yf * 1j * diagV dIf_dVm = Yf * diagVnorm dIt_dVa = Yt * 1j * diagV dIt_dVm = Yt * diagVnorm If = Yf * V It = Yt * V return dIf_dVa , dIf_dVm , dIt_dVa , dIt_dVm , If , It
Computes partial derivatives of branch currents w . r . t . voltage .
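Written out, the current sensitivities computed above are:

    $\dfrac{\partial I_f}{\partial V_a} = j\,Y_f\operatorname{diag}(V), \qquad \dfrac{\partial I_f}{\partial V_m} = Y_f\operatorname{diag}(V_{\mathrm{norm}}), \qquad I_f = Y_f V$

with the analogous expressions for the "to" end using $Y_t$.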
825
def dSbr_dV ( Yf , Yt , V , buses , branches ) : nl = len ( branches ) nb = len ( V ) f = matrix ( [ l . from_bus . _i for l in branches ] ) t = matrix ( [ l . to_bus . _i for l in branches ] ) If = Yf * V It = Yt * V Vnorm = div ( V , abs ( V ) ) diagVf = spdiag ( V [ f ] ) diagIf = spdiag ( If ) diagVt = spdiag ( V [ t ] ) diagIt = spdiag ( It ) diagV = spdiag ( V ) diagVnorm = spdiag ( Vnorm ) ibr = range ( nl ) size = ( nl , nb ) dSf_dVa = 1j * ( conj ( diagIf ) * spmatrix ( V [ f ] , ibr , f , size ) - diagVf * conj ( Yf * diagV ) ) dSt_dVa = 1j * ( conj ( diagIt ) * spmatrix ( V [ t ] , ibr , t , size ) - diagVt * conj ( Yt * diagV ) ) dSf_dVm = diagVf * conj ( Yf * diagVnorm ) + conj ( diagIf ) * spmatrix ( Vnorm [ f ] , ibr , f , size ) dSt_dVm = diagVt * conj ( Yt * diagVnorm ) + conj ( diagIt ) * spmatrix ( Vnorm [ t ] , ibr , t , size ) Sf = mul ( V [ f ] , conj ( If ) ) St = mul ( V [ t ] , conj ( It ) ) return dSf_dVa , dSf_dVm , dSt_dVa , dSt_dVm , Sf , St
Computes the branch power flow vector and the partial derivative of branch power flow w . r . t voltage .
826
def dAbr_dV ( dSf_dVa , dSf_dVm , dSt_dVa , dSt_dVm , Sf , St ) : dAf_dPf = spdiag ( 2 * Sf . real ( ) ) dAf_dQf = spdiag ( 2 * Sf . imag ( ) ) dAt_dPt = spdiag ( 2 * St . real ( ) ) dAt_dQt = spdiag ( 2 * St . imag ( ) ) dAf_dVa = dAf_dPf * dSf_dVa . real ( ) + dAf_dQf * dSf_dVa . imag ( ) dAt_dVa = dAt_dPt * dSt_dVa . real ( ) + dAt_dQt * dSt_dVa . imag ( ) dAf_dVm = dAf_dPf * dSf_dVm . real ( ) + dAf_dQf * dSf_dVm . imag ( ) dAt_dVm = dAt_dPt * dSt_dVm . real ( ) + dAt_dQt * dSt_dVm . imag ( ) return dAf_dVa , dAf_dVm , dAt_dVa , dAt_dVm
Partial derivatives of squared flow magnitudes w . r . t voltage .
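The code is a direct application of the chain rule to the squared flow magnitude $|S|^2 = P^2 + Q^2$, e.g. for the "from" end:

    $\dfrac{\partial |S_f|^2}{\partial V_a} = 2P_f\,\dfrac{\partial P_f}{\partial V_a} + 2Q_f\,\dfrac{\partial Q_f}{\partial V_a}$

and likewise for $V_m$ and the "to" end.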
827
def d2Sbus_dV2 ( Ybus , V , lam ) : n = len ( V ) Ibus = Ybus * V diaglam = spdiag ( lam ) diagV = spdiag ( V ) A = spmatrix ( mul ( lam , V ) , range ( n ) , range ( n ) ) B = Ybus * diagV C = A * conj ( B ) D = Ybus . H * diagV E = conj ( diagV ) * ( D * diaglam - spmatrix ( D * lam , range ( n ) , range ( n ) ) ) F = C - A * spmatrix ( conj ( Ibus ) , range ( n ) , range ( n ) ) G = spmatrix ( div ( matrix ( 1.0 , ( n , 1 ) ) , abs ( V ) ) , range ( n ) , range ( n ) ) Gaa = E + F Gva = 1j * G * ( E - F ) Gav = Gva . T Gvv = G * ( C + C . T ) * G return Gaa , Gav , Gva , Gvv
Computes 2nd derivatives of power injection w . r . t . voltage .
828
def d2Ibr_dV2 ( Ybr , V , lam ) : nb = len ( V ) diaginvVm = spdiag ( div ( matrix ( 1.0 , ( nb , 1 ) ) , abs ( V ) ) ) Haa = spdiag ( mul ( - ( Ybr . T * lam ) , V ) ) Hva = - 1j * Haa * diaginvVm Hav = Hva Hvv = spmatrix ( [ ] , [ ] , [ ] , ( nb , nb ) ) return Haa , Hav , Hva , Hvv
Computes 2nd derivatives of complex branch current w . r . t . voltage .
829
def d2Sbr_dV2 ( Cbr , Ybr , V , lam ) : nb = len ( V ) diaglam = spdiag ( lam ) diagV = spdiag ( V ) A = Ybr . H * diaglam * Cbr B = conj ( diagV ) * A * diagV D = spdiag ( mul ( ( A * V ) , conj ( V ) ) ) E = spdiag ( mul ( ( A . T * conj ( V ) ) , V ) ) F = B + B . T G = spdiag ( div ( matrix ( 1.0 , ( nb , 1 ) ) , abs ( V ) ) ) Haa = F - D - E Hva = 1j * G * ( B - B . T - D + E ) Hav = Hva . T Hvv = G * F * G return Haa , Hav , Hva , Hvv
Computes 2nd derivatives of complex power flow w . r . t . voltage .
830
def tocvx ( B ) : Bcoo = B . tocoo ( ) return spmatrix ( Bcoo . data , Bcoo . row . tolist ( ) , Bcoo . col . tolist ( ) )
Converts a sparse SciPy matrix into a sparse CVXOPT matrix .
831
def doInteractions ( self , number = 1 ) : t0 = time . time ( ) for _ in range ( number ) : self . _oneInteraction ( ) elapsed = time . time ( ) - t0 logger . info ( "%d interactions executed in %.3fs." % ( number , elapsed ) ) return self . stepid
Directly maps the agents and the tasks .
832
def exciter ( self , Xexc , Pexc , Vexc ) : exciters = self . exciters F = zeros ( Xexc . shape ) typ1 = [ e . generator . _i for e in exciters if e . model == CONST_EXCITATION ] typ2 = [ e . generator . _i for e in exciters if e . model == IEEE_DC1A ] F [ typ1 , : ] = 0.0 Efd = Xexc [ typ2 , 0 ] Uf = Xexc [ typ2 , 1 ] Ur = Xexc [ typ2 , 2 ] Ka = Pexc [ typ2 , 0 ] Ta = Pexc [ typ2 , 1 ] Ke = Pexc [ typ2 , 2 ] Te = Pexc [ typ2 , 3 ] Kf = Pexc [ typ2 , 4 ] Tf = Pexc [ typ2 , 5 ] Aex = Pexc [ typ2 , 6 ] Bex = Pexc [ typ2 , 7 ] Ur_min = Pexc [ typ2 , 8 ] Ur_max = Pexc [ typ2 , 9 ] Uref = Pexc [ typ2 , 10 ] Uref2 = Pexc [ typ2 , 11 ] U = Vexc [ typ2 , 1 ] Ux = Aex * exp ( Bex * Efd ) dUr = 1 / Ta * ( Ka * ( Uref - U + Uref2 - Uf ) - Ur ) dUf = 1 / Tf * ( Kf / Te * ( Ur - Ux - Ke * Efd ) - Uf ) if sum ( flatnonzero ( Ur > Ur_max ) ) >= 1 : Ur2 = Ur_max elif sum ( flatnonzero ( Ur < Ur_max ) ) >= 1 : Ur2 = Ur_min else : Ur2 = Ur dEfd = 1 / Te * ( Ur2 - Ux - Ke * Efd ) F [ typ2 , : ] = c_ [ dEfd , dUf , dUr ] return F
Exciter model .
833
def governor ( self , Xgov , Pgov , Vgov ) : governors = self . governors omegas = 2 * pi * self . freq F = zeros ( Xgov . shape ) typ1 = [ g . generator . _i for g in governors if g . model == CONST_POWER ] typ2 = [ g . generator . _i for g in governors if g . model == GENERAL_IEEE ] F [ typ1 , 0 ] = 0 Pm = Xgov [ typ2 , 0 ] P = Xgov [ typ2 , 1 ] x = Xgov [ typ2 , 2 ] z = Xgov [ typ2 , 3 ] K = Pgov [ typ2 , 0 ] T1 = Pgov [ typ2 , 1 ] T2 = Pgov [ typ2 , 2 ] T3 = Pgov [ typ2 , 3 ] Pup = Pgov [ typ2 , 4 ] Pdown = Pgov [ typ2 , 5 ] Pmax = Pgov [ typ2 , 6 ] Pmin = Pgov [ typ2 , 7 ] P0 = Pgov [ typ2 , 8 ] omega = Vgov [ typ2 , 0 ] dx = K * ( - 1 / T1 * x + ( 1 - T2 / T1 ) * ( omega - omegas ) ) dP = 1 / T1 * x + T2 / T1 * ( omega - omegas ) y = 1 / T3 * ( P0 - P - Pm ) y2 = y if sum ( flatnonzero ( y > Pup ) ) >= 1 : y2 = ( 1 - flatnonzero ( y > Pup ) ) * y2 + flatnonzero ( y > Pup ) * Pup if sum ( flatnonzero ( y < Pdown ) ) >= 1 : y2 = ( 1 - flatnonzero ( y < Pdown ) ) * y2 + flatnonzero ( y < Pdown ) * Pdown dz = y2 dPm = y2 if sum ( flatnonzero ( z > Pmax ) ) >= 1 : dPm = ( 1 - flatnonzero ( z > Pmax ) ) * dPm + flatnonzero ( z > Pmax ) * 0 if sum ( flatnonzero ( z < Pmin ) ) >= 1 : dPm = ( 1 - flatnonzero ( z < Pmin ) ) * dPm + flatnonzero ( z < Pmin ) * 0 F [ typ2 , : ] = c_ [ dPm , dP , dx , dz ] return F
Governor model .
834
def generator ( self , Xgen , Xexc , Xgov , Vgen ) : generators = self . dyn_generators omegas = 2 * pi * self . freq F = zeros ( Xgen . shape ) typ1 = [ g . _i for g in generators if g . model == CLASSICAL ] typ2 = [ g . _i for g in generators if g . model == FOURTH_ORDER ] omega = Xgen [ typ1 , 1 ] Pm0 = Xgov [ typ1 , 0 ] H = array ( [ g . h for g in generators ] ) [ typ1 ] D = array ( [ g . d for g in generators ] ) [ typ1 ] Pe = Vgen [ typ1 , 2 ] ddelta = omega - omegas domega = pi * self . freq / H * ( - D * ( omega - omegas ) + Pm0 - Pe ) dEq = zeros ( len ( typ1 ) ) F [ typ1 , : ] = c_ [ ddelta , domega , dEq ] omega = Xgen [ typ2 , 1 ] Eq_tr = Xgen [ typ2 , 2 ] Ed_tr = Xgen [ typ2 , 3 ] H = array ( [ g . h for g in generators ] ) D = array ( [ g . d for g in generators ] ) xd = array ( [ g . xd for g in generators ] ) xq = array ( [ g . xq for g in generators ] ) xd_tr = array ( [ g . xd_tr for g in generators ] ) xq_tr = array ( [ g . xq_tr for g in generators ] ) Td0_tr = array ( [ g . td for g in generators ] ) Tq0_tr = array ( [ g . tq for g in generators ] ) Id = Vgen [ typ2 , 0 ] Iq = Vgen [ typ2 , 1 ] Pe = Vgen [ typ2 , 2 ] Efd = Xexc [ typ2 , 0 ] Pm = Xgov [ typ2 , 0 ] ddelta = omega - omegas domega = pi * self . freq / H * ( - D * ( omega - omegas ) + Pm - Pe ) dEq = 1 / Td0_tr * ( Efd - Eq_tr + ( xd - xd_tr ) * Id ) dEd = 1 / Tq0_tr * ( - Ed_tr - ( xq - xq_tr ) * Iq ) F [ typ2 , : ] = c_ [ ddelta , domega , dEq , dEd ] return F
Generator model .
835
def _write_data ( self , file ) : self . write_case_data ( file ) file . write ( "Bus Data\n" ) file . write ( "-" * 8 + "\n" ) self . write_bus_data ( file ) file . write ( "\n" ) file . write ( "Branch Data\n" ) file . write ( "-" * 11 + "\n" ) self . write_branch_data ( file ) file . write ( "\n" ) file . write ( "Generator Data\n" ) file . write ( "-" * 14 + "\n" ) self . write_generator_data ( file ) file . write ( "\n" )
Writes case data to file in ReStructuredText format .
836
def write_bus_data ( self , file ) : report = CaseReport ( self . case ) buses = self . case . buses col_width = 8 col_width_2 = col_width * 2 + 1 col1_width = 6 sep = "=" * 6 + " " + ( "=" * col_width + " " ) * 6 + "\n" file . write ( sep ) file . write ( "Name" . center ( col1_width ) + " " ) file . write ( "Voltage (pu)" . center ( col_width_2 ) + " " ) file . write ( "Generation" . center ( col_width_2 ) + " " ) file . write ( "Load" . center ( col_width_2 ) + " " ) file . write ( "\n" ) file . write ( "-" * col1_width + " " + ( "-" * col_width_2 + " " ) * 3 + "\n" ) file . write ( ".." . ljust ( col1_width ) + " " ) file . write ( "Amp" . center ( col_width ) + " " ) file . write ( "Phase" . center ( col_width ) + " " ) file . write ( "P (MW)" . center ( col_width ) + " " ) file . write ( "Q (MVAr)" . center ( col_width ) + " " ) file . write ( "P (MW)" . center ( col_width ) + " " ) file . write ( "Q (MVAr)" . center ( col_width ) + " " ) file . write ( "\n" ) file . write ( sep ) for bus in buses : file . write ( bus . name [ : col1_width ] . ljust ( col1_width ) ) file . write ( " %8.3f" % bus . v_magnitude ) file . write ( " %8.3f" % bus . v_angle ) file . write ( " %8.2f" % self . case . s_supply ( bus ) . real ) file . write ( " %8.2f" % self . case . s_supply ( bus ) . imag ) file . write ( " %8.2f" % self . case . s_demand ( bus ) . real ) file . write ( " %8.2f" % self . case . s_demand ( bus ) . imag ) file . write ( "\n" ) file . write ( ".." . ljust ( col1_width ) + " " + ".." . ljust ( col_width ) + " " ) file . write ( "*Total:*" . rjust ( col_width ) + " " ) ptot = report . actual_pgen qtot = report . actual_qgen file . write ( "%8.2f " % ptot ) file . write ( "%8.2f " % qtot ) file . write ( "%8.2f " % report . p_demand ) file . write ( "%8.2f " % report . q_demand ) file . write ( "\n" ) file . write ( sep ) del report
Writes bus data to a ReST table .
837
def write_how_many ( self , file ) : report = CaseReport ( self . case ) components = [ ( "Bus" , "n_buses" ) , ( "Generator" , "n_generators" ) , ( "Committed Generator" , "n_online_generators" ) , ( "Load" , "n_loads" ) , ( "Fixed Load" , "n_fixed_loads" ) , ( "Despatchable Load" , "n_online_vloads" ) , ( "Shunt" , "n_shunts" ) , ( "Branch" , "n_branches" ) , ( "Transformer" , "n_transformers" ) , ( "Inter-tie" , "n_interties" ) , ( "Area" , "n_areas" ) ] longest = max ( [ len ( c [ 0 ] ) for c in components ] ) col1_header = "Object" col1_width = longest col2_header = "Quantity" col2_width = len ( col2_header ) sep = "=" * col1_width + " " + "=" * col2_width + "\n" file . write ( sep ) file . write ( col1_header . center ( col1_width ) ) file . write ( " " ) file . write ( "%s\n" % col2_header . center ( col2_width ) ) file . write ( sep ) for label , attr in components : col2_value = str ( getattr ( report , attr ) ) file . write ( "%s %s\n" % ( label . ljust ( col1_width ) , col2_value . rjust ( col2_width ) ) ) else : file . write ( sep ) file . write ( "\n" ) del report
Writes component numbers to a table .
838
def write_min_max ( self , file ) : report = CaseReport ( self . case ) col1_header = "Attribute" col1_width = 19 col2_header = "Minimum" col3_header = "Maximum" col_width = 22 sep = "=" * col1_width + " " + "=" * col_width + " " + "=" * col_width + "\n" file . write ( sep ) file . write ( "%s" % col1_header . center ( col1_width ) ) file . write ( " " ) file . write ( "%s" % col2_header . center ( col_width ) ) file . write ( " " ) file . write ( "%s" % col3_header . center ( col_width ) ) file . write ( "\n" ) file . write ( sep ) min_val , min_i = getattr ( report , "min_v_magnitude" ) max_val , max_i = getattr ( report , "max_v_magnitude" ) file . write ( "%s %7.3f p.u. @ bus %2d %7.3f p.u. @ bus %2d\n" % ( "Voltage Amplitude" . ljust ( col1_width ) , min_val , min_i , max_val , max_i ) ) min_val , min_i = getattr ( report , "min_v_angle" ) max_val , max_i = getattr ( report , "max_v_angle" ) file . write ( "%s %16.3f %16.3f\n" % ( "Voltage Phase Angle" . ljust ( col1_width ) , min_val , max_val ) ) file . write ( sep ) file . write ( "\n" ) del report
Writes minimum and maximum values to a table .
839
def make_unique_name ( base , existing = [ ] , format = "%s_%s" ) : count = 2 name = base while name in existing : name = format % ( base , count ) count += 1 return name
Return a name unique within a context based on the specified name .
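A short illustration, assuming make_unique_name is in scope (the behaviour follows directly from the loop above):

    # The counter starts at 2 and increments until the formatted name is unused.
    assert make_unique_name("bus", []) == "bus"
    assert make_unique_name("bus", ["bus"]) == "bus_2"
    assert make_unique_name("bus", ["bus", "bus_2"]) == "bus_3"
    assert make_unique_name("gen", ["gen"], format="%s-%s") == "gen-2"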
840
def call_antlr4 ( arg ) : "calls antlr4 on grammar file" antlr_path = os . path . join ( ROOT_DIR , "java" , "antlr-4.7-complete.jar" ) classpath = os . pathsep . join ( [ "." , "{:s}" . format ( antlr_path ) , "$CLASSPATH" ] ) generated = os . path . join ( ROOT_DIR , 'src' , 'pymoca' , 'generated' ) cmd = "java -Xmx500M -cp \"{classpath:s}\" org.antlr.v4.Tool {arg:s}" " -o {generated:s} -visitor -Dlanguage=Python3" . format ( ** locals ( ) ) print ( cmd ) proc = subprocess . Popen ( cmd . split ( ) , cwd = os . path . join ( ROOT_DIR , 'src' , 'pymoca' ) ) proc . communicate ( ) with open ( os . path . join ( generated , '__init__.py' ) , 'w' ) as fid : fid . write ( '' )
Calls ANTLR4 on the grammar file.
841
def setup_package ( ) : with open ( 'requirements.txt' , 'r' ) as req_file : install_reqs = req_file . read ( ) . split ( '\n' ) cmdclass_ = { 'antlr' : AntlrBuildCommand } cmdclass_ . update ( versioneer . get_cmdclass ( ) ) setup ( version = versioneer . get_version ( ) , name = 'pymoca' , maintainer = "James Goppert" , maintainer_email = "james.goppert@gmail.com" , description = DOCLINES [ 0 ] , long_description = "\n" . join ( DOCLINES [ 2 : ] ) , url = 'https://github.com/pymoca/pymoca' , author = 'James Goppert' , author_email = 'james.goppert@gmail.com' , download_url = 'https://github.com/pymoca/pymoca' , license = 'BSD' , classifiers = [ _f for _f in CLASSIFIERS . split ( '\n' ) if _f ] , platforms = [ "Windows" , "Linux" , "Solaris" , "Mac OS-X" , "Unix" ] , install_requires = install_reqs , tests_require = [ 'coverage >= 3.7.1' , 'nose >= 1.3.1' ] , test_suite = 'nose.collector' , python_requires = '>=3.5' , packages = find_packages ( "src" ) , package_dir = { "" : "src" } , include_package_data = True , cmdclass = cmdclass_ )
Setup the package .
842
def body ( self , frame ) : master = Frame ( self ) master . pack ( padx = 5 , pady = 0 , expand = 1 , fill = BOTH ) title = Label ( master , text = "Buses" ) title . pack ( side = TOP ) bus_lb = self . bus_lb = Listbox ( master , selectmode = SINGLE , width = 10 ) bus_lb . pack ( side = LEFT ) for bus in self . case . buses : bus_lb . insert ( END , bus . name ) bus_lb . bind ( "<<ListboxSelect>>" , self . on_bus ) self . bus_params = BusProperties ( master ) return bus_lb
Creates the dialog body . Returns the widget that should have initial focus .
843
def solve ( self , solver_klass = None ) : t0 = time ( ) om = self . _construct_opf_model ( self . case ) if om is None : return { "converged" : False , "output" : { "message" : "No Ref Bus." } } if solver_klass is not None : result = solver_klass ( om , opt = self . opt ) . solve ( ) elif self . dc : result = DCOPFSolver ( om , opt = self . opt ) . solve ( ) else : result = PIPSSolver ( om , opt = self . opt ) . solve ( ) result [ "elapsed" ] = time ( ) - t0 if self . opt . has_key ( "verbose" ) : if self . opt [ "verbose" ] : logger . info ( "OPF completed in %.3fs." % result [ "elapsed" ] ) return result
Solves an optimal power flow and returns a results dictionary .
844
def _construct_opf_model ( self , case ) : self . case . reset ( ) base_mva = case . base_mva oneref , refs = self . _ref_check ( case ) if not oneref : return None bs , ln , gn = self . _remove_isolated ( case ) self . case . index_buses ( bs ) gn = self . _pwl1_to_poly ( gn ) Va = self . _get_voltage_angle_var ( refs , bs ) Pg = self . _get_pgen_var ( gn , base_mva ) if self . dc : B , Bf , Pbusinj , Pfinj = self . case . makeBdc ( bs , ln ) Pmis = self . _power_mismatch_dc ( bs , gn , B , Pbusinj , base_mva ) Pf , Pt = self . _branch_flow_dc ( ln , Bf , Pfinj , base_mva ) else : Vm = self . _get_voltage_magnitude_var ( bs , gn ) Qg = self . _get_qgen_var ( gn , base_mva ) Pmis , Qmis , Sf , St = self . _nln_constraints ( len ( bs ) , len ( ln ) ) vl = self . _const_pf_constraints ( gn , base_mva ) ang = self . _voltage_angle_diff_limit ( bs , ln ) if self . dc : vars = [ Va , Pg ] constraints = [ Pmis , Pf , Pt , ang ] else : vars = [ Va , Vm , Pg , Qg ] constraints = [ Pmis , Qmis , Sf , St , vl , ang ] y , ycon = self . _pwl_gen_costs ( gn , base_mva ) if ycon is not None : vars . append ( y ) constraints . append ( ycon ) opf = OPFModel ( case ) opf . add_vars ( vars ) opf . add_constraints ( constraints ) if self . dc : opf . _Bf = Bf opf . _Pfinj = Pfinj return opf
Returns an OPF model .
845
def _ref_check ( self , case ) : refs = [ bus . _i for bus in case . buses if bus . type == REFERENCE ] if len ( refs ) == 1 : return True , refs else : logger . error ( "OPF requires a single reference bus." ) return False , refs
Checks that there is only one reference bus .
846
def _remove_isolated ( self , case ) : buses = case . connected_buses branches = case . online_branches gens = case . online_generators return buses , branches , gens
Returns non - isolated case components .
847
def _pwl1_to_poly ( self , generators ) : for g in generators : if ( g . pcost_model == PW_LINEAR ) and ( len ( g . p_cost ) == 2 ) : g . pwl_to_poly ( ) return generators
Converts single - block piecewise - linear costs into linear polynomial .
848
def _get_voltage_angle_var ( self , refs , buses ) : Va = array ( [ b . v_angle * ( pi / 180.0 ) for b in buses ] ) Vau = Inf * ones ( len ( buses ) ) Val = - Vau Vau [ refs ] = Va [ refs ] Val [ refs ] = Va [ refs ] return Variable ( "Va" , len ( buses ) , Va , Val , Vau )
Returns the voltage angle variable set .
849
def _get_voltage_magnitude_var ( self , buses , generators ) : Vm = array ( [ b . v_magnitude for b in buses ] ) for g in generators : Vm [ g . bus . _i ] = g . v_magnitude Vmin = array ( [ b . v_min for b in buses ] ) Vmax = array ( [ b . v_max for b in buses ] ) return Variable ( "Vm" , len ( buses ) , Vm , Vmin , Vmax )
Returns the voltage magnitude variable set .
850
def _get_pgen_var ( self , generators , base_mva ) : Pg = array ( [ g . p / base_mva for g in generators ] ) Pmin = array ( [ g . p_min / base_mva for g in generators ] ) Pmax = array ( [ g . p_max / base_mva for g in generators ] ) return Variable ( "Pg" , len ( generators ) , Pg , Pmin , Pmax )
Returns the generator active power set - point variable .
851
def _get_qgen_var ( self , generators , base_mva ) : Qg = array ( [ g . q / base_mva for g in generators ] ) Qmin = array ( [ g . q_min / base_mva for g in generators ] ) Qmax = array ( [ g . q_max / base_mva for g in generators ] ) return Variable ( "Qg" , len ( generators ) , Qg , Qmin , Qmax )
Returns the generator reactive power variable set .
852
def _nln_constraints ( self , nb , nl ) : Pmis = NonLinearConstraint ( "Pmis" , nb ) Qmis = NonLinearConstraint ( "Qmis" , nb ) Sf = NonLinearConstraint ( "Sf" , nl ) St = NonLinearConstraint ( "St" , nl ) return Pmis , Qmis , Sf , St
Returns non - linear constraints for OPF .
853
def _const_pf_constraints ( self , gn , base_mva ) : ivl = array ( [ i for i , g in enumerate ( gn ) if g . is_load and ( g . q_min != 0.0 or g . q_max != 0.0 ) ] ) vl = [ gn [ i ] for i in ivl ] nvl = len ( vl ) ng = len ( gn ) Pg = array ( [ g . p for g in vl ] ) / base_mva Qg = array ( [ g . q for g in vl ] ) / base_mva Pmin = array ( [ g . p_min for g in vl ] ) / base_mva Qmin = array ( [ g . q_min for g in vl ] ) / base_mva Qmax = array ( [ g . q_max for g in vl ] ) / base_mva for g in vl : if g . qmin != 0.0 and g . q_max != 0.0 : logger . error ( "Either Qmin or Qmax must be equal to zero for " "each dispatchable load." ) Qlim = ( Qmin == 0.0 ) * Qmax + ( Qmax == 0.0 ) * Qmin if any ( abs ( Qg - Pg * Qlim / Pmin ) > 1e-6 ) : logger . error ( "For a dispatchable load, PG and QG must be " "consistent with the power factor defined by " "PMIN and the Q limits." ) if nvl > 0 : xx = Pmin yy = Qlim pftheta = arctan2 ( yy , xx ) pc = sin ( pftheta ) qc = - cos ( pftheta ) ii = array ( [ range ( nvl ) , range ( nvl ) ] ) jj = r_ [ ivl , ivl + ng ] Avl = csr_matrix ( r_ [ pc , qc ] , ( ii , jj ) , ( nvl , 2 * ng ) ) lvl = zeros ( nvl ) uvl = lvl else : Avl = zeros ( ( 0 , 2 * ng ) ) lvl = array ( [ ] ) uvl = array ( [ ] ) return LinearConstraint ( "vl" , Avl , lvl , uvl , [ "Pg" , "Qg" ] )
Returns a linear constraint enforcing constant power factor for dispatchable loads .
854
def _voltage_angle_diff_limit ( self , buses , branches ) : nb = len ( buses ) if not self . ignore_ang_lim : iang = [ i for i , b in enumerate ( branches ) if ( b . ang_min and ( b . ang_min > - 360.0 ) ) or ( b . ang_max and ( b . ang_max < 360.0 ) ) ] iangl = array ( [ i for i , b in enumerate ( branches ) if b . ang_min is not None ] ) [ iang ] iangh = array ( [ i for i , b in enumerate ( branches ) if b . ang_max is not None ] ) [ iang ] nang = len ( iang ) if nang > 0 : ii = range ( nang ) + range ( nang ) jjf = array ( [ b . from_bus . _i for b in branches ] ) [ iang ] jjt = array ( [ b . to_bus . _i for b in branches ] ) [ iang ] jj = r_ [ jjf , jjt ] Aang = csr_matrix ( r_ [ ones ( nang ) , - ones ( nang ) ] , ( ii , jj ) ) uang = Inf * ones ( nang ) lang = - uang lang [ iangl ] = array ( [ b . ang_min * ( pi / 180.0 ) for b in branches ] ) [ iangl ] uang [ iangh ] = array ( [ b . ang_max * ( pi / 180.0 ) for b in branches ] ) [ iangh ] else : Aang = zeros ( ( 0 , nb ) ) lang = array ( [ ] ) uang = array ( [ ] ) else : Aang = zeros ( ( 0 , nb ) ) lang = array ( [ ] ) uang = array ( [ ] ) return LinearConstraint ( "ang" , Aang , lang , uang , [ "Va" ] )
Returns the constraint on the branch voltage angle differences .
855
def add_var ( self , var ) : if var . name in [ v . name for v in self . vars ] : logger . error ( "Variable set named '%s' already exists." % var . name ) return var . i1 = self . var_N var . iN = self . var_N + var . N - 1 self . vars . append ( var )
Adds a variable to the model .
856
def get_var ( self , name ) : for var in self . vars : if var . name == name : return var else : raise ValueError
Returns the variable set with the given name .
857
def linear_constraints ( self ) : if self . lin_N == 0 : return None , array ( [ ] ) , array ( [ ] ) A = lil_matrix ( ( self . lin_N , self . var_N ) , dtype = float64 ) l = - Inf * ones ( self . lin_N ) u = - l for lin in self . lin_constraints : if lin . N : Ak = lin . A i1 = lin . i1 iN = lin . iN vsl = lin . vs kN = - 1 Ai = lil_matrix ( ( lin . N , self . var_N ) , dtype = float64 ) for v in vsl : var = self . get_var ( v ) j1 = var . i1 jN = var . iN k1 = kN + 1 kN = kN + var . N if j1 == jN : for i in range ( Ai . shape [ 0 ] ) : Ai [ i , j1 ] = Ak [ i , k1 ] else : Ai [ : , j1 : jN + 1 ] = Ak [ : , k1 : kN + 1 ] A [ i1 : iN + 1 , : ] = Ai l [ i1 : iN + 1 ] = lin . l u [ i1 : iN + 1 ] = lin . u return A . tocsr ( ) , l , u
Returns the linear constraints .
858
def add_constraint ( self , con ) : if isinstance ( con , LinearConstraint ) : N , M = con . A . shape if con . name in [ c . name for c in self . lin_constraints ] : logger . error ( "Constraint set named '%s' already exists." % con . name ) return False else : con . i1 = self . lin_N con . iN = self . lin_N + N - 1 nv = 0 for vs in con . vs : nv = nv + self . get_var_N ( vs ) if M != nv : logger . error ( "Number of columns of A does not match number" " of variables, A is %d x %d, nv = %d" , N , M , nv ) self . lin_constraints . append ( con ) elif isinstance ( con , NonLinearConstraint ) : N = con . N if con . name in [ c . name for c in self . nln_constraints ] : logger . error ( "Constraint set named '%s' already exists." % con . name ) return False else : con . i1 = self . nln_N con . iN = self . nln_N + N self . nln_constraints . append ( con ) else : raise ValueError return True
Adds a constraint to the model .
859
def _solve ( self , x0 , A , l , u , xmin , xmax ) : il = [ i for i , ln in enumerate ( self . _ln ) if 0.0 < ln . rate_a < 1e10 ] nl2 = len ( il ) neqnln = 2 * self . _nb niqnln = 2 * len ( il ) user_data = { "A" : A , "neqnln" : neqnln , "niqnln" : niqnln } self . _f ( x0 ) Jdata = self . _dg ( x0 , False , user_data ) lmbda = { "eqnonlin" : ones ( neqnln ) , "ineqnonlin" : ones ( niqnln ) } H = tril ( self . _hessfcn ( x0 , lmbda ) , format = "coo" ) self . _Hrow , self . _Hcol = H . row , H . col n = len ( x0 ) xl = xmin xu = xmax gl = r_ [ zeros ( 2 * self . _nb ) , - Inf * ones ( 2 * nl2 ) , l ] gu = r_ [ zeros ( 2 * self . _nb ) , zeros ( 2 * nl2 ) , u ] m = len ( gl ) nnzj = len ( Jdata ) nnzh = 0 f_fcn , df_fcn , g_fcn , dg_fcn , h_fcn = self . _f , self . _df , self . _g , self . _dg , self . _h nlp = pyipopt . create ( n , xl , xu , m , gl , gu , nnzj , nnzh , f_fcn , df_fcn , g_fcn , dg_fcn ) success = nlp . solve ( x0 , user_data ) nlp . close ( )
Solves using the Interior Point OPTimizer .
860
def doOutages ( self ) : assert len ( self . branchOutages ) == len ( self . market . case . branches ) weights = [ [ ( False , r ) , ( True , 1 - ( r ) ) ] for r in self . branchOutages ] for i , ln in enumerate ( self . market . case . branches ) : ln . online = weighted_choice ( weights [ i ] ) if ln . online == False : print "Branch outage [%s] in period %d." % ( ln . name , self . stepid )
Applies branch outages.
861
def reset_case ( self ) : for bus in self . market . case . buses : bus . p_demand = self . pdemand [ bus ] for task in self . tasks : for g in task . env . generators : g . p = task . env . _g0 [ g ] [ "p" ] g . p_max = task . env . _g0 [ g ] [ "p_max" ] g . p_min = task . env . _g0 [ g ] [ "p_min" ] g . q = task . env . _g0 [ g ] [ "q" ] g . q_max = task . env . _g0 [ g ] [ "q_max" ] g . q_min = task . env . _g0 [ g ] [ "q_min" ] g . p_cost = task . env . _g0 [ g ] [ "p_cost" ] g . pcost_model = task . env . _g0 [ g ] [ "pcost_model" ] g . q_cost = task . env . _g0 [ g ] [ "q_cost" ] g . qcost_model = task . env . _g0 [ g ] [ "qcost_model" ] g . c_startup = task . env . _g0 [ g ] [ "startup" ] g . c_shutdown = task . env . _g0 [ g ] [ "shutdown" ]
Returns the case to its original state .
862
def doEpisodes ( self , number = 1 ) : for episode in range ( number ) : print "Starting episode %d." % episode if len ( self . profile . shape ) == 1 : self . _pcycle = cycle ( self . profile ) else : assert self . profile . shape [ 0 ] >= number self . _pcycle = cycle ( self . profile [ episode , : ] ) c = self . _pcycle . next ( ) for bus in self . market . case . buses : bus . p_demand = self . pdemand [ bus ] * c for task , agent in zip ( self . tasks , self . agents ) : agent . newEpisode ( ) task . reset ( ) while False in [ task . isFinished ( ) for task in self . tasks ] : if True in [ task . isFinished ( ) for task in self . tasks ] : raise ValueError self . _oneInteraction ( ) self . reset_case ( )
Does the given number of episodes and returns the rewards of each step as a list.
863
def reset ( self ) : self . stepid = 0 for task , agent in zip ( self . tasks , self . agents ) : task . reset ( ) agent . module . reset ( ) agent . history . reset ( )
Sets initial conditions for the experiment .
864
def _updatePropensities ( self , lastState , lastAction , reward ) : phi = self . recency for action in range ( self . module . numActions ) : carryOver = ( 1 - phi ) * self . module . getValue ( lastState , action ) experience = self . _experience ( lastState , action , lastAction , reward ) self . module . updateValue ( lastState , action , carryOver + experience )
Update the propensities for all actions . The propensity for last action chosen will be updated using the feedback value that resulted from performing the action .
865
def _forwardImplementation ( self , inbuf , outbuf ) : assert self . module propensities = self . module . getActionValues ( 0 ) summedProps = sum ( propensities ) probabilities = propensities / summedProps action = eventGenerator ( probabilities ) outbuf [ : ] = scipy . array ( [ action ] )
Proportional probability method .
866
def write ( self , file_or_filename ) : self . book = Workbook ( ) self . _write_data ( None ) self . book . save ( file_or_filename )
Writes case data to file in Excel format .
867
def write_bus_data ( self , file ) : bus_sheet = self . book . add_sheet ( "Buses" ) for i , bus in enumerate ( self . case . buses ) : for j , attr in enumerate ( BUS_ATTRS ) : bus_sheet . write ( i , j , getattr ( bus , attr ) )
Writes bus data to an Excel spreadsheet .
868
def write_branch_data ( self , file ) : branch_sheet = self . book . add_sheet ( "Branches" ) for i , branch in enumerate ( self . case . branches ) : for j , attr in enumerate ( BRANCH_ATTRS ) : branch_sheet . write ( i , j , getattr ( branch , attr ) )
Writes branch data to an Excel spreadsheet .
869
def write_generator_data ( self , file ) : generator_sheet = self . book . add_sheet ( "Generators" ) for j , generator in enumerate ( self . case . generators ) : i = generator . bus . _i generator_sheet . write ( j , 0 , i ) for k , attr in enumerate ( GENERATOR_ATTRS ) : generator_sheet . write ( j , k + 1 , getattr ( generator , attr ) )
Write generator data to file .
870
def write ( self , file_or_filename ) : if isinstance ( file_or_filename , basestring ) : file = open ( file_or_filename , "wb" ) else : file = file_or_filename self . writer = csv . writer ( file ) super ( CSVWriter , self ) . write ( file )
Writes case data as CSV .
871
def write_case_data ( self , file ) : writer = self . _get_writer ( file ) writer . writerow ( [ "Name" , "base_mva" ] ) writer . writerow ( [ self . case . name , self . case . base_mva ] )
Writes the case data as CSV .
872
def write_bus_data ( self , file ) : writer = self . _get_writer ( file ) writer . writerow ( BUS_ATTRS ) for bus in self . case . buses : writer . writerow ( [ getattr ( bus , attr ) for attr in BUS_ATTRS ] )
Writes bus data as CSV .
873
def write_branch_data ( self , file ) : writer = self . _get_writer ( file ) writer . writerow ( BRANCH_ATTRS ) for branch in self . case . branches : writer . writerow ( [ getattr ( branch , a ) for a in BRANCH_ATTRS ] )
Writes branch data as CSV .
874
def write_generator_data ( self , file ) : writer = self . _get_writer ( file ) writer . writerow ( [ "bus" ] + GENERATOR_ATTRS ) for g in self . case . generators : i = g . bus . _i writer . writerow ( [ i ] + [ getattr ( g , a ) for a in GENERATOR_ATTRS ] )
Write generator data as CSV .
875
def run ( self ) : t0 = time . time ( ) haveQ = self . _isReactiveMarket ( ) self . _withholdOffbids ( ) self . _offbidToCase ( ) success = self . _runOPF ( ) if success : gteeOfferPrice , gteeBidPrice = self . _nodalPrices ( haveQ ) self . _runAuction ( gteeOfferPrice , gteeBidPrice , haveQ ) logger . info ( "SmartMarket cleared in %.3fs" % ( time . time ( ) - t0 ) ) else : for offbid in self . offers + self . bids : offbid . clearedQuantity = 0.0 offbid . clearedPrice = 0.0 offbid . accepted = False offbid . generator . p = 0.0 logger . error ( "Non-convergent market OPF. Blackout!" ) return self . offers , self . bids
Computes cleared offers and bids .
876
def _runOPF ( self ) : if self . decommit : solver = UDOPF ( self . case , dc = ( self . locationalAdjustment == "dc" ) ) elif self . locationalAdjustment == "dc" : solver = OPF ( self . case , dc = True ) else : solver = OPF ( self . case , dc = False , opt = { "verbose" : True } ) self . _solution = solver . solve ( ) return self . _solution [ "converged" ]
Computes dispatch points and LMPs using OPF .
877
def encode ( self , o ) : chunks = list ( self . iterencode ( o ) ) return '' . join ( chunks )
Return a JSON string representation of a Python data structure .
878
def compute_file_metrics ( processors , language , key , token_list ) : tli = itertools . tee ( token_list , len ( processors ) ) metrics = OrderedDict ( ) for p in processors : p . reset ( ) for p , tl in zip ( processors , tli ) : p . process_file ( language , key , tl ) for p in processors : metrics . update ( p . metrics ) return metrics
Uses processors to compute file metrics.
879
def load ( self , lemmatizer_path ) : self . lemmatizer = { } with io . open ( lemmatizer_path , encoding = 'utf-8' ) as data_file : raw = json . load ( data_file ) for entry in raw : self . lemmatizer [ entry [ "Form" ] ] = entry [ "Lemmas" ] self . apply_blacklist ( )
This method loads the IWNLP.Lemmatizer JSON file and creates a dictionary of lowercased forms which maps each form to its possible lemmas.
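The structure the loader expects can be inferred from the keys read above; a hypothetical two-entry file, expressed as the equivalent Python literal, would be:

    # Each entry maps a (lowercased) surface form to its candidate lemmas; values are made up.
    raw = [
        {"Form": "ging", "Lemmas": ["gehen"]},
        {"Form": "häuser", "Lemmas": ["Haus"]},
    ]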
880
def write ( self , file_or_filename ) : if isinstance ( file_or_filename , basestring ) : file = None try : file = open ( file_or_filename , "wb" ) except Exception , detail : logger . error ( "Error opening %s." % detail ) finally : if file is not None : self . _write_data ( file ) file . close ( ) else : file = file_or_filename self . _write_data ( file ) return file
Writes the case data to file .
881
def performAction ( self , action ) : self . t += 1 super ( ProfitTask , self ) . performAction ( int ( action [ 0 ] ) ) self . samples += 1
The action vector is stripped and the only element is cast to integer and given to the super class .
882
def addReward ( self , r = None ) : r = self . getReward ( ) if r is None else r if self . discount : self . cumulativeReward += power ( self . discount , self . samples ) * r else : self . cumulativeReward += r
Adds the given reward, or the current reward from getReward(), to the cumulative reward, applying the discount factor if one is set.
883
def getV0 ( self , v_mag_guess , buses , generators , type = CASE_GUESS ) : if type == CASE_GUESS : Va = array ( [ b . v_angle * ( pi / 180.0 ) for b in buses ] ) Vm = array ( [ b . v_magnitude for b in buses ] ) V0 = Vm * exp ( 1j * Va ) elif type == FLAT_START : V0 = ones ( len ( buses ) ) elif type == FROM_INPUT : V0 = v_mag_guess else : raise ValueError gbus = [ g . bus . _i for g in generators ] Vg = array ( [ g . v_magnitude for g in generators ] ) V0 [ gbus ] = Vg * abs ( V0 [ gbus ] ) / V0 [ gbus ] return V0
Returns the initial voltage profile .
884
def output_solution ( self , fd , z , z_est , error_sqrsum ) : col_width = 11 sep = ( "=" * col_width + " " ) * 4 + "\n" fd . write ( "State Estimation\n" ) fd . write ( "-" * 16 + "\n" ) fd . write ( sep ) fd . write ( "Type" . center ( col_width ) + " " ) fd . write ( "Name" . center ( col_width ) + " " ) fd . write ( "Measurement" . center ( col_width ) + " " ) fd . write ( "Estimation" . center ( col_width ) + " " ) fd . write ( "\n" ) fd . write ( sep ) c = 0 for t in [ PF , PT , QF , QT , PG , QG , VM , VA ] : for meas in self . measurements : if meas . type == t : n = meas . b_or_l . name [ : col_width ] . ljust ( col_width ) fd . write ( t . ljust ( col_width ) + " " ) fd . write ( n + " " ) fd . write ( "%11.5f " % z [ c ] ) fd . write ( "%11.5f\n" % z_est [ c ] ) c += 1 fd . write ( "\nWeighted sum of error squares = %.4f\n" % error_sqrsum )
Prints comparison of measurements and their estimations .
885
def run ( self ) : self . _clearQuantities ( ) self . _clearPrices ( ) self . _clipPrices ( ) self . _logClearances ( ) return self . offers , self . bids
Clears a set of bids and offers .
886
def _clearQuantity ( self , offbids , gen ) : gOffbids = [ offer for offer in offbids if offer . generator == gen ] valid = [ ob for ob in gOffbids if not ob . withheld ] valid . sort ( key = lambda ob : ob . price , reverse = [ False , True ] [ gen . is_load ] ) acceptedQty = 0.0 for ob in valid : accepted = ( ob . totalQuantity - acceptedQty ) / ob . quantity if accepted > 1.0 : accepted = 1.0 elif accepted < 1.0e-05 : accepted = 0.0 ob . clearedQuantity = accepted * ob . quantity ob . accepted = ( accepted > 0.0 ) acceptedQty += ob . quantity
Computes the cleared bid quantity from total dispatched quantity .
887
def _clearPrices ( self ) : for offbid in self . offers + self . bids : if self . auctionType == DISCRIMINATIVE : offbid . clearedPrice = offbid . price elif self . auctionType == FIRST_PRICE : offbid . clearedPrice = offbid . lmbda else : raise ValueError
Clears prices according to auction type .
888
def _clipPrices ( self ) : if self . guaranteeOfferPrice : for offer in self . offers : if offer . accepted and offer . clearedPrice < offer . price : offer . clearedPrice = offer . price if self . guaranteeBidPrice : for bid in self . bids : if bid . accepted and bid . clearedPrice > bid . price : bid . clearedPrice = bid . price if self . limits . has_key ( "maxClearedOffer" ) : maxClearedOffer = self . limits [ "maxClearedOffer" ] for offer in self . offers : if offer . clearedPrice > maxClearedOffer : offer . clearedPrice = maxClearedOffer if self . limits . has_key ( "minClearedBid" ) : minClearedBid = self . limits [ "minClearedBid" ] for bid in self . bids : if bid . clearedPrice < minClearedBid : bid . clearedPrice = minClearedBid if self . auctionType != DISCRIMINATIVE : for g in self . case . generators : gOffers = [ of for of in self . offers if of . generator == g ] if gOffers : uniformPrice = max ( [ of . clearedPrice for of in gOffers ] ) for of in gOffers : of . clearedPrice = uniformPrice gBids = [ bid for bid in self . bids if bid . vLoad == g ] if gBids : uniformPrice = min ( [ bid . clearedPrice for bid in gBids ] ) for bid in gBids : bid . clearedPrice = uniformPrice
Clip cleared prices according to guarantees and limits .
889
def wait_for_response ( client , timeout , path = '/' , expected_status_code = None ) : get_time = getattr ( time , 'monotonic' , time . time ) deadline = get_time ( ) + timeout while True : try : time_left = deadline - get_time ( ) response = client . get ( path , timeout = max ( time_left , 0.001 ) , allow_redirects = False ) if ( expected_status_code is None or response . status_code == expected_status_code ) : return except requests . exceptions . Timeout : break except Exception : pass if get_time ( ) >= deadline : break time . sleep ( 0.1 ) raise TimeoutError ( 'Timeout waiting for HTTP response.' )
Tries to make a GET request with an HTTP client against a certain path and returns once any response has been received, ignoring any errors.
890
def request ( self , method , path = None , url_kwargs = None , ** kwargs ) : return self . _session . request ( method , self . _url ( path , url_kwargs ) , ** kwargs )
Make a request against a container .
891
def options ( self , path = None , url_kwargs = None , ** kwargs ) : return self . _session . options ( self . _url ( path , url_kwargs ) , ** kwargs )
Sends an OPTIONS request .
892
def head ( self , path = None , url_kwargs = None , ** kwargs ) : return self . _session . head ( self . _url ( path , url_kwargs ) , ** kwargs )
Sends a HEAD request .
893
def post ( self , path = None , url_kwargs = None , ** kwargs ) : return self . _session . post ( self . _url ( path , url_kwargs ) , ** kwargs )
Sends a POST request .
894
def iuwt_decomposition ( in1 , scale_count , scale_adjust = 0 , mode = 'ser' , core_count = 2 , store_smoothed = False , store_on_gpu = False ) : if mode == 'ser' : return ser_iuwt_decomposition ( in1 , scale_count , scale_adjust , store_smoothed ) elif mode == 'mp' : return mp_iuwt_decomposition ( in1 , scale_count , scale_adjust , store_smoothed , core_count ) elif mode == 'gpu' : return gpu_iuwt_decomposition ( in1 , scale_count , scale_adjust , store_smoothed , store_on_gpu )
This function serves as a handler for the different implementations of the IUWT decomposition . It allows the different methods to be used almost interchangeably .
895
def iuwt_recomposition ( in1 , scale_adjust = 0 , mode = 'ser' , core_count = 1 , store_on_gpu = False , smoothed_array = None ) : if mode == 'ser' : return ser_iuwt_recomposition ( in1 , scale_adjust , smoothed_array ) elif mode == 'mp' : return mp_iuwt_recomposition ( in1 , scale_adjust , core_count , smoothed_array ) elif mode == 'gpu' : return gpu_iuwt_recomposition ( in1 , scale_adjust , store_on_gpu , smoothed_array )
This function serves as a handler for the different implementations of the IUWT recomposition . It allows the different methods to be used almost interchangeably .
896
def ser_iuwt_decomposition ( in1 , scale_count , scale_adjust , store_smoothed ) : wavelet_filter = ( 1. / 16 ) * np . array ( [ 1 , 4 , 6 , 4 , 1 ] ) detail_coeffs = np . empty ( [ scale_count - scale_adjust , in1 . shape [ 0 ] , in1 . shape [ 1 ] ] ) C0 = in1 if scale_adjust > 0 : for i in range ( 0 , scale_adjust ) : C0 = ser_a_trous ( C0 , wavelet_filter , i ) for i in range ( scale_adjust , scale_count ) : C = ser_a_trous ( C0 , wavelet_filter , i ) C1 = ser_a_trous ( C , wavelet_filter , i ) detail_coeffs [ i - scale_adjust , : , : ] = C0 - C1 C0 = C if store_smoothed : return detail_coeffs , C0 else : return detail_coeffs
This function calls the a trous algorithm code to decompose the input into its wavelet coefficients . This is the isotropic undecimated wavelet transform implemented for a single CPU core .
897
def ser_iuwt_recomposition ( in1 , scale_adjust , smoothed_array ) : wavelet_filter = ( 1. / 16 ) * np . array ( [ 1 , 4 , 6 , 4 , 1 ] ) max_scale = in1 . shape [ 0 ] + scale_adjust if smoothed_array is None : recomposition = np . zeros ( [ in1 . shape [ 1 ] , in1 . shape [ 2 ] ] ) else : recomposition = smoothed_array for i in range ( max_scale - 1 , scale_adjust - 1 , - 1 ) : recomposition = ser_a_trous ( recomposition , wavelet_filter , i ) + in1 [ i - scale_adjust , : , : ] if scale_adjust > 0 : for i in range ( scale_adjust - 1 , - 1 , - 1 ) : recomposition = ser_a_trous ( recomposition , wavelet_filter , i ) return recomposition
This function calls the a trous algorithm code to recompose the input into a single array . This is the implementation of the isotropic undecimated wavelet transform recomposition for a single CPU core .
898
def mp_iuwt_recomposition ( in1 , scale_adjust , core_count , smoothed_array ) : wavelet_filter = ( 1. / 16 ) * np . array ( [ 1 , 4 , 6 , 4 , 1 ] ) max_scale = in1 . shape [ 0 ] + scale_adjust if smoothed_array is None : recomposition = np . zeros ( [ in1 . shape [ 1 ] , in1 . shape [ 2 ] ] ) else : recomposition = smoothed_array for i in range ( max_scale - 1 , scale_adjust - 1 , - 1 ) : recomposition = mp_a_trous ( recomposition , wavelet_filter , i , core_count ) + in1 [ i - scale_adjust , : , : ] if scale_adjust > 0 : for i in range ( scale_adjust - 1 , - 1 , - 1 ) : recomposition = mp_a_trous ( recomposition , wavelet_filter , i , core_count ) return recomposition
This function calls the a trous algorithm code to recompose the input into a single array . This is the implementation of the isotropic undecimated wavelet transform recomposition for multiple CPU cores .
899
def gpu_iuwt_decomposition ( in1 , scale_count , scale_adjust , store_smoothed , store_on_gpu ) : ker = SourceModule ( ) wavelet_filter = ( 1. / 16 ) * np . array ( [ 1 , 4 , 6 , 4 , 1 ] , dtype = np . float32 ) wavelet_filter = gpuarray . to_gpu_async ( wavelet_filter ) detail_coeffs = gpuarray . empty ( [ scale_count - scale_adjust , in1 . shape [ 0 ] , in1 . shape [ 1 ] ] , np . float32 ) try : gpu_in1 = gpuarray . to_gpu_async ( in1 . astype ( np . float32 ) ) except : gpu_in1 = in1 gpu_tmp = gpuarray . empty_like ( gpu_in1 ) gpu_out1 = gpuarray . empty_like ( gpu_in1 ) gpu_out2 = gpuarray . empty_like ( gpu_in1 ) gpu_scale = gpuarray . zeros ( [ 1 ] , np . int32 ) gpu_adjust = gpuarray . zeros ( [ 1 ] , np . int32 ) gpu_adjust += scale_adjust gpu_a_trous_row_kernel , gpu_a_trous_col_kernel = gpu_a_trous ( ) gpu_store_detail_coeffs = ker . get_function ( "gpu_store_detail_coeffs" ) grid_rows = int ( in1 . shape [ 0 ] // 32 ) grid_cols = int ( in1 . shape [ 1 ] // 32 ) if scale_adjust > 0 : for i in range ( 0 , scale_adjust ) : gpu_a_trous_row_kernel ( gpu_in1 , gpu_tmp , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_a_trous_col_kernel ( gpu_tmp , gpu_out1 , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_in1 , gpu_out1 = gpu_out1 , gpu_in1 gpu_scale += 1 for i in range ( scale_adjust , scale_count ) : gpu_a_trous_row_kernel ( gpu_in1 , gpu_tmp , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_a_trous_col_kernel ( gpu_tmp , gpu_out1 , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_a_trous_row_kernel ( gpu_out1 , gpu_tmp , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_a_trous_col_kernel ( gpu_tmp , gpu_out2 , wavelet_filter , gpu_scale , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows ) ) gpu_store_detail_coeffs ( gpu_in1 , gpu_out2 , detail_coeffs , gpu_scale , gpu_adjust , block = ( 32 , 32 , 1 ) , grid = ( grid_cols , grid_rows , int ( scale_count ) ) ) gpu_in1 , gpu_out1 = gpu_out1 , gpu_in1 gpu_scale += 1 if store_on_gpu : return detail_coeffs elif store_smoothed : return detail_coeffs . get ( ) , gpu_in1 . get ( ) else : return detail_coeffs . get ( )
This function calls the a trous algorithm code to decompose the input into its wavelet coefficients . This is the isotropic undecimated wavelet transform implemented for a GPU .