idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
49,100
def iterconfig(config, scan_type):
    """Iterate over scans of the given type in a BIDS configuration mapping.

    Yields each scan dict after augmenting it in place with its 'type' and
    'modality' keys.  Yields nothing if *scan_type* is absent from config.
    """
    if scan_type in config:
        # .items() is already iterable; the original iter() wrapper was redundant
        for modality, scans in config[scan_type].items():
            for scan in scans:
                scan.update({'type': scan_type, 'modality': modality})
                yield scan
Iterate over BIDS configuration file
49,101
def rename_fmapm(bids_base, basename):
    """Rename magnitude fieldmap files to the BIDS specification.

    dcm2niix emits '<basename>_e<echo>.<ext>' files; BIDS expects
    'magnitude<echo>' instead of 'magnitude_e<echo>'.  Renames whichever
    of the nii.gz/json files for echoes 1 and 2 exist under
    <bids_base>/fmap.

    Returns a dict mapping extension -> renamed path.
    NOTE(review): only the last renamed echo per extension is kept in the
    returned dict -- confirm callers only need one path per extension.
    """
    files = dict()
    for ext in ['nii.gz', 'json']:
        for echo in [1, 2]:
            fname = '{0}_e{1}.{2}'.format(basename, echo, ext)
            src = os.path.join(bids_base, 'fmap', fname)
            if os.path.exists(src):
                dst = src.replace('magnitude_e{0}'.format(echo), 'magnitude{0}'.format(echo))
                logger.debug('renaming %s to %s', src, dst)
                os.rename(src, dst)
                files[ext] = dst
    return files
Rename magnitude fieldmap file to BIDS specification
49,102
def convert(input, output):
    """Run dcm2niix on *input*, writing NIfTI output based on *output*.

    Creates the output directory if needed; the output basename (minus any
    .nii/.nii.gz suffix) is passed to dcm2niix as the filename template.
    """
    dirname = os.path.dirname(output)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    basename = os.path.basename(output)
    # strip a trailing .nii or .nii.gz extension; the dots are escaped and
    # the pattern anchored so the original '.nii(.gz)?' (where '.' matched
    # any character, anywhere) cannot mangle unrelated parts of the name
    basename = re.sub(r'\.nii(\.gz)?$', '', basename)
    dcm2niix = commons.which('dcm2niix')  # NOTE(review): resolved path is unused; cmd relies on PATH
    cmd = ['dcm2niix', '-s', 'y', '-b', 'y', '-z', 'y', '-f', basename, '-o', dirname, input]
    logger.debug(cmd)
    sp.check_output(cmd)
Run dcm2niix on input file
49,103
def DispatchSpout(*a, **kw):
    """Factory that instantiates the spout class named in the configuration."""
    class_path = get_config()['Spout']
    spout_cls = import_name(class_path, default_ns='birding.spout')
    return spout_cls(*a, **kw)
Factory to dispatch spout class based on config .
49,104
def which(x):
    """Locate executable *x* on the PATH, like the Unix ``which`` command.

    Returns the absolute path of the first PATH entry containing *x*,
    or None if not found.
    """
    # default to '' so a missing PATH variable does not raise AttributeError
    for p in os.environ.get('PATH', '').split(os.pathsep):
        p = os.path.join(p, x)
        if os.path.exists(p):
            return os.path.abspath(p)
    return None
Same as which command on Linux
49,105
def serve(context: Context, port=8000, browsersync_port=3000, browsersync_ui_port=3030):
    """Starts a development server with auto-building and live-reload.

    Installs watchdog on demand, watches asset/template paths and rebuilds
    javascript/stylesheets (debounced by 3 seconds) on change, runs
    browser-sync in a background thread for live reload, then starts the
    web server in the foreground.
    """
    try:
        from watchdog.observers import Observer
    except ImportError:
        # install the file-watching dependency on first use, then retry
        context.pip_command('install', 'watchdog>0.8,<0.9')
        from watchdog.observers import Observer
    from watchdog.events import PatternMatchingEventHandler

    class RebuildHandler(PatternMatchingEventHandler):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._patterns = ['*.js', '*.scss', '*.html']
            self._ignore_directories = True
            self.builder = None  # pending debounce timer, if any
            self.rebuild_javascript = threading.Event()
            self.rebuild_stylesheets = threading.Event()

        def on_any_event(self, event):
            # cancel any pending rebuild and restart the 3-second debounce window
            if self.builder:
                self.builder.cancel()
            extension = event.src_path.rsplit('.', 1)[-1].lower()
            if extension == 'js':
                self.rebuild_javascript.set()
            elif extension == 'scss':
                self.rebuild_stylesheets.set()
            self.builder = threading.Timer(3, self.rebuild)
            self.builder.start()

        def rebuild(self):
            if self.rebuild_javascript.is_set():
                self.rebuild_javascript.clear()
                context.debug('Triggering javascript build')
                bundle_javascript(context)
            if self.rebuild_stylesheets.is_set():
                self.rebuild_stylesheets.clear()
                context.debug('Triggering stylesheet build')
                bundle_stylesheets(context)
            context.debug('Reloading browsers')
            context.node_tool('browser-sync', 'reload', '--port=%s' % browsersync_port)

    context.info('Watching sources')
    observer = Observer()
    paths = [
        context.app.common_asset_source_path,
        context.app.asset_source_path,
        context.app.common_templates_path,
        context.app.templates_path,
    ]
    handler = RebuildHandler()
    for path in paths:
        observer.schedule(handler, path, recursive=True)
    # NOTE(review): setDaemon() is a deprecated alias; daemon=True is the modern spelling
    observer.setDaemon(True)
    observer.start()
    context.info('Starting browser sync')
    browsersync_args = [
        'start', '--host=localhost', '--no-open',
        '--logLevel', {0: 'silent', 1: 'info', 2: 'debug'}[context.verbosity],
        '--port=%s' % browsersync_port,
        '--proxy=localhost:%s' % port,
        '--ui-port=%s' % browsersync_ui_port,
    ]
    browsersync = functools.partial(context.node_tool, 'browser-sync', *browsersync_args)
    threading.Thread(target=browsersync, daemon=True).start()
    context.info('Starting web server')
    return start(context, port=port)
Starts a development server with auto - building and live - reload
49,106
def create_build_paths(context: Context):
    """Creates the directories needed for build outputs, if they don't exist."""
    candidates = (
        context.app.asset_build_path,
        context.app.screenshots_build_path,
        context.app.collected_assets_path,
    )
    # skip any unset (falsy) paths
    for build_path in filter(None, candidates):
        os.makedirs(build_path, exist_ok=True)
Creates directories needed for build outputs
49,107
def node_dependencies(context: Context):
    """Updates node.js dependencies by running ``npm install``."""
    loglevels = {0: 'silent', 1: 'warn', 2: 'info'}
    args = ['--loglevel', loglevels[context.verbosity]]
    if not context.use_colour:
        args.append('--color false')
    args.append('install')
    return context.shell('npm', *args)
Updates node . js dependencies
49,108
def local_docker(context: Context):
    """Runs the app in a docker container; for local development only!

    Determines the host machine IP (docker-machine's "default" machine if
    available, else this host's address) and runs ``docker-compose up``
    with it exported as HOST_MACHINE_IP.  Once performed, docker-compose
    up can be used directly.
    """
    output = io.StringIO()
    # capture any Python-level stdout produced while querying docker-machine
    # NOTE(review): redirect_stdout does not capture a subprocess's fd-level
    # output -- presumably context.shell echoes via sys.stdout; confirm
    with contextlib.redirect_stdout(output):
        context.shell('docker-machine', 'ip', 'default')
    host_machine_ip = output.getvalue().strip() or socket.gethostbyname(socket.gethostname())
    args = ()
    if context.verbosity > 1:
        args += ('--verbose',)
    args += ('up', '--build', '--remove-orphans')
    if not context.use_colour:
        args += ('--no-color',)
    context.shell('docker-compose', *args, environment={'HOST_MACHINE_IP': host_machine_ip})
Runs the app in a docker container ; for local development only! Once performed docker - compose up can be used directly
49,109
def lint_javascript(context: Context):
    """Tests javascript for code and style errors using eslint."""
    config_path = os.path.join(
        context.app.common_templates_path, 'mtp_common', 'build_tasks', 'eslintrc.json'
    )
    args = ['--config', config_path, '--format', 'stylish']
    if context.verbosity == 0:
        args.append('--quiet')
    if not context.use_colour:
        args.append('--no-color')
    args.append(context.app.javascript_source_path)
    return context.node_tool('eslint', *args)
Tests javascript for code and style errors
49,110
def lint_stylesheets(context: Context):
    """Tests stylesheets for code and style errors using sass-lint."""
    config_path = os.path.join(
        context.app.common_templates_path, 'mtp_common', 'build_tasks', 'sass-lint.yml'
    )
    args = ['--config', config_path, '--format', 'stylish', '--syntax', 'scss']
    if context.verbosity > 1:
        args.append('--verbose')
    args.append(os.path.join(context.app.scss_source_path, '**', '*.scss'))
    return context.node_tool('sass-lint', *args)
Tests stylesheets for code and style errors
49,111
def govuk_template(context: Context, version='0.23.0', replace_fonts=True):
    """Installs the GOV.UK django template and its static assets.

    Skips the download if the template's base.html is already present.
    Downloads the release tarball with curl, unpacks it, rsyncs static
    assets and templates into the app's build/template paths, and always
    cleans up the downloaded files.  Optionally removes IE8 font assets.
    """
    # already installed? (FileSet is truthy when the file exists)
    if FileSet(os.path.join(context.app.govuk_templates_path, 'base.html')):
        return
    # implicit string concatenation: a single URL split over two literals
    url = 'https://github.com/alphagov/govuk_template/releases' '/download/v{0}/django_govuk_template-{0}.tgz'.format(version)
    try:
        context.shell('curl --location %(silent)s --output govuk_template.tgz %(url)s' % {
            'silent': '--silent' if context.verbosity == 0 else '',
            'url': url,
        })
        context.shell('tar xzf govuk_template.tgz ./govuk_template')
        rsync_flags = '-avz' if context.verbosity == 2 else '-az'
        context.shell('rsync %s govuk_template/static/ %s/' % (rsync_flags, context.app.asset_build_path))
        context.shell('rsync %s govuk_template/templates/ %s/' % (rsync_flags, context.app.templates_path))
    finally:
        # remove the tarball and unpacked directory even if a step failed
        context.shell('rm -rf govuk_template.tgz ./govuk_template')
    if replace_fonts:
        context.shell('rm -rf %s/stylesheets/fonts-ie8.css' ' %s/stylesheets/fonts/' % (context.app.asset_build_path, context.app.asset_build_path))
Installs GOV . UK template
49,112
def additional_assets(context: Context):
    """Collects additional assets (e.g. GOV.UK frontend toolkit) into the build path."""
    flags = '-avz' if context.verbosity == 2 else '-az'
    for asset_path in context.app.additional_asset_paths:
        context.shell('rsync %s %s %s/' % (flags, asset_path, context.app.asset_build_path))
Collects assets from GOV . UK frontend toolkit
49,113
def precompile_python_code(context: Context):
    """Pre-compiles the Django app's python modules to bytecode."""
    from compileall import compile_dir
    # below full verbosity, silence per-file listing
    kwargs = {'quiet': True} if context.verbosity < 2 else {}
    compile_dir(context.app.django_app_name, **kwargs)
Pre - compiles python modules
49,114
def make_messages(context: Context, javascript=False, fuzzy=False):
    """Collects translatable text into gettext translation source files."""
    kwargs = dict(all=True, keep_pot=True, no_wrap=True)
    if fuzzy:
        kwargs['allow_fuzzy'] = True
    if javascript:
        # javascript catalogues use the djangojs domain and skip built bundles
        kwargs.update(domain='djangojs', ignore_patterns=['*.bundle.js'])
    with in_dir(context.app.django_app_name):
        return context.management_command('makemessages', **kwargs)
Collects text into translation source files
49,115
def classify_format(f):
    """Determine the format of a word embedding file from its content.

    Only inspects the first two lines; it does not fully validate the file.
    Returns the matching loader module, or raises OSError for an unknown
    format.
    """
    l0, l1 = _get_two_lines(f)
    if loader.glove.check_valid(l0, l1):
        return _glove
    elif loader.word2vec_text.check_valid(l0, l1):
        return _word2vec_text
    elif loader.word2vec_bin.check_valid(l0, l1):
        return _word2vec_bin
    else:
        # use a str message; the original passed bytes, which renders as b"..."
        raise OSError("Invalid format")
Determine the format of a word embedding file from its content. This operation only looks at the first two lines and does not check the sanity of the input file.
49,116
def load(cls, path, vocab=None, dtype=np.float32, max_vocab=None, format=None, binary=False):
    """Load a pretrained word embedding from a file.

    Args:
        cls: class instantiated with (array, vocab, freqs).
        path: path of the embedding file.
        vocab: optional path of a vocab/frequency file; when given, words
            are indexed by descending frequency (truncated to max_vocab).
        dtype: numpy dtype of the loaded array.
        max_vocab: maximum number of words to load.
        format: explicit format name; autodetected from content when None.
        binary: whether the explicitly-named format is binary.
    """
    freqs = None
    if vocab is not None:
        # build word -> index mapping, most frequent words first
        with open(vocab, mode='rb') as f:
            freqs = loader.vocab.load_vocab(f)
        vocab = {k: i for i, (k, v) in enumerate(sorted(six.iteritems(freqs), key=lambda k_v: k_v[1], reverse=True)[:max_vocab])}
    # first open: determine which loader module to use
    with open(path, mode='rb') as f:
        if format is None:
            mod = classify_format(f)
        else:
            mod = _select_module(format, binary)
    # second open: actually load the vectors from the start of the file
    with open(path, mode='rb') as f:
        if vocab is not None:
            arr = mod.loader.load_with_vocab(f, vocab, dtype=dtype)
            v = vocab
        else:
            arr, v = mod.loader.load(f, max_vocab=max_vocab, dtype=dtype)
    obj = cls(arr, v, freqs)
    obj._load_cond = mod
    return obj
Load pretrained word embedding from a file .
49,117
def paths_for_shell(paths, separator=' '):
    """Converts a list of paths for safe use in shell commands.

    Falsy entries are dropped and each remaining path is shell-quoted.
    If *separator* is None, an iterator of quoted paths is returned
    instead of a joined string.
    """
    quoted = map(shlex.quote, filter(None, paths))
    if separator is None:
        return quoted
    return separator.join(quoted)
Converts a list of paths for use in shell commands
49,118
def register(self, *dependencies, default=False, hidden=False, ignore_return_code=False):
    """Decorates a callable to turn it into a task registered on this registry."""
    def decorator(func):
        task = Task(func, *dependencies, default=default, hidden=hidden,
                    ignore_return_code=ignore_return_code)
        # remember any task we shadow so it can be inspected later
        previous = self._tasks.pop(task.name, None)
        if previous:
            self._overidden_tasks[task.name].append(previous)
        self[task.name] = task
        return task
    return decorator
Decorates a callable to turn it into a task
49,119
def lookup_task(self, task):
    """Looks up a task by name; raises TaskError when it cannot be found."""
    if isinstance(task, str) and task in self:
        return self[task]
    raise TaskError('Unknown task %s' % task)
Looks up a task by name or by callable
49,120
def get_default_task(self):
    """Returns the default task if exactly one is marked default, else None."""
    defaults = [task for task in self.values() if task.default]
    if len(defaults) == 1:
        return defaults[0]
Returns the default task if there is only one
49,121
def from_callable(cls, func, ignored_parameters=set()):
    """Reads a function or method signature to produce a group of parameters.

    Parameters whose names start with '_' or appear in *ignored_parameters*
    are skipped.
    NOTE: the set() default is never mutated, so sharing it is harmless.
    """
    group = cls()
    for sig_param in inspect.signature(func).parameters.values():
        if sig_param.name.startswith('_') or sig_param.name in ignored_parameters:
            continue
        param = Parameter.from_callable_parameter(sig_param)
        group[param.name] = param
    return group
Reads a function or method signature to produce a set of parameters
49,122
def from_mapping(cls, mapping):
    """Produces a group of parameters from a name->value mapping.

    Names starting with '_' are skipped; constraints are inferred from
    the value types.
    """
    group = cls()
    public_items = ((n, v) for n, v in mapping.items() if not n.startswith('_'))
    for name, value in public_items:
        group[name] = Parameter(
            name=name,
            value=value,
            constraint=Parameter.constraint_from_type(value),
        )
    return group
Produces a set of parameters from a mapping
49,123
def to_dict(self):
    """Converts the group of parameters into a plain name -> value dict."""
    return {param.name: param.value for param in self.values()}
Converts the set of parameters into a dict
49,124
def consume_arguments(self, argument_list):
    """Feed arguments to parameters until no parameter consumes any more.

    Returns the remaining (unconsumed) arguments.
    """
    while True:
        before = len(argument_list)
        for parameter in self.values():
            argument_list = parameter.consume_arguments(argument_list)
        if len(argument_list) == before:
            # a full pass consumed nothing further; we're done
            return argument_list
Takes arguments from a list while there are parameters that can accept them
49,125
def update_from(self, mapping):
    """Updates values of parameters from *mapping*, only for existing keys."""
    for name, new_value in mapping.items():
        if name not in self:
            continue
        if isinstance(new_value, Parameter):
            # unwrap Parameter instances to their raw value
            new_value = new_value.value
        self[name].value = new_value
Updates the set of parameters from a mapping for keys that already exist
49,126
def from_callable_parameter(cls, parameter):
    """Produces a Parameter from an inspect.Parameter of a callable's signature.

    Due to operator precedence, the condition accepts KEYWORD_ONLY
    parameters unconditionally, or POSITIONAL_OR_KEYWORD parameters that
    have a default.  The constraint comes from the annotation when present,
    otherwise from the type of the default value.  Raises ParameterError
    for anything else.
    NOTE(review): a keyword-only parameter without a default passes through
    with value=parameter.empty -- confirm downstream handles that.
    """
    if parameter.kind == parameter.KEYWORD_ONLY or parameter.kind == parameter.POSITIONAL_OR_KEYWORD and parameter.default is not parameter.empty:
        if parameter.annotation is not parameter.empty:
            constraint = parameter.annotation
        else:
            constraint = Parameter.constraint_from_type(parameter.default)
        return cls(
            name=parameter.name,
            value=parameter.default,
            constraint=constraint,
        )
    else:
        raise ParameterError('Only keyword parameters are supported')
Produces a parameter from a function or method
49,127
def constraint_from_type(cls, value):
    """Returns the constraint callable (a type) inferred from *value*.

    None yields no constraint; only str, int and bool are supported,
    anything else raises ParameterError.
    """
    if value is None:
        return None
    kind = type(value)
    if kind not in (str, int, bool):
        raise ParameterError('Parameter type cannot be %s' % kind)
    return kind
Returns the constraint callable given a value
49,128
def constraint_from_choices(cls, value_type: type, choices: collections.abc.Sequence):
    """Returns a constraint callable based on choices of a given type.

    The returned callable converts a value to *value_type* and raises
    ParameterError when the converted value is not one of *choices*.
    """
    choices_str = ', '.join(map(str, choices))

    def constraint(value):
        value = value_type(value)
        if value not in choices:
            raise ParameterError('Argument must be one of %s' % choices_str)
        return value

    constraint.__name__ = 'choices_%s' % value_type.__name__
    constraint.__doc__ = 'choice of %s' % choices_str
    return constraint
Returns a constraint callable based on choices of a given type
49,129
def arg_name(self):
    """Returns the parameter's name as a command line flag.

    Boolean parameters whose current value is truthy get a '--no-' prefix
    so that passing the flag turns them off.
    """
    flag = self.name.replace('_', '-')
    if self.constraint is bool and self.value:
        return '--no-%s' % flag
    return '--%s' % flag
Returns the name of the parameter as a command line flag
49,130
def consume_arguments(self, argument_list):
    """Consume this parameter's flag (and value) from the argument list.

    Returns the remaining arguments.  Boolean parameters are toggled;
    other constraints take the following argument as the new value, and
    ParameterError is raised if that argument is missing.
    """
    if not argument_list:
        return []
    if argument_list[0] != self.arg_name:
        # not our flag; leave the list untouched
        return argument_list
    rest = argument_list[1:]
    if self.constraint is bool:
        self.value = not self.value
    else:
        try:
            self.value = rest.pop(0)
        except IndexError:
            raise ParameterError('Argument %s expects a value' % self.arg_name)
    return rest
Takes arguments from a list while this parameter can accept them
49,131
def pip_command(self, command, *args):
    """Runs a pip command in-process, honouring the context's verbosity."""
    try:
        # pip >= 10 moved main() into the _internal package
        from pip._internal import main as pip_main
    except ImportError:
        from pip import main as pip_main
    full_args = [command, *args]
    if self.verbosity == 0:
        full_args.insert(0, '--quiet')
    elif self.verbosity == 2:
        full_args.insert(0, '--verbose')
    return pip_main(full_args)
Runs a pip command
49,132
def shell(self, command, *args, environment=None):
    """Runs a shell command, echoing it at debug verbosity.

    The context's own environment is extended with *environment*.
    Returns the process exit code.
    """
    full_command = ' '.join((command,) + args).strip()
    self.debug(self.yellow_style('$ %s' % full_command))
    env = self.env.copy()
    env.update(environment or {})
    return subprocess.call(full_command, shell=True, env=env)
Runs a shell command
49,133
def management_command(self, command, *args, **kwargs):
    """Runs a Django management command, defaulting verbosity and colour."""
    self.setup_django()
    kwargs.setdefault('verbosity', self.verbosity)
    if not self.use_colour:
        # NOTE(review): no_color=False when colour is disabled looks inverted -- confirm intent
        kwargs['no_color'] = False
    self.debug(self.yellow_style('$ manage.py %s' % command))
    return call_command(command, *args, **kwargs)
Runs a Django management command
49,134
def login(request, user):
    """Persist a user id and backend in the request session.

    This way a user doesn't have to reauthenticate on every request.
    Data set during the anonymous session is retained when the user logs
    in, unless the session already belongs to a different user (or the
    session auth hash fails to match), in which case it is flushed.
    """
    session_auth_hash = ''
    if user is None:
        user = request.user
    if hasattr(user, 'get_session_auth_hash'):
        session_auth_hash = user.get_session_auth_hash()
    if SESSION_KEY in request.session:
        session_key = request.session[SESSION_KEY]
        if session_key != user.pk or (session_auth_hash and request.session.get(HASH_SESSION_KEY) != session_auth_hash):
            # another user was logged in on this session: drop everything
            request.session.flush()
    else:
        # keep session data but rotate the key to prevent session fixation
        request.session.cycle_key()
    request.session[SESSION_KEY] = user.pk
    request.session[BACKEND_SESSION_KEY] = user.backend
    request.session[USER_DATA_SESSION_KEY] = user.user_data
    request.session[HASH_SESSION_KEY] = session_auth_hash
    update_token_in_session(request.session, user.token)
    if hasattr(request, 'user'):
        request.user = user
    rotate_token(request)  # rotate the CSRF token on login
    user_logged_in.send(sender=user.__class__, request=request, user=user)
Persist a user id and a backend in the request. This way a user doesn't have to reauthenticate on every request. Note that data set during the anonymous session is retained when the user logs in.
49,135
def get_user(request):
    """Returns the user model instance associated with the request session.

    Falls back to MojAnonymousUser when the session is incomplete, the
    backend is unknown, or the session auth hash fails verification (the
    session is flushed in that last case).
    """
    user = None
    try:
        user_id = request.session[SESSION_KEY]
        token = request.session[AUTH_TOKEN_SESSION_KEY]
        user_data = request.session[USER_DATA_SESSION_KEY]
        backend_path = request.session[BACKEND_SESSION_KEY]
    except KeyError:
        # incomplete session: treat as anonymous
        pass
    else:
        if backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = load_backend(backend_path)
            user = backend.get_user(user_id, token, user_data)
            if hasattr(user, 'get_session_auth_hash'):
                session_hash = request.session.get(HASH_SESSION_KEY)
                # constant-time compare defends against timing attacks
                session_hash_verified = session_hash and constant_time_compare(session_hash, user.get_session_auth_hash())
                if not session_hash_verified:
                    request.session.flush()
                    user = None
    return user or MojAnonymousUser()
Returns the user model instance associated with the given request session . If no user is retrieved an instance of MojAnonymousUser is returned .
49,136
def logout(request):
    """Removes the authenticated user's ID from the request and flushes
    their session data, preserving the active language selection.
    """
    user = getattr(request, 'user', None)
    if hasattr(user, 'is_authenticated') and not user.is_authenticated:
        user = None
    # signal fires before the flush so receivers can still read the session
    user_logged_out.send(sender=user.__class__, request=request, user=user)
    # keep the chosen language across the session flush
    language = request.session.get(LANGUAGE_SESSION_KEY)
    request.session.flush()
    if language is not None:
        request.session[LANGUAGE_SESSION_KEY] = language
    if hasattr(request, 'user'):
        request.user = MojAnonymousUser()
Removes the authenticated user's ID from the request and flushes their session data.
49,137
def fields(self):
    """Return a dict of all schema field names mapped to their current raw
    values on this Orm instance; unset fields are reported as None."""
    return {name: getattr(self, name, None) for name in self.schema.fields}
return all the fields and their raw values for this Orm instance . This property returns a dict with the field names and their current values
49,138
def create(cls, fields=None, **fields_kwargs):
    """Create an instance of cls with the passed in fields, persist it to
    the db, and return it."""
    orm = cls(fields, **fields_kwargs)
    orm.save()
    return orm
create an instance of cls with the passed in fields and set it into the db
49,139
def populate(self, fields=None, **fields_kwargs):
    """Hydrate this orm: combine the passed fields with any missing schema
    fields (defaulted to None) and run them all through _populate."""
    merged = self.make_dict(fields, fields_kwargs)
    hydrated = {name: merged.get(name, None) for name in self.schema.fields.keys()}
    self._populate(hydrated)
take the passed in fields combine them with missing fields that should be there and then run all those through appropriate methods to hydrate this orm .
49,140
def _populate(self, fields):
    """Run *fields* through their iget methods (mimicking values freshly
    read from the db), apply them via modify(), then reset modification
    tracking.  Note: mutates the passed-in dict in place.
    """
    schema = self.schema
    for k, v in fields.items():
        fields[k] = schema.fields[k].iget(self, v)
    self.modify(fields)
    self.reset_modified()
this runs all the fields through their iget methods to mimic them freshly coming out of the db then resets modified
49,141
def depopulate(self, is_update):
    """Get all the fields that need to be saved.

    Each value is passed through its field's iset method.  A field is kept
    when it was modified or its converted value is not None, except that an
    unchanged primary key is omitted on update.  For inserts, KeyError is
    raised if any required field is missing.
    """
    fields = {}
    schema = self.schema
    for k, field in schema.fields.items():
        is_modified = k in self.modified_fields
        orig_v = getattr(self, k)
        v = field.iset(self, orig_v, is_update=is_update, is_modified=is_modified)
        if is_modified or v is not None:
            # don't re-save an unchanged primary key on update
            if is_update and field.is_pk() and v == orig_v:
                continue
            else:
                fields[k] = v
    if not is_update:
        for field_name in schema.required_fields.keys():
            if field_name not in fields:
                raise KeyError("Missing required field {}".format(field_name))
    return fields
Get all the fields that need to be saved
49,142
def insert(self):
    """Persist the field values of this orm; returns True on success.

    On success the generated primary key is merged into the saved fields
    and the instance is re-populated as if freshly read from the db.
    """
    ret = True
    schema = self.schema
    fields = self.depopulate(False)
    q = self.query
    q.set_fields(fields)
    pk = q.insert()
    if pk:
        fields = q.fields
        fields[schema.pk.name] = pk
        self._populate(fields)
    else:
        ret = False
    return ret
persist the field values of this orm
49,143
def update(self):
    """Re-persist the updated field values of this orm; requires a primary
    key (raises ValueError otherwise).  Returns True on success.
    """
    ret = True
    fields = self.depopulate(True)
    q = self.query
    q.set_fields(fields)
    pk = self.pk
    if pk:
        # restrict the update to this row
        q.is_field(self.schema.pk.name, pk)
    else:
        raise ValueError("You cannot update without a primary key")
    if q.update():
        fields = q.fields
        self._populate(fields)
    else:
        ret = False
    return ret
re - persist the updated field values of this orm that has a primary key
49,144
def save(self):
    """Persist the fields of this object into the db: update when a usable
    primary key is set, otherwise insert."""
    pk = None
    # only reuse the pk when it is not itself one of the modified fields
    if self.schema.pk.name not in self.modified_fields:
        pk = self.pk
    if pk:
        return self.update()
    return self.insert()
Persist the fields in this object into the db; this will update if _id is set, otherwise it will insert.
49,145
def delete(self):
    """Delete the object from the db if a primary key is set.

    After deletion the pk attribute is cleared, modification tracking is
    reset, and every remaining non-None field is marked modified so a
    subsequent save() will re-insert the row.  Returns True when a delete
    was actually issued.
    """
    ret = False
    q = self.query
    pk = self.pk
    if pk:
        pk_name = self.schema.pk.name
        self.query.is_field(pk_name, pk).delete()
        setattr(self, pk_name, None)
        self.reset_modified()
        # mark remaining set fields as modified so the orm can be re-saved
        for field_name in self.schema.fields:
            if getattr(self, field_name, None) != None:
                self.modified_fields.add(field_name)
        ret = True
    return ret
delete the object from the db if pk is set
49,146
def reset_modified(self):
    """Reset field modification tracking.

    ObjectFields are always re-marked as modified because their contents
    can change without any attribute assignment being observed.
    """
    self.modified_fields = {
        name
        for name, field in self.schema.normal_fields.items()
        if isinstance(field, ObjectField)
    }
reset field modification tracking
49,147
def modify(self, fields=None, **fields_kwargs):
    """Update this instance's attributes from *fields*.

    Only names present in the schema are applied; returns the set of
    field names that were actually set.
    """
    changed = set()
    fields = self.make_dict(fields, fields_kwargs)
    fields = self._modify(fields)
    for name, value in fields.items():
        if name in self.schema.fields:
            setattr(self, name, value)
            changed.add(name)
    return changed
update the fields of this instance with the values in dict fields
49,148
def jsonable(self, *args, **options):
    """Return a public (json-serializable) dict of this instance's normal
    fields, omitting any whose jsonable value is None."""
    out = {}
    for name, field in self.schema.normal_fields.items():
        value = field.jsonable(self, getattr(self, name, None))
        if value is not None:
            out[name] = value
    return out
return a public version of this instance that can be jsonified
49,149
def merge2images(hdu1, hdu2, debugplot):
    """Merge 2 EMIR images, averaging the common (overlapping) region.

    Both HDU lists must have matching NAXIS and NAXIS1 (and NAXIS2 for 2D
    images); ValueError is raised otherwise.  Rows are summed; where both
    inputs have useful (non-border) pixels, the overlap is divided by 2,
    i.e. averaged.  Returns a new float32 PrimaryHDU with hdu1's header.

    NOTE(review): *debugplot* is accepted but unused in this body.
    """
    image_header = hdu1[0].header
    image_header_ = hdu2[0].header
    naxis = image_header['naxis']
    naxis_ = image_header_['naxis']
    if naxis != naxis_:
        raise ValueError('Incompatible NAXIS values: {}, {}'.format(naxis, naxis_))
    naxis1 = image_header['naxis1']
    naxis1_ = image_header_['naxis1']
    if naxis1 != naxis1_:
        raise ValueError('Incompatible NAXIS1 values: {}, {}'.format(naxis1, naxis1_))
    if naxis == 1:
        naxis2 = 1
    elif naxis == 2:
        naxis2 = image_header['naxis2']
        naxis2_ = image_header_['naxis2']
        if naxis2 != naxis2_:
            raise ValueError('Incompatible NAXIS2 values: {}, {}'.format(naxis2, naxis2_))
    else:
        raise ValueError('Unexpected NAXIS value: {}'.format(naxis))
    image2d_merged = np.zeros((naxis2, naxis1))
    data1 = hdu1[0].data
    data2 = hdu2[0].data
    for i in range(naxis2):
        sp1 = data1[i, :]
        sp2 = data2[i, :]
        # locate the non-zero (useful) pixel range of each spectrum
        jmin1, jmax1 = find_pix_borders(sp1, sought_value=0)
        jmin2, jmax2 = find_pix_borders(sp2, sought_value=0)
        image2d_merged[i, :] = sp1 + sp2
        useful1 = (jmin1 != -1) and (jmax1 != naxis1)
        useful2 = (jmin2 != -1) and (jmax2 != naxis1)
        if useful1 and useful2:
            # average the overlap of the two useful regions
            jmineff = max(jmin1, jmin2)
            jmaxeff = min(jmax1, jmax2)
            image2d_merged[i, jmineff:(jmaxeff + 1)] /= 2
    image_merged = fits.PrimaryHDU(data=image2d_merged.astype(np.float32), header=image_header)
    return image_merged
Merge 2 EMIR images averaging the common region .
49,150
def configure_environ(dsn_env_name='PROM_DSN', connection_class=DsnConnection):
    """Configure interfaces from environment variables; returns the list of
    interfaces that were registered."""
    registered = []
    for conn in dsnparse.parse_environs(dsn_env_name, parse_class=connection_class):
        inter = conn.interface
        set_interface(inter, conn.name)
        registered.append(inter)
    return registered
configure interfaces based on environment variables
49,151
def configure(dsn, connection_class=DsnConnection):
    """Configure (and return) an interface for querying a backend from *dsn*."""
    conn = dsnparse.parse(dsn, parse_class=connection_class)
    interface = conn.interface
    set_interface(interface, conn.name)
    return interface
configure an interface to be used to query a backend
49,152
def set_interface(interface, name=''):
    """Register *interface* under *name* without needing a DSN.

    Any interface previously registered under the same name is closed
    before being replaced.  Raises ValueError for a falsy interface.
    """
    global interfaces
    if not interface:
        raise ValueError('interface is empty')
    if name in interfaces:
        # shut down whatever we are replacing
        interfaces[name].close()
    interfaces[name] = interface
Don't want to bother with a DSN? Use this method to make an interface available.
49,153
async def open_websocket_server(sock, filter=None):
    """A context manager which serves this websocket.

    Yields the started websocket and guarantees it is closed on exit.
    NOTE(review): written as an async generator; presumably wrapped by an
    @asynccontextmanager-style decorator at the definition site -- confirm.
    """
    ws = await create_websocket_server(sock, filter=filter)
    try:
        yield ws
    finally:
        await ws.close()
A context manager which serves this websocket .
49,154
async def create_websocket_server(sock, filter=None):
    """A more low-level form of open_websocket_server.

    You are responsible for closing the returned websocket.
    """
    websocket = Websocket()
    await websocket.start_server(sock, filter=filter)
    return websocket
A more low - level form of open_websocket_server . You are responsible for closing this websocket .
49,155
def magic_fields(self):
    """Return only the magic fields (names starting with '_') of the schema."""
    return {name: value for name, value in self.fields.items() if name.startswith('_')}
the magic fields for the schema
49,156
def set_index(self, index_name, index):
    """Add an index to the schema, validating name and type; returns self.

    Raises ValueError when the name is empty, already defined, or when
    *index* is not an Index instance.
    """
    if not index_name:
        raise ValueError("index_name must have a value")
    if index_name in self.indexes:
        raise ValueError("index_name {} has already been defined on {}".format(
            index_name, str(self.indexes[index_name].fields)
        ))
    if not isinstance(index, Index):
        raise ValueError("{} is not an Index instance".format(type(index)))
    index.name = index_name
    self.indexes[index_name] = index
    return self
add an index to the schema
49,157
def schema(self):
    """Return the schema instance if this is a reference to another table.

    The resolved schema is cached on self._schema after the first lookup.
    """
    if not hasattr(self, "_schema"):
        target = self._type
        if isinstance(target, type):
            # a class: use its schema attribute if it has one
            resolved = getattr(target, "schema", None)
        elif isinstance(target, Schema):
            resolved = target
        else:
            # a dotted path string: import it and take the class's schema
            module, klass = utils.get_objects(target)
            resolved = klass.schema
        self._schema = resolved
    return self._schema
return the schema instance if this is reference to another table
49,158
def fval(self, instance):
    """Return the raw value this property holds internally for *instance*,
    or None when it has not been set.

    Uses dict.get instead of the original try/except with an unused
    exception variable.
    """
    return instance.__dict__.get(self.instance_field_name)
return the raw value that this property is holding internally for instance
49,159
def set_default_reference(self, method, reference):
    """Set the default reference for *method*.

    Raises ValueError when the method is not one of the available methods.
    """
    if method in self._available_methods:
        self._default_references[method] = reference
    else:
        raise ValueError('Unknown method: {0}'.format(method))
Set the default reference for a method .
49,160
def get_default_reference(self, method):
    """Returns the default reference for *method* (None when unset).

    Raises ValueError when the method is not one of the available methods.
    """
    if method in self._available_methods:
        return self._default_references.get(method)
    raise ValueError('Unknown method: {0}'.format(method))
Returns the default reference for a method .
49,161
def print_element_xray_transitions(self, element, file=sys.stdout, tabulate_kwargs=None):
    """Prints all x-ray transitions for an element with their IUPAC and
    Siegbahn notations, energy and probability, as a table written to
    *file*.

    Values that cannot be computed are shown as empty strings.
    """
    header = ['IUPAC', 'Siegbahn', 'Energy (eV)', 'Probability']
    rows = []
    for xraytransition in self.element_xray_transitions(element):
        # each lookup may fail independently; fall back to a blank cell
        # (except Exception instead of bare except, which would also
        # swallow KeyboardInterrupt/SystemExit)
        try:
            iupac = self.xray_transition_notation(xraytransition, 'iupac')
        except Exception:
            iupac = ''
        try:
            siegbahn = self.xray_transition_notation(xraytransition, 'siegbahn')
        except Exception:
            siegbahn = ''
        try:
            energy_eV = self.xray_transition_energy_eV(element, xraytransition)
        except Exception:
            energy_eV = ''
        try:
            probability = self.xray_transition_probability(element, xraytransition)
        except Exception:
            probability = ''
        rows.append([iupac, siegbahn, energy_eV, probability])
    # sort by energy, grouping blank (string) energies after numeric ones;
    # the original itemgetter(2) sort raises TypeError under Python 3 when
    # float and '' energies are mixed
    rows.sort(key=lambda row: (isinstance(row[2], str), row[2]))
    if tabulate_kwargs is None:
        tabulate_kwargs = {}
    file.write(tabulate.tabulate(rows, header, **tabulate_kwargs))
Prints all x - ray transitions for an element with their different notations and energy .
49,162
def get_subclasses(modulepath, parent_class):
    """Given a module (or module path), return the set of all subclasses of
    *parent_class* found in that module and any submodules."""
    if isinstance(modulepath, ModuleType):
        modules = get_modules(modulepath.__name__)
    else:
        modules = get_modules(modulepath)
    found = set()
    for module in modules:
        members = inspect.getmembers(
            module,
            lambda v: inspect.isclass(v) and issubclass(v, parent_class),
        )
        for _, klass in members:
            found.add(klass)
    return found
given a module return all the parent_class subclasses that are found in that module and any submodules .
49,163
def build_dump_order(orm_class, orm_classes):
    """Append orm classes to *orm_classes* in dependency order.

    Classes referenced through foreign-key (ref) fields are appended
    (recursively) before the classes that reference them; classes already
    present are skipped, which also protects against reference cycles.
    """
    if orm_class in orm_classes:
        return
    for field in orm_class.schema.fields.values():
        if field.is_ref():
            build_dump_order(field.schema.orm_class, orm_classes)
    if orm_class not in orm_classes:
        orm_classes.append(orm_class)
Pass in an array; when you encounter a ref, call this method again with the array. When something has no more refs, it gets appended to the array. Before anything is added to the array, it is checked to see if it is already in the list.
49,164
def main_dump(paths, directory, dry_run):
    """Dump all or part of the prom data; currently only works on Postgres.

    With *dry_run* set, prints the backup commands instead of running them.
    (The unused `conn = inter.connection_config` local was removed.)
    """
    table_map = get_table_map(paths)
    for conn_name, conn_info in table_map.items():
        inter = conn_info["interface"]
        table_names = conn_info["table_names"]
        cmd = get_base_cmd("backup", inter, directory)
        cmd.extend(table_names)
        if dry_run:
            echo.out(" ".join(cmd))
        else:
            run_cmd(cmd)
dump all or part of the prom data currently only works on Postgres databases
49,165
def main_restore(directory, conn_name):
    """Restore a database previously dumped with the dump command.

    (The unused `conn = inter.connection_config` local was removed.)
    """
    inter = get_interface(conn_name)
    cmd = get_base_cmd("restore", inter, directory)
    run_cmd(cmd)
Restore your database dumped with the dump command
49,166
def _get_fields(self, table_name, **kwargs):
    """Return all the fields for the given table.

    Queries the Postgres system catalogs (pg_attribute, pg_type,
    pg_index, pg_constraint, pg_class) and returns a dict mapping each
    column name to a description dict with keys: ``name``,
    ``field_type`` (a Python type), ``field_required``, ``pk`` and —
    for foreign keys — ``schema_table_name`` / ``ref_table_name``.

    NOTE(review): the type map uses ``long``, so this block is Python 2
    code.
    """
    ret = {}
    query_str = []
    # 'f' restricts the pg_constraint join to foreign-key constraints
    query_args = ['f', table_name]
    query_str.append('SELECT')
    query_str.append(', '.join([
        'a.attnum',
        'a.attname',
        'a.attnotnull',
        't.typname',
        'i.indisprimary',
        'c.relname AS confrelname',
    ]))
    query_str.append('FROM')
    query_str.append(' pg_attribute a')
    query_str.append('JOIN pg_type t ON a.atttypid = t.oid')
    # a column is part of the primary key when it appears in indkey of
    # a primary index
    query_str.append('LEFT JOIN pg_index i ON a.attrelid = i.indrelid')
    query_str.append(' AND a.attnum = any(i.indkey)')
    query_str.append('LEFT JOIN pg_constraint s ON a.attrelid = s.conrelid')
    query_str.append(' AND s.contype = {} AND a.attnum = any(s.conkey)'.format(self.val_placeholder))
    # c.relname is then the table referenced by the foreign key, if any
    query_str.append('LEFT JOIN pg_class c ON s.confrelid = c.oid')
    query_str.append('WHERE')
    query_str.append(' a.attrelid = {}::regclass'.format(self.val_placeholder))
    # skip dropped columns and system columns (attnum <= 0)
    query_str.append(' AND a.attisdropped = False')
    query_str.append(' AND a.attnum > 0')
    query_str.append('ORDER BY a.attnum ASC')
    query_str = os.linesep.join(query_str)
    fields = self.query(query_str, *query_args, **kwargs)
    # map Postgres type names to Python types
    pg_types = {
        "float8": float,
        "timestamp": datetime.datetime,
        "int2": int,
        "int4": int,
        "int8": long,
        "numeric": decimal.Decimal,
        "text": str,
        "bpchar": str,
        "varchar": str,
        "bool": bool,
        "date": datetime.date,
        "blob": bytearray,
    }
    for row in fields:
        field = {
            "name": row["attname"],
            "field_type": pg_types[row["typname"]],
            "field_required": row["attnotnull"],
            "pk": bool(row["indisprimary"]),
        }
        if row["confrelname"]:
            # this column is a foreign key; remember the referenced table
            field["schema_table_name"] = row["confrelname"]
            field["ref_table_name"] = row["confrelname"]
        ret[field["name"]] = field
    return ret
return all the fields for the given schema
49,167
def client_authentication(self, request, auth=None, **kwargs):
    """Authenticate the client behind *request*.

    Returns the verified auth info dict on success, otherwise an
    ``error_cls`` instance describing the failure.
    """
    try:
        auth_info = verify_client(self.endpoint_context, request, auth)
    except Exception as err:
        msg = "Failed to verify client due to: {}".format(err)
        logger.error(msg)
        return self.error_cls(error="unauthorized_client",
                              error_description=msg)
    # verification succeeded, but it must have yielded a client_id
    if 'client_id' not in auth_info:
        logger.error('No client_id, authentication failed')
        return self.error_cls(error="unauthorized_client",
                              error_description='unknown client')
    return auth_info
Deal with client authentication
49,168
def _post_parse_request(self, request, client_id='', **kwargs):
    """Validate a parsed token request before it is processed.

    Checks that the ``state`` in the request matches the state stored
    with the authorization code, and fills in a missing ``client_id``.
    Returns the (possibly augmented) request, or an ``error_cls``.
    """
    if 'state' in request:
        try:
            sinfo = self.endpoint_context.sdb[request['code']]
        except KeyError:
            logger.error('Code not present in SessionDB')
            return self.error_cls(error="unauthorized_client")
        # the state bound to the code must match the one sent now
        if sinfo['authn_req']['state'] != request['state']:
            logger.error('State value mismatch')
            return self.error_cls(error="unauthorized_client")
    if "client_id" not in request:
        request["client_id"] = client_id
    logger.debug("%s: %s" % (request.__class__.__name__, sanitize(request)))
    return request
This is where clients come to get their access tokens
49,169
def register(self, new_outputs, *args, **kwargs):
    """Register outputs together with their metadata.

    Positional metadata arguments are matched against ``meta_names``
    and merged into the keyword arguments before delegating to the
    base registry.
    """
    positional_meta = dict(zip(self.meta_names, args))
    kwargs.update(positional_meta)
    super(OutputRegistry, self).register(new_outputs, **kwargs)
Register outputs and metadata .
49,170
def initial_classification(self, obresult, target_is_sky=False):
    """Classify the input frames of an observation result.

    Builds one ImageInfo per frame with metadata pulled from the FITS
    primary header (uuid, exposure, airmass, mjd) plus validity flags
    saying whether the frame may be used as target and/or sky.

    :param obresult: observation result whose ``frames`` are classified
    :param target_is_sky: when True, TARGET frames also count as sky
    :return: list of ImageInfo objects, one per frame
    """
    # peek at the first frame to learn whether a bad-pixel-mask
    # extension is present in this dataset
    with obresult.frames[0].open() as baseimg:
        has_bpm_ext = 'BPM' in baseimg
    self.logger.debug('images have BPM extension: %s', has_bpm_ext)
    images_info = []
    for f in obresult.frames:
        with f.open() as img:
            hdr = img[0].header
            iinfo = ImageInfo(f)
            finfo = {}
            iinfo.metadata = finfo
            finfo['uuid'] = hdr['UUID']
            finfo['exposure'] = hdr['EXPTIME']
            finfo['airmass'] = hdr['airmass']
            finfo['mjd'] = hdr['tstamp']
            iinfo.label = 'result_image_{}'.format(finfo['uuid'])
            iinfo.mask = nfcom.Extension("BPM")
            iinfo.objmask_data = None
            iinfo.valid_target = False
            iinfo.valid_sky = False
            # NOTE(review): the image type is hard-coded to 'TARGET', so
            # the 'SKY' branch below is currently unreachable — presumably
            # a placeholder until the type is read from metadata; confirm.
            iinfo.itype = 'TARGET'
            if iinfo.itype == 'TARGET':
                iinfo.valid_target = True
                if target_is_sky:
                    iinfo.valid_sky = True
            if iinfo.itype == 'SKY':
                iinfo.valid_sky = True
            images_info.append(iinfo)
    return images_info
Classify input frames
49,171
def update_or_create(self, *args, **kwargs):
    """Update-or-create wrapper that emits a signal on update.

    The signal is only sent for the "updated" case, because the base
    implementation goes through ``self.create`` when creating, which
    already triggers the signal.
    """
    obj, created = super().update_or_create(*args, **kwargs)
    if created:
        return obj, created
    return self.with_signal(result=(obj, created))
Only sent when not created since default implementation will call self . create when creating which triggers our signal already .
49,172
def update(self, duration):
    """Add a recorded duration sample.

    Negative durations are ignored; valid ones update the histogram
    and mark the rate meter.
    """
    if duration < 0:
        # invalid sample — drop it
        return
    self.histogram.update(duration)
    self.meter.mark()
Add a recorded duration .
49,173
def _build_g5k_conf(vmong5k_conf):
    """Build the g5k provider conf from the vmong5k conf.

    All requested clusters must live on a single site; raises otherwise.
    """
    clusters = [machine.cluster for machine in vmong5k_conf.machines]
    sites = g5k_api_utils.get_clusters_sites(clusters)
    site_names = set(sites.values())
    if len(site_names) > 1:
        raise Exception("Multisite deployment not supported yet")
    return _do_build_g5k_conf(vmong5k_conf, site_names.pop())
Build the conf of the g5k provider from the vmong5k conf .
49,174
def copy_model_instance(obj):
    """Copy a Django model instance into a plain dict.

    Automatically created fields (auto PKs, auto-created reverse
    relations) are excluded.
    """
    own_fields = getattr(obj, '_meta').get_fields(include_parents=False)
    return {
        field.name: getattr(obj, field.name)
        for field in own_fields
        if not field.auto_created
    }
Copy Django model instance as a dictionary excluding automatically created fields like an auto - generated sequence as a primary key or an auto - created many - to - one reverse relation .
49,175
def load_data(self, *args, **kwargs):
    """Collect positional and keyword arguments into data and apply units.

    Positional arguments are mapped to parameter names through the
    ``argpos`` entry declared in each parameter's ``extras``.

    NOTE(review): uses ``iteritems`` — Python 2 code.
    """
    # map declared argument position -> parameter name
    argpos = {
        v['extras']['argpos']: k
        for k, v in self.parameters.iteritems()
        if 'argpos' in v['extras']
    }
    # positional args win their slot; keyword args are merged on top
    data = dict({argpos[n]: a for n, a in enumerate(args)}, **kwargs)
    return self.apply_units_to_cache(data)
Collects positional and keyword arguments into data and applies units .
49,176
def grid_reload_from_name(job_name):
    """Reload all running or pending Grid'5000 jobs named *job_name*.

    Scans every non-excluded site; raises
    EnosG5kDuplicateJobsError when a site holds more than one match.
    """
    gk = get_api_client()
    candidate_sites = [s for s in get_all_sites_obj()
                       if s.uid not in gk.excluded_site]
    jobs = []
    for site in candidate_sites:
        logger.info("Reloading %s from %s" % (job_name, site.uid))
        matching = site.jobs.list(name=job_name,
                                  state="waiting,launching,running")
        if len(matching) > 1:
            # a name must identify at most one job per site
            raise EnosG5kDuplicateJobsError(site, job_name)
        if matching:
            logger.info("Reloading %s from %s" % (matching[0].uid, site.uid))
            jobs.append(matching[0])
    return jobs
Reload all running or pending jobs of Grid 5000 with a given name .
49,177
def grid_reload_from_ids(oargrid_jobids):
    """Reload Grid'5000 jobs from their (site, job_id) pairs."""
    gk = get_api_client()
    return [gk.sites[site].jobs[job_id]
            for site, job_id in oargrid_jobids]
Reload all running or pending jobs of Grid 5000 from their ids
49,178
def grid_destroy_from_name(job_name):
    """Delete every running/pending job carrying *job_name*."""
    for job in grid_reload_from_name(job_name):
        job.delete()
        logger.info("Killing the job (%s, %s)" % (job.site, job.uid))
Destroy all the jobs with a given name .
49,179
def grid_destroy_from_ids(oargrid_jobids):
    """Delete all the jobs identified by (site, job_id) pairs."""
    for job in grid_reload_from_ids(oargrid_jobids):
        job.delete()
    logger.info("Killing the jobs %s" % oargrid_jobids)
Destroy all the jobs with corresponding ids
49,180
def wait_for_jobs(jobs):
    """Block until every job is running, polling every 5 seconds.

    Raises when any job enters the "error" state.
    """
    while True:
        time.sleep(5)
        everything_up = True
        for job in jobs:
            job.refresh()
            scheduled = getattr(job, "scheduled_at", None)
            if scheduled is not None:
                logger.info("Waiting for %s on %s [%s]" % (job.uid, job.site, _date2h(scheduled)))
            if job.state == "error":
                raise Exception("The job %s is in error state" % job)
            everything_up = everything_up and job.state == "running"
        if everything_up:
            break
    logger.info("All jobs are Running !")
Waits for all the jobs to be running.
49,181
def grid_deploy(site, nodes, options):
    """Start a deployment on *site* and wait until it finishes.

    :param site: site uid
    :param nodes: nodes to deploy on
    :param options: deployment options; ``env_name`` is consumed and
        forwarded to the API as ``environment``
    :return: (deployed_nodes, undeployed_nodes)
    """
    gk = get_api_client()
    # the API expects "environment", our conf uses "env_name"
    options.update(environment=options.pop("env_name"))
    options.update(nodes=nodes)
    options.update(key=DEFAULT_SSH_KEYFILE.read_text())
    logger.info("Deploying %s with options %s" % (nodes, options))
    deployment = gk.sites[site].deployments.create(options)
    # poll until kadeploy reports a final status
    while deployment.status not in ["terminated", "error"]:
        deployment.refresh()
        print("Waiting for the end of deployment [%s]" % deployment.uid)
        time.sleep(10)
    deploy = []
    undeploy = []
    if deployment.status == "terminated":
        deploy = [node for node, v in deployment.result.items()
                  if v["state"] == "OK"]
        undeploy = [node for node, v in deployment.result.items()
                    if v["state"] == "KO"]
    elif deployment.status == "error":
        # a global failure: consider nothing deployed
        undeploy = nodes
    return deploy, undeploy
Deploy and wait for the deployment to be finished .
49,182
def set_nodes_vlan(site, nodes, interface, vlan_id):
    """Put the given interface of each node into a specific vlan."""
    def _to_network_address(host):
        # kavlan network address: suffix the short hostname with
        # "-<interface>", keep the domain part untouched
        parts = host.split('.')
        parts[0] = parts[0] + "-" + interface
        return ".".join(parts)
    gk = get_api_client()
    addresses = [_to_network_address(node) for node in nodes]
    gk.sites[site].vlans[str(vlan_id)].submit({"nodes": addresses})
Set the interface of the nodes in a specific vlan .
49,183
def clusters_sites_obj(clusters):
    """Map each requested cluster to its site object."""
    all_clusters = get_all_clusters_sites()
    return {
        cluster: get_site_obj(site)
        for cluster, site in all_clusters.items()
        if cluster in clusters
    }
Get all the corresponding sites of the passed clusters .
49,184
def get_all_clusters_sites():
    """Return a dict mapping every cluster uid to its site uid."""
    gk = get_api_client()
    mapping = {}
    for site in gk.sites.list():
        for cluster in site.clusters.list():
            mapping[cluster.uid] = site.uid
    return mapping
Get all the cluster of all the sites .
49,185
def get_nodes(cluster):
    """List all the nodes of *cluster*."""
    gk = get_api_client()
    site = get_cluster_site(cluster)
    return gk.sites[site].clusters[cluster].nodes.list()
Get all the nodes of a given cluster .
49,186
def get_cluster_interfaces(cluster, extra_cond=lambda nic: True):
    """Return sorted (device, name) pairs for matching cluster NICs.

    Only mountable, non-management Ethernet interfaces satisfying
    *extra_cond* are kept.
    """
    def _keep(nic):
        return (nic['mountable']
                and nic['interface'] == 'Ethernet'
                and not nic['management']
                and extra_cond(nic))
    return sorted((nic['device'], nic['name'])
                  for nic in get_nics(cluster) if _keep(nic))
Get the network interfaces names corresponding to a criteria .
49,187
def get_clusters_interfaces(clusters, extra_cond=lambda nic: True):
    """Return, for each cluster, its available network interfaces."""
    interfaces = {}
    for cluster in clusters:
        interfaces.setdefault(
            cluster, get_cluster_interfaces(cluster, extra_cond=extra_cond))
    return interfaces
Returns for each cluster the available cluster interfaces
49,188
def _do_synchronise_jobs(walltime, machines):
    """Compute a common reservation date for all the jobs.

    Returns None when no synchronisation is needed (single cluster or
    single site), the start timestamp when all clusters can host the
    demand, and raises EnosG5kSynchronisationError otherwise.
    """
    offset = SYNCHRONISATION_OFFSET
    start = time.time() + offset
    _t = time.strptime(walltime, "%H:%M:%S")
    _walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec
    # number of nodes requested per cluster
    demands = defaultdict(int)
    for machine in machines:
        cluster = machine["cluster"]
        demands[cluster] += machine["nodes"]
    if len(list(demands.keys())) <= 1:
        logger.debug("Only one cluster detected: no synchronisation needed")
        return None
    clusters = clusters_sites_obj(list(demands.keys()))
    sites = set(list(clusters.values()))
    if len(sites) <= 1:
        logger.debug("Only one site detected: no synchronisation needed")
        return None
    ok = True
    for cluster, nodes in demands.items():
        cluster_status = clusters[cluster].status.list()
        ok = ok and can_start_on_cluster(cluster_status.nodes, nodes,
                                         start, _walltime)
        if not ok:
            break
    if ok:
        logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites))
        return start
    # BUG FIX: this raise used to be guarded by ``if start is None``,
    # which can never be true (start is always a float), so an
    # unsatisfiable demand silently returned None instead of failing.
    raise EnosG5kSynchronisationError(sites)
This returns a common reservation date for all the jobs .
49,189
def add_data(self, rawdata):
    """Process a batch of raw protocol frames and update room state.

    Each frame is dispatched to a handler method whose name is built
    from ``self.__head`` plus the frame's target; unknown targets fall
    through to ``_handle_unhandled``.
    """
    for data in rawdata:
        try:
            item = data[0]
            if item[0] == 2:
                # type-2 messages carry nothing to handle
                continue
            if item[0] != 0:
                warnings.warn(f"Unknown message type '{item[0]}'", Warning)
                continue
            item = item[1]
            target = str(item[0])
            try:
                data = item[1]
            except IndexError:
                # payload is optional
                data = dict()
            try:
                # dynamic dispatch: self.<head><target>(data)
                method = getattr(self, self.__head + target)
                method(data)
            except AttributeError:
                self._handle_unhandled(target, data)
        except IndexError:
            LOGGER.warning("Wrongly constructed message received: %r", data)
    self.conn.process_queues()
Add data to the given room's state.
49,190
def register_callback(self):
    """Allocate a callback id plus a queue to wait on.

    :return: (cid, queue) — the queue receives the callback result
    """
    cid = str(self.__cid)
    self.__cid += 1
    waiter = queue.Queue()
    self.__callbacks[cid] = waiter
    return cid, waiter
Register callback that we will have to wait for
49,191
def _handle_callback ( self , data ) : cb_id = data . get ( "id" ) args = data . get ( "args" ) event = self . __callbacks . pop ( cb_id , None ) if not event : return if not args : event . put ( args ) return err , info = args if err is None : event . put ( info ) else : LOGGER . warning ( "Callback returned error of %s" , str ( err ) ) event . put ( err )
Handle lain's callback. Only used with getFileinfo so far.
49,192
def _handle_userCount ( self , data ) : self . room . user_count = data self . conn . enqueue_data ( "user_count" , self . room . user_count )
Handle user count changes
49,193
def _handle_userInfo(self, data):
    """Handle a user-information push.

    Copies the received key/value pairs onto the session user / room
    state and re-broadcasts each of them on the event queue.
    """
    for k, v in data.items():
        if k == "nick":
            # the server serialises a missing nick as the string "None"
            if v == "None":
                v = "Volaphile"
            setattr(self.room.user, k, v)
            self.conn.enqueue_data(k, self.room.user.nick)
        elif k != "profile":
            if not hasattr(self.room, k):
                warnings.warn(f"Skipping unset property {k}", ResourceWarning)
                continue
            setattr(self.room, k, v)
            self.conn.enqueue_data(k, getattr(self.room, k))
        # NOTE(review): placement of the two lines below was reconstructed
        # from flattened source; they appear to run once per received
        # key/value pair — confirm against upstream.
        self.room.user_info = k, v
        self.conn.enqueue_data("user_info", self.room.user_info)
Handle user information
49,194
def _handle_config ( self , data ) : self . room . config . update ( data ) self . conn . enqueue_data ( "config" , data )
Handle initial config push and config changes
49,195
def _handle_files(self, data):
    """Handle new files being uploaded.

    Builds a File object per entry and records it in the room's file
    dict.  For incremental pushes each file is announced individually;
    for the initial ``set`` push a single ``initial_files`` event with
    the whole list is emitted instead.
    """
    # "set" marks the initial, complete file listing
    initial = data.get("set", False)
    files = data["files"]
    for f in files:
        try:
            fobj = File(
                self.room,
                self.conn,
                f[0],
                f[1],
                type=f[2],
                size=f[3],
                # server sends milliseconds; convert to seconds
                expire_time=int(f[4]) / 1000,
                uploader=f[6].get("nick") or f[6].get("user"),
            )
            self.room.filedict = fobj.fid, fobj
            if not initial:
                self.conn.enqueue_data("file", fobj)
        except Exception:
            # a malformed entry must not kill the whole batch
            import pprint
            LOGGER.exception("bad file")
            pprint.pprint(f)
    if initial:
        self.conn.enqueue_data("initial_files", self.room.files)
Handle new files being uploaded
49,196
def _handle_delete_file ( self , data ) : file = self . room . filedict . get ( data ) if file : self . room . filedict = data , None self . conn . enqueue_data ( "delete_file" , file )
Handle files being removed
49,197
def _handle_chat(self, data):
    """Handle an incoming chat message."""
    message = ChatMessage.from_data(self.room, self.conn, data)
    self.conn.enqueue_data("chat", message)
Handle chat messages
49,198
def _handle_changed_config ( self , change ) : key , value = change . get ( "key" ) , change . get ( "value" ) self . room . config . update ( { key : value } ) self . conn . enqueue_data ( "config" , self . room . config )
Handle configuration changes
49,199
def _handle_chat_name ( self , data ) : self . room . user . nick = data self . conn . enqueue_data ( "user" , self . room . user )
Handle user name changes