idx
int64 0
63k
| question
stringlengths 53
5.28k
| target
stringlengths 5
805
|
|---|---|---|
4,400
|
def parts(self, *args, **kwargs):
    """Retrieve parts belonging to this activity.

    Thin wrapper around the client's ``parts`` query with ``activity``
    fixed to this activity's id; all other arguments pass through.
    """
    client = self._client
    return client.parts(*args, activity=self.id, **kwargs)
|
Retrieve parts belonging to this activity .
|
4,401
|
def associated_parts(self, *args, **kwargs):
    """Retrieve models and instances belonging to this activity.

    :return: a 2-tuple ``(models, instances)`` produced by two ``parts``
        queries filtered on ``Category.MODEL`` and ``Category.INSTANCE``.
    """
    models = self.parts(category=Category.MODEL, *args, **kwargs)
    instances = self.parts(category=Category.INSTANCE, *args, **kwargs)
    return models, instances
|
Retrieve models and instances belonging to this activity .
|
4,402
|
def subprocess(self):
    """Retrieve the subprocess in which this activity is defined.

    :raises NotFoundError: when this task lives at the top level (its
        container equals the scope's root container).
    """
    data = self._json_data
    subprocess_id = data.get('container')
    if subprocess_id == data.get('root_container'):
        raise NotFoundError("Cannot find subprocess for this task '{}', "
                            "as this task exist on top level.".format(self.name))
    return self._client.activity(pk=subprocess_id, scope=self.scope_id)
|
Retrieve the subprocess in which this activity is defined .
|
4,403
|
def siblings(self, **kwargs):
    """Retrieve the other activities in this activity's subprocess.

    Queries all activities sharing this activity's container (the result
    includes this activity itself).
    """
    container_id = self._json_data.get('container')
    return self._client.activities(container=container_id,
                                   scope=self.scope_id, **kwargs)
|
Retrieve the other activities that also belong to the subprocess .
|
4,404
|
def create(self, *args, **kwargs):
    """Create a new activity belonging to this subprocess.

    :raises IllegalArgumentError: when this activity is not a subprocess.
    """
    if self.activity_type != ActivityType.SUBPROCESS:
        raise IllegalArgumentError("One can only create a task under a subprocess.")
    return self._client.create_activity(self.id, *args, **kwargs)
|
Create a new activity belonging to this subprocess .
|
4,405
|
def customization(self):
    """Get a customization object for this activity's customization."""
    # Imported lazily to avoid a circular import at module load time.
    from .customization import ExtCustomization
    return ExtCustomization(activity=self, client=self._client)
|
Get a customization object representing the customization of the activity .
|
4,406
|
def all_stop_places_quays(self) -> list:
    """Get all stop places and quays as one list.

    :return: a new list with the stop ids followed by the quay ids.
    """
    # List concatenation replaces the original copy-then-append loop.
    return list(self.stops) + list(self.quays)
|
Get all stop places and quays
|
4,407
|
async def expand_all_quays(self) -> None:
    """Find all quays from the configured stop places.

    Queries the Entur GraphQL API once for all ``self.stops`` and, for
    every stop place with more than one quay, appends to ``self.quays``
    the ids of quays that currently have estimated calls.
    """
    if not self.stops:
        return
    headers = {'ET-Client-Name': self._client_name}
    request = {
        'query': GRAPHQL_STOP_TO_QUAY_TEMPLATE,
        'variables': {
            'stops': self.stops,
            'omitNonBoarding': self.omit_non_boarding,
        },
    }
    # NOTE(review): recent async_timeout versions require ``async with``;
    # plain ``with`` suggests an older pin — confirm.
    with async_timeout.timeout(10):
        resp = await self.web_session.post(RESOURCE, json=request, headers=headers)
    if resp.status != 200:
        _LOGGER.error(
            "Error connecting to Entur, response http status code: %s",
            resp.status)
        return None
    result = await resp.json()
    if 'errors' in result:
        return
    for stop_place in result['data']['stopPlaces']:
        # Stops with a single quay are skipped: the stop id suffices.
        if len(stop_place['quays']) > 1:
            for quay in stop_place['quays']:
                if quay['estimatedCalls']:
                    self.quays.append(quay['id'])
|
Find all quays from stop places .
|
4,408
|
async def update(self) -> None:
    """Get the latest data from api.entur.org.

    Posts the GraphQL query for the configured stops/quays, stores the
    response payload in ``self._data`` and processes each returned stop
    place and quay via ``_process_place``.
    """
    headers = {'ET-Client-Name': self._client_name}
    request = {
        'query': self.get_gql_query(),
        'variables': {
            'stops': self.stops,
            'quays': self.quays,
            'whitelist': {'lines': self.line_whitelist},
            'numberOfDepartures': self.number_of_departures,
            'omitNonBoarding': self.omit_non_boarding,
        },
    }
    with async_timeout.timeout(10):
        resp = await self.web_session.post(RESOURCE, json=request, headers=headers)
    if resp.status != 200:
        _LOGGER.error(
            "Error connecting to Entur, response http status code: %s",
            resp.status)
        return None
    result = await resp.json()
    if 'errors' in result:
        # BUG FIX: the original passed a '{error}' placeholder to the
        # logger, which uses %-style lazy formatting, so the error list
        # was never rendered into the message.
        _LOGGER.warning(
            "Entur API responded with error message: %s", result['errors'])
        return
    self._data = result['data']
    if 'stopPlaces' in self._data:
        for stop in self._data['stopPlaces']:
            self._process_place(stop, False)
    if 'quays' in self._data:
        for quay in self._data['quays']:
            self._process_place(quay, True)
|
Get the latest data from api . entur . org .
|
4,409
|
def _process_place(self, place: dict, is_platform: bool) -> None:
    """Store a Place built from the place dictionary in ``self.info``."""
    self.info[place['id']] = Place(place, is_platform)
|
Extract information from place dictionary .
|
4,410
|
def serializable_list(
        olist, attrs_to_serialize=None, rels_to_expand=None,
        group_listrels_by=None, rels_to_serialize=None,
        key_modifications=None, groupby=None, keyvals_to_merge=None,
        preserve_order=False, dict_struct=None, dict_post_processors=None):
    """Convert a list of model instances to a list of dicts via ``todict``.

    :param olist: list of model instances to serialize
    :param groupby: optional keys to deep-group the result by
    :param preserve_order: with groupby, keep group insertion order (the
        grouped result is passed through the json encoder in that case)
    :param keyvals_to_merge: optional list of dicts merged pairwise into
        the serialized objects (only used on the non-groupby path)
    """
    if groupby:
        if preserve_order:
            result = json_encoder(deep_group(
                olist, keys=groupby, serializer='todict',
                preserve_order=preserve_order,
                serializer_kwargs={
                    'rels_to_serialize': rels_to_serialize,
                    'rels_to_expand': rels_to_expand,
                    'attrs_to_serialize': attrs_to_serialize,
                    'group_listrels_by': group_listrels_by,
                    'key_modifications': key_modifications,
                    'dict_struct': dict_struct,
                    'dict_post_processors': dict_post_processors}))
        else:
            result = deep_group(
                olist, keys=groupby, serializer='todict',
                preserve_order=preserve_order,
                serializer_kwargs={
                    'rels_to_serialize': rels_to_serialize,
                    'rels_to_expand': rels_to_expand,
                    'attrs_to_serialize': attrs_to_serialize,
                    'group_listrels_by': group_listrels_by,
                    'key_modifications': key_modifications,
                    'dict_struct': dict_struct,
                    'dict_post_processors': dict_post_processors})
        return result
    else:
        # NOTE(review): on Python 3 this is a lazy map object unless
        # keyvals_to_merge materializes it below — confirm callers accept
        # an iterator rather than a list.
        result_list = map(
            lambda o: serialized_obj(
                o, attrs_to_serialize=attrs_to_serialize,
                rels_to_expand=rels_to_expand,
                group_listrels_by=group_listrels_by,
                rels_to_serialize=rels_to_serialize,
                key_modifications=key_modifications,
                dict_struct=dict_struct,
                dict_post_processors=dict_post_processors),
            olist)
        if keyvals_to_merge:
            result_list = [merge(obj_dict, kvdict)
                           for obj_dict, kvdict
                           in zip(result_list, keyvals_to_merge)]
        return result_list
|
Converts a list of model instances to a list of dictionaries using their todict method .
|
4,411
|
def jsoned(struct, wrap=True, meta=None, struct_key='result',
           pre_render_callback=None):
    """Provide a JSON dump of the struct."""
    structured_value = structured(
        struct, wrap=wrap, meta=meta, struct_key=struct_key,
        pre_render_callback=pre_render_callback)
    return _json.dumps(structured_value, default=json_encoder)
|
Provides a json dump of the struct
|
4,412
|
def as_list(func):
    """Decorator returning a JSON response for a list of model objects.

    The wrapped function must return a list of model instances; they are
    serialized into a JSON list response. A ready ``Response`` object is
    passed through untouched.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if isinstance(result, Response):
            return result
        params = _serializable_params(request.args, check_groupby=True)
        return as_json_list(result, **params)
    return wrapper
|
A decorator used to return a JSON response of a list of model objects . It expects the decorated function to return a list of model instances . It then converts the instances to dicts and serializes them into a json response
|
4,413
|
def as_processed_list(func):
    """Decorator returning a filtered JSON list response of model objects.

    Copies whitelisted query-string parameters into the wrapped function's
    keyword arguments (skipping reserved names and operator expressions),
    then renders the function's output as a JSON list.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: inspect.getargspec was removed in Python 3.11;
        # getfullargspec is the drop-in replacement for its ``.args``.
        func_args = inspect.getfullargspec(func).args
        for kw in request.args:
            if (kw in func_args and kw not in RESTRICTED
                    and not any(request.args.get(kw).startswith(op)
                                for op in OPERATORS)
                    and not any(kw.endswith(op) for op in OPERATORS)):
                kwargs[kw] = request.args.get(kw)
        func_output = func(*args, **kwargs)
        return process_args_and_render_json_list(func_output)
    return wrapper
|
A decorator used to return a JSON response of a list of model objects . It differs from as_list in that it accepts a variety of querying parameters and can use them to filter and modify the results . It expects the decorated function to return either Model Class to query or a SQLAlchemy filter which exposes a subset of the instances of the Model class . It then converts the instances to dicts and serializes them into a json response
|
4,414
|
def as_obj(func):
    """Decorator returning a JSON response with a dict form of a model.

    The wrapped function must return a single model instance.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        return render_json_obj_with_requested_structure(func(*args, **kwargs))
    return wrapper
|
A decorator used to return a JSON response with a dict representation of the model instance . It expects the decorated function to return a Model instance . It then converts the instance to dicts and serializes it into a json response
|
4,415
|
def execute(self, interactive=False):
    """Execute the service.

    :param interactive: whether to start the service interactively
    :raises APIError: when the server does not accept the execution.
    :return: a ServiceExecution built from the first result.
    """
    url = self._client._build_url('service_execute', service_id=self.id)
    response = self._client._request(
        'GET', url, params=dict(interactive=interactive, format='json'))
    if response.status_code != requests.codes.accepted:
        raise APIError("Could not execute service '{}': {}".format(
            self, (response.status_code, response.json())))
    data = response.json()
    return ServiceExecution(json=data.get('results')[0], client=self._client)
|
Execute the service .
|
4,416
|
def edit(self, name=None, description=None, version=None, **kwargs):
    """Edit Service details.

    :param name: (optional) new name, must be a string
    :param description: (optional) new description, must be a string
    :param version: (optional) new script version, must be a string
    :raises IllegalArgumentError: when name/description/version is not a string
    :raises APIError: when the update request fails
    """
    update_dict = {'id': self.id}
    if name:
        if not isinstance(name, str):
            raise IllegalArgumentError("name should be provided as a string")
        update_dict.update({'name': name})
    if description:
        if not isinstance(description, str):
            raise IllegalArgumentError("description should be provided as a string")
        update_dict.update({'description': description})
    if version:
        if not isinstance(version, str):
            # BUG FIX: the original re-used the description error message here.
            raise IllegalArgumentError("version should be provided as a string")
        update_dict.update({'script_version': version})
    if kwargs:
        update_dict.update(**kwargs)
    response = self._client._request(
        'PUT', self._client._build_url('service', service_id=self.id),
        json=update_dict)
    if response.status_code != requests.codes.ok:
        raise APIError("Could not update Service ({})".format(response))
    # Only sync local attributes after the server accepted the change.
    if name:
        self.name = name
    if version:
        self.version = version
|
Edit Service details .
|
4,417
|
def delete(self):
    """Delete this service.

    :raises APIError: when the server does not confirm the deletion.
    """
    url = self._client._build_url('service', service_id=self.id)
    response = self._client._request('DELETE', url)
    if response.status_code != requests.codes.no_content:
        raise APIError("Could not delete service: {} with id {}".format(self.name, self.id))
|
Delete this service .
|
4,418
|
def get_executions(self, **kwargs):
    """Retrieve the executions related to the current service."""
    return self._client.service_executions(
        service=self.id, scope=self.scope_id, **kwargs)
|
Retrieve the executions related to the current service .
|
4,419
|
def service(self):
    """Retrieve (and cache) the Service this execution belongs to."""
    if not self._service:
        # Lazily fetched once, then reused.
        self._service = self._client.service(id=self.service_id)
    return self._service
|
Retrieve the Service object to which this execution is associated .
|
4,420
|
def terminate(self):
    """Terminate the Service execution.

    :raises APIError: when the server does not accept the termination.
    """
    url = self._client._build_url('service_execution_terminate',
                                  service_execution_id=self.id)
    response = self._client._request('GET', url, params=dict(format='json'))
    if response.status_code != requests.codes.accepted:
        # BUG FIX: the original error message said "execute" although this
        # method terminates an execution.
        raise APIError("Could not terminate service execution '{}': {}".format(self, response))
|
Terminate the Service execution .
|
4,421
|
def get_log(self, target_dir=None, log_filename='log.txt'):
    """Retrieve the log of the service execution and save it to disk.

    :param target_dir: directory for the log file (defaults to cwd)
    :param log_filename: name of the log file
    :raises APIError: when the download fails
    """
    directory = target_dir or os.getcwd()
    full_path = os.path.join(directory, log_filename)
    url = self._client._build_url('service_execution_log',
                                  service_execution_id=self.id)
    response = self._client._request('GET', url)
    if response.status_code != requests.codes.ok:
        raise APIError("Could not download service execution log")
    with open(full_path, 'w+b') as f:
        for chunk in response:
            f.write(chunk)
|
Retrieve the log of the service execution .
|
4,422
|
def get_notebook_url(self):
    """Get the url of the notebook when executed in interactive mode.

    :raises APIError: when the url cannot be retrieved.
    """
    url = self._client._build_url('service_execution_notebook_url',
                                  service_execution_id=self.id)
    response = self._client._request('GET', url, params=dict(format='json'))
    if response.status_code != requests.codes.ok:
        raise APIError("Could not retrieve notebook url '{}': {}".format(self, response))
    return response.json().get('results')[0].get('url')
|
Get the url of the notebook if the notebook is executed in interactive mode .
|
4,423
|
def sendMessage(self, data):
    """Send a websocket data frame to the client.

    If ``data`` is a ``unicode`` object the frame is sent as Text,
    otherwise (e.g. a bytearray) as Binary.

    NOTE(review): ``unicode`` only exists on Python 2; on Python 3 this
    raises NameError — confirm the targeted interpreter.
    """
    opcode = BINARY
    if isinstance(data, unicode):
        opcode = TEXT
    # First argument presumably marks a control/fin flag — confirm against
    # _sendMessage's definition.
    self._sendMessage(False, opcode, data)
|
Send websocket data frame to the client . If data is a unicode object then the frame is sent as Text . If the data is a bytearray object then the frame is sent as Binary .
|
4,424
|
def _shape_array ( array1 , array2 ) : if len ( array1 ) > len ( array2 ) : new_array = array2 old_array = array1 else : new_array = array1 old_array = array2 length = len ( old_array ) - len ( new_array ) for i in range ( length ) : n = new_array [ - 1 ] . copy ( ) n [ 0 : : 3 ] += 1 n [ 2 : : 3 ] = 0 new_array = np . vstack ( [ new_array , [ n ] ] ) arrays = np . hstack ( [ old_array , new_array ] ) return arrays
|
Function that equalises the input arrays by zero - padding the shortest one .
|
4,425
|
def _create_txt_from_str(in_path, channels, new_path):
    """Generate a text file with synchronised signals from the input file.

    Loads the OpenSignals file at ``in_path``, synchronises the signals
    selected by ``channels`` (one per device) and writes a new
    OpenSignals text file at ``new_path`` with the delay compensated.

    NOTE(review): assumes exactly two devices/signals — confirm.
    """
    header = ["# OpenSignals Text File Format"]
    files = [bsnb.load(in_path)]
    with open(in_path, encoding="latin-1") as opened_p:
        # The second line of the original file carries the device header.
        header.append(opened_p.readlines()[1])
    header.append("# EndOfHeader")
    data = []
    nr_channels = []
    for file in files:
        for i, device in enumerate(file.keys()):
            nr_channels.append(len(list(file[device])))
            data.append(file[device][channels[i]])
    # dephase: sample delay between the signals; s1/s2: the (possibly
    # shifted) synchronised versions returned by the helper.
    dephase, s1, s2 = synchronise_signals(data[0], data[1])
    new_header = [h.replace("\n", "") for h in header]
    sync_file = open(new_path, 'w')
    sync_file.write(' \n'.join(new_header) + '\n')
    old_columns = np.loadtxt(in_path)
    # The unchanged device's columns are kept whole; the other device's
    # columns are cropped by ``dephase`` samples. The factor 3 presumably
    # reflects 3 file columns per channel — confirm against the format.
    if np.array_equal(s1, data[0]):
        aux = 3 * nr_channels[0]
        columns = old_columns[dephase:, aux:]
        new_file = _shape_array(old_columns[:, :aux], columns)
    elif np.array_equal(s2, data[1]):
        aux = 3 * nr_channels[1]
        columns = old_columns[dephase:, :aux]
        new_file = _shape_array(columns, old_columns[:, aux:])
    else:
        print("The devices are synchronised.")
        return
    for line in new_file:
        sync_file.write('\t'.join(str(int(i)) for i in line) + '\t\n')
    sync_file.close()
|
This function allows to generate a text file with synchronised signals from the input file .
|
4,426
|
def render(self, bindings):
    """Render a string from this path template using ``bindings``.

    Binding segments are replaced by the (re-parsed) bound values;
    literal segments between a binding start and its end marker are
    skipped. The rendered path is validated through ``self.match``.

    :raises ValidationException: when a bound key is missing or the
        rendered path does not match the template.
    """
    out = []
    binding = False
    for segment in self.segments:
        if segment.kind == _BINDING:
            if segment.literal not in bindings:
                raise ValidationException(
                    ('rendering error: value for key \'{}\' '
                     'not provided').format(segment.literal))
            # The bound value may itself contain several '/'-separated
            # segments, hence the re-parse via PathTemplate.
            out.extend(PathTemplate(bindings[segment.literal]).segments)
            binding = True
        elif segment.kind == _END_BINDING:
            binding = False
        else:
            if binding:
                continue
            out.append(segment)
    path = _format(out)
    # Round-trip check: the rendered path must match this template.
    self.match(path)
    return path
|
Renders a string from a path template using the provided bindings .
|
4,427
|
def match(self, path):
    """Match ``path`` against this template and return its bindings.

    Walks the template segments against the '/'-separated parts of
    ``path``: '*' consumes one part, '**' consumes all surplus parts,
    other literals must match exactly.

    :raises ValidationException: on a literal mismatch or a wrong number
        of path segments.
    :return: dict mapping binding variable names to matched sub-paths.
    """
    this = self.segments
    that = path.split('/')
    current_var = None
    bindings = {}
    segment_count = self.segment_count
    j = 0
    for i in range(0, len(this)):
        if j >= len(that):
            break
        if this[i].kind == _TERMINAL:
            if this[i].literal == '*':
                bindings[current_var] = that[j]
                j += 1
            elif this[i].literal == '**':
                # '**' swallows the parts beyond the declared segment
                # count, and the count is bumped to account for them.
                until = j + len(that) - segment_count + 1
                segment_count += len(that) - segment_count
                bindings[current_var] = '/'.join(that[j:until])
                j = until
            elif this[i].literal != that[j]:
                raise ValidationException(
                    'mismatched literal: \'%s\' != \'%s\'' % (
                        this[i].literal, that[j]))
            else:
                j += 1
        elif this[i].kind == _BINDING:
            # Remember which variable the next terminal(s) bind to.
            current_var = this[i].literal
    if j != len(that) or j != segment_count:
        raise ValidationException(
            'match error: could not render from the path template: {}'.format(path))
    return bindings
|
Matches a fully qualified path template string .
|
4,428
|
def parse(self, data):
    """Return the list of path template segments parsed from ``data``.

    Resets the binding/segment counters and rejects templates that
    contain more than one '**' path wildcard.

    :raises ValidationException: on a second path wildcard.
    """
    self.binding_var_count = 0
    self.segment_count = 0
    segments = self.parser.parse(data)
    seen_path_wildcard = False
    for segment in segments:
        is_wildcard = segment.kind == _TERMINAL and segment.literal == '**'
        if not is_wildcard:
            continue
        if seen_path_wildcard:
            raise ValidationException(
                'validation error: path template cannot contain more '
                'than one path wildcard')
        seen_path_wildcard = True
    return segments
|
Returns a list of path template segments parsed from data .
|
4,429
|
def create(window, root):
    """Create a notification object for the given root element.

    Looks up the notification class by the element's id, defaulting to
    ``BaseNotification`` when no specific class is registered.
    """
    from foxpuppet.windows.browser.notifications import addons
    mapping = {}
    mapping.update(addons.NOTIFICATIONS)
    _id = root.get_property("id")
    return mapping.get(_id, BaseNotification)(window, root)
|
Create a notification object .
|
4,430
|
def label(self):
    """Provide access to the notification label."""
    chrome = self.selenium.CONTEXT_CHROME
    with self.selenium.context(chrome):
        return self.root.get_attribute("label")
|
Provide access to the notification label .
|
4,431
|
def origin(self):
    """Provide access to the notification origin."""
    chrome = self.selenium.CONTEXT_CHROME
    with self.selenium.context(chrome):
        return self.root.get_attribute("origin")
|
Provide access to the notification origin .
|
4,432
|
def find_primary_button(self):
    """Retrieve the primary button of the notification.

    Firefox 67+ exposes it via a CSS class; older versions require the
    anonymous-element lookup.
    """
    if self.window.firefox_version < 67:
        return self.root.find_anonymous_element_by_attribute("anonid", "button")
    return self.root.find_element(By.CLASS_NAME, "popup-notification-primary-button")
|
Retrieve the primary button .
|
4,433
|
def windows(self):
    """Return a list of all open browser windows."""
    from foxpuppet.windows import BrowserWindow
    handles = self.selenium.window_handles
    return [BrowserWindow(self.selenium, handle) for handle in handles]
|
Return a list of all open windows .
|
4,434
|
def read_daemon(self):
    """Read thread: forward received socket data to the parser forever."""
    while True:
        self.feed_parser(self._socket.recv(9999))
|
Read thread .
|
4,435
|
def _logic ( self , value = None ) : self . _validation_result , self . _validation_reason = None , 'No reason' return self . _validation_result , self . _validation_reason
|
Process the inner logic of the validator .
|
4,436
|
def get_station_board(self, crs, rows=17, include_departures=True,
                      include_arrivals=False, destination_crs=None,
                      origin_crs=None):
    """Query the Darwin webservice for a board at a particular station.

    :param crs: CRS code of the station
    :param rows: maximum number of services to return
    :param include_departures: include departing services
    :param include_arrivals: include arriving services
    :param destination_crs: (optional) filter to services towards this CRS
    :param origin_crs: (optional) filter to services from this CRS;
        ignored when destination_crs is also given
    :raises ValueError: when neither departures nor arrivals are requested
    :raises WebServiceError: when the SOAP call fails
    :return: a StationBoard instance
    """
    if include_departures and include_arrivals:
        query_type = 'GetArrivalDepartureBoard'
    elif include_departures:
        query_type = 'GetDepartureBoard'
    elif include_arrivals:
        query_type = 'GetArrivalBoard'
    else:
        # BUG FIX: the original message embedded a stray backslash from a
        # broken line continuation inside the string literal.
        raise ValueError("get_station_board must have either "
                         "include_departures or include_arrivals "
                         "set to True")
    q = partial(self._base_query()[query_type], crs=crs, numRows=rows)
    if destination_crs:
        if origin_crs:
            # BUG FIX: same stray backslash; warn() is also a deprecated
            # alias of warning().
            log.warning("Station board query can only filter on one of "
                        "destination_crs and origin_crs, using only "
                        "destination_crs")
        q = partial(q, filterCrs=destination_crs, filterType='to')
    elif origin_crs:
        q = partial(q, filterCrs=origin_crs, filterType='from')
    try:
        soap_response = q()
    except WebFault:
        raise WebServiceError
    return StationBoard(soap_response)
|
Query the darwin webservice to obtain a board for a particular station and return a StationBoard instance
|
4,437
|
def get_service_details(self, service_id):
    """Get the details of an individual service.

    :raises WebServiceError: when the SOAP call fails
    :return: a ServiceDetails instance
    """
    soap = self._soap_client.service['LDBServiceSoap']
    try:
        soap_response = soap['GetServiceDetails'](serviceID=service_id)
    except WebFault:
        raise WebServiceError
    return ServiceDetails(soap_response)
|
Get the details of an individual service and return a ServiceDetails instance .
|
4,438
|
def render_template(self):
    """Render and save the API doc in openapi.yml."""
    self._parse_paths()
    self._save(dict(napp=self._napp.__dict__, paths=self._paths))
|
Render and save API doc in openapi . yml .
|
4,439
|
def _parse_decorated_functions(self, code):
    """Collect URL rules, HTTP methods and docstrings from ``code``.

    NOTE(review): the first argument to ``re.finditer`` is a bare name
    ``r`` — almost certainly a raw-string regex literal lost in
    transcription; confirm against the original source.
    """
    matches = re.finditer(r, code, re.VERBOSE | re.DOTALL)
    for function_match in matches:
        m_dict = function_match.groupdict()
        # Expects named groups 'docstring' and 'decorators' in the pattern.
        self._parse_docstring(m_dict['docstring'])
        self._add_function_paths(m_dict['decorators'])
|
Return URL rule HTTP methods and docstring .
|
4,440
|
def _parse_docstring(self, docstring):
    """Parse the method docstring into summary and description.

    Falls back to TODO placeholders when the docstring does not match.

    NOTE(review): the first argument to ``re.match`` is a bare name
    ``r`` — almost certainly a raw-string regex literal lost in
    transcription; confirm against the original source.
    """
    match = re.match(r, docstring, re.VERBOSE | re.DOTALL)
    summary = 'TODO write the summary.'
    description = 'TODO write/remove the description'
    if match:
        m_dict = match.groupdict()
        summary = m_dict['summary']
        if m_dict['description']:
            # Collapse runs of whitespace/newlines into single spaces.
            description = re.sub(r'(\s|\n){2,}', ' ', m_dict['description'])
    self._summary = summary
    self._description = description
|
Parse the method docstring .
|
4,441
|
def _parse_methods ( cls , list_string ) : if list_string is None : return APIServer . DEFAULT_METHODS json_list = list_string . replace ( "'" , '"' ) return json . loads ( json_list )
|
Return HTTP method list . Use json for security reasons .
|
4,442
|
def _rule2path ( cls , rule ) : typeless = re . sub ( r'<\w+?:' , '<' , rule ) return typeless . replace ( '<' , '{' ) . replace ( '>' , '}' )
|
Convert relative Flask rule to absolute OpenAPI path .
|
4,443
|
def property(self, name):
    """Retrieve a property of this part by its name or uuid.

    :raises NotFoundError: when no property matches.
    """
    if is_uuid(name):
        found = find(self.properties, lambda p: name == p.id)
    else:
        found = find(self.properties, lambda p: name == p.name)
    if not found:
        raise NotFoundError("Could not find property with name or id {}".format(name))
    return found
|
Retrieve the property belonging to this part based on its name or uuid .
|
4,444
|
def parent(self):
    """Retrieve the parent of this Part, or None at the top level."""
    if not self.parent_id:
        return None
    return self._client.part(pk=self.parent_id, category=self.category)
|
Retrieve the parent of this Part .
|
4,445
|
def children(self, **kwargs):
    """Retrieve the children of this Part as a PartSet.

    Unfiltered results are cached on the instance; any keyword filter
    bypasses the cache and queries the API directly.
    """
    if kwargs:
        return self._client.parts(parent=self.id, category=self.category, **kwargs)
    if not self._cached_children:
        self._cached_children = list(
            self._client.parts(parent=self.id, category=self.category))
    return self._cached_children
|
Retrieve the children of this Part as Partset .
|
4,446
|
def siblings(self, **kwargs):
    """Retrieve the siblings of this Part as a PartSet.

    A part without a parent has no siblings; an empty PartSet is then
    returned.
    """
    if not self.parent_id:
        from pykechain.models.partset import PartSet
        return PartSet(parts=[])
    return self._client.parts(parent=self.parent_id,
                              category=self.category, **kwargs)
|
Retrieve the siblings of this Part as Partset .
|
4,447
|
def model(self):
    """Retrieve the model of this Part.

    :raises NotFoundError: when this part is not an instance.
    """
    if self.category != Category.INSTANCE:
        raise NotFoundError("Part {} has no model".format(self.name))
    return self._client.model(pk=self._json_data['model'].get('id'))
|
Retrieve the model of this Part as Part .
|
4,448
|
def instances(self, **kwargs):
    """Retrieve the instances of this Part as a PartSet.

    :raises NotFoundError: when this part is not a model.
    """
    if self.category != Category.MODEL:
        raise NotFoundError("Part {} is not a model".format(self.name))
    return self._client.parts(model=self, category=Category.INSTANCE, **kwargs)
|
Retrieve the instances of this Part as a PartSet .
|
4,449
|
def proxy_model(self):
    """Retrieve the proxy model of this proxied Part.

    :raises IllegalArgumentError: when this part is not a model.
    :raises NotFoundError: when this model is not a proxy.
    """
    if self.category != Category.MODEL:
        raise IllegalArgumentError(
            "Part {} is not a model, therefore it cannot have a proxy model".format(self))
    # A missing or empty 'proxy' entry means this model is not a proxy.
    if not self._json_data.get('proxy'):
        raise NotFoundError("Part {} is not a proxy".format(self.name))
    return self._client.model(pk=self._json_data['proxy'].get('id'))
|
Retrieve the proxy model of this proxied Part as a Part .
|
4,450
|
def add(self, model, **kwargs):
    """Add a new child instance, based on a model, to this part.

    :raises APIError: when this part is not an instance.
    """
    if self.category == Category.INSTANCE:
        return self._client.create_part(self, model, **kwargs)
    raise APIError("Part should be of category INSTANCE")
|
Add a new child instance based on a model to this part .
|
4,451
|
def add_to(self, parent, **kwargs):
    """Add a new instance of this model under the given parent.

    :raises APIError: when this part is not a model.
    """
    if self.category == Category.MODEL:
        return self._client.create_part(parent, self, **kwargs)
    raise APIError("Part should be of category MODEL")
|
Add a new instance of this model to a part .
|
4,452
|
def add_model(self, *args, **kwargs):
    """Add a new child model to this model.

    :raises APIError: when this part is not a model.
    """
    if self.category == Category.MODEL:
        return self._client.create_model(self, *args, **kwargs)
    raise APIError("Part should be of category MODEL")
|
Add a new child model to this model .
|
4,453
|
def add_property(self, *args, **kwargs):
    """Add a new property to this model.

    :raises APIError: when this part is not a model.
    """
    if self.category == Category.MODEL:
        return self._client.create_property(self, *args, **kwargs)
    raise APIError("Part should be of category MODEL")
|
Add a new property to this model .
|
4,454
|
def update(self, name=None, update_dict=None, bulk=True, **kwargs):
    """Edit the part name and property values in one go.

    :param name: (optional) new part name; only applied on the bulk path
    :param update_dict: mapping of property name-or-uuid -> new value
    :param bulk: when True and more than one property changes, a single
        bulk request is sent; otherwise properties are set one by one
    :raises APIError: when the bulk request fails
    """
    action = 'bulk_update_properties'
    # Normalise the keys of update_dict to property UUIDs.
    request_body = dict()
    for prop_name_or_id, property_value in update_dict.items():
        if is_uuid(prop_name_or_id):
            request_body[prop_name_or_id] = property_value
        else:
            request_body[self.property(prop_name_or_id).id] = property_value
    # NOTE(review): a single-entry update_dict always takes the
    # per-property path, and ``name`` is silently ignored there —
    # confirm this is intended.
    if bulk and len(update_dict.keys()) > 1:
        if name:
            if not isinstance(name, str):
                raise IllegalArgumentError("Name of the part should be provided as a string")
        r = self._client._request(
            'PUT', self._client._build_url('part', part_id=self.id),
            data=dict(name=name, properties=json.dumps(request_body), **kwargs),
            params=dict(select_action=action))
        if r.status_code != requests.codes.ok:
            raise APIError('{}: {}'.format(str(r), r.content))
    else:
        for property_name, property_value in update_dict.items():
            self.property(property_name).value = property_value
|
Edit part name and property values in one go .
|
4,455
|
def add_with_properties(self, model, name=None, update_dict=None, bulk=True, **kwargs):
    """Add a part under this instance and set its properties in one go.

    :param model: part model to instantiate
    :param name: (optional) name of the new instance, defaults to the
        model's name
    :param update_dict: mapping of model property name-or-uuid -> value
    :param bulk: when True, create and set properties in one request
    :raises APIError: when this part is not an instance or the request fails
    :return: the new Part
    """
    if self.category != Category.INSTANCE:
        raise APIError("Part should be of category INSTANCE")
    name = name or model.name
    action = 'new_instance_with_properties'
    # Normalise property keys to UUIDs of the *model's* properties.
    properties_update_dict = dict()
    for prop_name_or_id, property_value in update_dict.items():
        if is_uuid(prop_name_or_id):
            properties_update_dict[prop_name_or_id] = property_value
        else:
            properties_update_dict[model.property(prop_name_or_id).id] = property_value
    if bulk:
        r = self._client._request(
            'POST', self._client._build_url('parts'),
            data=dict(name=name, model=model.id, parent=self.id,
                      properties=json.dumps(properties_update_dict), **kwargs),
            params=dict(select_action=action))
        if r.status_code != requests.codes.created:
            raise APIError('{}: {}'.format(str(r), r.content))
        return Part(r.json()['results'][0], client=self._client)
    else:
        # Fallback: create first, then update properties one by one.
        new_part = self.add(model, name=name)
        new_part.update(update_dict=update_dict, bulk=bulk)
        return new_part
|
Add a part and update its properties in one go .
|
4,456
|
def order_properties(self, property_list=None):
    """Order the properties of a part model.

    :param property_list: list of Property objects, property names or
        property ids, in the desired order
    :raises APIError: when this part is not a model or the request fails
    :raises IllegalArgumentError: when property_list is not a list
    """
    if self.category != Category.MODEL:
        raise APIError("Part should be of category MODEL")
    if not isinstance(property_list, list):
        raise IllegalArgumentError(
            'Expected a list of strings or Property() objects, got a {} object'.format(
                type(property_list)))
    order_dict = dict()
    # BUG FIX: enumerate replaces the original property_list.index(prop)
    # calls, which were O(n^2) and wrong for duplicate entries.
    for index, prop in enumerate(property_list):
        if isinstance(prop, (str, text_type)):
            order_dict[self.property(name=prop).id] = index
        else:
            order_dict[prop.id] = index
    r = self._client._request(
        'PUT', self._client._build_url('part', part_id=self.id),
        data=dict(property_order=json.dumps(order_dict)))
    if r.status_code != requests.codes.ok:
        raise APIError("Could not reorder properties")
|
Order the properties of a part model using a list of property objects or property names or property ids .
|
4,457
|
def clone(self, **kwargs):
    """Clone this part under its own parent.

    :return: the cloned part
    """
    return self._client._create_clone(self.parent(), self, **kwargs)
|
Clone a part .
|
4,458
|
def copy(self, target_parent, name=None, include_children=True,
         include_instances=True):
    """Copy the Part to the target parent; both must share a category.

    For models, optionally copies the model's instances under every
    instance of the target parent. For instances, relocates a copy under
    the target instance.

    :raises IllegalArgumentError: when the categories differ.
    :return: the copied model or instance
    """
    if self.category == Category.MODEL and target_parent.category == Category.MODEL:
        copied_model = relocate_model(part=self, target_parent=target_parent,
                                      name=name, include_children=include_children)
        if include_instances:
            instances_to_be_copied = list(self.instances())
            parent_instances = list(target_parent.instances())
            for parent_instance in parent_instances:
                for instance in instances_to_be_copied:
                    instance.populate_descendants()
                    move_part_instance(part_instance=instance,
                                       target_parent=parent_instance,
                                       part_model=self,
                                       name=instance.name,
                                       include_children=include_children)
        return copied_model
    elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
        copied_instance = relocate_instance(part=self, target_parent=target_parent,
                                            name=name, include_children=include_children)
        return copied_instance
    else:
        # BUG FIX: the original never called .format(), so the braces
        # were emitted literally in the error message.
        raise IllegalArgumentError(
            'part "{}" and target parent "{}" must have the same category'.format(
                self, target_parent))
|
Copy the Part to target parent both of them having the same category .
|
4,459
|
def move(self, target_parent, name=None, include_children=True,
         include_instances=True):
    """Move the Part to the target parent; both must share a category.

    Behaves like ``copy`` but deletes the original part afterwards. For
    an instance whose deletion fails, the instance's model is deleted
    instead (which removes the instance with it).

    :raises IllegalArgumentError: when the categories differ.
    :return: the moved model or instance
    """
    if not name:
        name = self.name
    if self.category == Category.MODEL and target_parent.category == Category.MODEL:
        moved_model = relocate_model(part=self, target_parent=target_parent,
                                     name=name, include_children=include_children)
        if include_instances:
            retrieve_instances_to_copied = list(self.instances())
            retrieve_parent_instances = list(target_parent.instances())
            for parent_instance in retrieve_parent_instances:
                for instance in retrieve_instances_to_copied:
                    instance.populate_descendants()
                    move_part_instance(part_instance=instance,
                                       target_parent=parent_instance,
                                       part_model=self,
                                       name=instance.name,
                                       include_children=include_children)
        self.delete()
        return moved_model
    elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
        moved_instance = relocate_instance(part=self, target_parent=target_parent,
                                           name=name, include_children=include_children)
        try:
            self.delete()
        except APIError:
            model_of_instance = self.model()
            model_of_instance.delete()
        return moved_instance
    else:
        # BUG FIX: the original never called .format(), so the braces
        # were emitted literally in the error message.
        raise IllegalArgumentError(
            'part "{}" and target parent "{}" must have the same category'.format(
                self, target_parent))
|
Move the Part to target parent , both of them having the same category .
|
4,460
|
def _generate_notebook_by_difficulty_body(notebook_object, dict_by_difficulty):
    """Generate the page body where notebooks are grouped by difficulty.

    For each difficulty level (ascending), builds a markdown HTML table
    with one row per notebook (icon, title, link) and appends it to
    ``notebook_object`` as a new markdown cell.
    """
    difficulty_keys = list(dict_by_difficulty.keys())
    difficulty_keys.sort()
    for difficulty in difficulty_keys:
        markdown_cell = STAR_TABLE_HEADER
        markdown_cell = _set_star_value(markdown_cell, int(difficulty))
        for notebook_file in dict_by_difficulty[str(difficulty)]:
            # Path layout: .../<notebook_type>/<name>&<title> — the '&'
            # separates file name from display title.
            split_path = notebook_file.split("/")
            notebook_type = split_path[-2]
            notebook_name = split_path[-1].split("&")[0]
            notebook_title = split_path[-1].split("&")[1]
            markdown_cell += "\n\t<tr>\n\t\t<td width='20%' class='header_image_color_" + str(NOTEBOOK_KEYS[notebook_type]) + "'><img " "src='../../images/icons/" + notebook_type.title() + ".png' width='15%'>\n\t\t</td>"
            markdown_cell += "\n\t\t<td width='60%' class='center_cell open_cell_light'>" + notebook_title + "\n\t\t</td>"
            markdown_cell += "\n\t\t<td width='20%' class='center_cell'>\n\t\t\t<a href='" "../" + notebook_type.title() + "/" + notebook_name + "'><div class='file_icon'></div></a>\n\t\t</td>\n\t</tr>"
        markdown_cell += "</table>"
        notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell))
|
Internal function that is used for generation of the page where notebooks are organized by difficulty level .
|
4,461
|
def _generate_dir_structure(path):
    """Create the biosignalsnotebooks environment directory tree under *path*.

    Copies the packaged support folders (images, styles, signal samples)
    next to the generated notebooks and creates one sub-directory per
    notebook category.

    :param path: destination root directory
    :return: the path of the created "Categories" directory
    """
    # Backslash-joined then normalised to forward slashes for portability.
    current_dir = (path + "\\opensignalsfactory_environment").replace("\\", "/")
    if not os.path.isdir(current_dir):
        os.makedirs(current_dir)
    # Location of the support files shipped alongside this module.
    path_cloned_files = (os.path.abspath(__file__).split(os.path.basename(__file__))[0] + "\\notebook_files\\osf_files\\").replace("\\", "/")
    for var in ["images", "styles", "signal_samples"]:
        # Remove a stale copy before copying the fresh one (copytree
        # requires the destination to not exist).
        if os.path.isdir((current_dir + "\\" + var).replace("\\", "/")):
            shutil.rmtree((current_dir + "\\" + var).replace("\\", "/"))
        src = (path_cloned_files + "\\" + var).replace("\\", "/")
        destination = (current_dir + "\\" + var).replace("\\", "/")
        shutil.copytree(src, destination)
    current_dir += "/Categories"
    if not os.path.isdir(current_dir):
        os.makedirs(current_dir)
    # One folder per notebook category.
    categories = list(NOTEBOOK_KEYS.keys())
    for category in categories:
        if not os.path.isdir(current_dir + "/" + category):
            os.makedirs(current_dir + "/" + category)
    return current_dir
|
Internal function intended to generate the biosignalsnotebooks directories in order to the user can visualise and execute the Notebook created with notebook class in Jupyter .
|
4,462
|
def in_lamp_reach(p):
    """Return True when XYPoint *p* lies inside the lamp's CIE 1931 gamut triangle.

    Expresses *p* in barycentric-style coordinates (s, t) relative to the
    Red/Lime/Blue triangle and checks it falls within the triangle.
    """
    edge_a = XYPoint(Lime.x - Red.x, Lime.y - Red.y)
    edge_b = XYPoint(Blue.x - Red.x, Blue.y - Red.y)
    rel = XYPoint(p.x - Red.x, p.y - Red.y)
    denom = cross_product(edge_a, edge_b)
    s = cross_product(rel, edge_b) / denom
    t = cross_product(edge_a, rel) / denom
    return s >= 0.0 and t >= 0.0 and s + t <= 1.0
|
Check if the provided XYPoint can be recreated by a Hue lamp .
|
4,463
|
def get_closest_point_to_line(A, B, P):
    """Return the point on segment A-B closest to P.

    The projection parameter is clamped to [0, 1] so the result always
    lies on the segment (and is therefore reproducible by a Hue lamp).
    """
    ap = XYPoint(P.x - A.x, P.y - A.y)
    ab = XYPoint(B.x - A.x, B.y - A.y)
    t = (ap.x * ab.x + ap.y * ab.y) / (ab.x * ab.x + ab.y * ab.y)
    t = min(1.0, max(0.0, t))
    return XYPoint(A.x + ab.x * t, A.y + ab.y * t)
|
Find the closest point on a line . This point will be reproducible by a Hue lamp .
|
4,464
|
def get_closest_point_to_point(xy_point):
    """Return the gamut-triangle point nearest to an out-of-gamut *xy_point*.

    Projects the point onto each edge of the Red/Lime/Blue CIE 1931
    triangle and keeps the projection with the smallest distance.
    """
    candidates = (
        get_closest_point_to_line(Red, Lime, xy_point),
        get_closest_point_to_line(Blue, Red, xy_point),
        get_closest_point_to_line(Lime, Blue, xy_point),
    )
    best = min(candidates,
               key=lambda c: get_distance_between_two_points(xy_point, c))
    return XYPoint(best.x, best.y)
|
Used to find the closest reproducible point to an unreproducible color, by projecting it onto each edge of the CIE 1931 gamut triangle.
|
4,465
|
def get_xy_from_hex(hex_value):
    """Return the closest reachable CIE 1931 xy point for an RGB hex string.

    :param hex_value: 6-character hex colour value (e.g. ``b"ff0000"``)
    :return: an XYPoint inside the lamp gamut
    """
    red, green, blue = struct.unpack('BBB', codecs.decode(hex_value, 'hex'))

    # Bug fix: channels must be normalised to [0, 1] before the sRGB
    # inverse-companding below; the original applied the 0.04045 threshold
    # and the gamma formula to the raw 0-255 integer values.
    red /= 255.0
    green /= 255.0
    blue /= 255.0

    # sRGB gamma expansion (linearisation).
    r = ((red + 0.055) / (1.0 + 0.055)) ** 2.4 if (red > 0.04045) else (red / 12.92)
    g = ((green + 0.055) / (1.0 + 0.055)) ** 2.4 if (green > 0.04045) else (green / 12.92)
    b = ((blue + 0.055) / (1.0 + 0.055)) ** 2.4 if (blue > 0.04045) else (blue / 12.92)

    # Linear RGB -> CIE XYZ (conversion matrix from the Hue developer docs).
    X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804
    Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169
    Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733

    # XYZ -> xy chromaticity; guard the division for pure black.
    if X + Y + Z == 0:
        cx = cy = 0
    else:
        cx = X / (X + Y + Z)
        cy = Y / (X + Y + Z)

    xy_point = XYPoint(cx, cy)

    # Snap to the nearest gamut point when the colour is out of reach.
    is_in_reach = in_lamp_reach(xy_point)
    if not is_in_reach:
        xy_point = get_closest_point_to_point(xy_point)
    return xy_point
|
Returns the X, Y coordinates of the closest available CIE 1931 color point based on the hex_value provided.
|
4,466
|
def get_other_keys(self, key, including_current=False):
    """Return the list of other keys mapped to the same value as *key*.

    With *including_current* set, *key* itself is kept in the result.
    Returns an empty list when *key* is not present.
    """
    aliases = []
    if key in self:
        aliases.extend(self.__dict__[str(type(key))][key])
        if not including_current:
            aliases.remove(key)
    return aliases
|
Returns list of other keys that are mapped to the same value as specified key .
|
4,467
|
def iterkeys(self, key_type=None, return_all_keys=False):
    """Iterate over the dictionary's keys.

    Without *key_type*, the internal composite keys are yielded.  With a
    *key_type*, only keys of that type are yielded; when *return_all_keys*
    is True the full composite key is yielded instead of the plain key.
    """
    if key_type is None:
        yield from self.items_dict.keys()
        return
    type_map = self.__dict__.get(str(key_type))
    if type_map is not None:
        for single_key in type_map.keys():
            yield type_map[single_key] if return_all_keys else single_key
|
Returns an iterator over the dictionary s keys .
|
4,468
|
def itervalues(self, key_type=None):
    """Iterate over the dictionary's values.

    Without *key_type* every stored value is yielded once; with a
    *key_type* only values reachable through keys of that type are
    yielded (possibly repeated when several such keys share a value).
    """
    if key_type is None:
        yield from self.items_dict.values()
        return
    type_map = self.__dict__.get(str(key_type))
    if type_map is not None:
        for composite_key in type_map.values():
            yield self.items_dict[composite_key]
|
Returns an iterator over the dictionary s values .
|
4,469
|
def keys ( self , key_type = None ) : if key_type is not None : intermediate_key = str ( key_type ) if intermediate_key in self . __dict__ : return self . __dict__ [ intermediate_key ] . keys ( ) else : all_keys = { } for keys in self . items_dict . keys ( ) : all_keys [ keys ] = None return all_keys . keys ( )
|
Returns a copy of the dictionary s keys .
|
4,470
|
def values(self, key_type=None):
    """Return a copy of the dictionary's values.

    With *key_type*: only values reachable through keys of that type,
    de-duplicated (or None when no key of that type exists).  Without:
    a view over all stored values.
    """
    if key_type is None:
        return self.items_dict.values()
    type_map = self.__dict__.get(str(key_type))
    if type_map is not None:
        unique_items = {}
        for composite_key in type_map.values():
            if composite_key not in unique_items:
                unique_items[composite_key] = self.items_dict[composite_key]
        return unique_items.values()
|
Returns a copy of the dictionary s values .
|
4,471
|
def __add_item(self, item, keys=None):
    """Internal: register *item* under every key in *keys*.

    Each key is indexed per key-type, mapping to the composite key
    (the tuple of all keys), which in turn maps to the stored item.
    """
    if not keys or not len(keys):
        raise Exception('Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!' % (self.__class__.__name__, str(item)))
    direct_key = tuple(keys)
    for single_key in keys:
        type_name = str(type(single_key))
        # Lazily create the per-type index.
        if type_name not in self.__dict__:
            self.__setattr__(type_name, dict())
        self.__dict__[type_name][single_key] = direct_key
    # Lazily create the composite-key -> item store.
    if 'items_dict' not in self.__dict__:
        self.items_dict = dict()
    self.items_dict[direct_key] = item
|
Internal method to add an item to the multi - key dictionary
|
4,472
|
def get(self, key, default=None):
    """Return the value stored under *key*, or *default* when absent."""
    if key not in self:
        return default
    composite_key = self.__dict__[str(type(key))][key]
    return self.items_dict[composite_key]
|
Return the value at index specified as key .
|
4,473
|
def extract_translations(self, string):
    """Extract translation messages from a Django template byte string.

    Scans template block tags for the configured ``trans``/``tranzchoice``
    tags and builds one Translation record per match.

    :param string: template source as bytes (decoded here as UTF-8)
    :return: list of Translation objects
    """
    trans = []
    for t in Lexer(string.decode("utf-8"), None).tokenize():
        if t.token_type == TOKEN_BLOCK:
            # Only the translation tags are of interest.
            if not t.contents.startswith((self.tranz_tag, self.tranzchoice_tag)):
                continue
            # A trailing space distinguishes the tranzchoice tag from
            # a tranz tag that merely shares the prefix.
            is_tranzchoice = t.contents.startswith(self.tranzchoice_tag + " ")
            kwargs = {
                # Each regex pulls one attribute out of the tag contents.
                "id": self._match_to_transvar(id_re, t.contents),
                "number": self._match_to_transvar(number_re, t.contents),
                "domain": self._match_to_transvar(domain_re, t.contents),
                "locale": self._match_to_transvar(locale_re, t.contents),
                "is_transchoice": is_tranzchoice,
                # Parameter names: left-hand side of each "name=value" pair.
                "parameters": TransVar(
                    [x.split("=")[0].strip() for x in properties_re.findall(t.contents) if x],
                    TransVar.LITERAL
                ),
                "lineno": t.lineno,
            }
            trans.append(Translation(**kwargs))
    return trans
|
Extract messages from Django template string .
|
4,474
|
def next(self):
    """Return the next ``(marker_name, genotypes)`` pair.

    :raises UnsupportedOperation: when the file is open in write mode
    :raises StopIteration: when every marker has been consumed
    """
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")
    position = self._n + 1
    self._n = position
    if position > self._nb_markers:
        raise StopIteration()
    return self._bim.index[position - 1], self._read_current_marker()
|
Returns the next marker .
|
4,475
|
def _read_current_marker(self):
    """Read the marker at the current BED position and return its genotypes.

    Reads one marker record (``self._nb_bytes`` bytes), maps each byte to
    its four decoded genotype values via the ``_geno_values`` lookup table,
    and truncates the flattened result to the number of samples (the last
    byte may be padded).
    """
    return self._geno_values[
        np.frombuffer(self._bed.read(self._nb_bytes), dtype=np.uint8)
    ].flatten(order="C")[:self._nb_samples]
|
Reads the current marker and returns its genotypes .
|
4,476
|
def seek(self, n):
    """Move the internal pointer to marker number *n* (0-based).

    :raises UnsupportedOperation: when the file is open in write mode
    :raises ValueError: when *n* is outside ``[0, nb_markers)``
    """
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")
    if not (0 <= n < self._nb_markers):
        raise ValueError("invalid position in BED: {}".format(n))
    self._n = n
    self._bed.seek(self._get_seek_position(n))
|
Gets to a certain marker position in the BED file .
|
4,477
|
def _read_bim(self):
    """Read the BIM file into ``self._bim`` and precompute allele encodings.

    Duplicated marker names are renamed by appending ``:dupN`` (N being
    the occurrence order in the file); the mapping from original name to
    the renamed markers is kept in ``self._dup_markers``.
    """
    bim = pd.read_csv(self.bim_filename, delim_whitespace=True,
                      names=["chrom", "snp", "cm", "pos", "a1", "a2"],
                      dtype=dict(snp=str, a1=str, a2=str))
    # Remember the original row order (seek index per marker).
    bim["i"] = bim.index
    try:
        # verify_integrity raises ValueError on duplicated marker names.
        bim = bim.set_index("snp", verify_integrity=True)
        self._has_duplicated = False
    except ValueError as e:
        self._has_duplicated = True
        duplicated = bim.snp.duplicated(keep=False)
        duplicated_markers = bim.loc[duplicated, "snp"]
        duplicated_marker_counts = duplicated_markers.value_counts()
        self._dup_markers = {m: [] for m in duplicated_marker_counts.index}
        logger.warning("Duplicated markers found")
        # NOTE(review): Series.iteritems was removed in pandas 2.0 —
        # confirm the supported pandas version still provides it.
        for marker, count in duplicated_marker_counts.iteritems():
            logger.warning(" - {}: {:,d} times".format(marker, count))
        logger.warning("Appending ':dupX' to the duplicated markers "
                       "according to their location in the BIM file")
        counter = Counter()
        for i, marker in duplicated_markers.iteritems():
            counter[marker] += 1
            new_name = "{}:dup{}".format(marker, counter[marker])
            bim.loc[i, "snp"] = new_name
            self._dup_markers[marker].append(new_name)
        # Names are now unique, so this set_index succeeds.
        bim = bim.set_index("snp", verify_integrity=True)
    # Encoding rows: hom a2, het, hom a1, missing ("00").
    allele_encoding = np.array(
        [bim.a2 * 2, bim.a1 + bim.a2, bim.a1 * 2,
         list(repeat("00", bim.shape[0]))],
        dtype="U2",
    )
    self._allele_encoding = allele_encoding.T
    self._bim = bim[["chrom", "pos", "cm", "a1", "a2", "i"]]
    self._nb_markers = self._bim.shape[0]
|
Reads the BIM file .
|
4,478
|
def _read_fam(self):
    """Read the FAM file into ``self._fam`` and count the samples.

    For each sample *i* the location of its genotype inside a marker's
    BED record is precomputed: ``byte`` is the byte offset (four samples
    per byte) and ``bit`` the bit offset within that byte (two bits per
    sample).
    """
    fam = pd.read_csv(self.fam_filename, delim_whitespace=True,
                      names=["fid", "iid", "father", "mother", "gender", "status"],
                      dtype=dict(fid=str, iid=str, father=str, mother=str))
    # Bug fix: the byte offset must depend on the sample index; the
    # original computed int(np.ceil((1 + 1) / 4.0)) - 1 == 0 for every
    # sample, which is only valid for the first four samples.  The
    # formula below is equivalent to i // 4.
    fam["byte"] = [int(np.ceil((i + 1) / 4.0)) - 1 for i in range(len(fam))]
    fam["bit"] = [(i % 4) * 2 for i in range(len(fam))]
    self._fam = fam
    self._nb_samples = self._fam.shape[0]
|
Reads the FAM file .
|
4,479
|
def _read_bed(self):
    """Open and validate the BED file.

    Checks the magic number and the SNP-major flag, verifies the last
    marker record is fully readable, then leaves ``self._bed`` open just
    past the 3-byte header for sequential reads.

    :raises RuntimeError: when the BIM/FAM files were not read first
    :raises ValueError: for an invalid or corrupted BED file
    """
    if (self._bim is None) or (self._fam is None):
        raise RuntimeError("no BIM or FAM file were read")
    # Each marker record holds 4 samples per byte.
    self._nb_bytes = int(np.ceil(self._nb_samples / 4.0))
    with open(self.bed_filename, "rb") as bed_file:
        # Magic number: 0x6c 0x1b.
        if (ord(bed_file.read(1)) != 108) or (ord(bed_file.read(1)) != 27):
            raise ValueError("not a valid BED file: "
                             "{}".format(self.bed_filename))
        # Third byte == 1 means SNP-major ordering.
        if ord(bed_file.read(1)) != 1:
            raise ValueError("not in SNP-major format (please recode): "
                             "{}".format(self.bed_filename))
        # Sanity check: the last marker must decode to nb_samples values.
        seek_index = self._get_seek_position(self._bim.iloc[-1, :].i)
        bed_file.seek(seek_index)
        geno = self._geno_values[
            np.frombuffer(bed_file.read(self._nb_bytes), dtype=np.uint8)
        ].flatten(order="C")[:self._nb_samples]
        if geno.shape[0] != self._nb_samples:
            raise ValueError("invalid number of entries: corrupted BED?")
    # Keep a persistent handle positioned right after the header.
    self._bed = open(self.bed_filename, "rb")
    self._bed.read(3)
|
Reads the BED file .
|
4,480
|
def _write_bed_header(self):
    """Write the first three BED bytes: the magic number (108, 27)
    followed by the orientation flag (1 for SNP-major, 0 otherwise)."""
    if self._bed_format == "SNP-major":
        mode_byte = 1
    else:
        mode_byte = 0
    self._bed.write(bytearray((108, 27, mode_byte)))
|
Writes the BED first 3 bytes .
|
4,481
|
def iter_geno_marker(self, markers, return_index=False):
    """Iterate over the genotypes of the given markers.

    Yields ``(marker, genotypes)`` tuples, or ``(marker, genotypes, seek)``
    when *return_index* is True.  A single marker name may be passed
    instead of a list.

    :raises UnsupportedOperation: when the file is open in write mode
    """
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")
    if isinstance(markers, str):
        markers = [markers]
    for marker in markers:
        if return_index:
            geno, seek_index = self.get_geno_marker(marker, return_index=True)
            yield marker, geno, seek_index
        else:
            yield marker, self.get_geno_marker(marker)
|
Iterates over genotypes for a list of markers .
|
4,482
|
def get_geno_marker(self, marker, return_index=False):
    """Return the genotypes for *marker* (optionally with its seek index).

    :raises UnsupportedOperation: when the file is open in write mode
    :raises ValueError: when *marker* is not present in the BIM
    """
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")
    if marker not in self._bim.index:
        raise ValueError("{}: marker not in BIM".format(marker))
    seek_index = self._bim.loc[marker, "i"]
    self.seek(seek_index)
    geno = self._read_current_marker()
    if return_index:
        return geno, seek_index
    return geno
|
Gets the genotypes for a given marker .
|
4,483
|
def write_genotypes(self, genotypes):
    """Write one marker's genotypes to the binary BED file.

    :param genotypes: additive genotype values, one per sample
    :raises UnsupportedOperation: when the file is open for reading
    :raises ValueError: when the number of genotypes differs from the
        number written for the first marker
    """
    if self._mode != "w":
        raise UnsupportedOperation("not available in 'r' mode")
    # The first write fixes the expected number of samples.
    if self._nb_values is None:
        self._nb_values = len(genotypes)
    if self._nb_values != len(genotypes):
        raise ValueError("{:,d} samples expected, got {:,d}".format(
            self._nb_values,
            len(genotypes),
        ))
    # Recode each genotype to its 2-bit value and pack four per byte.
    byte_array = [
        g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6)
        for g in self._grouper((_byte_recode[geno] for geno in genotypes), 4)
    ]
    self._bed.write(bytearray(byte_array))
|
Write genotypes to binary file .
|
4,484
|
def _read(self, directory, filename, session, path, name, extension, spatial=None, spatialReferenceID=None, replaceParamFile=None):
    """Generic time-series read-from-file method.

    Parses a whitespace-delimited file where each line holds a time value
    followed by one value per series, then delegates object creation to
    ``_createTimeSeriesObjects``.
    """
    self.fileExtension = extension
    timeSeries = []
    with open(path, 'r') as f:
        for line in f:
            sline = line.strip().split()
            # First column is the time stamp; the rest are series values.
            record = {'time': sline[0], 'values': []}
            for idx in range(1, len(sline)):
                record['values'].append(sline[idx])
            timeSeries.append(record)
    self._createTimeSeriesObjects(timeSeries, filename)
|
Generic Time Series Read from File Method
|
4,485
|
def _write(self, session, openFile, replaceParamFile):
    """Generic time-series write-to-file method.

    Collects every value of every series, pivots them so each output
    line carries one time stamp and one column per series, and writes
    fixed-width formatted lines.
    """
    timeSeries = self.timeSeries
    numTS = len(timeSeries)
    # Flatten all values into dicts keyed by time and series number.
    valList = []
    for tsNum, ts in enumerate(timeSeries):
        values = ts.values
        for value in values:
            valDict = {'time': value.simTime, 'tsNum': tsNum, 'value': value.value}
            valList.append(valDict)
    # Pivot: rows by time, columns by series number.
    result = pivot(valList, ('time',), ('tsNum',), 'value')
    for line in result:
        valString = ''
        for n in range(0, numTS):
            val = '%.6f' % line[(n,)]
            # Right-align each value in a 13-character field.
            valString = '%s%s%s' % (valString, ' ' * (13 - len(str(val))), val)
        openFile.write(' %.8f%s\n' % (line['time'], valString))
|
Generic Time Series Write to File Method
|
4,486
|
def as_dataframe(self):
    """Return the time series as a pandas DataFrame.

    One column per series (keyed by its position), indexed by simulation
    time.
    """
    columns = {}
    for series_index, series in enumerate(self.timeSeries):
        times = [v.simTime for v in series.values]
        data = [v.value for v in series.values]
        columns[series_index] = pd.Series(data, index=times)
    return pd.DataFrame(columns)
|
Return time series as pandas dataframe
|
4,487
|
def _createTimeSeriesObjects(self, timeSeries, filename):
    """Create GSSHAPY TimeSeries and TimeSeriesValue objects.

    One TimeSeries is created per value column and linked back to this
    file object; each parsed record becomes one TimeSeriesValue linked to
    its series.  The objects are attached through their relationship
    attributes, so nothing is returned.

    :param timeSeries: list of dicts with 'time' and 'values' keys
    :param filename: name of the source file (used for logging only)
    """
    try:
        valColumns = len(timeSeries[0]['values'])
        # One TimeSeries object per value column.
        series = []
        for i in range(0, valColumns):
            ts = TimeSeries()
            ts.timeSeriesFile = self
            series.append(ts)
        for record in timeSeries:
            for index, value in enumerate(record['values']):
                tsVal = TimeSeriesValue(simTime=record['time'], value=value)
                tsVal.timeSeries = series[index]
    except IndexError:
        # Empty file: nothing to persist — log and carry on.  (The
        # original also had a bare `except: raise` clause, which is a
        # no-op re-raise and has been removed.)
        log.warning(('%s was opened, but the contents of the file were empty.'
                     'This file will not be read into the database.') % filename)
|
Create GSSHAPY TimeSeries and TimeSeriesValue Objects Method
|
4,488
|
def extend(self, *blues, memo=None):
    """Replay the deferred operations of the given Blueprints onto self.

    Dispatcher arguments are first converted to Blueprints (sharing
    *memo* so repeated objects are converted once).

    :return: self, to allow chaining
    """
    if memo is None:
        memo = {}
    for blueprint in blues:
        if isinstance(blueprint, Dispatcher):
            blueprint = blueprint.blue(memo=memo)
        for method_name, kw in blueprint.deferred:
            getattr(self, method_name)(**kw)
    return self
|
Extends deferred operations calling each operation of given Blueprints .
|
4,489
|
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
    """Generic output-location read-from-file method.

    A one-token line holds the number of locations; every two-token line
    holds a link/cell-i and node/cell-j pair, turned into an
    OutputLocation linked back to this file object.
    """
    self.fileExtension = extension
    with open(path, 'r') as f:
        for line in f:
            sline = line.strip().split()
            if len(sline) == 1:
                # Header line: the declared number of locations
                # (kept as a string, as read from the file).
                self.numLocations = sline[0]
            else:
                location = OutputLocation(linkOrCellI=sline[0], nodeOrCellJ=sline[1])
                # Attach via the relationship attribute; no return needed.
                location.outputLocationFile = self
|
Generic Output Location Read from File Method
|
4,490
|
def _write(self, session, openFile, replaceParamFile):
    """Generic output-location write-to-file method: a count line
    followed by one 'i j' line per location."""
    locations = self.outputLocations
    openFile.write('%s\n' % self.numLocations)
    for loc in locations:
        openFile.write('%s %s\n' % (loc.linkOrCellI, loc.nodeOrCellJ))
|
Generic Output Location Write to File Method
|
4,491
|
def web(self, depth=-1, node_data=NONE, node_function=NONE, directory=None, sites=None, run=True):
    """Create a dispatcher Flask app (WebMap) for this Dispatcher/Solution.

    :param depth: how deep to walk sub-dispatchers (-1 for unlimited)
    :param node_data: node data to display (NONE sentinel keeps defaults)
    :param node_function: node functions to display
    :param directory: where to render the site (temp dir when omitted)
    :param sites: optional set collecting the rendered site
    :param run: open/run the site when it is added to *sites*
    :return: the populated WebMap
    """
    options = {'node_data': node_data, 'node_function': node_function}
    # Drop options left at the NONE sentinel so WebMap defaults apply.
    options = {k: v for k, v in options.items() if v is not NONE}
    # Local imports — presumably to avoid circular imports; confirm.
    from .web import WebMap
    from .sol import Solution
    # Solutions plot the dispatcher they were computed from.
    obj = self.dsp if isinstance(self, Solution) else self
    webmap = WebMap()
    webmap.add_items(obj, workflow=False, depth=depth, **options)
    if sites is not None:
        import tempfile
        directory = directory or tempfile.mkdtemp()
        sites.add(webmap.site(directory, view=run))
    return webmap
|
Creates a dispatcher Flask app .
|
4,492
|
def plot(self, workflow=None, view=True, depth=-1, name=NONE, comment=NONE,
         format=NONE, engine=NONE, encoding=NONE, graph_attr=NONE,
         node_attr=NONE, edge_attr=NONE, body=NONE, node_styles=NONE,
         node_data=NONE, node_function=NONE, edge_data=NONE, max_lines=NONE,
         max_width=NONE, directory=None, sites=None, index=False):
    """Plot the Dispatcher as a graph in the DOT language with Graphviz.

    Parameters left at the NONE sentinel are omitted so the SiteMap
    defaults apply.  When *view* is True the rendered site is opened
    (in a temporary directory unless *directory* is given).

    :return: the populated SiteMap
    """
    # Graphviz Digraph options; only explicitly-set values are kept.
    d = {
        'name': name, 'comment': comment, 'format': format, 'engine': engine,
        'encoding': encoding, 'graph_attr': graph_attr, 'node_attr': node_attr,
        'edge_attr': edge_attr, 'body': body,
    }
    options = {
        'digraph': {k: v for k, v in d.items() if v is not NONE} or NONE,
        'node_styles': node_styles,
        'node_data': node_data,
        'node_function': node_function,
        'edge_data': edge_data,
        'max_lines': max_lines,
        'max_width': max_width,
    }
    options = {k: v for k, v in options.items() if v is not NONE}
    # Local imports — presumably to avoid circular imports; confirm.
    from .drw import SiteMap
    from .sol import Solution
    # Solutions plot their workflow by default.
    if workflow is None and isinstance(self, Solution):
        workflow = True
    else:
        workflow = workflow or False
    sitemap = SiteMap()
    sitemap.add_items(self, workflow=workflow, depth=depth, **options)
    if view:
        import tempfile
        directory = directory or tempfile.mkdtemp()
        if sites is None:
            sitemap.render(directory=directory, view=True, index=index)
        else:
            sites.add(sitemap.site(directory, view=True, index=index))
    return sitemap
|
Plots the Dispatcher with a graph in the DOT language with Graphviz .
|
4,493
|
def _api_get(self, url, **kwargs):
    """Convenience wrapper for ``_get``: prepends the base url and adds
    auth and the default headers (caller headers take precedence)."""
    merged_headers = deepcopy(self.headers)
    merged_headers.update(kwargs.get('headers', {}))
    kwargs.update(url=self.url + url, auth=self.auth, headers=merged_headers)
    return self._get(**kwargs)
|
A convenience wrapper for _get . Adds headers auth and base url by default
|
4,494
|
def _api_put(self, url, **kwargs):
    """Convenience wrapper for ``_put``: prepends the base url and adds
    auth and the default headers (caller headers take precedence)."""
    merged_headers = deepcopy(self.headers)
    merged_headers.update(kwargs.get('headers', {}))
    kwargs.update(url=self.url + url, auth=self.auth, headers=merged_headers)
    self._put(**kwargs)
|
A convenience wrapper for _put . Adds headers auth and base url by default
|
4,495
|
def _api_post(self, url, **kwargs):
    """Convenience wrapper for ``_post``: prepends the base url and adds
    auth and the default headers (caller headers take precedence)."""
    merged_headers = deepcopy(self.headers)
    merged_headers.update(kwargs.get('headers', {}))
    kwargs.update(url=self.url + url, auth=self.auth, headers=merged_headers)
    self._post(**kwargs)
|
A convenience wrapper for _post . Adds headers auth and base url by default
|
4,496
|
def _api_delete(self, url, **kwargs):
    """Convenience wrapper for ``_delete``: prepends the base url and adds
    auth and the default headers (caller headers take precedence)."""
    merged_headers = deepcopy(self.headers)
    merged_headers.update(kwargs.get('headers', {}))
    kwargs.update(url=self.url + url, auth=self.auth, headers=merged_headers)
    self._delete(**kwargs)
|
A convenience wrapper for _delete . Adds headers auth and base url by default
|
4,497
|
def getAsKmlGrid(self, session, path=None, documentName=None, colorRamp=ColorRampEnum.COLOR_RAMP_HUE, alpha=1.0, noDataValue=None):
    """Retrieve the raster as a KML document of vector polygons.

    Each raster cell becomes a polygon; cells equal to the no-data value
    are excluded.

    :param session: SQLAlchemy session/engine used by the converter
    :param path: optional path; when given the KML is also written there
    :param documentName: KML document name (defaults to ``self.filename``)
    :param colorRamp: a ColorRampEnum value, or a dict with 'colors' and
        'interpolatedPoints' for a custom ramp
    :param alpha: polygon opacity in [0, 1]
    :param noDataValue: value to exclude (defaults to self.defaultNoDataValue)
    :return: the KML document string (None when no raster is set)
    """
    # Idiomatic None check (was: type(self.raster) != type(None)).
    if self.raster is not None:
        if documentName is None:
            try:
                documentName = self.filename
            except AttributeError:
                documentName = 'default'
        if noDataValue is None:
            noDataValue = self.defaultNoDataValue
        converter = RasterConverter(sqlAlchemyEngineOrSession=session)
        if isinstance(colorRamp, dict):
            converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
        else:
            converter.setDefaultColorRamp(colorRamp)
        kmlString = converter.getAsKmlGrid(tableName=self.tableName,
                                           rasterId=self.id,
                                           rasterIdFieldName='id',
                                           rasterFieldName=self.rasterColumnName,
                                           documentName=documentName,
                                           alpha=alpha,
                                           noDataValue=noDataValue,
                                           discreet=self.discreet)
        if path:
            with open(path, 'w') as f:
                f.write(kmlString)
        return kmlString
|
Retrieve the raster as a KML document with each cell of the raster represented as a vector polygon . The result is a vector grid of raster cells . Cells with the no data value are excluded .
|
4,498
|
def getAsGrassAsciiGrid(self, session):
    """Retrieve the raster in the GRASS ASCII Grid format.

    :param session: SQLAlchemy session/engine used by the converter
    :return: the GRASS ASCII raster string (None when no raster is set)
    """
    # Idiomatic None check (was: type(self.raster) != type(None)).
    if self.raster is not None:
        converter = RasterConverter(sqlAlchemyEngineOrSession=session)
        return converter.getAsGrassAsciiRaster(tableName=self.tableName,
                                               rasterIdFieldName='id',
                                               rasterId=self.id,
                                               rasterFieldName=self.rasterColumnName)
|
Retrieve the raster in the GRASS ASCII Grid format .
|
4,499
|
def shutdown(url=None):
    """Stop the Host at *url*, or every known Host when *url* is None.

    Stopping a host stops all of its actors at the same time; call this
    at the end of use so connections and threads are closed cleanly.
    """
    global core_type
    if url is None:
        for known_host in util.hosts.values():
            known_host.shutdown()
        core_type = None
    else:
        util.hosts[url].shutdown()
|
Stops the Host passed as a parameter (or all hosts, if none is specified), stopping all of its actors at the same time. Should be called at the end of use to shut down all connections and threads cleanly.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.