| idx (int64, 0 to 63k) | question (string, 61 to 4.03k chars) | target (string, 6 to 1.23k chars) |
|---|---|---|
3,900
|
def set_client_cmds ( self ) : self . task_data [ 'cmd' ] = self . input . get ( 'cmd' ) self . task_data [ 'flow' ] = self . input . get ( 'flow' ) filters = self . input . get ( 'filters' , { } ) try : if isinstance ( filters , dict ) : self . task_data [ 'object_id' ] = filters . get ( 'object_id' ) [ 'values' ] [ 0 ] elif filters [ 0 ] [ 'field' ] == 'object_id' : self . task_data [ 'object_id' ] = filters [ 0 ] [ 'values' ] [ 0 ] except : if 'object_id' in self . input : self . task_data [ 'object_id' ] = self . input . get ( 'object_id' )
|
This method is automatically called on each request; it updates the object_id, cmd and flow client variables from current.input.
|
3,901
|
def generate_move ( self , position ) : while True : print ( position ) raw = input ( str ( self . color ) + "\'s move \n" ) move = converter . short_alg ( raw , self . color , position ) if move is None : continue return move
|
Returns a valid and legal move given the position.
|
3,902
|
def in_check_as_result ( self , pos , move ) : test = cp ( pos ) test . update ( move ) test_king = test . get_king ( move . color ) return self . loc_adjacent_to_opponent_king ( test_king . location , test )
|
Determines whether playing my move would make the two kings meet.
|
3,903
|
def loc_adjacent_to_opponent_king ( self , location , position ) : for fn in self . cardinal_directions : try : if isinstance ( position . piece_at_square ( fn ( location ) ) , King ) and position . piece_at_square ( fn ( location ) ) . color != self . color : return True except IndexError : pass return False
|
Determines whether the two kings are touching, given the location of one of the kings.
|
3,904
|
def add ( self , func , position ) : try : if self . loc_adjacent_to_opponent_king ( func ( self . location ) , position ) : return except IndexError : return if position . is_square_empty ( func ( self . location ) ) : yield self . create_move ( func ( self . location ) , notation_const . MOVEMENT ) elif position . piece_at_square ( func ( self . location ) ) . color != self . color : yield self . create_move ( func ( self . location ) , notation_const . CAPTURE )
|
Adds all 8 cardinal directions as moves for the King if legal .
|
3,905
|
def _rook_legal_for_castle ( self , rook ) : return rook is not None and type ( rook ) is Rook and rook . color == self . color and not rook . has_moved
|
Decides whether the given rook exists, is of this color, and has not moved, making it eligible to castle.
|
3,906
|
def _empty_not_in_check ( self , position , direction ) : def valid_square ( square ) : return position . is_square_empty ( square ) and not self . in_check ( position , square ) return valid_square ( direction ( self . location , 1 ) ) and valid_square ( direction ( self . location , 2 ) )
|
Checks whether the squares between the King and Rook are empty and safe for the king to castle through.
|
3,907
|
def add_castle ( self , position ) : if self . has_moved or self . in_check ( position ) : return if self . color == color . white : rook_rank = 0 else : rook_rank = 7 castle_type = { notation_const . KING_SIDE_CASTLE : { "rook_file" : 7 , "direction" : lambda king_square , times : king_square . shift_right ( times ) } , notation_const . QUEEN_SIDE_CASTLE : { "rook_file" : 0 , "direction" : lambda king_square , times : king_square . shift_left ( times ) } } for castle_key in castle_type : castle_dict = castle_type [ castle_key ] castle_rook = position . piece_at_square ( Location ( rook_rank , castle_dict [ "rook_file" ] ) ) if self . _rook_legal_for_castle ( castle_rook ) and self . _empty_not_in_check ( position , castle_dict [ "direction" ] ) : yield self . create_move ( castle_dict [ "direction" ] ( self . location , 2 ) , castle_key )
|
Adds kingside and queenside castling moves if legal
|
3,908
|
def possible_moves ( self , position ) : for move in itertools . chain ( * [ self . add ( fn , position ) for fn in self . cardinal_directions ] ) : yield move for move in self . add_castle ( position ) : yield move
|
Generates list of possible moves
|
3,909
|
def in_check ( self , position , location = None ) : location = location or self . location for piece in position : if piece is not None and piece . color != self . color : if not isinstance ( piece , King ) : for move in piece . possible_moves ( position ) : if move . end_loc == location : return True else : if self . loc_adjacent_to_opponent_king ( piece . location , position ) : return True return False
|
Finds if the king is in check or if both kings are touching .
|
3,910
|
def set_keep_alive ( sock , idle = 10 , interval = 5 , fails = 5 ) : import sys sock . setsockopt ( socket . SOL_SOCKET , socket . SO_KEEPALIVE , 1 ) if sys . platform in ( 'linux' , 'linux2' ) : sock . setsockopt ( socket . IPPROTO_TCP , socket . TCP_KEEPIDLE , idle ) sock . setsockopt ( socket . IPPROTO_TCP , socket . TCP_KEEPINTVL , interval ) sock . setsockopt ( socket . IPPROTO_TCP , socket . TCP_KEEPCNT , fails ) elif sys . platform == 'darwin' : sock . setsockopt ( socket . IPPROTO_TCP , 0x10 , interval ) else : pass
|
Sets the keep-alive setting for the peer socket.
|
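A minimal usage sketch for the set_keep_alive helper above, assuming it is in scope; the host and port are placeholders.

```python
import socket

# Hypothetical usage: open a TCP connection and enable keep-alive probes
# (start probing after 10 s idle, probe every 5 s, drop after 5 failed probes).
sock = socket.create_connection(("example.com", 80))
set_keep_alive(sock, idle=10, interval=5, fails=5)
sock.close()
```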
3,911
|
def init_default ( cls ) : return cls ( [ [ Rook ( white , Location ( 0 , 0 ) ) , Knight ( white , Location ( 0 , 1 ) ) , Bishop ( white , Location ( 0 , 2 ) ) , Queen ( white , Location ( 0 , 3 ) ) , King ( white , Location ( 0 , 4 ) ) , Bishop ( white , Location ( 0 , 5 ) ) , Knight ( white , Location ( 0 , 6 ) ) , Rook ( white , Location ( 0 , 7 ) ) ] , [ Pawn ( white , Location ( 1 , file ) ) for file in range ( 8 ) ] , [ None for _ in range ( 8 ) ] , [ None for _ in range ( 8 ) ] , [ None for _ in range ( 8 ) ] , [ None for _ in range ( 8 ) ] , [ Pawn ( black , Location ( 6 , file ) ) for file in range ( 8 ) ] , [ Rook ( black , Location ( 7 , 0 ) ) , Knight ( black , Location ( 7 , 1 ) ) , Bishop ( black , Location ( 7 , 2 ) ) , Queen ( black , Location ( 7 , 3 ) ) , King ( black , Location ( 7 , 4 ) ) , Bishop ( black , Location ( 7 , 5 ) ) , Knight ( black , Location ( 7 , 6 ) ) , Rook ( black , Location ( 7 , 7 ) ) ] ] )
|
Creates a Board with the standard chess starting position .
|
3,912
|
def material_advantage ( self , input_color , val_scheme ) : if self . get_king ( input_color ) . in_check ( self ) and self . no_moves ( input_color ) : return - 100 if self . get_king ( - input_color ) . in_check ( self ) and self . no_moves ( - input_color ) : return 100 return sum ( [ val_scheme . val ( piece , input_color ) for piece in self ] )
|
Finds the advantage a particular side possesses given a value scheme .
|
3,913
|
def advantage_as_result ( self , move , val_scheme ) : test_board = cp ( self ) test_board . update ( move ) return test_board . material_advantage ( move . color , val_scheme )
|
Calculates advantage after move is played
|
3,914
|
def _calc_all_possible_moves ( self , input_color ) : for piece in self : if piece is not None and piece . color == input_color : for move in piece . possible_moves ( self ) : test = cp ( self ) test_move = Move ( end_loc = move . end_loc , piece = test . piece_at_square ( move . start_loc ) , status = move . status , start_loc = move . start_loc , promoted_to_piece = move . promoted_to_piece ) test . update ( test_move ) if self . king_loc_dict is None : yield move continue my_king = test . piece_at_square ( self . king_loc_dict [ input_color ] ) if my_king is None or not isinstance ( my_king , King ) or my_king . color != input_color : self . king_loc_dict [ input_color ] = test . find_king ( input_color ) my_king = test . piece_at_square ( self . king_loc_dict [ input_color ] ) if not my_king . in_check ( test ) : yield move
|
Returns list of all possible moves
|
3,915
|
def runInParallel ( * fns ) : proc = [ ] for fn in fns : p = Process ( target = fn ) p . start ( ) proc . append ( p ) for p in proc : p . join ( )
|
Runs multiple processes in parallel .
|
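A brief usage sketch for the runInParallel helper above; the two task functions are hypothetical and the helper is assumed to be in scope.

```python
from multiprocessing import Process  # the helper above expects Process in scope

def task_a():
    print("task A done")

def task_b():
    print("task B done")

if __name__ == "__main__":
    # Starts each function in its own process and waits for both to finish.
    runInParallel(task_a, task_b)
```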
3,916
|
def find_piece ( self , piece ) : for i , _ in enumerate ( self . position ) : for j , _ in enumerate ( self . position ) : loc = Location ( i , j ) if not self . is_square_empty ( loc ) and self . piece_at_square ( loc ) == piece : return loc raise ValueError ( "{} \nPiece not found: {}" . format ( self , piece ) )
|
Finds the Location of the first piece that matches piece. If none is found, an exception is raised.
|
3,917
|
def place_piece_at_square ( self , piece , location ) : self . position [ location . rank ] [ location . file ] = piece piece . location = location
|
Places the piece at the given location.
|
3,918
|
def move_piece ( self , initial , final ) : self . place_piece_at_square ( self . piece_at_square ( initial ) , final ) self . remove_piece_at_square ( initial )
|
Moves piece from one location to another
|
3,919
|
def update ( self , move ) : if move is None : raise TypeError ( "Move cannot be type None" ) if self . king_loc_dict is not None and isinstance ( move . piece , King ) : self . king_loc_dict [ move . color ] = move . end_loc for square in self : pawn = square if isinstance ( pawn , Pawn ) : pawn . just_moved_two_steps = False if type ( move . piece ) is King or type ( move . piece ) is Rook : move . piece . has_moved = True elif move . status == notation_const . MOVEMENT and isinstance ( move . piece , Pawn ) and fabs ( move . end_loc . rank - move . start_loc . rank ) == 2 : move . piece . just_moved_two_steps = True if move . status == notation_const . KING_SIDE_CASTLE : self . move_piece ( Location ( move . end_loc . rank , 7 ) , Location ( move . end_loc . rank , 5 ) ) self . piece_at_square ( Location ( move . end_loc . rank , 5 ) ) . has_moved = True elif move . status == notation_const . QUEEN_SIDE_CASTLE : self . move_piece ( Location ( move . end_loc . rank , 0 ) , Location ( move . end_loc . rank , 3 ) ) self . piece_at_square ( Location ( move . end_loc . rank , 3 ) ) . has_moved = True elif move . status == notation_const . EN_PASSANT : self . remove_piece_at_square ( Location ( move . start_loc . rank , move . end_loc . file ) ) elif move . status == notation_const . PROMOTE or move . status == notation_const . CAPTURE_AND_PROMOTE : try : self . remove_piece_at_square ( move . start_loc ) self . place_piece_at_square ( move . promoted_to_piece ( move . color , move . end_loc ) , move . end_loc ) except TypeError as e : raise ValueError ( "Promoted to piece cannot be None in Move {}\n{}" . format ( repr ( move ) , e ) ) return self . move_piece ( move . piece . location , move . end_loc )
|
Updates position by applying selected move
|
3,920
|
def clone_subgraphs ( self , g ) : if not isinstance ( g , CGRContainer ) : raise InvalidData ( 'only CGRContainer acceptable' ) r_group = [ ] x_group = { } r_group_clones = [ ] newcomponents = [ ] components , lost_bonds , term_atoms = self . __split_graph ( g ) lost_map = { x : y for x , y in lost_bonds } x_terminals = set ( lost_map . values ( ) ) r_terminals = set ( lost_map ) for i in components : x_terminal_atom = x_terminals . intersection ( i ) if x_terminal_atom : x_group [ x_terminal_atom . pop ( ) ] = i continue r_terminal_atom = r_terminals . intersection ( i ) if r_terminal_atom : r_group . append ( [ r_terminal_atom , i ] ) continue newcomponents . append ( i ) tmp = g for i in newcomponents : for k , j in r_group : gm = GraphMatcher ( j , i , node_match = self . __node_match_products , edge_match = self . __edge_match_products ) mapping = next ( ( x for x in gm . subgraph_isomorphisms_iter ( ) if k . issubset ( x ) and all ( x [ y ] in term_atoms for y in k ) ) , None ) if mapping : r_group_clones . append ( [ k , mapping ] ) tmp = compose ( tmp , self . __remap_group ( j , tmp , mapping ) [ 0 ] ) break for i , j in r_group_clones : for k in i : remappedgroup , mapping = self . __remap_group ( x_group [ lost_map [ k ] ] , tmp , { } ) tmp = CGRcore . union ( tmp , remappedgroup ) tmp . add_edge ( j [ k ] , mapping [ lost_map [ k ] ] , s_bond = 1 , sp_bond = ( 1 , None ) ) if r_group_clones : tmp . meta . update ( g . meta ) return tmp return tmp . copy ( )
|
Searches for bond breaks and creations.
|
3,921
|
def __get_substitution_paths ( g ) : for n , nbrdict in g . adjacency ( ) : for m , l in combinations ( nbrdict , 2 ) : nms = nbrdict [ m ] [ 'sp_bond' ] nls = nbrdict [ l ] [ 'sp_bond' ] if nms == ( 1 , None ) and nls == ( None , 1 ) : yield m , n , l elif nms == ( None , 1 ) and nls == ( 1 , None ) : yield l , n , m
|
Gets atom paths from the detached atom to the attached one.
|
3,922
|
def _add_crud ( self , model_data , object_type , results ) : model = model_registry . get_model ( model_data [ 'name' ] ) field_name = model_data . get ( 'field' ) verbose_name = model_data . get ( 'verbose_name' , model . Meta . verbose_name_plural ) category = model_data . get ( 'category' , settings . DEFAULT_OBJECT_CATEGORY_NAME ) wf_dict = { "text" : verbose_name , "wf" : model_data . get ( 'wf' , "crud" ) , "model" : model_data [ 'name' ] , "kategori" : category } if field_name : wf_dict [ 'param' ] = field_name results [ object_type ] . append ( wf_dict ) self . _add_to_quick_menu ( wf_dict [ 'model' ] , wf_dict )
|
Creates a menu entry for the given model data. Updates results in place.
|
3,923
|
def _get_workflow_menus ( self ) : results = defaultdict ( list ) from zengine . lib . cache import WFSpecNames for name , title , category in WFSpecNames ( ) . get_or_set ( ) : if self . current . has_permission ( name ) and category != 'hidden' : wf_dict = { "text" : title , "wf" : name , "kategori" : category , "param" : "id" } results [ 'other' ] . append ( wf_dict ) self . _add_to_quick_menu ( name , wf_dict ) return results
|
Creates menu entries for custom workflows .
|
3,924
|
def connect ( self ) : if self . connecting : log . info ( 'PikaClient: Already connecting to RabbitMQ' ) return log . info ( 'PikaClient: Connecting to RabbitMQ' ) self . connecting = True self . connection = TornadoConnection ( NON_BLOCKING_MQ_PARAMS , stop_ioloop_on_close = False , custom_ioloop = self . io_loop , on_open_callback = self . on_connected )
|
Creates connection to RabbitMQ server
|
3,925
|
def on_connected ( self , connection ) : log . info ( 'PikaClient: connected to RabbitMQ' ) self . connected = True self . in_channel = self . connection . channel ( self . on_channel_open )
|
AMQP connection callback. Creates the input channel.
|
3,926
|
def on_channel_open ( self , channel ) : self . in_channel . exchange_declare ( exchange = 'input_exc' , type = 'topic' , durable = True ) channel . queue_declare ( callback = self . on_input_queue_declare , queue = self . INPUT_QUEUE_NAME )
|
Input channel creation callback; queue declaration is done here.
|
3,927
|
def wsgi_app ( self , request ) : try : if request . method != 'POST' : abort ( 400 ) try : data = request . data if isinstance ( data , str ) : body = json . loads ( data ) else : body = json . loads ( data . decode ( 'utf-8' ) ) except ValueError : abort ( 400 ) if self . validate : valid_cert = util . validate_request_certificate ( request . headers , request . data ) valid_ts = util . validate_request_timestamp ( body ) if not valid_cert or not valid_ts : log . error ( 'failed to validate request' ) abort ( 403 ) resp_obj = self . alexa . dispatch_request ( body ) return Response ( response = json . dumps ( resp_obj , indent = 4 ) , status = 200 , mimetype = 'application/json' ) except HTTPException as exc : log . exception ( 'Failed to handle request' ) return exc
|
Incoming request handler .
|
3,928
|
def close ( self , force = False ) : if self . __write : self . write = self . __write_adhoc self . __write = False if not self . _is_buffer or force : self . _file . close ( )
|
Closes the opened file.
|
3,929
|
def aromatize ( self ) : rings = [ x for x in self . sssr if 4 < len ( x ) < 7 ] if not rings : return 0 total = 0 while True : c = self . _quinonize ( rings , 'order' ) if c : total += c elif total : break c = self . _aromatize ( rings , 'order' ) if not c : break total += c if total : self . flush_cache ( ) return total
|
Converts the structure to aromatic form.
|
3,930
|
def close ( self , * args , ** kwargs ) : if not self . __finalized : self . _file . write ( '</cml>' ) self . __finalized = True super ( ) . close ( * args , ** kwargs )
|
Writes the close tag of the MRV file and closes the opened file.
|
3,931
|
def write ( self , data ) : self . _file . write ( '<cml>' ) self . __write ( data ) self . write = self . __write
|
Writes a single molecule or reaction into the file.
|
3,932
|
def get_tasks ( current ) : STATE_DICT = { 'active' : [ 20 , 30 ] , 'future' : 10 , 'finished' : 40 , 'expired' : 90 } state = STATE_DICT [ current . input [ 'state' ] ] if isinstance ( state , list ) : queryset = TaskInvitation . objects . filter ( progress__in = state ) else : queryset = TaskInvitation . objects . filter ( progress = state ) if 'inverted' in current . input : allowed_workflows = [ bpmn_wf . name for bpmn_wf in BPMNWorkflow . objects . all ( ) if current . has_permission ( bpmn_wf . name ) ] queryset = queryset . exclude ( role_id = current . role_id ) . filter ( wf_name__in = allowed_workflows ) else : queryset = queryset . filter ( role_id = current . role_id ) if 'query' in current . input : queryset = queryset . filter ( search_data__contains = current . input [ 'query' ] . lower ( ) ) if 'wf_type' in current . input : queryset = queryset . filter ( wf_name = current . input [ 'wf_type' ] ) if 'start_date' in current . input : queryset = queryset . filter ( start_date__gte = datetime . strptime ( current . input [ 'start_date' ] , "%d.%m.%Y" ) ) if 'finish_date' in current . input : queryset = queryset . filter ( finish_date__lte = datetime . strptime ( current . input [ 'finish_date' ] , "%d.%m.%Y" ) ) current . output [ 'task_list' ] = [ { 'token' : inv . instance . key , 'key' : inv . key , 'title' : inv . title , 'wf_type' : inv . wf_name , 'state' : inv . progress , 'start_date' : format_date ( inv . start_date ) , 'finish_date' : format_date ( inv . finish_date ) , 'description' : inv . instance . wf . description , 'status' : inv . ownership } for inv in queryset ] task_inv_list = TaskInvitation . objects . filter ( role_id = current . role_id ) current . output [ 'task_count' ] = { 'active' : task_inv_list . filter ( progress__in = STATE_DICT [ 'active' ] ) . count ( ) , 'future' : task_inv_list . filter ( progress = STATE_DICT [ 'future' ] ) . count ( ) , 'finished' : task_inv_list . filter ( progress = STATE_DICT [ 'finished' ] ) . count ( ) , 'expired' : task_inv_list . filter ( progress = STATE_DICT [ 'expired' ] ) . count ( ) }
|
List task invitations of current user
|
3,933
|
def reduce_memory_usage ( df ) : usage_pre = df . memory_usage ( deep = True ) . sum ( ) if "runIDs" in df : df . loc [ : , "runIDs" ] = df . loc [ : , "runIDs" ] . astype ( "category" ) df_int = df . select_dtypes ( include = [ 'int' ] ) df_float = df . select_dtypes ( include = [ 'float' ] ) df . loc [ : , df_int . columns ] = df_int . apply ( pd . to_numeric , downcast = 'unsigned' ) df . loc [ : , df_float . columns ] = df_float . apply ( pd . to_numeric , downcast = 'float' ) usage_post = df . memory_usage ( deep = True ) . sum ( ) logging . info ( "Reduced DataFrame memory usage from {}Mb to {}Mb" . format ( usage_pre / 1024 ** 2 , usage_post / 1024 ** 2 ) ) if usage_post > 4e9 and "readIDs" in df : logging . info ( "DataFrame of features is too big, dropping read identifiers." ) return df . drop ( [ "readIDs" ] , axis = 1 , errors = "ignore" ) else : return df
|
Reduces the memory usage of the DataFrame.
|
3,934
|
def check_existance ( f ) : if not opath . isfile ( f ) : logging . error ( "Nanoget: File provided doesn't exist or the path is incorrect: {}" . format ( f ) ) sys . exit ( "File provided doesn't exist or the path is incorrect: {}" . format ( f ) )
|
Check if the file supplied as input exists .
|
3,935
|
def list_user_roles ( self ) : _form = JsonForm ( current = self . current , title = _ ( u"Switch Role" ) ) _form . help_text = "Your current role: %s %s" % ( self . current . role . unit . name , self . current . role . abstract_role . name ) switch_roles = self . get_user_switchable_roles ( ) _form . role_options = fields . Integer ( _ ( u"Please, choose the role you want to switch:" ) , choices = switch_roles , default = switch_roles [ 0 ] [ 0 ] , required = True ) _form . switch = fields . Button ( _ ( u"Switch" ) ) self . form_out ( _form )
|
Lists user roles as selectable, except the user's current role.
|
3,936
|
def change_user_role ( self ) : role_key = self . input [ 'form' ] [ 'role_options' ] self . current . user . last_login_role_key = role_key self . current . user . save ( ) auth = AuthBackend ( self . current ) auth . set_user ( self . current . user ) self . current . output [ 'cmd' ] = 'reload'
|
Changes the user's role from the current role to the chosen role.
|
3,937
|
def json_dumps ( self , obj ) : return json . dumps ( obj , sort_keys = True , indent = 4 , separators = ( ',' , ': ' ) )
|
Serializer for consistency
|
3,938
|
def safe_filename ( self , otype , oid ) : permitted = set ( [ '_' , '-' , '(' , ')' ] ) oid = '' . join ( [ c for c in oid if c . isalnum ( ) or c in permitted ] ) while oid . find ( '--' ) != - 1 : oid = oid . replace ( '--' , '-' ) ext = 'json' ts = datetime . now ( ) . strftime ( "%Y%m%dT%H%M%S" ) fname = '' is_new = False while not is_new : oid_len = 255 - len ( '%s--%s.%s' % ( otype , ts , ext ) ) fname = '%s-%s-%s.%s' % ( otype , oid [ : oid_len ] , ts , ext ) is_new = True if os . path . exists ( fname ) : is_new = False ts += '-bck' return fname
|
Sanitizes the object name into a filename and verifies it doesn't already exist.
|
3,939
|
def write_pkg_to_file ( self , name , objects , path = '.' , filename = None ) : pkg_objs = [ ] for _ , obj in iteritems ( objects ) : pkg_objs . append ( obj ) sorted_pkg = sorted ( pkg_objs , key = lambda k : k [ '_id' ] ) output = self . json_dumps ( sorted_pkg ) + '\n' if filename is None : filename = self . safe_filename ( 'Pkg' , name ) filename = os . path . join ( path , filename ) self . pr_inf ( "Writing to file: " + filename ) with open ( filename , 'w' ) as f : f . write ( output ) return filename
|
Writes a list of related objects to a file.
|
3,940
|
def get_dashboard_full ( self , db_name ) : objects = { } dashboards = self . get_objects ( "type" , "dashboard" ) vizs = self . get_objects ( "type" , "visualization" ) searches = self . get_objects ( "type" , "search" ) if db_name not in dashboards : return None self . pr_inf ( "Found dashboard: " + db_name ) objects [ db_name ] = dashboards [ db_name ] panels = json . loads ( dashboards [ db_name ] [ '_source' ] [ 'panelsJSON' ] ) for panel in panels : if 'id' not in panel : continue pid = panel [ 'id' ] if pid in searches : self . pr_inf ( "Found search: " + pid ) objects [ pid ] = searches [ pid ] elif pid in vizs : self . pr_inf ( "Found vis: " + pid ) objects [ pid ] = vizs [ pid ] emb = vizs [ pid ] . get ( '_source' , { } ) . get ( 'savedSearchId' , None ) if emb is not None and emb not in objects : if emb not in searches : self . pr_err ( 'Missing search %s' % emb ) return objects objects [ emb ] = searches [ emb ] return objects
|
Gets the dashboard and all objects needed to duplicate it.
|
3,941
|
def parse_node ( self , node ) : spec = super ( CamundaProcessParser , self ) . parse_node ( node ) spec . data = self . _parse_input_data ( node ) spec . data [ 'lane_data' ] = self . _get_lane_properties ( node ) spec . defines = spec . data service_class = node . get ( full_attr ( 'assignee' ) ) if service_class : self . parsed_nodes [ node . get ( 'id' ) ] . service_class = node . get ( full_attr ( 'assignee' ) ) return spec
|
Overrides ProcessParser.parse_node. Parses and attaches the inputOutput tags created by the Camunda Modeler.
|
3,942
|
def _get_lane_properties ( self , node ) : lane_name = self . get_lane ( node . get ( 'id' ) ) lane_data = { 'name' : lane_name } for a in self . xpath ( ".//bpmn:lane[@name='%s']/*/*/" % lane_name ) : lane_data [ a . attrib [ 'name' ] ] = a . attrib [ 'value' ] . strip ( ) return lane_data
|
Parses the given XML node
|
3,943
|
def package_in_memory ( cls , workflow_name , workflow_files ) : s = StringIO ( ) p = cls ( s , workflow_name , meta_data = [ ] ) p . add_bpmn_files_by_glob ( workflow_files ) p . create_package ( ) return s . getvalue ( )
|
Generates wf packages from workflow diagrams .
|
3,944
|
def compose ( self , data ) : g = self . __separate ( data ) if self . __cgr_type in ( 1 , 2 , 3 , 4 , 5 , 6 ) else self . __condense ( data ) g . meta . update ( data . meta ) return g
|
Condenses the reaction container to a CGR; see init for details about cgr_type.
|
3,945
|
def add_atom ( self , atom , _map = None ) : if _map is None : _map = max ( self , default = 0 ) + 1 elif _map in self . _node : raise KeyError ( 'atom with same number exists' ) attr_dict = self . node_attr_dict_factory ( ) if isinstance ( atom , str ) : attr_dict . element = atom elif isinstance ( atom , int ) : attr_dict . element = elements_list [ atom - 1 ] else : attr_dict . update ( atom ) self . _adj [ _map ] = self . adjlist_inner_dict_factory ( ) self . _node [ _map ] = attr_dict self . flush_cache ( ) return _map
|
Adds a new atom.
|
3,946
|
def add_bond ( self , atom1 , atom2 , bond ) : if atom1 == atom2 : raise KeyError ( 'atom loops impossible' ) if atom1 not in self . _node or atom2 not in self . _node : raise KeyError ( 'atoms not found' ) if atom1 in self . _adj [ atom2 ] : raise KeyError ( 'atoms already bonded' ) attr_dict = self . edge_attr_dict_factory ( ) if isinstance ( bond , int ) : attr_dict . order = bond else : attr_dict . update ( bond ) self . _adj [ atom1 ] [ atom2 ] = self . _adj [ atom2 ] [ atom1 ] = attr_dict self . flush_cache ( )
|
implementation of bond addition
|
3,947
|
def delete_bond ( self , n , m ) : self . remove_edge ( n , m ) self . flush_cache ( )
|
Implementation of bond removal.
|
3,948
|
def augmented_substructure ( self , atoms , dante = False , deep = 1 , meta = False , as_view = True ) : nodes = [ set ( atoms ) ] for i in range ( deep ) : n = { y for x in nodes [ - 1 ] for y in self . _adj [ x ] } | nodes [ - 1 ] if n in nodes : break nodes . append ( n ) if dante : return [ self . substructure ( a , meta , as_view ) for a in nodes ] else : return self . substructure ( nodes [ - 1 ] , meta , as_view )
|
Creates a substructure containing the given atoms and their neighbors.
|
3,949
|
def split ( self , meta = False ) : return [ self . substructure ( c , meta , False ) for c in connected_components ( self ) ]
|
Splits a disconnected structure into connected substructures.
|
3,950
|
def bonds ( self ) : seen = set ( ) for n , m_bond in self . _adj . items ( ) : seen . add ( n ) for m , bond in m_bond . items ( ) : if m not in seen : yield n , m , bond
|
Iterates over all bonds.
|
3,951
|
def _get_subclass ( name ) : return next ( x for x in BaseContainer . __subclasses__ ( ) if x . __name__ == name )
|
Needed to resolve a cyclic import.
|
3,952
|
def _default_make_pool ( http , proxy_info ) : if not http . ca_certs : http . ca_certs = _certifi_where_for_ssl_version ( ) ssl_disabled = http . disable_ssl_certificate_validation cert_reqs = 'CERT_REQUIRED' if http . ca_certs and not ssl_disabled else None if isinstance ( proxy_info , collections . Callable ) : proxy_info = proxy_info ( ) if proxy_info : if proxy_info . proxy_user and proxy_info . proxy_pass : proxy_url = 'http://{}:{}@{}:{}/' . format ( proxy_info . proxy_user , proxy_info . proxy_pass , proxy_info . proxy_host , proxy_info . proxy_port , ) proxy_headers = urllib3 . util . request . make_headers ( proxy_basic_auth = '{}:{}' . format ( proxy_info . proxy_user , proxy_info . proxy_pass , ) ) else : proxy_url = 'http://{}:{}/' . format ( proxy_info . proxy_host , proxy_info . proxy_port , ) proxy_headers = { } return urllib3 . ProxyManager ( proxy_url = proxy_url , proxy_headers = proxy_headers , ca_certs = http . ca_certs , cert_reqs = cert_reqs , ) return urllib3 . PoolManager ( ca_certs = http . ca_certs , cert_reqs = cert_reqs , )
|
Creates a urllib3.PoolManager object that has SSL verification enabled and uses the certifi certificates.
|
3,953
|
def patch ( make_pool = _default_make_pool ) : setattr ( httplib2 , '_HttpOriginal' , httplib2 . Http ) httplib2 . Http = Http Http . _make_pool = make_pool
|
Monkey-patches httplib2.Http to be httplib2shim.Http.
|
3,954
|
def _is_ipv6 ( addr ) : try : socket . inet_pton ( socket . AF_INET6 , addr ) return True except socket . error : return False
|
Checks if a given address is an IPv6 address .
|
3,955
|
def _certifi_where_for_ssl_version ( ) : if not ssl : return if ssl . OPENSSL_VERSION_INFO < ( 1 , 0 , 2 ) : warnings . warn ( 'You are using an outdated version of OpenSSL that ' 'can\'t use stronger root certificates.' ) return certifi . old_where ( ) return certifi . where ( )
|
Gets the right location of the certifi certificates for the current SSL version.
|
3,956
|
def _map_exception ( e ) : if isinstance ( e , urllib3 . exceptions . MaxRetryError ) : if not e . reason : return e e = e . reason message = e . args [ 0 ] if e . args else '' if isinstance ( e , urllib3 . exceptions . ResponseError ) : if 'too many redirects' in message : return httplib2 . RedirectLimit ( message ) if isinstance ( e , urllib3 . exceptions . NewConnectionError ) : if ( 'Name or service not known' in message or 'nodename nor servname provided, or not known' in message ) : return httplib2 . ServerNotFoundError ( 'Unable to find hostname.' ) if 'Connection refused' in message : return socket . error ( ( errno . ECONNREFUSED , 'Connection refused' ) ) if isinstance ( e , urllib3 . exceptions . DecodeError ) : return httplib2 . FailedToDecompressContent ( 'Content purported as compressed but not uncompressable.' , httplib2 . Response ( { 'status' : 500 } ) , '' ) if isinstance ( e , urllib3 . exceptions . TimeoutError ) : return socket . timeout ( 'timed out' ) if isinstance ( e , urllib3 . exceptions . SSLError ) : return ssl . SSLError ( * e . args ) return e
|
Maps an exception from urllib3 to httplib2.
|
3,957
|
def exit ( self , signal = None , frame = None ) : self . input_channel . close ( ) self . client_queue . close ( ) self . connection . close ( ) log . info ( "Worker exiting" ) sys . exit ( 0 )
|
Properly close the AMQP connections
|
3,958
|
def connect ( self ) : self . connection = pika . BlockingConnection ( BLOCKING_MQ_PARAMS ) self . client_queue = ClientQueue ( ) self . input_channel = self . connection . channel ( ) self . input_channel . exchange_declare ( exchange = self . INPUT_EXCHANGE , type = 'topic' , durable = True ) self . input_channel . queue_declare ( queue = self . INPUT_QUEUE_NAME ) self . input_channel . queue_bind ( exchange = self . INPUT_EXCHANGE , queue = self . INPUT_QUEUE_NAME ) log . info ( "Bind to queue named '%s' queue with exchange '%s'" % ( self . INPUT_QUEUE_NAME , self . INPUT_EXCHANGE ) )
|
Makes the AMQP connection and creates the channels and queue binding.
|
3,959
|
def clear_queue ( self ) : def remove_message ( ch , method , properties , body ) : print ( "Removed message: %s" % body ) self . input_channel . basic_consume ( remove_message , queue = self . INPUT_QUEUE_NAME , no_ack = True ) try : self . input_channel . start_consuming ( ) except ( KeyboardInterrupt , SystemExit ) : log . info ( " Exiting" ) self . exit ( )
|
Clears out all messages from INPUT_QUEUE_NAME.
|
3,960
|
def run ( self ) : self . input_channel . basic_consume ( self . handle_message , queue = self . INPUT_QUEUE_NAME , no_ack = True ) try : self . input_channel . start_consuming ( ) except ( KeyboardInterrupt , SystemExit ) : log . info ( " Exiting" ) self . exit ( )
|
Actual consuming of incoming work starts here.
|
3,961
|
def handle_message ( self , ch , method , properties , body ) : input = { } headers = { } try : self . sessid = method . routing_key input = json_decode ( body ) data = input [ 'data' ] if 'path' in data : if data [ 'path' ] in VIEW_METHODS : data [ 'view' ] = data [ 'path' ] else : data [ 'wf' ] = data [ 'path' ] session = Session ( self . sessid ) headers = { 'remote_ip' : input [ '_zops_remote_ip' ] , 'source' : input [ '_zops_source' ] } if 'wf' in data : output = self . _handle_workflow ( session , data , headers ) elif 'job' in data : self . _handle_job ( session , data , headers ) return else : output = self . _handle_view ( session , data , headers ) except HTTPError as e : import sys if hasattr ( sys , '_called_from_test' ) : raise output = { "cmd" : "error" , "error" : self . _prepare_error_msg ( e . message ) , "code" : e . code } log . exception ( "Http error occurred" ) except : self . current = Current ( session = session , input = data ) self . current . headers = headers import sys if hasattr ( sys , '_called_from_test' ) : raise err = traceback . format_exc ( ) output = { "cmd" : "error" , "error" : self . _prepare_error_msg ( err ) , "code" : 500 } log . exception ( "Worker error occurred with messsage body:\n%s" % body ) if 'callbackID' in input : output [ 'callbackID' ] = input [ 'callbackID' ] log . info ( "OUTPUT for %s: %s" % ( self . sessid , output ) ) output [ 'reply_timestamp' ] = time ( ) self . send_output ( output )
|
This is a pika basic_consume callback; it handles client inputs and runs the appropriate workflows and views.
|
3,962
|
def sync_wf_cache ( current ) : wf_cache = WFCache ( current ) wf_state = wf_cache . get ( ) if 'role_id' in wf_state : try : wfi = WFInstance . objects . get ( key = current . input [ 'token' ] ) except ObjectDoesNotExist : wfi = WFInstance ( key = current . input [ 'token' ] ) wfi . wf = BPMNWorkflow . objects . get ( name = wf_state [ 'name' ] ) if not wfi . current_actor . exist : try : inv = TaskInvitation . objects . get ( instance = wfi , role_id = wf_state [ 'role_id' ] ) inv . delete_other_invitations ( ) inv . progress = 20 inv . save ( ) except ObjectDoesNotExist : current . log . exception ( "Invitation not found: %s" % wf_state ) except MultipleObjectsReturned : current . log . exception ( "Multiple invitations found: %s" % wf_state ) wfi . step = wf_state [ 'step' ] wfi . name = wf_state [ 'name' ] wfi . pool = wf_state [ 'pool' ] wfi . current_actor_id = str ( wf_state [ 'role_id' ] ) wfi . data = wf_state [ 'data' ] if wf_state [ 'finished' ] : wfi . finished = True wfi . finish_date = wf_state [ 'finish_date' ] wf_cache . delete ( ) wfi . save ( ) else : pass
|
Background job for storing the WF state to the DB.
|
3,963
|
def get_description ( self ) : paths = [ 'bpmn:collaboration/bpmn:participant/bpmn:documentation' , 'bpmn:collaboration/bpmn:documentation' , 'bpmn:process/bpmn:documentation' ] for path in paths : elm = self . root . find ( path , NS ) if elm is not None and elm . text : return elm . text
|
Tries to get the WF description from the collaboration, process, or participant.
|
3,964
|
def create_tasks ( self ) : roles = self . get_roles ( ) if self . task_type in [ "A" , "D" ] : instances = self . create_wf_instances ( roles = roles ) self . create_task_invitation ( instances ) elif self . task_type in [ "C" , "B" ] : instances = self . create_wf_instances ( ) self . create_task_invitation ( instances , roles )
|
Creates a WFInstance per object and a TaskInvitation for each role and WFInstance.
|
3,965
|
def get_model_objects ( model , wfi_role = None , ** kwargs ) : query_dict = { } for k , v in kwargs . items ( ) : if isinstance ( v , list ) : query_dict [ k ] = [ str ( x ) for x in v ] else : parse = str ( v ) . split ( '.' ) if parse [ 0 ] == 'role' and wfi_role : query_dict [ k ] = wfi_role for i in range ( 1 , len ( parse ) ) : query_dict [ k ] = query_dict [ k ] . __getattribute__ ( parse [ i ] ) else : query_dict [ k ] = parse [ 0 ] return model . objects . all ( ** query_dict )
|
Fetches model objects by filtering with kwargs
|
3,966
|
def post_save ( self ) : if self . run : self . run = False self . create_tasks ( ) self . save ( )
|
Can be removed when a proper task manager admin interface is implemented.
|
3,967
|
def delete_other_invitations ( self ) : self . objects . filter ( instance = self . instance ) . exclude ( key = self . key ) . delete ( )
|
When one person uses an invitation, the other invitations should be deleted.
|
3,968
|
def save ( self , wf_state ) : self . wf_state = wf_state self . wf_state [ 'role_id' ] = self . current . role_id self . set ( self . wf_state ) if self . wf_state [ 'name' ] not in settings . EPHEMERAL_WORKFLOWS : self . publish ( job = '_zops_sync_wf_cache' , token = self . db_key )
|
Writes the WF state to the DB through MQ >> Worker >> _zops_sync_wf_cache.
|
3,969
|
def send_to_prv_exchange ( self , user_id , message = None ) : exchange = 'prv_%s' % user_id . lower ( ) msg = json . dumps ( message , cls = ZEngineJSONEncoder ) log . debug ( "Sending following users \"%s\" exchange:\n%s " % ( exchange , msg ) ) self . get_channel ( ) . publish ( exchange = exchange , routing_key = '' , body = msg )
|
Sends messages through the logged-in user's private exchange.
|
3,970
|
def decompose ( self ) : mc = self . _get_subclass ( 'MoleculeContainer' ) reactants = mc ( ) products = mc ( ) for n , atom in self . atoms ( ) : reactants . add_atom ( atom . _reactant , n ) products . add_atom ( atom . _product , n ) for n , m , bond in self . bonds ( ) : if bond . _reactant is not None : reactants . add_bond ( n , m , bond . _reactant ) if bond . _product is not None : products . add_bond ( n , m , bond . _product ) return reactants , products
|
Decomposes the CGR into a pair of Molecules representing the reactant and product states of the reaction.
|
3,971
|
def size_history ( self , size_data ) : def my_merge ( df1 , df2 ) : res = pd . merge ( df1 , df2 , how = 'outer' , left_index = True , right_index = True ) cols = sorted ( res . columns ) pairs = [ ] for col1 , col2 in zip ( cols [ : - 1 ] , cols [ 1 : ] ) : if col1 . endswith ( '_x' ) and col2 . endswith ( '_y' ) : pairs . append ( ( col1 , col2 ) ) for col1 , col2 in pairs : res [ col1 [ : - 2 ] ] = res [ col1 ] . combine_first ( res [ col2 ] ) res = res . drop ( [ col1 , col2 ] , axis = 1 ) return res dfs_key = [ ] for name , group in size_data . groupby ( 'key' ) : dfs = [ ] for row in group . itertuples ( ) : dates = pd . date_range ( start = row . fromDate , end = row . toDate ) sizes = [ row . size ] * len ( dates ) data = { 'date' : dates , 'size' : sizes } df2 = pd . DataFrame ( data , columns = [ 'date' , 'size' ] ) pd . to_datetime ( df2 [ 'date' ] , format = ( '%Y-%m-%d' ) ) df2 . set_index ( [ 'date' ] , inplace = True ) dfs . append ( df2 ) df_key = ( reduce ( my_merge , dfs ) ) df_key . columns = [ name if x == 'size' else x for x in df_key . columns ] dfs_key . append ( df_key ) df_all = ( reduce ( my_merge , dfs_key ) ) mykeys = df_all . columns . values . tolist ( ) mykeys . sort ( key = lambda x : x . split ( '-' ) [ 0 ] + '-' + str ( int ( x . split ( '-' ) [ 1 ] ) ) . zfill ( 6 ) ) df_all = df_all [ mykeys ] start , end = df_all . index . min ( ) , df_all . index . max ( ) df_all = df_all . reindex ( pd . date_range ( start , end , freq = 'D' ) , method = 'ffill' ) return df_all
|
Returns a DataFrame indexed by day, with columns containing the story size for each issue.
|
3,972
|
def histogram ( self , cycle_data , bins = 10 ) : values , edges = np . histogram ( cycle_data [ 'cycle_time' ] . astype ( 'timedelta64[D]' ) . dropna ( ) , bins = bins ) index = [ ] for i , v in enumerate ( edges ) : if i == 0 : continue index . append ( "%.01f to %.01f" % ( edges [ i - 1 ] , edges [ i ] , ) ) return pd . Series ( values , name = "Items" , index = index )
|
Returns histogram data for the cycle times in cycle_data, as a dictionary with keys bin_values and bin_edges containing numpy arrays.
|
3,973
|
def scatterplot ( self , cycle_data ) : columns = list ( cycle_data . columns ) columns . remove ( 'cycle_time' ) columns . remove ( 'completed_timestamp' ) columns = [ 'completed_timestamp' , 'cycle_time' ] + columns data = ( cycle_data [ columns ] . dropna ( subset = [ 'cycle_time' , 'completed_timestamp' ] ) . rename ( columns = { 'completed_timestamp' : 'completed_date' } ) ) data [ 'cycle_time' ] = data [ 'cycle_time' ] . astype ( 'timedelta64[D]' ) data [ 'completed_date' ] = data [ 'completed_date' ] . map ( pd . Timestamp . date ) return data
|
Returns scatterplot data for the cycle times in cycle_data: a DataFrame containing only those items in cycle_data where values are set for completed_timestamp and cycle_time, with those two columns as the first two (normalised to whole days) and with completed_timestamp renamed to completed_date.
|
3,974
|
def _is_ready ( self , topic_name ) : url = 'http://%s/stats?format=json&topic=%s' % ( self . nsqd_http_address , topic_name ) if '#' in topic_name : topic_name , tag = topic_name . split ( "#" , 1 ) try : data = self . session . get ( url ) . json ( ) topics = data . get ( 'topics' , [ ] ) topics = [ t for t in topics if t [ 'topic_name' ] == topic_name ] if not topics : raise Exception ( 'topic_missing_at_nsq' ) topic = topics [ 0 ] depth = topic [ 'depth' ] depth += sum ( c . get ( 'depth' , 0 ) for c in topic [ 'channels' ] ) self . log . debug ( 'nsq_depth_check' , topic = topic_name , depth = depth , max_depth = self . nsq_max_depth ) if depth < self . nsq_max_depth : return else : raise Exception ( 'nsq_is_full_waiting_to_clear' ) except : raise
|
Is NSQ running, and does it have space to receive messages?
|
3,975
|
def centers_list ( self ) : center = set ( ) adj = defaultdict ( set ) for n , atom in self . atoms ( ) : if atom . _reactant != atom . _product : center . add ( n ) for n , m , bond in self . bonds ( ) : if bond . _reactant != bond . _product : adj [ n ] . add ( m ) adj [ m ] . add ( n ) center . add ( n ) center . add ( m ) out = [ ] while center : n = center . pop ( ) if n in adj : c = set ( self . __plain_bfs ( adj , n ) ) out . append ( list ( c ) ) center . difference_update ( c ) else : out . append ( [ n ] ) return out
|
Gets a list of lists of atoms of the reaction centers.
|
3,976
|
def _matcher ( self , other ) : if isinstance ( other , CGRContainer ) : return GraphMatcher ( other , self , lambda x , y : x == y , lambda x , y : x == y ) raise TypeError ( 'only cgr-cgr possible' )
|
CGRContainer < CGRContainer
|
3,977
|
def __plain_bfs ( adj , source ) : seen = set ( ) nextlevel = { source } while nextlevel : thislevel = nextlevel nextlevel = set ( ) for v in thislevel : if v not in seen : yield v seen . add ( v ) nextlevel . update ( adj [ v ] )
|
Modified NetworkX fast BFS node generator.
|
3,978
|
def token ( self ) : if self . _token is None : token_type = os . getenv ( TOKEN_TYPE_KEY , '' ) token_body = os . getenv ( TOKEN_BODY_KEY , '' ) self . _token = _Token ( token_type , token_body ) return self . _token
|
Returns authorization token provided by Cocaine .
|
3,979
|
def _send ( self ) : buff = BytesIO ( ) while True : msgs = list ( ) try : msg = yield self . queue . get ( ) if not self . _connected : yield self . connect ( ) try : while True : msgs . append ( msg ) counter = next ( self . counter ) msgpack_pack ( [ counter , EMIT , msg ] , buff ) msg = self . queue . get_nowait ( ) except queues . QueueEmpty : pass try : yield self . pipe . write ( buff . getvalue ( ) ) except Exception : pass buff . truncate ( 0 ) except Exception : for message in msgs : self . _log_to_fallback ( message )
|
Sends a message lazily formatted with args. External log attributes can be passed via the named attribute extra, as in logging from the standard library.
|
3,980
|
def moves_in_direction ( self , direction , position ) : current_square = self . location while True : try : current_square = direction ( current_square ) except IndexError : return if self . contains_opposite_color_piece ( current_square , position ) : yield self . create_move ( current_square , notation_const . CAPTURE ) if not position . is_square_empty ( current_square ) : return yield self . create_move ( current_square , notation_const . MOVEMENT )
|
Finds moves in a given direction
|
3,981
|
def possible_moves ( self , position ) : for move in itertools . chain ( * [ self . moves_in_direction ( fn , position ) for fn in self . cross_fn ] ) : yield move
|
Returns all possible rook moves .
|
3,982
|
def overlap_status ( a , b ) : a1 , a2 = a . min ( ) , a . max ( ) b1 , b2 = b . min ( ) , b . max ( ) if a1 >= b1 and a2 <= b2 : result = 'full' elif a2 < b1 or b2 < a1 : result = 'none' else : result = 'partial' return result
|
Check overlap between two arrays .
|
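A small illustration of the three return values of the overlap_status helper above, assuming NumPy arrays as inputs.

```python
import numpy as np

b = np.array([1.0, 5.0])                          # reference range [1, 5]
print(overlap_status(np.array([2.0, 4.0]), b))    # 'full'    - [2, 4] lies inside [1, 5]
print(overlap_status(np.array([6.0, 7.0]), b))    # 'none'    - [6, 7] does not touch [1, 5]
print(overlap_status(np.array([4.0, 6.0]), b))    # 'partial' - [4, 6] overlaps [1, 5] partially
```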
3,983
|
def validate_totalflux ( totalflux ) : if totalflux <= 0.0 : raise exceptions . SynphotError ( 'Integrated flux is <= 0' ) elif np . isnan ( totalflux ) : raise exceptions . SynphotError ( 'Integrated flux is NaN' ) elif np . isinf ( totalflux ) : raise exceptions . SynphotError ( 'Integrated flux is infinite' )
|
Check integrated flux for invalid values .
|
3,984
|
def validate_wavelengths ( wavelengths ) : if isinstance ( wavelengths , u . Quantity ) : units . validate_wave_unit ( wavelengths . unit ) wave = wavelengths . value else : wave = wavelengths if np . isscalar ( wave ) : wave = [ wave ] wave = np . asarray ( wave ) if np . any ( wave <= 0 ) : raise exceptions . ZeroWavelength ( 'Negative or zero wavelength occurs in wavelength array' , rows = np . where ( wave <= 0 ) [ 0 ] ) sorted_wave = np . sort ( wave ) if not np . alltrue ( sorted_wave == wave ) : if np . alltrue ( sorted_wave [ : : - 1 ] == wave ) : pass else : raise exceptions . UnsortedWavelength ( 'Wavelength array is not monotonic' , rows = np . where ( sorted_wave != wave ) [ 0 ] ) if wave . size > 1 : dw = sorted_wave [ 1 : ] - sorted_wave [ : - 1 ] if np . any ( dw == 0 ) : raise exceptions . DuplicateWavelength ( 'Wavelength array contains duplicate entries' , rows = np . where ( dw == 0 ) [ 0 ] )
|
Check wavelengths for synphot compatibility .
|
3,985
|
def generate_wavelengths ( minwave = 500 , maxwave = 26000 , num = 10000 , delta = None , log = True , wave_unit = u . AA ) : wave_unit = units . validate_unit ( wave_unit ) if delta is not None : num = None waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}' . format ( minwave , maxwave , num , delta , log ) if log : logmin = np . log10 ( minwave ) logmax = np . log10 ( maxwave ) if delta is None : waveset = np . logspace ( logmin , logmax , num , endpoint = False ) else : waveset = 10 ** np . arange ( logmin , logmax , delta ) else : if delta is None : waveset = np . linspace ( minwave , maxwave , num , endpoint = False ) else : waveset = np . arange ( minwave , maxwave , delta ) return waveset . astype ( np . float64 ) * wave_unit , waveset_str
|
Generate wavelength array to be used for spectrum sampling .
|
3,986
|
def download_data ( cdbs_root , verbose = True , dry_run = False ) : from . config import conf if not os . path . exists ( cdbs_root ) : os . makedirs ( cdbs_root , exist_ok = True ) if verbose : print ( 'Created {}' . format ( cdbs_root ) ) elif not os . path . isdir ( cdbs_root ) : raise OSError ( '{} must be a directory' . format ( cdbs_root ) ) host = 'http://ssb.stsci.edu/cdbs/' file_list = [ ] if not cdbs_root . endswith ( os . sep ) : cdbs_root += os . sep for cfgitem in conf . __class__ . __dict__ . values ( ) : if ( not isinstance ( cfgitem , ConfigItem ) or not cfgitem . name . endswith ( 'file' ) ) : continue url = cfgitem ( ) if not url . startswith ( host ) : if verbose : print ( '{} is not from {}, skipping download' . format ( url , host ) ) continue dst = url . replace ( host , cdbs_root ) . replace ( '/' , os . sep ) if os . path . exists ( dst ) : if verbose : print ( '{} already exists, skipping download' . format ( dst ) ) continue subdirs = os . path . dirname ( dst ) os . makedirs ( subdirs , exist_ok = True ) if not dry_run : try : src = download_file ( url ) copyfile ( src , dst ) except Exception as exc : print ( 'Download failed - {}' . format ( str ( exc ) ) ) continue file_list . append ( dst ) if verbose : print ( '{} downloaded to {}' . format ( url , dst ) ) return file_list
|
Downloads CDBS data files to the given root directory. The download is skipped if a data file already exists.
|
3,987
|
async def main ( loop ) : pyvlx = PyVLX ( 'pyvlx.yaml' , loop = loop ) await pyvlx . load_scenes ( ) await pyvlx . scenes [ "All Windows Closed" ] . run ( ) await pyvlx . load_nodes ( ) await pyvlx . nodes [ 'Bath' ] . open ( ) await pyvlx . nodes [ 'Bath' ] . close ( ) await pyvlx . nodes [ 'Bath' ] . set_position ( Position ( position_percent = 45 ) ) await pyvlx . disconnect ( )
|
Demonstrate functionality of PyVLX .
|
3,988
|
def add ( self , node ) : if not isinstance ( node , Node ) : raise TypeError ( ) for i , j in enumerate ( self . __nodes ) : if j . node_id == node . node_id : self . __nodes [ i ] = node return self . __nodes . append ( node )
|
Adds a Node, replacing the existing node if a node with the same node_id is present.
|
3,989
|
async def load ( self , node_id = None ) : if node_id is not None : await self . _load_node ( node_id = node_id ) else : await self . _load_all_nodes ( )
|
Loads nodes from the KLF 200; if no node_id is specified, all nodes are loaded.
|
3,990
|
async def _load_node ( self , node_id ) : get_node_information = GetNodeInformation ( pyvlx = self . pyvlx , node_id = node_id ) await get_node_information . do_api_call ( ) if not get_node_information . success : raise PyVLXException ( "Unable to retrieve node information" ) notification_frame = get_node_information . notification_frame node = convert_frame_to_node ( self . pyvlx , notification_frame ) if node is not None : self . add ( node )
|
Load single node via API .
|
3,991
|
async def _load_all_nodes ( self ) : get_all_nodes_information = GetAllNodesInformation ( pyvlx = self . pyvlx ) await get_all_nodes_information . do_api_call ( ) if not get_all_nodes_information . success : raise PyVLXException ( "Unable to retrieve node information" ) self . clear ( ) for notification_frame in get_all_nodes_information . notification_frames : node = convert_frame_to_node ( self . pyvlx , notification_frame ) if node is not None : self . add ( node )
|
Load all nodes via API .
|
3,992
|
def start ( self ) : self . run_task = self . pyvlx . loop . create_task ( self . loop ( ) )
|
Create loop task .
|
3,993
|
async def stop ( self ) : self . stopped = True self . loop_event . set ( ) await self . stopped_event . wait ( )
|
Stop heartbeat .
|
3,994
|
async def loop ( self ) : while not self . stopped : self . timeout_handle = self . pyvlx . connection . loop . call_later ( self . timeout_in_seconds , self . loop_timeout ) await self . loop_event . wait ( ) if not self . stopped : self . loop_event . clear ( ) await self . pulse ( ) self . cancel_loop_timeout ( ) self . stopped_event . set ( )
|
Pulse every timeout seconds until stopped .
|
3,995
|
async def pulse ( self ) : get_state = GetState ( pyvlx = self . pyvlx ) await get_state . do_api_call ( ) if not get_state . success : raise PyVLXException ( "Unable to send get state." )
|
Send get state request to API to keep the connection alive .
|
3,996
|
def string_to_bytes ( string , size ) : if len ( string ) > size : raise PyVLXException ( "string_to_bytes::string_to_large" ) encoded = bytes ( string , encoding = 'utf-8' ) return encoded + bytes ( size - len ( encoded ) )
|
Converts a string to bytes, adding padding.
|
3,997
|
def bytes_to_string ( raw ) : ret = bytes ( ) for byte in raw : if byte == 0x00 : return ret . decode ( "utf-8" ) ret += bytes ( [ byte ] ) return ret . decode ( "utf-8" )
|
Convert bytes to string .
|
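A quick round-trip sketch combining the string_to_bytes and bytes_to_string helpers above; the 32-byte field size is an arbitrary illustrative choice.

```python
# Pad a short label into a fixed-width 32-byte field, then decode it back.
raw = string_to_bytes("Bathroom window", 32)
assert len(raw) == 32                        # zero-padded to the requested size
assert bytes_to_string(raw) == "Bathroom window"
```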
3,998
|
async def house_status_monitor_disable ( pyvlx ) : status_monitor_disable = HouseStatusMonitorDisable ( pyvlx = pyvlx ) await status_monitor_disable . do_api_call ( ) if not status_monitor_disable . success : raise PyVLXException ( "Unable disable house status monitor." )
|
Disable house status monitor .
|
3,999
|
def BitmathType ( bmstring ) : try : argvalue = bitmath . parse_string ( bmstring ) except ValueError : raise argparse . ArgumentTypeError ( "'%s' can not be parsed into a valid bitmath object" % bmstring ) else : return argvalue
|
An argument type for integrations with the argparse module .
|
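A hedged sketch of wiring BitmathType into an argparse parser, assuming the bitmath package is installed and the function above is importable.

```python
import argparse

parser = argparse.ArgumentParser()
# Accept human-readable sizes such as "1024 KiB" or "2 GiB" on the command line.
parser.add_argument("--max-size", type=BitmathType)

args = parser.parse_args(["--max-size", "1024 KiB"])
print(args.max_size)  # a bitmath object representing 1024 KiB
```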