code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# Initialising empty dictionary endpoints = {} for port_element in port_values: target_port = port_element.split(':') for port in target_port: endpoints[int(port)] = int(port) # Setting the types endpoint_spec = docker.types.EndpointSpec(ports=endpoints) return endpoint_spec
def _parse_ports(port_values: dict) -> dict
Parse ports key. Args: port_values (dict): ports configuration values Returns: dict, Ports specification which contains exposed ports
6.42863
6.112506
1.051717
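A minimal sketch of the port handling above, run without Docker: the docker.types.EndpointSpec call is left out and the 'published:target' input value is assumed for illustration only.

def parse_ports(port_values):
    # Mirrors the split/convert loop in _parse_ports, building a plain dict
    # instead of a docker.types.EndpointSpec.
    endpoints = {}
    for port_element in port_values:
        for port in port_element.split(':'):
            endpoints[int(port)] = int(port)
    return endpoints

print(parse_ports(['8080:80']))  # {8080: 8080, 80: 80}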
for v_values in volume_values: for v_key, v_value in v_values.items(): if v_key == 'source': if v_value == '.': source = os.path.dirname( os.path.abspath(__file__)) else: source = v_value if v_key == 'target': target = v_value volume_spec = [source + ':' + target] return volume_spec
def _parse_volumes(volume_values: dict) -> str
Parse volumes key. Args: volume_values (dict): volume configuration values Returns: string, volume specification with mount source and container path
3.206544
2.965526
1.081273
# Initialising empty dictionary resources = {} for r_values in resource_values[resource_name]: if 'limits' in r_values: for r_key, r_value in \ resource_values[resource_name][r_values].items(): if 'cpu' in r_key: cpu_value = float(r_value) * 10 ** 9 cpu_key = r_key[:3] + '_limit' resources[cpu_key] = int(cpu_value) if 'mem' in r_key: mem_value = re.sub('M', '', r_value) mem_key = r_key[:3] + '_limit' resources[mem_key] = int(mem_value) * 1048576 resources_spec = docker.types.Resources(**resources) return resources_spec
def _parse_resources(resource_values: dict, resource_name: str) -> dict
Parse resources key. Args: resource_values (dict): resource configurations values resource_name (string): Resource name Returns: dict, resources specification
2.831829
2.754986
1.027892
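The unit conversions in _parse_resources can be checked in isolation; a small sketch with assumed limit values (no Docker required):

import re

cpu_limit = '0.5'   # CPUs, as found in a compose-style resources section
mem_limit = '64M'   # mebibytes with an 'M' suffix

nano_cpus = int(float(cpu_limit) * 10 ** 9)             # 500000000 nano-CPUs
mem_bytes = int(re.sub('M', '', mem_limit)) * 1048576   # 67108864 bytes

print(nano_cpus, mem_bytes)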
# Initialising empty list networks = [] for n_values in service_list['networks'].values(): for n_key, n_value in n_values.items(): if 'name' in n_key: networks.append(n_value) return networks
def _parse_networks(service_list: dict) -> list
Parse network key. Args: service_list (dict): Service configurations Returns: list, List of networks
3.916015
3.802095
1.029962
for log_key, log_value in log_values.items(): if 'driver' in log_key: service_config['log_driver'] = log_value if 'options' in log_key: service_config['log_driver_options'] = log_value
def _parse_logging(log_values: dict, service_config: dict)
Parse log key. Args: log_values (dict): logging configuration values service_config (dict): Service specification
2.533012
2.599423
0.974452
LOG.info('Initialising Processing Block queue.') queue = ProcessingBlockQueue() active_pb_ids = ProcessingBlockList().active LOG.info('Initialising PC PB queue: %s', active_pb_ids) for pb_id in active_pb_ids: pb = ProcessingBlock(pb_id) queue.put(pb.id, pb.priority, pb.type) return queue
def _init_queue()
Initialise the Processing Block queue from the database. Populates the queue from the current state of the Configuration Database, based on the current set of Processing Blocks in the database and any events on them.
6.055482
4.566985
1.325926
LOG.info("Starting to monitor PB events") check_counter = 0 while True: if check_counter == 50: check_counter = 0 LOG.debug('Checking for PB events...') published_events = self._pb_events.get_published_events() for event in published_events: if event.type == 'status_changed': LOG.info('PB status changed event: %s', event.data['status']) if event.data['status'] == 'created': LOG.info('Acknowledged PB created event (%s) for %s, ' '[timestamp: %s]', event.id, event.object_id, event.timestamp) pb = ProcessingBlock(event.object_id) self._queue.put(event.object_id, pb.priority, pb.type) if event.data['status'] == 'completed': LOG.info('Acknowledged PB completed event (%s) for %s,' ' [timestamp: %s]', event.id, event.object_id, event.timestamp) self._num_pbcs -= 1 if self._num_pbcs < 0: self._num_pbcs = 0 time.sleep(0.1) check_counter += 1
def _monitor_events(self)
Watch for Processing Block events.
3.181247
3.025793
1.051376
LOG.info('Starting Processing Block queue reporter.') while True: LOG.info('PB queue length = %d', len(self._queue)) time.sleep(self._report_interval) if active_count() != 5: LOG.critical('Processing Controller not running ' 'correctly! (%d/%d threads active)', active_count(), 5)
def _processing_controller_status(self)
Report on the status of the Processing Block queue(s).
8.135515
6.08451
1.337086
LOG.info('Starting to Schedule Processing Blocks.') while True: time.sleep(0.5) if not self._queue: continue if self._num_pbcs >= self._max_pbcs: LOG.warning('Resource limit reached!') continue _inspect = Inspect(app=APP) if self._queue and _inspect.active() is not None: next_pb = self._queue[-1] LOG.info('Considering %s for execution...', next_pb[2]) utc_now = datetime.datetime.utcnow() time_in_queue = (utc_now - datetime_from_isoformat(next_pb[4])) if time_in_queue.total_seconds() >= 10: item = self._queue.get() LOG.info('------------------------------------') LOG.info('>>> Executing %s! <<<', item) LOG.info('------------------------------------') execute_processing_block.delay(item) self._num_pbcs += 1 else: LOG.info('Waiting for resources for %s', next_pb[2])
def _schedule_processing_blocks(self)
Schedule Processing Blocks for execution.
4.650932
4.439758
1.047564
LOG.info('Starting to Monitor PBC status.') inspect = celery.current_app.control.inspect() workers = inspect.ping() start_time = time.time() while workers is None: time.sleep(0.1) elapsed = time.time() - start_time if elapsed > 20.0: LOG.warning('PBC not found!') break if workers is not None: for worker in workers: _tasks = inspect.registered_tasks()[worker] LOG.info('Worker: %s tasks:', worker) for task_index, task_name in enumerate(_tasks): LOG.info(' %02d : %s', task_index, task_name) while True: LOG.info('Checking PBC status (%d/%d)', self._num_pbcs, self._max_pbcs) celery_app = celery.current_app inspect = celery_app.control.inspect() workers = inspect.ping() if workers is None: LOG.warning('PBC service not found!') else: LOG.info('PBC state: %s', celery_app.events.State()) _active = inspect.active() _scheduled = inspect.scheduled() for worker in workers: LOG.info(' Worker %s: scheduled: %s, active: %s', worker, _active[worker], _scheduled[worker]) time.sleep(self._report_interval)
def _monitor_pbc_status(self)
Monitor the PBC status.
3.087086
2.999971
1.029039
# TODO(BMo) having this check is probably a good idea but I've \ # disabled it for now while the PBC is in flux. # assert sip_pbc.release.__version__ == '1.2.3' scheduler_threads = [ Thread(target=self._monitor_events, daemon=True), Thread(target=self._processing_controller_status, daemon=True), Thread(target=self._schedule_processing_blocks, daemon=True), Thread(target=self._monitor_pbc_status, daemon=True) ] for thread in scheduler_threads: thread.start() try: for thread in scheduler_threads: thread.join() except KeyboardInterrupt: LOG.info('Keyboard interrupt!') sys.exit(0) finally: LOG.info('Finally!')
def start(self)
Start the scheduler threads.
5.93563
5.561295
1.067311
if request.method == 'POST': response = {'message': 'POST Accepted'} logging.info('alarm POSTED!') data = request.data logging.info(data) string = json.dumps(data) producer.send('SIP-alarms', string.encode()) return response return ""
def alarm()
Handle an alarm POST request, logging the alarm data and forwarding it to the 'SIP-alarms' topic.
7.057795
7.112133
0.99236
LOG.debug("Setting current state from target state for %s", service.id) service.update_current_state(service.target_state)
def _update_service_current_state(service: ServiceState)
Update the current state of a service. Updates the current state of services after their target state has changed. Args: service (ServiceState): Service state object to update
7.90029
6.200971
1.274041
service_states = get_service_state_list() # Set the target state of services for service in service_states: if service.current_state != sdp_target_state: LOG.debug('Setting the current state of %s to be %s', service.id, sdp_target_state) service.update_current_state(sdp_target_state)
def _update_services_instant_gratification(sdp_target_state: str)
For demonstration purposes only. This instantly updates the services' current state with the target state, rather than waiting on them or scheduling random delays in bringing them back up.
3.49312
3.036504
1.150376
service_states = get_service_state_list() # Set the target state of services for service in service_states: if service.current_state != sdp_target_state: LOG.debug('Setting the target state of %s to be %s', service.id, sdp_target_state) service.update_target_state(sdp_target_state)
def _update_services_target_state(sdp_target_state: str)
Update the target states of services based on SDP target state. When we get a new target state this function is called to ensure components receive the target state(s) and/or act on them. Args: sdp_target_state (str): Target state of SDP
3.325763
3.715058
0.895212
LOG.info('Handling SDP target state updated event...') LOG.info('SDP target state: %s', sdp_state.target_state) # Map between the SDP target state and the service target state? if sdp_state.target_state == 'off': _update_services_target_state('off') # TODO: Work out if the state of SDP has reached the target state. # If yes, update the current state. sdp_state.update_current_state(sdp_state.target_state)
def _handle_sdp_target_state_updated(sdp_state: SDPState)
Respond to an SDP target state change event. This function sets the current state of SDP to the target state if that is possible. TODO(BMo) This can't be done as a blocking function as it is here!
5.313698
4.603185
1.154353
parser = argparse.ArgumentParser(description='{} service.'. format(__service_id__)) parser.add_argument('--random_errors', action='store_true', help='Enable random errors') parser.add_argument('-v', action='store_true', help='Verbose mode (enable debug printing)') parser.add_argument('-vv', action='store_true', help='Extra verbose mode') args = parser.parse_args() if args.vv: init_logger(log_level='DEBUG', show_log_origin=True) elif args.v: init_logger(logger_name='sip.ec.master_controller', log_level='DEBUG') else: init_logger(log_level='INFO') return args
def _parse_args()
Command line parser.
3.853518
3.681966
1.046592
# Parse command line arguments. LOG.info("Initialising: %s", __service_id__) # FIXME(BMo) There is a bug when SDP or services 'start' in the 'off' # state. At the moment it is impossible to transition out of this. # FIXME(BMo) **Hack** Register all services or if already registered do # nothing (this is handled by the ServiceState object). _services = [ "ExecutionControl:AlarmReceiver:1.0.0", "ExecutionControl:AlertManager:1.0.0", "ExecutionControl:ConfigurationDatabase:5.0.1", "ExecutionControl:MasterController:1.3.0", "ExecutionControl:ProcessingController:1.2.6", "ExecutionControl:ProcessingBlockController:1.3.0", "TangoControl:Database:1.0.4", "TangoControl:MySQL:1.0.3", "TangoControl:SDPMaster:1.2.1", "TangoControl:Subarrays:1.2.0", "TangoControl:ProcessingBlocks:1.2.0", "Platform:Kafka:2.1.1", "Platform:Prometheus:1.0.0", "Platform:PrometheusPushGateway:0.7.0", "Platform:RedisCommander:210.0.0", "Platform:Zookeeper:3.4.13" ] for service_id in _services: subsystem, name, version = service_id.split(':') ServiceState(subsystem, name, version) # If the SDP state is 'unknown', mark the SDP state as init. # FIXME(BMo) This is not right as we want to allow for recovery from # failure without just reinitialising...!? i.e. respect the old state # NOTE: If the state is 'off' we will want to reset the database # with 'skasip_config_db_init --clear' if sdp_state.current_state in ['unknown', 'off']: try: LOG.info("Setting the SDPState to 'init'") sdp_state.update_current_state('init', force=True) except ValueError as error: LOG.critical('Unable to set the State of SDP to init! %s', str(error)) LOG.info("Updating Service States") service_state_list = get_service_state_list() # FIXME(BMo) **Hack** Mark all Services in the 'unknown' state as # initialising. for service_state in service_state_list: if service_state.current_state in ['unknown', 'off']: service_state.update_current_state('init', force=True) # FIXME(BMo) **Hack** After 'checking' that the services are 'on' set # their state to 'on' after a short delay. # FIXME(BMo) This check should not be serialised!!! (should be part of the # event loop) for service_state in service_state_list: if service_state.current_state == 'init': time.sleep(random.uniform(0, 0.2)) service_state.update_current_state('on') # FIXME(BMo): **Hack** Now that all services are on, set the state of SDP to # 'standby' # FIXME(BMo) This should also be part of the event loop. services_on = [service.current_state == 'on' for service in service_state_list] if all(services_on): LOG.info('All Services are online!') sdp_state.update_current_state('standby') else: LOG.critical('Master Controller failed to initialise.') return service_state_list
def _init(sdp_state: SDPState)
Initialise the Master Controller Service. Performs the following actions: 1. Registers ServiceState objects into the Config Db. 2. If initialising for the first time (unknown state), sets the SDPState to 'init' 3. Initialises the state of Services, if running for the first time (their state == unknown) 4. Waits some time and sets the Service states to 'on'. This emulates waiting for Services to become available. 5. Once all services are 'on', sets the SDP state to 'standby'.
5.038797
4.667982
1.079438
LOG.debug('Event detected! (id : "%s", type: "%s", data: "%s")', event.object_id, event.type, event.data) if event.object_id == 'SDP' and event.type == 'current_state_updated': LOG.info('SDP current state updated, no action required!') if event.object_id == 'SDP' and event.type == 'target_state_updated': LOG.info("SDP target state changed to '%s'", sdp_state.target_state) # If the SDP is already in the target state do nothing if sdp_state.target_state == sdp_state.current_state: LOG.warning('SDP already in %s state', sdp_state.current_state) return # Check that a transition to the target state is allowed in the # current state. if not sdp_state.is_target_state_allowed(sdp_state.target_state): LOG.error('Transition to %s is not allowed when in state %s', sdp_state.target_state, sdp_state.current_state) sdp_state.target_state = sdp_state.current_state return _update_services_target_state(sdp_state.target_state) # If asking SDP to turn off, also turn off services. if sdp_state.target_state == 'off': LOG.info('Turning off services!') for service_state in service_states: service_state.update_target_state('off') service_state.update_current_state('off') LOG.info('Processing target state change request ...') time.sleep(0.1) LOG.info('Done processing target state change request!') # Assuming that the SDP has responded to the # target state command by now, set the current state # to the target state. sdp_state.update_current_state(sdp_state.target_state) if sdp_state.current_state == 'alarm': LOG.debug('raising SDP state alarm') SIP_STATE_ALARM.set(1) else: SIP_STATE_ALARM.set(0) try: # FIXME(BMo) the pushgateway host should not be hardcoded! push_to_gateway('platform_pushgateway:9091', job='SIP', registry=COLLECTOR_REGISTRY) except urllib.error.URLError: LOG.warning("Unable to connect to the Alarms service!")
def _process_event(event: Event, sdp_state: SDPState, service_states: List[ServiceState])
Process an SDP state change event.
3.524045
3.463065
1.017608
sdp_state = SDPState() service_states = get_service_state_list() state_events = sdp_state.get_event_queue(subscriber=__service_name__) state_is_off = sdp_state.current_state == 'off' counter = 0 while True: time.sleep(0.1) if not state_is_off: # *Hack* to avoid problems with historical events not being # correctly handled by EventQueue.get(), replay old events every # 10s # - see issue #54 if counter % 1000 == 0: LOG.debug('Checking published events ... %d', counter / 1000) _published_events = state_events.get_published_events( process=True) for _state_event in _published_events: _process_event(_state_event, sdp_state, service_states) else: _state_event = state_events.get() if _state_event: _process_event(_state_event, sdp_state, service_states) state_is_off = sdp_state.current_state == 'off' counter += 1
def _process_state_change_events()
Process events relating to the overall state of SDP. This function starts an event loop which continually checks for and responds to SDP state change events.
4.444674
4.340458
1.02401
# Parse command line args. _parse_args() LOG.info("Starting: %s", __service_id__) # Subscribe to state change events. # FIXME(BMo) This API is unfortunate as it looks like we are only # subscribing to sdp_state events. LOG.info('Subscribing to state change events (subscriber = %s)', __service_name__) sdp_state = SDPState() _ = sdp_state.subscribe(subscriber=__service_name__) # Initialise the service. _ = _init(sdp_state) LOG.info('Finished initialising!') # Enter a pseudo event-loop (using Sched) to monitor for state change # events # (Also randomly set services into a fault or alarm state if enabled) LOG.info('Responding to state change events ...') try: _process_state_change_events() except ValueError as error: LOG.critical('Value error: %s', str(error)) except KeyboardInterrupt as err: LOG.debug('Keyboard Interrupt %s', err) LOG.info('Exiting!')
def main()
Run the Master Controller service: parse command line arguments, subscribe to SDP state change events, initialise the service, and respond to state change events.
7.732278
7.938546
0.974017
stream_config = spead2.send.StreamConfig( max_packet_size=16356, rate=1000e6, burst_size=10, max_heaps=1) item_group = spead2.send.ItemGroup(flavour=spead2.Flavour(4, 64, 48, 0)) # Add item descriptors to the heap. num_baselines = (512 * 513) // 2 dtype = [('TCI', 'i1'), ('FD', 'u1'), ('VIS', '<c8', 4)] item_group.add_item( id=0x6000, name='visibility_timestamp_count', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6001, name='visibility_timestamp_fraction', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6005, name='visibility_baseline_count', description='', shape=tuple(), format=None, dtype='<u4') item_group.add_item( id=0x6008, name='scan_id', description='', shape=tuple(), format=None, dtype='<u8') item_group.add_item( id=0x600A, name='correlator_output_data', description='', shape=(num_baselines,), dtype=dtype) # Create streams and send start-of-stream message. streams = [] num_streams = 2 for i in range(num_streams): stream = spead2.send.UdpStream( thread_pool=spead2.ThreadPool(threads=1), hostname='127.0.0.1', port=41000 + i, config=stream_config) stream.send_heap(item_group.get_start()) streams.append(stream) vis = numpy.zeros(shape=(num_baselines,), dtype=dtype) num_heaps = 200 start_time = time.time() for stream in streams: # Update values in the heap. item_group['visibility_timestamp_count'].value = 1 item_group['visibility_timestamp_fraction'].value = 0 item_group['visibility_baseline_count'].value = num_baselines item_group['scan_id'].value = 100000000 item_group['correlator_output_data'].value = vis # Iterate heaps. for i in range(num_heaps): # Send heap. stream.send_heap(item_group.get_heap(descriptors='all', data='all')) # Print time taken. duration = time.time() - start_time data_size = num_streams * num_heaps * (vis.nbytes / 1e6) print("Sent %.3f MB in %.3f sec (%.3f MB/sec)" % ( data_size, duration, (data_size/duration))) # Send end-of-stream message. for stream in streams: stream.send_heap(item_group.get_end())
def main()
Runs the test sender.
3.078216
3.087019
0.997148
timestamp = config.get('timestamp', None) return cls(config.get('id'), config.get('type'), config.get('data', dict()), config.get('origin', None), timestamp, config.get('object_type', None), config.get('object_id', None), config.get('object_key', None))
def from_config(cls, config: dict)
Create an event object from an event dictionary object. Args: config (dict): Event Configuration dictionary.
3.106997
3.145518
0.987754
# Get data from the input Measurement Set. ms = oskar.MeasurementSet.open(filename) block_start = 0 num_rows = ms.num_rows num_baselines = ms.num_stations * (ms.num_stations - 1) // 2 # Loop over data blocks of size num_baselines. while block_start < num_rows: block_size = num_rows - block_start if block_size > num_baselines: block_size = num_baselines # Get the baseline coordinates. (Replace this with a query to LTS.) uvw = ms.read_column('UVW', block_start, block_size) # Read the Stokes-I visibility weights. vis_weights = ms.read_column('WEIGHT', block_start, block_size) if ms.num_pols == 4: vis_weights = 0.5 * (vis_weights[:, 0] + vis_weights[:, 3]) # Loop over frequency channels. # (We expect there to be only one channel here, but loop just in case.) for j in range(ms.num_channels): # Get coordinates in wavelengths. coords = uvw * (ms.freq_start_hz + j * ms.freq_inc_hz) / 299792458. # Get the Stokes-I visibilities for this channel. vis_data = None if not imager.coords_only: vis_data = ms.read_vis(block_start, j, 1, block_size) if ms.num_pols == 4: vis_data = 0.5 * (vis_data[0, :, 0] + vis_data[0, :, 3]) # Update the grid plane with this visibility block. grid_norm = imager.update_plane( coords[:, 0], coords[:, 1], coords[:, 2], vis_data, vis_weights, grid_data, grid_norm, grid_weights) # Increment start row by block size. block_start += block_size # Return updated grid normalisation. return grid_norm
def process_input_data(filename, imager, grid_data, grid_norm, grid_weights)
Reads visibility data from a Measurement Set. The visibility grid or weights grid is updated accordingly. Visibility data are read from disk in blocks of size num_baselines. Args: filename (str): Name of Measurement Set to open. imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray or None): Visibility grid to populate. grid_norm (float) Current grid normalisation. grid_weights (numpy.ndarray): Weights grid to populate or read. Returns: grid_norm (float): Updated grid normalisation.
3.635707
3.21344
1.131407
LOG.debug('GET list of SBIs.') # Construct response object. _url = get_root_url() response = dict(scheduling_blocks=[], links=dict(home='{}'.format(_url))) # Get ordered list of SBI ID's. block_ids = DB.get_sched_block_instance_ids() # Loop over SBIs and add summary of each to the list of SBIs in the # response. for block in DB.get_block_details(block_ids): block_id = block['id'] LOG.debug('Adding SBI %s to list', block_id) LOG.debug(block) block['num_processing_blocks'] = len(block['processing_block_ids']) temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2 block['status'] = choice(temp) try: del block['processing_block_ids'] except KeyError: pass block['links'] = { 'detail': '{}/scheduling-block/{}' .format(_url, block_id) } response['scheduling_blocks'].append(block) return response, HTTPStatus.OK
def get()
Return the list of Scheduling Block Instances known to SDP.
4.975486
4.667972
1.065878
response = dict(blocks=[]) block_ids = DB.get_sched_block_instance_ids() for index, block_id in enumerate(block_ids): block = DB.get_block_details([block_id]).__next__() info = [ index, block['id'], block['sub_array_id'], len(block['processing_blocks']) ] response['blocks'].append(info) return response, HTTPStatus.OK
def get_table()
Provide a table of scheduling block instance metadata for use with AJAX tables.
5.447437
4.437255
1.227659
self._db.hset(key, field, value)
def set_value(self, key, field, value)
Set the value of the specified field in the hash stored at key.
7.308969
7.159934
1.020815
self._db.rpush(event_name, dict(type=event_type, id=block_id))
def push_event(self, event_name, event_type, block_id)
Push an event (type and block id) onto the tail of the list stored at event_name.
5.273386
4.737165
1.113194
# Install handler to respond to SIGTERM signal.signal(signal.SIGTERM, _sig_handler) with open(sys.argv[1]) as fh: config = json.load(fh) # Starts the pulsar search ftp server os.chdir(os.path.expanduser('~')) receiver = PulsarStart(config, logging.getLogger()) receiver.run()
def main()
Run the pulsar search receiver: load the JSON configuration given on the command line and start the FTP interface.
6.584649
6.634902
0.992426
@wraps(func) def with_exception_handling(*args, **kwargs): try: return func(*args, **kwargs) except redis.exceptions.ConnectionError: raise ConnectionError("Unable to connect to the Redis " "Configuration Database. host = {}, " "port = {}, id = {}." .format(REDIS_HOST, REDIS_PORT, REDIS_DB_ID)) return with_exception_handling
def check_connection(func)
Check connection exceptions.
3.225867
3.050064
1.057639
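The same wrapping pattern can be demonstrated without a Redis server; a sketch using a hypothetical BackendUnavailable exception in place of the Redis ConnectionError:

from functools import wraps

class BackendUnavailable(Exception):
    """Hypothetical stand-in for the Redis connection error."""

def check_connection(func):
    # Re-raise low-level connection failures with a more helpful message.
    @wraps(func)
    def with_exception_handling(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except OSError as error:
            raise BackendUnavailable('Unable to connect to the database: {}'.format(error))
    return with_exception_handling

@check_connection
def ping():
    raise OSError('connection refused')

try:
    ping()
except BackendUnavailable as error:
    print(error)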
for _key, _value in my_dict.items(): if isinstance(_value, dict): if not hierarchical: self._db.hmset(key, {_key: json.dumps(_value)}) else: self.save_dict(key + ':' + _key, _value, hierarchical) elif isinstance(_value, list): if not hierarchical: self._db.hmset(key, {_key: str(_value)}) else: print('saving list at ', key + ':' + _key) self._db.lpush(key + ':' + _key, *_value[::-1]) elif isinstance(_value, bool): self._db.hmset(key, {_key: str(_value)}) else: self._db.hmset(key, {_key: _value})
def save_dict(self, key: str, my_dict: dict, hierarchical: bool = False)
Store the specified dictionary at the specified key.
2.112309
2.009826
1.050991
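How the hierarchical=True path maps nested dictionaries onto ':'-separated keys can be seen with plain dicts standing in for the Redis hashes (key names below are assumed):

def flatten(key, my_dict, store):
    # Mimics save_dict's hierarchical mode: nested dicts become new
    # ':'-separated keys, leaf values stay in a hash at that key.
    for _key, _value in my_dict.items():
        if isinstance(_value, dict):
            flatten(key + ':' + _key, _value, store)
        else:
            store.setdefault(key, {})[_key] = _value

store = {}
flatten('pb:PB-001', {'workflow': {'id': 'ical', 'version': '1.0'}}, store)
print(store)  # {'pb:PB-001:workflow': {'id': 'ical', 'version': '1.0'}}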
temp = my_dict for depth, key in enumerate(keys): if depth < len(keys) - 1: if key not in temp: temp[key] = dict() temp = temp[key] else: if key not in temp: temp[key] = values else: temp[key] = {**temp[key], **values} return my_dict
def _build_dict(my_dict, keys, values)
Build a dictionary from a set of Redis hashes. keys = ['a', 'b', 'c'] values = {'value': 'foo'} my_dict = {'a': {'b': {'c': {'value': 'foo'}}}} Args: my_dict (dict): Dictionary to add to keys (list[str]): List of keys used to define hierarchy in my_dict values (dict): Values to add to the dictionary at the key specified by keys Returns: dict, new dictionary with values added at keys
1.909065
2.153139
0.886643
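The example in the docstring can be run directly; a self-contained re-statement of the same merge logic with plain Python dicts:

def build_dict(my_dict, keys, values):
    temp = my_dict
    for depth, key in enumerate(keys):
        if depth < len(keys) - 1:
            temp = temp.setdefault(key, dict())
        else:
            # Merge with any existing entry, as in _build_dict above.
            temp[key] = {**temp.get(key, {}), **values}
    return my_dict

print(build_dict({}, ['a', 'b', 'c'], {'value': 'foo'}))
# {'a': {'b': {'c': {'value': 'foo'}}}}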
if self._db.type(db_key) == 'list': db_values = self._db.lrange(db_key, 0, -1) for i, value in enumerate(db_values): try: db_values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass else: # self._db.type == 'hash' db_values = self._db.hgetall(db_key) for _key, _value in db_values.items(): try: db_values[_key] = ast.literal_eval(_value) except SyntaxError: pass except ValueError: pass return db_values
def _load_values(self, db_key: str) -> dict
Load values from the db at the specified key, db_key. FIXME(BMo): Could also be extended to load scalar types (instead of just list and hash)
1.877405
1.7258
1.087846
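Because Redis returns everything as strings, ast.literal_eval is used to restore Python literals; a small stand-alone sketch with assumed stored values:

import ast

stored = {'priority': '3', 'ids': "['PB-001', 'PB-002']", 'name': 'ingest'}
decoded = {}
for key, value in stored.items():
    try:
        decoded[key] = ast.literal_eval(value)
    except (SyntaxError, ValueError):
        decoded[key] = value  # plain strings are kept as-is

print(decoded)  # {'priority': 3, 'ids': ['PB-001', 'PB-002'], 'name': 'ingest'}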
db_keys = self._db.keys(pattern=db_key + '*') my_dict = {} for _db_key in db_keys: if self._db.type(_db_key) == 'list': db_values = self._db.lrange(_db_key, 0, -1) for i, value in enumerate(db_values): try: db_values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass else: # self._db.type == 'hash' db_values = self._db.hgetall(_db_key) for _key, _value in db_values.items(): try: db_values[_key] = ast.literal_eval(_value) except SyntaxError: pass except ValueError: pass my_dict = self._build_dict(my_dict, _db_key.split(':'), db_values) return my_dict[db_key]
def _load_dict_hierarchical(self, db_key: str) -> dict
Load a dictionary stored hierarchically at db_key.
2.090825
1.994434
1.04833
if not hierarchical: db_values = self._db.hgetall(db_key) for _key, _value in db_values.items(): if isinstance(_value, str): db_values[_key] = ast.literal_eval(_value) my_dict = db_values else: my_dict = self._load_dict_hierarchical(db_key) return my_dict
def load_dict(self, db_key: str, hierarchical: bool = False) -> dict
Load the dictionary at the specified key. Hierarchically stored dictionaries use a ':' separator to expand the dictionary into a set of Redis hashes. Args: db_key (str): Key at which the dictionary is stored in the db. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: dict, the dictionary stored at key
2.51803
2.64098
0.953445
result = [] if not hierarchical: _values = self._db.hmget(db_key, *dict_keys) result = [ast.literal_eval(_value) for _value in _values] else: # Get all keys in the set of keys for this dict 'db_key' db_keys = self._db.keys(pattern=db_key + '*') for _db_key in db_keys: # Check if one of the dict_keys is an entire sub-dict entry for name in _db_key.split(':')[1:]: if name in dict_keys: _values = self._load_values(_db_key) result.append(_values) # Look in the sub-dict for any of the dict_keys _values = self._db.hmget(_db_key, *dict_keys) for i, value in enumerate(_values): try: _values[i] = ast.literal_eval(value) except SyntaxError: pass except ValueError: pass result += [value for value in _values if value is not None] return result
def load_dict_values(self, db_key: str, dict_keys: List[str], hierarchical: bool = False) -> List
Load values from a dictionary with the specified dict_keys. Args: db_key (str): Key where the dictionary is stored dict_keys (List[str]): Keys within the dictionary to load. hierarchical (bool): If True, expect the dictionary to have been stored hierarchically. If False, expect the dictionary to have been stored flat. Returns: list, values stored at the specified dict_keys in the dictionary stored at db_key
3.000878
3.059529
0.98083
# FIXME(BMo): new name for this function -> save_dict_value ? if pipeline: self._pipeline.hset(key, field, str(value)) else: self._db.hset(key, field, str(value))
def set_hash_value(self, key, field, value, pipeline=False)
Set the value of field in a hash stored at key. Args: key (str): key (name) of the hash field (str): Field within the hash to set value: Value to set pipeline (bool): True, start a transaction block. Default false.
6.636387
7.704987
0.861311
if pipeline: self._pipeline.lpush(key, *value) else: self._db.lpush(key, *value)
def prepend_to_list(self, key, *value, pipeline=False)
Add new element to the start of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false.
2.669742
3.132928
0.852156
if pipeline: self._pipeline.rpush(key, *value) else: self._db.rpush(key, *value)
def append_to_list(self, key, *value, pipeline=False)
Add new element to the end of the list stored at key. Args: key (str): Key where the list is stored value: Value to add to the list pipeline (bool): True, start a transaction block. Default false.
2.74775
3.099854
0.886413
if pipeline: return self._pipeline.lrange(key, 0, -1) return self._db.lrange(key, 0, -1)
def get_list(self, key, pipeline=False)
Get all the values in the list stored at key. Args: key (str): Key where the list is stored. pipeline (bool): True, start a transaction block. Default false. Returns: list: values in the list ordered by list index
2.631202
3.354959
0.784272
if pipeline: self._pipeline.delete(*names) else: self._db.delete(*names)
def delete(self, *names: str, pipeline=False)
Delete one or more keys specified by names. Args: names (str): Names of keys to delete pipeline (bool): True, start a transaction block. Default false.
4.303525
4.190411
1.026994
if event_history is None: event_history = event_name + '_history' return self._db.rpoplpush(event_name, event_history)
def get_event(self, event_name, event_history=None)
Get an event from the database. Gets an event from the named event list removing the event and adding it to the event history. Args: event_name (str): Event list key. event_history (str, optional): Event history list. Returns: str: string representation of the event object
4.172064
5.095023
0.818851
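A plain-Python sketch of the RPOPLPUSH pattern used in get_event (list names assumed): an event is removed from the tail of the event list and pushed onto the head of the history list in a single step.

events = ['event-1', 'event-2']   # tail (right-hand end) holds the latest push
history = []

def get_event(event_list, event_history):
    # Emulates RPOPLPUSH: pop from the tail of one list, push onto the
    # head of another.
    if not event_list:
        return None
    event = event_list.pop()
    event_history.insert(0, event)
    return event

print(get_event(events, history))  # event-2
print(history)                     # ['event-2']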
if pipeline: if redis.__version__ == '2.10.6': self._pipeline.lrem(name=key, value=value, num=count) else: self._pipeline.lrem(key, count, value) else: if self._db.exists(key): if redis.__version__ == '2.10.6': self._db.lrem(name=key, value=value, num=count) else: self._db.lrem(key, count, value)
def remove_from_list(self, key: str, value, count: int = 0, pipeline: bool = False)
Remove specified value(s) from the list stored at key. Args: key (str): Key where the list is stored. value: value to remove count (int): Number of entries to remove, default 0 == all pipeline(bool): If True, start a transaction block. Default False.
2.196749
2.188307
1.003858
if pipeline: self._pipeline.watch(key) else: self._db.watch(key)
def watch(self, key, pipeline=False)
Watch the given key. Marks the given key to be watched for conditional execution of a transaction. Args: key (str): Key that needs to be watched pipeline (bool): True, start a transaction block. Default false.
3.514913
4.149679
0.847033
if pipeline: self._pipeline.publish(channel, message) else: self._db.publish(channel, message)
def publish(self, channel, message, pipeline=False)
Post a message to a given channel. Args: channel (str): Channel where the message will be published message (str): Message to publish pipeline (bool): True, start a transaction block. Default false.
3.204668
3.617138
0.885968
# Initialise services_health = {} # Get Service IDs services_ids = self._get_services() for service_id in services_ids: service_name = DC.get_service_name(service_id) # Check if the current and actual replica levels are the same if DC.get_replicas(service_id) != \ DC.get_actual_replica(service_id): services_health[service_name] = "Unhealthy" else: services_health[service_name] = "Healthy" return services_health
def get_services_health(self) -> dict
Get the health of all services. Returns: dict, services id and health status
3.634781
3.73169
0.974031
services_health_status = self.get_services_health() # Evaluate overall health health_status = all(status == "Healthy" for status in services_health_status.values()) # Converting from bool to str if health_status: overall_status = "Healthy" else: overall_status = "Unhealthy" return overall_status
def get_overall_services_health(self) -> str
Get the overall health of all the services. Returns: str, overall health status
3.595069
3.887209
0.924846
# Check if the current and actual replica levels are the same if DC.get_replicas(service_id) != DC.get_actual_replica(service_id): health_status = "Unhealthy" else: health_status = "Healthy" return health_status
def get_service_health(service_id: str) -> str
Get the health of a service using service_id. Args: service_id (str): Service identifier Returns: str, health status
6.087575
5.79075
1.051259
uptime = time.time() - START_TIME response = dict(uptime=f'{uptime:.2f}s', links=dict(root='{}'.format(get_root_url()))) # TODO(BM) check if we can connect to the config database ... # try: # DB.get_sub_array_ids() # except ConnectionError as error: # response['state'] = 'ERROR' # response['message'] = str(error) return response, HTTPStatus.OK
def get()
Check the health of this service
7.119404
6.955177
1.023612
# Check command line arguments. if len(sys.argv) < 2: raise RuntimeError('Usage: python3 async_recv.py <json config>') # Set up logging. sip_logging.init_logger(show_thread=True) # Load SPEAD configuration from JSON file. # with open(sys.argv[-1]) as f: # spead_config = json.load(f) spead_config = json.loads(sys.argv[1]) # Set up the SPEAD receiver and run it (see method, above). receiver = SpeadReceiver(spead_config) receiver.run()
def main()
Main function for SPEAD receiver module.
4.908418
4.270882
1.149275
self._log.info("Worker thread processing block %i", i_block) time_overall0 = time.time() time_unpack = 0.0 time_write = 0.0 for i_heap, heap in enumerate(receive_buffer.result()): # Skip and log any incomplete heaps. if isinstance(heap, spead2.recv.IncompleteHeap): self._log.info("Dropped incomplete heap %i", heap.cnt + 1) continue # Update the item group from this heap. items = self._item_group.update(heap) # Get the time and channel indices from the heap index. i_chan = i_heap // self._num_buffer_times i_time = i_heap % self._num_buffer_times if 'correlator_output_data' in items: vis_data = items['correlator_output_data'].value['VIS'] if self._block is None: num_baselines = vis_data.shape[0] num_pols = vis_data[0].shape[0] self._block = numpy.zeros((self._num_buffer_times, self._num_streams, num_baselines), dtype=('c8', num_pols)) self._block[:, :, :] = 0 # To make the copies faster. # Unpack data from the heap into the block to be processed. time_unpack0 = time.time() self._block[i_time, i_chan, :] = vis_data time_unpack += time.time() - time_unpack0 # Check the data for debugging! val = self._block[i_time, i_chan, -1][-1].real self._log.debug("Data: %.3f", val) if self._block is not None: # Process the buffered data here. if self._config['process_data']: pass # Write the buffered data to storage. if self._config['write_data']: time_write0 = time.time() with open(self._config['filename'], 'ab') as f: # Don't use pickle, it's really slow (even protocol 4)! numpy.save(f, self._block, allow_pickle=False) time_write += time.time() - time_write0 # Report time taken. time_overall = time.time() - time_overall0 self._log.info("Total processing time: %.1f ms", 1000 * time_overall) self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall) self._log.info("Write was %.1f %%", 100 * time_write / time_overall) if time_unpack != 0.0: self._log.info("Memory speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_unpack) if time_write != 0.0: self._log.info("Write speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_write)
def process_buffer(self, i_block, receive_buffer)
Blocking function to process the received heaps. This is run in an executor.
3.360276
3.263555
1.029637
loop = asyncio.get_event_loop() # Get first heap in each stream (should be empty). self._log.info("Waiting for %d streams to start...", self._num_streams) for stream in self._streams: await stream.get(loop=loop) i_block = 0 receive_buffer = [None, None] while True: # Process the previous buffer, if available. processing_tasks = None if i_block > 0: i_buffer_proc = (i_block - 1) % 2 processing_tasks = loop.run_in_executor( executor, self.process_buffer, i_block - 1, receive_buffer[i_buffer_proc]) # Set up asynchronous receive on all streams. i_buffer_recv = i_block % 2 receive_tasks = [] for stream in self._streams: for _ in range(self._num_buffer_times): # NOTE: loop.create_task() is needed to schedule these # in the right order! receive_tasks.append( loop.create_task(stream.get(loop=loop))) receive_buffer[i_buffer_recv] = asyncio.gather(*receive_tasks) # Ensure asynchronous receives and previous processing tasks are # done. self._log.info("Receiving block %i", i_block) try: await receive_buffer[i_buffer_recv] self._log.info("Received block %i", i_block) except spead2.Stopped: self._log.info("Stream Stopped") break if processing_tasks: await processing_tasks i_block += 1
async def _run_loop(self, executor)
Main loop.
3.838491
3.749217
1.023811
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) loop = asyncio.get_event_loop() loop.run_until_complete(self._run_loop(executor)) self._log.info('Shutting down...') executor.shutdown()
def run(self)
Starts the receiver.
3.011624
2.772609
1.086206
# Seek to the start of the buffer self.seek(0) while True: # Copy bytes from the buffer until we reach the end of the JSON brace_count = 0 quoted = False json_string = '' while True: # Read a character b = self.read(1) if b == b'': return c = b.decode() # If it is a \ then copy the next character if c == '\\': json_string += c json_string += self.read(1).decode() continue # If we are inside quotes we just need to detect a # closing quote if c == '"': if quoted: quoted = False else: quoted = True # Otherwise we count the braces if c == '{': brace_count += 1 elif c == '}': brace_count -= 1 # Copy the character into the JSON string json_string += c # If the brace count is zero we are done if brace_count == 0: break # Parse the JSON so that we can get the size of the data array meta = json.loads(json_string) n_channels = meta['data_cube']['n_channels'] n_sub_integrations = meta['data_cube']['n_sub_integrations'] # Save the JSON to a file f = open('{0}_{1}_{2}.json'.format( meta['metadata']['observation_id'], meta['metadata']['beam_id'], meta['metadata']['name']), 'w') f.write(json.dumps(meta)) f.close() # Read the data data = self.read(n_channels * n_sub_integrations) # Write it to a file f = open('{0}_{1}_{2}.data'.format( meta['metadata']['observation_id'], meta['metadata']['beam_id'], meta['metadata']['name']), 'wb') f.write(data) f.close()
def close(self)
Parse the buffered stream on close, writing each JSON metadata block to a .json file and the associated data cube to a .data file.
2.627306
2.595373
1.012304
self._log.info('Starting Pulsar Search Interface') # Instantiate a dummy authorizer for managing 'virtual' users authorizer = DummyAuthorizer() # Define a new user having full r/w permissions and a read-only # anonymous user authorizer.add_user(self._config['login']['user'], self._config['login']['psswd'], '.', perm=self._config['login']['perm']) authorizer.add_anonymous(os.getcwd()) # Instantiate FTP handler class handler = FTPHandler handler.authorizer = authorizer handler.abstracted_fs = PulsarFileSystem # Define a customized banner (string returned when client connects) handler.banner = "SKA SDP pulsar search interface." # Instantiate FTP server class and listen on 0.0.0.0:7878 address = (self._config['address']['listen'], self._config['address']['port']) server = FTPServer(address, handler) # set a limit for connections server.max_cons = 256 server.max_cons_per_ip = 5 # start ftp server server.serve_forever()
def run(self)
Start the FTP Server for pulsar search.
3.29898
2.581363
1.277999
# As status is a modifiable property, have to reload from the db. self._config = self._load_config() return self._config.get('status')
def status(self) -> str
Return the workflow stage status.
12.382075
9.898233
1.250938
# FIXME(BM) This is currently a hack because workflow stages # don't each have their own db entry. pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id) stages = DB.get_hash_value(pb_key, 'workflow_stages') stages = ast.literal_eval(stages) stages[self._index]['status'] = value DB.set_hash_value(pb_key, 'workflow_stages', stages)
def status(self, value)
Set the workflow stage status.
7.431757
6.408129
1.159739
pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id) stages = DB.get_hash_value(pb_key, 'workflow_stages') stages = ast.literal_eval(stages) return stages[self._index]
def _load_config(self)
Load the workflow stage config from the database.
9.695867
7.698711
1.259414
parser = argparse.ArgumentParser(description='Register PB devices.') parser.add_argument('num_pb', type=int, help='Number of PBs devices to register.') return parser.parse_args()
def parse_command_args()
Command line parser.
5.159186
4.679068
1.10261
tango_db = Database() LOG.info("Registering PB devices:") dev_info = DbDevInfo() # pylint: disable=protected-access dev_info._class = 'ProcessingBlockDevice' dev_info.server = 'processing_block_ds/1' for index in range(num_pbs): dev_info.name = 'sip_sdp/pb/{:05d}'.format(index) LOG.info("\t%s", dev_info.name) tango_db.add_device(dev_info)
def register_pb_devices(num_pbs: int = 100)
Register PB devices. Note(BMo): Ideally we do not want to register any devices here. There does not seem to be a way to create a device server with no registered devices in Tango. This is (probably) because Tango devices must have been registered before the server starts ...
5.437836
4.733073
1.148902
# Validate the SBI config schema if schema_path is None: schema_path = join(dirname(__file__), 'schema', 'configure_sbi.json') with open(schema_path, 'r') as file: schema = json.loads(file.read()) validate(config_dict, schema) # Add SBI status field config_dict['status'] = 'created' # Set the subarray field to None if not defined. if 'subarray_id' not in config_dict: config_dict['subarray_id'] = 'None' # Add created, and updated timestamps. timestamp = datetime.datetime.utcnow().isoformat() config_dict['created'] = timestamp config_dict['updated'] = timestamp # Split out the processing block data array pb_list = copy.deepcopy(config_dict['processing_blocks']) # Remove processing blocks from the SBI configuration. config_dict.pop('processing_blocks', None) # Add list of PB ids to the SBI configuration config_dict['processing_block_ids'] = [] for pb in pb_list: config_dict['processing_block_ids'].append(pb['id']) # Add the SBI data object to the database. key = SchedulingObject.get_key(SBI_KEY, config_dict['id']) DB.save_dict(key, config_dict, hierarchical=False) # DB.set_hash_values(key, config_dict) # Add the SBI id to the list of active SBIs key = '{}:active'.format(SBI_KEY) DB.append_to_list(key, config_dict['id']) # Publish notification to subscribers sbi = SchedulingObject(SBI_KEY, config_dict['id']) sbi.set_status('created') for pb in pb_list: pb['sbi_id'] = config_dict['id'] cls._add_pb(pb) return cls(config_dict['id'])
def from_config(cls, config_dict: dict, schema_path: str = None)
Create an SBI object from the specified configuration dict. NOTE(BM) This should really be done as a single atomic db transaction. Args: config_dict(dict): SBI configuration dictionary schema_path(str, optional): Path to the SBI config schema.
3.259486
3.1388
1.03845
self.set_status('aborted') DB.remove_from_list('{}:active'.format(self._type), self._id) DB.append_to_list('{}:aborted'.format(self._type), self._id) sbi_pb_ids = ast.literal_eval( DB.get_hash_value(self._key, 'processing_block_ids')) for pb_id in sbi_pb_ids: pb = ProcessingBlock(pb_id) pb.abort()
def abort(self)
Abort the SBI (and associated PBs).
5.015711
4.135616
1.212809
values = DB.get_hash_value(self._key, 'processing_block_ids') return ast.literal_eval(values)
def get_pb_ids(self) -> List[str]
Return the list of PB ids associated with the SBI. Returns: list, Processing block ids
9.818851
11.104633
0.884212
if date is None: date = datetime.datetime.utcnow() if isinstance(date, datetime.datetime): date = date.strftime('%Y%m%d') if instance_id is None: instance_id = randint(0, 9999) return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
def get_id(date=None, project: str = 'sip', instance_id: int = None) -> str
Get a SBI Identifier. Args: date (str or datetime.datetime, optional): UTC date of the SBI project (str, optional ): Project Name instance_id (int, optional): SBI instance identifier Returns: str, Scheduling Block Instance (SBI) ID.
2.546767
2.218563
1.147935
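An example of the identifier format produced by get_id, with an assumed date and a random instance id:

import datetime
from random import randint

date = datetime.datetime(2019, 1, 31).strftime('%Y%m%d')
sbi_id = 'SBI-{}-{}-{:04d}'.format(date, 'sip', randint(0, 9999))
print(sbi_id)  # e.g. SBI-20190131-sip-0042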
# Add status field to the PB pb_config['status'] = 'created' # Add created and updated timestamps to the PB timestamp = datetime.datetime.utcnow().isoformat() pb_config['created'] = timestamp pb_config['updated'] = timestamp # set default priority, if not defined if 'priority' not in pb_config: pb_config['priority'] = 0 # Retrieve the workflow definition SchedulingBlockInstance._update_workflow_definition(pb_config) # If needed, add resources and dependencies fields keys = ['resources_required', 'resources_assigned', 'dependencies'] for key in keys: if key not in pb_config: pb_config[key] = [] for stage in pb_config['workflow_stages']: if key not in stage: stage[key] = [] # Add PB to the database key = SchedulingObject.get_key(PB_KEY, pb_config['id']) # DB.set_hash_values(key, pb_config) DB.save_dict(key, pb_config, hierarchical=False) # Add to list of PB ids key = '{}:active'.format(PB_KEY) DB.append_to_list(key, pb_config['id']) key = '{}:active:{}'.format(PB_KEY, pb_config['type']) DB.append_to_list(key, pb_config['id']) # Publish an event to notify subscribers of the new PB pb = SchedulingObject(PB_KEY, pb_config['id']) pb.set_status('created')
def _add_pb(pb_config: dict)
Add a Processing Block (PB) to the database.
3.707571
3.651766
1.015282
known_workflows = get_workflows() workflow_id = pb_config['workflow']['id'] workflow_version = pb_config['workflow']['version'] if workflow_id not in known_workflows or \ workflow_version not in known_workflows[workflow_id]: raise RuntimeError("Unknown workflow definition: {}:{}" .format(workflow_id, workflow_version)) workflow = get_workflow(workflow_id, workflow_version) for stage in workflow['stages']: stage['status'] = 'none' pb_config['workflow_parameters'] = pb_config['workflow']['parameters'] pb_config['workflow_id'] = pb_config['workflow']['id'] pb_config['workflow_version'] = pb_config['workflow']['version'] pb_config['workflow_stages'] = workflow['stages'] pb_config.pop('workflow', None)
def _update_workflow_definition(pb_config: dict)
Update the PB configuration workflow definition. Args: pb_config (dict): PB configuration dictionary Raises: RuntimeError, if the workflow definition (id, version) specified in the pb_config is not known.
2.22132
2.155431
1.030569
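The field splitting performed by _update_workflow_definition, sketched with a made-up workflow definition in place of the get_workflow() lookup:

pb_config = {'workflow': {'id': 'ical', 'version': '1.0', 'parameters': {}}}
workflow = {'stages': [{'name': 'stage-1'}]}   # assumed definition

for stage in workflow['stages']:
    stage['status'] = 'none'
pb_config['workflow_parameters'] = pb_config['workflow']['parameters']
pb_config['workflow_id'] = pb_config['workflow']['id']
pb_config['workflow_version'] = pb_config['workflow']['version']
pb_config['workflow_stages'] = workflow['stages']
pb_config.pop('workflow', None)
print(sorted(pb_config))
# ['workflow_id', 'workflow_parameters', 'workflow_stages', 'workflow_version']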
response = { "links": { "message": "Welcome to the SIP Processing Controller Interface", "items": [ {"href": "{}health".format(request.url)}, {"href": "{}subarrays".format(request.url)}, {"href": "{}scheduling_blocks".format(request.url)}, {"href": "{}processing_blocks".format(request.url)} ] } } return response, HTTPStatus.OK
def root()
Placeholder root url for the PCI. Ideally this should never be called!
5.295685
5.695783
0.929755
_date = strftime("%Y%m%d", gmtime()) _project = project for i in range(num_blocks): yield '{}-{}-sbi{:03d}'.format(_date, _project, i)
def generate_scheduling_block_id(num_blocks, project='test')
Generate scheduling block instance IDs.
4.38311
4.031233
1.087288
num_blocks = int(sys.argv[1]) if len(sys.argv) == 2 else 3 clear_db() for block_id in generate_scheduling_block_id(num_blocks=num_blocks, project='sip'): config = { "id": block_id, "sub_array_id": str(random.choice(range(3))), "processing_blocks": [] } for i in range(random.randint(1, 3)): config['processing_blocks'].append({ "id": "{}:pb{:03d}".format(block_id, i), "workflow": { "name": "{}".format(random.choice(['vis_ingest_01', 'dask_ical_01', 'dask_maps_01'])), "template": {}, "stages": [] } }) print('-' * 40) print(json.dumps(config, indent=2)) add_scheduling_block(config)
def main()
Main function.
4.232366
4.263906
0.992603
try: DB.add_sbi(config) except jsonschema.ValidationError as error: error_dict = error.__dict__ for key in error_dict: error_dict[key] = error_dict[key].__str__() error_response = dict(message="Failed to add scheduling block", reason="JSON validation error", details=error_dict) return error_response, HTTPStatus.BAD_REQUEST response = dict(config=config, message='Successfully registered scheduling block ' 'instance with ID: {}'.format(config['id'])) response['links'] = { 'self': '{}scheduling-block/{}'.format(request.url_root, config['id']), 'list': '{}'.format(request.url), 'home': '{}'.format(request.url_root) } return response, HTTPStatus.ACCEPTED
def add_scheduling_block(config)
Add a scheduling block to the database, returning a response object.
3.801433
3.767192
1.009089
@wraps(func) def with_exception_handling(*args, **kwargs): try: return func(*args, **kwargs) except ConnectionError as error: return (dict(error='Unable to connect to Configuration Db.', error_message=str(error), links=dict(root='{}'.format(get_root_url()))), HTTPStatus.NOT_FOUND) return with_exception_handling
def missing_db_response(func)
Decorator to handle Configuration Database connection errors.
4.531259
4.269523
1.061303
log = logging.getLogger('sip.mock_workflow_stage') if len(sys.argv) != 2: log.critical('Expecting JSON string as first argument!') return config = json.loads(sys.argv[1]) log.info('Running mock_workflow_stage (version: %s).', __version__) log.info('Received configuration: %s', json.dumps(config)) log.info('Starting task') i = 0 start_time = time.time() duration = config.get('duration', 20) while time.time() - start_time <= duration: time.sleep(duration / 20) elapsed = time.time() - start_time log.info(" %s %2i / 20 (elapsed %.2f s)", config.get('message', 'Progress '), i + 1, elapsed) i += 1 log.info('Task complete!')
def main()
Run the workflow task.
3.924687
3.732237
1.051564
log = logging.getLogger('sip.examples.log_spammer') log.info('Starting to spam log messages every %fs', sleep_length) counter = 0 try: while True: log.info('Hello %06i (log_spammer: %s, sip logging: %s)', counter, _version.__version__, __version__) counter += 1 time.sleep(sleep_length) except KeyboardInterrupt: log.info('Exiting...')
def main(sleep_length=0.1)
Log to stdout using Python logging in a while loop.
5.429536
5.026318
1.080221
fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \ '| %(message)s' logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
def init_logging()
Initialise Python logging.
2.303653
2.148242
1.072343
if not re.match(r'^(subarray-0[0-9]|subarray-1[0-5])$', sub_array_id): response = dict(error='Invalid sub-array ID specified "{}" does not ' 'match sub-array ID naming convention ' '(ie. subarray-[00-15]).'. format(sub_array_id)) return response, HTTPStatus.BAD_REQUEST if sub_array_id not in DB.get_sub_array_ids(): response = dict(error='Sub-array "{}" does not currently exist. ' 'Known sub-arrays = {}' .format(sub_array_id, DB.get_sub_array_ids())) return response, HTTPStatus.NOT_FOUND block_ids = DB.get_sub_array_sbi_ids(sub_array_id) _blocks = [b for b in DB.get_block_details(block_ids)] response = dict(scheduling_blocks=[]) _url = get_root_url() for block in _blocks: block['links'] = { 'self': '{}/scheduling-block/{}'.format(_url, block['id']) } response['scheduling_blocks'].append(block) response['links'] = { 'self': '{}'.format(request.url), 'list': '{}/sub-arrays'.format(_url), 'home': '{}'.format(_url), } return response, HTTPStatus.OK
def get(sub_array_id)
Sub array detail resource. This method will list scheduling blocks and processing blocks in the specified sub-array.
3.087739
3.099949
0.996061
config = request.data config['sub_array_id'] = 'subarray-{:02d}'.format(sub_array_id) return add_scheduling_block(config)
def create(sub_array_id)
Create / register a Scheduling Block instance with SDP.
5.988468
5.005831
1.196298
block_ids = DB.get_sub_array_sbi_ids(sub_array_id) if block_id in block_ids: block = DB.get_block_details([block_id]).__next__() return block, HTTPStatus.OK return dict(error="unknown id"), HTTPStatus.NOT_FOUND
def get_scheduling_block(sub_array_id, block_id)
Return details of the specified scheduling block instance, if it is associated with the sub-array.
5.276318
5.143273
1.025868
paths = [] for (path, _, file_names) in walk(directory): for filename in file_names: paths.append(join('..', path, filename)) return paths
def package_files(directory)
Get list of data files to add to the package.
2.55491
2.452141
1.04191
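A quick check of package_files against a throwaway directory tree (the file name below is assumed):

import tempfile
from os import walk
from os.path import join

def package_files(directory):
    paths = []
    for (path, _, file_names) in walk(directory):
        for filename in file_names:
            paths.append(join('..', path, filename))
    return paths

with tempfile.TemporaryDirectory() as tmp:
    open(join(tmp, 'schema.json'), 'w').close()
    print(package_files(tmp))  # ['../<tmp>/schema.json']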
tango_db = Database() device = "sip_sdp/elt/master" device_info = DbDevInfo() device_info._class = "SDPMasterDevice" device_info.server = "sdp_master_ds/1" device_info.name = device devices = tango_db.get_device_name(device_info.server, device_info._class) if device not in devices: LOG.info('Registering device "%s" with device server "%s"', device_info.name, device_info.server) tango_db.add_device(device_info)
def register_master()
Register the SDP Master device.
4.269878
3.775242
1.131021
LOG.info('Starting %s', __service_id__) return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout, args=args, **kwargs)
def main(args=None, **kwargs)
Run the Tango SDP Master device server.
17.632261
8.388721
2.101901
parser = argparse.ArgumentParser( prog='csp_pulsar_sender', description='Send fake pulsar data using ftp protocol.') parser.add_argument('config_file', type=argparse.FileType('r'), help='JSON configuration file.') parser.add_argument('-v', '--verbose', help='Enable verbose messages.', action='store_true') parser.add_argument('-p', '--print_settings', help='Print settings file.', action='store_true') return parser.parse_args()
def parse_command_line()
Parse command line arguments.
3.458179
3.291803
1.050543
log = logging.getLogger(__file__) log.setLevel(level) handler = logging.StreamHandler(sys.stdout) handler.setLevel(level) formatter = logging.Formatter('%(asctime)s: %(message)s', '%Y/%m/%d-%H:%M:%S') handler.setFormatter(formatter) log.addHandler(handler) return log
def _init_log(level=logging.DEBUG)
Initialise the logging object. Args: level (int): Logging level. Returns: Logger: Python logging object.
1.697548
1.785159
0.950922
# Create simulation object, and start streaming SPEAD heaps sender = PulsarSender() # Parse command line arguments args = parse_command_line() # Initialise logging. _log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO) # Load configuration. _log.info('Loading config: %s', args.config_file.name) _config = json.load(args.config_file) if args.print_settings: _log.debug('Settings:\n %s', json.dumps(_config, indent=4, sort_keys=True)) sender.send(_config, _log, 1, 1)
def main()
Main script function
5.545475
5.438262
1.019714
allow_non_recipe_messages = kwargs.pop("allow_non_recipe_messages", False) log_extender = kwargs.pop("log_extender", None) def unwrap_recipe(header, message): if header.get("workflows-recipe") in (True, "True", "true", 1): rw = RecipeWrapper(message=message, transport=transport_layer) if log_extender and rw.environment and rw.environment.get("ID"): with log_extender("recipe_ID", rw.environment["ID"]): return callback(rw, header, message.get("payload")) return callback(rw, header, message.get("payload")) if allow_non_recipe_messages: return callback(None, header, message) # self.log.warning('Discarding non-recipe message:\n' + \ # "First 1000 characters of header:\n%s\n" + \ # "First 1000 characters of message:\n%s", # str(header)[:1000], str(message)[:1000]) transport_layer.nack(header) return subscription_call(channel, unwrap_recipe, *args, **kwargs)
def _wrap_subscription(transport_layer, subscription_call, channel, callback, *args, **kwargs)
Internal method to create an intercepting function for incoming messages
to interpret recipes. This function is then used to subscribe to a channel
on the transport layer.

:param transport_layer: Reference to underlying transport object.
:param subscription_call: Reference to the subscribing function of the
    transport layer.
:param channel: Channel name to subscribe to.
:param callback: Real function to be called when messages are received.
    The callback will pass three arguments, a RecipeWrapper object
    (details below), the header as a dictionary structure, and the message.
:param allow_non_recipe_messages: Pass on incoming messages that do not
    include recipe information. In this case the first argument to the
    callback function will be 'None'.
:param log_extender: If the recipe contains useful contextual information
    for log messages, such as a unique ID which can be used to connect all
    messages originating from the same recipe, then the information will be
    passed to this function, which must be a context manager factory.
:return: Return value of call to subscription_call.
3.816302
2.930519
1.302261
return _wrap_subscription(
    transport_layer, transport_layer.subscribe, channel, callback, *args, **kwargs
)
def wrap_subscribe(transport_layer, channel, callback, *args, **kwargs)
Listen to a queue on the transport layer, similar to the subscribe call in
transport/common_transport.py. Intercept all incoming messages and parse
for recipe information. See common_transport.subscribe for possible
additional keyword arguments.

:param transport_layer: Reference to underlying transport object.
:param channel: Queue name to subscribe to.
:param callback: Function to be called when messages are received.
    The callback will pass three arguments, a RecipeWrapper object
    (details below), the header as a dictionary structure, and the message.
:return: A unique subscription ID
3.840914
5.798628
0.662383
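A hedged usage sketch for the subscription wrapper above. The queue name, the callback body and the way the transport object is obtained are illustrative assumptions rather than anything specified here.

import workflows.transport
from workflows.recipe import wrap_subscribe  # assumed public import path


def process(rw, header, message):
    # rw is a RecipeWrapper built from the recipe information in the message
    # (or None when allow_non_recipe_messages is enabled), header is the raw
    # transport header, and message is the payload of the current recipe step.
    print("payload:", message)


# Hypothetical wiring: obtain and connect a transport, then subscribe so that
# every incoming message is unwrapped before `process` is called.
transport = workflows.transport.lookup("StompTransport")()
transport.connect()
subscription_id = wrap_subscribe(transport, "processing_queue", process)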
return _wrap_subscription(
    transport_layer, transport_layer.subscribe_broadcast, channel, callback, *args, **kwargs
)
def wrap_subscribe_broadcast(transport_layer, channel, callback, *args, **kwargs)
Listen to a topic on the transport layer, similar to the
subscribe_broadcast call in transport/common_transport.py. Intercept all
incoming messages and parse for recipe information. See
common_transport.subscribe_broadcast for possible arguments.

:param transport_layer: Reference to underlying transport object.
:param channel: Topic name to subscribe to.
:param callback: Function to be called when messages are received.
    The callback will pass three arguments, a RecipeWrapper object
    (details below), the header as a dictionary structure, and the message.
:return: A unique subscription ID
2.90971
4.14898
0.701307
if self.transport:
    if self.transport.connect():
        self.log.debug("Service successfully connected to transport layer")
    else:
        raise RuntimeError("Service could not connect to transport layer")
    # direct all transport callbacks into the main queue
    self._transport_interceptor_counter = itertools.count()
    self.transport.subscription_callback_set_intercept(
        self._transport_interceptor
    )
else:
    self.log.debug("No transport layer defined for service. Skipping.")
def start_transport(self)
If a transport object has been defined then connect it now.
7.020142
6.198178
1.132614
def add_item_to_queue(header, message):
    queue_item = (
        Priority.TRANSPORT,
        next(
            self._transport_interceptor_counter
        ),  # insertion sequence to keep messages in order
        (callback, header, message),
    )
    self.__queue.put(
        queue_item
    )  # Block incoming transport until insertion completes

return add_item_to_queue
def _transport_interceptor(self, callback)
Takes a callback function and returns a function that takes headers and messages and places them on the main service queue.
11.351346
9.768518
1.162034
if frontend:
    self.__pipe_frontend = frontend
    self.__send_service_status_to_frontend()
if commands:
    self.__pipe_commands = commands
def connect(self, frontend=None, commands=None)
Inject pipes connecting the service to the frontend. Two arguments are supported: frontend= for messages from the service to the frontend, and commands= for messages from the frontend to the service. The injection should happen before the service is started, otherwise the underlying file descriptor references may not be handled correctly.
5.935029
5.05074
1.175081
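A hedged sketch of a frontend creating the two pipes and injecting them before the service process is started, as the docstring above requires. ExampleService is a hypothetical subclass of the service class whose connect() method is shown here.

import multiprocessing

# One pipe per direction: service -> frontend status/log messages, and
# frontend -> service commands.
svc_status_end, frontend_status_end = multiprocessing.Pipe()
frontend_command_end, svc_command_end = multiprocessing.Pipe()

service = ExampleService()
service.connect(frontend=svc_status_end, commands=svc_command_end)

# Only after injection is the service process started.
process = multiprocessing.Process(target=service.start)
process.start()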
self.__log_extensions.append((field, value))
try:
    yield
except Exception as e:
    setattr(e, "workflows_log_" + field, value)
    raise
finally:
    self.__log_extensions.remove((field, value))
def extend_log(self, field, value)
A context wherein a specified extra field in log messages is populated with a fixed value. This affects all log messages within the context.
4.726265
4.356526
1.08487
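A hedged sketch of using extend_log from inside a service method: every log record emitted within the block carries the extra field, and the field is also attached to any exception that escapes, so it can be picked up when the exception is logged later. The handler name, environment key and do_processing call are illustrative assumptions.

def on_recipe_message(self, rw, header, message):
    with self.extend_log("recipe_ID", rw.environment["ID"]):
        self.log.info("Handling recipe message")
        self.do_processing(message)  # placeholder for the real work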
self.log.debug("Queue listener thread started") counter = itertools.count() # insertion sequence to keep messages in order while not self.__shutdown: if self.__pipe_commands.poll(1): try: message = self.__pipe_commands.recv() except EOFError: # Pipe was closed by frontend. Shut down service. self.__shutdown = True self.log.error( "Pipe closed by frontend, shutting down service", exc_info=True ) break queue_item = (Priority.COMMAND, next(counter), message) try: self.__queue.put(queue_item, True, 60) except queue.Full: # If the message can't be stored within 60 seconds then the service is # operating outside normal parameters. Try to shut it down. self.__shutdown = True self.log.error( "Write to service priority queue failed, shutting down service", exc_info=True, ) break self.log.debug("Queue listener thread terminating")
def __command_queue_listener(self)
Function to continuously retrieve data from the frontend. Commands are sent to the central priority queue. If the pipe from the frontend is closed the service shutdown is initiated. Check every second if service has shut down, then terminate. This function is run by a separate daemon thread, which is started by the __start_command_queue_listener function.
4.351042
3.778306
1.151586
thread_function = self.__command_queue_listener

class QueueListenerThread(threading.Thread):
    def run(qltself):
        thread_function()

assert not hasattr(self, "__queue_listener_thread")
self.log.debug("Starting queue listener thread")
self.__queue_listener_thread = QueueListenerThread()
self.__queue_listener_thread.daemon = True
self.__queue_listener_thread.name = "Command Queue Listener"
self.__queue_listener_thread.start()
def __start_command_queue_listener(self)
Start the function __command_queue_listener in a separate thread. This function continuously listens to the pipe connected to the frontend.
2.931442
2.968195
0.987618
for field, value in self.__log_extensions:
    setattr(logrecord, field, value)
self.__send_to_frontend({"band": "log", "payload": logrecord})
def _log_send(self, logrecord)
Forward log records to the frontend.
10.422453
7.577786
1.375396
self._idle_callback = callback
self._idle_time = idle_time
def _register_idle(self, idle_time, callback)
Register a callback function that is run when idling for a given time span (in seconds).
3.849381
3.584161
1.073998
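A hedged sketch of registering an idle callback from a service subclass's initializing() hook, so a housekeeping task runs whenever the main queue has been empty for 10 seconds. The hook body and callback name are illustrative assumptions.

def initializing(self):
    # Run flush_buffers whenever the main queue has been idle for 10 seconds.
    self._register_idle(10, self.flush_buffers)

def flush_buffers(self):
    self.log.debug("Idle for 10 seconds; flushing buffered results.")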
if self.__service_status != statuscode:
    self.__service_status = statuscode
    self.__send_service_status_to_frontend()
def __update_service_status(self, statuscode)
Set the internal status of the service object, and notify frontend.
3.280325
2.430804
1.349482
self._service_name = name
self.__send_to_frontend({"band": "set_name", "name": self._service_name})
def _set_name(self, name)
Set a new name for this service, and notify the frontend accordingly.
9.078883
5.1792
1.752951
# Reset logging to pass logrecords into the queue to the frontend only.
# Existing handlers may be broken as they were copied into a new process,
# so should be discarded.
for loggername in [None] + list(logging.Logger.manager.loggerDict.keys()):
    logger = logging.getLogger(loggername)
    while logger.handlers:
        logger.removeHandler(logger.handlers[0])

# Re-enable logging to console
root_logger = logging.getLogger()

# By default pass all warning (and higher) level messages to the frontend
root_logger.setLevel(logging.WARN)
root_logger.addHandler(workflows.logging.CallbackHandler(self._log_send))

# Set up the service logger and pass all info (and higher) level messages
# (or other level if set differently)
self.log = logging.getLogger(self._logger_name)
if self.start_kwargs.get("verbose_log"):
    self.log_verbosity = logging.DEBUG
self.log.setLevel(self.log_verbosity)

# Additionally, write all critical messages directly to console
console = logging.StreamHandler()
console.setLevel(logging.CRITICAL)
root_logger.addHandler(console)
def initialize_logging(self)
Reset the logging for the service process. All logged messages are forwarded to the frontend. If any filtering is desired, then this must take place on the service side.
5.900441
5.577782
1.057847
# Keep a copy of keyword arguments for use in subclasses
self.start_kwargs.update(kwargs)

try:
    self.initialize_logging()

    self.__update_service_status(self.SERVICE_STATUS_STARTING)

    self.start_transport()

    self.initializing()
    self._register("command", self.__process_command)

    if self.__pipe_commands is None:
        # can only listen to commands if command queue is defined
        self.__shutdown = True
    else:
        # start listening to command queue in separate thread
        self.__start_command_queue_listener()

    while not self.__shutdown:  # main loop
        self.__update_service_status(self.SERVICE_STATUS_IDLE)

        if self._idle_time is None:
            task = self.__queue.get()
        else:
            try:
                task = self.__queue.get(True, self._idle_time)
            except queue.Empty:
                self.__update_service_status(self.SERVICE_STATUS_TIMER)
                if self._idle_callback:
                    self._idle_callback()
                continue

        self.__update_service_status(self.SERVICE_STATUS_PROCESSING)

        if task[0] == Priority.COMMAND:
            message = task[2]
            if message and "band" in message:
                processor = self.__callback_register.get(message["band"])
                if processor is None:
                    self.log.warning(
                        "received message on unregistered band\n%s", message
                    )
                else:
                    processor(message.get("payload"))
            else:
                self.log.warning(
                    "received message without band information\n%s", message
                )
        elif task[0] == Priority.TRANSPORT:
            callback, header, message = task[2]
            callback(header, message)
        else:
            self.log.warning("Unknown item on main service queue\n%r", task)

except KeyboardInterrupt:
    self.log.warning("Ctrl+C detected. Shutting down.")

except Exception as e:
    self.process_uncaught_exception(e)
    self.__update_service_status(self.SERVICE_STATUS_ERROR)
    self.in_shutdown()
    return

try:
    self.__update_service_status(self.SERVICE_STATUS_SHUTDOWN)
    self.in_shutdown()
    self.__update_service_status(self.SERVICE_STATUS_END)
except Exception as e:
    self.process_uncaught_exception(e)
    self.__update_service_status(self.SERVICE_STATUS_ERROR)
def start(self, **kwargs)
Start listening to command queue, process commands in main loop, set status, etc... This function is most likely called by the frontend in a separate process.
3.23357
3.14566
1.027946
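A hedged sketch of a minimal service subclass driven by the start() loop above: initializing() registers a band callback, and the main loop then dispatches any command carrying that band to it. The class name, band name and the base-class import path are assumptions.

from workflows.services.common_service import CommonService  # assumed import path


class EchoService(CommonService):
    """Illustrative service that logs every payload it receives."""

    _service_name = "Echo"

    def initializing(self):
        # Dispatch commands of the form {"band": "echo", "payload": ...}
        # to do_echo via the main loop in start().
        self._register("echo", self.do_echo)

    def do_echo(self, payload):
        self.log.info("Echo payload: %r", payload)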
# Add information about the actual exception to the log message
# This includes the file, line and piece of code causing the exception.
# exc_info=True adds the full stack trace to the log message.
exc_file_fullpath, exc_file, exc_lineno, exc_func, exc_line = (
    workflows.logging.get_exception_source()
)
added_information = {
    "workflows_exc_lineno": exc_lineno,
    "workflows_exc_funcName": exc_func,
    "workflows_exc_line": exc_line,
    "workflows_exc_pathname": exc_file_fullpath,
    "workflows_exc_filename": exc_file,
}
for field in filter(lambda x: x.startswith("workflows_log_"), dir(e)):
    added_information[field[14:]] = getattr(e, field, None)
self.log.critical(
    "Unhandled service exception: %s", e, exc_info=True, extra=added_information
)
def process_uncaught_exception(self, e)
This is called to handle otherwise uncaught exceptions from the service. The service will terminate either way, but here we can do things such as gathering useful environment information and logging for posterity.
4.178589
4.012152
1.041483
if self.use_ssl:
    factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
else:
    factory = irc.connection.Factory()

self.connection.connect(server=self.server,
                        port=self.port,
                        nickname=self.nickname,
                        connect_factory=factory,
                        password=self.password,
                        username=self.username,
                        ircname=self.ircname)
def connect(self, *args, **kwargs)
Connect to a server. This overrides the function in SimpleIRCClient to provide SSL functionality. :param args: :param kwargs: :return:
2.696799
2.661853
1.013129