| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 values) | text (string, 75-19.8k chars) | language (string, 1 value) | meta_information (dict) |
|---|---|---|---|---|---|
q17200
|
publish
|
train
|
def publish(event_type: str,
event_data: dict = None,
object_type: str = None,
object_id: str = None,
object_key: str = None,
origin: str = None):
"""Publish an event.
Publishes the event to all subscribers and stores the event with the
object.
Args:
event_type (str): The event type
event_data (dict, optional): Optional event data
object_type (str): Type of object.
object_id (str): Object ID
object_key (str, optional): Key used to store the object. If None,
the key is assumed to be of the form <object type>:<object id>
origin (str): Origin or publisher of the event.
"""
event = Event(event_id=_get_event_id(object_type),
event_type=event_type,
event_data=event_data,
event_origin=origin,
object_type=object_type,
object_id=object_id,
object_key=object_key)
# Publish the event to subscribers
_publish_to_subscribers(event)
# Update the object event list and data.
if object_key is None:
object_key = '{}:{}'.format(object_type, object_id)
_update_object(object_key, event)
# Execute the set of db transactions as an atomic transaction.
DB.execute()
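# --- Usage sketch (illustrative, not part of the original module) ---
# The object type/id, event name and origin below are hypothetical
# placeholders; publish() stores the event and notifies subscribers.
def _example_publish():
    publish(event_type='status_changed',
            event_data={'status': 'running'},
            object_type='pb',
            object_id='PB-000',
            origin='example_publisher')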
|
python
|
{
"resource": ""
}
|
q17201
|
_get_events_list
|
train
|
def _get_events_list(object_key: str) -> List[str]:
"""Get list of event ids for the object with the specified key.
Args:
object_key (str): Key of an object in the database.
"""
return DB.get_list(_keys.events_list(object_key))
|
python
|
{
"resource": ""
}
|
q17202
|
_get_events_data
|
train
|
def _get_events_data(object_key: str) -> List[dict]:
"""Get the list of event data for the object with the specified key.
Args:
object_key (str): Key of an object in the database.
"""
events_data = []
key = _keys.events_data(object_key)
for event_id in _get_events_list(object_key):
event_dict = literal_eval(DB.get_hash_value(key, event_id))
events_data.append(event_dict)
return events_data
|
python
|
{
"resource": ""
}
|
q17203
|
get_events
|
train
|
def get_events(object_key: str) -> List[Event]:
"""Get list of events for the object with the specified key."""
events_data = _get_events_data(object_key)
return [Event.from_config(event_dict) for event_dict in events_data]
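# --- Usage sketch (illustrative, not part of the original module) ---
# Retrieve the stored events for a hypothetical object; the key follows
# the '<object type>:<object id>' convention assumed by publish().
def _example_get_events():
    for event in get_events('pb:PB-000'):
        print(event.id, event.config)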
|
python
|
{
"resource": ""
}
|
q17204
|
_publish_to_subscribers
|
train
|
def _publish_to_subscribers(event: Event):
"""Publish and event to all subscribers.
- Adds the event id to the published event list for all subscribers.
- Adds the event data to the published event data for all subscribers.
- Publishes the event id notification to all subscribers.
Args:
event (Event): Event object to publish.
"""
subscribers = get_subscribers(event.object_type)
# Add the event to each subscribers published list
for sub in subscribers:
DB.prepend_to_list(_keys.published(event.object_type, sub),
event.id, pipeline=True)
event_dict = deepcopy(event.config)
event_dict.pop('id')
DB.set_hash_value(_keys.data(event.object_type, sub), event.id,
str(event_dict), pipeline=True)
DB.publish(event.object_type, event.id, pipeline=True)
|
python
|
{
"resource": ""
}
|
q17205
|
_update_object
|
train
|
def _update_object(object_key: str, event: Event):
"""Update the events list and events data for the object.
- Adds the event Id to the list of events for the object.
- Adds the event data to the hash of object event data keyed by event
id.
Args:
object_key (str): Key of the object being updated.
event (Event): Event object
"""
events_list_key = _keys.events_list(object_key)
events_data_key = _keys.events_data(object_key)
event_dict = deepcopy(event.config)
event_dict.pop('id')
DB.append_to_list(events_list_key, event.id, pipeline=True)
DB.set_hash_value(events_data_key, event.id, json.dumps(event_dict),
pipeline=True)
|
python
|
{
"resource": ""
}
|
q17206
|
_get_event_id
|
train
|
def _get_event_id(object_type: str) -> str:
"""Return an event key for the event on the object type.
This must be a unique event id for the object.
Args:
object_type (str): Type of object
Returns:
str, event id
"""
key = _keys.event_counter(object_type)
DB.watch(key, pipeline=True)
count = DB.get_value(key)
DB.increment(key)
DB.execute()
if count is None:
count = 0
return '{}_event_{:08d}'.format(object_type, int(count))
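# --- Note (illustrative, not part of the original module) ---
# The returned id combines the object type with a zero-padded per-type
# counter kept in the database, e.g. the first 'pb' event would get the
# id 'pb_event_00000000'. The watch / increment / execute sequence above
# is what keeps the counter read-and-increment atomic across publishers.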
|
python
|
{
"resource": ""
}
|
q17207
|
ProcessingBlockDevice.pb_id
|
train
|
def pb_id(self, pb_id: str):
"""Set the PB Id for this device."""
# FIXME(BMo) instead of creating the object to check if the PB exists
# use a method on PB List?
# ProcessingBlock(pb_id)
self.set_state(DevState.ON)
self._pb_id = pb_id
|
python
|
{
"resource": ""
}
|
q17208
|
ProcessingBlockDevice.pb_config
|
train
|
def pb_config(self):
"""Return the PB configuration."""
pb = ProcessingBlock(self._pb_id)
return json.dumps(pb.config)
|
python
|
{
"resource": ""
}
|
q17209
|
StateObject.current_timestamp
|
train
|
def current_timestamp(self) -> datetime:
"""Get the current state timestamp."""
timestamp = DB.get_hash_value(self._key, 'current_timestamp')
return datetime_from_isoformat(timestamp)
|
python
|
{
"resource": ""
}
|
q17210
|
StateObject.target_timestamp
|
train
|
def target_timestamp(self) -> datetime:
"""Get the target state timestamp."""
timestamp = DB.get_hash_value(self._key, 'target_timestamp')
return datetime_from_isoformat(timestamp)
|
python
|
{
"resource": ""
}
|
q17211
|
StateObject.update_target_state
|
train
|
def update_target_state(self, value: str, force: bool = True) -> datetime:
"""Set the target state.
Args:
value (str): New value for target state
force (bool): If true, ignore allowed transitions
Returns:
datetime, update timestamp
Raises:
RuntimeError, if it is not currently possible to set the target
state.
ValueError, if the specified target state is not allowed.
"""
value = value.lower()
if not force:
current_state = self.current_state
if current_state == 'unknown':
raise RuntimeError("Unable to set target state when current "
"state is 'unknown'")
allowed_target_states = self._allowed_target_states[current_state]
LOG.debug('Updating target state of %s to %s', self._id, value)
if value not in allowed_target_states:
raise ValueError("Invalid target state: '{}'. {} can be "
"commanded to states: {}".
format(value, current_state,
allowed_target_states))
return self._update_state('target', value)
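# --- Usage sketch (illustrative, not part of the original module) ---
# 'state_object' stands for any StateObject instance; the target value
# 'on' is a hypothetical example of an allowed target state.
def _example_update_target_state(state_object):
    try:
        timestamp = state_object.update_target_state('on', force=False)
        print('Target state set at', timestamp)
    except (RuntimeError, ValueError) as error:
        print('Target state rejected:', error)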
|
python
|
{
"resource": ""
}
|
q17212
|
StateObject.update_current_state
|
train
|
def update_current_state(self, value: str,
force: bool = False) -> datetime:
"""Update the current state.
Args:
value (str): New value for sdp state
force (bool): If true, ignore allowed transitions
Returns:
datetime, update timestamp
Raises:
ValueError: If the specified current state is not allowed.
"""
value = value.lower()
if not force:
current_state = self.current_state
# If the current state is unknown, it can be set to any of the
# allowed states, otherwise only allow certain transitions.
if current_state == 'unknown':
allowed_transitions = self._allowed_states
else:
allowed_transitions = self._allowed_transitions[current_state]
allowed_transitions.append(current_state)
LOG.debug('Updating current state of %s to %s', self._id, value)
if value not in allowed_transitions:
raise ValueError("Invalid current state update: '{}'. '{}' "
"can be transitioned to states: {}"
.format(value, current_state,
allowed_transitions))
return self._update_state('current', value)
|
python
|
{
"resource": ""
}
|
q17213
|
StateObject._initialise
|
train
|
def _initialise(self, initial_state: str = 'unknown') -> dict:
"""Return a dictionary used to initialise a state object.
This method is used to obtain a dictionary/hash describing the initial
state of SDP or a service in SDP.
Args:
initial_state (str): Initial state.
Returns:
dict, Initial state configuration
"""
initial_state = initial_state.lower()
if initial_state != 'unknown' and \
initial_state not in self._allowed_states:
raise ValueError('Invalid initial state: {}'.format(initial_state))
_initial_state = dict(
current_state=initial_state,
target_state=initial_state,
current_timestamp=datetime.utcnow().isoformat(),
target_timestamp=datetime.utcnow().isoformat())
return _initial_state
|
python
|
{
"resource": ""
}
|
q17214
|
ConfigDb.add_sched_block_instance
|
train
|
def add_sched_block_instance(self, config_dict):
"""Add Scheduling Block to the database.
Args:
config_dict (dict): SBI configuration
"""
# Get schema for validation
schema = self._get_schema()
LOG.debug('Adding SBI with config: %s', config_dict)
# Validates the schema
validate(config_dict, schema)
# Add status field and value to the data
updated_block = self._add_status(config_dict)
# Splitting into different names and fields before
# adding to the database
scheduling_block_data, processing_block_data = \
self._split_sched_block_instance(updated_block)
# Adding Scheduling block instance with id
name = "scheduling_block:" + updated_block["id"]
self._db.set_specified_values(name, scheduling_block_data)
# Add an event to the scheduling block event list to notify
# of a new scheduling block being added to the db.
self._db.push_event(self.scheduling_event_name,
updated_block["status"],
updated_block["id"])
# Adding Processing block with id
for value in processing_block_data:
name = ("scheduling_block:" + updated_block["id"] +
":processing_block:" + value['id'])
self._db.set_specified_values(name, value)
# Add an event to the processing block event list to notify
# of a new processing block being added to the db.
self._db.push_event(self.processing_event_name,
value["status"],
value["id"])
|
python
|
{
"resource": ""
}
|
q17215
|
ConfigDb.get_sched_block_instance_ids
|
train
|
def get_sched_block_instance_ids(self):
"""Get unordered list of scheduling block ids"""
# Initialise empty list
scheduling_block_ids = []
# Pattern used to search scheduling block ids
pattern = 'scheduling_block:*'
block_ids = self._db.get_ids(pattern)
for block_id in block_ids:
if 'processing_block' not in block_id:
id_split = block_id.split(':')[-1]
scheduling_block_ids.append(id_split)
return sorted(scheduling_block_ids)
|
python
|
{
"resource": ""
}
|
q17216
|
ConfigDb.get_processing_block_ids
|
train
|
def get_processing_block_ids(self):
"""Get list of processing block ids using the processing block id"""
# Initialise empty list
_processing_block_ids = []
# Pattern used to search processing block ids
pattern = '*:processing_block:*'
block_ids = self._db.get_ids(pattern)
for block_id in block_ids:
id_split = block_id.split(':')[-1]
_processing_block_ids.append(id_split)
return sorted(_processing_block_ids)
|
python
|
{
"resource": ""
}
|
q17217
|
ConfigDb.get_sub_array_ids
|
train
|
def get_sub_array_ids(self):
"""Get list of sub array ids"""
# Initialise empty list
_scheduling_block_ids = []
_sub_array_ids = []
for blocks_id in self.get_sched_block_instance_ids():
_scheduling_block_ids.append(blocks_id)
block_details = self.get_block_details(_scheduling_block_ids)
for details in block_details:
_sub_array_ids.append(details['sub_array_id'])
_sub_array_ids = sorted(list(set(_sub_array_ids)))
return _sub_array_ids
|
python
|
{
"resource": ""
}
|
q17218
|
ConfigDb.get_sub_array_sbi_ids
|
train
|
def get_sub_array_sbi_ids(self, sub_array_id):
"""Get Scheduling Block Instance ID associated with sub array id"""
_ids = []
sbi_ids = self.get_sched_block_instance_ids()
for details in self.get_block_details(sbi_ids):
if details['sub_array_id'] == sub_array_id:
_ids.append(details['id'])
return sorted(_ids)
|
python
|
{
"resource": ""
}
|
q17219
|
ConfigDb.get_block_details
|
train
|
def get_block_details(self, block_ids):
"""Get details of scheduling or processing block
Args:
block_ids (list): List of block IDs
"""
# Convert input to list if needed
if isinstance(block_ids, str) or not hasattr(block_ids, "__iter__"):
block_ids = [block_ids]
for _id in block_ids:
block_key = self._db.get_block(_id)[0]
block_data = self._db.get_all_field_value(block_key)
# NOTE(BM) unfortunately this doesn't quite work for keys where
# the value is a python type (list, dict etc).
# The following hack works for now but is probably not infallible.
for key in block_data:
for char in ['[', '{']:
if char in block_data[key]:
block_data[key] = ast.literal_eval(
str(block_data[key]))
yield block_data
|
python
|
{
"resource": ""
}
|
q17220
|
ConfigDb.update_value
|
train
|
def update_value(self, block_id, field, value):
""""Update the value of the given block id and field"""
block_name = self._db.get_block(block_id)
for name in block_name:
self._db.set_value(name, field, value)
|
python
|
{
"resource": ""
}
|
q17221
|
ConfigDb.delete_sched_block_instance
|
train
|
def delete_sched_block_instance(self, block_id):
"""Delete the specified Scheduling Block Instance.
Removes the Scheduling Block Instance, and all Processing Blocks
that belong to it from the database"""
LOG.debug('Deleting SBI %s', block_id)
scheduling_blocks = self._db.get_all_blocks(block_id)
if not scheduling_blocks:
raise RuntimeError('Scheduling block not found: {}'.
format(block_id))
if scheduling_blocks:
for blocks in scheduling_blocks:
if "processing_block" not in blocks:
self._db.delete_block(blocks)
else:
split_key = blocks.split(':')
self._db.delete_block(blocks)
# Add an event to the processing block event list to notify
# about deleting from the db
self._db.push_event(self.processing_event_name, "deleted",
split_key[3])
# Add an event to the scheduling block event list to notify
# of deleting a scheduling block from the db
self._db.push_event(self.scheduling_event_name, "deleted",
block_id)
|
python
|
{
"resource": ""
}
|
q17222
|
ConfigDb._get_schema
|
train
|
def _get_schema():
"""Get the schema for validation"""
schema_path = os.path.join(os.path.dirname(__file__),
'schema', 'scheduling_block_schema.json')
with open(schema_path, 'r') as file:
schema_data = file.read()
schema = json.loads(schema_data)
return schema
|
python
|
{
"resource": ""
}
|
q17223
|
ConfigDb._add_status
|
train
|
def _add_status(scheduling_block):
"""This function adds status fields to all the section
in the scheduling block instance"""
scheduling_block['status'] = "created"
for block in scheduling_block:
if isinstance(scheduling_block[block], list):
for field in scheduling_block[block]:
field['status'] = 'created'
return scheduling_block
|
python
|
{
"resource": ""
}
|
q17224
|
ConfigDb._split_sched_block_instance
|
train
|
def _split_sched_block_instance(self, scheduling_block):
"""Split the scheduling block data into multiple names
before adding to the configuration database"""
# Initialise empty list
_scheduling_block_data = {}
_processing_block_data = {}
_processing_block_id = []
for block in scheduling_block:
values = scheduling_block[block]
if block != 'processing_blocks':
_scheduling_block_data[block] = values
else:
# Check if there is a processing block that already exists in
# the database
processing_block_id = self.get_processing_block_ids()
for value in values:
if value['id'] not in processing_block_id:
_processing_block_data = values
else:
raise Exception("Processing block already exits",
value['id'])
# Adding processing block id to the scheduling block list
for block_id in _processing_block_data:
_processing_block_id.append(block_id['id'])
_scheduling_block_data['processing_block_ids'] = _processing_block_id
return _scheduling_block_data, _processing_block_data
|
python
|
{
"resource": ""
}
|
q17225
|
init_logger
|
train
|
def init_logger(logger_name='sip', log_level=None, p3_mode: bool = True,
show_thread: bool = False, propagate: bool = False,
show_log_origin=False):
"""Initialise the SIP logger.
Attaches a stdout stream handler to the 'sip' logger. This will
apply to all logger objects with a name prefixed by 'sip.'
This function respects the 'SIP_LOG_LEVEL' environment variable to
set the logging level.
Args:
logger_name (str, optional): Name of the logger object.
log_level (str or int, optional): Logging level for the SIP logger.
p3_mode (bool, optional): Print logging statements in a format that
P3 can support.
show_thread (bool, optional): Display the thread in the log message.
propagate (bool, optional): Propagate settings to parent loggers.
show_log_origin (bool, optional): If true show the origin
(file, line no.) of log messages.
"""
log = logging.getLogger(logger_name)
log.propagate = propagate
# Remove existing handlers (avoids duplicate messages)
for handler in log.handlers:
log.removeHandler(handler)
_debug = '%(filename)s:%(lineno)d | ' if show_log_origin else ''
# P3 mode is intended to work with the fluentd configuration on P3.
# This has ms timestamp precision and uses '-' as a delimiter
# between statements in the log file.
if p3_mode:
_prefix = '%(asctime)s - %(name)s - %(levelname)s'
if show_thread:
_format = '{} - %(threadName)s - {}%(message)s'\
.format(_prefix, _debug)
else:
_format = '{} - {}%(message)s'.format(_prefix, _debug)
formatter = logging.Formatter(_format)
formatter.converter = time.gmtime
# If not in P3 mode, the timestamp will be microsecond precision and use '|'
# as a separator.
else:
_prefix = '%(asctime)s | %(name)s | %(levelname)s'
if show_thread:
_format = '{} | %(threadName)s | {}%(message)s'\
.format(_prefix, _debug)
else:
_format = '{} | {}%(message)s'.format(_prefix, _debug)
formatter = SIPFormatter(_format, datefmt='%Y-%m-%dT%H:%M:%S.%fZ')
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
log.addHandler(handler)
# Set the logging level.
if log_level:
log.setLevel(log_level)
else:
log.setLevel(os.getenv('SIP_LOG_LEVEL', 'DEBUG'))
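# --- Usage sketch (illustrative, not part of the original module) ---
# Typical set-up at service start; the child logger name and log level
# shown here are examples, not requirements.
def _example_init_logger():
    init_logger(log_level='INFO', show_thread=True)
    logging.getLogger('sip.example').info('Logger initialised')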
|
python
|
{
"resource": ""
}
|
q17226
|
disable_logger
|
train
|
def disable_logger(logger_name: str, propagate: bool = False):
"""Disable output for the logger of the specified name."""
log = logging.getLogger(logger_name)
log.propagate = propagate
for handler in log.handlers:
log.removeHandler(handler)
|
python
|
{
"resource": ""
}
|
q17227
|
set_log_level
|
train
|
def set_log_level(logger_name: str, log_level: str, propagate: bool = False):
"""Set the log level of the specified logger."""
log = logging.getLogger(logger_name)
log.propagate = propagate
log.setLevel(log_level)
|
python
|
{
"resource": ""
}
|
q17228
|
SIPFormatter.formatTime
|
train
|
def formatTime(self, record, datefmt=None):
"""Format the log timestamp."""
_seconds_fraction = record.created - int(record.created)
_datetime_utc = time.mktime(time.gmtime(record.created))
_datetime_utc += _seconds_fraction
_created = self.converter(_datetime_utc)
if datefmt:
time_string = _created.strftime(datefmt)
else:
time_string = _created.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
time_string = "%s,%03d" % (time_string, record.msecs)
return time_string
|
python
|
{
"resource": ""
}
|
q17229
|
generate_sbi
|
train
|
def generate_sbi(index: int = None):
"""Generate a SBI config JSON string."""
date = datetime.datetime.utcnow().strftime('%Y%m%d')
if index is None:
index = randint(0, 999)
sbi_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
sb_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
pb_id = 'PB-{}-sip-demo-{:03d}'.format(date, index)
print('* Generating SBI: %s, PB: %s' % (sb_id, pb_id))
sbi = dict(
id=sbi_id,
version='1.0.0',
scheduling_block=dict(
id=sb_id,
project='sip',
programme_block='sip_demos'
),
processing_blocks=[
dict(
id=pb_id,
version='1.0.0',
type='offline',
priority=1,
dependencies=[],
resources_required=[],
workflow=dict(
id='mock_workflow',
version='1.0.0',
parameters=dict(
stage1=dict(duration=30),
stage2=dict(duration=30),
stage3=dict(duration=30)
)
)
)
]
)
return sbi
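# --- Usage sketch (illustrative, not part of the original module) ---
# Serialise the generated SBI dictionary to a JSON string, e.g. for a
# device configure command (assumes `import json`; index 1 is arbitrary).
def _example_generate_sbi():
    sbi_config = generate_sbi(index=1)
    return json.dumps(sbi_config, indent=2)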
|
python
|
{
"resource": ""
}
|
q17230
|
PulsarSender.send
|
train
|
def send(self, config, log, obs_id, beam_id):
"""
Send the pulsar data to the ftp server
Args:
config (dict): Dictionary of settings
log (logging.Logger): Python logging object
obs_id: observation id
beam_id: beam id
"""
log.info('Starting Pulsar Data Transfer...')
socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id))
socket.send(json.dumps(config).encode())
socket.send(bytearray(1000 * 1000))
# Overwrites the metadata name in the config dict
# and re-sends the data to the receiver.
config['metadata']['name'] = 'candidate_two'
socket.send(json.dumps(config).encode())
socket.send(bytearray(1000 * 1000))
socket.close()
log.info('Pulsar Data Transfer Completed...')
|
python
|
{
"resource": ""
}
|
q17231
|
SubarrayDevice.init_device
|
train
|
def init_device(self):
"""Initialise the device."""
Device.init_device(self)
time.sleep(0.1)
self.set_state(DevState.STANDBY)
|
python
|
{
"resource": ""
}
|
q17232
|
SubarrayDevice.configure
|
train
|
def configure(self, sbi_config: str):
"""Configure an SBI for this subarray.
Args:
sbi_config (str): SBI configuration JSON
Returns:
str, acceptance message or a JSON-encoded error report.
"""
# print(sbi_config)
config_dict = json.loads(sbi_config)
self.debug_stream('SBI configuration:\n%s',
json.dumps(config_dict, indent=2))
try:
sbi = Subarray(self.get_name()).configure_sbi(config_dict)
except jsonschema.exceptions.ValidationError as error:
return json.dumps(dict(path=error.absolute_path.__str__(),
schema_path=error.schema_path.__str__(),
message=error.message), indent=2)
except RuntimeError as error:
return json.dumps(dict(error=str(error)), indent=2)
return 'Accepted SBI: {}'.format(sbi.id)
|
python
|
{
"resource": ""
}
|
q17233
|
SubarrayDevice.processing_blocks
|
train
|
def processing_blocks(self):
"""Return list of PBs associated with the subarray.
<http://www.esrf.eu/computing/cs/tango/pytango/v920/server_api/server.html#PyTango.server.pipe>
"""
sbi_ids = Subarray(self.get_name()).sbi_ids
pbs = []
for sbi_id in sbi_ids:
sbi = SchedulingBlockInstance(sbi_id)
pbs.append(sbi.processing_block_ids)
return 'PB', pbs
|
python
|
{
"resource": ""
}
|
q17234
|
datetime_from_isoformat
|
train
|
def datetime_from_isoformat(value: str):
"""Return a datetime object from an isoformat string.
Args:
value (str): Datetime string in isoformat.
"""
if sys.version_info >= (3, 7):
return datetime.fromisoformat(value)
return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
|
python
|
{
"resource": ""
}
|
q17235
|
SDPMasterDevice.always_executed_hook
|
train
|
def always_executed_hook(self):
"""Run for each command."""
_logT = self._devProxy.get_logging_target()
if 'device::sip_sdp_logger' not in _logT:
try:
self._devProxy.add_logging_target('device::sip_sdp/elt/logger')
self.info_stream("Test of Tango logging from "
"'tc_tango_master'")
except Exception as e:
LOG.debug('Failed to set up Tango logging: %s', e)
|
python
|
{
"resource": ""
}
|
q17236
|
SDPMasterDevice.configure
|
train
|
def configure(self, value):
"""Schedule an offline only SBI with SDP."""
# Only accept new SBIs if the SDP is on.
if self._sdp_state.current_state != 'on':
raise RuntimeWarning('Unable to configure SBIs unless SDP is '
'\'on\'.')
# Check that the new SBI is not already registered.
sbi_config_dict = json.loads(value)
sbi_list = SchedulingBlockInstanceList()
LOG.info('SBIs before: %s', sbi_list.active)
if sbi_config_dict.get('id') in sbi_list.active:
raise RuntimeWarning('Unable to add SBI with ID {}, an SBI with '
'this ID is already registered with SDP!'
.format(sbi_config_dict.get('id')))
# Add the SBI to the dictionary.
LOG.info('Scheduling offline SBI! Config:\n%s', value)
sbi = SchedulingBlockInstance.from_config(sbi_config_dict)
LOG.info('SBIs after: %s', sbi_list.active)
sbi_pb_ids = sbi.processing_block_ids
LOG.info('SBI "%s" contains PBs: %s', sbi.id, sbi_pb_ids)
# pb_list = ProcessingBlockList()
# LOG.info('Active PBs: %s', pb_list.active)
# Get the list and number of Tango PB devices
tango_db = Database()
pb_device_class = "ProcessingBlockDevice"
pb_device_server_instance = "processing_block_ds/1"
pb_devices = tango_db.get_device_name(pb_device_server_instance,
pb_device_class)
LOG.info('Number of PB devices in the pool = %d', len(pb_devices))
# Get a PB device which has not been assigned.
for pb_id in sbi_pb_ids:
for pb_device_name in pb_devices:
device = DeviceProxy(pb_device_name)
if not device.pb_id:
LOG.info('Assigning PB device = %s to PB id = %s',
pb_device_name, pb_id)
# Set the device attribute 'pb_id' to the processing block
# id it is tracking.
device.pb_id = pb_id
break
|
python
|
{
"resource": ""
}
|
q17237
|
SDPMasterDevice._get_service_state
|
train
|
def _get_service_state(service_id: str):
"""Get the Service state object for the specified id."""
LOG.debug('Getting state of service %s', service_id)
services = get_service_id_list()
service_ids = [s for s in services if service_id in s]
if len(service_ids) != 1:
return 'Service not found! services = {}'.format(str(services))
subsystem, name, version = service_ids[0].split(':')
return ServiceState(subsystem, name, version)
|
python
|
{
"resource": ""
}
|
q17238
|
SDPMasterDevice.allowed_target_sdp_states
|
train
|
def allowed_target_sdp_states(self):
"""Return a list of allowed target states for the current state."""
_current_state = self._sdp_state.current_state
_allowed_target_states = self._sdp_state.allowed_target_states[
_current_state]
return json.dumps(dict(allowed_target_sdp_states=
_allowed_target_states))
|
python
|
{
"resource": ""
}
|
q17239
|
SDPMasterDevice.target_sdp_state
|
train
|
def target_sdp_state(self, state):
"""Update the target state of SDP."""
LOG.info('Setting SDP target state to %s', state)
if self._sdp_state.current_state == state:
LOG.info('Target state ignored, SDP is already "%s"!', state)
if state == 'on':
self.set_state(DevState.ON)
if state == 'off':
self.set_state(DevState.OFF)
if state == 'standby':
self.set_state(DevState.STANDBY)
if state == 'disable':
self.set_state(DevState.DISABLE)
self._sdp_state.update_target_state(state)
|
python
|
{
"resource": ""
}
|
q17240
|
SDPMasterDevice.health
|
train
|
def health(self):
"""Health check method, returns the up-time of the device."""
return json.dumps(dict(uptime='{:.3f}s'
.format((time.time() - self._start_time))))
|
python
|
{
"resource": ""
}
|
q17241
|
SDPMasterDevice.scheduling_block_instances
|
train
|
def scheduling_block_instances(self):
"""Return the a JSON dict encoding the SBIs known to SDP."""
# TODO(BMo) change this to a pipe?
sbi_list = SchedulingBlockInstanceList()
return json.dumps(dict(active=sbi_list.active,
completed=sbi_list.completed,
aborted=sbi_list.aborted))
|
python
|
{
"resource": ""
}
|
q17242
|
SDPMasterDevice.processing_blocks
|
train
|
def processing_blocks(self):
"""Return the a JSON dict encoding the PBs known to SDP."""
pb_list = ProcessingBlockList()
# TODO(BMo) realtime, offline etc.
return json.dumps(dict(active=pb_list.active,
completed=pb_list.completed,
aborted=pb_list.aborted))
|
python
|
{
"resource": ""
}
|
q17243
|
SDPMasterDevice.processing_block_devices
|
train
|
def processing_block_devices(self):
"""Get list of processing block devices."""
# Get the list and number of Tango PB devices
tango_db = Database()
pb_device_class = "ProcessingBlockDevice"
pb_device_server_instance = "processing_block_ds/1"
pb_devices = tango_db.get_device_name(pb_device_server_instance,
pb_device_class)
LOG.info('Number of PB devices in the pool = %d', len(pb_devices))
pb_device_map = []
for pb_device_name in pb_devices:
device = DeviceProxy(pb_device_name)
if device.pb_id:
LOG.info('%s %s', pb_device_name, device.pb_id)
pb_device_map.append((pb_device_name, device.pb_id))
return str(pb_device_map)
|
python
|
{
"resource": ""
}
|
q17244
|
SDPMasterDevice._set_master_state
|
train
|
def _set_master_state(self, state):
"""Set the state of the SDPMaster."""
if state == 'init':
self._service_state.update_current_state('init', force=True)
self.set_state(DevState.INIT)
elif state == 'on':
self.set_state(DevState.ON)
self._service_state.update_current_state('on')
|
python
|
{
"resource": ""
}
|
q17245
|
register_subarray_devices
|
train
|
def register_subarray_devices():
"""Register subarray devices."""
tango_db = Database()
LOG.info("Registering Subarray devices:")
device_info = DbDevInfo()
# pylint: disable=protected-access
device_info._class = "SubarrayDevice"
device_info.server = "subarray_ds/1"
for index in range(16):
device_info.name = "sip_sdp/elt/subarray_{:02d}".format(index)
LOG.info("\t%s", device_info.name)
tango_db.add_device(device_info)
tango_db.put_class_property(device_info._class, dict(version='1.0.0'))
|
python
|
{
"resource": ""
}
|
q17246
|
add_workflow_definitions
|
train
|
def add_workflow_definitions(sbi_config: dict):
"""Add any missing SBI workflow definitions as placeholders.
This is a utility function used in testing and adds mock / test workflow
definitions to the database for workflows defined in the specified
SBI config.
Args:
sbi_config (dict): SBI configuration dictionary.
"""
registered_workflows = []
for i in range(len(sbi_config['processing_blocks'])):
workflow_config = sbi_config['processing_blocks'][i]['workflow']
workflow_name = '{}:{}'.format(workflow_config['id'],
workflow_config['version'])
if workflow_name in registered_workflows:
continue
workflow_definition = dict(
id=workflow_config['id'],
version=workflow_config['version'],
stages=[]
)
key = "workflow_definitions:{}:{}".format(workflow_config['id'],
workflow_config['version'])
DB.save_dict(key, workflow_definition, hierarchical=False)
registered_workflows.append(workflow_name)
|
python
|
{
"resource": ""
}
|
q17247
|
generate_version
|
train
|
def generate_version(max_major: int = 1, max_minor: int = 7,
max_patch: int = 15) -> str:
"""Select a random version.
Args:
max_major (int, optional): maximum major version
max_minor (int, optional): maximum minor version
max_patch (int, optional): maximum patch version
Returns:
str, Version String
"""
major = randint(0, max_major)
minor = randint(0, max_minor)
patch = randint(0, max_patch)
return '{:d}.{:d}.{:d}'.format(major, minor, patch)
|
python
|
{
"resource": ""
}
|
q17248
|
generate_sb
|
train
|
def generate_sb(date: datetime.datetime, project: str,
programme_block: str) -> dict:
"""Generate a Scheduling Block data object.
Args:
date (datetime.datetime): UTC date of the SBI
project (str): Project Name
programme_block (str): Programme
Returns:
dict, Scheduling Block data object.
"""
date = date.strftime('%Y%m%d')
instance_id = randint(0, 9999)
sb_id = 'SB-{}-{}-{:04d}'.format(date, project, instance_id)
return dict(id=sb_id, project=project, programme_block=programme_block)
|
python
|
{
"resource": ""
}
|
q17249
|
generate_pb_config
|
train
|
def generate_pb_config(pb_id: str,
pb_config: dict = None,
workflow_config: dict = None) -> dict:
"""Generate a PB configuration dictionary.
Args:
pb_id (str): Processing Block Id
pb_config (dict, optional) PB configuration.
workflow_config (dict, optional): Workflow configuration
Returns:
dict, PB configuration dictionary.
"""
if workflow_config is None:
workflow_config = dict()
if pb_config is None:
pb_config = dict()
pb_type = pb_config.get('type', choice(PB_TYPES))
workflow_id = workflow_config.get('id')
if workflow_id is None:
if pb_type == 'offline':
workflow_id = choice(OFFLINE_WORKFLOWS)
else:
workflow_id = choice(REALTIME_WORKFLOWS)
workflow_version = workflow_config.get('version', generate_version())
workflow_parameters = workflow_config.get('parameters', dict())
pb_data = dict(
id=pb_id,
version=__pb_version__,
type=pb_type,
priority=pb_config.get('priority', randint(0, 10)),
dependencies=pb_config.get('dependencies', []),
resources_required=pb_config.get('resources_required', []),
workflow=dict(
id=workflow_id,
version=workflow_version,
parameters=workflow_parameters
)
)
return pb_data
|
python
|
{
"resource": ""
}
|
q17250
|
generate_sbi_config
|
train
|
def generate_sbi_config(num_pbs: int = 3, project: str = 'sip',
programme_block: str = 'sip_demos',
pb_config: Union[dict, List[dict]] = None,
workflow_config:
Union[dict, List[dict]] = None,
register_workflows=False) -> dict:
"""Generate a SBI configuration dictionary.
Args:
num_pbs (int, optional): Number of Processing Blocks (default = 3)
project (str, optional): Project to associate the SBI with.
programme_block (str, optional): SBI programme block
pb_config (dict, List[dict], optional): PB configuration
workflow_config (dict, List[dict], optional): Workflow configuration
register_workflows (bool, optional): If true also register workflows.
Returns:
dict, SBI configuration dictionary
"""
if isinstance(workflow_config, dict):
workflow_config = [workflow_config]
if isinstance(pb_config, dict):
pb_config = [pb_config]
utc_now = datetime.datetime.utcnow()
pb_list = []
for i in range(num_pbs):
pb_id = ProcessingBlock.get_id(utc_now)
if workflow_config is not None:
_workflow_config = workflow_config[i]
else:
_workflow_config = None
if pb_config is not None:
_pb_config = pb_config[i]
else:
_pb_config = None
pb_dict = generate_pb_config(pb_id, _pb_config, _workflow_config)
pb_list.append(pb_dict)
sbi_config = dict(
id=SchedulingBlockInstance.get_id(utc_now, project),
version=__sbi_version__,
scheduling_block=generate_sb(utc_now, project, programme_block),
processing_blocks=pb_list
)
if register_workflows:
add_workflow_definitions(sbi_config)
return sbi_config
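# --- Usage sketch (illustrative, not part of the original module) ---
# Generate a single-PB SBI with a fixed workflow id ('mock_workflow' is
# a placeholder); all other fields use the randomised defaults above.
def _example_generate_sbi_config():
    return generate_sbi_config(
        num_pbs=1,
        workflow_config=dict(id='mock_workflow', version='1.0.0',
                             parameters=dict()))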
|
python
|
{
"resource": ""
}
|
q17251
|
generate_sbi_json
|
train
|
def generate_sbi_json(num_pbs: int = 3, project: str = 'sip',
programme_block: str = 'sip_demos',
pb_config: Union[dict, List[dict]] = None,
workflow_config:
Union[dict, List[dict]] = None,
register_workflows=True) -> str:
"""Return a JSON string used to configure an SBI."""
return json.dumps(generate_sbi_config(num_pbs, project, programme_block,
pb_config,
workflow_config, register_workflows))
|
python
|
{
"resource": ""
}
|
q17252
|
SDPLoggerDevice.log
|
train
|
def log(self, argin):
"""Log a command for the SDP STango ubsystem devices."""
#
# Tango Manual Appendix 9 gives the format
# argin[0] = millisecond Unix timestamp
# argin[1] = log level
# argin[2] = the source log device name
# argin[3] = the log message
# argin[4] = Not used - reserved
# argin[5] = thread identifier of originating message
tm = datetime.datetime.fromtimestamp(float(argin[0])/1000.)
fmt = "%Y-%m-%d %H:%M:%S"
message = "TANGO Log message - {} - {} {} {}".format(
tm.strftime(fmt), argin[1], argin[2], argin[3])
LOG.info(message)
|
python
|
{
"resource": ""
}
|
q17253
|
_scheduling_block_ids
|
train
|
def _scheduling_block_ids(num_blocks, start_id, project):
"""Generate Scheduling Block instance ID"""
for i in range(num_blocks):
_root = '{}-{}'.format(strftime("%Y%m%d", gmtime()), project)
yield '{}-sb{:03d}'.format(_root, i + start_id), \
'{}-sbi{:03d}'.format(_root, i + start_id)
|
python
|
{
"resource": ""
}
|
q17254
|
_generate_processing_blocks
|
train
|
def _generate_processing_blocks(start_id, min_blocks=0, max_blocks=4):
"""Generate a number of Processing Blocks"""
processing_blocks = []
num_blocks = random.randint(min_blocks, max_blocks)
for i in range(start_id, start_id + num_blocks):
_id = 'sip-pb{:03d}'.format(i)
block = dict(id=_id, resources_requirement={}, workflow={})
processing_blocks.append(block)
return processing_blocks
|
python
|
{
"resource": ""
}
|
q17255
|
_scheduling_block_config
|
train
|
def _scheduling_block_config(num_blocks=5, start_sbi_id=0, start_pb_id=0,
project='sip'):
"""Return a Scheduling Block Configuration dictionary"""
pb_id = start_pb_id
for sb_id, sbi_id in _scheduling_block_ids(num_blocks, start_sbi_id,
project):
sub_array_id = 'subarray-{:02d}'.format(random.choice(range(5)))
config = dict(id=sbi_id,
sched_block_id=sb_id,
sub_array_id=sub_array_id,
processing_blocks=_generate_processing_blocks(pb_id))
pb_id += len(config['processing_blocks'])
yield config
|
python
|
{
"resource": ""
}
|
q17256
|
add_scheduling_blocks
|
train
|
def add_scheduling_blocks(num_blocks, clear=True):
"""Add a number of scheduling blocks to the db."""
db_client = ConfigDb()
if clear:
LOG.info('Resetting database ...')
db_client.clear()
start_sbi_id = 0
start_pb_id = 0
else:
start_sbi_id = len(db_client.get_sched_block_instance_ids())
start_pb_id = len(db_client.get_processing_block_ids())
LOG.info("Adding %i SBIs to the db", num_blocks)
for config in _scheduling_block_config(num_blocks, start_sbi_id,
start_pb_id):
LOG.info('Creating SBI %s with %i PBs.', config['id'],
len(config['processing_blocks']))
db_client.add_sched_block_instance(config)
|
python
|
{
"resource": ""
}
|
q17257
|
main
|
train
|
def main(args=None, **kwargs):
"""Start the Processing Block device server."""
LOG.info('Starting SDP PB devices.')
return run([ProcessingBlockDevice], verbose=True, msg_stream=sys.stdout,
args=args, **kwargs)
|
python
|
{
"resource": ""
}
|
q17258
|
ProcessingBlock.dependencies
|
train
|
def dependencies(self) -> List[Dependency]:
"""Return the PB dependencies."""
dependencies_str = DB.get_hash_value(self.key, 'dependencies')
dependencies = []
for dependency in ast.literal_eval(dependencies_str):
dependencies.append(Dependency(dependency))
return dependencies
|
python
|
{
"resource": ""
}
|
q17259
|
ProcessingBlock.resources_assigned
|
train
|
def resources_assigned(self) -> List[Resource]:
"""Return list of resources assigned to the PB."""
resources_str = DB.get_hash_value(self.key, 'resources_assigned')
resources_assigned = []
for resource in ast.literal_eval(resources_str):
resources_assigned.append(Resource(resource))
return resources_assigned
|
python
|
{
"resource": ""
}
|
q17260
|
ProcessingBlock.workflow_stages
|
train
|
def workflow_stages(self) -> List[WorkflowStage]:
"""Return list of workflow stages.
Returns:
list, WorkflowStage objects for this PB.
"""
workflow_stages = []
stages = DB.get_hash_value(self.key, 'workflow_stages')
for index in range(len(ast.literal_eval(stages))):
workflow_stages.append(WorkflowStage(self.id, index))
return workflow_stages
|
python
|
{
"resource": ""
}
|
q17261
|
ProcessingBlock.add_assigned_resource
|
train
|
def add_assigned_resource(self, resource_type: str,
value: Union[str, int, float, bool],
parameters: dict = None):
"""Add assigned resource to the processing block.
Args:
resource_type (str): Resource type
value: Resource value
parameters (dict, optional): Parameters specific to the resource
"""
if parameters is None:
parameters = dict()
resources = DB.get_hash_value(self.key, 'resources_assigned')
resources = ast.literal_eval(resources)
resources.append(dict(type=resource_type, value=value,
parameters=parameters))
DB.set_hash_value(self.key, 'resources_assigned', resources)
|
python
|
{
"resource": ""
}
|
q17262
|
ProcessingBlock.remove_assigned_resource
|
train
|
def remove_assigned_resource(self, resource_type: str,
value: Union[str, int, float, bool] = None,
parameters: dict = None):
"""Remove assigned resources from the processing block.
All matching resources will be removed. If only type is specified
all resources of the specified type will be removed.
If value and/or parameters are specified they will be used
for matching the resource to remove.
Args:
resource_type (str): Resource type
value: Resource value
parameters (dict, optional): Parameters specific to the resource
"""
resources = DB.get_hash_value(self.key, 'resources_assigned')
resources = ast.literal_eval(resources)
new_resources = []
for resource in resources:
if resource['type'] != resource_type:
new_resources.append(resource)
elif value is not None and resource['value'] != value:
new_resources.append(resource)
elif parameters is not None and \
resource['parameters'] != parameters:
new_resources.append(resource)
DB.set_hash_value(self.key, 'resources_assigned', new_resources)
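# --- Usage sketch (illustrative, not part of the original module) ---
# 'pb' stands for a ProcessingBlock instance; the resource type and
# value are hypothetical examples of the matching rules above.
def _example_remove_assigned_resource(pb):
    # Remove only the resources of type 'cpu' with value 4 ...
    pb.remove_assigned_resource('cpu', value=4)
    # ... or remove every resource of type 'cpu'.
    pb.remove_assigned_resource('cpu')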
|
python
|
{
"resource": ""
}
|
q17263
|
ProcessingBlock.abort
|
train
|
def abort(self):
"""Abort the processing_block."""
LOG.debug('Aborting PB %s', self._id)
self.set_status('aborted')
pb_type = DB.get_hash_value(self.key, 'type')
key = '{}:active'.format(self._type)
DB.remove_from_list(key, self._id)
key = '{}:active:{}'.format(self._type, pb_type)
DB.remove_from_list(key, self._id)
key = '{}:aborted'.format(self._type)
DB.append_to_list(key, self._id)
key = '{}:aborted:{}'.format(self._type, pb_type)
DB.append_to_list(key, self._id)
self._mark_updated()
|
python
|
{
"resource": ""
}
|
q17264
|
ProcessingBlock._mark_updated
|
train
|
def _mark_updated(self):
"""Update the updated timestamp."""
timestamp = datetime.datetime.utcnow().isoformat()
DB.set_hash_value(self.key, 'updated', timestamp)
|
python
|
{
"resource": ""
}
|
q17265
|
get
|
train
|
def get():
"""Subarray list.
This method will list all sub-arrays known to SDP.
"""
_url = get_root_url()
LOG.debug('GET Sub array list')
sub_array_ids = sorted(DB.get_sub_array_ids())
response = dict(sub_arrays=[])
for array_id in sub_array_ids:
array_summary = dict(sub_arrary_id=array_id)
block_ids = DB.get_sub_array_sbi_ids(array_id)
LOG.debug('Subarray IDs: %s', array_id)
LOG.debug('SBI IDs: %s', block_ids)
array_summary['num_scheduling_blocks'] = len(block_ids)
array_summary['links'] = {
'detail': '{}/sub-array/{}'.format(_url, array_id)
}
response['sub_arrays'].append(array_summary)
response['links'] = dict(self=request.url, home=_url)
return response, status.HTTP_200_OK
|
python
|
{
"resource": ""
}
|
q17266
|
post
|
train
|
def post():
"""Generate a SBI."""
_url = get_root_url()
LOG.debug("POST subarray SBI.")
# TODO(BM) generate sbi_config .. see report ...
# ... will need to add this as a util function on the db...
sbi_config = {}
DB.add_sbi(sbi_config)
response = dict()
return response, status.HTTP_200_OK
|
python
|
{
"resource": ""
}
|
q17267
|
Subarray.set_parameters
|
train
|
def set_parameters(self, parameters_dict):
"""Set the subarray parameters.
Args:
parameters_dict (dict): Dictionary of Subarray parameters
"""
DB.set_hash_value(self._key, 'parameters', parameters_dict)
self.publish("parameters_updated")
|
python
|
{
"resource": ""
}
|
q17268
|
Subarray.sbi_ids
|
train
|
def sbi_ids(self) -> List[str]:
"""Get the list of SBI Ids.
Returns:
list, list of SBI ids associated with this subarray.
"""
return ast.literal_eval(DB.get_hash_value(self._key, 'sbi_ids'))
|
python
|
{
"resource": ""
}
|
q17269
|
Subarray.configure_sbi
|
train
|
def configure_sbi(self, sbi_config: dict, schema_path: str = None):
"""Add a new SBI to the database associated with this subarray.
Args:
sbi_config (dict): SBI configuration.
schema_path (str, optional): Path to the SBI config schema.
"""
if not self.active:
raise RuntimeError("Unable to add SBIs to inactive subarray!")
sbi_config['subarray_id'] = self._id
sbi = SchedulingBlockInstance.from_config(sbi_config, schema_path)
self._add_sbi_id(sbi_config['id'])
return sbi
|
python
|
{
"resource": ""
}
|
q17270
|
Subarray.abort
|
train
|
def abort(self):
"""Abort all SBIs associated with the subarray."""
for sbi_id in self.sbi_ids:
sbi = SchedulingBlockInstance(sbi_id)
sbi.abort()
self.set_state('ABORTED')
|
python
|
{
"resource": ""
}
|
q17271
|
Subarray.deactivate
|
train
|
def deactivate(self):
"""Deactivate the subarray."""
DB.set_hash_value(self._key, 'active', 'False')
# Remove the subarray from each of the SBIs
for sbi_id in self.sbi_ids:
SchedulingBlockInstance(sbi_id).clear_subarray()
DB.set_hash_value(self._key, 'sbi_ids', [])
self.publish('subarray_deactivated')
|
python
|
{
"resource": ""
}
|
q17272
|
Subarray.remove_sbi_id
|
train
|
def remove_sbi_id(self, sbi_id):
"""Remove an SBI Identifier."""
sbi_ids = self.sbi_ids
sbi_ids.remove(sbi_id)
DB.set_hash_value(self._key, 'sbi_ids', sbi_ids)
|
python
|
{
"resource": ""
}
|
q17273
|
Subarray._add_sbi_id
|
train
|
def _add_sbi_id(self, sbi_id):
"""Add a SBI Identifier."""
sbi_ids = self.sbi_ids
sbi_ids.append(sbi_id)
DB.set_hash_value(self._key, 'sbi_ids', sbi_ids)
|
python
|
{
"resource": ""
}
|
q17274
|
EventQueue.get
|
train
|
def get(self) -> Union[Event, None]:
"""Get the latest event from the queue.
Call this method to query the queue for the latest event.
If no event has been published None is returned.
Returns:
Event or None
"""
message = self._queue.get_message()
if message and message['type'] == 'message':
event_id = DB.get_event(self._pub_key, self._processed_key)
event_data_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_data_str)
event_dict['id'] = event_id
event_dict['subscriber'] = self._subscriber
return Event.from_config(event_dict)
return None
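# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal polling loop: 'queue' stands for an EventQueue instance
# obtained from a subscribe call elsewhere in the package (assumed).
def _example_poll(queue):
    event = queue.get()
    if event is not None:
        # ... handle the event, then mark it as completed so it is
        # removed from the processed list (see complete_event below).
        queue.complete_event(event.id)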
|
python
|
{
"resource": ""
}
|
q17275
|
EventQueue.get_processed_events
|
train
|
def get_processed_events(self) -> List[Event]:
"""Get all processed events.
This method is intended to be used to recover events stuck in the
processed state, which can happen if an event handler goes down
before completing the processing of an event.
Returns:
list[Events], list of event objects.
"""
event_ids = DB.get_list(self._processed_key)
events = []
for event_id in event_ids:
event_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_str)
event_dict['id'] = event_id
event_dict['subscriber'] = self._subscriber
events.append(Event.from_config(event_dict))
return events
|
python
|
{
"resource": ""
}
|
q17276
|
EventQueue.complete_event
|
train
|
def complete_event(self, event_id: str):
"""Complete the specified event."""
event_ids = DB.get_list(self._processed_key)
if event_id not in event_ids:
raise KeyError('Unable to complete event. Event {} has not been '
'processed (ie. it is not in the processed '
'list).'.format(event_id))
DB.remove_from_list(self._processed_key, event_id, pipeline=True)
key = _keys.completed_events(self._object_type, self._subscriber)
DB.append_to_list(key, event_id, pipeline=True)
DB.execute()
|
python
|
{
"resource": ""
}
|
q17277
|
add
|
train
|
def add(workflow_definition: dict, templates_root: str):
"""Add a workflow definition to the Configuration Database.
Templates are expected to be found in a directory tree with the following
structure:
<templates_root>/<workflow_id>/<workflow_version>/<stage_id>/<stage_version>/<templates>
Args:
workflow_definition (dict): Workflow definition.
templates_root (str): Workflow templates root path
"""
schema_path = join(dirname(__file__), 'schema', 'workflow_definition.json')
with open(schema_path, 'r') as file:
schema = json.loads(file.read())
jsonschema.validate(workflow_definition, schema)
_id = workflow_definition['id']
_version = workflow_definition['version']
_load_templates(workflow_definition, templates_root)
workflow_id = workflow_definition['id']
version = workflow_definition['version']
name = "workflow_definitions:{}:{}".format(workflow_id, version)
if DB.get_keys(name):
raise KeyError('Workflow definition already exists: {}'.format(name))
# DB.set_hash_values(name, workflow_definition)
DB.save_dict(name, workflow_definition, hierarchical=False)
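# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal workflow definition with no stages; the id, version and the
# templates directory are hypothetical. Definitions with stages must
# provide the template tree described in the docstring above.
def _example_add_workflow():
    definition = dict(id='mock_workflow', version='1.0.0', stages=[])
    add(definition, templates_root='workflow_templates')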
|
python
|
{
"resource": ""
}
|
q17278
|
delete
|
train
|
def delete(workflow_id: str = None, workflow_version: str = None):
"""Delete workflow definitions.
Args:
workflow_id (str, optional): Optional workflow identifier
workflow_version (str, optional): Optional workflow identifier version
If workflow_id and workflow_version are None, delete all workflow
definitions.
"""
if workflow_id is None and workflow_version is None:
keys = DB.get_keys("workflow_definitions:*")
DB.delete(*keys)
elif workflow_id is not None and workflow_version is None:
keys = DB.get_keys("workflow_definitions:{}:*".format(workflow_id))
DB.delete(*keys)
elif workflow_id is None and workflow_version is not None:
keys = DB.get_keys("workflow_definitions:*:{}"
.format(workflow_version))
DB.delete(*keys)
else:
name = "workflow_definitions:{}:{}".format(workflow_id,
workflow_version)
DB.delete(name)
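# --- Usage sketch (illustrative, not part of the original module) ---
# The id and version below are hypothetical examples of the four
# delete modes handled above.
def _example_delete_workflows():
    delete('mock_workflow', '1.0.0')    # one specific definition
    delete('mock_workflow')             # every version of one workflow
    delete(workflow_version='1.0.0')    # every workflow at one version
    delete()                            # all workflow definitions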
|
python
|
{
"resource": ""
}
|
q17279
|
get_workflow
|
train
|
def get_workflow(workflow_id: str, workflow_version: str) -> dict:
"""Get a workflow definition from the Configuration Database.
Args:
workflow_id (str): Workflow identifier
workflow_version (str): Workflow version
Returns:
dict, Workflow definition dictionary
"""
name = "workflow_definitions:{}:{}".format(workflow_id, workflow_version)
workflow = DB.get_hash_dict(name)
workflow['stages'] = ast.literal_eval(workflow['stages'])
return workflow
|
python
|
{
"resource": ""
}
|
q17280
|
get_workflows
|
train
|
def get_workflows() -> dict:
"""Get dict of ALL known workflow definitions.
Returns:
dict, mapping of workflow id to a list of known versions.
"""
keys = DB.get_keys("workflow_definitions:*")
known_workflows = dict()
for key in keys:
values = key.split(':')
if values[1] not in known_workflows:
known_workflows[values[1]] = list()
known_workflows[values[1]].append(values[2])
return known_workflows
|
python
|
{
"resource": ""
}
|
q17281
|
_load_templates
|
train
|
def _load_templates(workflow: dict, templates_root: str):
"""Load templates keys."""
workflow_template_path = join(templates_root, workflow['id'],
workflow['version'])
for i, stage_config in enumerate(workflow['stages']):
stage_template_path = join(workflow_template_path,
stage_config['id'],
stage_config['version'])
for config_type in ['ee_config', 'app_config']:
for key, value in stage_config[config_type].items():
if 'template' in key:
template_file = join(stage_template_path, value)
with open(template_file, 'r') as file:
template_str = file.read()
workflow['stages'][i][config_type][key] = template_str
|
python
|
{
"resource": ""
}
|
q17282
|
delete_pb_devices
|
train
|
def delete_pb_devices():
"""Delete PBs devices from the Tango database."""
parser = argparse.ArgumentParser(description='Register PB devices.')
parser.add_argument('num_pb', type=int,
help='Number of PBs devices to register.')
args = parser.parse_args()
log = logging.getLogger('sip.tango_control.subarray')
tango_db = Database()
log.info("Deleting PB devices:")
for index in range(args.num_pb):
name = 'sip_sdp/pb/{:05d}'.format(index)
log.info("\t%s", name)
tango_db.delete_device(name)
|
python
|
{
"resource": ""
}
|
q17283
|
main
|
train
|
def main():
"""Main function for SPEAD sender module."""
# Check command line arguments.
if len(sys.argv) != 2:
raise RuntimeError('Usage: python3 async_send.py <json config>')
# Set up logging.
sip_logging.init_logger(show_thread=False)
# Load SPEAD configuration from JSON file.
# _path = os.path.dirname(os.path.abspath(__file__))
# with open(os.path.join(_path, 'spead_send.json')) as file_handle:
# spead_config = json.load(file_handle)
spead_config = json.loads(sys.argv[1])
try:
_path = os.path.dirname(os.path.abspath(__file__))
schema_path = os.path.join(_path, 'config_schema.json')
with open(schema_path) as schema_file:
schema = json.load(schema_file)
validate(spead_config, schema)
except ValidationError as error:
print(error.cause)
raise
# Set up the SPEAD sender and run it (see method, above).
sender = SpeadSender(spead_config)
sender.run()
|
python
|
{
"resource": ""
}
|
q17284
|
SpeadSender.fill_buffer
|
train
|
def fill_buffer(heap_data, i_chan):
"""Blocking function to populate data in the heap.
This is run in an executor.
"""
# Calculate the time count and fraction.
now = datetime.datetime.utcnow()
time_full = now.timestamp()
time_count = int(time_full)
time_fraction = int((time_full - time_count) * (2**32 - 1))
diff = now - (now.replace(hour=0, minute=0, second=0, microsecond=0))
time_data = diff.seconds + 1e-6 * diff.microseconds
# Write the data into the buffer.
heap_data['visibility_timestamp_count'] = time_count
heap_data['visibility_timestamp_fraction'] = time_fraction
heap_data['correlator_output_data']['VIS'][:][:] = \
time_data + i_chan * 1j
|
python
|
{
"resource": ""
}
|
q17285
|
SpeadSender.run
|
train
|
def run(self):
"""Starts the sender."""
# Create the thread pool.
executor = concurrent.futures.ThreadPoolExecutor(
max_workers=self._config['num_workers'])
# Wait to ensure multiple senders can be synchronised.
now = int(datetime.datetime.utcnow().timestamp())
start_time = ((now + 29) // 30) * 30
self._log.info('Waiting until {}'.format(
datetime.datetime.fromtimestamp(start_time)))
while int(datetime.datetime.utcnow().timestamp()) < start_time:
time.sleep(0.1)
# Run the event loop.
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self._run_loop(executor))
except KeyboardInterrupt:
pass
finally:
# Send the end of stream message to each stream.
self._log.info('Shutting down, closing streams...')
tasks = []
for stream, item_group in self._streams:
tasks.append(stream.async_send_heap(item_group.get_end()))
loop.run_until_complete(asyncio.gather(*tasks))
self._log.info('... finished.')
executor.shutdown()
|
python
|
{
"resource": ""
}
|
q17286
|
SchedulingObjectList.set_complete
|
train
|
def set_complete(self, object_id: str):
"""Mark the specified object as completed."""
if object_id in self.active:
DB.remove_from_list('{}:active'.format(self.type), object_id)
DB.append_to_list('{}:completed'.format(self.type), object_id)
|
python
|
{
"resource": ""
}
|
q17287
|
SchedulingObjectList.publish
|
train
|
def publish(self, object_id: str, event_type: str,
event_data: dict = None):
"""Publish a scheduling object event.
Args:
object_id (str): ID of the scheduling object
event_type (str): Type of event.
event_data (dict, optional): Event data.
"""
object_key = SchedulingObject.get_key(self.type, object_id)
publish(event_type=event_type,
event_data=event_data,
object_type=self.type,
object_id=object_id,
object_key=object_key,
origin=None)
|
python
|
{
"resource": ""
}
|
q17288
|
_start_workflow_stages
|
train
|
def _start_workflow_stages(pb: ProcessingBlock, pb_id: str,
workflow_stage_dict: dict,
workflow_stage: WorkflowStage,
docker: DockerSwarmClient):
"""Start a workflow stage by starting a number of docker services.
This function first assesses if the specified workflow stage can be
started based on its dependencies. If this is found to be the case,
the workflow stage is started by first resolving any template arguments
in the workflow stage configuration, and then using the Docker Swarm Client
API to start workflow stage services. As part of this, the
workflow_stage_dict data structure is updated accordingly.
TODO(BMo) This function will need refactoring at some point as part
of an update to the way workflow state metadata is stored in the
configuration database. Currently the stage_data dictionary
is a bit of a hack for a badly specified Configuration Database
backed WorkflowStage object.
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing Block data
object
pb_id (str): Processing Block identifier
workflow_stage_dict (dict): Workflow stage metadata structure
workflow_stage (WorkflowStage): Workflow state configuration database
data object.
docker (DockerClient): Docker Swarm Client object.
"""
# FIXME(BMo) replace pb_id argument, get this from the pb instead!
stage_data = workflow_stage_dict[workflow_stage.id]
stage_data['start'] = False
# Determine if the stage can be started.
if stage_data['status'] == 'none':
if not workflow_stage.dependencies:
stage_data['start'] = True
else:
dependency_status = []
for dependency in workflow_stage.dependencies:
dependency_status.append(
workflow_stage_dict[dependency['value']][
'status'] == 'complete')
# ii += 1
stage_data['start'] = all(dependency_status)
# Start the workflow stage.
if stage_data['start']:
# Configure EE (set up templates)
LOG.info('-- Starting workflow stage: %s --', workflow_stage.id)
LOG.info('Configuring EE templates.')
args_template = jinja2.Template(workflow_stage.args_template)
stage_params = pb.workflow_parameters[workflow_stage.id]
template_params = {**workflow_stage.config, **stage_params}
args = args_template.render(stage=template_params)
LOG.info('Resolving workflow script arguments.')
args = json.dumps(json.loads(args))
compose_template = jinja2.Template(
workflow_stage.compose_template)
compose_str = compose_template.render(stage=dict(args=args))
# Prefix service names with the PB id
compose_dict = yaml.load(compose_str)
# Snapshot the keys so services can be renamed while iterating
service_names = list(compose_dict['services'].keys())
new_service_names = [
'{}_{}_{}'.format(pb_id, pb.workflow_id, name)
for name in service_names]
for new, old in zip(new_service_names, service_names):
compose_dict['services'][new] = \
compose_dict['services'].pop(old)
compose_str = yaml.dump(compose_dict)
# Run the compose file
service_ids = docker.create_services(compose_str)
LOG.info('Starting workflow containers:')
for service_id in service_ids:
service_name = docker.get_service_name(service_id)
LOG.info(" %s, %s ", service_name, service_id)
stage_data['services'][service_id] = {}
LOG.info('Created Services: %s', service_ids)
stage_data['services'][service_id] = dict(
name=docker.get_service_name(service_id),
status='running',
complete=False
)
stage_data["status"] = 'running'
|
python
|
{
"resource": ""
}
|
q17289
|
_update_workflow_stages
|
train
|
def _update_workflow_stages(stage_data: dict, workflow_stage: WorkflowStage,
docker: DockerSwarmClient):
"""Check and update the status of a workflow stage.
This function checks and updates the status of a workflow stage
specified by the parameters in the specified stage_data dictionary.
If the workflow stage is not marked as complete, this function will
check with the Docker Swarm API on the status of Docker services
defined for the stage. If **all** services are found to be complete
    (based on their service state being reported as 'shutdown'),
    the workflow stage is marked as complete.
This function is used by `execute_processing_block`.
TODO(BMo) This function will need refactoring at some point as part
of an update to the way workflow state metadata is stored in the
configuration database. Currently the stage_data dictionary
is a bit of a hack for a badly specified Configuration Database
backed WorkflowStage object.
Args:
stage_data (dict): Dictionary holding workflow stage metadata.
workflow_stage (WorkflowStage): Workflow stage data object.
docker (DockerClient): Docker Swarm Client object.
"""
service_status_complete = []
# FIXME(BMo) is not "complete" -> is "running"
if stage_data["status"] != "complete":
for service_id, service_dict in stage_data['services'].items():
service_state = docker.get_service_state(service_id)
if service_state == 'shutdown':
docker.delete_service(service_id)
service_dict['status'] = service_state
service_dict['complete'] = (service_state == 'shutdown')
service_status_complete.append(service_dict['complete'])
        if service_status_complete and all(service_status_complete):
            LOG.info('Workflow stage %s complete!', workflow_stage.id)
stage_data['status'] = "complete"
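# --- Illustrative sketch (not part of the original source) ---------------
# Standalone illustration of the completion check above: a stage is only
# marked complete once every one of its services reports the 'shutdown'
# state. The hard-coded states below stand in for what the Docker Swarm
# client would report for real services.
stage_data = dict(status='running',
                  services={'svc-1': dict(status='running', complete=False),
                            'svc-2': dict(status='running', complete=False)})
reported_states = {'svc-1': 'shutdown', 'svc-2': 'shutdown'}   # stand-ins

service_status_complete = []
for service_id, service_dict in stage_data['services'].items():
    service_state = reported_states[service_id]
    service_dict['status'] = service_state
    service_dict['complete'] = (service_state == 'shutdown')
    service_status_complete.append(service_dict['complete'])
if service_status_complete and all(service_status_complete):
    stage_data['status'] = 'complete'
print(stage_data['status'])   # -> complete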
|
python
|
{
"resource": ""
}
|
q17290
|
_abort_workflow
|
train
|
def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict,
docker: DockerSwarmClient):
"""Abort the workflow.
TODO(BMo): This function currently does nothing as the abort flag
is hardcoded to False!
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing block object.
workflow_stage_dict (dict): Workflow stage metadata dictionary.
docker (DockerClient): Docker Swarm Client object.
Returns:
        bool, True if the workflow was aborted, otherwise False.
"""
# TODO(BMo) Ask the database if the abort flag on the PB is set.
_abort_flag = False
if _abort_flag:
for workflow_stage in pb.workflow_stages:
for service_id, _ in \
workflow_stage_dict[workflow_stage.id]['services'].items():
docker.delete_service(service_id)
LOG.info("Deleted Service Id %s", service_id)
return True
return False
|
python
|
{
"resource": ""
}
|
q17291
|
_workflow_complete
|
train
|
def _workflow_complete(workflow_stage_dict: dict):
"""Check if the workflow is complete.
This function checks if the entire workflow is complete.
This function is used by `execute_processing_block`.
Args:
workflow_stage_dict (dict): Workflow metadata dictionary.
Returns:
bool, True if the workflow is complete, otherwise False.
"""
# Check if all stages are complete, if so end the PBC by breaking
# out of the while loop
complete_stages = []
for _, stage_config in workflow_stage_dict.items():
complete_stages.append((stage_config['status'] == 'complete'))
if all(complete_stages):
LOG.info('PB workflow complete!')
return True
return False
|
python
|
{
"resource": ""
}
|
q17292
|
execute_processing_block
|
train
|
def execute_processing_block(pb_id: str, log_level='DEBUG'):
"""Execute a processing block.
    Celery task that executes the workflow defined in a Configuration
    database Processing Block data object.
    Args:
        pb_id (str): Identifier of the Processing Block to execute.
        log_level (str): Python logging level.
"""
init_logger('sip', show_log_origin=True, propagate=False,
log_level=log_level)
LOG.info('+' * 40)
LOG.info('+ Executing Processing block: %s!', pb_id)
LOG.info('+' * 40)
LOG.info('Processing Block Controller version: %s', __version__)
LOG.info('Docker Swarm API version: %s', sip_swarm_api_version)
LOG.info('Configuration database API version: %s', config_db_version)
pb = ProcessingBlock(pb_id)
LOG.info('Starting workflow %s %s', pb.workflow_id, pb.workflow_version)
pb.set_status('running')
docker = DockerSwarmClient()
    # Copy workflow stages into a dict
workflow_stage_dict = {}
for stage in pb.workflow_stages:
workflow_stage_dict[stage.id] = deepcopy(stage.config)
workflow_stage_dict[stage.id]['services'] = dict()
# Loop until workflow stages are complete.
while True:
time.sleep(0.1)
for workflow_stage in pb.workflow_stages:
_start_workflow_stages(pb, pb_id, workflow_stage_dict,
workflow_stage, docker)
_update_workflow_stages(workflow_stage_dict[workflow_stage.id],
workflow_stage, docker)
if _abort_workflow(pb, workflow_stage_dict, docker):
break
if _workflow_complete(workflow_stage_dict):
break
pb_list = ProcessingBlockList()
pb_list.set_complete(pb_id)
pb.set_status('completed')
LOG.info('-' * 40)
LOG.info('- Destroying PBC for %s', pb_id)
LOG.info('-' * 40)
return pb.status
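# --- Illustrative sketch (not part of the original source) ---------------
# A hypothetical invocation of the function above. The Celery task
# registration (decorator) is not shown in this snippet, so the
# asynchronous variant is an assumption; the synchronous call applies to
# the plain function as defined here. The PB id is made up.
pb_id = 'PB-20181116-sip-001'   # hypothetical Processing Block id

# Synchronous (plain function) call:
# status = execute_processing_block(pb_id, log_level='INFO')

# Asynchronous call, assuming the function is registered as a Celery task:
# async_result = execute_processing_block.delay(pb_id)
# status = async_result.get()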
|
python
|
{
"resource": ""
}
|
q17293
|
ProcessingBlockQueue.put
|
train
|
def put(self, block_id, priority, pb_type='offline'):
"""Add a Processing Block to the queue.
        When a new entry is added, the queue is (re-)sorted by priority
        followed by insertion order (older blocks with equal priority come
        first).
Args:
block_id (str): Processing Block Identifier
priority (int): Processing Block scheduling priority
(higher values = higher priority)
pb_type (str): Processing Block type (offline, realtime)
"""
if pb_type not in ('offline', 'realtime'):
raise ValueError('Invalid PB type.')
with self._mutex:
added_time = datetime.datetime.utcnow().isoformat()
entry = (priority, sys.maxsize-self._index, block_id, pb_type,
added_time)
self._index += 1
if self._block_map.get(block_id) is not None:
raise KeyError('ERROR: Block id "{}" already exists in '
'PC PB queue!'.
format(block_id))
self._block_map[block_id] = entry
LOG.debug("Adding PB %s to queue", block_id)
self._queue.append(entry)
self._queue.sort() # Sort by priority followed by insertion order.
self._queue.reverse()
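# --- Illustrative sketch (not part of the original source) ---------------
# Standalone demonstration of the ordering used above: sorting tuples of
# (priority, sys.maxsize - insertion_index) and reversing the list places
# the highest priority first, with FIFO order among entries of equal
# priority. The block ids are made up for illustration.
import sys

queue, index = [], 0
for block_id, priority in [('PB-a', 1), ('PB-b', 5), ('PB-c', 5), ('PB-d', 2)]:
    queue.append((priority, sys.maxsize - index, block_id))
    index += 1
queue.sort()
queue.reverse()
print([entry[2] for entry in queue])   # -> ['PB-b', 'PB-c', 'PB-d', 'PB-a']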
|
python
|
{
"resource": ""
}
|
q17294
|
ProcessingBlockQueue.get
|
train
|
def get(self):
"""Get the highest priority Processing Block from the queue."""
with self._mutex:
entry = self._queue.pop()
del self._block_map[entry[2]]
return entry[2]
|
python
|
{
"resource": ""
}
|
q17295
|
ProcessingBlockQueue.remove
|
train
|
def remove(self, block_id):
"""Remove a Processing Block from the queue.
Args:
            block_id (str): Processing Block identifier to remove.
"""
        with self._mutex:
            entry = self._block_map.pop(block_id)
            self._queue.remove(entry)
|
python
|
{
"resource": ""
}
|
q17296
|
get_service_state_list
|
train
|
def get_service_state_list() -> List[ServiceState]:
"""Return a list of ServiceState objects known to SDP."""
keys = DB.get_keys('states*')
LOG.debug('Loading list of known services.')
services = []
for key in keys:
values = key.split(':')
if len(values) == 4:
services.append(ServiceState(*values[1:4]))
return services
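# --- Illustrative sketch (not part of the original source) ---------------
# The keys matched by 'states*' are assumed to look something like
# 'states:<subsystem>:<name>:<version>'; splitting on ':' and dropping the
# prefix yields the three values passed to ServiceState above. The key
# below is a made-up example.
key = 'states:sdp:processing_controller:1.0.0'
values = key.split(':')
if len(values) == 4:
    print(values[1:4])   # -> ['sdp', 'processing_controller', '1.0.0']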
|
python
|
{
"resource": ""
}
|
q17297
|
get_service_id_list
|
train
|
def get_service_id_list() -> List[tuple]:
"""Return list of Services."""
keys = DB.get_keys('states*')
services = []
for key in keys:
values = key.split(':')
if len(values) == 4:
services.append(':'.join(values[1:]))
return services
|
python
|
{
"resource": ""
}
|
q17298
|
get
|
train
|
def get():
"""Return the list of Processing Blocks known to SDP."""
LOG.debug('GET Processing Block list')
_url = get_root_url()
# Get list of Processing block Ids
block_ids = sorted(DB.get_processing_block_ids())
LOG.debug('Processing Block IDs: %s', block_ids)
# Construct response object
response = dict(num_processing_blocks=len(block_ids),
processing_blocks=list())
# Loop over blocks and add block summary to response.
for block in DB.get_block_details(block_ids):
block_id = block['id']
LOG.debug('Creating PB summary for %s', block_id)
block['links'] = dict(
detail='{}/processing-block/{}'.format(_url, block_id),
scheduling_block='{}/scheduling-block/{}'
.format(_url, block_id.split(':')[0])
)
response['processing_blocks'].append(block)
response['links'] = {
'self': '{}'.format(request.url),
'home': '{}'.format(_url)
}
return response, HTTPStatus.OK
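# --- Illustrative sketch (not part of the original source) ---------------
# The response shape produced above, reconstructed for a single made-up
# Processing Block. The root URL is a placeholder for whatever
# get_root_url() returns in a running service, and the block id format
# (<scheduling block id>:<suffix>) is inferred from the split(':') above.
_url = 'http://localhost:5000/api/v1'      # hypothetical root URL
block = dict(id='SB-01:PB-01')             # hypothetical PB summary
block['links'] = dict(
    detail='{}/processing-block/{}'.format(_url, block['id']),
    scheduling_block='{}/scheduling-block/{}'.format(
        _url, block['id'].split(':')[0]))
response = dict(num_processing_blocks=1, processing_blocks=[block],
                links=dict(self=_url + '/processing-blocks', home=_url))
print(response)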
|
python
|
{
"resource": ""
}
|
q17299
|
SchedulingObject.config
|
train
|
def config(self) -> dict:
"""Get the scheduling object config."""
# Check that the key exists
self._check_object_exists()
config_dict = DB.get_hash_dict(self.key)
        for key, value in config_dict.items():
            if any(char in value for char in ('[', '{')):
                config_dict[key] = ast.literal_eval(value)
        return config_dict
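# --- Illustrative sketch (not part of the original source) ---------------
# Values stored in the configuration database hash come back as strings,
# so list- and dict-valued fields are returned as their string
# representations; ast.literal_eval restores them, as in the method above.
# The config values here are made up for illustration.
import ast

config_dict = dict(id='pb-001', workflow_stages="[{'id': 'stage1'}]")
for key, value in config_dict.items():
    if any(char in value for char in ('[', '{')):
        config_dict[key] = ast.literal_eval(value)
print(type(config_dict['workflow_stages']))   # -> <class 'list'>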
|
python
|
{
"resource": ""
}
|