| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q17300
|
SchedulingObject.get_property
|
train
|
def get_property(self, property_key: str) -> str:
"""Get a scheduling object property."""
self._check_object_exists()
return DB.get_hash_value(self.key, property_key)
|
python
|
{
"resource": ""
}
|
q17301
|
SchedulingObject.set_status
|
train
|
def set_status(self, value):
"""Set the status of the scheduling object."""
self._check_object_exists()
DB.set_hash_value(self.key, 'status', value)
self.publish('status_changed', event_data=dict(status=value))
|
python
|
{
"resource": ""
}
|
q17302
|
SchedulingObject.publish
|
train
|
def publish(self, event_type: str, event_data: dict = None):
"""Publish an event associated with the scheduling object.
Note:
Ideally publish should not be used directly but by other methods
which perform actions on the object.
Args:
event_type (str): Type of event.
event_data (dict, optional): Event data.
"""
import inspect
import os.path
_stack = inspect.stack()
_origin = os.path.basename(_stack[3][1]) + '::' + \
_stack[3][3]+'::L{}'.format(_stack[3][2])
publish(event_type=event_type,
event_data=event_data,
object_type=self._type,
object_id=self._id,
object_key=self._key,
origin=_origin)
|
python
|
{
"resource": ""
}
|
q17303
|
SchedulingObject.get_events
|
train
|
def get_events(self) -> List[Event]:
"""Get events associated with the scheduling object.
Returns:
list of Event objects
"""
LOG.debug('Getting events for %s', self.key)
return get_events(self.key)
|
python
|
{
"resource": ""
}
|
q17304
|
SchedulingObject._check_object_exists
|
train
|
def _check_object_exists(self):
"""Raise a KeyError if the scheduling object doesnt exist.
Raise:
KeyError, if the object doesnt exist in the database.
"""
if not DB.get_keys(self.key):
raise KeyError("Object with key '{}' not exist".format(self.key))
|
python
|
{
"resource": ""
}
|
q17305
|
ServiceState.get_service_state_object_id
|
train
|
def get_service_state_object_id(subsystem: str, name: str,
version: str) -> str:
"""Return service state data object key.
Args:
subsystem (str): Subsystem the service belongs to
name (str): Name of the Service
version (str): Version of the Service
Returns:
str, Key used to store the service state data object
"""
return '{}:{}:{}'.format(subsystem, name, version)
|
python
|
{
"resource": ""
}
|
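The key returned above is a plain colon-separated string. A minimal standalone sketch (the helper name below is illustrative; the example identifier matches one of the service IDs registered by `_init` later in this section) shows the format produced:

```python
def service_state_key(subsystem: str, name: str, version: str) -> str:
    """Mirror of the key format built by get_service_state_object_id."""
    return '{}:{}:{}'.format(subsystem, name, version)


print(service_state_key('ExecutionControl', 'MasterController', '1.3.0'))
# -> ExecutionControl:MasterController:1.3.0
```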
q17306
|
DockerSwarmClient.create_services
|
train
|
def create_services(self, compose_str: str) -> list:
"""Create new docker services.
Args:
compose_str (string): Docker compose 'file' string
    Returns:
        list, IDs of the created services
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be run on '
'swarm manager nodes')
# Initialise empty list
services_ids = []
try:
        service_config = yaml.safe_load(compose_str)
# Deepcopy the service config
service_list = copy.deepcopy(service_config)
# Removing version and service from the dict
service_config.pop('version')
service_config.pop('services')
for service_name in service_list['services']:
service_exist = self._client.services.list(
filters={'name': service_name})
if not service_exist:
service_config['name'] = service_name
service_spec = self._parse_services(
service_config, service_name, service_list)
created_service = self._client.services.create(
**service_spec)
service_id = created_service.short_id
LOG.debug('Service created: %s', service_id)
services_ids.append(service_id)
else:
                LOG.debug('Service already exists')
except yaml.YAMLError as exc:
print(exc)
# Returning list of services created
return services_ids
|
python
|
{
"resource": ""
}
|
q17307
|
DockerSwarmClient.create_volume
|
train
|
def create_volume(self, volume_name: str, driver_spec: str = None):
"""Create new docker volumes.
Only the manager nodes can create a volume
Args:
volume_name (string): Name for the new docker volume
driver_spec (string): Driver for the docker volume
"""
# Default values
if driver_spec:
driver = driver_spec
else:
driver = 'local'
# Raise an exception if we are not a manager
if not self._manager:
        raise RuntimeError('Volumes can only be created '
                           'on swarm manager nodes')
self._client.volumes.create(name=volume_name, driver=driver)
|
python
|
{
"resource": ""
}
|
q17308
|
DockerSwarmClient.delete_all_volumes
|
train
|
def delete_all_volumes(self):
"""Remove all the volumes.
Only the manager nodes can delete a volume
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Volumes can only be deleted '
'on swarm manager nodes')
volume_list = self.get_volume_list()
    for volume in volume_list:
        # Remove each volume, forcing removal if it is in use
        self._api_client.remove_volume(volume, force=True)
|
python
|
{
"resource": ""
}
|
q17309
|
DockerSwarmClient.get_service_list
|
train
|
def get_service_list(self) -> list:
"""Get a list of docker services.
Only the manager nodes can retrieve all the services
Returns:
list, all the ids of the services in swarm
"""
# Initialising empty list
services = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve'
' all the services.')
service_list = self._client.services.list()
for s_list in service_list:
services.append(s_list.short_id)
return services
|
python
|
{
"resource": ""
}
|
q17310
|
DockerSwarmClient.get_service_name
|
train
|
def get_service_name(self, service_id: str) -> str:
"""Get the name of the docker service.
Only the manager nodes can retrieve service name
Args:
        service_id (string): Service ID
Returns:
string, name of the docker service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve all'
' the services details.')
service = self._client.services.get(service_id)
return service.name
|
python
|
{
"resource": ""
}
|
q17311
|
DockerSwarmClient.get_service_details
|
train
|
def get_service_details(self, service_id: str) -> dict:
"""Get details of a service.
Only the manager nodes can retrieve service details
Args:
        service_id (string): Service ID
Returns:
dict, details of the service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve all'
' the services details.')
service = self._client.services.get(service_id)
return service.attrs
|
python
|
{
"resource": ""
}
|
q17312
|
DockerSwarmClient.get_service_state
|
train
|
def get_service_state(self, service_id: str) -> str:
"""Get the state of the service.
Only the manager nodes can retrieve service state
Args:
service_id (str): Service id
Returns:
str, state of the service
"""
# Get service
service = self._client.services.get(service_id)
# Get the state of the service
for service_task in service.tasks():
service_state = service_task['DesiredState']
return service_state
|
python
|
{
"resource": ""
}
|
q17313
|
DockerSwarmClient.get_node_list
|
train
|
def get_node_list(self) -> list:
"""Get a list of nodes.
Only the manager nodes can retrieve all the nodes
Returns:
list, all the ids of the nodes in swarm
"""
# Initialising empty list
nodes = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node '
'can retrieve all the nodes.')
node_list = self._client.nodes.list()
for n_list in node_list:
nodes.append(n_list.id)
return nodes
|
python
|
{
"resource": ""
}
|
q17314
|
DockerSwarmClient.get_node_details
|
train
|
def get_node_details(self, node_id: str) -> dict:
"""Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
        node_id (str): Node ID
Returns:
dict, details of the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can '
'retrieve node details.')
node = self._client.nodes.get(node_id)
return node.attrs
|
python
|
{
"resource": ""
}
|
q17315
|
DockerSwarmClient.get_container_list
|
train
|
def get_container_list(self) -> list:
"""Get list of containers.
Returns:
list, all the ids of containers
"""
# Initialising empty list
containers = []
containers_list = self._client.containers.list()
for c_list in containers_list:
containers.append(c_list.short_id)
return containers
|
python
|
{
"resource": ""
}
|
q17316
|
DockerSwarmClient.get_container_details
|
train
|
def get_container_details(self, container_id_or_name: str) -> dict:
"""Get details of a container.
Args:
container_id_or_name (string): docker container id or name
Returns:
dict, details of the container
"""
container = self._client.containers.get(container_id_or_name)
return container.attrs
|
python
|
{
"resource": ""
}
|
q17317
|
DockerSwarmClient.get_volume_list
|
train
|
def get_volume_list(self) -> list:
"""Get a list of docker volumes.
Only the manager nodes can retrieve all the volumes
Returns:
list, all the names of the volumes in swarm
"""
# Initialising empty list
volumes = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve'
                           ' all the volumes.')
volume_list = self._client.volumes.list()
for v_list in volume_list:
volumes.append(v_list.name)
return volumes
|
python
|
{
"resource": ""
}
|
q17318
|
DockerSwarmClient.get_volume_details
|
train
|
def get_volume_details(self, volume_name: str) -> dict:
"""Get details of the volume.
Args:
volume_name (str): Name of the volume
Returns:
dict, details of the volume
"""
if volume_name not in self.volumes:
        raise RuntimeError('No such volume found: {}'.format(volume_name))
volume = self._client.volumes.get(volume_name)
return volume.attrs
|
python
|
{
"resource": ""
}
|
q17319
|
DockerSwarmClient.get_actual_replica
|
train
|
def get_actual_replica(self, service_id: str) -> int:
"""Get the actual replica level of a service.
Args:
service_id (str): docker swarm service id
    Returns:
        int, actual replica level of the service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve '
'replication level of the service')
service_details = self.get_service_details(service_id)
actual_replica = service_details["Spec"]["Mode"][
"Replicated"]["Replicas"]
return actual_replica
|
python
|
{
"resource": ""
}
|
q17320
|
DockerSwarmClient.get_replicas
|
train
|
def get_replicas(self, service_id: str) -> int:
"""Get the replication level of a service.
Args:
service_id (str): docker swarm service id
    Returns:
        int, number of running replicas of the service
"""
# Initialising empty list
replicas = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve '
'replication level of the service')
service_tasks = self._client.services.get(service_id).tasks()
for task in service_tasks:
if task['Status']['State'] == "running":
replicas.append(task)
return len(replicas)
|
python
|
{
"resource": ""
}
|
q17321
|
DockerSwarmClient.update_labels
|
train
|
def update_labels(self, node_name: str, labels: dict):
"""Update label of a node.
Args:
node_name (string): Name of the node.
labels (dict): Label to add to the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can update '
'node details.')
# Node specification
node_spec = {'Availability': 'active',
'Name': node_name,
'Role': 'manager',
'Labels': labels}
node = self._client.nodes.get(node_name)
node.update(node_spec)
|
python
|
{
"resource": ""
}
|
q17322
|
DockerSwarmClient._parse_services
|
train
|
def _parse_services(self, service_config: dict, service_name: str,
service_list: dict) -> dict:
"""Parse the docker compose file.
Args:
service_config (dict): Service configurations from the compose file
service_name (string): Name of the services
service_list (dict): Service configuration list
Returns:
dict, service specifications extracted from the compose file
"""
for key, value in service_list['services'][service_name].items():
service_config[key] = value
if 'command' in key:
key = "args"
service_config['args'] = value
service_config.pop('command')
if 'ports' in key:
endpoint_spec = self._parse_ports(value)
service_config['endpoint_spec'] = endpoint_spec
service_config.pop('ports')
if 'volumes' in key:
volume_spec = self._parse_volumes(value)
service_config['mounts'] = volume_spec
service_config.pop('volumes')
if 'deploy' in key:
self._parse_deploy(value, service_config)
service_config.pop('deploy')
if 'networks' in key:
network_spec = self._parse_networks(service_list)
service_config['networks'] = network_spec
if 'logging' in key:
self._parse_logging(value, service_config)
service_config.pop('logging')
if 'environment' in key:
service_config['env'] = value
service_config.pop('environment')
# LOG.info('Service Config: %s', service_config)
return service_config
|
python
|
{
"resource": ""
}
|
q17323
|
DockerSwarmClient._parse_deploy
|
train
|
def _parse_deploy(self, deploy_values: dict, service_config: dict):
"""Parse deploy key.
Args:
deploy_values (dict): deploy configuration values
service_config (dict): Service configuration
"""
# Initialising empty dictionary
mode = {}
for d_value in deploy_values:
if 'restart_policy' in d_value:
restart_spec = docker.types.RestartPolicy(
**deploy_values[d_value])
service_config['restart_policy'] = restart_spec
if 'placement' in d_value:
for constraints_key, constraints_value in \
deploy_values[d_value].items():
service_config[constraints_key] = constraints_value
if 'mode' in d_value:
mode[d_value] = deploy_values[d_value]
if 'replicas' in d_value:
mode[d_value] = deploy_values[d_value]
if 'resources' in d_value:
resource_spec = self._parse_resources(
deploy_values, d_value)
service_config['resources'] = resource_spec
# Setting the types
mode_spec = docker.types.ServiceMode(**mode)
service_config['mode'] = mode_spec
|
python
|
{
"resource": ""
}
|
q17324
|
DockerSwarmClient._parse_ports
|
train
|
def _parse_ports(port_values: list) -> docker.types.EndpointSpec:
    """Parse ports key.
    Args:
        port_values (list): ports configuration values
    Returns:
        EndpointSpec, endpoint specification containing the exposed ports
    """
# Initialising empty dictionary
endpoints = {}
for port_element in port_values:
target_port = port_element.split(':')
for port in target_port:
endpoints[int(port)] = int(port)
# Setting the types
endpoint_spec = docker.types.EndpointSpec(ports=endpoints)
return endpoint_spec
|
python
|
{
"resource": ""
}
|
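For reference, the port handling above reduces to a small dictionary-building step before the result is wrapped in `docker.types.EndpointSpec`. A standalone sketch of just that step (no Docker daemon or client needed, with hypothetical compose-style port entries):

```python
def build_port_mapping(port_values):
    """Rebuild the port dictionary exactly as the loop above does.
    Note that each port in a 'published:target' pair is mapped to
    itself, so '8080:80' exposes both 8080 and 80."""
    endpoints = {}
    for port_element in port_values:
        for port in port_element.split(':'):
            endpoints[int(port)] = int(port)
    return endpoints


print(build_port_mapping(['8080:80', '9090']))
# -> {8080: 8080, 80: 80, 9090: 9090}
```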
q17325
|
DockerSwarmClient._parse_volumes
|
train
|
def _parse_volumes(volume_values: list) -> list:
    """Parse volumes key.
    Args:
        volume_values (list): volume configuration values
    Returns:
        list, volume specification with mount source and container path
    """
for v_values in volume_values:
for v_key, v_value in v_values.items():
if v_key == 'source':
if v_value == '.':
source = os.path.dirname(
os.path.abspath(__file__))
else:
source = v_value
if v_key == 'target':
target = v_value
volume_spec = [source + ':' + target]
return volume_spec
|
python
|
{
"resource": ""
}
|
q17326
|
DockerSwarmClient._parse_resources
|
train
|
def _parse_resources(resource_values: dict,
                     resource_name: str) -> docker.types.Resources:
"""Parse resources key.
Args:
resource_values (dict): resource configurations values
resource_name (string): Resource name
    Returns:
        Resources, resources specification
"""
# Initialising empty dictionary
resources = {}
for r_values in resource_values[resource_name]:
if 'limits' in r_values:
for r_key, r_value in \
resource_values[resource_name][r_values].items():
if 'cpu' in r_key:
cpu_value = float(r_value) * 10 ** 9
cpu_key = r_key[:3] + '_limit'
resources[cpu_key] = int(cpu_value)
if 'mem' in r_key:
mem_value = re.sub('M', '', r_value)
mem_key = r_key[:3] + '_limit'
resources[mem_key] = int(mem_value) * 1048576
resources_spec = docker.types.Resources(**resources)
return resources_spec
|
python
|
{
"resource": ""
}
|
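The conversion in `_parse_resources` is the only non-trivial part: compose `cpus` values become nano-CPUs and 'M'-suffixed memory strings become bytes. A self-contained sketch of that arithmetic (the function name and sample values are illustrative):

```python
import re


def convert_limits(cpus: str, memory: str) -> dict:
    """CPU -> nano-CPUs, '<n>M' memory -> bytes (1 MiB = 1048576 bytes),
    mirroring the arithmetic in _parse_resources above."""
    return {
        'cpu_limit': int(float(cpus) * 10 ** 9),
        'mem_limit': int(re.sub('M', '', memory)) * 1048576,
    }


print(convert_limits('0.5', '512M'))
# -> {'cpu_limit': 500000000, 'mem_limit': 536870912}
```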
q17327
|
DockerSwarmClient._parse_networks
|
train
|
def _parse_networks(service_list: dict) -> list:
"""Parse network key.
Args:
service_list (dict): Service configurations
Returns:
list, List of networks
"""
# Initialising empty list
networks = []
for n_values in service_list['networks'].values():
for n_key, n_value in n_values.items():
if 'name' in n_key:
networks.append(n_value)
return networks
|
python
|
{
"resource": ""
}
|
q17328
|
DockerSwarmClient._parse_logging
|
train
|
def _parse_logging(log_values: dict, service_config: dict):
"""Parse log key.
Args:
log_values (dict): logging configuration values
service_config (dict): Service specification
"""
for log_key, log_value in log_values.items():
if 'driver' in log_key:
service_config['log_driver'] = log_value
if 'options' in log_key:
service_config['log_driver_options'] = log_value
|
python
|
{
"resource": ""
}
|
q17329
|
ProcessingBlockScheduler._init_queue
|
train
|
def _init_queue():
"""Initialise the Processing Block queue from the database.
This method should populate the queue from the current state of the
Configuration Database.
This needs to be based on the current set of Processing Blocks in
the database and consider events on these processing blocks.
"""
LOG.info('Initialising Processing Block queue.')
queue = ProcessingBlockQueue()
active_pb_ids = ProcessingBlockList().active
LOG.info('Initialising PC PB queue: %s', active_pb_ids)
for pb_id in active_pb_ids:
pb = ProcessingBlock(pb_id)
queue.put(pb.id, pb.priority, pb.type)
return queue
|
python
|
{
"resource": ""
}
|
q17330
|
ProcessingBlockScheduler._monitor_events
|
train
|
def _monitor_events(self):
"""Watch for Processing Block events."""
LOG.info("Starting to monitor PB events")
check_counter = 0
while True:
if check_counter == 50:
check_counter = 0
LOG.debug('Checking for PB events...')
published_events = self._pb_events.get_published_events()
for event in published_events:
if event.type == 'status_changed':
LOG.info('PB status changed event: %s',
event.data['status'])
if event.data['status'] == 'created':
LOG.info('Acknowledged PB created event (%s) for %s, '
'[timestamp: %s]', event.id,
event.object_id, event.timestamp)
pb = ProcessingBlock(event.object_id)
self._queue.put(event.object_id, pb.priority, pb.type)
if event.data['status'] == 'completed':
LOG.info('Acknowledged PB completed event (%s) for %s,'
' [timestamp: %s]', event.id,
event.object_id, event.timestamp)
self._num_pbcs -= 1
if self._num_pbcs < 0:
self._num_pbcs = 0
time.sleep(0.1)
check_counter += 1
|
python
|
{
"resource": ""
}
|
q17331
|
ProcessingBlockScheduler._schedule_processing_blocks
|
train
|
def _schedule_processing_blocks(self):
"""Schedule Processing Blocks for execution."""
LOG.info('Starting to Schedule Processing Blocks.')
while True:
time.sleep(0.5)
if not self._queue:
continue
if self._num_pbcs >= self._max_pbcs:
LOG.warning('Resource limit reached!')
continue
_inspect = Inspect(app=APP)
if self._queue and _inspect.active() is not None:
next_pb = self._queue[-1]
LOG.info('Considering %s for execution...', next_pb[2])
utc_now = datetime.datetime.utcnow()
time_in_queue = (utc_now -
datetime_from_isoformat(next_pb[4]))
if time_in_queue.total_seconds() >= 10:
item = self._queue.get()
LOG.info('------------------------------------')
LOG.info('>>> Executing %s! <<<', item)
LOG.info('------------------------------------')
execute_processing_block.delay(item)
self._num_pbcs += 1
else:
LOG.info('Waiting for resources for %s', next_pb[2])
|
python
|
{
"resource": ""
}
|
q17332
|
ProcessingBlockScheduler._monitor_pbc_status
|
train
|
def _monitor_pbc_status(self):
"""Monitor the PBC status."""
LOG.info('Starting to Monitor PBC status.')
inspect = celery.current_app.control.inspect()
workers = inspect.ping()
start_time = time.time()
while workers is None:
time.sleep(0.1)
elapsed = time.time() - start_time
if elapsed > 20.0:
LOG.warning('PBC not found!')
break
if workers is not None:
for worker in workers:
_tasks = inspect.registered_tasks()[worker]
LOG.info('Worker: %s tasks:', worker)
for task_index, task_name in enumerate(_tasks):
LOG.info(' %02d : %s', task_index, task_name)
while True:
LOG.info('Checking PBC status (%d/%d)', self._num_pbcs,
self._max_pbcs)
celery_app = celery.current_app
inspect = celery_app.control.inspect()
workers = inspect.ping()
if workers is None:
LOG.warning('PBC service not found!')
else:
LOG.info('PBC state: %s', celery_app.events.State())
_active = inspect.active()
_scheduled = inspect.scheduled()
for worker in workers:
LOG.info(' Worker %s: scheduled: %s, active: %s',
                         worker, _scheduled[worker], _active[worker])
time.sleep(self._report_interval)
|
python
|
{
"resource": ""
}
|
q17333
|
ProcessingBlockScheduler.start
|
train
|
def start(self):
"""Start the scheduler threads."""
    # TODO(BMo) having this check is probably a good idea but I've
    # disabled it for now while the PBC is in flux.
# assert sip_pbc.release.__version__ == '1.2.3'
scheduler_threads = [
Thread(target=self._monitor_events, daemon=True),
Thread(target=self._processing_controller_status, daemon=True),
Thread(target=self._schedule_processing_blocks, daemon=True),
Thread(target=self._monitor_pbc_status, daemon=True)
]
for thread in scheduler_threads:
thread.start()
try:
for thread in scheduler_threads:
thread.join()
except KeyboardInterrupt:
LOG.info('Keyboard interrupt!')
sys.exit(0)
finally:
LOG.info('Finally!')
|
python
|
{
"resource": ""
}
|
q17334
|
_update_service_current_state
|
train
|
def _update_service_current_state(service: ServiceState):
"""Update the current state of a service.
Updates the current state of services after their target state has changed.
Args:
service (ServiceState): Service state object to update
"""
LOG.debug("Setting current state from target state for %s", service.id)
service.update_current_state(service.target_state)
|
python
|
{
"resource": ""
}
|
q17335
|
_update_services_instant_gratification
|
train
|
def _update_services_instant_gratification(sdp_target_state: str):
"""For demonstration purposes only.
    This instantly updates the services' current state with the
    target state, rather than waiting on them or scheduling random
    delays in bringing them back up.
"""
service_states = get_service_state_list()
# Set the target state of services
for service in service_states:
if service.current_state != sdp_target_state:
LOG.debug('Setting the current state of %s to be %s', service.id,
sdp_target_state)
service.update_current_state(sdp_target_state)
|
python
|
{
"resource": ""
}
|
q17336
|
_update_services_target_state
|
train
|
def _update_services_target_state(sdp_target_state: str):
"""Update the target states of services based on SDP target state.
When we get a new target state this function is called to ensure
components receive the target state(s) and/or act on them.
Args:
sdp_target_state (str): Target state of SDP
"""
service_states = get_service_state_list()
# Set the target state of services
for service in service_states:
if service.current_state != sdp_target_state:
LOG.debug('Setting the target state of %s to be %s', service.id,
sdp_target_state)
service.update_target_state(sdp_target_state)
|
python
|
{
"resource": ""
}
|
q17337
|
_handle_sdp_target_state_updated
|
train
|
def _handle_sdp_target_state_updated(sdp_state: SDPState):
"""Respond to an SDP target state change event.
This function sets the current state of SDP to the target state if that is
possible.
    TODO(BMo) This can't be done as a blocking function as it is here!
"""
LOG.info('Handling SDP target state updated event...')
LOG.info('SDP target state: %s', sdp_state.target_state)
# Map between the SDP target state and the service target state?
if sdp_state.target_state == 'off':
_update_services_target_state('off')
# TODO: Work out if the state of SDP has reached the target state.
# If yes, update the current state.
sdp_state.update_current_state(sdp_state.target_state)
|
python
|
{
"resource": ""
}
|
q17338
|
_init
|
train
|
def _init(sdp_state: SDPState):
"""Initialise the Master Controller Service.
Performs the following actions:
1. Registers ServiceState objects into the Config Db.
2. If initialising for the first time (unknown state),
sets the SDPState to 'init'
3. Initialises the state of Services, if running for the first time
(their state == unknown)
4. Waits some time and sets the Service states to 'on'. This emulates
waiting for Services to become available.
5. Once all services are 'on', sets the SDP state to 'standby'.
"""
# Parse command line arguments.
LOG.info("Initialising: %s", __service_id__)
# FIXME(BMo) There is a bug when SDP or services 'start' in the 'off'
# state. At the moment it is impossible to transition out of this.
# FIXME(BMo) **Hack** Register all services or if already registered do
# nothing (this is handled by the ServiceState object).
_services = [
"ExecutionControl:AlarmReceiver:1.0.0",
"ExecutionControl:AlertManager:1.0.0",
"ExecutionControl:ConfigurationDatabase:5.0.1",
"ExecutionControl:MasterController:1.3.0",
"ExecutionControl:ProcessingController:1.2.6",
"ExecutionControl:ProcessingBlockController:1.3.0",
"TangoControl:Database:1.0.4",
"TangoControl:MySQL:1.0.3",
"TangoControl:SDPMaster:1.2.1",
"TangoControl:Subarrays:1.2.0",
"TangoControl:ProcessingBlocks:1.2.0",
"Platform:Kafka:2.1.1",
"Platform:Prometheus:1.0.0",
"Platform:PrometheusPushGateway:0.7.0",
"Platform:RedisCommander:210.0.0",
"Platform:Zookeeper:3.4.13"
]
for service_id in _services:
subsystem, name, version = service_id.split(':')
ServiceState(subsystem, name, version)
# If the SDP state is 'unknown', mark the SDP state as init.
# FIXME(BMo) This is not right as we want to allow for recovery from
    # failure without just reinitialising...!? i.e. respect the old state.
# NOTE: If the state is 'off' we will want to reset the database
# with 'skasip_config_db_init --clear'
if sdp_state.current_state in ['unknown', 'off']:
try:
LOG.info("Setting the SDPState to 'init'")
sdp_state.update_current_state('init', force=True)
except ValueError as error:
LOG.critical('Unable to set the State of SDP to init! %s',
str(error))
LOG.info("Updating Service States")
service_state_list = get_service_state_list()
# FIXME(BMo) **Hack** Mark all Services in the 'unknown' state as
# initialising.
for service_state in service_state_list:
if service_state.current_state in ['unknown', 'off']:
service_state.update_current_state('init', force=True)
# FIXME(BMo) **Hack** After 'checking' that the services are 'on' set
    # their state to 'on' after a short delay.
# FIXME(BMo) This check should not be serialised!!! (should be part of the
# event loop)
for service_state in service_state_list:
if service_state.current_state == 'init':
time.sleep(random.uniform(0, 0.2))
service_state.update_current_state('on')
    # FIXME(BMo): **Hack** Now that all services are on, set the state of
    # SDP to 'standby'.
# FIXME(BMo) This should also be part of the event loop.
services_on = [service.current_state == 'on'
for service in service_state_list]
if all(services_on):
        LOG.info('All Services are online!')
sdp_state.update_current_state('standby')
else:
LOG.critical('Master Controller failed to initialise.')
return service_state_list
|
python
|
{
"resource": ""
}
|
q17339
|
_process_event
|
train
|
def _process_event(event: Event, sdp_state: SDPState,
service_states: List[ServiceState]):
"""Process a SDP state change event."""
LOG.debug('Event detected! (id : "%s", type: "%s", data: "%s")',
event.object_id, event.type, event.data)
if event.object_id == 'SDP' and event.type == 'current_state_updated':
LOG.info('SDP current state updated, no action required!')
if event.object_id == 'SDP' and event.type == 'target_state_updated':
LOG.info("SDP target state changed to '%s'",
sdp_state.target_state)
# If the sdp is already in the target state do nothing
if sdp_state.target_state == sdp_state.current_state:
LOG.warning('SDP already in %s state',
sdp_state.current_state)
return
# Check that a transition to the target state is allowed in the
# current state.
if not sdp_state.is_target_state_allowed(sdp_state.target_state):
LOG.error('Transition to %s is not allowed when in state %s',
sdp_state.target_state, sdp_state.current_state)
sdp_state.target_state = sdp_state.current_state
return
_update_services_target_state(sdp_state.target_state)
# If asking SDP to turn off, also turn off services.
if sdp_state.target_state == 'off':
LOG.info('Turning off services!')
for service_state in service_states:
service_state.update_target_state('off')
service_state.update_current_state('off')
LOG.info('Processing target state change request ...')
time.sleep(0.1)
LOG.info('Done processing target state change request!')
        # Assuming that the SDP has responded to the target state
        # command by now, set the current state to the target state.
sdp_state.update_current_state(sdp_state.target_state)
if sdp_state.current_state == 'alarm':
LOG.debug('raising SDP state alarm')
SIP_STATE_ALARM.set(1)
else:
SIP_STATE_ALARM.set(0)
try:
# FIXME(BMo) the pushgateway host should not be hardcoded!
push_to_gateway('platform_pushgateway:9091', job='SIP',
registry=COLLECTOR_REGISTRY)
except urllib.error.URLError:
LOG.warning("Unable to connect to the Alarms service!")
|
python
|
{
"resource": ""
}
|
q17340
|
_process_state_change_events
|
train
|
def _process_state_change_events():
"""Process events relating to the overall state of SDP.
    This function starts an event loop which continually checks for
and responds to SDP state change events.
"""
sdp_state = SDPState()
service_states = get_service_state_list()
state_events = sdp_state.get_event_queue(subscriber=__service_name__)
state_is_off = sdp_state.current_state == 'off'
counter = 0
while True:
time.sleep(0.1)
if not state_is_off:
# *Hack* to avoid problems with historical events not being
# correctly handled by EventQueue.get(), replay old events every
# 10s
# - see issue #54
if counter % 1000 == 0:
LOG.debug('Checking published events ... %d', counter / 1000)
_published_events = state_events.get_published_events(
process=True)
for _state_event in _published_events:
_process_event(_state_event, sdp_state, service_states)
else:
_state_event = state_events.get()
if _state_event:
_process_event(_state_event, sdp_state, service_states)
state_is_off = sdp_state.current_state == 'off'
counter += 1
|
python
|
{
"resource": ""
}
|
q17341
|
main
|
train
|
def main():
"""Merge temp_main and main."""
# Parse command line args.
_parse_args()
LOG.info("Starting: %s", __service_id__)
# Subscribe to state change events.
# FIXME(BMo) This API is unfortunate as it looks like we are only
# subscribing to sdp_state events.
LOG.info('Subscribing to state change events (subscriber = %s)',
__service_name__)
sdp_state = SDPState()
_ = sdp_state.subscribe(subscriber=__service_name__)
# Initialise the service.
_ = _init(sdp_state)
LOG.info('Finished initialising!')
# Enter a pseudo event-loop (using Sched) to monitor for state change
# events
    # (Also randomly set services into a fault or alarm state, if enabled.)
LOG.info('Responding to state change events ...')
try:
_process_state_change_events()
except ValueError as error:
LOG.critical('Value error: %s', str(error))
except KeyboardInterrupt as err:
LOG.debug('Keyboard Interrupt %s', err)
LOG.info('Exiting!')
|
python
|
{
"resource": ""
}
|
q17342
|
main
|
train
|
def main():
"""Runs the test sender."""
stream_config = spead2.send.StreamConfig(
max_packet_size=16356, rate=1000e6, burst_size=10, max_heaps=1)
item_group = spead2.send.ItemGroup(flavour=spead2.Flavour(4, 64, 48, 0))
# Add item descriptors to the heap.
num_baselines = (512 * 513) // 2
dtype = [('TCI', 'i1'), ('FD', 'u1'), ('VIS', '<c8', 4)]
item_group.add_item(
id=0x6000, name='visibility_timestamp_count', description='',
shape=tuple(), format=None, dtype='<u4')
item_group.add_item(
id=0x6001, name='visibility_timestamp_fraction', description='',
shape=tuple(), format=None, dtype='<u4')
item_group.add_item(
id=0x6005, name='visibility_baseline_count', description='',
shape=tuple(), format=None, dtype='<u4')
item_group.add_item(
id=0x6008, name='scan_id', description='',
shape=tuple(), format=None, dtype='<u8')
item_group.add_item(
id=0x600A, name='correlator_output_data', description='',
shape=(num_baselines,), dtype=dtype)
# Create streams and send start-of-stream message.
streams = []
num_streams = 2
for i in range(num_streams):
stream = spead2.send.UdpStream(
thread_pool=spead2.ThreadPool(threads=1),
hostname='127.0.0.1', port=41000 + i, config=stream_config)
stream.send_heap(item_group.get_start())
streams.append(stream)
vis = numpy.zeros(shape=(num_baselines,), dtype=dtype)
num_heaps = 200
start_time = time.time()
for stream in streams:
# Update values in the heap.
item_group['visibility_timestamp_count'].value = 1
item_group['visibility_timestamp_fraction'].value = 0
item_group['visibility_baseline_count'].value = num_baselines
item_group['scan_id'].value = 100000000
item_group['correlator_output_data'].value = vis
# Iterate heaps.
for i in range(num_heaps):
# Send heap.
stream.send_heap(item_group.get_heap(descriptors='all', data='all'))
# Print time taken.
duration = time.time() - start_time
data_size = num_streams * num_heaps * (vis.nbytes / 1e6)
print("Sent %.3f MB in %.3f sec (%.3f MB/sec)" % (
data_size, duration, (data_size/duration)))
# Send end-of-stream message.
for stream in streams:
stream.send_heap(item_group.get_end())
|
python
|
{
"resource": ""
}
|
q17343
|
Event.from_config
|
train
|
def from_config(cls, config: dict):
"""Create an event object from an event dictionary object.
Args:
config (dict): Event Configuration dictionary.
"""
timestamp = config.get('timestamp', None)
return cls(config.get('id'),
config.get('type'),
config.get('data', dict()),
config.get('origin', None),
timestamp,
config.get('object_type', None),
config.get('object_id', None),
config.get('object_key', None))
|
python
|
{
"resource": ""
}
|
q17344
|
process_input_data
|
train
|
def process_input_data(filename, imager, grid_data, grid_norm, grid_weights):
"""Reads visibility data from a Measurement Set.
The visibility grid or weights grid is updated accordingly.
Visibility data are read from disk in blocks of size num_baselines.
Args:
filename (str): Name of Measurement Set to open.
imager (oskar.Imager): Handle to configured imager.
grid_data (numpy.ndarray or None): Visibility grid to populate.
        grid_norm (float): Current grid normalisation.
grid_weights (numpy.ndarray): Weights grid to populate or read.
Returns:
grid_norm (float): Updated grid normalisation.
"""
# Get data from the input Measurement Set.
ms = oskar.MeasurementSet.open(filename)
block_start = 0
num_rows = ms.num_rows
num_baselines = ms.num_stations * (ms.num_stations - 1) // 2
# Loop over data blocks of size num_baselines.
while block_start < num_rows:
block_size = num_rows - block_start
if block_size > num_baselines:
block_size = num_baselines
# Get the baseline coordinates. (Replace this with a query to LTS.)
uvw = ms.read_column('UVW', block_start, block_size)
# Read the Stokes-I visibility weights.
vis_weights = ms.read_column('WEIGHT', block_start, block_size)
if ms.num_pols == 4:
vis_weights = 0.5 * (vis_weights[:, 0] + vis_weights[:, 3])
# Loop over frequency channels.
# (We expect there to be only one channel here, but loop just in case.)
for j in range(ms.num_channels):
# Get coordinates in wavelengths.
coords = uvw * (ms.freq_start_hz + j * ms.freq_inc_hz) / 299792458.
# Get the Stokes-I visibilities for this channel.
vis_data = None
if not imager.coords_only:
vis_data = ms.read_vis(block_start, j, 1, block_size)
if ms.num_pols == 4:
vis_data = 0.5 * (vis_data[0, :, 0] + vis_data[0, :, 3])
# Update the grid plane with this visibility block.
grid_norm = imager.update_plane(
coords[:, 0], coords[:, 1], coords[:, 2], vis_data,
vis_weights, grid_data, grid_norm, grid_weights)
# Increment start row by block size.
block_start += block_size
# Return updated grid normalisation.
return grid_norm
|
python
|
{
"resource": ""
}
|
q17345
|
get
|
train
|
def get():
"""Return list of Scheduling Blocks Instances known to SDP ."""
LOG.debug('GET list of SBIs.')
# Construct response object.
_url = get_root_url()
response = dict(scheduling_blocks=[],
links=dict(home='{}'.format(_url)))
# Get ordered list of SBI ID's.
block_ids = DB.get_sched_block_instance_ids()
# Loop over SBIs and add summary of each to the list of SBIs in the
# response.
for block in DB.get_block_details(block_ids):
block_id = block['id']
LOG.debug('Adding SBI %s to list', block_id)
LOG.debug(block)
block['num_processing_blocks'] = len(block['processing_block_ids'])
temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2
block['status'] = choice(temp)
try:
del block['processing_block_ids']
except KeyError:
pass
block['links'] = {
'detail': '{}/scheduling-block/{}' .format(_url, block_id)
}
response['scheduling_blocks'].append(block)
return response, HTTPStatus.OK
|
python
|
{
"resource": ""
}
|
q17346
|
get_table
|
train
|
def get_table():
"""Provides table of scheduling block instance metadata for use with AJAX
tables"""
response = dict(blocks=[])
block_ids = DB.get_sched_block_instance_ids()
for index, block_id in enumerate(block_ids):
block = DB.get_block_details([block_id]).__next__()
info = [
index,
block['id'],
block['sub_array_id'],
len(block['processing_blocks'])
]
response['blocks'].append(info)
return response, HTTPStatus.OK
|
python
|
{
"resource": ""
}
|
q17347
|
ConfigDB.set_value
|
train
|
def set_value(self, key, field, value):
"""Add the state of the key and field"""
self._db.hset(key, field, value)
|
python
|
{
"resource": ""
}
|
q17348
|
ConfigDB.push_event
|
train
|
def push_event(self, event_name, event_type, block_id):
"""Push inserts all the specified values at the tail of the list
stored at the key"""
self._db.rpush(event_name, dict(type=event_type, id=block_id))
|
python
|
{
"resource": ""
}
|
q17349
|
main
|
train
|
def main():
"""Task run method."""
# Install handler to respond to SIGTERM
signal.signal(signal.SIGTERM, _sig_handler)
with open(sys.argv[1]) as fh:
config = json.load(fh)
# Starts the pulsar search ftp server
os.chdir(os.path.expanduser('~'))
receiver = PulsarStart(config, logging.getLogger())
receiver.run()
|
python
|
{
"resource": ""
}
|
q17350
|
check_connection
|
train
|
def check_connection(func):
"""Check connection exceptions."""
@wraps(func)
def with_exception_handling(*args, **kwargs):
"""Wrap function being decorated."""
try:
return func(*args, **kwargs)
except redis.exceptions.ConnectionError:
raise ConnectionError("Unable to connect to the Redis "
"Configuration Database. host = {}, "
"port = {}, id = {}."
.format(REDIS_HOST, REDIS_PORT,
REDIS_DB_ID))
return with_exception_handling
|
python
|
{
"resource": ""
}
|
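The decorator above is the usual `functools.wraps` exception-translation pattern. A self-contained sketch of the same pattern (using a placeholder exception and explicit parameters instead of the module-level Redis constants, so it runs without a Redis client; all names below are illustrative):

```python
from functools import wraps


class BackendUnavailable(Exception):
    """Stand-in for redis.exceptions.ConnectionError in this sketch."""


def check_connection(host, port):
    """Decorator factory: re-raise backend failures with connection info."""
    def decorator(func):
        @wraps(func)
        def with_exception_handling(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except BackendUnavailable:
                raise ConnectionError(
                    'Unable to connect to the Configuration Database. '
                    'host = {}, port = {}.'.format(host, port))
        return with_exception_handling
    return decorator


@check_connection('localhost', 6379)
def flaky_query():
    raise BackendUnavailable()


try:
    flaky_query()
except ConnectionError as error:
    print(error)
```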
q17351
|
ConfigDb.save_dict
|
train
|
def save_dict(self, key: str, my_dict: dict, hierarchical: bool = False):
"""Store the specified dictionary at the specified key."""
for _key, _value in my_dict.items():
if isinstance(_value, dict):
if not hierarchical:
self._db.hmset(key, {_key: json.dumps(_value)})
else:
self.save_dict(key + ':' + _key, _value, hierarchical)
elif isinstance(_value, list):
if not hierarchical:
self._db.hmset(key, {_key: str(_value)})
else:
print('saving list at ', key + ':' + _key)
self._db.lpush(key + ':' + _key, *_value[::-1])
elif isinstance(_value, bool):
self._db.hmset(key, {_key: str(_value)})
else:
self._db.hmset(key, {_key: _value})
|
python
|
{
"resource": ""
}
|
q17352
|
ConfigDb._build_dict
|
train
|
def _build_dict(my_dict, keys, values):
"""Build a dictionary from a set of redis hashes.
keys = ['a', 'b', 'c']
values = {'value': 'foo'}
my_dict = {'a': {'b': {'c': {'value': 'foo'}}}}
Args:
my_dict (dict): Dictionary to add to
keys (list[str]): List of keys used to define hierarchy in my_dict
        values (dict): Values to add to the dictionary at the key
specified by keys
Returns:
dict, new dictionary with values added at keys
"""
temp = my_dict
for depth, key in enumerate(keys):
if depth < len(keys) - 1:
if key not in temp:
temp[key] = dict()
temp = temp[key]
else:
if key not in temp:
temp[key] = values
else:
temp[key] = {**temp[key], **values}
return my_dict
|
python
|
{
"resource": ""
}
|
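`_build_dict` is a pure function, so its behaviour is easy to exercise directly. The sketch below reproduces the same logic (so the example runs standalone) and shows two colon-separated key paths merging into one nested dictionary; the key names are hypothetical:

```python
def build_dict(my_dict, keys, values):
    """Same logic as ConfigDb._build_dict above."""
    temp = my_dict
    for depth, key in enumerate(keys):
        if depth < len(keys) - 1:
            if key not in temp:
                temp[key] = dict()
            temp = temp[key]
        else:
            if key not in temp:
                temp[key] = values
            else:
                temp[key] = {**temp[key], **values}
    return my_dict


result = {}
build_dict(result, ['pb', 'workflow', 'stage1'], {'status': 'none'})
build_dict(result, ['pb', 'workflow', 'stage2'], {'status': 'running'})
print(result)
# -> {'pb': {'workflow': {'stage1': {'status': 'none'},
#                         'stage2': {'status': 'running'}}}}
```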
q17353
|
ConfigDb._load_values
|
train
|
def _load_values(self, db_key: str) -> dict:
"""Load values from the db at the specified key, db_key.
FIXME(BMo): Could also be extended to load scalar types (instead of
just list and hash)
"""
if self._db.type(db_key) == 'list':
db_values = self._db.lrange(db_key, 0, -1)
for i, value in enumerate(db_values):
try:
db_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
else: # self._db.type == 'hash'
db_values = self._db.hgetall(db_key)
for _key, _value in db_values.items():
try:
db_values[_key] = ast.literal_eval(_value)
except SyntaxError:
pass
except ValueError:
pass
return db_values
|
python
|
{
"resource": ""
}
|
q17354
|
ConfigDb._load_dict_hierarchical
|
train
|
def _load_dict_hierarchical(self, db_key: str) -> dict:
"""Load a dictionary stored hierarchically at db_key."""
db_keys = self._db.keys(pattern=db_key + '*')
my_dict = {}
for _db_key in db_keys:
if self._db.type(_db_key) == 'list':
db_values = self._db.lrange(_db_key, 0, -1)
for i, value in enumerate(db_values):
try:
db_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
else: # self._db.type == 'hash'
db_values = self._db.hgetall(_db_key)
for _key, _value in db_values.items():
try:
db_values[_key] = ast.literal_eval(_value)
except SyntaxError:
pass
except ValueError:
pass
my_dict = self._build_dict(my_dict, _db_key.split(':'),
db_values)
return my_dict[db_key]
|
python
|
{
"resource": ""
}
|
q17355
|
ConfigDb.load_dict
|
train
|
def load_dict(self, db_key: str, hierarchical: bool = False) -> dict:
"""Load the dictionary at the specified key.
Hierarchically stored dictionaries use a ':' separator to expand
the dictionary into a set of Redis hashes.
Args:
db_key (str): Key at which the dictionary is stored in the db.
hierarchical (bool): If True, expect the dictionary to have been
stored hierarchically. If False, expect the dictionary to have
been stored flat.
Returns:
dict, the dictionary stored at key
"""
if not hierarchical:
db_values = self._db.hgetall(db_key)
for _key, _value in db_values.items():
if isinstance(_value, str):
db_values[_key] = ast.literal_eval(_value)
my_dict = db_values
else:
my_dict = self._load_dict_hierarchical(db_key)
return my_dict
|
python
|
{
"resource": ""
}
|
q17356
|
ConfigDb.load_dict_values
|
train
|
def load_dict_values(self, db_key: str, dict_keys: List[str],
hierarchical: bool = False) -> List:
"""Load values from a dictionary with the specified dict_keys.
Args:
db_key (str): Key where the dictionary is stored
dict_keys (List[str]): Keys within the dictionary to load.
hierarchical (bool): If True, expect the dictionary to have been
stored hierarchically. If False, expect the dictionary to have
been stored flat.
    Returns:
        list, the values stored at dict_keys in the dictionary stored
        at db_key
"""
result = []
if not hierarchical:
_values = self._db.hmget(db_key, *dict_keys)
result = [ast.literal_eval(_value) for _value in _values]
else:
# Get all keys in the set of keys for this dict 'db_key'
db_keys = self._db.keys(pattern=db_key + '*')
for _db_key in db_keys:
# Check if one of the dict_keys is an entire sub-dict entry
for name in _db_key.split(':')[1:]:
if name in dict_keys:
_values = self._load_values(_db_key)
result.append(_values)
# Look in the sub-dict for any of the dict_keys
_values = self._db.hmget(_db_key, *dict_keys)
for i, value in enumerate(_values):
try:
_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
result += [value for value in _values if value is not None]
return result
|
python
|
{
"resource": ""
}
|
q17357
|
ConfigDb.set_hash_value
|
train
|
def set_hash_value(self, key, field, value, pipeline=False):
"""Set the value of field in a hash stored at key.
Args:
key (str): key (name) of the hash
field (str): Field within the hash to set
value: Value to set
pipeline (bool): True, start a transaction block. Default false.
"""
# FIXME(BMo): new name for this function -> save_dict_value ?
if pipeline:
self._pipeline.hset(key, field, str(value))
else:
self._db.hset(key, field, str(value))
|
python
|
{
"resource": ""
}
|
q17358
|
ConfigDb.prepend_to_list
|
train
|
def prepend_to_list(self, key, *value, pipeline=False):
"""Add new element to the start of the list stored at key.
Args:
key (str): Key where the list is stored
value: Value to add to the list
pipeline (bool): True, start a transaction block. Default false.
"""
if pipeline:
self._pipeline.lpush(key, *value)
else:
self._db.lpush(key, *value)
|
python
|
{
"resource": ""
}
|
q17359
|
ConfigDb.append_to_list
|
train
|
def append_to_list(self, key, *value, pipeline=False):
"""Add new element to the end of the list stored at key.
Args:
key (str): Key where the list is stored
value: Value to add to the list
pipeline (bool): True, start a transaction block. Default false.
"""
if pipeline:
self._pipeline.rpush(key, *value)
else:
self._db.rpush(key, *value)
|
python
|
{
"resource": ""
}
|
q17360
|
ConfigDb.get_list
|
train
|
def get_list(self, key, pipeline=False):
"""Get all the value in the list stored at key.
Args:
key (str): Key where the list is stored.
pipeline (bool): True, start a transaction block. Default false.
Returns:
list: values in the list ordered by list index
"""
if pipeline:
return self._pipeline.lrange(key, 0, -1)
return self._db.lrange(key, 0, -1)
|
python
|
{
"resource": ""
}
|
q17361
|
ConfigDb.delete
|
train
|
def delete(self, *names: str, pipeline=False):
"""Delete one or more keys specified by names.
Args:
names (str): Names of keys to delete
pipeline (bool): True, start a transaction block. Default false.
"""
if pipeline:
self._pipeline.delete(*names)
else:
self._db.delete(*names)
|
python
|
{
"resource": ""
}
|
q17362
|
ConfigDb.get_event
|
train
|
def get_event(self, event_name, event_history=None):
"""Get an event from the database.
Gets an event from the named event list removing the event and
adding it to the event history.
Args:
event_name (str): Event list key.
event_history (str, optional): Event history list.
Returns:
str: string representation of the event object
"""
if event_history is None:
event_history = event_name + '_history'
return self._db.rpoplpush(event_name, event_history)
|
python
|
{
"resource": ""
}
|
q17363
|
ConfigDb.watch
|
train
|
def watch(self, key, pipeline=False):
"""Watch the given key.
    Marks the given key to be watched for conditional execution
of a transaction.
Args:
key (str): Key that needs to be watched
pipeline (bool): True, start a transaction block. Default false.
"""
if pipeline:
self._pipeline.watch(key)
else:
self._db.watch(key)
|
python
|
{
"resource": ""
}
|
q17364
|
ConfigDb.publish
|
train
|
def publish(self, channel, message, pipeline=False):
"""Post a message to a given channel.
Args:
channel (str): Channel where the message will be published
message (str): Message to publish
pipeline (bool): True, start a transaction block. Default false.
"""
if pipeline:
self._pipeline.publish(channel, message)
else:
self._db.publish(channel, message)
|
python
|
{
"resource": ""
}
|
q17365
|
MasterHealthCheck.get_services_health
|
train
|
def get_services_health(self) -> dict:
"""Get the health of all services.
Returns:
        dict, service names and their health status
"""
# Initialise
services_health = {}
# Get Service IDs
services_ids = self._get_services()
for service_id in services_ids:
service_name = DC.get_service_name(service_id)
# Check if the current and actual replica levels are the same
if DC.get_replicas(service_id) != \
DC.get_actual_replica(service_id):
services_health[service_name] = "Unhealthy"
else:
services_health[service_name] = "Healthy"
return services_health
|
python
|
{
"resource": ""
}
|
q17366
|
MasterHealthCheck.get_overall_services_health
|
train
|
def get_overall_services_health(self) -> str:
"""Get the overall health of all the services.
Returns:
str, overall health status
"""
services_health_status = self.get_services_health()
# Evaluate overall health
health_status = all(status == "Healthy" for status in
services_health_status.values())
# Converting from bool to str
if health_status:
overall_status = "Healthy"
else:
overall_status = "Unhealthy"
return overall_status
|
python
|
{
"resource": ""
}
|
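The overall status above is just an `all()` reduction over the per-service map returned by `get_services_health`. A tiny standalone illustration with a hypothetical health map:

```python
def overall_health(services_health: dict) -> str:
    """Mirror of the aggregation in get_overall_services_health above."""
    healthy = all(status == "Healthy"
                  for status in services_health.values())
    return "Healthy" if healthy else "Unhealthy"


print(overall_health({'master_controller': 'Healthy',
                      'config_database': 'Unhealthy'}))
# -> Unhealthy
```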
q17367
|
MasterHealthCheck.get_service_health
|
train
|
def get_service_health(service_id: str) -> str:
"""Get the health of a service using service_id.
Args:
        service_id (str): Service ID
Returns:
str, health status
"""
# Check if the current and actual replica levels are the same
if DC.get_replicas(service_id) != DC.get_actual_replica(service_id):
health_status = "Unhealthy"
else:
health_status = "Healthy"
return health_status
|
python
|
{
"resource": ""
}
|
q17368
|
get
|
train
|
def get():
"""Check the health of this service"""
uptime = time.time() - START_TIME
response = dict(uptime=f'{uptime:.2f}s',
links=dict(root='{}'.format(get_root_url())))
# TODO(BM) check if we can connect to the config database ...
# try:
# DB.get_sub_array_ids()
# except ConnectionError as error:
# response['state'] = 'ERROR'
# response['message'] = str(error)
return response, HTTPStatus.OK
|
python
|
{
"resource": ""
}
|
q17369
|
main
|
train
|
def main():
"""Main function for SPEAD receiver module."""
# Check command line arguments.
if len(sys.argv) < 2:
raise RuntimeError('Usage: python3 async_recv.py <json config>')
# Set up logging.
sip_logging.init_logger(show_thread=True)
# Load SPEAD configuration from JSON file.
# with open(sys.argv[-1]) as f:
# spead_config = json.load(f)
spead_config = json.loads(sys.argv[1])
# Set up the SPEAD receiver and run it (see method, above).
receiver = SpeadReceiver(spead_config)
receiver.run()
|
python
|
{
"resource": ""
}
|
q17370
|
SpeadReceiver.process_buffer
|
train
|
def process_buffer(self, i_block, receive_buffer):
"""Blocking function to process the received heaps.
This is run in an executor.
"""
self._log.info("Worker thread processing block %i", i_block)
time_overall0 = time.time()
time_unpack = 0.0
time_write = 0.0
for i_heap, heap in enumerate(receive_buffer.result()):
# Skip and log any incomplete heaps.
if isinstance(heap, spead2.recv.IncompleteHeap):
self._log.info("Dropped incomplete heap %i", heap.cnt + 1)
continue
# Update the item group from this heap.
items = self._item_group.update(heap)
# Get the time and channel indices from the heap index.
i_chan = i_heap // self._num_buffer_times
i_time = i_heap % self._num_buffer_times
if 'correlator_output_data' in items:
vis_data = items['correlator_output_data'].value['VIS']
if self._block is None:
num_baselines = vis_data.shape[0]
num_pols = vis_data[0].shape[0]
self._block = numpy.zeros((self._num_buffer_times,
self._num_streams,
num_baselines),
dtype=('c8', num_pols))
self._block[:, :, :] = 0 # To make the copies faster.
# Unpack data from the heap into the block to be processed.
time_unpack0 = time.time()
self._block[i_time, i_chan, :] = vis_data
time_unpack += time.time() - time_unpack0
# Check the data for debugging!
val = self._block[i_time, i_chan, -1][-1].real
self._log.debug("Data: %.3f", val)
if self._block is not None:
# Process the buffered data here.
if self._config['process_data']:
pass
# Write the buffered data to storage.
if self._config['write_data']:
time_write0 = time.time()
with open(self._config['filename'], 'ab') as f:
# Don't use pickle, it's really slow (even protocol 4)!
numpy.save(f, self._block, allow_pickle=False)
time_write += time.time() - time_write0
# Report time taken.
time_overall = time.time() - time_overall0
self._log.info("Total processing time: %.1f ms", 1000 * time_overall)
self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall)
self._log.info("Write was %.1f %%", 100 * time_write / time_overall)
if time_unpack != 0.0:
self._log.info("Memory speed %.1f MB/s",
(self._block.nbytes * 1e-6) / time_unpack)
if time_write != 0.0:
self._log.info("Write speed %.1f MB/s",
(self._block.nbytes * 1e-6) / time_write)
|
python
|
{
"resource": ""
}
|
q17371
|
SpeadReceiver.run
|
train
|
def run(self):
"""Starts the receiver."""
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_event_loop()
loop.run_until_complete(self._run_loop(executor))
self._log.info('Shutting down...')
executor.shutdown()
|
python
|
{
"resource": ""
}
|
q17372
|
PulsarStart.run
|
train
|
def run(self):
"""Start the FTP Server for pulsar search."""
self._log.info('Starting Pulsar Search Interface')
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user(self._config['login']['user'],
self._config['login']['psswd'], '.',
perm=self._config['login']['perm'])
authorizer.add_anonymous(os.getcwd())
# Instantiate FTP handler class
handler = FTPHandler
handler.authorizer = authorizer
handler.abstracted_fs = PulsarFileSystem
# Define a customized banner (string returned when client connects)
handler.banner = "SKA SDP pulsar search interface."
# Instantiate FTP server class and listen on 0.0.0.0:7878
address = (self._config['address']['listen'],
self._config['address']['port'])
server = FTPServer(address, handler)
# set a limit for connections
server.max_cons = 256
server.max_cons_per_ip = 5
# start ftp server
server.serve_forever()
|
python
|
{
"resource": ""
}
|
q17373
|
WorkflowStage.status
|
train
|
def status(self) -> str:
"""Return the workflow stage status."""
# As status is a modifiable property, have to reload from the db.
self._config = self._load_config()
return self._config.get('status')
|
python
|
{
"resource": ""
}
|
q17374
|
WorkflowStage.status
|
train
|
def status(self, value):
"""Set the workflow stage status."""
# FIXME(BM) This is currently a hack because workflow stages
# don't each have their own db entry.
pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id)
stages = DB.get_hash_value(pb_key, 'workflow_stages')
stages = ast.literal_eval(stages)
stages[self._index]['status'] = value
DB.set_hash_value(pb_key, 'workflow_stages', stages)
|
python
|
{
"resource": ""
}
|
q17375
|
WorkflowStage._load_config
|
train
|
def _load_config(self):
"""Load the workflow stage config from the database."""
pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id)
stages = DB.get_hash_value(pb_key, 'workflow_stages')
stages = ast.literal_eval(stages)
return stages[self._index]
|
python
|
{
"resource": ""
}
|
q17376
|
register_pb_devices
|
train
|
def register_pb_devices(num_pbs: int = 100):
"""Register PBs devices.
Note(BMo): Ideally we do not want to register any devices here. There
does not seem to be a way to create a device server with no registered
devices in Tango. This is (probably) because Tango devices must have been
registered before the server starts ...
"""
tango_db = Database()
LOG.info("Registering PB devices:")
dev_info = DbDevInfo()
# pylint: disable=protected-access
dev_info._class = 'ProcessingBlockDevice'
dev_info.server = 'processing_block_ds/1'
for index in range(num_pbs):
dev_info.name = 'sip_sdp/pb/{:05d}'.format(index)
LOG.info("\t%s", dev_info.name)
tango_db.add_device(dev_info)
|
python
|
{
"resource": ""
}
|
q17377
|
SchedulingBlockInstance.from_config
|
train
|
def from_config(cls, config_dict: dict, schema_path: str = None):
"""Create an SBI object from the specified configuration dict.
NOTE(BM) This should really be done as a single atomic db transaction.
Args:
config_dict(dict): SBI configuration dictionary
schema_path(str, optional): Path to the SBI config schema.
"""
# Validate the SBI config schema
if schema_path is None:
schema_path = join(dirname(__file__), 'schema',
'configure_sbi.json')
with open(schema_path, 'r') as file:
schema = json.loads(file.read())
validate(config_dict, schema)
# Add SBI status field
config_dict['status'] = 'created'
# Set the subarray field to None if not defined.
if 'subarray_id' not in config_dict:
config_dict['subarray_id'] = 'None'
# Add created, and updated timestamps.
timestamp = datetime.datetime.utcnow().isoformat()
config_dict['created'] = timestamp
config_dict['updated'] = timestamp
# Split out the processing block data array
pb_list = copy.deepcopy(config_dict['processing_blocks'])
# Remove processing blocks from the SBI configuration.
config_dict.pop('processing_blocks', None)
# Add list of PB ids to the SBI configuration
config_dict['processing_block_ids'] = []
for pb in pb_list:
config_dict['processing_block_ids'].append(pb['id'])
# Add the SBI data object to the database.
key = SchedulingObject.get_key(SBI_KEY, config_dict['id'])
DB.save_dict(key, config_dict, hierarchical=False)
# DB.set_hash_values(key, config_dict)
# Add the SBI id to the list of active SBIs
key = '{}:active'.format(SBI_KEY)
DB.append_to_list(key, config_dict['id'])
# Publish notification to subscribers
sbi = SchedulingObject(SBI_KEY, config_dict['id'])
sbi.set_status('created')
for pb in pb_list:
pb['sbi_id'] = config_dict['id']
cls._add_pb(pb)
return cls(config_dict['id'])
|
python
|
{
"resource": ""
}
|
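A minimal illustrative SBI configuration for from_config above, covering only the fields the method itself reads; the real configure_sbi.json schema may require additional fields, and all identifiers shown are placeholders.
sbi_config = {
    'id': 'SBI-20180101-sip-0001',
    'processing_blocks': [
        {
            'id': 'PB-20180101-sip-0001',
            'workflow': {
                'id': 'mock_workflow',   # placeholder workflow id
                'version': '1.0.0',      # placeholder version
                'parameters': {}
            }
        }
    ]
}
# sbi = SchedulingBlockInstance.from_config(sbi_config)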
q17378
|
SchedulingBlockInstance.get_pb_ids
|
train
|
def get_pb_ids(self) -> List[str]:
"""Return the list of PB ids associated with the SBI.
Returns:
list, Processing block ids
"""
values = DB.get_hash_value(self._key, 'processing_block_ids')
return ast.literal_eval(values)
|
python
|
{
"resource": ""
}
|
q17379
|
SchedulingBlockInstance.get_id
|
train
|
def get_id(date=None, project: str = 'sip',
instance_id: int = None) -> str:
"""Get a SBI Identifier.
Args:
date (str or datetime.datetime, optional): UTC date of the SBI
project (str, optional ): Project Name
instance_id (int, optional): SBI instance identifier
Returns:
str, Scheduling Block Instance (SBI) ID.
"""
if date is None:
date = datetime.datetime.utcnow()
if isinstance(date, datetime.datetime):
date = date.strftime('%Y%m%d')
if instance_id is None:
instance_id = randint(0, 9999)
return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
|
python
|
{
"resource": ""
}
|
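A short usage sketch for get_id above; the import path is not shown in the snippet and is an assumption, and the random instance id differs per call unless supplied explicitly.
# Assuming SchedulingBlockInstance is importable from the configuration
# database package (exact path is an assumption):
sbi_id = SchedulingBlockInstance.get_id(date='20180101', project='sip',
                                        instance_id=7)
print(sbi_id)  # -> SBI-20180101-sip-0007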
q17380
|
SchedulingBlockInstance._update_workflow_definition
|
train
|
def _update_workflow_definition(pb_config: dict):
"""Update the PB configuration workflow definition.
Args:
pb_config (dict): PB configuration dictionary
Raises:
            RuntimeError, if the workflow definition (id, version)
            specified in the PB configuration is not known.
"""
known_workflows = get_workflows()
workflow_id = pb_config['workflow']['id']
workflow_version = pb_config['workflow']['version']
if workflow_id not in known_workflows or \
workflow_version not in known_workflows[workflow_id]:
raise RuntimeError("Unknown workflow definition: {}:{}"
.format(workflow_id, workflow_version))
workflow = get_workflow(workflow_id, workflow_version)
for stage in workflow['stages']:
stage['status'] = 'none'
pb_config['workflow_parameters'] = pb_config['workflow']['parameters']
pb_config['workflow_id'] = pb_config['workflow']['id']
pb_config['workflow_version'] = pb_config['workflow']['version']
pb_config['workflow_stages'] = workflow['stages']
pb_config.pop('workflow', None)
|
python
|
{
"resource": ""
}
|
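The transformation performed above, sketched on a toy PB configuration; the workflow id, version and stages are placeholders and must already be registered (see get_workflows).
pb_config = {
    'id': 'PB-20180101-sip-0001',
    'workflow': {'id': 'mock_workflow', 'version': '1.0.0', 'parameters': {}}
}
# After _update_workflow_definition(pb_config) the nested 'workflow' key is
# replaced by flattened keys:
# {
#     'id': 'PB-20180101-sip-0001',
#     'workflow_id': 'mock_workflow',
#     'workflow_version': '1.0.0',
#     'workflow_parameters': {},
#     'workflow_stages': [...]   # stages from the workflow definition,
# }                              # each initialised with status 'none'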
q17381
|
root
|
train
|
def root():
"""Placeholder root url for the PCI.
Ideally this should never be called!
"""
response = {
"links": {
"message": "Welcome to the SIP Processing Controller Interface",
"items": [
{"href": "{}health".format(request.url)},
{"href": "{}subarrays".format(request.url)},
{"href": "{}scheduling_blocks".format(request.url)},
{"href": "{}processing_blocks".format(request.url)}
]
}
}
return response, HTTPStatus.OK
|
python
|
{
"resource": ""
}
|
q17382
|
generate_scheduling_block_id
|
train
|
def generate_scheduling_block_id(num_blocks, project='test'):
"""Generate a scheduling_block id"""
_date = strftime("%Y%m%d", gmtime())
_project = project
for i in range(num_blocks):
yield '{}-{}-sbi{:03d}'.format(_date, _project, i)
|
python
|
{
"resource": ""
}
|
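Consuming the generator above; the date component is the current UTC date, so the printed values are illustrative only.
for sbi_id in generate_scheduling_block_id(3, project='test'):
    print(sbi_id)
# e.g.
# 20180101-test-sbi000
# 20180101-test-sbi001
# 20180101-test-sbi002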
q17383
|
add_scheduling_block
|
train
|
def add_scheduling_block(config):
"""Adds a scheduling block to the database, returning a response object"""
try:
DB.add_sbi(config)
except jsonschema.ValidationError as error:
error_dict = error.__dict__
for key in error_dict:
error_dict[key] = error_dict[key].__str__()
error_response = dict(message="Failed to add scheduling block",
reason="JSON validation error",
details=error_dict)
return error_response, HTTPStatus.BAD_REQUEST
response = dict(config=config,
message='Successfully registered scheduling block '
'instance with ID: {}'.format(config['id']))
response['links'] = {
'self': '{}scheduling-block/{}'.format(request.url_root,
config['id']),
'list': '{}'.format(request.url),
'home': '{}'.format(request.url_root)
}
return response, HTTPStatus.ACCEPTED
|
python
|
{
"resource": ""
}
|
q17384
|
missing_db_response
|
train
|
def missing_db_response(func):
"""Decorator to check connection exceptions"""
@wraps(func)
def with_exception_handling(*args, **kwargs):
"""Wrapper to check for connection failures"""
try:
return func(*args, **kwargs)
except ConnectionError as error:
return (dict(error='Unable to connect to Configuration Db.',
error_message=str(error),
links=dict(root='{}'.format(get_root_url()))),
HTTPStatus.NOT_FOUND)
return with_exception_handling
|
python
|
{
"resource": ""
}
|
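A sketch of how the decorator above is applied; the handler name and body are placeholders and not part of the original code.
from http import HTTPStatus

@missing_db_response
def health():
    # Placeholder handler: any ConnectionError raised while talking to the
    # Configuration Db is turned into a 404 response with an error payload.
    return dict(status='ok'), HTTPStatus.OK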
q17385
|
main
|
train
|
def main():
"""Run the workflow task."""
log = logging.getLogger('sip.mock_workflow_stage')
if len(sys.argv) != 2:
log.critical('Expecting JSON string as first argument!')
return
config = json.loads(sys.argv[1])
log.info('Running mock_workflow_stage (version: %s).', __version__)
log.info('Received configuration: %s', json.dumps(config))
log.info('Starting task')
i = 0
start_time = time.time()
duration = config.get('duration', 20)
while time.time() - start_time <= duration:
time.sleep(duration / 20)
elapsed = time.time() - start_time
log.info(" %s %2i / 20 (elapsed %.2f s)",
config.get('message', 'Progress '),
i + 1, elapsed)
i += 1
log.info('Task complete!')
|
python
|
{
"resource": ""
}
|
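An illustrative invocation of the task above (the script file name is an assumption); the task runs for 'duration' seconds and logs twenty progress messages.
import json
import subprocess
import sys

config = json.dumps({'duration': 10, 'message': 'Stage progress'})
subprocess.run([sys.executable, 'mock_workflow_stage.py', config], check=True)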
q17386
|
main
|
train
|
def main(sleep_length=0.1):
"""Log to stdout using python logging in a while loop"""
log = logging.getLogger('sip.examples.log_spammer')
log.info('Starting to spam log messages every %fs', sleep_length)
counter = 0
try:
while True:
log.info('Hello %06i (log_spammer: %s, sip logging: %s)',
counter, _version.__version__, __version__)
counter += 1
time.sleep(sleep_length)
except KeyboardInterrupt:
log.info('Exiting...')
|
python
|
{
"resource": ""
}
|
q17387
|
init_logging
|
train
|
def init_logging():
"""Initialise Python logging."""
fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \
'| %(message)s'
logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
|
python
|
{
"resource": ""
}
|
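After calling init_logging() above, records are rendered as sketched below; the timestamp and logger name are illustrative, and the name column is padded to 60 characters by the format string.
import logging

init_logging()
logging.getLogger('sip.example').info('Hello')
# e.g. "10:15:42.123 | sip.example ...(padded to 60 chars)... | INFO    | Hello"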
q17388
|
get
|
train
|
def get(sub_array_id):
"""Sub array detail resource.
This method will list scheduling blocks and processing blocks
in the specified sub-array.
"""
    if not re.match(r'^subarray-(0[0-9]|1[0-5])$', sub_array_id):
        response = dict(error='Invalid sub-array ID specified "{}" does not '
                              'match sub-array ID naming convention '
                              '(i.e. subarray-[00-15]).'.
                        format(sub_array_id))
return response, HTTPStatus.BAD_REQUEST
if sub_array_id not in DB.get_sub_array_ids():
response = dict(error='Sub-array "{}" does not currently exist. '
'Known sub-arrays = {}'
.format(sub_array_id, DB.get_sub_array_ids()))
return response, HTTPStatus.NOT_FOUND
block_ids = DB.get_sub_array_sbi_ids(sub_array_id)
_blocks = [b for b in DB.get_block_details(block_ids)]
response = dict(scheduling_blocks=[])
_url = get_root_url()
for block in _blocks:
block['links'] = {
'self': '{}/scheduling-block/{}'.format(_url, block['id'])
}
response['scheduling_blocks'].append(block)
response['links'] = {
'self': '{}'.format(request.url),
'list': '{}/sub-arrays'.format(_url),
'home': '{}'.format(_url),
}
return response, HTTPStatus.OK
|
python
|
{
"resource": ""
}
|
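The intended sub-array id check, isolated as a self-contained sketch; grouping the alternation makes both the start and end anchors apply to every branch, so only subarray-00 through subarray-15 pass.
import re

PATTERN = r'^subarray-(0[0-9]|1[0-5])$'
assert re.match(PATTERN, 'subarray-00')
assert re.match(PATTERN, 'subarray-15')
assert not re.match(PATTERN, 'subarray-16')
assert not re.match(PATTERN, 'xsubarray-07')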
q17389
|
get_scheduling_block
|
train
|
def get_scheduling_block(sub_array_id, block_id):
"""Return the list of scheduling blocks instances associated with the sub
array"""
block_ids = DB.get_sub_array_sbi_ids(sub_array_id)
if block_id in block_ids:
        block = next(DB.get_block_details([block_id]))
return block, HTTPStatus.OK
return dict(error="unknown id"), HTTPStatus.NOT_FOUND
|
python
|
{
"resource": ""
}
|
q17390
|
package_files
|
train
|
def package_files(directory):
"""Get list of data files to add to the package."""
paths = []
for (path, _, file_names) in walk(directory):
for filename in file_names:
paths.append(join('..', path, filename))
return paths
|
python
|
{
"resource": ""
}
|
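A typical setup.py usage of the helper above; the project name and data directory are placeholders.
from setuptools import setup

extra_files = package_files('example_package/data')   # placeholder directory
setup(
    name='example_package',                            # placeholder name
    packages=['example_package'],
    package_data={'': extra_files},
)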
q17391
|
register_master
|
train
|
def register_master():
"""Register the SDP Master device."""
tango_db = Database()
device = "sip_sdp/elt/master"
device_info = DbDevInfo()
device_info._class = "SDPMasterDevice"
device_info.server = "sdp_master_ds/1"
device_info.name = device
devices = tango_db.get_device_name(device_info.server, device_info._class)
if device not in devices:
LOG.info('Registering device "%s" with device server "%s"',
device_info.name, device_info.server)
tango_db.add_device(device_info)
|
python
|
{
"resource": ""
}
|
q17392
|
main
|
train
|
def main(args=None, **kwargs):
"""Run the Tango SDP Master device server."""
LOG.info('Starting %s', __service_id__)
return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout,
args=args, **kwargs)
|
python
|
{
"resource": ""
}
|
q17393
|
main
|
train
|
def main():
"""Main script function"""
# Create simulation object, and start streaming SPEAD heaps
sender = PulsarSender()
# Parse command line arguments
args = parse_command_line()
# Initialise logging.
_log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO)
# Load configuration.
_log.info('Loading config: %s', args.config_file.name)
_config = json.load(args.config_file)
if args.print_settings:
_log.debug('Settings:\n %s', json.dumps(_config, indent=4,
sort_keys=True))
sender.send(_config, _log, 1, 1)
|
python
|
{
"resource": ""
}
|
q17394
|
CommonService.start_transport
|
train
|
def start_transport(self):
"""If a transport object has been defined then connect it now."""
if self.transport:
if self.transport.connect():
self.log.debug("Service successfully connected to transport layer")
else:
raise RuntimeError("Service could not connect to transport layer")
# direct all transport callbacks into the main queue
self._transport_interceptor_counter = itertools.count()
self.transport.subscription_callback_set_intercept(
self._transport_interceptor
)
else:
self.log.debug("No transport layer defined for service. Skipping.")
|
python
|
{
"resource": ""
}
|
q17395
|
CommonService._transport_interceptor
|
train
|
def _transport_interceptor(self, callback):
"""Takes a callback function and returns a function that takes headers and
messages and places them on the main service queue."""
def add_item_to_queue(header, message):
queue_item = (
Priority.TRANSPORT,
next(
self._transport_interceptor_counter
), # insertion sequence to keep messages in order
(callback, header, message),
)
self.__queue.put(
queue_item
) # Block incoming transport until insertion completes
return add_item_to_queue
|
python
|
{
"resource": ""
}
|
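The ordering trick used above, isolated: queue items are (priority, sequence, payload) tuples, so messages with equal priority are dequeued strictly in arrival order.
import itertools
import queue

q = queue.PriorityQueue()
counter = itertools.count()
for msg in ('first', 'second', 'third'):
    q.put((1, next(counter), msg))       # same priority for all three items
print([q.get()[2] for _ in range(3)])    # ['first', 'second', 'third']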
q17396
|
CommonService.extend_log
|
train
|
def extend_log(self, field, value):
"""A context wherein a specified extra field in log messages is populated
with a fixed value. This affects all log messages within the context."""
self.__log_extensions.append((field, value))
try:
yield
except Exception as e:
setattr(e, "workflows_log_" + field, value)
raise
finally:
self.__log_extensions.remove((field, value))
|
python
|
{
"resource": ""
}
|
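extend_log above is written as a generator, so in the original class it is presumably wrapped with contextlib.contextmanager; usage then looks like the sketch below (the field name and value are illustrative).
# with the service instance available as 'service':
with service.extend_log('recipe_id', '42'):
    service.log.info('processing')   # this record carries recipe_id='42'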
q17397
|
CommonService._log_send
|
train
|
def _log_send(self, logrecord):
"""Forward log records to the frontend."""
for field, value in self.__log_extensions:
setattr(logrecord, field, value)
self.__send_to_frontend({"band": "log", "payload": logrecord})
|
python
|
{
"resource": ""
}
|
q17398
|
CommonService.__update_service_status
|
train
|
def __update_service_status(self, statuscode):
"""Set the internal status of the service object, and notify frontend."""
if self.__service_status != statuscode:
self.__service_status = statuscode
self.__send_service_status_to_frontend()
|
python
|
{
"resource": ""
}
|
q17399
|
CommonService._set_name
|
train
|
def _set_name(self, name):
"""Set a new name for this service, and notify the frontend accordingly."""
self._service_name = name
self.__send_to_frontend({"band": "set_name", "name": self._service_name})
|
python
|
{
"resource": ""
}
|