code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
transcript_preference, __ = TranscriptPreference.objects.update_or_create( course_id=course_id, defaults=preferences ) return TranscriptPreferenceSerializer(transcript_preference).data
def create_or_update_transcript_preferences(course_id, **preferences)
Creates or updates course-wide transcript preferences Arguments: course_id(str): course id Keyword Arguments: preferences(dict): keyword arguments
2.719406
4.917504
0.553005
try: transcript_preference = TranscriptPreference.objects.get(course_id=course_id) transcript_preference.delete() except TranscriptPreference.DoesNotExist: pass
def remove_transcript_preferences(course_id)
Deletes course-wide transcript preferences. Arguments: course_id(str): course id
1.99329
2.452612
0.812722
try: video_image = CourseVideo.objects.select_related('video_image').get( course_id=course_id, video__edx_video_id=edx_video_id ).video_image return video_image.image_url() except ObjectDoesNotExist: return None
def get_course_video_image_url(course_id, edx_video_id)
Returns course video image url or None if no image found
2.534488
2.428536
1.043628
try: course_video = CourseVideo.objects.select_related('video').get( course_id=course_id, video__edx_video_id=edx_video_id ) except ObjectDoesNotExist: error_message = u'VAL: CourseVideo not found for edx_video_id: {0} and course_id: {1}'.format( edx_video_id, course_id ) raise ValVideoNotFoundError(error_message) video_image, _ = VideoImage.create_or_update(course_video, file_name, image_data) return video_image.image_url()
def update_video_image(edx_video_id, course_id, image_data, file_name)
Update video image for an existing video. NOTE: If `image_data` is None then the `file_name` value is used as is, otherwise a new file name is constructed from a uuid and the extension of `file_name`. `image_data` will be None in the case of course re-run and export. Arguments: image_data (InMemoryUploadedFile): Image data to be saved for a course video. Returns: course video image url. Raises: ValVideoNotFoundError if the CourseVideo cannot be retrieved.
2.818938
2.457903
1.146888
try: profile = Profile(profile_name=profile_name) profile.full_clean() profile.save() except ValidationError as err: raise ValCannotCreateError(err.message_dict)
def create_profile(profile_name)
Used to create Profile objects in the database. A profile needs to exist before an EncodedVideo object can be created. Args: profile_name (str): ID of the profile Raises: ValCannotCreateError: Raised if the profile name is invalid or already exists
3.625454
2.710947
1.337339
try: return Video.objects.prefetch_related("encoded_videos", "courses").get(edx_video_id=edx_video_id) except Video.DoesNotExist: error_message = u"Video not found for edx_video_id: {0}".format(edx_video_id) raise ValVideoNotFoundError(error_message) except Exception: error_message = u"Could not get edx_video_id: {0}".format(edx_video_id) logger.exception(error_message) raise ValInternalError(error_message)
def _get_video(edx_video_id)
Get a Video instance, prefetching encoded video and course information. Raises ValVideoNotFoundError if the video cannot be retrieved.
2.626737
2.093916
1.254462
profiles_to_urls = {profile: None for profile in profiles} try: video_info = get_video_info(edx_video_id) except ValVideoNotFoundError: return profiles_to_urls for encoded_video in video_info["encoded_videos"]: if encoded_video["profile"] in profiles: profiles_to_urls[encoded_video["profile"]] = encoded_video["url"] return profiles_to_urls
def get_urls_for_profiles(edx_video_id, profiles)
Returns a dict mapping profiles to URLs. If the profiles or video is not found, urls will be blank. Args: edx_video_id (str): id of the video profiles (list): list of profiles we want to search for Returns: (dict): A dict containing the profile to url pair
2.810125
3.248596
0.865028
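The mapping above (every requested profile pre-filled with None, then overwritten from the encoded videos) can be exercised without the VAL models; a minimal sketch, where a plain list of dicts stands in for get_video_info():

```python
# Sketch of the profile-to-URL mapping; the sample encodings are illustrative.
def urls_for_profiles(encoded_videos, profiles):
    # Every requested profile starts as None so missing encodings stay blank.
    profiles_to_urls = {profile: None for profile in profiles}
    for encoded_video in encoded_videos:
        if encoded_video["profile"] in profiles:
            profiles_to_urls[encoded_video["profile"]] = encoded_video["url"]
    return profiles_to_urls

encodings = [
    {"profile": "mobile", "url": "http://www.example.com/meow"},
    {"profile": "desktop", "url": "http://www.example.com/woof"},
]
print(urls_for_profiles(encodings, ["mobile", "hls"]))
# {'mobile': 'http://www.example.com/meow', 'hls': None}
```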
videos = Video.objects.filter(**video_filter) paginator_context = {} if sort_field: # Refining by edx_video_id ensures a total order videos = videos.order_by(sort_field.value, "edx_video_id") if sort_dir == SortDirection.desc: videos = videos.reverse() if pagination_conf: videos_per_page = pagination_conf.get('videos_per_page') paginator = Paginator(videos, videos_per_page) videos = paginator.page(pagination_conf.get('page_number')) paginator_context = { 'current_page': videos.number, 'total_pages': videos.paginator.num_pages, 'items_on_one_page':videos_per_page } return (VideoSerializer(video).data for video in videos), paginator_context
def _get_videos_for_filter(video_filter, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None)
Returns a generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order.
2.894256
2.467168
1.173109
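The total order promised by _get_videos_for_filter (sort field first, edx_video_id as tie-breaker, reversed for descending) can be sketched with plain dicts and sorted(); the field names here are illustrative:

```python
# Sketch of the tie-breaking sort order, using plain dicts instead of a queryset.
videos = [
    {"edx_video_id": "vid-b", "created": "2020-01-01"},
    {"edx_video_id": "vid-a", "created": "2020-01-01"},
    {"edx_video_id": "vid-c", "created": "2019-12-31"},
]
# Order by the sort field, then by edx_video_id so equal keys have a stable order.
ascending = sorted(videos, key=lambda v: (v["created"], v["edx_video_id"]))
# Descending order is just the reverse of the ascending result, as queryset.reverse() does.
descending = list(reversed(ascending))
print([v["edx_video_id"] for v in ascending])   # ['vid-c', 'vid-a', 'vid-b']
print([v["edx_video_id"] for v in descending])  # ['vid-b', 'vid-a', 'vid-c']
```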
course_videos = (CourseVideo.objects.select_related('video') .prefetch_related('video__encoded_videos', 'video__encoded_videos__profile') .filter(video__encoded_videos__profile__profile_name='youtube') .order_by('id') .distinct()) if course_ids: course_videos = course_videos.filter(course_id__in=course_ids) course_videos = course_videos.values_list('course_id', 'video__edx_video_id') if limit is not None and offset is not None: course_videos = course_videos[offset: offset+limit] course_videos_with_yt_profile = [] for course_id, edx_video_id in course_videos: yt_profile = EncodedVideo.objects.filter( video__edx_video_id=edx_video_id, profile__profile_name='youtube' ).first() if yt_profile: course_videos_with_yt_profile.append(( course_id, edx_video_id, yt_profile.url )) return course_videos_with_yt_profile
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None)
Returns a list that contains all the course ids and video ids with the youtube profile Args: course_ids (list): valid course ids limit (int): batch records limit offset (int): an offset for selecting a batch Returns: (list): Tuples of course_id, edx_video_id and youtube video url
2.046783
2.046047
1.00036
return _get_videos_for_filter( {'courses__course_id': six.text_type(course_id), 'courses__is_hidden': False}, sort_field, sort_dir, pagination_conf, )
def get_videos_for_course(course_id, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None)
Returns an iterator of videos for the given course id. Args: course_id (String) sort_field (VideoSortField) sort_dir (SortDirection) Returns: A generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order.
3.961936
4.55372
0.870044
course_video = CourseVideo.objects.get(course_id=course_id, video__edx_video_id=edx_video_id) course_video.is_hidden = True course_video.save()
def remove_video_for_course(course_id, edx_video_id)
Soft deletes video for particular course. Arguments: course_id (str): id of the course edx_video_id (str): id of the video to be hidden
2.365867
2.615757
0.904467
videos, __ = _get_videos_for_filter( {"edx_video_id__in":edx_video_ids}, sort_field, sort_dir, ) return videos
def get_videos_for_ids( edx_video_ids, sort_field=None, sort_dir=SortDirection.asc )
Returns an iterator of videos that match the given list of ids. Args: edx_video_ids (list) sort_field (VideoSortField) sort_dir (SortDirection) Returns: A generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order
4.451266
5.612413
0.793111
# In case someone passes in a key (VAL doesn't really understand opaque keys) course_id = six.text_type(course_id) try: encoded_videos = EncodedVideo.objects.filter( profile__profile_name__in=profiles, video__courses__course_id=course_id ).select_related() except Exception: error_message = u"Could not get encoded videos for course: {0}".format(course_id) logger.exception(error_message) raise ValInternalError(error_message) # DRF serializers were causing extra queries for some reason... return_dict = {} for enc_vid in encoded_videos: # Add duration to edx_video_id return_dict.setdefault(enc_vid.video.edx_video_id, {}).update( { "duration": enc_vid.video.duration, } ) # Add profile information to edx_video_id's profiles return_dict[enc_vid.video.edx_video_id].setdefault("profiles", {}).update( {enc_vid.profile.profile_name: { "url": enc_vid.url, "file_size": enc_vid.file_size, }} ) return return_dict
def get_video_info_for_course_and_profiles(course_id, profiles)
Returns a dict of edx_video_ids with a dict of requested profiles. Args: course_id (str): id of the course profiles (list): list of profile_names Returns: (dict): Returns all the profiles attached to a specific edx_video_id { edx_video_id: { 'duration': length of the video in seconds, 'profiles': { profile_name: { 'url': url of the encoding 'file_size': size of the file in bytes }, } }, } Example: Given two videos with two profiles each in course_id 'test_course': { u'edx_video_id_1': { u'duration': 1111, u'profiles': { u'mobile': { 'url': u'http://www.example.com/meow', 'file_size': 2222 }, u'desktop': { 'url': u'http://www.example.com/woof', 'file_size': 4444 } } }, u'edx_video_id_2': { u'duration': 2222, u'profiles': { u'mobile': { 'url': u'http://www.example.com/roar', 'file_size': 6666 }, u'desktop': { 'url': u'http://www.example.com/bzzz', 'file_size': 8888 } } } }
4.010552
3.331529
1.203817
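The nested return structure is assembled with setdefault/update; a sketch using flat sample rows in place of the EncodedVideo queryset:

```python
# Sketch of building the {edx_video_id: {'duration': ..., 'profiles': {...}}} dict.
rows = [  # (video id, duration, profile, url, file size) -- sample values only
    ("edx_video_id_1", 1111, "mobile", "http://www.example.com/meow", 2222),
    ("edx_video_id_1", 1111, "desktop", "http://www.example.com/woof", 4444),
]
return_dict = {}
for video_id, duration, profile, url, file_size in rows:
    # Record the duration once per edx_video_id.
    return_dict.setdefault(video_id, {}).update({"duration": duration})
    # Attach each profile's url and file size under 'profiles'.
    return_dict[video_id].setdefault("profiles", {}).update(
        {profile: {"url": url, "file_size": file_size}}
    )
print(return_dict["edx_video_id_1"]["profiles"]["desktop"]["url"])
# http://www.example.com/woof
```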
if source_course_id == destination_course_id: return course_videos = CourseVideo.objects.select_related('video', 'video_image').filter( course_id=six.text_type(source_course_id) ) for course_video in course_videos: destination_course_video, __ = CourseVideo.objects.get_or_create( video=course_video.video, course_id=destination_course_id ) if hasattr(course_video, 'video_image'): VideoImage.create_or_update( course_video=destination_course_video, file_name=course_video.video_image.image.name )
def copy_course_videos(source_course_id, destination_course_id)
Adds the destination_course_id to the videos taken from the source_course_id Args: source_course_id: The original course_id destination_course_id: The new course_id where the videos will be copied
2.232022
2.338941
0.954287
video_image_name = '' video = _get_video(video_id) try: course_video = CourseVideo.objects.select_related('video_image').get(course_id=course_id, video=video) video_image_name = course_video.video_image.image.name except ObjectDoesNotExist: pass video_el = Element( 'video_asset', attrib={ 'client_video_id': video.client_video_id, 'duration': six.text_type(video.duration), 'image': video_image_name } ) for encoded_video in video.encoded_videos.all(): SubElement( video_el, 'encoded_video', { name: six.text_type(getattr(encoded_video, name)) for name in ['profile', 'url', 'file_size', 'bitrate'] } ) return create_transcripts_xml(video_id, video_el, resource_fs, static_dir)
def export_to_xml(video_id, resource_fs, static_dir, course_id=None)
Exports data for a video into an xml object. NOTE: For external video ids, only transcript information will be added to the xml. If external is False, edx_video_id will be at the first index of the list. Arguments: video_id (str): Video id of the video to export transcripts. course_id (str): The ID of the course with which this video is associated. static_dir (str): The directory to store the transcript file. resource_fs (SubFS): Export file system. Returns: An lxml video_asset element containing export data Raises: ValVideoNotFoundError: if the video does not exist
3.12485
3.335113
0.936955
transcript_filename = '{video_id}-{language_code}.srt'.format( video_id=video_id, language_code=language_code ) transcript_data = get_video_transcript_data(video_id, language_code) if transcript_data: transcript_content = Transcript.convert( transcript_data['content'], input_format=file_format, output_format=Transcript.SRT ) create_file_in_fs(transcript_content, transcript_filename, resource_fs, static_dir) return transcript_filename
def create_transcript_file(video_id, language_code, file_format, resource_fs, static_dir)
Writes transcript file to file system. Arguments: video_id (str): Video id of the video transcript file is attached. language_code (str): Language code of the transcript. file_format (str): File format of the transcript file. static_dir (str): The Directory to store transcript file. resource_fs (SubFS): The file system to store transcripts.
2.48754
2.849923
0.872845
video_transcripts = VideoTranscript.objects.filter(video__edx_video_id=video_id).order_by('language_code') # create transcripts node only when we have transcripts for a video if video_transcripts.exists(): transcripts_el = SubElement(video_el, 'transcripts') transcript_files_map = {} for video_transcript in video_transcripts: language_code = video_transcript.language_code file_format = video_transcript.file_format try: transcript_filename = create_transcript_file( video_id=video_id, language_code=language_code, file_format=file_format, resource_fs=resource_fs.delegate_fs(), static_dir=combine(u'course', static_dir) # File system should not start from /draft directory. ) transcript_files_map[language_code] = transcript_filename except TranscriptsGenerationException: # we don't want to halt export in this case, just log and move to the next transcript. logger.exception('[VAL] Error while generating "%s" transcript for video["%s"].', language_code, video_id) continue SubElement( transcripts_el, 'transcript', { 'language_code': language_code, 'file_format': Transcript.SRT, 'provider': video_transcript.provider, } ) return dict(xml=video_el, transcripts=transcript_files_map)
def create_transcripts_xml(video_id, video_el, resource_fs, static_dir)
Creates xml for transcripts. For each transcript element, an associated transcript file is also created in course OLX. Arguments: video_id (str): Video id of the video. video_el (Element): lxml Element object static_dir (str): The Directory to store transcript file. resource_fs (SubFS): The file system to store transcripts. Returns: lxml Element object with transcripts information
3.887784
4.083887
0.951981
file_format = None transcript_data = get_video_transcript_data(edx_video_id, language_code) # First check if transcript record does not exist. if not transcript_data: # Read file from import file system and attach it to transcript record in DS. try: with resource_fs.open(combine(static_dir, file_name), 'rb') as f: file_content = f.read() file_content = file_content.decode('utf-8-sig') except ResourceNotFound as exc: # Don't raise exception in case transcript file is not found in course OLX. logger.warn( '[edx-val] "%s" transcript "%s" for video "%s" is not found.', language_code, file_name, edx_video_id ) return except UnicodeDecodeError: # Don't raise exception in case transcript contains non-utf8 content. logger.warn( '[edx-val] "%s" transcript "%s" for video "%s" contains a non-utf8 file content.', language_code, file_name, edx_video_id ) return # Get file format from transcript content. try: file_format = get_transcript_format(file_content) except Error as ex: # Don't raise exception, just don't create transcript record. logger.warn( '[edx-val] Error while getting transcript format for video=%s -- language_code=%s --file_name=%s', edx_video_id, language_code, file_name ) return # Create transcript record. create_video_transcript( video_id=edx_video_id, language_code=language_code, file_format=file_format, content=ContentFile(file_content), provider=provider )
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir)
Imports transcript file from file system and creates transcript record in DS. Arguments: edx_video_id (str): Video id of the video. language_code (unicode): Language code of the requested transcript. file_name (unicode): File name of the transcript file. provider (unicode): Transcript provider. resource_fs (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file.
3.035913
2.928775
1.036581
# File system should not start from /draft directory. with open_fs(resource_fs.root_path.split('/drafts')[0]) as file_system: # First import VAL transcripts. for transcript in xml.findall('.//transcripts/transcript'): try: file_format = transcript.attrib['file_format'] language_code = transcript.attrib['language_code'] transcript_file_name = u'{edx_video_id}-{language_code}.{file_format}'.format( edx_video_id=edx_video_id, language_code=language_code, file_format=file_format ) import_transcript_from_fs( edx_video_id=edx_video_id, language_code=transcript.attrib['language_code'], file_name=transcript_file_name, provider=transcript.attrib['provider'], resource_fs=file_system, static_dir=static_dir ) except KeyError: logger.warn("VAL: Required attributes are missing from xml, xml=[%s]", etree.tostring(transcript).strip()) # This won't overwrite transcript for a language which is already present for the video. for language_code, transcript_file_names in six.iteritems(external_transcripts): for transcript_file_name in transcript_file_names: import_transcript_from_fs( edx_video_id=edx_video_id, language_code=language_code, file_name=transcript_file_name, provider=TranscriptProviderType.CUSTOM, resource_fs=file_system, static_dir=static_dir )
def create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts)
Create VideoTranscript objects. Arguments: xml (Element): lxml Element object. edx_video_id (str): Video id of the video. resource_fs (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file. external_transcripts (dict): A dict containing the list of names of the external transcripts. Example: { 'en': ['The_Flash.srt', 'Harry_Potter.srt'], 'es': ['Green_Arrow.srt'] }
3.028703
3.000275
1.009475
cache_stats = [] for name, _ in six.iteritems(settings.CACHES): cache_backend = caches[name] try: cache_backend_stats = cache_backend._cache.get_stats() except AttributeError: # this backend doesn't provide stats logger.info( 'The memcached backend "{0}" does not support or ' 'provide stats.'.format(name) ) continue for address, stats in cache_backend_stats: cache_stats.append( {'name': name, 'address': address, 'stats': stats} ) return cache_stats
def get_cache_stats()
Returns a list of dictionaries of all cache servers and their stats, if they provide stats.
3.557041
3.262171
1.090391
fd, tmp_path = mkstemp() atexit.register(lambda: os.remove(tmp_path)) with os.fdopen(fd, 'w') as f: f.write(ASKPASS.format(username=username, password=password or '')) chmod(tmp_path, 0o700) os.environ['GIT_ASKPASS'] = tmp_path
def register_git_injector(username, password)
Generate a script that writes the password to the git command line tool
2.674059
2.716229
0.984475
if not camel: return camel return ''.join('_' + x if 'A' <= x <= 'Z' else x for x in camel).lower()[camel[0].isupper():]
def to_snake(camel)
TimeSkill -> time_skill
4.595861
4.478902
1.026113
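The body of to_snake is pure Python, so the docstring's example can be checked directly; the function below is copied from the record above:

```python
def to_snake(camel):
    # CamelCase -> snake_case; the trailing slice drops the underscore that
    # would otherwise be inserted before an uppercase first character.
    if not camel:
        return camel
    return ''.join('_' + x if 'A' <= x <= 'Z' else x for x in camel).lower()[camel[0].isupper():]

assert to_snake('TimeSkill') == 'time_skill'
assert to_snake('already_snake') == 'already_snake'
```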
@wraps(func) def wrapper(*args, **kwargs): return '\n'.join( ' '.join(parts) if isinstance(parts, tuple) else parts for parts in func(*args, **kwargs) ) return wrapper
def serialized(func)
Write a serializer by yielding each line of output
3.074136
2.827736
1.087137
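The serialized decorator turns a line-yielding generator into a single string; a self-contained sketch with a hypothetical key/value serializer:

```python
from functools import wraps

def serialized(func):
    # Join yielded items with newlines; tuple items are joined with spaces first.
    @wraps(func)
    def wrapper(*args, **kwargs):
        return '\n'.join(
            ' '.join(parts) if isinstance(parts, tuple) else parts
            for parts in func(*args, **kwargs)
        )
    return wrapper

@serialized
def dump_settings(settings):  # hypothetical serializer, for illustration only
    yield '# settings'
    for key, value in sorted(settings.items()):
        yield (key, '=', str(value))

print(dump_settings({'volume': 7, 'name': 'demo'}))
# # settings
# name = demo
# volume = 7
```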
title = 'Upgrade ' + self.skill.name body = body_template.format( skill_name=self.skill.name, commits='\n'.join( ' - [{}]({})'.format( skill_git.show('-s', sha, format='%s'), skill_repo.get_commit(sha).html_url ) for sha in skill_git.rev_list( '--ancestry-path', '{}..{}'.format(self.skill.entry.sha, 'HEAD') ).split('\n') ) ) return title, body
def create_pr_message(self, skill_git: Git, skill_repo: Repository) -> tuple
Reads git commits from skill repo to create a list of changes as the PR content
4.313721
3.908744
1.103608
click.echo('Moving ship %s to %s,%s with speed %s' % (ship, x, y, speed))
def ship_move(ship, x, y, speed)
Moves SHIP to the new location X,Y.
3.768996
3.991505
0.944254
try: errval = "error" errdict = dict(state="unknown", reason="unknown") if sdp_state.current_state == "unknown": errdict['reason'] = 'database not initialised.' LOG.debug('Current state is unknown;') LOG.debug('Target state is %s;', sdp_state.target_state) LOG.debug('Current state timestamp is %s;', sdp_state.current_timestamp) elif sdp_state.current_state is None: errdict['reason'] = 'Master Controller Services may have died.' LOG.debug('Current state is NONE;') LOG.debug('Target state is %s;', sdp_state.target_state) LOG.debug('Current state timestamp is %s;', sdp_state.current_timestamp) elif sdp_state.target_state is None: errdict['reason'] = 'Master Controller Services may have died.' LOG.debug('Current state is %s;', sdp_state.current_state) LOG.debug('Target state is NONE;') LOG.debug('Current state timestamp is %s;', sdp_state.current_timestamp) LOG.debug('Target state timestamp is %s;', sdp_state.target_timestamp) elif sdp_state.current_timestamp is None: errdict['reason'] = 'Master Controller Services may have died.' LOG.debug('Current state is %s;', sdp_state.current_state) LOG.debug('Target state is %s;', sdp_state.target_state) LOG.debug('Current state timestamp is NONE') LOG.debug('Target state timestamp is %s;', sdp_state.target_timestamp) elif sdp_state.target_timestamp is None: errdict['reason'] = 'Master Controller Services may have died.' LOG.debug('Current state is %s;', sdp_state.current_state) LOG.debug('Target state is %s;', sdp_state.target_state) LOG.debug('Current state timestamp is %s;', sdp_state.current_timestamp) LOG.debug('Target state timestamp is NONE') elif sdp_state.current_timestamp < sdp_state.target_timestamp: errdict['reason'] = \ 'Timestamp for Master Controller Services is stale.' LOG.debug('Current state is %s;', sdp_state.current_state) LOG.debug('Target state is %s;', sdp_state.target_state) LOG.debug('Current state timestamp is %s;', sdp_state.current_timestamp) LOG.debug('Target state timestamp is %s;', sdp_state.target_timestamp) else: errval = "okay" except ConnectionError as err: errdict['reason'] = err LOG.debug('Connection Error %s', err) return errval, errdict
def _check_status(sdp_state)
SDP status check. Run all the tests needed to determine, if the SDP state is "broken", what the likely cause is, and return a suitable status message to be sent back by the calling function.
1.677229
1.660264
1.010218
return { "message": "Welcome to the SIP Master Controller (flask variant)", "_links": { "items": [ { "Link": "Health", "href": "{}health".format(request.url) }, { "Link": "Version", "href": "{}version".format(request.url) }, { "Link": "Allowed target states", "href": "{}allowed_target_sdp_states".format(request.url) }, { "Link": "SDP state", "href": "{}state".format(request.url) }, { "Link": "SDP target state", "href": "{}state/target".format(request.url) }, { "Link": "SDP target state", "href": "{}target_state".format(request.url) }, { "Link": "SDP current state", "href": "{}state/current".format(request.url) }, { "Link": "Scheduling Block Instances", "href": "{}scheduling_block_instances".format(request.url) }, { "Link": "Processing Blocks", "href": "{}processing_blocks".format(request.url) }, { "Link": "Resource Availability", "href": "{}resource_availability".format(request.url) }, { "Link": "Configure SBI", "href": "{}configure_sbi".format(request.url) } ] } }
def root()
Home page.
2.528496
2.520643
1.003116
up_time = time.time() - START_TIME response = dict(service=__service_id__, uptime='{:.2f}s'.format(up_time)) return response, HTTPStatus.OK
def health()
Check the health of this service.
5.886471
6.15781
0.955936
try: sdp_state = SDPState() return sdp_state.allowed_target_states[sdp_state.current_state] except KeyError: LOG.error("Key Error") return dict(state="KeyError", reason="KeyError")
def allowed_transitions()
Get target states allowed for the current state.
7.390677
6.285488
1.175832
sdp_state = SDPState() errval, errdict = _check_status(sdp_state) if errval == "error": LOG.debug(errdict['reason']) return dict( current_state="unknown", target_state="unknown", last_updated="unknown", reason=errdict['reason'] ) _last_updated = sdp_state.current_timestamp if sdp_state.target_timestamp > _last_updated: _last_updated = sdp_state.target_timestamp return dict( current_state=sdp_state.current_state, target_state=sdp_state.target_state, allowed_target_states=sdp_state.allowed_target_states[ sdp_state.current_state], last_updated=_last_updated.isoformat() ), HTTPStatus.OK
def get_state()
SDP State. Return the current state, target state, and allowed target states.
3.441882
3.132012
1.098936
sdp_state = SDPState() errval, errdict = _check_status(sdp_state) if errval == "error": LOG.debug(errdict['reason']) return dict( current_target_state="unknown", last_updated="unknown", reason=errdict['reason'] ) LOG.debug('Getting target state') target_state = sdp_state.target_state LOG.debug('Target state = %s', target_state) return dict( current_target_state=target_state, allowed_target_states=sdp_state.allowed_target_states[ sdp_state.current_state], last_updated=sdp_state.target_timestamp.isoformat())
def get_target_state()
SDP target state. Returns the target state, the allowed target states, and the time of the last update.
3.925495
3.694475
1.062531
sdp_state = SDPState() errval, errdict = _check_status(sdp_state) if errval == "error": LOG.debug(errdict['reason']) return dict( current_state="unknown", last_updated="unknown", reason=errdict['reason'] ) LOG.debug('Current State: %s', sdp_state.current_state) LOG.debug('Current State last updated: %s', sdp_state.current_timestamp.isoformat()) return dict( current_state=sdp_state.current_state, last_updated=sdp_state.current_timestamp.isoformat() ), HTTPStatus.OK
def get_current_state()
Return the SDP State and the timestamp for when it was updated.
3.726541
3.294049
1.131295
sdp_state = SDPState() errval, errdict = _check_status(sdp_state) if errval == "error": LOG.debug(errdict['reason']) rdict = dict( current_state="unknown", last_updated="unknown", reason=errdict['reason'] ) else: try: LOG.debug('request is of type %s', type(request)) request_data = request.data LOG.debug('request data is of type %s', type(request_data)) LOG.debug('request is %s', request_data) request_data = request.data target_state = request_data['value'].lower() sdp_state.update_target_state(target_state) rdict = dict(message='Target state successfully updated to {}' .format(target_state)) except ValueError as error: rdict = dict(error='Failed to set target state', reason=str(error)) except RuntimeError as error: rdict = dict(error='RunTime error', reason=str(error)) return rdict
def put_target_state()
SDP target state. Sets the target state.
3.585417
3.541566
1.012382
pb_list = ProcessingBlockList() return dict(active=pb_list.active, completed=pb_list.completed, aborted=pb_list.aborted)
def processing_block_list()
Return the list of processing blocks known to SDP.
5.73545
5.131579
1.117677
sbi_list = SchedulingBlockInstanceList() return dict(active=sbi_list.active, completed=sbi_list.completed, aborted=sbi_list.aborted)
def scheduling_blocks()
Return list of Scheduling Block instances known to SDP.
5.819362
5.076963
1.146229
# Need an ID for the subarray - guessing I just get # the list of inactive subarrays and use the first inactive_list = SubarrayList().inactive request_data = request.data LOG.debug('request is of type %s', type(request_data)) try: sbi = Subarray(inactive_list[0]) sbi.activate() sbi.configure_sbi(request_data) except jsonschema.exceptions.ValidationError as error: LOG.error('Error configuring SBI: %s', error) return dict(path=error.absolute_path.__str__(), schema_path=error.schema_path.__str__(), message=error.message) return dict(status="Accepted SBI: {}".format(sbi.id))
def configure_sbi()
Configure an SBI using POSTed configuration.
5.903594
5.749272
1.026842
return dict(links=dict(api='{}{}'.format(request.url, PREFIX[1:]))), \ HTTPStatus.OK
def home()
Temporary helper function to link to the API routes
14.747852
10.718721
1.375897
return (dict(error='Invalid URL: /{}'.format(path), links=dict(root='{}{}'.format(request.url_root, PREFIX[1:]))), HTTPStatus.NOT_FOUND)
def catch_all(path)
Catch all path - return a JSON 404
9.563305
8.21176
1.164587
active = [] for i in range(__num_subarrays__): key = Subarray.get_key(i) if DB.get_hash_value(key, 'active').upper() == 'TRUE': active.append(Subarray.get_id(i)) return active
def get_active() -> List[str]
Return the list of active subarrays.
5.557498
4.220361
1.31683
inactive = [] for i in range(__num_subarrays__): key = Subarray.get_key(i) if DB.get_hash_value(key, 'active').upper() == 'FALSE': inactive.append(Subarray.get_id(i)) return inactive
def get_inactive() -> List[str]
Return the list of inactive subarrays.
5.796409
4.354901
1.331008
# Create a logger. log = logging.getLogger('pyspark') log.setLevel(logging.INFO) if len(log.handlers) == 0: log.addHandler(logging.StreamHandler(sys.stdout)) # Create an imager and configure it. precision = bc_settings.value['precision'] imager = oskar.Imager(precision) for key, value in bc_settings.value['imager'].items(): setattr(imager, key, value) grid_size = imager.plane_size grid_weights = None # Get a handle to the input Measurement Set. ms_han = oskar.MeasurementSet.open(input_file) # Check if doing a first pass. if coords_only: # If necessary, generate a local weights grid. if imager.weighting == 'Uniform': grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision) # Do a first pass for uniform weighting or W-projection. log.info('Reading coordinates from %s', input_file) imager.coords_only = True process_input_data(ms_han, imager, None, grid_weights) imager.coords_only = False # Return weights grid and required number of W-planes as RDD element. return grid_weights, imager.num_w_planes # Allocate a local visibility grid on the node. grid_data = numpy.zeros([grid_size, grid_size], dtype='c8' if precision == 'single' else 'c16') # Process data according to mode. log.info('Reading visibilities from %s', input_file) if bc_settings.value['combine']: # Get weights grid from Spark Broadcast variable. if imager.weighting == 'Uniform': grid_weights = bc_grid_weights.value # Populate the local visibility grid. grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights) # Return grid as RDD element. log.info('Returning gridded visibilities to RDD') return grid_data, grid_norm else: # If necessary, generate a local weights grid. if imager.weighting == 'Uniform': grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision) # If necessary, do a first pass for uniform weighting or W-projection. if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection': imager.coords_only = True process_input_data(ms_han, imager, None, grid_weights) imager.coords_only = False # Populate the local visibility grid. grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights) # Save image by finalising grid. output_file = splitext(input_file)[0] + '.fits' save_image(imager, grid_data, grid_norm, output_file) log.info('Finished. Output file is %s', output_file) return 0
def node_run(input_file, coords_only, bc_settings, bc_grid_weights)
Main function to process visibility data on Spark cluster nodes. Args: input_file (str): RDD element containing filename to process. coords_only (boolean): If true, read only baseline coordinates to define the weights grid. bc_settings (pyspark.broadcast.Broadcast): Spark broadcast variable containing pipeline settings dictionary. bc_grid_weights (pyspark.broadcast.Broadcast): Spark broadcast variable containing weights grid. May be None. Returns: tuple: Output RDD element.
3.300485
3.111596
1.060705
# Make the image (take the FFT, normalise, and apply grid correction). imager.finalise_plane(grid_data, grid_norm) grid_data = numpy.real(grid_data) # Trim the image if required. border = (imager.plane_size - imager.image_size) // 2 if border > 0: end = border + imager.image_size grid_data = grid_data[border:end, border:end] # Write the FITS file. hdr = fits.header.Header() fits.writeto(output_file, grid_data, hdr, clobber=True)
def save_image(imager, grid_data, grid_norm, output_file)
Makes an image from gridded visibilities and saves it to a FITS file. Args: imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray): Final visibility grid. grid_norm (float): Grid normalisation to apply. output_file (str): Name of output FITS file to write.
3.81439
3.690728
1.033506
def is_seq(obj): return hasattr(obj, "__getitem__") or hasattr(obj, "__iter__") if object_a is None or object_b is None: return None elif is_seq(object_a) and is_seq(object_b): reduced = [] for element_a, element_b in zip(object_a, object_b): if element_a is not None and element_b is not None: reduced.append(element_a + element_b) else: reduced.append(None) return reduced else: return object_a + object_b
def reduce_sequences(object_a, object_b)
Performs an element-wise addition of sequences into a new list. Both sequences must have the same length, and the addition operator must be defined for each element of the sequence.
1.850014
1.84442
1.003033
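reduce_sequences has no external dependencies, so its element-wise addition and None handling can be demonstrated as-is; the body below is copied from the record above:

```python
def reduce_sequences(object_a, object_b):
    # Element-wise addition of two sequences; a None in either slot yields None.
    def is_seq(obj):
        return hasattr(obj, "__getitem__") or hasattr(obj, "__iter__")

    if object_a is None or object_b is None:
        return None
    elif is_seq(object_a) and is_seq(object_b):
        reduced = []
        for element_a, element_b in zip(object_a, object_b):
            if element_a is not None and element_b is not None:
                reduced.append(element_a + element_b)
            else:
                reduced.append(None)
        return reduced
    return object_a + object_b

print(reduce_sequences([1, 2, None], [10, 20, 30]))  # [11, 22, None]
print(reduce_sequences(5, 7))                        # 12
```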
# Check command line arguments. if len(sys.argv) < 3: raise RuntimeError( 'Usage: spark-submit spark_imager_test.py <settings_file> <dir> ' '[partitions]') # Create log object. log = logging.getLogger('pyspark') log.setLevel(logging.INFO) log.addHandler(logging.StreamHandler(sys.stdout)) # Load pipeline settings. with open(sys.argv[1]) as f: settings = json.load(f) # Get a list of input Measurement Sets to process. data_dir = str(sys.argv[2]) inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS')) inputs = filter(None, inputs) log.info('Found input Measurement Sets: %s', ', '.join(inputs)) # Get a Spark context. context = pyspark.SparkContext(appName="spark_imager_test") # Create the Spark RDD containing the input filenames, # suitably parallelized. partitions = int(sys.argv[3]) if len(sys.argv) > 3 else 2 rdd = context.parallelize(inputs, partitions) # Define Spark broadcast variables. bc_settings = context.broadcast(settings) bc_grid_weights = None # Process coordinates first if required. if (settings['combine'] and ( settings['imager']['weighting'] == 'Uniform' or settings['imager']['algorithm'] == 'W-projection')): # Create RDD to generate weights grids. rdd_coords = rdd.map( partial(node_run, coords_only=True, bc_settings=bc_settings, bc_grid_weights=None)) # Mark this RDD as persistent so it isn't computed more than once. rdd_coords.persist() # Get the maximum number of W-planes required, and update settings. num_w_planes = rdd_coords.map(lambda x: x[1]).max() settings['imager']['num_w_planes'] = num_w_planes # Get the combined grid of weights and broadcast it to nodes. output = rdd_coords.reduce(reduce_sequences) bc_grid_weights = context.broadcast(output[0]) # Delete this RDD. rdd_coords.unpersist() # Re-broadcast updated settings. bc_settings = context.broadcast(settings) # Run parallel pipeline on worker nodes and combine visibility grids. output = rdd.map( partial(node_run, coords_only=False, bc_settings=bc_settings, bc_grid_weights=bc_grid_weights)).reduce(reduce_sequences) # Finalise combined visibility grids if required. if settings['combine']: # Create an imager to finalise (FFT) the gridded data. imager = oskar.Imager(settings['precision']) for key, value in settings['imager'].items(): setattr(imager, key, value) # Finalise grid and save image. save_image(imager, output[0], output[1], settings['output_file']) log.info('Finished. Output file is %s', settings['output_file']) context.stop()
def main()
Runs test imaging pipeline using Spark.
3.905975
3.750131
1.041557
with open(path) as json_data: schema = json.load(json_data) return schema
def load_schema(path)
Loads a JSON schema file.
2.568031
2.567102
1.000362
cursor = '0' while cursor != 0: cursor, keys = DB.scan(cursor, match='*', count=5000) if keys: DB.delete(*keys)
def clear_db()
Clear the entire db.
5.359053
5.350273
1.001641
ids = [key.split('/')[-1] for key in DB.keys(pattern='scheduling_block/*')] return sorted(ids)
def get_scheduling_block_ids()
Return list of scheduling block IDs
8.354188
7.319187
1.141409
if schema_path is None: schema_path = os.path.join(os.path.dirname(__file__), 'sbi_post.json') schema = load_schema(schema_path) jsonschema.validate(config, schema) # Add the scheduling block to the database # (This is done as a single k/v pair here but would probably be # expanded to a set of keys in the actual implementation) DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config)) # Add a event to the scheduling block event list to notify # of a new scheduling block being added to the db. DB.rpush('scheduling_block_events', json.dumps(dict(type="created", id=config["id"])))
def add_scheduling_block(config, schema_path=None)
Add a Scheduling Block to the Configuration Database. The configuration dictionary must match the schema defined in the file referenced by schema_path. Args: config (dict): Scheduling Block instance request configuration. schema_path (str): Path to the schema file used to validate the Scheduling Block Instance request
5.107554
4.900758
1.042197
DB.delete('scheduling_block/{}'.format(block_id)) # Add a event to the scheduling block event list to notify # of a deleting a scheduling block from the db DB.rpush('scheduling_block_events', json.dumps(dict(type="deleted", id=block_id)))
def delete_scheduling_block(block_id)
Delete Scheduling Block with the specified ID
7.804842
8.096486
0.963979
event = DB.rpoplpush('scheduling_block_events', 'scheduling_block_event_history') if event: event = json.loads(event.decode('utf-8')) return event
def get_scheduling_block_event()
Return the latest Scheduling Block event
4.946588
4.597358
1.075963
ids = set() for key in sorted(DB.keys(pattern='scheduling_block/*')): config = json.loads(DB.get(key)) ids.add(config['sub_array_id']) return sorted(list(ids))
def get_sub_array_ids()
Return list of sub-array IDs currently known to SDP
5.693138
5.860985
0.971362
ids = [] for key in sorted(DB.keys(pattern='scheduling_block/*')): config = json.loads(DB.get(key)) if config['sub_array_id'] == sub_array_id: ids.append(config['id']) return ids
def get_subarray_sbi_ids(sub_array_id)
Return list of scheduling block IDs associated with the given sub_array_id
4.25976
3.653662
1.165888
ids = [] for key in sorted(DB.keys(pattern='scheduling_block/*')): config = json.loads(DB.get(key)) for processing_block in config['processing_blocks']: ids.append(processing_block['id']) return ids
def get_processing_block_ids()
Return an array of Processing Block ids
4.586822
4.504985
1.018166
identifiers = block_id.split(':') scheduling_block_id = identifiers[0] scheduling_block_config = get_scheduling_block(scheduling_block_id) for processing_block in scheduling_block_config['processing_blocks']: if processing_block['id'] == block_id: return processing_block raise KeyError('Unknown Processing Block id: {} ({})' .format(identifiers[-1], block_id))
def get_processing_block(block_id)
Return the Processing Block Configuration for the specified ID
3.156243
2.87162
1.099116
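The lookup depends on processing block IDs embedding the parent scheduling block ID before a colon; a sketch of that convention with hypothetical IDs and an in-memory dict standing in for the database:

```python
# Sketch of the composite-ID lookup; IDs and data are hypothetical.
scheduling_blocks = {
    "SBI-001": {"processing_blocks": [{"id": "SBI-001:PB-000"},
                                      {"id": "SBI-001:PB-001"}]},
}

def get_processing_block(block_id):
    # Everything before the first ':' is the scheduling block ID.
    scheduling_block_id = block_id.split(':')[0]
    config = scheduling_blocks[scheduling_block_id]
    for processing_block in config['processing_blocks']:
        if processing_block['id'] == block_id:
            return processing_block
    raise KeyError('Unknown Processing Block id: {}'.format(block_id))

print(get_processing_block("SBI-001:PB-001"))  # {'id': 'SBI-001:PB-001'}
```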
scheduling_block_id = processing_block_id.split(':')[0] config = get_scheduling_block(scheduling_block_id) processing_blocks = config.get('processing_blocks') processing_block = list(filter( lambda x: x.get('id') == processing_block_id, processing_blocks))[0] config['processing_blocks'].remove(processing_block) DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config)) # Add a event to the scheduling block event list to notify # of a new scheduling block being added to the db. DB.rpush('processing_block_events', json.dumps(dict(type="deleted", id=processing_block_id)))
def delete_processing_block(processing_block_id)
Delete Processing Block with the specified ID
3.553992
3.626447
0.98002
event = DB.rpoplpush('processing_block_events', 'processing_block_event_history') if event: event = json.loads(event.decode('utf-8')) return event
def get_processing_block_event()
Return the latest Processing Block event
4.901985
4.43814
1.104513
key = _keys.subscribers(object_type) DB.remove_from_list(key, subscriber) DB.append_to_list(key, subscriber) return EventQueue(object_type, subscriber, callback_handler)
def subscribe(object_type: str, subscriber: str, callback_handler: Callable = None) -> EventQueue
Subscribe to the specified object type. Returns an EventQueue object which can be used to query events associated with the object type for this subscriber. Args: object_type (str): Object type subscriber (str): Subscriber name callback_handler (function, optional): Callback handler function. Returns: EventQueue, event queue object.
4.941782
5.652797
0.874219
return DB.get_list(_keys.subscribers(object_type))
def get_subscribers(object_type: str) -> List[str]
Get the list of subscribers to events of the object type. Args: object_type (str): Type of object. Returns: List[str], list of subscriber names.
21.209522
48.172646
0.440281
event = Event(event_id=_get_event_id(object_type), event_type=event_type, event_data=event_data, event_origin=origin, object_type=object_type, object_id=object_id, object_key=object_key) # Publish the event to subscribers _publish_to_subscribers(event) # Update the object event list and data. if object_key is None: object_key = '{}:{}'.format(object_type, object_id) _update_object(object_key, event) # Execute the set of db transactions as an atomic transaction. DB.execute()
def publish(event_type: str, event_data: dict = None, object_type: str = None, object_id: str = None, object_key: str = None, origin: str = None)
Publish an event. Publishes the event to all subscribers and stores the event with the object. Args: event_type (str): The event type event_data (dict, optional): Optional event data object_type (str): Type of object. object_id (str): Object ID object_key (str, optional): Key used to store the object. If None, the key is assumed to be of the form <object type>:<object id> origin (str): Origin or publisher of the event.
3.54356
3.546928
0.99905
return DB.get_list(_keys.events_list(object_key))
def _get_events_list(object_key: str) -> List[str]
Get list of event ids for the object with the specified key. Args: object_key (str): Key of an object in the database.
16.354811
23.569088
0.693909
events_data = [] key = _keys.events_data(object_key) for event_id in _get_events_list(object_key): event_dict = literal_eval(DB.get_hash_value(key, event_id)) events_data.append(event_dict) return events_data
def _get_events_data(object_key: str) -> List[dict]
Get the list of event data for the object with the specified key. Args: object_key (str): Key of an object in the database.
3.982669
4.509296
0.883213
events_data = _get_events_data(object_key) return [Event.from_config(event_dict) for event_dict in events_data]
def get_events(object_key: str) -> List[Event]
Get list of events for the object with the specified key.
3.565624
3.4005
1.048559
subscribers = get_subscribers(event.object_type) # Add the event to each subscribers published list for sub in subscribers: DB.prepend_to_list(_keys.published(event.object_type, sub), event.id, pipeline=True) event_dict = deepcopy(event.config) event_dict.pop('id') DB.set_hash_value(_keys.data(event.object_type, sub), event.id, str(event_dict), pipeline=True) DB.publish(event.object_type, event.id, pipeline=True)
def _publish_to_subscribers(event: Event)
Publish an event to all subscribers. - Adds the event id to the published event list for all subscribers. - Adds the event data to the published event data for all subscribers. - Publishes the event id notification to all subscribers. Args: event (Event): Event object to publish.
5.203559
5.02909
1.034692
events_list_key = _keys.events_list(object_key) events_data_key = _keys.events_data(object_key) event_dict = deepcopy(event.config) event_dict.pop('id') DB.append_to_list(events_list_key, event.id, pipeline=True) DB.set_hash_value(events_data_key, event.id, json.dumps(event_dict), pipeline=True)
def _update_object(object_key: str, event: Event)
Update the events list and events data for the object. - Adds the event Id to the list of events for the object. - Adds the event data to the hash of object event data keyed by event id. Args: object_key (str): Key of the object being updated. event (Event): Event object
4.070854
3.681578
1.105736
key = _keys.event_counter(object_type) DB.watch(key, pipeline=True) count = DB.get_value(key) DB.increment(key) DB.execute() if count is None: count = 0 return '{}_event_{:08d}'.format(object_type, int(count))
def _get_event_id(object_type: str) -> str
Return an event key for the event on the object type. This must be a unique event id for the object. Args: object_type (str): Type of object Returns: str, event id
6.539793
6.56654
0.995927
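Event IDs are simply the object type plus a zero-padded counter; a sketch that swaps the watched database counter for an in-memory dict:

```python
from collections import defaultdict

_counters = defaultdict(int)  # stands in for the database counter key

def next_event_id(object_type):
    # Same '{type}_event_{count:08d}' format as above, without the
    # watch/increment/execute transaction around the counter.
    count = _counters[object_type]
    _counters[object_type] += 1
    return '{}_event_{:08d}'.format(object_type, count)

print(next_event_id('pb'))  # pb_event_00000000
print(next_event_id('pb'))  # pb_event_00000001
```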
start_time = time.time() Device.init_device(self) self._pb_id = '' LOG.debug('init PB device %s, time taken %.6f s (total: %.2f s)', self.get_name(), (time.time() - start_time), (time.time() - self._start_time)) self.set_state(DevState.STANDBY)
def init_device(self)
Device constructor.
4.831915
4.423182
1.092407
# FIXME(BMo) instead of creating the object to check if the PB exists # use a method on PB List? # ProcessingBlock(pb_id) self.set_state(DevState.ON) self._pb_id = pb_id
def pb_id(self, pb_id: str)
Set the PB Id for this device.
18.798683
15.423088
1.218866
pb = ProcessingBlock(self._pb_id) return json.dumps(pb.config)
def pb_config(self)
Return the PB configuration.
11.205281
8.718946
1.285165
timestamp = DB.get_hash_value(self._key, 'current_timestamp') return datetime_from_isoformat(timestamp)
def current_timestamp(self) -> datetime
Get the current state timestamp.
10.995116
8.555087
1.285214
timestamp = DB.get_hash_value(self._key, 'target_timestamp') return datetime_from_isoformat(timestamp)
def target_timestamp(self) -> datetime
Get the target state timestamp.
10.657207
8.443954
1.262111
value = value.lower() if not force: current_state = self.current_state if current_state == 'unknown': raise RuntimeError("Unable to set target state when current " "state is 'unknown'") allowed_target_states = self._allowed_target_states[current_state] LOG.debug('Updating target state of %s to %s', self._id, value) if value not in allowed_target_states: raise ValueError("Invalid target state: '{}'. {} can be " "commanded to states: {}". format(value, current_state, allowed_target_states)) return self._update_state('target', value)
def update_target_state(self, value: str, force: bool = True) -> datetime
Set the target state. Args: value (str): New value for target state force (bool): If true, ignore allowed transitions Returns: datetime, update timestamp Raises: RuntimeError, if it is not possible to currently set the target state. ValueError, if the specified target state is not allowed.
3.760582
3.541935
1.061731
value = value.lower() if not force: current_state = self.current_state # IF the current state is unknown, it can be set to any of the # allowed states, otherwise only allow certain transitions. if current_state == 'unknown': allowed_transitions = self._allowed_states else: allowed_transitions = self._allowed_transitions[current_state] allowed_transitions.append(current_state) LOG.debug('Updating current state of %s to %s', self._id, value) if value not in allowed_transitions: raise ValueError("Invalid current state update: '{}'. '{}' " "can be transitioned to states: {}" .format(value, current_state, allowed_transitions)) return self._update_state('current', value)
def update_current_state(self, value: str, force: bool = False) -> datetime
Update the current state. Args: value (str): New value for sdp state force (bool): If true, ignore allowed transitions Returns: datetime, update timestamp Raises: ValueError: If the specified current state is not allowed.
3.746585
3.684954
1.016725
initial_state = initial_state.lower() if initial_state != 'unknown' and \ initial_state not in self._allowed_states: raise ValueError('Invalid initial state: {}'.format(initial_state)) _initial_state = dict( current_state=initial_state, target_state=initial_state, current_timestamp=datetime.utcnow().isoformat(), target_timestamp=datetime.utcnow().isoformat()) return _initial_state
def _initialise(self, initial_state: str = 'unknown') -> dict
Return a dictionary used to initialise a state object. This method is used to obtain a dictionary/hash describing the initial state of SDP or a service in SDP. Args: initial_state (str): Initial state. Returns: dict, Initial state configuration
2.735949
3.016975
0.906852
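The initial-state hash is four fields with matching UTC timestamps; a sketch of the same construction with an illustrative allowed-states list:

```python
from datetime import datetime

ALLOWED_STATES = ['init', 'standby', 'on', 'off', 'disable', 'fault']  # illustrative

def initial_state_hash(initial_state='unknown'):
    # Build the current/target state fields with fresh UTC timestamps.
    initial_state = initial_state.lower()
    if initial_state != 'unknown' and initial_state not in ALLOWED_STATES:
        raise ValueError('Invalid initial state: {}'.format(initial_state))
    now = datetime.utcnow().isoformat()
    return dict(current_state=initial_state,
                target_state=initial_state,
                current_timestamp=now,
                target_timestamp=now)

print(initial_state_hash('standby')['target_state'])  # standby
```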
timestamp = datetime.utcnow() field = '{}_state'.format(state_type) old_state = DB.get_hash_value(self._key, field) DB.set_hash_value(self._key, field, value, pipeline=True) DB.set_hash_value(self._key, '{}_timestamp'.format(state_type), timestamp.isoformat(), pipeline=True) DB.execute() # Publish an event to notify subscribers of the change in state self.publish('{}_state_updated'.format(state_type), event_data=dict(state=value, old_state=old_state)) return timestamp
def _update_state(self, state_type: str, value: str) -> datetime
Update the state of type specified (current or target). Args: state_type(str): Type of state to update, current or target. value (str): New state value. Returns: timestamp, current time
3.467698
3.813238
0.909384
return {key.lower(): [value.lower() for value in value] for key, value in dictionary.items()}
def _dict_lower(dictionary: dict)
Convert allowed state transitions / target states to lowercase.
4.269631
3.178447
1.343308
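_dict_lower only touches keys and list values, so it can be checked with a small allowed-transitions dict; the body is copied from the record above:

```python
def _dict_lower(dictionary):
    # Lower-case every key and every string inside each value list.
    return {key.lower(): [value.lower() for value in value]
            for key, value in dictionary.items()}

allowed_transitions = {'STANDBY': ['ON', 'OFF'], 'ON': ['OFF']}
print(_dict_lower(allowed_transitions))
# {'standby': ['on', 'off'], 'on': ['off']}
```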
# Get schema for validation schema = self._get_schema() LOG.debug('Adding SBI with config: %s', config_dict) # Validates the schema validate(config_dict, schema) # Add status field and value to the data updated_block = self._add_status(config_dict) # Splitting into different names and fields before # adding to the database scheduling_block_data, processing_block_data = \ self._split_sched_block_instance(updated_block) # Adding Scheduling block instance with id name = "scheduling_block:" + updated_block["id"] self._db.set_specified_values(name, scheduling_block_data) # Add a event to the scheduling block event list to notify # of a new scheduling block being added to the db. self._db.push_event(self.scheduling_event_name, updated_block["status"], updated_block["id"]) # Adding Processing block with id for value in processing_block_data: name = ("scheduling_block:" + updated_block["id"] + ":processing_block:" + value['id']) self._db.set_specified_values(name, value) # Add a event to the processing block event list to notify # of a new processing block being added to the db. self._db.push_event(self.processing_event_name, value["status"], value["id"])
def add_sched_block_instance(self, config_dict)
Add Scheduling Block to the database. Args: config_dict (dict): SBI configuration
3.876525
3.760283
1.030913
# Initialise empty list scheduling_block_ids = [] # Pattern used to search scheduling block ids pattern = 'scheduling_block:*' block_ids = self._db.get_ids(pattern) for block_id in block_ids: if 'processing_block' not in block_id: id_split = block_id.split(':')[-1] scheduling_block_ids.append(id_split) return sorted(scheduling_block_ids)
def get_sched_block_instance_ids(self)
Get sorted list of scheduling block instance ids
4.318694
4.02786
1.072206
# Initialise empty list _processing_block_ids = [] # Pattern used to search processing block ids pattern = '*:processing_block:*' block_ids = self._db.get_ids(pattern) for block_id in block_ids: id_split = block_id.split(':')[-1] _processing_block_ids.append(id_split) return sorted(_processing_block_ids)
def get_processing_block_ids(self)
Get sorted list of processing block ids
4.229445
4.015373
1.053313
# Initialise empty list _scheduling_block_ids = [] _sub_array_ids = [] for blocks_id in self.get_sched_block_instance_ids(): _scheduling_block_ids.append(blocks_id) block_details = self.get_block_details(_scheduling_block_ids) for details in block_details: _sub_array_ids.append(details['sub_array_id']) _sub_array_ids = sorted(list(set(_sub_array_ids))) return _sub_array_ids
def get_sub_array_ids(self)
Get list of sub array ids
3.009438
2.925295
1.028764
_ids = [] sbi_ids = self.get_sched_block_instance_ids() for details in self.get_block_details(sbi_ids): if details['sub_array_id'] == sub_array_id: _ids.append(details['id']) return sorted(_ids)
def get_sub_array_sbi_ids(self, sub_array_id)
Get Scheduling Block Instance IDs associated with the given sub-array id
3.448694
2.909914
1.185153
# Convert input to list if needed if not hasattr(block_ids, "__iter__"): block_ids = [block_ids] for _id in block_ids: block_key = self._db.get_block(_id)[0] block_data = self._db.get_all_field_value(block_key) # NOTE(BM) unfortunately this doesn't quite work for keys where \ # the value is a python type (list, dict etc) \ # The following hack works for now but is probably not infallible for key in block_data: for char in ['[', '{']: if char in block_data[key]: block_data[key] = ast.literal_eval( str(block_data[key])) yield block_data
def get_block_details(self, block_ids)
Get details of scheduling or processing block Args: block_ids (list): List of block IDs
4.741266
4.911906
0.96526
block_name = self._db.get_block(block_id) for name in block_name: self._db.set_value(name, field, value)
def update_value(self, block_id, field, value)
Update the value of the given block id and field
3.82344
3.885695
0.983979
LOG.debug('Deleting SBI %s', block_id) scheduling_blocks = self._db.get_all_blocks(block_id) if not scheduling_blocks: raise RuntimeError('Scheduling block not found: {}'. format(block_id)) if scheduling_blocks: for blocks in scheduling_blocks: if "processing_block" not in blocks: self._db.delete_block(blocks) else: split_key = blocks.split(':') self._db.delete_block(blocks) # Add a event to the processing block event list to notify # about deleting from the db self._db.push_event(self.processing_event_name, "deleted", split_key[3]) # Add a event to the scheduling block event list to notify # of a deleting a scheduling block from the db self._db.push_event(self.scheduling_event_name, "deleted", block_id)
def delete_sched_block_instance(self, block_id)
Delete the specified Scheduling Block Instance. Removes the Scheduling Block Instance, and all Processing Blocks that belong to it from the database
4.142608
3.788901
1.093353
LOG.debug("Deleting Processing Block %s ...", processing_block_id) processing_block = self._db.get_block(processing_block_id) if not processing_block: raise RuntimeError('Invalid Processing Block ID: {}' .format(processing_block_id)) for block in processing_block: if 'processing_block' in block: self._db.delete_block(block) # Remove processing block id from scheduling block id scheduling_block_id = block.split(':') scheduling_block_details = self.get_block_details( [scheduling_block_id[1]]) for block_details in scheduling_block_details: block_list = block_details['processing_block_ids'] if processing_block_id in block_list: block_list.remove(processing_block_id) self.update_value(scheduling_block_id[1], 'processing_block_ids', block_list) # Add a event to the processing block event list to notify # about deleting from the db self._db.push_event(self.processing_event_name, "deleted", id)
def delete_processing_block(self, processing_block_id)
Delete the Processing Block with the specified Processing Block ID
3.469643
3.535549
0.981359
schema_path = os.path.join(os.path.dirname(__file__), 'schema', 'scheduling_block_schema.json') with open(schema_path, 'r') as file: schema_data = file.read() schema = json.loads(schema_data) return schema
def _get_schema()
Get the schema for validation
2.758781
2.596645
1.06244
scheduling_block['status'] = "created" for block in scheduling_block: if isinstance(scheduling_block[block], list): for field in scheduling_block[block]: field['status'] = 'created' return scheduling_block
def _add_status(scheduling_block)
This function adds status fields to all the sections in the scheduling block instance
3.150984
3.206962
0.982545
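_add_status tags the top-level block and every entry of any list-valued section; a self-contained check with a minimal scheduling block dict (field names are illustrative):

```python
def _add_status(scheduling_block):
    # Mark the block itself, then every item of each list-valued section, as 'created'.
    scheduling_block['status'] = "created"
    for block in scheduling_block:
        if isinstance(scheduling_block[block], list):
            for field in scheduling_block[block]:
                field['status'] = 'created'
    return scheduling_block

sbi = {'id': 'SBI-demo', 'processing_blocks': [{'id': 'PB-demo'}]}
print(_add_status(sbi))
# {'id': 'SBI-demo',
#  'processing_blocks': [{'id': 'PB-demo', 'status': 'created'}],
#  'status': 'created'}
```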
# Initialise empty list _scheduling_block_data = {} _processing_block_data = {} _processing_block_id = [] for block in scheduling_block: values = scheduling_block[block] if block != 'processing_blocks': _scheduling_block_data[block] = values else: # Check if there is a processing block that already exits in # the database processing_block_id = self.get_processing_block_ids() for value in values: if value['id'] not in processing_block_id: _processing_block_data = values else: raise Exception("Processing block already exits", value['id']) # Adding processing block id to the scheduling block list for block_id in _processing_block_data: _processing_block_id.append(block_id['id']) _scheduling_block_data['processing_block_ids'] = _processing_block_id return _scheduling_block_data, _processing_block_data
def _split_sched_block_instance(self, scheduling_block)
Split the scheduling block data into multiple names before adding to the configuration database
3.010299
2.8948
1.039899
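Setting the duplicate-ID check aside, the split boils down to separating the processing_blocks entry from the rest of the SBI dict and recording the PB ids; a standalone sketch with an invented input:

# Standalone sketch of the split (duplicate-ID check omitted); the input
# dict is invented for illustration.
sbi = {
    'id': 'SBI-001',
    'project': 'sip',
    'processing_blocks': [{'id': 'PB-001'}, {'id': 'PB-002'}],
}

sched_data = {k: v for k, v in sbi.items() if k != 'processing_blocks'}
pb_data = sbi.get('processing_blocks', [])
sched_data['processing_block_ids'] = [pb['id'] for pb in pb_data]

# sched_data -> {'id': 'SBI-001', 'project': 'sip',
#                'processing_block_ids': ['PB-001', 'PB-002']}
# pb_data    -> [{'id': 'PB-001'}, {'id': 'PB-002'}]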
log = logging.getLogger(logger_name)
    log.propagate = propagate

    # Remove existing handlers (avoids duplicate messages)
    for handler in log.handlers:
        log.removeHandler(handler)

    _debug = '%(filename)s:%(lineno)d | ' if show_log_origin else ''

    # P3 mode is intended to work with the fluentd configuration on P3.
    # This has ms timestamp precision and uses '-' as a delimiter
    # between statements in the log file.
    if p3_mode:
        _prefix = '%(asctime)s - %(name)s - %(levelname)s'
        if show_thread:
            _format = '{} - %(threadName)s - {}%(message)s'\
                .format(_prefix, _debug)
        else:
            _format = '{} - {}%(message)s'.format(_prefix, _debug)
        formatter = logging.Formatter(_format)
        formatter.converter = time.gmtime

    # If not in P3 mode, the timestamp will be us precision and use '|'
    # as a separator.
    else:
        _prefix = '%(asctime)s | %(name)s | %(levelname)s'
        if show_thread:
            _format = '{} | %(threadName)s | {}%(message)s'\
                .format(_prefix, _debug)
        else:
            _format = '{} | {}%(message)s'.format(_prefix, _debug)
        formatter = SIPFormatter(_format, datefmt='%Y-%m-%dT%H:%M:%S.%fZ')

    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(formatter)
    log.addHandler(handler)

    # Set the logging level.
    if log_level:
        log.setLevel(log_level)
    else:
        log.setLevel(os.getenv('SIP_LOG_LEVEL', 'DEBUG'))
def init_logger(logger_name='sip', log_level=None, p3_mode: bool = True, show_thread: bool = False, propagate: bool = False, show_log_origin=False)
Initialise the SIP logger.

    Attaches a stdout stream handler to the 'sip' logger. This will apply
    to all logger objects with a name prefixed by 'sip.'

    This function respects the 'SIP_LOG_LEVEL' environment variable to set
    the logging level.

    Args:
        logger_name (str, optional): Name of the logger object.
        log_level (str or int, optional): Logging level for the SIP logger.
        p3_mode (bool, optional): Print logging statements in a format that
            P3 can support.
        show_thread (bool, optional): Display the thread in the log message.
        propagate (bool, optional): Propagate settings to parent loggers.
        show_log_origin (bool, optional): If true, show the origin
            (file, line no.) of log messages.
2.680528
2.718021
0.986206
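A usage sketch, assuming init_logger (and the SIPFormatter it references) are in scope; the example output line is approximate:

import logging

init_logger('sip', log_level='INFO', p3_mode=False, show_thread=True)

log = logging.getLogger('sip.example')
log.info('hello')
# approx: 2024-01-01T12:00:00.000000Z | sip.example | INFO | MainThread | hello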
log = logging.getLogger(logger_name)
    log.propagate = propagate
    for handler in log.handlers:
        log.removeHandler(handler)
def disable_logger(logger_name: str, propagate: bool = False)
Disable output for the logger of the specified name.
2.758217
2.197008
1.255443
log = logging.getLogger(logger_name)
    log.propagate = propagate
    log.setLevel(log_level)
def set_log_level(logger_name: str, log_level: str, propagate: bool = False)
Set the log level of the specified logger.
3.389131
2.620523
1.293303
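Both helpers are thin wrappers around the standard logging module; a quick usage sketch (the logger names are illustrative, and the functions above are assumed to be in scope):

set_log_level('sip', 'INFO')    # raise the threshold of the SIP logger
disable_logger('urllib3')       # strip handlers from a noisy third-party logger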
_seconds_fraction = record.created - int(record.created)
    _datetime_utc = time.mktime(time.gmtime(record.created))
    _datetime_utc += _seconds_fraction
    _created = self.converter(_datetime_utc)
    if datefmt:
        time_string = _created.strftime(datefmt)
    else:
        time_string = _created.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        time_string = "%s,%03d" % (time_string, record.msecs)
    return time_string
def formatTime(self, record, datefmt=None)
Format the log timestamp.
2.92883
2.822178
1.037791
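The override appears to exist so that timestamps can carry microsecond precision via datetime.strftime's %f directive, which time.strftime does not support. A minimal standalone formatter built on the same idea (an invented stand-in, not the SIP class):

import datetime
import logging
import sys


class MicrosecondFormatter(logging.Formatter):
    """Invented stand-in showing the same microsecond-precision idea."""

    def formatTime(self, record, datefmt=None):
        created = datetime.datetime.utcfromtimestamp(record.created)
        if datefmt:
            return created.strftime(datefmt)
        return created.strftime('%Y-%m-%dT%H:%M:%S.%fZ')


handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(MicrosecondFormatter('%(asctime)s | %(message)s'))
log = logging.getLogger('demo')
log.addHandler(handler)
log.setLevel('INFO')
log.info('microsecond timestamps')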
date = datetime.datetime.utcnow().strftime('%Y%m%d')
    if index is None:
        index = randint(0, 999)
    sbi_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
    sb_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
    pb_id = 'PB-{}-sip-demo-{:03d}'.format(date, index)
    print('* Generating SBI: %s, PB: %s' % (sb_id, pb_id))
    sbi = dict(
        id=sbi_id,
        version='1.0.0',
        scheduling_block=dict(
            id=sb_id,
            project='sip',
            programme_block='sip_demos'
        ),
        processing_blocks=[
            dict(
                id=pb_id,
                version='1.0.0',
                type='offline',
                priority=1,
                dependencies=[],
                resources_required=[],
                workflow=dict(
                    id='mock_workflow',
                    version='1.0.0',
                    parameters=dict(
                        stage1=dict(duration=30),
                        stage2=dict(duration=30),
                        stage3=dict(duration=30)
                    )
                )
            )
        ]
    )
    return sbi
def generate_sbi(index: int = None)
Generate an SBI configuration dict.
3.044306
2.873213
1.059548
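A quick usage sketch (generate_sbi as defined above is assumed to be in scope); the dict is typically serialised to JSON before being submitted:

import json

sbi = generate_sbi(index=1)
print(json.dumps(sbi, indent=2))
# "id": "SBI-<today>-sip-demo-001", ...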
log.info('Starting Pulsar Data Transfer...')
    socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id))
    socket.send(json.dumps(config).encode())
    socket.send(bytearray(1000 * 1000))

    # Overwrites the metadata name in the config dict
    # and re-sends the data to the receiver.
    config['metadata']['name'] = 'candidate_two'
    socket.send(json.dumps(config).encode())
    socket.send(bytearray(1000 * 1000))

    socket.close()
    log.info('Pulsar Data Transfer Completed...')
def send(self, config, log, obs_id, beam_id)
Send the pulsar data to the ftp server.

    Args:
        config (dict): Dictionary of settings
        log (logging.Logger): Python logging object
        obs_id: observation id
        beam_id: beam id
4.735879
4.322733
1.095575
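The method assumes self._ftp is an already-connected ftplib.FTP instance; a hedged end-to-end sketch of the same STOR pattern (host, credentials and file name are placeholders):

import json
from ftplib import FTP

# Sketch only: host, credentials and target name are placeholders.
ftp = FTP('ftp.example.org')
ftp.login('user', 'password')

config = {'metadata': {'name': 'candidate_one'}}

# transfercmd('STOR ...') opens the data connection and returns a socket.
sock = ftp.transfercmd('STOR obs01_beam00')
sock.sendall(json.dumps(config).encode())
sock.sendall(bytearray(1000 * 1000))    # dummy payload
sock.close()
ftp.voidresp()                          # wait for the final transfer reply
ftp.quit()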
_url = get_root_url()
    try:
        block = DB.get_block_details([block_id]).__next__()
        response = block
        response['links'] = {
            'self': '{}'.format(request.url),
            'list': '{}/processing-blocks'.format(_url),
            'home': '{}'.format(_url)
        }
        return block
    except IndexError as error:
        response = dict(message='Unable to GET Processing Block',
                        id='{}'.format(block_id),
                        error=error.__str__())
        response['links'] = {
            'list': '{}/processing-blocks'.format(_url),
            'home': '{}'.format(_url)
        }
        return response, HTTPStatus.NOT_FOUND
def get(block_id)
Processing block detail resource.
4.205027
3.844805
1.093691
_url = get_root_url()
    try:
        DB.delete_processing_block(block_id)
        response = dict(message='Deleted block',
                        id='{}'.format(block_id),
                        links=dict(list='{}/processing-blocks'.format(_url),
                                   home='{}'.format(_url)))
        return response, HTTPStatus.OK
    except RuntimeError as error:
        response = dict(error='Failed to delete Processing Block: {}'
                        .format(block_id),
                        reason=str(error),
                        links=dict(list='{}/processing-blocks'.format(_url),
                                   home='{}'.format(_url)))
        return response, HTTPStatus.OK
def delete(block_id)
Processing block detail resource.
4.124532
3.807581
1.083242
Device.init_device(self)
    time.sleep(0.1)
    self.set_state(DevState.STANDBY)
def init_device(self)
Initialise the device.
4.721736
4.133682
1.142259
config_dict = json.loads(sbi_config)
    self.debug_stream('SBI configuration:\n%s',
                      json.dumps(config_dict, indent=2))
    try:
        sbi = Subarray(self.get_name()).configure_sbi(config_dict)
    except jsonschema.exceptions.ValidationError as error:
        return json.dumps(dict(path=error.absolute_path.__str__(),
                               schema_path=error.schema_path.__str__(),
                               message=error.message), indent=2)
    except RuntimeError as error:
        return json.dumps(dict(error=str(error)), indent=2)
    return 'Accepted SBI: {}'.format(sbi.id)
def configure(self, sbi_config: str)
Configure an SBI for this subarray.

    Args:
        sbi_config (str): SBI configuration JSON

    Returns:
        str: response message (accepted SBI id, or a JSON-encoded error).
3.81552
3.387067
1.126497
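The error branch packages the fields of jsonschema's ValidationError into a JSON string; the same pattern can be exercised standalone (schema and document invented for illustration):

import json
import jsonschema

SCHEMA = {'type': 'object', 'required': ['id']}

try:
    jsonschema.validate({}, SCHEMA)     # missing 'id' -> ValidationError
except jsonschema.exceptions.ValidationError as error:
    print(json.dumps(dict(path=str(error.absolute_path),
                          schema_path=str(error.schema_path),
                          message=error.message), indent=2))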
sbi_ids = Subarray(self.get_name()).sbi_ids
    pbs = []
    for sbi_id in sbi_ids:
        sbi = SchedulingBlockInstance(sbi_id)
        pbs.append(sbi.processing_block_ids)
    return 'PB', pbs
def processing_blocks(self)
Return list of PBs associated with the subarray. <http://www.esrf.eu/computing/cs/tango/pytango/v920/server_api/server.html#PyTango.server.pipe>
6.461486
4.665633
1.384911
if sys.version_info >= (3, 7):
        return datetime.fromisoformat(value)
    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
def datetime_from_isoformat(value: str)
Return a datetime object from an isoformat string.

    Args:
        value (str): Datetime string in isoformat.
2.536514
2.508767
1.01106
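A quick usage check covering the strptime fallback format as well (datetime_from_isoformat as defined above is assumed to be in scope):

dt = datetime_from_isoformat('2024-01-01T12:30:45.123456')
print(dt.year, dt.microsecond)    # 2024 123456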
Device.init_device(self)

    # Add anything here that has to be done before the device is set to
    # its ON state.
    self._set_master_state('on')
    self._devProxy = DeviceProxy(self.get_name())
def init_device(self)
Device constructor.
10.807906
9.941969
1.087099