Dataset columns:

    _id               string (2-7 chars)
    title             string (1-88 chars)
    partition         string (3 classes)
    text              string (75-19.8k chars)
    language          string (1 class)
    meta_information  dict
q17100
create_hls_profile
train
def create_hls_profile(apps, schema_editor):
    """ Create hls profile """
    Profile = apps.get_model("edxval", "Profile")
    Profile.objects.get_or_create(profile_name=HLS_PROFILE)
python
{ "resource": "" }
q17101
delete_hls_profile
train
def delete_hls_profile(apps, schema_editor):
    """ Delete hls profile """
    Profile = apps.get_model("edxval", "Profile")
    Profile.objects.filter(profile_name=HLS_PROFILE).delete()
python
{ "resource": "" }
q17102
create_default_profiles
train
def create_default_profiles(apps, schema_editor):
    """ Add default profiles """
    Profile = apps.get_model("edxval", "Profile")
    for profile in DEFAULT_PROFILES:
        Profile.objects.get_or_create(profile_name=profile)
python
{ "resource": "" }
q17103
delete_default_profiles
train
def delete_default_profiles(apps, schema_editor):
    """ Remove default profiles """
    Profile = apps.get_model("edxval", "Profile")
    Profile.objects.filter(profile_name__in=DEFAULT_PROFILES).delete()
python
{ "resource": "" }
q17104
TranscriptSerializer.validate
train
def validate(self, data):
    """
    Validates the transcript data.
    """
    video_id = self.context.get('video_id')
    video = Video.get_or_none(edx_video_id=video_id)
    if not video:
        raise serializers.ValidationError('Video "{video_id}" is not valid.'.format(video_id=video_id))

    data.update(video=video)
    return data
python
{ "resource": "" }
q17105
CourseSerializer.to_internal_value
train
def to_internal_value(self, data):
    """
    Convert data into CourseVideo instance and image filename tuple.
    """
    course_id = data
    course_video = image = ''
    if data:
        if isinstance(data, dict):
            (course_id, image), = list(data.items())
        course_video = CourseVideo(course_id=course_id)
        course_video.full_clean(exclude=['video'])
    return course_video, image
python
{ "resource": "" }
q17106
VideoSerializer.validate
train
def validate(self, data):
    """
    Check that the video data is valid.
    """
    if data is not None and not isinstance(data, dict):
        raise serializers.ValidationError("Invalid data")

    try:
        profiles = [ev["profile"] for ev in data.get("encoded_videos", [])]
        if len(profiles) != len(set(profiles)):
            raise serializers.ValidationError("Invalid data: duplicate profiles")
    except KeyError:
        raise serializers.ValidationError("profile required for deserializing")
    except TypeError:
        raise serializers.ValidationError("profile field needs to be a profile_name (str)")

    # Clean course_video list from any invalid data.
    course_videos = [(course_video, image)
                     for course_video, image in data.get('courses', [])
                     if course_video]
    data['courses'] = course_videos

    return data
python
{ "resource": "" }
q17107
VideoSerializer.create
train
def create(self, validated_data):
    """
    Create the video and its nested resources.
    """
    courses = validated_data.pop("courses", [])
    encoded_videos = validated_data.pop("encoded_videos", [])

    video = Video.objects.create(**validated_data)
    EncodedVideo.objects.bulk_create(
        EncodedVideo(video=video, **video_data)
        for video_data in encoded_videos
    )

    # The CourseSerializer will already have converted the course data
    # to CourseVideo models, so we can just set the video and save.
    # Also create VideoImage objects if an image filename is present.
    for course_video, image_name in courses:
        course_video.video = video
        course_video.save()
        if image_name:
            VideoImage.create_or_update(course_video, image_name)

    return video
python
{ "resource": "" }
q17108
VideoSerializer.update
train
def update(self, instance, validated_data):
    """
    Update an existing video resource.
    """
    instance.status = validated_data["status"]
    instance.client_video_id = validated_data["client_video_id"]
    instance.duration = validated_data["duration"]
    instance.save()

    # Set encoded videos
    instance.encoded_videos.all().delete()
    EncodedVideo.objects.bulk_create(
        EncodedVideo(video=instance, **video_data)
        for video_data in validated_data.get("encoded_videos", [])
    )

    # Set courses
    # NOTE: for backwards compatibility with the DRF v2 behavior,
    # we do NOT delete existing course videos during the update.
    # Also update VideoImage objects if an image filename is present.
    for course_video, image_name in validated_data.get("courses", []):
        course_video.video = instance
        course_video.save()
        if image_name:
            VideoImage.create_or_update(course_video, image_name)

    return instance
python
{ "resource": "" }
q17109
get_video_image_storage
train
def get_video_image_storage():
    """
    Return the configured django storage backend.
    """
    if hasattr(settings, 'VIDEO_IMAGE_SETTINGS'):
        return get_storage_class(
            settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_CLASS'),
        )(**settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_KWARGS', {}))
    else:
        # During edx-platform loading this method gets called before settings
        # are ready, so in that case return the default storage class
        # instance (FileSystemStorage).
        return get_storage_class()()
python
{ "resource": "" }
q17110
get_video_transcript_storage
train
def get_video_transcript_storage():
    """
    Return the configured django storage backend for video transcripts.
    """
    if hasattr(settings, 'VIDEO_TRANSCRIPTS_SETTINGS'):
        return get_storage_class(
            settings.VIDEO_TRANSCRIPTS_SETTINGS.get('STORAGE_CLASS'),
        )(**settings.VIDEO_TRANSCRIPTS_SETTINGS.get('STORAGE_KWARGS', {}))
    else:
        # During edx-platform loading this method gets called before settings
        # are ready, so in that case return the default storage class
        # instance (FileSystemStorage).
        return get_storage_class()()
python
{ "resource": "" }
q17111
create_file_in_fs
train
def create_file_in_fs(file_data, file_name, file_system, static_dir):
    """
    Writes a file into the given file system.

    Arguments:
        file_data (str): Data to store into the file.
        file_name (str): File name of the file to be created.
        file_system (OSFS): Import file system.
        static_dir (str): The directory in which the file is created.
    """
    with file_system.open(combine(static_dir, file_name), 'wb') as f:
        f.write(file_data.encode('utf-8'))
python
{ "resource": "" }
q17112
get_transcript_format
train
def get_transcript_format(transcript_content):
    """
    Returns transcript format.

    Arguments:
        transcript_content (str): Transcript file content.
    """
    try:
        sjson_obj = json.loads(transcript_content)
    except ValueError:
        # With error handling set to 'ERROR_RAISE', we get an exception if
        # something goes wrong while parsing the transcript.
        srt_subs = SubRipFile.from_string(transcript_content, error_handling=SubRipFile.ERROR_RAISE)
        if len(srt_subs) > 0:
            return TranscriptFormat.SRT

    return TranscriptFormat.SJSON
python
{ "resource": "" }
q17113
MultipleFieldLookupMixin.get_object
train
def get_object(self):
    """
    Returns an object instance that should be used for detail views.
    """
    queryset = self.get_queryset()             # Get the base queryset
    queryset = self.filter_queryset(queryset)  # Apply any filter backends
    filter = {}  # pylint: disable=W0622
    for field in self.lookup_fields:
        filter[field] = self.kwargs[field]
    return get_object_or_404(queryset, **filter)
python
{ "resource": "" }
q17114
VideoTranscriptView.post
train
def post(self, request):
    """
    Creates a video transcript instance with the given information.

    Arguments:
        request: A WSGI request.
    """
    attrs = ('video_id', 'name', 'language_code', 'provider', 'file_format')
    missing = [attr for attr in attrs if attr not in request.data]
    if missing:
        LOGGER.warn(
            '[VAL] Required transcript params are missing. %s', ' and '.join(missing)
        )
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data=dict(message=u'{missing} must be specified.'.format(missing=' and '.join(missing)))
        )

    video_id = request.data['video_id']
    language_code = request.data['language_code']
    transcript_name = request.data['name']
    provider = request.data['provider']
    file_format = request.data['file_format']

    supported_formats = sorted(dict(TranscriptFormat.CHOICES).keys())
    if file_format not in supported_formats:
        message = (
            u'"{format}" transcript file type is not supported. Supported formats are "{supported_formats}"'
        ).format(format=file_format, supported_formats=supported_formats)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})

    supported_providers = sorted(dict(TranscriptProviderType.CHOICES).keys())
    if provider not in supported_providers:
        message = (
            u'"{provider}" provider is not supported. Supported transcription providers are "{supported_providers}"'
        ).format(provider=provider, supported_providers=supported_providers)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})

    transcript = VideoTranscript.get_or_none(video_id, language_code)
    if transcript is None:
        create_or_update_video_transcript(video_id, language_code, metadata={
            'provider': provider,
            'file_name': transcript_name,
            'file_format': file_format
        })
        response = Response(status=status.HTTP_200_OK)
    else:
        message = (
            u'Can not override existing transcript for video "{video_id}" and language code "{language}".'
        ).format(video_id=video_id, language=language_code)
        response = Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})

    return response
python
{ "resource": "" }
q17115
VideoStatusView.patch
train
def patch(self, request):
    """
    Update the status of a video.
    """
    attrs = ('edx_video_id', 'status')
    missing = [attr for attr in attrs if attr not in request.data]
    if missing:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{missing}" params must be specified.'.format(missing=' and '.join(missing))}
        )

    edx_video_id = request.data['edx_video_id']
    video_status = request.data['status']
    if video_status not in VALID_VIDEO_STATUSES:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{status}" is not a valid Video status.'.format(status=video_status)}
        )

    try:
        video = Video.objects.get(edx_video_id=edx_video_id)
        video.status = video_status
        video.save()
        response_status = status.HTTP_200_OK
        response_payload = {}
    except Video.DoesNotExist:
        response_status = status.HTTP_400_BAD_REQUEST
        response_payload = {
            'message': u'Video is not found for specified edx_video_id: {edx_video_id}'.format(
                edx_video_id=edx_video_id
            )
        }

    return Response(status=response_status, data=response_payload)
python
{ "resource": "" }
q17116
VideoImagesView.post
train
def post(self, request):
    """
    Update a course video image instance with auto generated image names.
    """
    attrs = ('course_id', 'edx_video_id', 'generated_images')
    missing = [attr for attr in attrs if attr not in request.data]
    if missing:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={
                'message': u'{missing} must be specified to update a video image.'.format(
                    missing=' and '.join(missing)
                )
            }
        )

    course_id = request.data['course_id']
    edx_video_id = request.data['edx_video_id']
    generated_images = request.data['generated_images']

    try:
        course_video = CourseVideo.objects.select_related('video_image').get(
            course_id=six.text_type(course_id), video__edx_video_id=edx_video_id
        )
    except CourseVideo.DoesNotExist:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'CourseVideo not found for course_id: {course_id}'.format(course_id=course_id)}
        )

    try:
        VideoImage.create_or_update(course_video, generated_images=generated_images)
    except ValidationError as ex:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': str(ex)}
        )

    return Response()
python
{ "resource": "" }
q17117
HLSMissingVideoView.put
train
def put(self, request):
    """
    Update a single profile for a given video.

    Example request data:
        {
            'edx_video_id': '1234',
            'profile': 'hls',
            'encode_data': {
                'url': 'foo.com/qwe.m3u8',
                'file_size': 34,
                'bitrate': 12
            }
        }
    """
    edx_video_id = request.data['edx_video_id']
    profile = request.data['profile']
    encode_data = request.data['encode_data']

    video = Video.objects.get(edx_video_id=edx_video_id)
    profile = Profile.objects.get(profile_name=profile)

    # Delete the existing encode for this profile if it's present and
    # create a new one with the updated data.
    EncodedVideo.objects.filter(video=video, profile=profile).delete()
    EncodedVideo.objects.create(video=video, profile=profile, **encode_data)

    return Response(status=status.HTTP_200_OK)
python
{ "resource": "" }
q17118
video_status_update_callback
train
def video_status_update_callback(sender, **kwargs):  # pylint: disable=unused-argument
    """
    Log video status for an existing video instance
    """
    video = kwargs['instance']

    if kwargs['created']:
        logger.info('VAL: Video created with id [%s] and status [%s]', video.edx_video_id, video.status)
    else:
        logger.info('VAL: Status changed to [%s] for video [%s]', video.status, video.edx_video_id)
python
{ "resource": "" }
q17119
ModelFactoryWithValidation.create_with_validation
train
def create_with_validation(cls, *args, **kwargs):
    """
    Factory method that creates and validates the model object before it is saved.
    """
    ret_val = cls(*args, **kwargs)
    ret_val.full_clean()
    ret_val.save()
    return ret_val
python
{ "resource": "" }
q17120
Video.get_or_none
train
def get_or_none(cls, **filter_kwargs):
    """
    Returns a video or None.
    """
    try:
        video = cls.objects.get(**filter_kwargs)
    except cls.DoesNotExist:
        video = None

    return video
python
{ "resource": "" }
q17121
Video.by_youtube_id
train
def by_youtube_id(cls, youtube_id):
    """
    Look up video by youtube id
    """
    qset = cls.objects.filter(
        encoded_videos__profile__profile_name='youtube',
        encoded_videos__url=youtube_id
    ).prefetch_related('encoded_videos', 'courses')
    return qset
python
{ "resource": "" }
q17122
ListField.get_prep_value
train
def get_prep_value(self, value):
    """
    Converts a list to its json representation to store in database as text.
    """
    if value and not isinstance(value, list):
        raise ValidationError(u'ListField value {} is not a list.'.format(value))
    return json.dumps(self.validate_list(value) or [])
python
{ "resource": "" }
q17123
ListField.to_python
train
def to_python(self, value):
    """
    Converts the value into a list.
    """
    if not value:
        value = []

    # If a list is set then validate its items
    if isinstance(value, list):
        py_list = self.validate_list(value)
    else:
        # Try to deserialize the value, expect a list, and then validate it.
        try:
            py_list = json.loads(value)
            if not isinstance(py_list, list):
                raise TypeError
            self.validate_list(py_list)
        except (ValueError, TypeError):
            raise ValidationError(u'Must be a valid list of strings.')

    return py_list
python
{ "resource": "" }
q17124
ListField.validate_list
train
def validate_list(self, value):
    """
    Validate data before saving to database.

    Arguments:
        value(list): list to be validated

    Returns:
        list if validation is successful

    Raises:
        ValidationError
    """
    if len(value) > self.max_items:
        raise ValidationError(
            u'list must not contain more than {max_items} items.'.format(max_items=self.max_items)
        )

    if all(isinstance(item, six.string_types) for item in value) is False:
        raise ValidationError(u'list must only contain strings.')

    return value
python
{ "resource": "" }
q17125
VideoImage.create_or_update
train
def create_or_update(cls, course_video, file_name=None, image_data=None, generated_images=None):
    """
    Create a VideoImage object for a CourseVideo.

    NOTE: If `image_data` is None then the `file_name` value is used as is;
    otherwise a new file name is constructed from a uuid plus the extension
    of `file_name`. `image_data` will be None in case of course re-run and
    export. The `generated_images` list contains names of images auto
    generated by VEDA. If an image is not already set, the first name from
    `generated_images` will be used.

    Arguments:
        course_video (CourseVideo): CourseVideo instance
        file_name (str): File name of the image
        image_data (InMemoryUploadedFile): Image data to be saved.
        generated_images (list): auto generated image names

    Returns:
        Returns a tuple of (video_image, created).
    """
    video_image, created = cls.objects.get_or_create(course_video=course_video)
    if image_data:
        # Delete the existing image only if no one else is using it. This is
        # necessary because after a course re-run, the video in the original
        # course and the one in the new course point to the same image; if we
        # updated the image in the new course and deleted the existing one,
        # the original course's video would be left with no image.
        if not created and VideoImage.objects.filter(image=video_image.image).count() == 1:
            video_image.image.delete()

        with closing(image_data) as image_file:
            file_name = '{uuid}{ext}'.format(uuid=uuid4().hex, ext=os.path.splitext(file_name)[1])
            try:
                video_image.image.save(file_name, image_file)
            except Exception:  # pylint: disable=broad-except
                logger.exception(
                    'VAL: Video Image save failed to storage for course_id [%s] and video_id [%s]',
                    course_video.course_id,
                    course_video.video.edx_video_id
                )
                raise
    else:
        if generated_images:
            video_image.generated_images = generated_images
            if not video_image.image.name:
                file_name = generated_images[0]

        video_image.image.name = file_name

    video_image.save()
    return video_image, created
python
{ "resource": "" }
q17126
VideoTranscript.filename
train
def filename(self):
    """
    Returns readable filename for a transcript
    """
    client_id, __ = os.path.splitext(self.video.client_video_id)
    file_name = u'{name}-{language}.{format}'.format(
        name=client_id,
        language=self.language_code,
        format=self.file_format
    ).replace('\n', ' ')
    return file_name
python
{ "resource": "" }
q17127
VideoTranscript.get_or_none
train
def get_or_none(cls, video_id, language_code):
    """
    Returns a data model object if found or none otherwise.

    Arguments:
        video_id(unicode): video id to which transcript may be associated
        language_code(unicode): language of the requested transcript
    """
    try:
        transcript = cls.objects.get(video__edx_video_id=video_id, language_code=language_code)
    except cls.DoesNotExist:
        transcript = None

    return transcript
python
{ "resource": "" }
q17128
VideoTranscript.create
train
def create(cls, video, language_code, file_format, content, provider):
    """
    Create a Video Transcript.

    Arguments:
        video(Video): Video data model object
        language_code(unicode): A language code.
        file_format(unicode): Transcript file format.
        content(InMemoryUploadedFile): Transcript content.
        provider(unicode): Transcript provider.
    """
    video_transcript = cls(video=video, language_code=language_code, file_format=file_format, provider=provider)
    with closing(content) as transcript_content:
        try:
            file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)
            video_transcript.transcript.save(file_name, transcript_content)
            video_transcript.save()
        except Exception:
            logger.exception(
                '[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
                video.edx_video_id,
                language_code
            )
            raise

    return video_transcript
python
{ "resource": "" }
q17129
VideoTranscript.create_or_update
train
def create_or_update(cls, video, language_code, metadata, file_data=None):
    """
    Create or update Transcript object.

    Arguments:
        video (Video): Video for which transcript is going to be saved.
        language_code (str): language code for (to be created/updated) transcript
        metadata (dict): A dict containing (to be overwritten) properties
        file_data (InMemoryUploadedFile): File data to be saved

    Returns:
        Returns a tuple of (video_transcript, created).
    """
    try:
        video_transcript = cls.objects.get(video=video, language_code=language_code)
        retrieved = True
    except cls.DoesNotExist:
        video_transcript = cls(video=video, language_code=language_code)
        retrieved = False

    for prop, value in six.iteritems(metadata):
        if prop in ['language_code', 'file_format', 'provider']:
            setattr(video_transcript, prop, value)

    transcript_name = metadata.get('file_name')
    try:
        if transcript_name:
            video_transcript.transcript.name = transcript_name
        elif file_data:
            with closing(file_data) as transcript_file_data:
                file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)
                video_transcript.transcript.save(file_name, transcript_file_data)

        video_transcript.save()
    except Exception:
        logger.exception(
            '[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
            video.edx_video_id,
            language_code
        )
        raise

    return video_transcript, not retrieved
python
{ "resource": "" }
q17130
ThirdPartyTranscriptCredentialsState.update_or_create
train
def update_or_create(cls, org, provider, exists):
    """
    Update or create credentials state.
    """
    instance, created = cls.objects.update_or_create(
        org=org,
        provider=provider,
        defaults={'exists': exists},
    )
    return instance, created
python
{ "resource": "" }
q17131
create_video
train
def create_video(video_data):
    """
    Called on to create Video objects in the database

    create_video is used to create Video objects whose children are
    EncodedVideo objects which are linked to Profile objects. This is an
    alternative to the HTTP requests so it can be used internally. The
    VideoSerializer is used to deserialize this object. If there are
    duplicate profile_names, the entire creation will be rejected. If the
    profile is not found in the database, the video will not be created.

    Args:
        video_data (dict):
            {
                url: api url to the video
                edx_video_id: ID of the video
                duration: Length of video in seconds
                client_video_id: client ID of video
                encoded_video: a list of EncodedVideo dicts
                    url: url of the video
                    file_size: size of the video in bytes
                    profile: ID of the profile
                courses: Courses associated with this video
                image: poster image file name for a particular course
            }

    Raises:
        Raises ValCannotCreateError if the video cannot be created.

    Returns the successfully created Video object
    """
    serializer = VideoSerializer(data=video_data)
    if serializer.is_valid():
        serializer.save()
        return video_data.get("edx_video_id")
    else:
        raise ValCannotCreateError(serializer.errors)
python
{ "resource": "" }
q17132
update_video
train
def update_video(video_data):
    """
    Called on to update Video objects in the database

    update_video is used to update Video objects by the given edx_video_id
    in the video_data.

    Args:
        video_data (dict):
            {
                url: api url to the video
                edx_video_id: ID of the video
                duration: Length of video in seconds
                client_video_id: client ID of video
                encoded_video: a list of EncodedVideo dicts
                    url: url of the video
                    file_size: size of the video in bytes
                    profile: ID of the profile
                courses: Courses associated with this video
            }

    Raises:
        Raises ValVideoNotFoundError if the video cannot be retrieved.
        Raises ValCannotUpdateError if the video cannot be updated.

    Returns the successfully updated Video object
    """
    try:
        video = _get_video(video_data.get("edx_video_id"))
    except Video.DoesNotExist:
        error_message = u"Video not found when trying to update video with edx_video_id: {0}".format(
            video_data.get("edx_video_id")
        )
        raise ValVideoNotFoundError(error_message)

    serializer = VideoSerializer(video, data=video_data)
    if serializer.is_valid():
        serializer.save()
        return video_data.get("edx_video_id")
    else:
        raise ValCannotUpdateError(serializer.errors)
python
{ "resource": "" }
q17133
update_video_status
train
def update_video_status(edx_video_id, status):
    """
    Update status for an existing video.

    Args:
        edx_video_id: ID of the video
        status: video status

    Raises:
        Raises ValVideoNotFoundError if the video cannot be retrieved.
    """
    try:
        video = _get_video(edx_video_id)
    except Video.DoesNotExist:
        error_message = u"Video not found when trying to update video status with edx_video_id: {0}".format(
            edx_video_id
        )
        raise ValVideoNotFoundError(error_message)

    video.status = status
    video.save()
python
{ "resource": "" }
q17134
get_transcript_credentials_state_for_org
train
def get_transcript_credentials_state_for_org(org, provider=None):
    """
    Returns transcript credentials state for an org

    Arguments:
        org (unicode): course organization
        provider (unicode): transcript provider

    Returns:
        dict: provider name and their credential existence map

        {
            u'Cielo24': True
        }
        {
            u'3PlayMedia': False,
            u'Cielo24': True
        }
    """
    query_filter = {'org': org}
    if provider:
        query_filter['provider'] = provider

    return {
        credential.provider: credential.exists
        for credential in ThirdPartyTranscriptCredentialsState.objects.filter(**query_filter)
    }
python
{ "resource": "" }
q17135
is_transcript_available
train
def is_transcript_available(video_id, language_code=None):
    """
    Returns whether the transcripts are available for a video.

    Arguments:
        video_id: it can be an edx_video_id or an external_id extracted from
            external sources in a video component.
        language_code: it will be the language code of the requested transcript.
    """
    filter_attrs = {'video__edx_video_id': video_id}
    if language_code:
        filter_attrs['language_code'] = language_code

    transcript_set = VideoTranscript.objects.filter(**filter_attrs)
    return transcript_set.exists()
python
{ "resource": "" }
q17136
get_video_transcript
train
def get_video_transcript(video_id, language_code):
    """
    Get video transcript info

    Arguments:
        video_id(unicode): A video id, it can be an edx_video_id or an
            external video id extracted from external sources of a video
            component.
        language_code(unicode): it will be the language code of the
            requested transcript.
    """
    transcript = VideoTranscript.get_or_none(video_id=video_id, language_code=language_code)
    return TranscriptSerializer(transcript).data if transcript else None
python
{ "resource": "" }
q17137
get_video_transcript_data
train
def get_video_transcript_data(video_id, language_code):
    """
    Get video transcript data

    Arguments:
        video_id(unicode): An id identifying the Video.
        language_code(unicode): it will be the language code of the
            requested transcript.

    Returns:
        A dict containing transcript file name and its content.
    """
    video_transcript = VideoTranscript.get_or_none(video_id, language_code)
    if video_transcript:
        try:
            return dict(file_name=video_transcript.filename, content=video_transcript.transcript.file.read())
        except Exception:
            logger.exception(
                '[edx-val] Error while retrieving transcript for video=%s -- language_code=%s',
                video_id,
                language_code
            )
            raise
python
{ "resource": "" }
q17138
get_available_transcript_languages
train
def get_available_transcript_languages(video_id):
    """
    Get available transcript languages

    Arguments:
        video_id(unicode): An id identifying the Video.

    Returns:
        A list containing transcript language codes for the Video.
    """
    available_languages = VideoTranscript.objects.filter(
        video__edx_video_id=video_id
    ).values_list(
        'language_code', flat=True
    )
    return list(available_languages)
python
{ "resource": "" }
q17139
get_video_transcript_url
train
def get_video_transcript_url(video_id, language_code):
    """
    Returns course video transcript url or None if no transcript

    Arguments:
        video_id: it can be an edx_video_id or an external_id extracted from
            external sources in a video component.
        language_code: language code of a video transcript
    """
    video_transcript = VideoTranscript.get_or_none(video_id, language_code)
    if video_transcript:
        return video_transcript.url()
python
{ "resource": "" }
q17140
create_video_transcript
train
def create_video_transcript(video_id, language_code, file_format, content, provider=TranscriptProviderType.CUSTOM):
    """
    Create a video transcript.

    Arguments:
        video_id(unicode): An Id identifying the Video data model object.
        language_code(unicode): A language code.
        file_format(unicode): Transcript file format.
        content(InMemoryUploadedFile): Transcript content.
        provider(unicode): Transcript provider (it will be 'custom' by default if not selected).
    """
    transcript_serializer = TranscriptSerializer(
        data=dict(provider=provider, language_code=language_code, file_format=file_format),
        context=dict(video_id=video_id),
    )
    if transcript_serializer.is_valid():
        transcript_serializer.save(content=content)
        return transcript_serializer.data
    else:
        raise ValCannotCreateError(transcript_serializer.errors)
python
{ "resource": "" }
q17141
create_or_update_video_transcript
train
def create_or_update_video_transcript(video_id, language_code, metadata, file_data=None):
    """
    Create or Update video transcript for an existing video.

    Arguments:
        video_id: it can be an edx_video_id or an external_id extracted from
            external sources in a video component.
        language_code: language code of a video transcript
        metadata (dict): A dict containing (to be overwritten) properties
        file_data (InMemoryUploadedFile): Transcript data to be saved for a
            course video.

    Returns:
        video transcript url
    """
    # Filter wanted properties
    metadata = {
        prop: value
        for prop, value in six.iteritems(metadata)
        if prop in ['provider', 'language_code', 'file_name', 'file_format'] and value
    }

    file_format = metadata.get('file_format')
    if file_format and file_format not in list(dict(TranscriptFormat.CHOICES).keys()):
        raise InvalidTranscriptFormat('{} transcript format is not supported'.format(file_format))

    provider = metadata.get('provider')
    if provider and provider not in list(dict(TranscriptProviderType.CHOICES).keys()):
        raise InvalidTranscriptProvider('{} transcript provider is not supported'.format(provider))

    try:
        # Video should be present in edxval in order to attach transcripts to it.
        video = Video.objects.get(edx_video_id=video_id)
        video_transcript, __ = VideoTranscript.create_or_update(video, language_code, metadata, file_data)
    except Video.DoesNotExist:
        return None

    return video_transcript.url()
python
{ "resource": "" }
q17142
delete_video_transcript
train
def delete_video_transcript(video_id, language_code):
    """
    Delete transcript for an existing video.

    Arguments:
        video_id: id identifying the video to which the transcript is associated.
        language_code: language code of a video transcript.
    """
    video_transcript = VideoTranscript.get_or_none(video_id, language_code)
    if video_transcript:
        # Delete the transcript content from storage.
        video_transcript.transcript.delete()
        # Delete the transcript metadata from db.
        video_transcript.delete()
        logger.info('Transcript is removed for video "%s" and language code "%s"', video_id, language_code)
python
{ "resource": "" }
q17143
get_transcript_preferences
train
def get_transcript_preferences(course_id):
    """
    Retrieves course wide transcript preferences

    Arguments:
        course_id (str): course id
    """
    try:
        transcript_preference = TranscriptPreference.objects.get(course_id=course_id)
    except TranscriptPreference.DoesNotExist:
        return

    return TranscriptPreferenceSerializer(transcript_preference).data
python
{ "resource": "" }
q17144
create_or_update_transcript_preferences
train
def create_or_update_transcript_preferences(course_id, **preferences):
    """
    Creates or updates course-wide transcript preferences

    Arguments:
        course_id(str): course id

    Keyword Arguments:
        preferences(dict): keyword arguments
    """
    transcript_preference, __ = TranscriptPreference.objects.update_or_create(
        course_id=course_id, defaults=preferences
    )
    return TranscriptPreferenceSerializer(transcript_preference).data
python
{ "resource": "" }
q17145
remove_transcript_preferences
train
def remove_transcript_preferences(course_id):
    """
    Deletes course-wide transcript preferences.

    Arguments:
        course_id(str): course id
    """
    try:
        transcript_preference = TranscriptPreference.objects.get(course_id=course_id)
        transcript_preference.delete()
    except TranscriptPreference.DoesNotExist:
        pass
python
{ "resource": "" }
q17146
get_course_video_image_url
train
def get_course_video_image_url(course_id, edx_video_id):
    """
    Returns course video image url or None if no image found
    """
    try:
        video_image = CourseVideo.objects.select_related('video_image').get(
            course_id=course_id, video__edx_video_id=edx_video_id
        ).video_image
        return video_image.image_url()
    except ObjectDoesNotExist:
        return None
python
{ "resource": "" }
q17147
update_video_image
train
def update_video_image(edx_video_id, course_id, image_data, file_name):
    """
    Update video image for an existing video.

    NOTE: If `image_data` is None then the `file_name` value is used as is;
    otherwise a new file name is constructed from a uuid plus the extension
    of `file_name`. `image_data` will be None in case of course re-run and
    export.

    Arguments:
        image_data (InMemoryUploadedFile): Image data to be saved for a course video.

    Returns:
        course video image url

    Raises:
        Raises ValVideoNotFoundError if the CourseVideo cannot be retrieved.
    """
    try:
        course_video = CourseVideo.objects.select_related('video').get(
            course_id=course_id, video__edx_video_id=edx_video_id
        )
    except ObjectDoesNotExist:
        error_message = u'VAL: CourseVideo not found for edx_video_id: {0} and course_id: {1}'.format(
            edx_video_id,
            course_id
        )
        raise ValVideoNotFoundError(error_message)

    video_image, _ = VideoImage.create_or_update(course_video, file_name, image_data)
    return video_image.image_url()
python
{ "resource": "" }
q17148
create_profile
train
def create_profile(profile_name):
    """
    Used to create Profile objects in the database

    A profile needs to exist before an EncodedVideo object can be created.

    Args:
        profile_name (str): ID of the profile

    Raises:
        ValCannotCreateError: Raised if the profile name is invalid or exists
    """
    try:
        profile = Profile(profile_name=profile_name)
        profile.full_clean()
        profile.save()
    except ValidationError as err:
        raise ValCannotCreateError(err.message_dict)
python
{ "resource": "" }
q17149
_get_video
train
def _get_video(edx_video_id):
    """
    Get a Video instance, prefetching encoded video and course information.

    Raises ValVideoNotFoundError if the video cannot be retrieved.
    """
    try:
        return Video.objects.prefetch_related("encoded_videos", "courses").get(edx_video_id=edx_video_id)
    except Video.DoesNotExist:
        error_message = u"Video not found for edx_video_id: {0}".format(edx_video_id)
        raise ValVideoNotFoundError(error_message)
    except Exception:
        error_message = u"Could not get edx_video_id: {0}".format(edx_video_id)
        logger.exception(error_message)
        raise ValInternalError(error_message)
python
{ "resource": "" }
q17150
get_urls_for_profiles
train
def get_urls_for_profiles(edx_video_id, profiles):
    """
    Returns a dict mapping profiles to URLs.

    If the profiles or video is not found, urls will be blank.

    Args:
        edx_video_id (str): id of the video
        profiles (list): list of profiles we want to search for

    Returns:
        (dict): A dict containing the profile to url pair
    """
    profiles_to_urls = {profile: None for profile in profiles}
    try:
        video_info = get_video_info(edx_video_id)
    except ValVideoNotFoundError:
        return profiles_to_urls

    for encoded_video in video_info["encoded_videos"]:
        if encoded_video["profile"] in profiles:
            profiles_to_urls[encoded_video["profile"]] = encoded_video["url"]

    return profiles_to_urls
python
{ "resource": "" }
q17151
_get_videos_for_filter
train
def _get_videos_for_filter(video_filter, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None):
    """
    Returns a generator expression that contains the videos found, sorted by
    the given field and direction, with ties broken by edx_video_id to
    ensure a total order.
    """
    videos = Video.objects.filter(**video_filter)
    paginator_context = {}
    if sort_field:
        # Refining by edx_video_id ensures a total order
        videos = videos.order_by(sort_field.value, "edx_video_id")
        if sort_dir == SortDirection.desc:
            videos = videos.reverse()

    if pagination_conf:
        videos_per_page = pagination_conf.get('videos_per_page')
        paginator = Paginator(videos, videos_per_page)
        videos = paginator.page(pagination_conf.get('page_number'))
        paginator_context = {
            'current_page': videos.number,
            'total_pages': videos.paginator.num_pages,
            'items_on_one_page': videos_per_page
        }

    return (VideoSerializer(video).data for video in videos), paginator_context
python
{ "resource": "" }
q17152
get_course_video_ids_with_youtube_profile
train
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None):
    """
    Returns a list that contains all the course ids and video ids with the youtube profile

    Args:
        course_ids (list): valid course ids
        limit (int): batch records limit
        offset (int): an offset for selecting a batch

    Returns:
        (list): Tuples of course_id, edx_video_id and youtube video url
    """
    course_videos = (CourseVideo.objects.select_related('video')
                     .prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
                     .filter(video__encoded_videos__profile__profile_name='youtube')
                     .order_by('id')
                     .distinct())

    if course_ids:
        course_videos = course_videos.filter(course_id__in=course_ids)

    course_videos = course_videos.values_list('course_id', 'video__edx_video_id')
    if limit is not None and offset is not None:
        course_videos = course_videos[offset: offset + limit]

    course_videos_with_yt_profile = []
    for course_id, edx_video_id in course_videos:
        yt_profile = EncodedVideo.objects.filter(
            video__edx_video_id=edx_video_id,
            profile__profile_name='youtube'
        ).first()

        if yt_profile:
            course_videos_with_yt_profile.append((
                course_id, edx_video_id, yt_profile.url
            ))

    return course_videos_with_yt_profile
python
{ "resource": "" }
q17153
get_videos_for_course
train
def get_videos_for_course(course_id, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None):
    """
    Returns an iterator of videos for the given course id.

    Args:
        course_id (String)
        sort_field (VideoSortField)
        sort_dir (SortDirection)

    Returns:
        A generator expression that contains the videos found, sorted by the
        given field and direction, with ties broken by edx_video_id to
        ensure a total order.
    """
    return _get_videos_for_filter(
        {'courses__course_id': six.text_type(course_id), 'courses__is_hidden': False},
        sort_field,
        sort_dir,
        pagination_conf,
    )
python
{ "resource": "" }
q17154
remove_video_for_course
train
def remove_video_for_course(course_id, edx_video_id):
    """
    Soft deletes video for particular course.

    Arguments:
        course_id (str): id of the course
        edx_video_id (str): id of the video to be hidden
    """
    course_video = CourseVideo.objects.get(course_id=course_id, video__edx_video_id=edx_video_id)
    course_video.is_hidden = True
    course_video.save()
python
{ "resource": "" }
q17155
get_videos_for_ids
train
def get_videos_for_ids(edx_video_ids, sort_field=None, sort_dir=SortDirection.asc):
    """
    Returns an iterator of videos that match the given list of ids.

    Args:
        edx_video_ids (list)
        sort_field (VideoSortField)
        sort_dir (SortDirection)

    Returns:
        A generator expression that contains the videos found, sorted by the
        given field and direction, with ties broken by edx_video_id to
        ensure a total order
    """
    videos, __ = _get_videos_for_filter(
        {"edx_video_id__in": edx_video_ids},
        sort_field,
        sort_dir,
    )
    return videos
python
{ "resource": "" }
q17156
get_video_info_for_course_and_profiles
train
def get_video_info_for_course_and_profiles(course_id, profiles):
    """
    Returns a dict of edx_video_ids with a dict of requested profiles.

    Args:
        course_id (str): id of the course
        profiles (list): list of profile_names

    Returns:
        (dict): Returns all the profiles attached to a specific edx_video_id
        {
            edx_video_id: {
                'duration': length of the video in seconds,
                'profiles': {
                    profile_name: {
                        'url': url of the encoding
                        'file_size': size of the file in bytes
                    },
                }
            },
        }

    Example:
        Given two videos with two profiles each in course_id 'test_course':
        {
            u'edx_video_id_1': {
                u'duration': 1111,
                u'profiles': {
                    u'mobile': {
                        'url': u'http://www.example.com/meow',
                        'file_size': 2222
                    },
                    u'desktop': {
                        'url': u'http://www.example.com/woof',
                        'file_size': 4444
                    }
                }
            },
            u'edx_video_id_2': {
                u'duration': 2222,
                u'profiles': {
                    u'mobile': {
                        'url': u'http://www.example.com/roar',
                        'file_size': 6666
                    },
                    u'desktop': {
                        'url': u'http://www.example.com/bzzz',
                        'file_size': 8888
                    }
                }
            }
        }
    """
    # In case someone passes in a key (VAL doesn't really understand opaque keys)
    course_id = six.text_type(course_id)
    try:
        encoded_videos = EncodedVideo.objects.filter(
            profile__profile_name__in=profiles,
            video__courses__course_id=course_id
        ).select_related()
    except Exception:
        error_message = u"Could not get encoded videos for course: {0}".format(course_id)
        logger.exception(error_message)
        raise ValInternalError(error_message)

    # DRF serializers were causing extra queries for some reason...
    return_dict = {}
    for enc_vid in encoded_videos:
        # Add duration to edx_video_id
        return_dict.setdefault(enc_vid.video.edx_video_id, {}).update(
            {
                "duration": enc_vid.video.duration,
            }
        )
        # Add profile information to edx_video_id's profiles
        return_dict[enc_vid.video.edx_video_id].setdefault("profiles", {}).update(
            {enc_vid.profile.profile_name: {
                "url": enc_vid.url,
                "file_size": enc_vid.file_size,
            }}
        )
    return return_dict
python
{ "resource": "" }
q17157
copy_course_videos
train
def copy_course_videos(source_course_id, destination_course_id):
    """
    Adds the destination_course_id to the videos taken from the source_course_id

    Args:
        source_course_id: The original course_id
        destination_course_id: The new course_id where the videos will be copied
    """
    if source_course_id == destination_course_id:
        return

    course_videos = CourseVideo.objects.select_related('video', 'video_image').filter(
        course_id=six.text_type(source_course_id)
    )

    for course_video in course_videos:
        destination_course_video, __ = CourseVideo.objects.get_or_create(
            video=course_video.video,
            course_id=destination_course_id
        )
        if hasattr(course_video, 'video_image'):
            VideoImage.create_or_update(
                course_video=destination_course_video,
                file_name=course_video.video_image.image.name
            )
python
{ "resource": "" }
q17158
export_to_xml
train
def export_to_xml(video_id, resource_fs, static_dir, course_id=None):
    """
    Exports data for a video into an xml object.

    NOTE: For external video ids, only transcript information is added to
    the xml. If external=False, then edx_video_id is going to be on the
    first index of the list.

    Arguments:
        video_id (str): Video id of the video to export transcripts.
        course_id (str): The ID of the course with which this video is associated.
        static_dir (str): The directory to store the transcript file in.
        resource_fs (SubFS): Export file system.

    Returns:
        An lxml video_asset element containing export data

    Raises:
        ValVideoNotFoundError: if the video does not exist
    """
    video_image_name = ''
    video = _get_video(video_id)

    try:
        course_video = CourseVideo.objects.select_related('video_image').get(course_id=course_id, video=video)
        video_image_name = course_video.video_image.image.name
    except ObjectDoesNotExist:
        pass

    video_el = Element(
        'video_asset',
        attrib={
            'client_video_id': video.client_video_id,
            'duration': six.text_type(video.duration),
            'image': video_image_name
        }
    )
    for encoded_video in video.encoded_videos.all():
        SubElement(
            video_el,
            'encoded_video',
            {
                name: six.text_type(getattr(encoded_video, name))
                for name in ['profile', 'url', 'file_size', 'bitrate']
            }
        )

    return create_transcripts_xml(video_id, video_el, resource_fs, static_dir)
python
{ "resource": "" }
q17159
create_transcript_file
train
def create_transcript_file(video_id, language_code, file_format, resource_fs, static_dir):
    """
    Writes transcript file to file system.

    Arguments:
        video_id (str): Video id of the video the transcript file is attached to.
        language_code (str): Language code of the transcript.
        file_format (str): File format of the transcript file.
        static_dir (str): The directory to store the transcript file in.
        resource_fs (SubFS): The file system to store transcripts.
    """
    transcript_filename = '{video_id}-{language_code}.srt'.format(
        video_id=video_id,
        language_code=language_code
    )
    transcript_data = get_video_transcript_data(video_id, language_code)
    if transcript_data:
        transcript_content = Transcript.convert(
            transcript_data['content'],
            input_format=file_format,
            output_format=Transcript.SRT
        )
        create_file_in_fs(transcript_content, transcript_filename, resource_fs, static_dir)
        return transcript_filename
python
{ "resource": "" }
q17160
create_transcripts_xml
train
def create_transcripts_xml(video_id, video_el, resource_fs, static_dir):
    """
    Creates xml for transcripts. For each transcript element, an associated
    transcript file is also created in course OLX.

    Arguments:
        video_id (str): Video id of the video.
        video_el (Element): lxml Element object
        static_dir (str): The directory to store the transcript file in.
        resource_fs (SubFS): The file system to store transcripts.

    Returns:
        lxml Element object with transcripts information
    """
    video_transcripts = VideoTranscript.objects.filter(video__edx_video_id=video_id).order_by('language_code')
    # Create transcripts node only when we have transcripts for a video.
    if video_transcripts.exists():
        transcripts_el = SubElement(video_el, 'transcripts')

    transcript_files_map = {}
    for video_transcript in video_transcripts:
        language_code = video_transcript.language_code
        file_format = video_transcript.file_format

        try:
            transcript_filename = create_transcript_file(
                video_id=video_id,
                language_code=language_code,
                file_format=file_format,
                resource_fs=resource_fs.delegate_fs(),
                static_dir=combine(u'course', static_dir)  # File system should not start from /draft directory.
            )
            transcript_files_map[language_code] = transcript_filename
        except TranscriptsGenerationException:
            # We don't want to halt export in this case; just log and move to the next transcript.
            logger.exception('[VAL] Error while generating "%s" transcript for video["%s"].',
                             language_code, video_id)
            continue

        SubElement(
            transcripts_el,
            'transcript',
            {
                'language_code': language_code,
                'file_format': Transcript.SRT,
                'provider': video_transcript.provider,
            }
        )

    return dict(xml=video_el, transcripts=transcript_files_map)
python
{ "resource": "" }
q17161
import_from_xml
train
def import_from_xml(xml, edx_video_id, resource_fs, static_dir, external_transcripts=dict(), course_id=None):
    """
    Imports data from a video_asset element about the given video_id.

    If the edx_video_id already exists, then no changes are made. If an
    unknown profile is referenced by an encoded video, that encoding will
    be ignored.

    Arguments:
        xml (Element): An lxml video_asset element containing import data
        edx_video_id (str): val video id
        resource_fs (OSFS): Import file system.
        static_dir (str): The directory to retrieve the transcript file from.
        external_transcripts (dict): A dict containing the list of names of
            the external transcripts.

            Example:
            {
                'en': ['The_Flash.srt', 'Harry_Potter.srt'],
                'es': ['Green_Arrow.srt']
            }
        course_id (str): The ID of a course to associate the video with

    Raises:
        ValCannotCreateError: if there is an error importing the video

    Returns:
        edx_video_id (str): val video id.
    """
    if xml.tag != 'video_asset':
        raise ValCannotCreateError('Invalid XML')

    # If video with edx_video_id already exists, associate it with the given course_id.
    try:
        if not edx_video_id:
            raise Video.DoesNotExist
        video = Video.objects.get(edx_video_id=edx_video_id)
        logger.info(
            "edx_video_id '%s' present in course '%s' not imported because it exists in VAL.",
            edx_video_id,
            course_id,
        )

        # We don't want to link an existing video to a course if it's an external video.
        # External videos do not have any playback profiles associated; they exist just to
        # track video transcripts for those video components that do not use edx hosted
        # videos for playback.
        if course_id and video.status != EXTERNAL_VIDEO_STATUS:
            course_video, __ = CourseVideo.get_or_create_with_validation(video=video, course_id=course_id)

            image_file_name = xml.get('image', '').strip()
            if image_file_name:
                VideoImage.create_or_update(course_video, image_file_name)

        return edx_video_id
    except ValidationError as err:
        logger.exception(err.message)
        raise ValCannotCreateError(err.message_dict)
    except Video.DoesNotExist:
        pass

    if edx_video_id:
        # Video with edx_video_id did not exist, so create one from xml data.
        data = {
            'edx_video_id': edx_video_id,
            'client_video_id': xml.get('client_video_id'),
            'duration': xml.get('duration'),
            'status': 'imported',
            'encoded_videos': [],
            'courses': [{course_id: xml.get('image')}] if course_id else [],
        }
        for encoded_video_el in xml.iterfind('encoded_video'):
            profile_name = encoded_video_el.get('profile')
            try:
                Profile.objects.get(profile_name=profile_name)
            except Profile.DoesNotExist:
                logger.info(
                    "Imported edx_video_id '%s' contains unknown profile '%s'.",
                    edx_video_id,
                    profile_name
                )
                continue
            data['encoded_videos'].append({
                'profile': profile_name,
                'url': encoded_video_el.get('url'),
                'file_size': encoded_video_el.get('file_size'),
                'bitrate': encoded_video_el.get('bitrate'),
            })

        if not data['encoded_videos']:
            # Video status does not get included in the video xml at the time of export, so
            # at this point we cannot tell from the xml whether a video had an external
            # status. But if encoded videos are not set, the chances are the video was an
            # external one, in which case we will not link it to the course(s). Even if the
            # video wasn't external and has 0 encodes in the xml, not linking it to a course
            # has no side effect, since the video was already non-playable.
            data['status'] = EXTERNAL_VIDEO_STATUS
            data['courses'] = []

        edx_video_id = create_video(data)
    else:
        # Create an external video if there is no edx_video_id.
        edx_video_id = create_external_video('External Video')

    create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts)

    return edx_video_id
python
{ "resource": "" }
q17162
import_transcript_from_fs
train
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir):
    """
    Imports a transcript file from the file system and creates a transcript record in DS.

    Arguments:
        edx_video_id (str): Video id of the video.
        language_code (unicode): Language code of the requested transcript.
        file_name (unicode): File name of the transcript file.
        provider (unicode): Transcript provider.
        resource_fs (OSFS): Import file system.
        static_dir (str): The directory to retrieve the transcript file from.
    """
    file_format = None
    transcript_data = get_video_transcript_data(edx_video_id, language_code)

    # First check that a transcript record does not already exist.
    if not transcript_data:
        # Read the file from the import file system and attach it to a transcript record in DS.
        try:
            with resource_fs.open(combine(static_dir, file_name), 'rb') as f:
                file_content = f.read()
                file_content = file_content.decode('utf-8-sig')
        except ResourceNotFound as exc:
            # Don't raise an exception in case the transcript file is not found in course OLX.
            logger.warn(
                '[edx-val] "%s" transcript "%s" for video "%s" is not found.',
                language_code,
                file_name,
                edx_video_id
            )
            return
        except UnicodeDecodeError:
            # Don't raise an exception in case the transcript contains non-utf8 content.
            logger.warn(
                '[edx-val] "%s" transcript "%s" for video "%s" contains a non-utf8 file content.',
                language_code,
                file_name,
                edx_video_id
            )
            return

        # Get the file format from the transcript content.
        try:
            file_format = get_transcript_format(file_content)
        except Error as ex:
            # Don't raise an exception; just don't create a transcript record.
            logger.warn(
                '[edx-val] Error while getting transcript format for video=%s -- language_code=%s -- file_name=%s',
                edx_video_id,
                language_code,
                file_name
            )
            return

        # Create the transcript record.
        create_video_transcript(
            video_id=edx_video_id,
            language_code=language_code,
            file_format=file_format,
            content=ContentFile(file_content),
            provider=provider
        )
python
{ "resource": "" }
q17163
create_transcript_objects
train
def create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts):
    """
    Create VideoTranscript objects.

    Arguments:
        xml (Element): lxml Element object.
        edx_video_id (str): Video id of the video.
        resource_fs (OSFS): Import file system.
        static_dir (str): The directory to retrieve the transcript file from.
        external_transcripts (dict): A dict containing the list of names of
            the external transcripts.

            Example:
            {
                'en': ['The_Flash.srt', 'Harry_Potter.srt'],
                'es': ['Green_Arrow.srt']
            }
    """
    # File system should not start from /draft directory.
    with open_fs(resource_fs.root_path.split('/drafts')[0]) as file_system:
        # First import VAL transcripts.
        for transcript in xml.findall('.//transcripts/transcript'):
            try:
                file_format = transcript.attrib['file_format']
                language_code = transcript.attrib['language_code']
                transcript_file_name = u'{edx_video_id}-{language_code}.{file_format}'.format(
                    edx_video_id=edx_video_id,
                    language_code=language_code,
                    file_format=file_format
                )
                import_transcript_from_fs(
                    edx_video_id=edx_video_id,
                    language_code=transcript.attrib['language_code'],
                    file_name=transcript_file_name,
                    provider=transcript.attrib['provider'],
                    resource_fs=file_system,
                    static_dir=static_dir
                )
            except KeyError:
                logger.warn("VAL: Required attributes are missing from xml, xml=[%s]",
                            etree.tostring(transcript).strip())

        # This won't overwrite a transcript for a language which is already present for the video.
        for language_code, transcript_file_names in six.iteritems(external_transcripts):
            for transcript_file_name in transcript_file_names:
                import_transcript_from_fs(
                    edx_video_id=edx_video_id,
                    language_code=language_code,
                    file_name=transcript_file_name,
                    provider=TranscriptProviderType.CUSTOM,
                    resource_fs=file_system,
                    static_dir=static_dir
                )
python
{ "resource": "" }
q17164
get_cache_stats
train
def get_cache_stats():
    """
    Returns a list of dictionaries of all cache servers and their stats,
    if they provide stats.
    """
    cache_stats = []
    for name, _ in six.iteritems(settings.CACHES):
        cache_backend = caches[name]
        try:
            cache_backend_stats = cache_backend._cache.get_stats()
        except AttributeError:  # this backend doesn't provide stats
            logger.info(
                'The memcached backend "{0}" does not support or '
                'provide stats.'.format(name)
            )
            continue

        for address, stats in cache_backend_stats:
            cache_stats.append(
                {'name': name, 'address': address, 'stats': stats}
            )
    return cache_stats
python
{ "resource": "" }
q17165
register_git_injector
train
def register_git_injector(username, password):
    """Generate a script that writes the password to the git command line tool"""
    fd, tmp_path = mkstemp()
    atexit.register(lambda: os.remove(tmp_path))
    with os.fdopen(fd, 'w') as f:
        f.write(ASKPASS.format(username=username, password=password or ''))
    chmod(tmp_path, 0o700)
    os.environ['GIT_ASKPASS'] = tmp_path
python
{ "resource": "" }
q17166
to_snake
train
def to_snake(camel):
    """TimeSkill -> time_skill"""
    if not camel:
        return camel
    return ''.join(
        '_' + x if 'A' <= x <= 'Z' else x
        for x in camel
    ).lower()[camel[0].isupper():]
python
{ "resource": "" }
q17167
serialized
train
def serialized(func):
    """Write a serializer by yielding each line of output"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        return '\n'.join(
            ' '.join(parts) if isinstance(parts, tuple) else parts
            for parts in func(*args, **kwargs)
        )
    return wrapper
python
{ "resource": "" }
q17168
UpgradeAction.create_pr_message
train
def create_pr_message(self, skill_git: Git, skill_repo: Repository) -> tuple:
    """Reads git commits from the skill repo to create a list of changes as the PR content"""
    title = 'Upgrade ' + self.skill.name
    body = body_template.format(
        skill_name=self.skill.name,
        commits='\n'.join(
            ' - [{}]({})'.format(
                skill_git.show('-s', sha, format='%s'),
                skill_repo.get_commit(sha).html_url
            )
            for sha in skill_git.rev_list(
                '--ancestry-path', '{}..{}'.format(self.skill.entry.sha, 'HEAD')
            ).split('\n')
        )
    )
    return title, body
python
{ "resource": "" }
q17169
ship_move
train
def ship_move(ship, x, y, speed):
    """Moves SHIP to the new location X,Y."""
    click.echo('Moving ship %s to %s,%s with speed %s' % (ship, x, y, speed))
python
{ "resource": "" }
q17170
_check_status
train
def _check_status(sdp_state):
    """SDP Status check.

    Do all the tests to determine, if the SDP state is "broken", what
    could be the cause, and return a suitable status message to be sent
    back by the calling function.
    """
    try:
        errval = "error"
        errdict = dict(state="unknown", reason="unknown")
        if sdp_state.current_state == "unknown":
            errdict['reason'] = 'database not initialised.'
            LOG.debug('Current state is unknown;')
            LOG.debug('Target state is %s;', sdp_state.target_state)
            LOG.debug('Current state timestamp is %s;',
                      sdp_state.current_timestamp)
        elif sdp_state.current_state is None:
            errdict['reason'] = 'Master Controller Services may have died.'
            LOG.debug('Current state is NONE;')
            LOG.debug('Target state is %s;', sdp_state.target_state)
            LOG.debug('Current state timestamp is %s;',
                      sdp_state.current_timestamp)
        elif sdp_state.target_state is None:
            errdict['reason'] = 'Master Controller Services may have died.'
            LOG.debug('Current state is %s;', sdp_state.current_state)
            LOG.debug('Target state is NONE;')
            LOG.debug('Current state timestamp is %s;',
                      sdp_state.current_timestamp)
            LOG.debug('Target state timestamp is %s;',
                      sdp_state.target_timestamp)
        elif sdp_state.current_timestamp is None:
            errdict['reason'] = 'Master Controller Services may have died.'
            LOG.debug('Current state is %s;', sdp_state.current_state)
            LOG.debug('Target state is %s;', sdp_state.target_state)
            LOG.debug('Current state timestamp is NONE')
            LOG.debug('Target state timestamp is %s;',
                      sdp_state.target_timestamp)
        elif sdp_state.target_timestamp is None:
            errdict['reason'] = 'Master Controller Services may have died.'
            LOG.debug('Current state is %s;', sdp_state.current_state)
            LOG.debug('Target state is %s;', sdp_state.target_state)
            LOG.debug('Current state timestamp is %s;',
                      sdp_state.current_timestamp)
            LOG.debug('Target state timestamp is NONE')
        elif sdp_state.current_timestamp < sdp_state.target_timestamp:
            errdict['reason'] = \
                'Timestamp for Master Controller Services is stale.'
            LOG.debug('Current state is %s;', sdp_state.current_state)
            LOG.debug('Target state is %s;', sdp_state.target_state)
            LOG.debug('Current state timestamp is %s;',
                      sdp_state.current_timestamp)
            LOG.debug('Target state timestamp is %s;',
                      sdp_state.target_timestamp)
        else:
            errval = "okay"
    except ConnectionError as err:
        errdict['reason'] = err
        LOG.debug('Connection Error %s', err)
    return errval, errdict
python
{ "resource": "" }
q17171
health
train
def health(): """Check the health of this service.""" up_time = time.time() - START_TIME response = dict(service=__service_id__, uptime='{:.2f}s'.format(up_time)) return response, HTTPStatus.OK
python
{ "resource": "" }
q17172
allowed_transitions
train
def allowed_transitions(): """Get target states allowed for the current state.""" try: sdp_state = SDPState() return sdp_state.allowed_target_states[sdp_state.current_state] except KeyError: LOG.error('No allowed target states defined for the current state') return dict(state="KeyError", reason="KeyError")
python
{ "resource": "" }
q17173
get_state
train
def get_state(): """SDP State. Return current state; target state and allowed target states. """ sdp_state = SDPState() errval, errdict = _check_status(sdp_state) if errval == "error": LOG.debug(errdict['reason']) return dict( current_state="unknown", target_state="unknown", last_updated="unknown", reason=errdict['reason'] ) _last_updated = sdp_state.current_timestamp if sdp_state.target_timestamp > _last_updated: _last_updated = sdp_state.target_timestamp return dict( current_state=sdp_state.current_state, target_state=sdp_state.target_state, allowed_target_states=sdp_state.allowed_target_states[ sdp_state.current_state], last_updated=_last_updated.isoformat() ), HTTPStatus.OK
python
{ "resource": "" }
q17174
get_current_state
train
def get_current_state(): """Return the SDP State and the timestamp for when it was updated.""" sdp_state = SDPState() errval, errdict = _check_status(sdp_state) if errval == "error": LOG.debug(errdict['reason']) return dict( current_state="unknown", last_updated="unknown", reason=errdict['reason'] ) LOG.debug('Current State: %s', sdp_state.current_state) LOG.debug('Current State last updated: %s', sdp_state.current_timestamp.isoformat()) return dict( current_state=sdp_state.current_state, last_updated=sdp_state.current_timestamp.isoformat() ), HTTPStatus.OK
python
{ "resource": "" }
q17175
processing_block_list
train
def processing_block_list(): """Return the list of processing blocks known to SDP.""" pb_list = ProcessingBlockList() return dict(active=pb_list.active, completed=pb_list.completed, aborted=pb_list.aborted)
python
{ "resource": "" }
q17176
scheduling_blocks
train
def scheduling_blocks(): """Return list of Scheduling Block instances known to SDP.""" sbi_list = SchedulingBlockInstanceList() return dict(active=sbi_list.active, completed=sbi_list.completed, aborted=sbi_list.aborted)
python
{ "resource": "" }
q17177
configure_sbi
train
def configure_sbi(): """Configure an SBI using POSTed configuration.""" # Allocate the SBI to a subarray by taking the first entry from # the list of currently inactive subarrays. inactive_list = SubarrayList().inactive request_data = request.data LOG.debug('request is of type %s', type(request_data)) try: sbi = Subarray(inactive_list[0]) sbi.activate() sbi.configure_sbi(request_data) except jsonschema.exceptions.ValidationError as error: LOG.error('Error configuring SBI: %s', error) return dict(path=str(error.absolute_path), schema_path=str(error.schema_path), message=error.message) return dict(status="Accepted SBI: {}".format(sbi.id))
python
{ "resource": "" }
q17178
home
train
def home(): """Temporary helper function to link to the API routes""" return dict(links=dict(api='{}{}'.format(request.url, PREFIX[1:]))), \ HTTPStatus.OK
python
{ "resource": "" }
q17179
catch_all
train
def catch_all(path): """Catch all path - return a JSON 404 """ return (dict(error='Invalid URL: /{}'.format(path), links=dict(root='{}{}'.format(request.url_root, PREFIX[1:]))), HTTPStatus.NOT_FOUND)
python
{ "resource": "" }
q17180
SubarrayList.get_active
train
def get_active() -> List[str]: """Return the list of active subarrays.""" active = [] for i in range(__num_subarrays__): key = Subarray.get_key(i) if DB.get_hash_value(key, 'active').upper() == 'TRUE': active.append(Subarray.get_id(i)) return active
python
{ "resource": "" }
q17181
SubarrayList.get_inactive
train
def get_inactive() -> List[str]: """Return the list of inactive subarrays.""" inactive = [] for i in range(__num_subarrays__): key = Subarray.get_key(i) if DB.get_hash_value(key, 'active').upper() == 'FALSE': inactive.append(Subarray.get_id(i)) return inactive
python
{ "resource": "" }
q17182
node_run
train
def node_run(input_file, coords_only, bc_settings, bc_grid_weights): """Main function to process visibility data on Spark cluster nodes. Args: input_file (str): RDD element containing filename to process. coords_only (boolean): If true, read only baseline coordinates to define the weights grid. bc_settings (pyspark.broadcast.Broadcast): Spark broadcast variable containing pipeline settings dictionary. bc_grid_weights (pyspark.broadcast.Broadcast): Spark broadcast variable containing weights grid. May be None. Returns: tuple: Output RDD element. """ # Create a logger. log = logging.getLogger('pyspark') log.setLevel(logging.INFO) if len(log.handlers) == 0: log.addHandler(logging.StreamHandler(sys.stdout)) # Create an imager and configure it. precision = bc_settings.value['precision'] imager = oskar.Imager(precision) for key, value in bc_settings.value['imager'].items(): setattr(imager, key, value) grid_size = imager.plane_size grid_weights = None # Get a handle to the input Measurement Set. ms_han = oskar.MeasurementSet.open(input_file) # Check if doing a first pass. if coords_only: # If necessary, generate a local weights grid. if imager.weighting == 'Uniform': grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision) # Do a first pass for uniform weighting or W-projection. log.info('Reading coordinates from %s', input_file) imager.coords_only = True process_input_data(ms_han, imager, None, grid_weights) imager.coords_only = False # Return weights grid and required number of W-planes as RDD element. return grid_weights, imager.num_w_planes # Allocate a local visibility grid on the node. grid_data = numpy.zeros([grid_size, grid_size], dtype='c8' if precision == 'single' else 'c16') # Process data according to mode. log.info('Reading visibilities from %s', input_file) if bc_settings.value['combine']: # Get weights grid from Spark Broadcast variable. if imager.weighting == 'Uniform': grid_weights = bc_grid_weights.value # Populate the local visibility grid. grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights) # Return grid as RDD element. log.info('Returning gridded visibilities to RDD') return grid_data, grid_norm else: # If necessary, generate a local weights grid. if imager.weighting == 'Uniform': grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision) # If necessary, do a first pass for uniform weighting or W-projection. if imager.weighting == 'Uniform' or imager.algorithm == 'W-projection': imager.coords_only = True process_input_data(ms_han, imager, None, grid_weights) imager.coords_only = False # Populate the local visibility grid. grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights) # Save image by finalising grid. output_file = splitext(input_file)[0] + '.fits' save_image(imager, grid_data, grid_norm, output_file) log.info('Finished. Output file is %s', output_file) return 0
python
{ "resource": "" }
q17183
save_image
train
def save_image(imager, grid_data, grid_norm, output_file): """Makes an image from gridded visibilities and saves it to a FITS file. Args: imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray): Final visibility grid. grid_norm (float): Grid normalisation to apply. output_file (str): Name of output FITS file to write. """ # Make the image (take the FFT, normalise, and apply grid correction). imager.finalise_plane(grid_data, grid_norm) grid_data = numpy.real(grid_data) # Trim the image if required. border = (imager.plane_size - imager.image_size) // 2 if border > 0: end = border + imager.image_size grid_data = grid_data[border:end, border:end] # Write the FITS file ('overwrite' replaces the deprecated 'clobber' keyword). hdr = fits.header.Header() fits.writeto(output_file, grid_data, hdr, overwrite=True)
python
{ "resource": "" }
q17184
reduce_sequences
train
def reduce_sequences(object_a, object_b): """Performs an element-wise addition of sequences into a new list. Both sequences must have the same length, and the addition operator must be defined for each element of the sequence. """ def is_seq(obj): """Returns true if the object passed is a sequence.""" return hasattr(obj, "__getitem__") or hasattr(obj, "__iter__") if object_a is None or object_b is None: return None elif is_seq(object_a) and is_seq(object_b): reduced = [] for element_a, element_b in zip(object_a, object_b): if element_a is not None and element_b is not None: reduced.append(element_a + element_b) else: reduced.append(None) return reduced else: return object_a + object_b
python
{ "resource": "" }
q17185
main
train
def main(): """Runs test imaging pipeline using Spark.""" # Check command line arguments. if len(sys.argv) < 3: raise RuntimeError( 'Usage: spark-submit spark_imager_test.py <settings_file> <dir> ' '[partitions]') # Create log object. log = logging.getLogger('pyspark') log.setLevel(logging.INFO) log.addHandler(logging.StreamHandler(sys.stdout)) # Load pipeline settings. with open(sys.argv[1]) as f: settings = json.load(f) # Get a list of input Measurement Sets to process. data_dir = str(sys.argv[2]) inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS')) # Use a list (not a lazy filter object), as 'inputs' is consumed twice below. inputs = [f for f in inputs if f] log.info('Found input Measurement Sets: %s', ', '.join(inputs)) # Get a Spark context. context = pyspark.SparkContext(appName="spark_imager_test") # Create the Spark RDD containing the input filenames, # suitably parallelized. partitions = int(sys.argv[3]) if len(sys.argv) > 3 else 2 rdd = context.parallelize(inputs, partitions) # Define Spark broadcast variables. bc_settings = context.broadcast(settings) bc_grid_weights = None # Process coordinates first if required. if (settings['combine'] and ( settings['imager']['weighting'] == 'Uniform' or settings['imager']['algorithm'] == 'W-projection')): # Create RDD to generate weights grids. rdd_coords = rdd.map( partial(node_run, coords_only=True, bc_settings=bc_settings, bc_grid_weights=None)) # Mark this RDD as persistent so it isn't computed more than once. rdd_coords.persist() # Get the maximum number of W-planes required, and update settings. num_w_planes = rdd_coords.map(lambda x: x[1]).max() settings['imager']['num_w_planes'] = num_w_planes # Get the combined grid of weights and broadcast it to nodes. output = rdd_coords.reduce(reduce_sequences) bc_grid_weights = context.broadcast(output[0]) # Delete this RDD. rdd_coords.unpersist() # Re-broadcast updated settings. bc_settings = context.broadcast(settings) # Run parallel pipeline on worker nodes and combine visibility grids. output = rdd.map( partial(node_run, coords_only=False, bc_settings=bc_settings, bc_grid_weights=bc_grid_weights)).reduce(reduce_sequences) # Finalise combined visibility grids if required. if settings['combine']: # Create an imager to finalise (FFT) the gridded data. imager = oskar.Imager(settings['precision']) for key, value in settings['imager'].items(): setattr(imager, key, value) # Finalise grid and save image. save_image(imager, output[0], output[1], settings['output_file']) log.info('Finished. Output file is %s', settings['output_file']) context.stop()
python
{ "resource": "" }
q17186
load_schema
train
def load_schema(path): """Loads a JSON schema file.""" with open(path) as json_data: schema = json.load(json_data) return schema
python
{ "resource": "" }
q17187
clear_db
train
def clear_db(): """Clear the entire db.""" cursor = '0' while cursor != 0: cursor, keys = DB.scan(cursor, match='*', count=5000) if keys: DB.delete(*keys)
python
{ "resource": "" }
q17188
get_scheduling_block_ids
train
def get_scheduling_block_ids(): """Return list of scheduling block IDs""" ids = [key.split('/')[-1] for key in DB.keys(pattern='scheduling_block/*')] return sorted(ids)
python
{ "resource": "" }
q17189
add_scheduling_block
train
def add_scheduling_block(config, schema_path=None): """Add a Scheduling Block to the Configuration Database. The configuration dictionary must match the schema defined in the file referenced by the schema_path argument (defaults to sbi_post.json next to this module). Args: config (dict): Scheduling Block instance request configuration. schema_path (str): Path to schema file used to validate the Scheduling Block Instance request """ if schema_path is None: schema_path = os.path.join(os.path.dirname(__file__), 'sbi_post.json') schema = load_schema(schema_path) jsonschema.validate(config, schema) # Add the scheduling block to the database # (This is done as a single k/v pair here but would probably be # expanded to a set of keys in the actual implementation) DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config)) # Add an event to the scheduling block event list to notify # subscribers of the newly added scheduling block. DB.rpush('scheduling_block_events', json.dumps(dict(type="created", id=config["id"])))
python
{ "resource": "" }
q17190
delete_scheduling_block
train
def delete_scheduling_block(block_id): """Delete the Scheduling Block with the specified ID.""" DB.delete('scheduling_block/{}'.format(block_id)) # Add an event to the scheduling block event list to notify # subscribers of the deleted scheduling block. DB.rpush('scheduling_block_events', json.dumps(dict(type="deleted", id=block_id)))
python
{ "resource": "" }
q17191
get_scheduling_block_event
train
def get_scheduling_block_event(): """Return the latest Scheduling Block event""" event = DB.rpoplpush('scheduling_block_events', 'scheduling_block_event_history') if event: event = json.loads(event.decode('utf-8')) return event
python
{ "resource": "" }
q17192
get_sub_array_ids
train
def get_sub_array_ids(): """Return the list of sub-array IDs currently known to SDP.""" ids = set() for key in sorted(DB.keys(pattern='scheduling_block/*')): config = json.loads(DB.get(key)) ids.add(config['sub_array_id']) return sorted(ids)
python
{ "resource": "" }
q17193
get_subarray_sbi_ids
train
def get_subarray_sbi_ids(sub_array_id): """Return the list of scheduling block IDs associated with the given sub_array_id. """ ids = [] for key in sorted(DB.keys(pattern='scheduling_block/*')): config = json.loads(DB.get(key)) if config['sub_array_id'] == sub_array_id: ids.append(config['id']) return ids
python
{ "resource": "" }
q17194
get_processing_block_ids
train
def get_processing_block_ids(): """Return the list of Processing Block IDs.""" ids = [] for key in sorted(DB.keys(pattern='scheduling_block/*')): config = json.loads(DB.get(key)) for processing_block in config['processing_blocks']: ids.append(processing_block['id']) return ids
python
{ "resource": "" }
q17195
get_processing_block
train
def get_processing_block(block_id): """Return the Processing Block Configuration for the specified ID""" identifiers = block_id.split(':') scheduling_block_id = identifiers[0] scheduling_block_config = get_scheduling_block(scheduling_block_id) for processing_block in scheduling_block_config['processing_blocks']: if processing_block['id'] == block_id: return processing_block raise KeyError('Unknown Processing Block id: {} ({})' .format(identifiers[-1], block_id))
python
{ "resource": "" }
q17196
delete_processing_block
train
def delete_processing_block(processing_block_id): """Delete the Processing Block with the specified ID.""" scheduling_block_id = processing_block_id.split(':')[0] config = get_scheduling_block(scheduling_block_id) processing_blocks = config.get('processing_blocks') processing_block = list(filter( lambda x: x.get('id') == processing_block_id, processing_blocks))[0] config['processing_blocks'].remove(processing_block) DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config)) # Add an event to the processing block event list to notify # subscribers of the deleted processing block. DB.rpush('processing_block_events', json.dumps(dict(type="deleted", id=processing_block_id)))
python
{ "resource": "" }
q17197
get_processing_block_event
train
def get_processing_block_event(): """Return the latest Processing Block event""" event = DB.rpoplpush('processing_block_events', 'processing_block_event_history') if event: event = json.loads(event.decode('utf-8')) return event
python
{ "resource": "" }
q17198
subscribe
train
def subscribe(object_type: str, subscriber: str, callback_handler: Callable = None) -> EventQueue: """Subscribe to the specified object type. Returns an EventQueue object which can be used to query events associated with the object type for this subscriber. Args: object_type (str): Object type subscriber (str): Subscriber name callback_handler (function, optional): Callback handler function. Returns: EventQueue, event queue object. """ key = _keys.subscribers(object_type) DB.remove_from_list(key, subscriber) DB.append_to_list(key, subscriber) return EventQueue(object_type, subscriber, callback_handler)
python
{ "resource": "" }
q17199
get_subscribers
train
def get_subscribers(object_type: str) -> List[str]: """Get the list of subscribers to events of the object type. Args: object_type (str): Type of object. Returns: List[str], list of subscriber names. """ return DB.get_list(_keys.subscribers(object_type))
python
{ "resource": "" }