code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
'''
Returns a function that attaches an image to page if it exists
Currenlty assumes that images have already been imported and info
has been stored in record_keeper
'''
if (field in nested_fields) and nested_fields[field]:
foreign_image_id = nested_fields[field]["id"]
# Handle the following
# record keeper may not exist
# record keeper may not have image ref
if record_keeper:
try:
local_image_id = record_keeper.get_local_image(
foreign_image_id)
local_image = Image.objects.get(id=local_image_id)
setattr(page, field, local_image)
except ObjectDoesNotExist:
raise ObjectDoesNotExist(
("executing attach_image: local image referenced"
"in record_keeper does not actually exist."),
None)
except Exception:
raise
else:
raise Exception(
("Attempted to attach image without record_keeper. "
"This functionality is not yet implemented")) | def attach_image(field, nested_fields, page, record_keeper=None) | Returns a function that attaches an image to page if it exists
Currenlty assumes that images have already been imported and info
has been stored in record_keeper | 5.237957 | 3.712304 | 1.410972 |
# getting the content rotation settings from site settings
for main in Main.objects.all():
site = main.sites_rooted_here.all().first()
main_lang = Languages.for_site(site).languages.filter(
is_main_language=True).first()
index = SectionIndexPage.objects.live().child_of(main).first()
site_settings = SiteSettings.for_site(site)
if day is None:
day = timezone.now().weekday()
# calls the two rotate methods with the necessary params
if main and index:
rotate_latest(main_lang, index, main, site_settings, day)
rotate_featured_in_homepage(main_lang, day, main) | def rotate_content(day=None) | this method gets the parameters that are needed for rotate_latest
and rotate_featured_in_homepage methods, and calls them both | 5.79992 | 4.610919 | 1.257866 |
db_alias = schema_editor.connection.alias
emit_pre_migrate_signal(verbosity=2, interactive=False, db=db_alias) | def run_wagtail_migration_before_core_34(apps, schema_editor) | Migration 34 needs migration 0040 from wagtail core
and this Migration will run wagtail migration before
molo core migration 34 | 3.905957 | 3.958415 | 0.986748 |
request = context.get('request')
locale = context.get('locale_code')
page = section.get_main_language_page()
settings = SiteSettings.for_site(request.site) \
if request else None
qs = ArticlePage.objects.descendant_of(page).filter(
language__is_main_language=True)
article_ordering = settings \
and settings.article_ordering_within_section
cms_ordering = article_ordering \
and settings.article_ordering_within_section !=\
ArticleOrderingChoices.CMS_DEFAULT_SORTING
if article_ordering and cms_ordering:
order_by = ArticleOrderingChoices.\
get(settings.article_ordering_within_section).name.lower()
order_by = order_by if order_by.find('_desc') == -1 \
else '-{}'.format(order_by.replace('_desc', ''))
qs = qs.order_by(order_by)
if featured_in_homepage is not None:
qs = qs.filter(featured_in_homepage=featured_in_homepage)\
.order_by('-featured_in_homepage_start_date')
if featured_in_latest is not None:
qs = qs.filter(featured_in_latest=featured_in_latest)
if featured_in_section is not None:
qs = qs.filter(featured_in_section=featured_in_section)\
.order_by('-featured_in_section_start_date')
if not locale:
return qs.live()[:count]
return get_pages(context, qs, locale)[:count] | def load_descendant_articles_for_section(
context, section, featured_in_homepage=None, featured_in_section=None,
featured_in_latest=None, count=5) | Returns all descendant articles (filtered using the parameters)
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles. | 2.561378 | 2.536528 | 1.009797 |
request = context.get('request')
locale = context.get('locale_code')
main_language_page = section.get_main_language_page()
settings = SiteSettings.for_site(request.site) \
if request else None
# TODO: Consider caching the pks of these articles using a timestamp on
# section as the key so tha twe don't always do these joins
article_ordering = settings and settings.article_ordering_within_section
order_by = ArticleOrderingChoices.\
get(settings.article_ordering_within_section).name.lower() \
if article_ordering \
and settings.article_ordering_within_section !=\
ArticleOrderingChoices.CMS_DEFAULT_SORTING\
else '-first_published_at'
order_by = order_by if order_by.find('_desc') == -1 \
else '-{}'.format(order_by.replace('_desc', ''))
child_articles = ArticlePage.objects.child_of(
main_language_page).filter(
language__is_main_language=True).order_by(order_by)
if featured_in_section is not None:
child_articles = child_articles.filter(
featured_in_section=featured_in_section)\
.order_by('-featured_in_section_start_date')
related_articles = ArticlePage.objects.filter(
related_sections__section__slug=main_language_page.slug)
qs = list(chain(
get_pages(context, child_articles, locale),
get_pages(context, related_articles, locale)))
# Pagination
if count:
p = context.get('p', 1)
paginator = Paginator(qs, count)
try:
articles = paginator.page(p)
except PageNotAnInteger:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
else:
articles = qs
if not locale:
return articles
context.update({'articles_paginated': articles})
return articles | def load_child_articles_for_section(
context, section, featured_in_section=None, count=5) | Returns all child articles
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles. | 3.353962 | 3.307403 | 1.014077 |
'''
Returns all child sections
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles.
'''
page = section.get_main_language_page()
locale = context.get('locale_code')
qs = SectionPage.objects.child_of(page).filter(
language__is_main_language=True)
if not locale:
return qs[:count]
return get_pages(context, qs, locale) | def load_child_sections_for_section(context, section, count=None) | Returns all child sections
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles. | 6.653255 | 3.078359 | 2.161299 |
'''
Returns all sibling sections
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles.
'''
page = section.get_main_language_page()
locale = context.get('locale_code')
qs = SectionPage.objects.sibling_of(page).filter(
language__is_main_language=True)
if not locale:
return qs[:count]
return get_pages(context, qs, locale) | def load_sibling_sections(context, section, count=None) | Returns all sibling sections
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles. | 6.924941 | 3.290238 | 2.104693 |
md = markdown(
value,
extensions=[
'markdown.extensions.fenced_code',
'codehilite',
]
)
open_tag = '<p>'
close_tag = '</p>'
if md.startswith(open_tag) and md.endswith(close_tag):
md = md[len(open_tag):-len(close_tag)]
return mark_safe(md) | def handle_markdown(value) | For some unknown reason markdown wraps the value in <p> tags.
Currently there doesn't seem to be an extension to turn this off. | 2.613137 | 2.341245 | 1.116132 |
'''
Robust as possible
Attempts to create the page
If any of the functions used to attach content to the page
fail, keep going, keep a record of those errors in a context dict
return the page and the context dict in a tuple
'''
fields, nested_fields = separate_fields(content)
foreign_id = content.pop('id')
# remove unwanted fields
if 'latest_revision_created_at' in content:
content.pop('latest_revision_created_at')
page = class_(**fields)
# create functions to attach attributes
function_args_mapping = (
# add_section_time
(add_json_dump, ("time", nested_fields, page)),
# add_tags
(add_list_of_things, ("tags", nested_fields, page)),
# add_metadata_tags
(add_list_of_things, ("metadata_tags", nested_fields, page)),
# attach_image
(attach_image, ("image", nested_fields, page, record_keeper)),
# attach_social_media_image
(attach_image, ("social_media_image", nested_fields,
page, record_keeper)),
# attach_banner_image
(attach_image, ("banner", nested_fields, page, record_keeper)),
)
for mapping in function_args_mapping:
function = mapping[0]
_args = mapping[1]
try:
function(*_args)
except Exception as e:
if logger:
logger.log(
ERROR,
"Failed to create page content",
{
"foreign_page_id": foreign_id,
"exception": e,
"function": function.__name__,
})
# Handle content in nested_fields
body = add_stream_fields(nested_fields, page)
# body has not been added as it contains reference to pages
if body:
record_keeper.article_bodies[foreign_id] = body
# Handle relationships in nested_fields
if record_keeper:
record_relation_functions = [
record_keeper.record_nav_tags,
record_keeper.record_recommended_articles,
record_keeper.record_reaction_questions,
record_keeper.record_related_sections,
record_keeper.record_section_tags,
record_keeper.record_banner_page_link,
]
for function in record_relation_functions:
try:
function(nested_fields, foreign_id)
except Exception as e:
if logger:
logger.log(
ERROR,
"Failed to record content",
{
"foreign_page_id": foreign_id,
"exception": e,
"function": function.__name__,
})
return page | def create_page(self, content, class_, record_keeper=None, logger=None) | Robust as possible
Attempts to create the page
If any of the functions used to attach content to the page
fail, keep going, keep a record of those errors in a context dict
return the page and the context dict in a tuple | 3.892956 | 3.073414 | 1.266655 |
list_tuple = []
for article in ArticlePage.objects.all():
if article.get_parent_section():
section_tuple = (
article.get_parent_section().id,
article.get_parent_section().title
)
if section_tuple not in list_tuple:
list_tuple.append(section_tuple)
return list_tuple | def lookups(self, request, model_admin) | Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar. | 3.114969 | 3.058194 | 1.018565 |
if self.value():
try:
section = SectionPage.objects.get(id=self.value())
return queryset.child_of(section).all()
except (ObjectDoesNotExist, MultipleObjectsReturned):
return None | def queryset(self, request, queryset) | Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`. | 3.896332 | 4.618595 | 0.843619 |
'''Upload a Zip File Containing a single file containing media.'''
if request.method == 'POST':
form = MediaForm(request.POST, request.FILES)
if form.is_valid():
context_dict = {}
try:
context_dict['copied_files'] = update_media_file(
request.FILES['zip_file'])
except Exception as e:
context_dict['error_message'] = e.message
return render(request,
'django_admin/transfer_media_message.html',
context_dict)
else:
form = MediaForm()
return render(request, 'django_admin/upload_media.html', {'form': form}) | def upload_file(request) | Upload a Zip File Containing a single file containing media. | 3.346642 | 2.606609 | 1.283907 |
'''Create and download a zip file containing the media file.'''
if request.method == "GET":
if path.exists(settings.MEDIA_ROOT):
zipfile_name = 'media_%s.zip' % settings.SITE_NAME
in_memory_file = BytesIO()
media_zipfile = zipfile.ZipFile(in_memory_file, 'w',
zipfile.ZIP_DEFLATED)
directory_name = path.split(settings.MEDIA_ROOT)[-1]
for root, dirs, files in walk(directory_name):
for file in files:
media_zipfile.write(path.join(root, file))
media_zipfile.close()
resp = HttpResponse(in_memory_file.getvalue(),
content_type="application/x-zip-compressed")
resp['Content-Disposition'] = (
'attachment; filename=%s' % zipfile_name)
else:
resp = render(request,
'django_admin/transfer_media_message.html',
{'error_message':
'media file does not exist'})
else:
resp = HttpResponseNotAllowed(permitted_methods=['GET'])
return resp | def download_file(request) | Create and download a zip file containing the media file. | 2.660741 | 2.458857 | 1.082105 |
'''
Update permissions for some users.
Give bulk-delete permissions to moderators.
Give edit permission to moderators and editors in order
to display 'Main' page in the explorer.
'''
db_alias = schema_editor.connection.alias
try:
# Django 1.9
emit_post_migrate_signal(2, False, db_alias)
except TypeError:
# Django < 1.9
try:
# Django 1.8
emit_post_migrate_signal(2, False, 'default', db_alias)
except TypeError: # Django < 1.8
emit_post_migrate_signal([], 2, False, 'default', db_alias)
Group = apps.get_model('auth.Group')
Permission = apps.get_model('auth.Permission')
GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
SectionIndexPage = apps.get_model('core.SectionIndexPage')
MainPage = apps.get_model('core.Main')
moderator_group = Group.objects.filter(name='Moderators').first()
editor_group = Group.objects.filter(name='Editors').first()
if moderator_group:
sections = SectionIndexPage.objects.first()
GroupPagePermission.objects.get_or_create(
group_id=moderator_group.id,
page_id=sections.id,
permission_type='bulk_delete'
)
main = MainPage.objects.first()
GroupPagePermission.objects.get_or_create(
group_id=moderator_group.id,
page_id=main.id,
permission_type='edit'
)
if editor_group:
main = MainPage.objects.first()
GroupPagePermission.objects.get_or_create(
group_id=editor_group.id,
page_id=main.id,
permission_type='edit'
) | def update_permissions_for_group(apps, schema_editor) | Update permissions for some users.
Give bulk-delete permissions to moderators.
Give edit permission to moderators and editors in order
to display 'Main' page in the explorer. | 2.250919 | 1.716909 | 1.31103 |
if 'is_main_language' in request.GET:
# TODO investigate possible error cases where page
# does not have language
return queryset.filter(languages__language__is_main_language=True)
else:
return queryset | def filter_queryset(self, request, queryset, view) | Returns only pages in the main language for a site | 7.755033 | 6.016005 | 1.289067 |
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
# store existing recommended articles
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
# delete existing recommended articles
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save() | def create_recomended_articles(main_article, article_list) | Creates recommended article objects from article_list
and _prepends_ to existing recommended articles. | 3.805133 | 2.664093 | 1.428303 |
'''
Seperate out page blocks at the end of a StreamField.
Accepts: List of streamfield blocks
Returns: Tuple of 2 lists of blocks - (remaining body, final article)
'''
stream_data_copy = list(stream_data)
end_page_links = []
for block in stream_data_copy[::-1]:
if block['type'] == 'page':
end_page_links.insert(0, block)
stream_data_copy.pop()
else:
break
return (stream_data_copy, end_page_links) | def seperate_end_page_links(stream_data) | Seperate out page blocks at the end of a StreamField.
Accepts: List of streamfield blocks
Returns: Tuple of 2 lists of blocks - (remaining body, final article) | 4.137617 | 1.852359 | 2.233701 |
'''
Accepts: list of page ids
Returns: list of specific page objects
'''
page_list = []
for id_ in id_list:
try:
page_list.append(
Page.objects.get(id=id_).specific)
except ObjectDoesNotExist:
logging.error(
"Attempted to fetch non-existent"
" page with id of {}".format(id_))
return page_list | def get_pages_from_id_list(id_list) | Accepts: list of page ids
Returns: list of specific page objects | 3.554502 | 2.708426 | 1.312387 |
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all()
for article in articles:
(remaining_blocks, linked_article_blocks) = seperate_end_page_links(
article.body.stream_data)
if linked_article_blocks:
linked_article_ids = get_page_ids_from_page_blocks(
linked_article_blocks)
linked_articles = get_pages_from_id_list(linked_article_ids)
create_recomended_articles(article, linked_articles)
parent = article.get_parent().specific
parent.enable_recommended_section = True
parent.save()
stream_block = article.body.stream_block
article.body = StreamValue(stream_block,
remaining_blocks,
is_lazy=True)
article.save() | def move_page_links_to_recommended_articles() | Derived from https://github.com/wagtail/wagtail/issues/2110 | 3.691401 | 3.278531 | 1.125931 |
# Get user config from ~/.cookiecutterrc or equivalent
# If no config file, sensible defaults from config.DEFAULT_CONFIG are used
config_dict = get_user_config()
template = expand_abbreviations(template, config_dict)
# TODO: find a better way to tell if it's a repo URL
if 'git@' in template or 'https://' in template:
repo_dir = clone(
repo_url=template,
checkout=checkout,
clone_to_dir=config_dict['cookiecutters_dir'],
no_input=no_input
)
else:
# If it's a local repo, no need to clone or copy to your
# cookiecutters_dir
repo_dir = template
context_file = os.path.join(repo_dir, 'cookiecutter.json')
logging.debug('context_file is {0}'.format(context_file))
context = generate_context(
context_file=context_file,
default_context=config_dict['default_context'],
extra_context=extra_context,
)
# Create project from local context and project template.
generate_files(
repo_dir=repo_dir,
context=context
) | def cookiecutter(template, checkout=None, no_input=False, extra_context=None) | Replacement for cookiecutter's own cookiecutter.
The difference with cookiecutter's cookiecutter function
is that this one doesn't automatically str() all the values
passed along to the template.
:param template: A directory containing a project template directory,
or a URL to a git repository.
:param checkout: The branch, tag or commit ID to checkout after clone.
:param no_input: Prompt the user at command line for manual configuration?
:param extra_context: A dictionary of context that overrides default
and user configuration. | 3.369535 | 3.385026 | 0.995424 |
'''
This is overwritten in order to not exclude drafts
and pages submitted for moderation
'''
request = self.request
# Allow pages to be filtered to a specific type
if 'type' not in request.GET:
model = Page
else:
model_name = request.GET['type']
try:
model = resolve_model_string(model_name)
except LookupError:
raise BadRequestError("type doesn't exist")
if not issubclass(model, Page):
raise BadRequestError("type doesn't exist")
# This is the overwritten line
queryset = model.objects.public() # exclude .live()
# Filter by site
queryset = queryset.descendant_of(
request.site.root_page, inclusive=True)
return queryset | def get_queryset(self) | This is overwritten in order to not exclude drafts
and pages submitted for moderation | 5.212918 | 3.609638 | 1.444166 |
'''
Only serve site-specific languages
'''
request = self.request
return (Languages.for_site(request.site)
.languages.filter().order_by('pk')) | def get_queryset(self) | Only serve site-specific languages | 15.139044 | 6.994816 | 2.164323 |
'''
API only serves 20 pages by default
This fetches info on all of items and return them as a list
Assumption: limit of API is not less than 20
'''
response = requests.get(url)
content = json.loads(response.content)
count = content["meta"]["total_count"]
if count <= 20:
return content["items"]
else:
items = [] + content["items"]
num_requests = int(math.ceil(count // 20))
for i in range(1, num_requests + 1):
paginated_url = "{}?limit=20&offset={}".format(
url, str(i * 20))
paginated_response = requests.get(paginated_url)
items = items + json.loads(paginated_response.content)["items"]
return items | def list_of_objects_from_api(url) | API only serves 20 pages by default
This fetches info on all of items and return them as a list
Assumption: limit of API is not less than 20 | 3.725371 | 2.232171 | 1.668945 |
# assemble url
base_url = base_url.rstrip("/")
url = base_url + API_PAGES_ENDPOINT + "?type=" + self._content_type + \
"&fields=" + ",".join(self._fields) + \
"&order=latest_revision_created_at"
# make request
try:
response = requests.get(url)
self._base_url = base_url
self._content = response.json()
self._content = self._content["items"]
return self._content
except requests.exceptions.ConnectionError:
return "No content could be found from {}. " \
"Are you sure this is the correct URL?".format(base_url)
except requests.exceptions.RequestException:
return "Content could not be imported at this time. " \
"Please try again later." | def get_content_from_url(self, base_url) | Sections can have SectionPage and ArticlePage child objects.
These have different fields, and thus have to be treated
differently. | 3.523969 | 3.373442 | 1.044621 |
if self.content():
parent = Page.objects.get(id=parent_id)
# Save the selected section page
response = requests.get(
self._base_url + API_PAGES_ENDPOINT + str(indexes[0]) + "/"
)
section_page = response.json()
self.process_child_section(section_page["id"], parent) | def save(self, indexes, parent_id) | Save the selected section. This will save the selected section
as well as its direct child pages obtained through the ?child_of
query parameter. The ?descendant_of query parameter is probably
better suited because it all pages under that part of the tree will
be obtained. The problem , however, is that that will require being
able to traverse the tree and recreate parent-child relationships
after they are imported | 6.6986 | 5.423568 | 1.235091 |
'''
Create a reference of site images by hash
If there are duplicate images, only store the first
and create warnings for other images
'''
if Image.objects.count() == 0:
return None
total = Image.objects.count()
count = 1
for local_image in Image.objects.all():
if not hasattr(local_image, 'image_info'):
ImageInfo.objects.create(image=local_image)
local_image.refresh_from_db()
hash_ = local_image.image_info.image_hash
if hash_ in self.image_hashes:
self.log(WARNING, "Image found with matching hash", context={
"composite hash": hash_,
"hashed image ID": self.image_hashes[hash_].id,
"matching image ID": local_image.id,
})
else:
self.image_hashes[hash_] = local_image
self.log(ACTION, "{}/{} images processed".format(count, total))
count += 1 | def get_image_details(self) | Create a reference of site images by hash
If there are duplicate images, only store the first
and create warnings for other images | 4.60533 | 2.986927 | 1.541829 |
'''
fetches, creates image object
returns tuple with Image object and context dictionary containing
request URL
'''
context = {
"file_url": url,
"foreign_title": image_title,
}
try:
image_file = requests.get(url)
local_image = Image(
title=image_title,
file=ImageFile(
BytesIO(image_file.content),
name=image_title
)
)
local_image.save()
return (local_image, context)
except Exception as e:
context.update({
"exception": e,
})
raise ImageCreationFailed(context, None) | def fetch_and_create_image(self, url, image_title) | fetches, creates image object
returns tuple with Image object and context dictionary containing
request URL | 4.330876 | 3.138639 | 1.379858 |
'''
Imports and returns tuple with image and context dict
Input: foreign image ID
Output: (Image: imported image, Dict: info about import)
Side effects: If Importer object has a record_keeper, it
will update the record of foreign to local images.
Attempts to avoid duplicates by matching image dimensions
and hashes. If a match is found it refers to local instance
instead. If it is not, the image is fetched, created and
referenced.
'''
image_detail_url = "{}{}/".format(self.image_url, image_id)
try:
img_response = requests.get(image_detail_url)
img_info = json.loads(img_response.content)
except Exception as e:
error_context = {
"image detail url": image_detail_url,
"exception": e,
}
raise ImageInfoFetchFailed(error_context)
if img_info["image_hash"] is None:
raise ValueError('image hash should not be none')
# check if a replica exists
local_image = self.get_replica_image(
img_info["image_hash"])
file_url = img_info['image_url']
# handle when image_url is relative
# assumes that image import means local storage
if img_info['image_url'][0] == '/':
file_url = "{}{}".format(
self.base_url, img_info['image_url'])
if local_image:
context = {
"local_version_existed": True,
"file_url": file_url,
"image_detail_url": image_detail_url,
"foreign_title": img_info["title"],
}
# update record keeper
if self.record_keeper:
self.record_keeper.record_image_relation(
image_id,
local_image.id)
return (local_image, context)
else:
new_image, context = self.fetch_and_create_image(
file_url,
img_info["title"])
# update record keeper
if self.record_keeper:
self.record_keeper.record_image_relation(
image_id,
new_image.id)
context.update({
"local_version_existed": False,
})
return (new_image, context) | def import_image(self, image_id) | Imports and returns tuple with image and context dict
Input: foreign image ID
Output: (Image: imported image, Dict: info about import)
Side effects: If Importer object has a record_keeper, it
will update the record of foreign to local images.
Attempts to avoid duplicates by matching image dimensions
and hashes. If a match is found it refers to local instance
instead. If it is not, the image is fetched, created and
referenced. | 4.02416 | 2.30388 | 1.746688 |
'''
Fetches all images from site
Handles Errors in creation process
Updates record_keeper
Logs the result of each attempt to create an image
'''
self.log(ACTION, "Importing Images")
try:
images = list_of_objects_from_api(self.image_url)
except Exception as e:
raise ImageInfoFetchFailed(
"Something went wrong fetching list of images")
if not images:
return None
# iterate through foreign images
for image_summary in images:
self.log(ACTION, "Importing Image", depth=1)
try:
(image, context) = self.import_image(image_summary["id"])
# log success
self.log(SUCCESS, "Importing Image",
context=context,
depth=1)
except ImageInfoFetchFailed as e:
self.log(ERROR, "Importing Images", e, depth=1)
except ImageCreationFailed as e:
self.log(ERROR, "Importing Images", e.message, depth=1)
except Exception as e:
context = {
"exception": e,
"foreign_image_id": image_summary["id"],
}
self.log(ERROR, "Importing Images", context, depth=1) | def import_images(self) | Fetches all images from site
Handles Errors in creation process
Updates record_keeper
Logs the result of each attempt to create an image | 4.684828 | 3.151012 | 1.486769 |
'''
Return list of foreign language IDs from API language endpoint
TODO: add in validation before creating languages
'''
languages = list_of_objects_from_api(self.language_url)
language_ids = []
for language in languages:
language_ids.append(language["id"])
return language_ids | def get_language_ids(self) | Return list of foreign language IDs from API language endpoint
TODO: add in validation before creating languages | 8.211714 | 2.761783 | 2.973338 |
'''
Recreates one-to-many relationship
'''
iterable = self.record_keeper.foreign_to_many_foreign_map[key]
for foreign_page_id, foreign_page_id_list in iteritems(iterable):
# Assumption: local page has been indexed and exists
# TODO: handle case where it doesn't exist
local_page_id = self.record_keeper.get_local_page(foreign_page_id)
local_page = Page.objects.get(id=local_page_id).specific
for _foreign_page_id in foreign_page_id_list:
try:
local_version_page_id = (self.record_keeper
.get_local_page(_foreign_page_id))
foreign_page = Page.objects.get(
id=local_version_page_id).specific
realtionship_object = class_(page=local_page)
setattr(realtionship_object, attribute_name, foreign_page)
realtionship_object.save()
except Exception as e:
context = {
"exception": e,
"function_schema": ("recreate_relationships"
"(class, attribute_name, key)"),
"attribute_name": str(attribute_name),
"key": str(key),
"class": str(class_),
"foreign_page_id": str(foreign_page_id),
}
self.log(ERROR, "recreating relationships",
context=context) | def recreate_relationships(self, class_, attribute_name, key) | Recreates one-to-many relationship | 3.484414 | 3.379398 | 1.031075 |
'''
Recreates one-to-one relationship
'''
iterable = self.record_keeper.foreign_to_foreign_map["banner_link_page"] # noqa
for foreign_page_id, linked_page_foreign_id in iteritems(iterable):
# get local banner page
local_page_id = self.record_keeper.get_local_page(foreign_page_id)
local_page = Page.objects.get(id=local_page_id).specific
# get local linked page
local_id = self.record_keeper.get_local_page(
linked_page_foreign_id)
linked_page = Page.objects.get(id=local_id).specific
# link the two together
setattr(local_page, attribute_name, linked_page)
# TODO: review publishing and saving revisions
local_page.save_revision().publish() | def recreate_relationship(self, attribute_name, key) | Recreates one-to-one relationship | 4.533009 | 4.28938 | 1.056798 |
'''
Handles case where article body contained page or image.
Assumes all articles and images have been created.
'''
for foreign_id, body in iteritems(self.record_keeper.article_bodies):
try:
local_page_id = self.record_keeper.get_local_page(foreign_id)
page = Page.objects.get(id=local_page_id).specific
# iterate through the body
new_body = []
for item in body:
if not item['value']:
continue
if item['type'] == 'page':
new_page_id = self.record_keeper.get_local_page(
item['value'])
item['value'] = new_page_id
elif item['type'] == 'image':
new_image_id = self.record_keeper.get_local_image(
item['value'])
item['value'] = new_image_id
new_body.append(item)
setattr(page, 'body', json.dumps(new_body))
page.save_revision().publish()
except Exception as e:
self.log(ERROR, "recreating article body",
{
"exception": e,
"foreign_id": foreign_id,
"body": body,
},
depth=1) | def recreate_article_body(self) | Handles case where article body contained page or image.
Assumes all articles and images have been created. | 3.171236 | 2.535687 | 1.250641 |
'''
Get the foreign page id based on type
Only works for index pages
'''
# TODO: log this
response = requests.get("{}pages/?type={}".format(
self.api_url, page_type))
content = json.loads(response.content)
return content["items"][0]["id"] | def get_foreign_page_id_from_type(self, page_type) | Get the foreign page id based on type
Only works for index pages | 5.812402 | 3.827041 | 1.518772 |
'''
Wrapper for attach_page
Creates the content
Then attaches a language relation from the main language page to the
newly created Page
Note: we get the parent from the main language page
'''
try:
page = self.attach_page(
local_main_lang_page.get_parent(),
content)
except:
# TODO: log this
return None
try:
# create the translation object for page
language = SiteLanguageRelation.objects.get(
language_setting=self.language_setting,
locale=locale)
page.language = language
page.translated_pages.add(local_main_lang_page)
local_main_lang_page.translated_pages.add(page)
page.save()
local_main_lang_page.save()
except:
# TODO: log that creating translation failed
# TODO: log that page is now being deleted
page.delete()
return page | def attach_translated_content(self, local_main_lang_page,
content, locale) | Wrapper for attach_page
Creates the content
Then attaches a language relation from the main language page to the
newly created Page
Note: we get the parent from the main language page | 4.359258 | 2.69835 | 1.615527 |
def copy_page_and_children(self, foreign_id, parent_id, depth=0):
    '''
    Recursively copies over pages, their translations, and child pages.

    :param foreign_id: id of the page on the foreign wagtail instance
    :param parent_id: id of the local parent under which the copy is created
    :param depth: recursion depth, used for log indentation
    '''
    url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id)
    self.log(ACTION, "Requesting Data", {"url": url}, depth)
    try:
        # TODO: create a robust wrapper around this functionality
        response = requests.get(url)
        content = json.loads(response.content)
    except Exception as e:
        self.log(ERROR, "Requesting Data - abandoning copy",
                 {"url": url, "exception": e}, depth)
        return None
    parent = Page.objects.get(id=parent_id).specific
    page = None
    try:
        self.log(ACTION, "Create Page", {"url": url}, depth)
        page = self.attach_page(parent, content)
        if page:
            self.log(SUCCESS, "Create Page",
                     {"url": url,
                      "page title": page.title.encode('utf-8')},
                     depth)
    except PageNotImportable as e:
        # BUG FIX: the original popped "message" twice; the second pop
        # raised KeyError. Pop once and log the remaining details.
        details = e.message
        message = details.pop("message")
        self.log(WARNING, message, details, depth)
        return None
    if page:
        # create translations
        if content["meta"]["translations"]:
            for translation_obj in content["meta"]["translations"]:
                _url = "{}/api/v2/pages/{}/".format(self.base_url,
                                                    translation_obj["id"])
                # TODO: create a robust wrapper around this functionality
                _response = requests.get(_url)
                self.log(
                    ACTION,
                    "Getting translated content",
                    {"url": _url}, depth)
                if _response.content:
                    _content = json.loads(_response.content)
                    if ("locale" in translation_obj and
                            translation_obj["locale"]):
                        self.attach_translated_content(
                            page, _content, translation_obj["locale"])
                    else:
                        self.log(
                            ERROR,
                            "locale is null",
                            {"url": _url, }, depth)
                else:
                    self.log(
                        ERROR,
                        "Getting translated content",
                        {"url": _url}, depth)
        main_language_child_ids = content["meta"]["main_language_children"]
        # recursively iterate through child nodes
        if main_language_child_ids:
            for main_language_child_id in main_language_child_ids:
                try:
                    self.copy_page_and_children(
                        foreign_id=main_language_child_id,
                        parent_id=page.id, depth=depth + 1)
                except Exception as e:
                    # pass depth so the entry lines up with its siblings
                    # (the original omitted it here)
                    self.log(ERROR, "Copying Children",
                             {"url": url, "exception": e}, depth)
def copy_children(self, foreign_id, existing_node):
    '''
    Initiates copying of tree, with existing_node acting as root.
    '''
    url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id)
    self.log(
        ACTION,
        "Copying Children",
        {"existing node type": str(type(existing_node))})
    # TODO: create a robust wrapper around this functionality
    try:
        self.log(ACTION, "Requesting Data", {"url": url})
        content = json.loads(requests.get(url).content)
        self.log(SUCCESS, "Data Fetched Successfully", {"url": url})
        child_ids = content["meta"]["main_language_children"]
        if not child_ids:
            self.log(SUCCESS, "No children to copy")
            return
        for child_id in child_ids:
            self.copy_page_and_children(
                foreign_id=child_id,
                parent_id=existing_node.id, depth=1)
    except Exception as e:
        self.log(ERROR, "Copying Children", {"url": url, "exception": e})
def get_email_logs(self):
    '''
    Returns a string representation of the recorded logs.
    Only errors and warnings are included, to keep email output short.
    '''
    return "".join(
        self.format_message(**entry)
        for entry in self.record
        if entry["log_type"] in (ERROR, WARNING)
    )
Only displays errors and warnings in the email logs
to avoid being verbose | 11.427226 | 3.80653 | 3.002006 |
def chain_to_bitcoin_network(chain):
    """Used for bitcoin.SelectParams.

    :param chain:
    :return: the bitcoin netcode for the given chain
    :raises UnknownChainError: when the chain has no bitcoin netcode
    """
    netcode_by_chain = {
        Chain.bitcoin_mainnet: SYS_NETWORK_BITCOIN_MAINNET,
        Chain.bitcoin_testnet: SYS_NETWORK_BITCOIN_TESTNET,
        Chain.bitcoin_regtest: SYS_NETWORK_BITCOIN_REGTEST,
    }
    if chain in netcode_by_chain:
        return netcode_by_chain[chain]
    name = chain.name if chain else '<NULL>'
    message = 'This chain cannot be converted to a bitcoin netcode; chain=' + name
    raise UnknownChainError(message)
:param chain:
:return: | 2.955779 | 3.151276 | 0.937963 |
def ensure_dirs(filename):
    """Make sure the directories exist for `filename`.

    :param filename: path whose parent directories should exist
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok avoids the check-then-create race of the original
        # `os.path.exists` guard (another process may create it in between)
        os.makedirs(dirname, exist_ok=True)
def get_certificate(self, certificate_uid):
    """Returns a certificate model. Propagates KeyError if key isn't found.

    :param certificate_uid:
    :return:
    """
    return model.to_certificate_model(
        self.get_certificate_json(certificate_uid))
:param certificate_uid:
:return: | 4.645041 | 5.220246 | 0.889813 |
def get_certificate_json(self, certificate_uid):
    """Returns certificate as json. Propagates KeyError if key isn't found.

    :param certificate_uid: a urn:uuid:..., an http URL, or a bare uid
    :return:
    """
    # Normalize the uid: strip the urn prefix or take the last URL segment.
    if certificate_uid.startswith(URN_UUID_PREFIX):
        uid = certificate_uid[len(URN_UUID_PREFIX):]
    elif certificate_uid.startswith('http'):
        uid = certificate_uid[certificate_uid.rindex('/') + 1:]
    else:
        uid = certificate_uid
    logging.debug('Retrieving certificate for uid=%s', uid)
    raw_bytes = self._get_certificate_raw(uid)
    logging.debug('Found certificate for uid=%s', uid)
    return helpers.certificate_bytes_to_json(raw_bytes)
:param certificate_uid:
:return: | 2.755739 | 2.846839 | 0.968 |
def _get_certificate_raw(self, certificate_uid):
    """Returns certificate as raw bytes. Per kvstore contract, raises a
    KeyError if key isn't found.

    :param certificate_uid:
    :return:
    """
    return self.kv_store.get(certificate_uid_to_filename(certificate_uid))
:param certificate_uid:
:return: | 5.727064 | 5.364632 | 1.06756 |
def get_certificate(self, certificate_uid):
    """Returns a certificate model; raises KeyError when not found.

    V1 certificates also need the raw bytes, since their hash is computed
    over the binary content.

    :param certificate_uid:
    :return:
    """
    logging.debug('Retrieving certificate for uid=%s', certificate_uid)
    if not model.is_v1_uid(certificate_uid):
        return super(V1AwareCertificateStore, self).get_certificate(certificate_uid)
    # else it's V1.1 (if not valid, it will throw)
    certificate = self._find_certificate_metadata(uid=certificate_uid)
    if not certificate:
        message = 'Certificate metadata not found for certificate uid=%s' % certificate_uid
        logging.error(message)
        raise KeyError(message)
    raw_bytes = self._get_certificate_raw(certificate_uid)
    certificate_json = helpers.certificate_bytes_to_json(raw_bytes)
    return model.to_certificate_model(certificate_json, certificate['txid'], raw_bytes)
KeyError if not found
:param certificate_uid:
:return: | 4.272487 | 4.217535 | 1.01303 |
def _find_certificate_metadata(self, uid=None):
    """Find a certificate document by its uid.

    :param uid: certificate uid
    :return: certificate from the certificates collection, or None
    """
    return self.db.certificates.find_one({'uid': uid})
:param uid: certificate uid
:return: certificate from certificates collection | 5.216149 | 5.527284 | 0.943709 |
def run(self, cmd):
    """Execute a git command against this repository.

    :param cmd: list of git arguments (without the leading ``git``)
    :return: decoded stdout of the command, or None if it failed or
        produced no output
    """
    cmd = ['git', '--git-dir=%s' % self.path] + cmd
    print("cmd list", cmd)
    print("cmd", ' '.join(cmd))
    res = None
    try:
        res = subprocess.check_output(cmd)
    except (subprocess.CalledProcessError, OSError):
        # was a bare `except BaseException: pass` which also swallowed
        # KeyboardInterrupt/SystemExit; only command failures are expected
        pass
    if res:
        # bytes.decode() already defaults to utf-8, so the original's
        # UnicodeDecodeError fallback retried the same codec; replace
        # undecodable bytes instead of crashing on odd commit encodings
        res = res.decode('utf-8', errors='replace')
    return res
def update(self):
    """Get a repository git or update it."""
    if not os.path.isdir(self.path):
        os.makedirs(self.path)
    # A missing refs dir means the bare clone has not been made yet.
    if not os.path.isdir(os.path.join(self.path, 'refs')):
        subprocess.check_output([
            'git', 'clone', '--bare', self.repo_git, self.path
        ])
    self.run(['gc', '--auto', '--prune=all'])
    self.run(['fetch', '-p', 'origin', '+refs/heads/*:refs/heads/*'])
    # github support
    self.run(['fetch', 'origin', '+refs/pull/*/head:refs/pull/*'])
    # gitlab support
    self.run([
        'fetch', 'origin', '+refs/merge-requests/*/head:refs/pull/*'])
def oauth_session(request, state=None, token=None):
    """Constructs the OAuth2 session object."""
    if settings.DISCORD_REDIRECT_URI is not None:
        redirect_uri = settings.DISCORD_REDIRECT_URI
    else:
        redirect_uri = request.build_absolute_uri(
            reverse('discord_bind_callback'))
    if settings.DISCORD_EMAIL_SCOPE:
        scope = ['email', 'guilds.join']
    else:
        scope = ['identity', 'guilds.join']
    return OAuth2Session(settings.DISCORD_CLIENT_ID,
                         redirect_uri=redirect_uri,
                         scope=scope,
                         token=token,
                         state=state)
def copy_path(self, path):
    """Copy a file or directory into the current work path.

    :param path: source path (may contain env vars and ``~``)
    :return: destination path relative to ``self.curr_work_path``
    """
    source = os.path.expandvars(os.path.expanduser(path))
    target = os.path.expandvars(os.path.expanduser(
        os.path.join(self.curr_work_path, os.path.basename(source))))
    # Replace any stale copy of a directory at the destination.
    if os.path.isdir(target):
        shutil.rmtree(target)
    if os.path.isfile(source):
        shutil.copy(source, target)
    elif os.path.isdir(source):
        try:
            shutil.copytree(source, target)
        except shutil.Error:
            pass  # There are permissions errors to copy
    else:
        raise UserWarning(
            "Just directory or file is supported to copy [%s]" % source)
    return os.path.relpath(target, self.curr_work_path)
def create_audio_mp3_profile(apps, schema_editor):
    """Create the audio_mp3 profile if it does not already exist."""
    profile_model = apps.get_model('edxval', 'Profile')
    profile_model.objects.get_or_create(profile_name=AUDIO_MP3_PROFILE)
def delete_audio_mp3_profile(apps, schema_editor):
    """Delete the audio_mp3 profile (reverse migration)."""
    profile_model = apps.get_model('edxval', 'Profile')
    profile_model.objects.filter(profile_name=AUDIO_MP3_PROFILE).delete()
def generate_sjson_from_srt(srt_subs):
    """Convert SubRip (*.srt) subs into the "SJSON" dict representation.

    Arguments:
        srt_subs(SubRip): "SRT" subs object
    Returns:
        Subs converted to "SJSON" format.
    """
    return {
        'start': [sub.start.ordinal for sub in srt_subs],
        'end': [sub.end.ordinal for sub in srt_subs],
        # newlines inside a cue are flattened to spaces
        'text': [sub.text.replace('\n', ' ') for sub in srt_subs],
    }
Arguments:
srt_subs(SubRip): "SRT" subs object
Returns:
Subs converted to "SJSON" format. | 1.91231 | 2.062053 | 0.927382 |
def generate_srt_from_sjson(sjson_subs):
    """Generate a SubRip (*.srt) transcript from "SJSON" subs.

    Arguments:
        sjson_subs (dict): `sjson` subs.
    Returns:
        Subtitles in SRT format; an empty string when the three parallel
        lists ('start', 'end', 'text') differ in length.
    """
    starts = sjson_subs['start']
    ends = sjson_subs['end']
    texts = sjson_subs['text']
    if not len(starts) == len(ends) == len(texts):
        return ''
    output = ''
    # zip/enumerate instead of the original `for i in range(len(...))`
    for index, (start, end, text) in enumerate(zip(starts, ends, texts)):
        item = SubRipItem(
            index=index,
            start=SubRipTime(milliseconds=start),
            end=SubRipTime(milliseconds=end),
            text=text
        )
        output += six.text_type(item)
        output += '\n'
    return output
Arguments:
sjson_subs (dict): `sjson` subs.
Returns:
Subtitles in SRT format. | 2.53467 | 2.539437 | 0.998123 |
def convert(cls, content, input_format, output_format):
    """Convert transcript `content` from `input_format` to `output_format`.

    Arguments:
        content: Transcript content byte-stream.
        input_format: Input transcript format ('srt' or 'sjson').
        output_format: Output transcript format ('srt' or 'sjson').
    Raises:
        TranscriptsGenerationException: On invalid srt content during
        conversion from srt to sjson.
    """
    assert input_format in ('srt', 'sjson')
    assert output_format in ('srt', 'sjson')
    # utf-8-sig also strips a leading byte order mark (BOM) if present
    content = content.decode('utf-8-sig')
    if input_format == output_format:
        return content
    if input_format == 'srt' and output_format == 'sjson':
        try:
            # ERROR_RAISE makes pysrt raise on malformed input instead
            # of silently skipping bad cues
            srt_subs = SubRipFile.from_string(
                content, error_handling=SubRipFile.ERROR_RAISE)
        except Error as ex:  # Base exception from pysrt
            raise TranscriptsGenerationException(text_type(ex))
        return json.dumps(cls.generate_sjson_from_srt(srt_subs))
    if input_format == 'sjson' and output_format == 'srt':
        return cls.generate_srt_from_sjson(json.loads(content))
Arguments:
content: Transcript content byte-stream.
input_format: Input transcript format.
output_format: Output transcript format.
Accepted input formats: sjson, srt.
Accepted output format: srt, sjson.
Raises:
TranscriptsGenerationException: On parsing the invalid srt
content during conversion from srt to sjson. | 4.065212 | 3.767927 | 1.078899 |
def create_hls_profile(apps, schema_editor):
    """Create the hls profile if it does not already exist."""
    profile_model = apps.get_model("edxval", "Profile")
    profile_model.objects.get_or_create(profile_name=HLS_PROFILE)
def delete_hls_profile(apps, schema_editor):
    """Delete the hls profile (reverse migration)."""
    profile_model = apps.get_model("edxval", "Profile")
    profile_model.objects.filter(profile_name=HLS_PROFILE).delete()
def create_default_profiles(apps, schema_editor):
    """Add every profile listed in DEFAULT_PROFILES."""
    profile_model = apps.get_model("edxval", "Profile")
    for name in DEFAULT_PROFILES:
        profile_model.objects.get_or_create(profile_name=name)
def delete_default_profiles(apps, schema_editor):
    """Remove every profile listed in DEFAULT_PROFILES."""
    profile_model = apps.get_model("edxval", "Profile")
    profile_model.objects.filter(profile_name__in=DEFAULT_PROFILES).delete()
def validate(self, data):
    """Validates the transcript data: the referenced video must exist."""
    video_id = self.context.get('video_id')
    video = Video.get_or_none(edx_video_id=video_id)
    if video is None:
        raise serializers.ValidationError(
            'Video "{video_id}" is not valid.'.format(video_id=video_id))
    data.update(video=video)
    return data
def to_internal_value(self, data):
    """Convert data into a (CourseVideo, image filename) tuple."""
    course_id = data
    course_video = image = ''
    if data:
        if isinstance(data, dict):
            # exactly one {course_id: image} pair is expected
            ((course_id, image),) = data.items()
        course_video = CourseVideo(course_id=course_id)
        course_video.full_clean(exclude=['video'])
    return course_video, image
def validate(self, data):
    """Check that the video data is valid."""
    if data is not None and not isinstance(data, dict):
        raise serializers.ValidationError("Invalid data")
    try:
        profiles = [encoded["profile"] for encoded in data.get("encoded_videos", [])]
        if len(set(profiles)) != len(profiles):
            raise serializers.ValidationError("Invalid data: duplicate profiles")
    except KeyError:
        raise serializers.ValidationError("profile required for deserializing")
    except TypeError:
        raise serializers.ValidationError("profile field needs to be a profile_name (str)")
    # Clean course_video list from any invalid (falsy) entries.
    data['courses'] = [
        (course_video, image)
        for course_video, image in data.get('courses', [])
        if course_video
    ]
    return data
def create(self, validated_data):
    """Create the video and its nested resources."""
    course_entries = validated_data.pop("courses", [])
    encoding_entries = validated_data.pop("encoded_videos", [])
    video = Video.objects.create(**validated_data)
    EncodedVideo.objects.bulk_create(
        EncodedVideo(video=video, **encoding)
        for encoding in encoding_entries
    )
    # The CourseSerializer has already converted the course data into
    # CourseVideo models, so just point them at the video and save.
    # A VideoImage is also created when an image filename is present.
    for course_video, image_name in course_entries:
        course_video.video = video
        course_video.save()
        if image_name:
            VideoImage.create_or_update(course_video, image_name)
    return video
def update(self, instance, validated_data):
    """Update an existing video resource in place."""
    instance.status = validated_data["status"]
    instance.client_video_id = validated_data["client_video_id"]
    instance.duration = validated_data["duration"]
    instance.save()
    # Replace the full set of encoded videos with the submitted ones.
    instance.encoded_videos.all().delete()
    EncodedVideo.objects.bulk_create(
        EncodedVideo(video=instance, **encoding)
        for encoding in validated_data.get("encoded_videos", [])
    )
    # NOTE: for backwards compatibility with the DRF v2 behavior,
    # existing course videos are NOT deleted during the update.
    # A VideoImage is also updated when an image filename is present.
    for course_video, image_name in validated_data.get("courses", []):
        course_video.video = instance
        course_video.save()
        if image_name:
            VideoImage.create_or_update(course_video, image_name)
    return instance
def get_video_image_storage():
    """Return the configured django storage backend for video images."""
    if not hasattr(settings, 'VIDEO_IMAGE_SETTINGS'):
        # during edx-platform loading this function is called before the
        # settings are ready; fall back to the default storage class
        # (FileSystemStorage)
        return get_storage_class()()
    image_settings = settings.VIDEO_IMAGE_SETTINGS
    storage_cls = get_storage_class(image_settings.get('STORAGE_CLASS'))
    return storage_cls(**image_settings.get('STORAGE_KWARGS', {}))
def get_video_transcript_storage():
    """Return the configured django storage backend for video transcripts."""
    if not hasattr(settings, 'VIDEO_TRANSCRIPTS_SETTINGS'):
        # during edx-platform loading this function is called before the
        # settings are ready; fall back to the default storage class
        # (FileSystemStorage)
        return get_storage_class()()
    transcript_settings = settings.VIDEO_TRANSCRIPTS_SETTINGS
    storage_cls = get_storage_class(transcript_settings.get('STORAGE_CLASS'))
    return storage_cls(**transcript_settings.get('STORAGE_KWARGS', {}))
def create_file_in_fs(file_data, file_name, file_system, static_dir):
    """Write ``file_data`` to ``file_name`` under ``static_dir`` in the
    given file system.

    Arguments:
        file_data (str): Data to store into the file.
        file_name (str): File name of the file to be created.
        file_system (OSFS): Import file system.
        static_dir (str): The directory to store the transcript file in.
    """
    target_path = combine(static_dir, file_name)
    with file_system.open(target_path, 'wb') as out:
        out.write(file_data.encode('utf-8'))
Arguments:
file_data (str): Data to store into the file.
file_name (str): File name of the file to be created.
file_system (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file. | 2.850372 | 3.448669 | 0.826514 |
def get_transcript_format(transcript_content):
    """Return the detected format of a transcript's content.

    Arguments:
        transcript_content (str): Transcript file content.
    Returns:
        TranscriptFormat.SRT when the content is not JSON and parses as
        SRT with at least one cue; TranscriptFormat.SJSON otherwise.
    """
    try:
        # the parsed value is not needed -- only whether it parses
        # (the original bound it to an unused local)
        json.loads(transcript_content)
    except ValueError:
        # Not JSON: try SRT. ERROR_RAISE propagates the exception if the
        # content is not valid SRT either.
        srt_subs = SubRipFile.from_string(
            transcript_content, error_handling=SubRipFile.ERROR_RAISE)
        if len(srt_subs) > 0:
            return TranscriptFormat.SRT
    return TranscriptFormat.SJSON
Arguments:
transcript_content (str): Transcript file content. | 5.928598 | 6.100038 | 0.971895 |
def get_object(self):
    """Returns an object instance that should be used for detail views."""
    queryset = self.get_queryset()           # Get the base queryset
    queryset = self.filter_queryset(queryset)  # Apply any filter backends
    # Build the lookup with a dict comprehension; the original's manual
    # loop shadowed the builtin `filter`.
    lookup = {field: self.kwargs[field] for field in self.lookup_fields}
    return get_object_or_404(queryset, **lookup)
def post(self, request):
    """Create a video transcript instance with the given information.

    Arguments:
        request: A WSGI request.
    """
    attrs = ('video_id', 'name', 'language_code', 'provider', 'file_format')
    missing = [attr for attr in attrs if attr not in request.data]
    if missing:
        # `Logger.warn` is a deprecated alias of `warning`
        LOGGER.warning(
            '[VAL] Required transcript params are missing. %s', ' and '.join(missing)
        )
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data=dict(message=u'{missing} must be specified.'.format(missing=' and '.join(missing)))
        )
    video_id = request.data['video_id']
    language_code = request.data['language_code']
    transcript_name = request.data['name']
    provider = request.data['provider']
    file_format = request.data['file_format']
    # Reject unsupported formats and providers with explicit messages.
    supported_formats = sorted(dict(TranscriptFormat.CHOICES).keys())
    if file_format not in supported_formats:
        message = (
            u'"{format}" transcript file type is not supported. Supported formats are "{supported_formats}"'
        ).format(format=file_format, supported_formats=supported_formats)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})
    supported_providers = sorted(dict(TranscriptProviderType.CHOICES).keys())
    if provider not in supported_providers:
        message = (
            u'"{provider}" provider is not supported. Supported transcription providers are "{supported_providers}"'
        ).format(provider=provider, supported_providers=supported_providers)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})
    # Only create a transcript when none exists for this video/language.
    transcript = VideoTranscript.get_or_none(video_id, language_code)
    if transcript is None:
        create_or_update_video_transcript(video_id, language_code, metadata={
            'provider': provider,
            'file_name': transcript_name,
            'file_format': file_format
        })
        response = Response(status=status.HTTP_200_OK)
    else:
        message = (
            u'Can not override existing transcript for video "{video_id}" and language code "{language}".'
        ).format(video_id=video_id, language=language_code)
        response = Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})
    return response
Arguments:
request: A WSGI request. | 2.20493 | 2.190199 | 1.006726 |
def patch(self, request):
    """Update the status of a video."""
    required = ('edx_video_id', 'status')
    missing = [attr for attr in required if attr not in request.data]
    if missing:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{missing}" params must be specified.'.format(missing=' and '.join(missing))}
        )
    edx_video_id = request.data['edx_video_id']
    video_status = request.data['status']
    if video_status not in VALID_VIDEO_STATUSES:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{status}" is not a valid Video status.'.format(status=video_status)}
        )
    try:
        video = Video.objects.get(edx_video_id=edx_video_id)
    except Video.DoesNotExist:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={
                'message': u'Video is not found for specified edx_video_id: {edx_video_id}'.format(
                    edx_video_id=edx_video_id
                )
            }
        )
    video.status = video_status
    video.save()
    return Response(status=status.HTTP_200_OK, data={})
def post(self, request):
    """Update a course video image instance with auto generated image names.

    Expects ``course_id``, ``edx_video_id`` and ``generated_images`` in the
    request data; returns 400 with a message when any is missing, the
    CourseVideo does not exist, or the image data fails validation.
    """
    attrs = ('course_id', 'edx_video_id', 'generated_images')
    missing = [attr for attr in attrs if attr not in request.data]
    if missing:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={
                'message': u'{missing} must be specified to update a video image.'.format(
                    missing=' and '.join(missing)
                )
            }
        )
    course_id = request.data['course_id']
    edx_video_id = request.data['edx_video_id']
    generated_images = request.data['generated_images']
    try:
        course_video = CourseVideo.objects.select_related('video_image').get(
            course_id=six.text_type(course_id), video__edx_video_id=edx_video_id
        )
    except CourseVideo.DoesNotExist:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'CourseVideo not found for course_id: {course_id}'.format(course_id=course_id)}
        )
    try:
        # stores the generated image names on the VideoImage record
        VideoImage.create_or_update(course_video, generated_images=generated_images)
    except ValidationError as ex:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': str(ex)}
        )
    return Response()
def post(self, request):
    """Retrieve video IDs that are missing HLS encode profiles.

    Two request shapes are supported:
    - ``{'courses': [...]}``: all matching video ids across those courses.
    - ``{'batch_size': N, 'offset': M}``: a paginated batch across all
      videos, returned with ``total``, ``offset`` and ``batch_size``.
    """
    courses = request.data.get('courses')
    batch_size = request.data.get('batch_size', 50)
    offset = request.data.get('offset', 0)
    if courses:
        # only completed videos without an 'hls' encode, de-duplicated
        videos = (CourseVideo.objects.select_related('video')
                  .prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
                  .filter(course_id__in=courses, video__status='file_complete')
                  .exclude(video__encoded_videos__profile__profile_name='hls')
                  .values_list('video__edx_video_id', flat=True)
                  .distinct())
        response = Response({'videos': videos}, status=status.HTTP_200_OK)
    else:
        # ordered so that offset-based pagination is stable across calls
        videos = (Video.objects.prefetch_related('encoded_videos', 'encoded_videos__profile')
                  .filter(status='file_complete')
                  .exclude(encoded_videos__profile__profile_name='hls')
                  .order_by('id')
                  .values_list('edx_video_id', flat=True)
                  .distinct())
        response = Response(
            {
                'videos': videos[offset: offset+batch_size],
                'total': videos.count(),
                'offset': offset,
                'batch_size': batch_size,
            },
            status=status.HTTP_200_OK
        )
    return response
1. If we want a batch of video ids which are missing HLS profile irrespective of their courses, the request
data should be in following format:
{
'batch_size': 50,
'offset': 0
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ... , video_id50],
'total': 300,
'offset': 50,
'batch_size': 50
}
2. If we want all the videos which are missing HLS profiles in a set of specific courses, the request data
should be in following format:
{
'courses': [
'course_id1',
'course_id2',
...
]
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ...]
} | 2.253955 | 1.897743 | 1.187703 |
def put(self, request):
    """Update a single encode profile for a given video.

    Expects ``edx_video_id``, ``profile`` and ``encode_data`` in the
    request data; replaces any existing encode for that profile.
    """
    edx_video_id = request.data['edx_video_id']
    profile_name = request.data['profile']
    encode_data = request.data['encode_data']
    video = Video.objects.get(edx_video_id=edx_video_id)
    profile = Profile.objects.get(profile_name=profile_name)
    # Delete any existing encode for this profile, then recreate it with
    # the updated data.
    EncodedVideo.objects.filter(video=video, profile=profile).delete()
    EncodedVideo.objects.create(video=video, profile=profile, **encode_data)
    return Response(status=status.HTTP_200_OK)
Example request data:
{
'edx_video_id': '1234'
'profile': 'hls',
'encode_data': {
'url': 'foo.com/qwe.m3u8'
'file_size': 34
'bitrate': 12
}
} | 3.202582 | 2.262818 | 1.415307 |
logger.info('VAL: Video created with id [%s] and status [%s]', video.edx_video_id, video.status)
else:
logger.info('VAL: Status changed to [%s] for video [%s]', video.status, video.edx_video_id) | def video_status_update_callback(sender, **kwargs): # pylint: disable=unused-argument
video = kwargs['instance']
if kwargs['created'] | Log video status for an existing video instance | 4.389489 | 3.820488 | 1.148934 |
def create_with_validation(cls, *args, **kwargs):
    """Factory method that creates and validates the model object before it is saved."""
    instance = cls(*args, **kwargs)
    instance.full_clean()
    instance.save()
    return instance
def get_or_create_with_validation(cls, *args, **kwargs):
    """Factory method that gets or creates-and-validates the model object
    before it is saved.

    Similar to the get_or_create method on Models, it returns a tuple of
    (object, created), where created is a boolean specifying whether an
    object was created.
    """
    try:
        existing = cls.objects.get(*args, **kwargs)
    except cls.DoesNotExist:
        return cls.create_with_validation(*args, **kwargs), True
    return existing, False
Similar to the get_or_create method on Models, it returns a tuple of (object, created),
where created is a boolean specifying whether an object was created. | 2.262226 | 2.122427 | 1.065868 |
def get_or_none(cls, **filter_kwargs):
    """Returns a video matching the filters, or None when absent."""
    try:
        return cls.objects.get(**filter_kwargs)
    except cls.DoesNotExist:
        return None
def by_youtube_id(cls, youtube_id):
    """Look up videos by their youtube encode url."""
    return cls.objects.filter(
        encoded_videos__profile__profile_name='youtube',
        encoded_videos__url=youtube_id
    ).prefetch_related('encoded_videos', 'courses')
def get_prep_value(self, value):
    """Converts a list to its json representation to store in database as text."""
    if value and not isinstance(value, list):
        raise ValidationError(u'ListField value {} is not a list.'.format(value))
    validated = self.validate_list(value) or []
    return json.dumps(validated)
def to_python(self, value):
    """Converts the value into a list."""
    if not value:
        value = []
    # Lists are validated directly; anything else must deserialize to one.
    if isinstance(value, list):
        return self.validate_list(value)
    try:
        py_list = json.loads(value)
        if not isinstance(py_list, list):
            raise TypeError
        self.validate_list(py_list)
    except (ValueError, TypeError):
        raise ValidationError(u'Must be a valid list of strings.')
    return py_list
def validate_list(self, value):
    """Validate data before saving to database.

    Arguments:
        value(list): list to be validated
    Returns:
        list if validation is successful
    Raises:
        ValidationError
    """
    if len(value) > self.max_items:
        raise ValidationError(
            u'list must not contain more than {max_items} items.'.format(max_items=self.max_items)
        )
    # idiomatic `not all(...)` instead of the original `all(...) is False`
    if not all(isinstance(item, six.string_types) for item in value):
        raise ValidationError(u'list must only contain strings.')
    return value
Arguemtns:
value(list): list to be validated
Returns:
list if validation is successful
Raises:
ValidationError | 2.722097 | 3.091713 | 0.880449 |
def create_or_update(cls, course_video, file_name=None, image_data=None, generated_images=None):
    """Create a VideoImage object for a CourseVideo.

    NOTE: If `image_data` is None then `file_name` value will be used as it is,
    otherwise a new file name is constructed based on uuid and extension from
    `file_name` value. `image_data` will be None in case of course re-run and
    export. `generated_images` list contains names of images auto generated by
    VEDA. If an image is not already set then the first image name from
    `generated_images` list will be used.

    Arguments:
        course_video (CourseVideo): CourseVideo instance
        file_name (str): File name of the image
        image_data (InMemoryUploadedFile): Image data to be saved.
        generated_images (list): auto generated image names

    Returns:
        Returns a tuple of (video_image, created).
    """
    video_image, created = cls.objects.get_or_create(course_video=course_video)
    if image_data:
        # Delete the existing image only if this image is not used by anyone
        # else. This is necessary because after a course re-run, a video in
        # the original course and the new course points to the same image, so
        # when we update an image in the new course and delete the existing
        # image, this would delete the image from the original course as
        # well, leaving that video with no image.
        if not created and VideoImage.objects.filter(image=video_image.image).count() == 1:
            video_image.image.delete()
        with closing(image_data) as image_file:
            # new name: random uuid + the original file's extension
            file_name = '{uuid}{ext}'.format(uuid=uuid4().hex, ext=os.path.splitext(file_name)[1])
            try:
                video_image.image.save(file_name, image_file)
            except Exception:  # pylint: disable=broad-except
                logger.exception(
                    'VAL: Video Image save failed to storage for course_id [%s] and video_id [%s]',
                    course_video.course_id,
                    course_video.video.edx_video_id
                )
                raise
    else:
        if generated_images:
            video_image.generated_images = generated_images
            if not video_image.image.name:
                # no image set yet: fall back to the first generated name
                file_name = generated_images[0]
                video_image.image.name = file_name
    video_image.save()
    return video_image, created
NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise
a new file name is constructed based on uuid and extension from `file_name` value.
`image_data` will be None in case of course re-run and export. `generated_images` list
contains names of images auto generated by VEDA. If an image is not already set then first
image name from `generated_images` list will be used.
Arguments:
course_video (CourseVideo): CourseVideo instance
file_name (str): File name of the image
image_data (InMemoryUploadedFile): Image data to be saved.
generated_images (list): auto generated image names
Returns:
Returns a tuple of (video_image, created). | 3.849906 | 3.744203 | 1.028231 |
def filename(self):
    """Readable filename for a transcript, built from the video's client id
    (without its extension), the language code and the file format."""
    client_id, __ = os.path.splitext(self.video.client_video_id)
    name = u'{name}-{language}.{format}'.format(
        name=client_id,
        language=self.language_code,
        format=self.file_format
    )
    # stray newlines in the client id would break the filename
    return name.replace('\n', ' ')
def get_or_none(cls, video_id, language_code):
    """Returns a transcript object if found, or None otherwise.

    Arguments:
        video_id(unicode): video id to which transcript may be associated
        language_code(unicode): language of the requested transcript
    """
    try:
        return cls.objects.get(video__edx_video_id=video_id, language_code=language_code)
    except cls.DoesNotExist:
        return None
Arguments:
video_id(unicode): video id to which transcript may be associated
language_code(unicode): language of the requested transcript | 2.628678 | 2.686546 | 0.97846 |
def create(cls, video, language_code, file_format, content, provider):
    """Create a Video Transcript.

    Arguments:
        video(Video): Video data model object
        language_code(unicode): A language code.
        file_format(unicode): Transcript file format.
        content(InMemoryUploadedFile): Transcript content.
        provider(unicode): Transcript provider.
    """
    transcript = cls(video=video, language_code=language_code,
                     file_format=file_format, provider=provider)
    with closing(content) as transcript_content:
        # stored under a random name with the format as extension
        file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=transcript.file_format)
        try:
            transcript.transcript.save(file_name, transcript_content)
            transcript.save()
        except Exception:
            logger.exception(
                '[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
                video.edx_video_id,
                language_code
            )
            raise
    return transcript
Arguments:
video(Video): Video data model object
language_code(unicode): A language code.
file_format(unicode): Transcript file format.
content(InMemoryUploadedFile): Transcript content.
provider(unicode): Transcript provider. | 3.382387 | 3.202554 | 1.056153 |
def create_or_update(cls, video, language_code, metadata, file_data=None):
    """Create or update a Transcript object.

    Arguments:
        video (Video): Video for which transcript is going to be saved.
        language_code (str): language code for (to be created/updated) transcript
        metadata (dict): A dict containing (to be overwritten) properties
        file_data (InMemoryUploadedFile): File data to be saved

    Returns:
        Returns a tuple of (video_transcript, created).
    """
    try:
        video_transcript = cls.objects.get(video=video, language_code=language_code)
        retrieved = True
    except cls.DoesNotExist:
        video_transcript = cls(video=video, language_code=language_code)
        retrieved = False
    # Only these whitelisted properties may be overwritten from metadata.
    for prop, value in six.iteritems(metadata):
        if prop in ['language_code', 'file_format', 'provider']:
            setattr(video_transcript, prop, value)
    transcript_name = metadata.get('file_name')
    try:
        if transcript_name:
            # an explicit name takes precedence over uploaded file data
            video_transcript.transcript.name = transcript_name
        elif file_data:
            with closing(file_data) as transcript_file_data:
                file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)
                video_transcript.transcript.save(file_name, transcript_file_data)
        video_transcript.save()
    except Exception:
        logger.exception(
            '[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
            video.edx_video_id,
            language_code
        )
        raise
    return video_transcript, not retrieved
Arguments:
video (Video): Video for which transcript is going to be saved.
language_code (str): language code for (to be created/updated) transcript
metadata (dict): A dict containing (to be overwritten) properties
file_data (InMemoryUploadedFile): File data to be saved
Returns:
Returns a tuple of (video_transcript, created). | 2.907943 | 2.800937 | 1.038204 |
def update_or_create(cls, org, provider, exists):
    """
    Update or create credentials state.

    Returns a tuple of (instance, created) straight from the ORM.
    """
    # `update_or_create` already returns the (instance, created) pair we need.
    return cls.objects.update_or_create(
        org=org,
        provider=provider,
        defaults={'exists': exists},
    )
def create_video(video_data):
    """
    Create a Video object in the database.

    Used to create Video objects whose children are EncodedVideo objects
    linked to Profile objects; an internal alternative to the HTTP API.
    The VideoSerializer deserializes the payload. Duplicate profile_names
    reject the entire creation; an unknown profile means no video is created.

    Args:
        video_data (dict): video attributes, e.g.::

            {
                url: api url to the video
                edx_video_id: ID of the video
                duration: Length of video in seconds
                client_video_id: client ID of video
                encoded_video: a list of EncodedVideo dicts
                    url: url of the video
                    file_size: size of the video in bytes
                    profile: ID of the profile
                courses: Courses associated with this video
                image: poster image file name for a particular course
            }

    Raises:
        ValCannotCreateError: the video cannot be created.

    Returns:
        The edx_video_id of the successfully created Video.
    """
    serializer = VideoSerializer(data=video_data)
    # Guard clause: bail out with the validation errors before touching the db.
    if not serializer.is_valid():
        raise ValCannotCreateError(serializer.errors)
    serializer.save()
    return video_data.get("edx_video_id")
def update_video(video_data):
    """
    Update a Video object in the database.

    Updates the Video identified by the edx_video_id in ``video_data``.

    Args:
        video_data (dict): video attributes, e.g.::

            {
                url: api url to the video
                edx_video_id: ID of the video
                duration: Length of video in seconds
                client_video_id: client ID of video
                encoded_video: a list of EncodedVideo dicts
                    url: url of the video
                    file_size: size of the video in bytes
                    profile: ID of the profile
                courses: Courses associated with this video
            }

    Raises:
        ValVideoNotFoundError: the video cannot be retrieved.
        ValCannotUpdateError: the video cannot be updated.

    Returns:
        The edx_video_id of the successfully updated Video.
    """
    edx_video_id = video_data.get("edx_video_id")

    # Guard clause: the target video must already exist.
    try:
        video = _get_video(edx_video_id)
    except Video.DoesNotExist:
        raise ValVideoNotFoundError(
            u"Video not found when trying to update video with edx_video_id: {0}".format(edx_video_id)
        )

    serializer = VideoSerializer(video, data=video_data)
    if not serializer.is_valid():
        raise ValCannotUpdateError(serializer.errors)
    serializer.save()
    return edx_video_id
def update_video_status(edx_video_id, status):
    """
    Update status for an existing video.

    Args:
        edx_video_id: ID of the video
        status: video status

    Raises:
        ValVideoNotFoundError: the video cannot be retrieved.
    """
    # Guard clause: translate the ORM miss into the api-level exception.
    try:
        video = _get_video(edx_video_id)
    except Video.DoesNotExist:
        raise ValVideoNotFoundError(
            u"Video not found when trying to update video status with edx_video_id: {0}".format(
                edx_video_id
            )
        )

    video.status = status
    video.save()
def get_transcript_credentials_state_for_org(org, provider=None):
    """
    Returns transcript credentials state for an org.

    Arguments:
        org (unicode): course organization
        provider (unicode): transcript provider

    Returns:
        dict: provider name to credential-existence map, e.g.::

            {u'Cielo24': True}
            {u'3PlayMedia': False, u'Cielo24': True}
    """
    # Start from all credentials for the org, then narrow by provider if given.
    credentials = ThirdPartyTranscriptCredentialsState.objects.filter(org=org)
    if provider:
        credentials = credentials.filter(provider=provider)

    state_by_provider = {}
    for credential in credentials:
        state_by_provider[credential.provider] = credential.exists
    return state_by_provider
def is_transcript_available(video_id, language_code=None):
    """
    Returns whether a transcript is available for a video.

    Arguments:
        video_id: an edx_video_id or an external_id extracted from external
            sources in a video component.
        language_code: it will be the language code of the requested
            transcript; if omitted, any language counts.
    """
    transcripts = VideoTranscript.objects.filter(video__edx_video_id=video_id)
    if language_code:
        transcripts = transcripts.filter(language_code=language_code)
    return transcripts.exists()
def get_video_transcript(video_id, language_code):
    """
    Get video transcript info.

    Arguments:
        video_id(unicode): an edx_video_id or an external video id extracted
            from external sources of a video component.
        language_code(unicode): language code of the requested transcript.

    Returns:
        Serialized transcript data, or None if no such transcript exists.
    """
    transcript = VideoTranscript.get_or_none(video_id=video_id, language_code=language_code)
    if transcript is None:
        return None
    return TranscriptSerializer(transcript).data
def get_video_transcript_data(video_id, language_code):
    """
    Get video transcript data.

    Arguments:
        video_id(unicode): An id identifying the Video.
        language_code(unicode): language code of the requested transcript.

    Returns:
        A dict containing transcript file name and its content, or None when
        no transcript exists for the video/language pair.
    """
    transcript = VideoTranscript.get_or_none(video_id, language_code)
    if transcript is None:
        return None

    try:
        return dict(file_name=transcript.filename, content=transcript.transcript.file.read())
    except Exception:
        # Log with context before propagating the storage/read failure.
        logger.exception(
            '[edx-val] Error while retrieving transcript for video=%s -- language_code=%s',
            video_id,
            language_code
        )
        raise
def get_available_transcript_languages(video_id):
    """
    Get available transcript languages.

    Arguments:
        video_id(unicode): An id identifying the Video.

    Returns:
        A list containing transcript language codes for the Video.
    """
    language_codes = VideoTranscript.objects.filter(
        video__edx_video_id=video_id
    ).values_list('language_code', flat=True)
    return list(language_codes)
def get_video_transcript_url(video_id, language_code):
    """
    Returns the video transcript url, or None if there is no transcript.

    Arguments:
        video_id: an edx_video_id or an external_id extracted from external
            sources in a video component.
        language_code: language code of a video transcript.
    """
    transcript = VideoTranscript.get_or_none(video_id, language_code)
    return transcript.url() if transcript else None
def create_video_transcript(video_id, language_code, file_format, content, provider=TranscriptProviderType.CUSTOM):
    """
    Create a video transcript.

    Arguments:
        video_id(unicode): An Id identifying the Video data model object.
        language_code(unicode): A language code.
        file_format(unicode): Transcript file format.
        content(InMemoryUploadedFile): Transcript content.
        provider(unicode): Transcript provider (defaults to 'custom').

    Raises:
        ValCannotCreateError: the serializer rejected the transcript data.
    """
    serializer = TranscriptSerializer(
        data={'provider': provider, 'language_code': language_code, 'file_format': file_format},
        context={'video_id': video_id},
    )
    # Guard clause: surface validation errors before saving any content.
    if not serializer.is_valid():
        raise ValCannotCreateError(serializer.errors)
    serializer.save(content=content)
    return serializer.data
def create_or_update_video_transcript(video_id, language_code, metadata, file_data=None):
    """
    Create or update a video transcript for an existing video.

    Arguments:
        video_id: it can be an edx_video_id or an external_id extracted from
            external sources in a video component.
        language_code: language code of a video transcript.
        metadata (dict): A dict containing (to be overwritten) properties.
        file_data (InMemoryUploadedFile): Transcript data to be saved for a
            course video.

    Raises:
        InvalidTranscriptFormat: ``file_format`` in metadata is unsupported.
        InvalidTranscriptProvider: ``provider`` in metadata is unsupported.

    Returns:
        The video transcript url, or None if the video does not exist.
    """
    # Keep only the whitelisted, non-empty properties.
    allowed_props = ('provider', 'language_code', 'file_name', 'file_format')
    metadata = {
        prop: value
        for prop, value in six.iteritems(metadata)
        if prop in allowed_props and value
    }

    # Membership tests directly against the choices dicts — no need to build
    # intermediate key lists.
    file_format = metadata.get('file_format')
    if file_format and file_format not in dict(TranscriptFormat.CHOICES):
        raise InvalidTranscriptFormat('{} transcript format is not supported'.format(file_format))

    provider = metadata.get('provider')
    if provider and provider not in dict(TranscriptProviderType.CHOICES):
        raise InvalidTranscriptProvider('{} transcript provider is not supported'.format(provider))

    try:
        # Video should be present in edxval in order to attach transcripts to it.
        video = Video.objects.get(edx_video_id=video_id)
        video_transcript, __ = VideoTranscript.create_or_update(video, language_code, metadata, file_data)
    except Video.DoesNotExist:
        return None

    return video_transcript.url()
def delete_video_transcript(video_id, language_code):
    """
    Delete transcript for an existing video.

    Arguments:
        video_id: id identifying the video to which the transcript is associated.
        language_code: language code of a video transcript.
    """
    transcript = VideoTranscript.get_or_none(video_id, language_code)
    if transcript is None:
        return

    # Remove the stored file first, then the database row.
    transcript.transcript.delete()
    transcript.delete()
    logger.info('Transcript is removed for video "%s" and language code "%s"', video_id, language_code)
def get_transcript_preferences(course_id):
    """
    Retrieves course wide transcript preferences.

    Arguments:
        course_id (str): course id

    Returns:
        Serialized transcript preference data, or None when the course has none.
    """
    try:
        preference = TranscriptPreference.objects.get(course_id=course_id)
    except TranscriptPreference.DoesNotExist:
        return None
    return TranscriptPreferenceSerializer(preference).data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.