_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q26800 | DirectoryPinger.ping_entry | train | def ping_entry(self, entry):
"""
Ping an entry to a directory.
"""
entry_url = '%s%s' % (self.ressources.site_url,
entry.get_absolute_url())
categories = '|'.join([c.title for c in entry.categories.all()])
try:
reply = self.server.weblogUpdates.extendedPing(
self.ressources.current_site.name,
self.ressources.blog_url, entry_url,
self.ressources.blog_feed, categories)
except Exception:
try:
reply = self.server.weblogUpdates.ping(
self.ressources.current_site.name,
self.ressources.blog_url, entry_url,
categories)
except Exception:
reply = {'message': '%s is an invalid directory.' %
self.server_name,
'flerror': True}
return reply | python | {
"resource": ""
} |
q26801 | ExternalUrlsPinger.run | train | def run(self):
"""
Ping external URLs in a Thread.
"""
logger = getLogger('zinnia.ping.external_urls')
socket.setdefaulttimeout(self.timeout)
external_urls = self.find_external_urls(self.entry)
external_urls_pingable = self.find_pingback_urls(external_urls)
for url, server_name in external_urls_pingable.items():
reply = self.pingback_url(server_name, url)
self.results.append(reply)
logger.info('%s : %s', url, reply)
socket.setdefaulttimeout(None) | python | {
"resource": ""
} |
q26802 | ExternalUrlsPinger.is_external_url | train | def is_external_url(self, url, site_url):
"""
Check if the URL is an external URL.
"""
url_splitted = urlsplit(url)
if not url_splitted.netloc:
return False
return url_splitted.netloc != urlsplit(site_url).netloc | python | {
"resource": ""
} |
q26803 | ExternalUrlsPinger.find_external_urls | train | def find_external_urls(self, entry):
"""
Find external URLs in an entry.
"""
soup = BeautifulSoup(entry.html_content, 'html.parser')
external_urls = [a['href'] for a in soup.find_all('a')
if self.is_external_url(
a['href'], self.ressources.site_url)]
return external_urls | python | {
"resource": ""
} |
q26804 | ExternalUrlsPinger.find_pingback_href | train | def find_pingback_href(self, content):
"""
Try to find LINK markups to pingback URL.
"""
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('link'):
dict_attr = dict(link.attrs)
if 'rel' in dict_attr and 'href' in dict_attr:
for rel_type in dict_attr['rel']:
if rel_type.lower() == PINGBACK:
return dict_attr.get('href') | python | {
"resource": ""
} |
q26805 | ExternalUrlsPinger.find_pingback_urls | train | def find_pingback_urls(self, urls):
"""
Find the pingback URL for each URLs.
"""
pingback_urls = {}
for url in urls:
try:
page = urlopen(url)
headers = page.info()
server_url = headers.get('X-Pingback')
if not server_url:
content_type = headers.get('Content-Type', '').split(
';')[0].strip().lower()
if content_type in ['text/html', 'application/xhtml+xml']:
server_url = self.find_pingback_href(
page.read(5 * 1024))
if server_url:
server_url_splitted = urlsplit(server_url)
if not server_url_splitted.netloc:
url_splitted = urlsplit(url)
server_url = '%s://%s%s' % (url_splitted.scheme,
url_splitted.netloc,
server_url)
pingback_urls[url] = server_url
except IOError:
pass
return pingback_urls | python | {
"resource": ""
} |
q26806 | ExternalUrlsPinger.pingback_url | train | def pingback_url(self, server_name, target_url):
"""
Do a pingback call for the target URL.
"""
try:
server = ServerProxy(server_name)
reply = server.pingback.ping(self.entry_url, target_url)
except (Error, socket.error):
reply = '%s cannot be pinged.' % target_url
return reply | python | {
"resource": ""
} |
q26807 | get_categories | train | def get_categories(context, template='zinnia/tags/categories.html'):
"""
Return the published categories.
"""
return {'template': template,
'categories': Category.published.all().annotate(
count_entries_published=Count('entries')),
'context_category': context.get('category')} | python | {
"resource": ""
} |
q26808 | get_categories_tree | train | def get_categories_tree(context, template='zinnia/tags/categories_tree.html'):
"""
Return the categories as a tree.
"""
return {'template': template,
'categories': Category.objects.all().annotate(
count_entries=Count('entries')),
'context_category': context.get('category')} | python | {
"resource": ""
} |
q26809 | get_authors | train | def get_authors(context, template='zinnia/tags/authors.html'):
"""
Return the published authors.
"""
return {'template': template,
'authors': Author.published.all().annotate(
count_entries_published=Count('entries')),
'context_author': context.get('author')} | python | {
"resource": ""
} |
q26810 | get_featured_entries | train | def get_featured_entries(number=5,
template='zinnia/tags/entries_featured.html'):
"""
Return the featured entries.
"""
return {'template': template,
'entries': Entry.published.filter(featured=True)[:number]} | python | {
"resource": ""
} |
q26811 | get_draft_entries | train | def get_draft_entries(number=5,
template='zinnia/tags/entries_draft.html'):
"""
Return the last draft entries.
"""
return {'template': template,
'entries': Entry.objects.filter(status=DRAFT)[:number]} | python | {
"resource": ""
} |
q26812 | get_popular_entries | train | def get_popular_entries(number=5, template='zinnia/tags/entries_popular.html'):
"""
Return popular entries.
"""
return {'template': template,
'entries': Entry.published.filter(
comment_count__gt=0).order_by(
'-comment_count', '-publication_date')[:number]} | python | {
"resource": ""
} |
q26813 | get_similar_entries | train | def get_similar_entries(context, number=5,
template='zinnia/tags/entries_similar.html'):
"""
Return similar entries.
"""
entry = context.get('entry')
if not entry:
return {'template': template, 'entries': []}
vectors = EntryPublishedVectorBuilder()
entries = vectors.get_related(entry, number)
return {'template': template,
'entries': entries} | python | {
"resource": ""
} |
q26814 | get_calendar_entries | train | def get_calendar_entries(context, year=None, month=None,
template='zinnia/tags/entries_calendar.html'):
"""
Return an HTML calendar of entries.
"""
if not (year and month):
day_week_month = (context.get('day') or
context.get('week') or
context.get('month'))
publication_date = getattr(context.get('object'),
'publication_date', None)
if day_week_month:
current_month = day_week_month
elif publication_date:
if settings.USE_TZ:
publication_date = timezone.localtime(publication_date)
current_month = publication_date.date()
else:
today = timezone.now()
if settings.USE_TZ:
today = timezone.localtime(today)
current_month = today.date()
current_month = current_month.replace(day=1)
else:
current_month = date(year, month, 1)
dates = list(map(
lambda x: settings.USE_TZ and timezone.localtime(x).date() or x.date(),
Entry.published.datetimes('publication_date', 'month')))
if current_month not in dates:
dates.append(current_month)
dates.sort()
index = dates.index(current_month)
previous_month = index > 0 and dates[index - 1] or None
next_month = index != len(dates) - 1 and dates[index + 1] or None
calendar = Calendar()
return {'template': template,
'next_month': next_month,
'previous_month': previous_month,
'calendar': calendar.formatmonth(
current_month.year,
current_month.month,
previous_month=previous_month,
next_month=next_month)} | python | {
"resource": ""
} |
q26815 | get_recent_comments | train | def get_recent_comments(number=5, template='zinnia/tags/comments_recent.html'):
"""
Return the most recent comments.
"""
# Using map(smart_text... fix bug related to issue #8554
entry_published_pks = map(smart_text,
Entry.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Entry)
comments = get_comment_model().objects.filter(
Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
content_type=content_type, object_pk__in=entry_published_pks,
is_public=True).order_by('-pk')[:number]
comments = comments.prefetch_related('content_object')
return {'template': template,
'comments': comments} | python | {
"resource": ""
} |
q26816 | get_recent_linkbacks | train | def get_recent_linkbacks(number=5,
template='zinnia/tags/linkbacks_recent.html'):
"""
Return the most recent linkbacks.
"""
entry_published_pks = map(smart_text,
Entry.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Entry)
linkbacks = get_comment_model().objects.filter(
content_type=content_type,
object_pk__in=entry_published_pks,
flags__flag__in=[PINGBACK, TRACKBACK],
is_public=True).order_by('-pk')[:number]
linkbacks = linkbacks.prefetch_related('content_object')
return {'template': template,
'linkbacks': linkbacks} | python | {
"resource": ""
} |
q26817 | zinnia_pagination | train | def zinnia_pagination(context, page, begin_pages=1, end_pages=1,
before_pages=2, after_pages=2,
template='zinnia/tags/pagination.html'):
"""
Return a Digg-like pagination,
by splitting long list of page into 3 blocks of pages.
"""
get_string = ''
for key, value in context['request'].GET.items():
if key != 'page':
get_string += '&%s=%s' % (key, value)
page_range = list(page.paginator.page_range)
begin = page_range[:begin_pages]
end = page_range[-end_pages:]
middle = page_range[max(page.number - before_pages - 1, 0):
page.number + after_pages]
if set(begin) & set(middle): # [1, 2, 3], [2, 3, 4], [...]
begin = sorted(set(begin + middle)) # [1, 2, 3, 4]
middle = []
elif begin[-1] + 1 == middle[0]: # [1, 2, 3], [4, 5, 6], [...]
begin += middle # [1, 2, 3, 4, 5, 6]
middle = []
elif middle[-1] + 1 == end[0]: # [...], [15, 16, 17], [18, 19, 20]
end = middle + end # [15, 16, 17, 18, 19, 20]
middle = []
elif set(middle) & set(end): # [...], [17, 18, 19], [18, 19, 20]
end = sorted(set(middle + end)) # [17, 18, 19, 20]
middle = []
if set(begin) & set(end): # [1, 2, 3], [...], [2, 3, 4]
begin = sorted(set(begin + end)) # [1, 2, 3, 4]
middle, end = [], []
elif begin[-1] + 1 == end[0]: # [1, 2, 3], [...], [4, 5, 6]
begin += end # [1, 2, 3, 4, 5, 6]
middle, end = [], []
return {'template': template,
'page': page,
'begin': begin,
'middle': middle,
'end': end,
'GET_string': get_string} | python | {
"resource": ""
} |
q26818 | zinnia_breadcrumbs | train | def zinnia_breadcrumbs(context, root_name='',
template='zinnia/tags/breadcrumbs.html',):
"""
Return a breadcrumb for the application.
"""
path = context['request'].path
context_object = get_context_first_object(
context, ['object', 'category', 'tag', 'author'])
context_page = context.get('page_obj')
breadcrumbs = retrieve_breadcrumbs(
path, context_object, context_page, root_name)
return {'template': template,
'breadcrumbs': breadcrumbs} | python | {
"resource": ""
} |
q26819 | zinnia_loop_template | train | def zinnia_loop_template(context, default_template):
"""
Return a selected template from his position within a loop
and the filtering context.
"""
matching, context_object = get_context_first_matching_object(
context,
['category', 'tag', 'author', 'pattern',
'year', 'month', 'week', 'day'])
context_positions = get_context_loop_positions(context)
templates = loop_template_list(
context_positions, context_object, matching,
default_template, ENTRY_LOOP_TEMPLATES)
return select_template(templates) | python | {
"resource": ""
} |
q26820 | get_gravatar | train | def get_gravatar(email, size=80, rating='g', default=None,
protocol=PROTOCOL):
"""
Return url for a Gravatar.
"""
gravatar_protocols = {'http': 'http://www',
'https': 'https://secure'}
url = '%s.gravatar.com/avatar/%s' % (
gravatar_protocols[protocol],
md5(email.strip().lower().encode('utf-8')).hexdigest())
options = {'s': size, 'r': rating}
if default:
options['d'] = default
url = '%s?%s' % (url, urlencode(options))
return url.replace('&', '&') | python | {
"resource": ""
} |
q26821 | get_tag_cloud | train | def get_tag_cloud(context, steps=6, min_count=None,
template='zinnia/tags/tag_cloud.html'):
"""
Return a cloud of published tags.
"""
tags = Tag.objects.usage_for_queryset(
Entry.published.all(), counts=True,
min_count=min_count)
return {'template': template,
'tags': calculate_cloud(tags, steps),
'context_tag': context.get('tag')} | python | {
"resource": ""
} |
q26822 | comment_admin_urlname | train | def comment_admin_urlname(action):
"""
Return the admin URLs for the comment app used.
"""
comment = get_comment_model()
return 'admin:%s_%s_%s' % (
comment._meta.app_label, comment._meta.model_name,
action) | python | {
"resource": ""
} |
q26823 | user_admin_urlname | train | def user_admin_urlname(action):
"""
Return the admin URLs for the user app used.
"""
user = get_user_model()
return 'admin:%s_%s_%s' % (
user._meta.app_label, user._meta.model_name,
action) | python | {
"resource": ""
} |
q26824 | zinnia_statistics | train | def zinnia_statistics(template='zinnia/tags/statistics.html'):
"""
Return statistics on the content of Zinnia.
"""
content_type = ContentType.objects.get_for_model(Entry)
discussions = get_comment_model().objects.filter(
content_type=content_type)
entries = Entry.published
categories = Category.objects
tags = tags_published()
authors = Author.published
replies = discussions.filter(
flags=None, is_public=True)
pingbacks = discussions.filter(
flags__flag=PINGBACK, is_public=True)
trackbacks = discussions.filter(
flags__flag=TRACKBACK, is_public=True)
rejects = discussions.filter(is_public=False)
entries_count = entries.count()
replies_count = replies.count()
pingbacks_count = pingbacks.count()
trackbacks_count = trackbacks.count()
if entries_count:
first_entry = entries.order_by('publication_date')[0]
last_entry = entries.latest()
months_count = (last_entry.publication_date -
first_entry.publication_date).days / 31.0
entries_per_month = entries_count / (months_count or 1.0)
comments_per_entry = float(replies_count) / entries_count
linkbacks_per_entry = float(pingbacks_count + trackbacks_count) / \
entries_count
total_words_entry = 0
for e in entries.all():
total_words_entry += e.word_count
words_per_entry = float(total_words_entry) / entries_count
words_per_comment = 0.0
if replies_count:
total_words_comment = 0
for c in replies.all():
total_words_comment += len(c.comment.split())
words_per_comment = float(total_words_comment) / replies_count
else:
words_per_entry = words_per_comment = entries_per_month = \
comments_per_entry = linkbacks_per_entry = 0.0
return {'template': template,
'entries': entries_count,
'categories': categories.count(),
'tags': tags.count(),
'authors': authors.count(),
'comments': replies_count,
'pingbacks': pingbacks_count,
'trackbacks': trackbacks_count,
'rejects': rejects.count(),
'words_per_entry': words_per_entry,
'words_per_comment': words_per_comment,
'entries_per_month': entries_per_month,
'comments_per_entry': comments_per_entry,
'linkbacks_per_entry': linkbacks_per_entry} | python | {
"resource": ""
} |
q26825 | EntryCacheMixin.get_object | train | def get_object(self, queryset=None):
"""
Implement cache on ``get_object`` method to
avoid repetitive calls, in POST.
"""
if self._cached_object is None:
self._cached_object = super(EntryCacheMixin, self).get_object(
queryset)
return self._cached_object | python | {
"resource": ""
} |
q26826 | PasswordMixin.password | train | def password(self):
"""
Return the password view.
"""
return self.response_class(request=self.request,
template='zinnia/password.html',
context={'error': self.error}) | python | {
"resource": ""
} |
q26827 | ping_directories_handler | train | def ping_directories_handler(sender, **kwargs):
"""
Ping directories when an entry is saved.
"""
entry = kwargs['instance']
if entry.is_visible and settings.SAVE_PING_DIRECTORIES:
for directory in settings.PING_DIRECTORIES:
DirectoryPinger(directory, [entry]) | python | {
"resource": ""
} |
q26828 | ping_external_urls_handler | train | def ping_external_urls_handler(sender, **kwargs):
"""
Ping externals URLS when an entry is saved.
"""
entry = kwargs['instance']
if entry.is_visible and settings.SAVE_PING_EXTERNAL_URLS:
ExternalUrlsPinger(entry) | python | {
"resource": ""
} |
q26829 | count_discussions_handler | train | def count_discussions_handler(sender, **kwargs):
"""
Update the count of each type of discussion on an entry.
"""
if kwargs.get('instance') and kwargs.get('created'):
# The signal is emitted by the comment creation,
# so we do nothing, comment_was_posted is used instead.
return
comment = 'comment' in kwargs and kwargs['comment'] or kwargs['instance']
entry = comment.content_object
if isinstance(entry, Entry):
entry.comment_count = entry.comments.count()
entry.pingback_count = entry.pingbacks.count()
entry.trackback_count = entry.trackbacks.count()
entry.save(update_fields=[
'comment_count', 'pingback_count', 'trackback_count']) | python | {
"resource": ""
} |
q26830 | count_pingbacks_handler | train | def count_pingbacks_handler(sender, **kwargs):
"""
Update Entry.pingback_count when a pingback was posted.
"""
entry = kwargs['entry']
entry.pingback_count = F('pingback_count') + 1
entry.save(update_fields=['pingback_count']) | python | {
"resource": ""
} |
q26831 | count_trackbacks_handler | train | def count_trackbacks_handler(sender, **kwargs):
"""
Update Entry.trackback_count when a trackback was posted.
"""
entry = kwargs['entry']
entry.trackback_count = F('trackback_count') + 1
entry.save(update_fields=['trackback_count']) | python | {
"resource": ""
} |
q26832 | connect_entry_signals | train | def connect_entry_signals():
"""
Connect all the signals on Entry model.
"""
post_save.connect(
ping_directories_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
post_save.connect(
ping_external_urls_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
post_save.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
post_delete.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE) | python | {
"resource": ""
} |
q26833 | disconnect_entry_signals | train | def disconnect_entry_signals():
"""
Disconnect all the signals on Entry model.
"""
post_save.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
post_save.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
post_save.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
post_delete.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE) | python | {
"resource": ""
} |
q26834 | connect_discussion_signals | train | def connect_discussion_signals():
"""
Connect all the signals on the Comment model to
maintains a valid discussion count on each entries
when an action is done with the comments.
"""
post_save.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
post_delete.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
comment_was_flagged.connect(
count_discussions_handler, sender=comment_model,
dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
comment_was_posted.connect(
count_comments_handler, sender=comment_model,
dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
pingback_was_posted.connect(
count_pingbacks_handler, sender=comment_model,
dispatch_uid=PINGBACK_WF_COUNT_PINGBACKS)
trackback_was_posted.connect(
count_trackbacks_handler, sender=comment_model,
dispatch_uid=TRACKBACK_WF_COUNT_TRACKBACKS) | python | {
"resource": ""
} |
q26835 | disconnect_discussion_signals | train | def disconnect_discussion_signals():
"""
Disconnect all the signals on Comment model
provided by Zinnia.
"""
post_save.disconnect(
sender=comment_model,
dispatch_uid=COMMENT_PS_COUNT_DISCUSSIONS)
post_delete.disconnect(
sender=comment_model,
dispatch_uid=COMMENT_PD_COUNT_DISCUSSIONS)
comment_was_flagged.disconnect(
sender=comment_model,
dispatch_uid=COMMENT_WF_COUNT_DISCUSSIONS)
comment_was_posted.disconnect(
sender=comment_model,
dispatch_uid=COMMENT_WP_COUNT_COMMENTS)
pingback_was_posted.disconnect(
sender=comment_model,
dispatch_uid=PINGBACK_WF_COUNT_PINGBACKS)
trackback_was_posted.disconnect(
sender=comment_model,
dispatch_uid=TRACKBACK_WF_COUNT_TRACKBACKS) | python | {
"resource": ""
} |
q26836 | QuickEntry.dispatch | train | def dispatch(self, *args, **kwargs):
"""
Decorate the view dispatcher with permission_required.
"""
return super(QuickEntry, self).dispatch(*args, **kwargs) | python | {
"resource": ""
} |
q26837 | QuickEntry.post | train | def post(self, request, *args, **kwargs):
"""
Handle the datas for posting a quick entry,
and redirect to the admin in case of error or
to the entry's page in case of success.
"""
now = timezone.now()
data = {
'title': request.POST.get('title'),
'slug': slugify(request.POST.get('title')),
'status': DRAFT if 'save_draft' in request.POST else PUBLISHED,
'sites': [Site.objects.get_current().pk],
'authors': [request.user.pk],
'content_template': 'zinnia/_entry_detail.html',
'detail_template': 'entry_detail.html',
'publication_date': now,
'creation_date': now,
'last_update': now,
'content': request.POST.get('content'),
'tags': request.POST.get('tags')}
form = QuickEntryForm(data)
if form.is_valid():
form.instance.content = self.htmlize(form.cleaned_data['content'])
entry = form.save()
return redirect(entry)
data = {'title': smart_str(request.POST.get('title', '')),
'content': smart_str(self.htmlize(
request.POST.get('content', ''))),
'tags': smart_str(request.POST.get('tags', '')),
'slug': slugify(request.POST.get('title', '')),
'authors': request.user.pk,
'sites': Site.objects.get_current().pk}
return redirect('%s?%s' % (reverse('admin:zinnia_entry_add'),
urlencode(data))) | python | {
"resource": ""
} |
q26838 | EntryCommentModerator.moderate | train | def moderate(self, comment, entry, request):
"""
Determine if a new comment should be marked as non-public
and await approval.
Return ``True`` to put the comment into the moderator queue,
or ``False`` to allow it to be showed up immediately.
"""
if self.auto_moderate_comments:
return True
if check_is_spam(comment, entry, request,
self.spam_checker_backends):
return True
return False | python | {
"resource": ""
} |
q26839 | EntryCommentModerator.email | train | def email(self, comment, entry, request):
"""
Send email notifications needed.
"""
current_language = get_language()
try:
activate(settings.LANGUAGE_CODE)
site = Site.objects.get_current()
if self.auto_moderate_comments or comment.is_public:
self.do_email_notification(comment, entry, site)
if comment.is_public:
self.do_email_authors(comment, entry, site)
self.do_email_reply(comment, entry, site)
finally:
activate(current_language) | python | {
"resource": ""
} |
q26840 | EntryCommentModerator.do_email_notification | train | def do_email_notification(self, comment, entry, site):
"""
Send email notification of a new comment to site staff.
"""
if not self.mail_comment_notification_recipients:
return
template = loader.get_template(
'comments/zinnia/entry/email/notification.txt')
context = {
'comment': comment,
'entry': entry,
'site': site,
'protocol': PROTOCOL
}
subject = _('[%(site)s] New comment posted on "%(title)s"') % \
{'site': site.name, 'title': entry.title}
message = template.render(context)
send_mail(
subject, message,
settings.DEFAULT_FROM_EMAIL,
self.mail_comment_notification_recipients,
fail_silently=not settings.DEBUG
) | python | {
"resource": ""
} |
q26841 | EntryCommentModerator.do_email_authors | train | def do_email_authors(self, comment, entry, site):
"""
Send email notification of a new comment to
the authors of the entry.
"""
if not self.email_authors:
return
exclude_list = self.mail_comment_notification_recipients + ['']
recipient_list = (
set([author.email for author in entry.authors.all()])
- set(exclude_list)
)
if not recipient_list:
return
template = loader.get_template(
'comments/zinnia/entry/email/authors.txt')
context = {
'comment': comment,
'entry': entry,
'site': site,
'protocol': PROTOCOL
}
subject = _('[%(site)s] New comment posted on "%(title)s"') % \
{'site': site.name, 'title': entry.title}
message = template.render(context)
send_mail(
subject, message,
settings.DEFAULT_FROM_EMAIL,
recipient_list,
fail_silently=not settings.DEBUG
) | python | {
"resource": ""
} |
q26842 | EntryCommentModerator.do_email_reply | train | def do_email_reply(self, comment, entry, site):
"""
Send email notification of a new comment to
the authors of the previous comments.
"""
if not self.email_reply:
return
exclude_list = (
self.mail_comment_notification_recipients
+ [author.email for author in entry.authors.all()]
+ [comment.email]
)
recipient_list = (
set([other_comment.email
for other_comment in entry.comments
if other_comment.email])
- set(exclude_list)
)
if not recipient_list:
return
template = loader.get_template(
'comments/zinnia/entry/email/reply.txt')
context = {
'comment': comment,
'entry': entry,
'site': site,
'protocol': PROTOCOL
}
subject = _('[%(site)s] New comment posted on "%(title)s"') % \
{'site': site.name, 'title': entry.title}
message = template.render(context)
mail = EmailMessage(
subject, message,
settings.DEFAULT_FROM_EMAIL,
bcc=recipient_list)
mail.send(fail_silently=not settings.DEBUG) | python | {
"resource": ""
} |
q26843 | EntryWeek.get_dated_items | train | def get_dated_items(self):
"""
Override get_dated_items to add a useful 'week_end_day'
variable in the extra context of the view.
"""
self.date_list, self.object_list, extra_context = super(
EntryWeek, self).get_dated_items()
self.date_list = self.get_date_list(self.object_list, 'day')
extra_context['week_end_day'] = extra_context[
'week'] + datetime.timedelta(days=6)
return self.date_list, self.object_list, extra_context | python | {
"resource": ""
} |
q26844 | Calendar.formatday | train | def formatday(self, day, weekday):
"""
Return a day as a table cell with a link
if entries are published this day.
"""
if day and day in self.day_entries:
day_date = date(self.current_year, self.current_month, day)
archive_day_url = reverse('zinnia:entry_archive_day',
args=[day_date.strftime('%Y'),
day_date.strftime('%m'),
day_date.strftime('%d')])
return '<td class="%s entry"><a href="%s" '\
'class="archives">%d</a></td>' % (
self.cssclasses[weekday], archive_day_url, day)
return super(Calendar, self).formatday(day, weekday) | python | {
"resource": ""
} |
q26845 | Calendar.formatfooter | train | def formatfooter(self, previous_month, next_month):
"""
Return a footer for a previous and next month.
"""
footer = '<tfoot><tr>' \
'<td colspan="3" class="prev">%s</td>' \
'<td class="pad"> </td>' \
'<td colspan="3" class="next">%s</td>' \
'</tr></tfoot>'
if previous_month:
previous_content = '<a href="%s" class="previous-month">%s</a>' % (
reverse('zinnia:entry_archive_month', args=[
previous_month.strftime('%Y'),
previous_month.strftime('%m')]),
date_format(previous_month, 'YEAR_MONTH_FORMAT'))
else:
previous_content = ' '
if next_month:
next_content = '<a href="%s" class="next-month">%s</a>' % (
reverse('zinnia:entry_archive_month', args=[
next_month.strftime('%Y'),
next_month.strftime('%m')]),
date_format(next_month, 'YEAR_MONTH_FORMAT'))
else:
next_content = ' '
return footer % (previous_content, next_content) | python | {
"resource": ""
} |
q26846 | Calendar.formatmonthname | train | def formatmonthname(self, theyear, themonth, withyear=True):
"""Return a month name translated as a table row."""
monthname = '%s %s' % (MONTHS[themonth].title(), theyear)
return '<caption>%s</caption>' % monthname | python | {
"resource": ""
} |
q26847 | append_position | train | def append_position(path, position, separator=''):
"""
Concatenate a path and a position,
between the filename and the extension.
"""
filename, extension = os.path.splitext(path)
return ''.join([filename, separator, str(position), extension]) | python | {
"resource": ""
} |
q26848 | loop_template_list | train | def loop_template_list(loop_positions, instance, instance_type,
default_template, registry):
"""
Build a list of templates from a position within a loop
and a registry of templates.
"""
templates = []
local_loop_position = loop_positions[1]
global_loop_position = loop_positions[0]
instance_string = slugify(str(instance))
for key in ['%s-%s' % (instance_type, instance_string),
instance_string,
instance_type,
'default']:
try:
templates.append(registry[key][global_loop_position])
except KeyError:
pass
templates.append(
append_position(default_template, global_loop_position, '-'))
templates.append(
append_position(default_template, local_loop_position, '_'))
templates.append(default_template)
return templates | python | {
"resource": ""
} |
q26849 | get_url_shortener | train | def get_url_shortener():
"""
Return the selected URL shortener backend.
"""
try:
backend_module = import_module(URL_SHORTENER_BACKEND)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % URL_SHORTENER_BACKEND,
RuntimeWarning)
backend = default_backend
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = default_backend
return backend | python | {
"resource": ""
} |
q26850 | generate_pingback_content | train | def generate_pingback_content(soup, target, max_length, trunc_char='...'):
"""
Generate a description text for the pingback.
"""
link = soup.find('a', href=target)
content = strip_tags(six.text_type(link.findParent()))
index = content.index(link.string)
if len(content) > max_length:
middle = max_length // 2
start = index - middle
end = index + middle
if start <= 0:
end -= start
extract = content[0:end]
else:
extract = '%s%s' % (trunc_char, content[start:end])
if end < len(content):
extract += trunc_char
return extract
return content | python | {
"resource": ""
} |
q26851 | EntryTrackback.dispatch | train | def dispatch(self, *args, **kwargs):
"""
Decorate the view dispatcher with csrf_exempt.
"""
return super(EntryTrackback, self).dispatch(*args, **kwargs) | python | {
"resource": ""
} |
q26852 | EntryTrackback.get | train | def get(self, request, *args, **kwargs):
"""
GET only do a permanent redirection to the Entry.
"""
entry = self.get_object()
return HttpResponsePermanentRedirect(entry.get_absolute_url()) | python | {
"resource": ""
} |
q26853 | EntryTrackback.post | train | def post(self, request, *args, **kwargs):
"""
Check if an URL is provided and if trackbacks
are enabled on the Entry.
If so the URL is registered one time as a trackback.
"""
url = request.POST.get('url')
if not url:
return self.get(request, *args, **kwargs)
entry = self.get_object()
site = Site.objects.get_current()
if not entry.trackbacks_are_open:
return self.render_to_response(
{'error': 'Trackback is not enabled for %s' % entry.title})
title = request.POST.get('title') or url
excerpt = request.POST.get('excerpt') or title
blog_name = request.POST.get('blog_name') or title
ip_address = request.META.get('REMOTE_ADDR', None)
trackback_klass = comments.get_model()
trackback_datas = {
'content_type': ContentType.objects.get_for_model(Entry),
'object_pk': entry.pk,
'site': site,
'user_url': url,
'user_name': blog_name,
'ip_address': ip_address,
'comment': excerpt
}
trackback = trackback_klass(**trackback_datas)
if check_is_spam(trackback, entry, request):
return self.render_to_response(
{'error': 'Trackback considered like spam'})
trackback_defaults = {'comment': trackback_datas.pop('comment')}
trackback, created = trackback_klass.objects.get_or_create(
defaults=trackback_defaults,
**trackback_datas)
if created:
trackback.flags.create(user=get_user_flagger(), flag=TRACKBACK)
trackback_was_posted.send(trackback.__class__,
trackback=trackback,
entry=entry)
else:
return self.render_to_response(
{'error': 'Trackback is already registered'})
return self.render_to_response({}) | python | {
"resource": ""
} |
q26854 | Sitemap.get_context_data | train | def get_context_data(self, **kwargs):
"""
Populate the context of the template
with all published entries and all the categories.
"""
context = super(Sitemap, self).get_context_data(**kwargs)
context.update(
{'entries': Entry.published.all(),
'categories': Category.published.all(),
'authors': Author.published.all()}
)
return context | python | {
"resource": ""
} |
q26855 | get_spam_checker | train | def get_spam_checker(backend_path):
"""
Return the selected spam checker backend.
"""
try:
backend_module = import_module(backend_path)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % backend_path,
RuntimeWarning)
backend = None
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = None
return backend | python | {
"resource": ""
} |
q26856 | check_is_spam | train | def check_is_spam(content, content_object, request,
backends=None):
"""
Return True if the content is a spam, else False.
"""
if backends is None:
backends = SPAM_CHECKER_BACKENDS
for backend_path in backends:
spam_checker = get_spam_checker(backend_path)
if spam_checker is not None:
is_spam = spam_checker(content, content_object, request)
if is_spam:
return True
return False | python | {
"resource": ""
} |
q26857 | PreviousNextPublishedMixin.get_previous_next_published | train | def get_previous_next_published(self, date):
"""
Returns a dict of the next and previous date periods
with published entries.
"""
previous_next = getattr(self, 'previous_next', None)
if previous_next is None:
date_year = datetime(date.year, 1, 1)
date_month = datetime(date.year, date.month, 1)
date_day = datetime(date.year, date.month, date.day)
date_next_week = date_day + timedelta(weeks=1)
previous_next = {'year': [None, None],
'week': [None, None],
'month': [None, None],
'day': [None, None]}
dates = self.get_queryset().datetimes(
'publication_date', 'day', order='ASC')
for d in dates:
d_year = datetime(d.year, 1, 1)
d_month = datetime(d.year, d.month, 1)
d_day = datetime(d.year, d.month, d.day)
if d_year < date_year:
previous_next['year'][0] = d_year.date()
elif d_year > date_year and not previous_next['year'][1]:
previous_next['year'][1] = d_year.date()
if d_month < date_month:
previous_next['month'][0] = d_month.date()
elif d_month > date_month and not previous_next['month'][1]:
previous_next['month'][1] = d_month.date()
if d_day < date_day:
previous_next['day'][0] = d_day.date()
previous_next['week'][0] = d_day.date() - timedelta(
days=d_day.weekday())
elif d_day > date_day and not previous_next['day'][1]:
previous_next['day'][1] = d_day.date()
if d_day > date_next_week and not previous_next['week'][1]:
previous_next['week'][1] = d_day.date() - timedelta(
days=d_day.weekday())
setattr(self, 'previous_next', previous_next)
return previous_next | python | {
"resource": ""
} |
q26858 | BaseEntrySearch.get_queryset | train | def get_queryset(self):
"""
Overridde the get_queryset method to
do some validations and build the search queryset.
"""
entries = Entry.published.none()
if self.request.GET:
self.pattern = self.request.GET.get('pattern', '')
if len(self.pattern) < 3:
self.error = _('The pattern is too short')
else:
entries = Entry.published.search(self.pattern)
else:
self.error = _('No pattern to search found')
return entries | python | {
"resource": ""
} |
q26859 | BaseEntrySearch.get_context_data | train | def get_context_data(self, **kwargs):
"""
Add error and pattern in context.
"""
context = super(BaseEntrySearch, self).get_context_data(**kwargs)
context.update({'error': self.error, 'pattern': self.pattern})
return context | python | {
"resource": ""
} |
q26860 | BaseAuthorDetail.get_queryset | train | def get_queryset(self):
"""
Retrieve the author by his username and
build a queryset of his published entries.
"""
self.author = get_object_or_404(
Author, **{Author.USERNAME_FIELD: self.kwargs['username']})
return self.author.entries_published() | python | {
"resource": ""
} |
q26861 | BaseAuthorDetail.get_context_data | train | def get_context_data(self, **kwargs):
"""
Add the current author in context.
"""
context = super(BaseAuthorDetail, self).get_context_data(**kwargs)
context['author'] = self.author
return context | python | {
"resource": ""
} |
q26862 | BaseTagDetail.get_queryset | train | def get_queryset(self):
"""
Retrieve the tag by his name and
build a queryset of his published entries.
"""
self.tag = get_tag(self.kwargs['tag'])
if self.tag is None:
raise Http404(_('No Tag found matching "%s".') %
self.kwargs['tag'])
return TaggedItem.objects.get_by_model(
Entry.published.all(), self.tag) | python | {
"resource": ""
} |
q26863 | BaseTagDetail.get_context_data | train | def get_context_data(self, **kwargs):
"""
Add the current tag in context.
"""
context = super(BaseTagDetail, self).get_context_data(**kwargs)
context['tag'] = self.tag
return context | python | {
"resource": ""
} |
q26864 | read_analogy_file | train | def read_analogy_file(filename):
"""
Read the analogy task test set from a file.
"""
section = None
with open(filename, 'r') as questions_file:
for line in questions_file:
if line.startswith(':'):
section = line[2:].replace('\n', '')
continue
else:
words = line.replace('\n', '').split(' ')
yield section, words | python | {
"resource": ""
} |
q26865 | analogy_rank_score | train | def analogy_rank_score(analogies, word_vectors, no_threads=1):
"""
Calculate the analogy rank score for the given set of analogies.
A rank of zero denotes a perfect score; with random word vectors
we would expect a rank of 0.5.
Arguments:
- analogies: a numpy array holding the ids of the words in the analogy tasks,
as constructed by `construct_analogy_test_set`.
- word_vectors: numpy array holding the word vectors to use.
- num_threads: number of parallel threads to use in the calculation.
Returns:
- ranks: a numpy array holding the normalized rank of the target word
in each analogy task. Rank 0 means that the target words was
returned first; rank 1 means it was returned last.
"""
# The mean of the vectors for the
# second, third, and the negative of
# the first word.
input_vectors = (word_vectors[analogies[:, 1]]
+ word_vectors[analogies[:, 2]]
- word_vectors[analogies[:, 0]])
word_vector_norms = np.linalg.norm(word_vectors,
axis=1)
# Pre-allocate the array storing the rank violations
rank_violations = np.zeros(input_vectors.shape[0], dtype=np.int32)
compute_rank_violations(word_vectors,
word_vector_norms,
input_vectors,
analogies[:, 3],
analogies,
rank_violations,
no_threads)
return rank_violations / float(word_vectors.shape[0]) | python | {
"resource": ""
} |
q26866 | Corpus.fit | train | def fit(self, corpus, window=10, ignore_missing=False):
"""
Perform a pass through the corpus to construct
the cooccurrence matrix.
Parameters:
- iterable of lists of strings corpus
- int window: the length of the (symmetric)
context window used for cooccurrence.
- bool ignore_missing: whether to ignore words missing from
the dictionary (if it was supplied).
Context window distances will be preserved
even if out-of-vocabulary words are
ignored.
If False, a KeyError is raised.
"""
self.matrix = construct_cooccurrence_matrix(corpus,
self.dictionary,
int(self.dictionary_supplied),
int(window),
int(ignore_missing)) | python | {
"resource": ""
} |
q26867 | Glove.fit | train | def fit(self, matrix, epochs=5, no_threads=2, verbose=False):
"""
Estimate the word embeddings.
Parameters:
- scipy.sparse.coo_matrix matrix: coocurrence matrix
- int epochs: number of training epochs
- int no_threads: number of training threads
- bool verbose: print progress messages if True
"""
shape = matrix.shape
if (len(shape) != 2 or
shape[0] != shape[1]):
raise Exception('Coocurrence matrix must be square')
if not sp.isspmatrix_coo(matrix):
raise Exception('Coocurrence matrix must be in the COO format')
random_state = check_random_state(self.random_state)
self.word_vectors = ((random_state.rand(shape[0],
self.no_components) - 0.5)
/ self.no_components)
self.word_biases = np.zeros(shape[0],
dtype=np.float64)
self.vectors_sum_gradients = np.ones_like(self.word_vectors)
self.biases_sum_gradients = np.ones_like(self.word_biases)
shuffle_indices = np.arange(matrix.nnz, dtype=np.int32)
if verbose:
print('Performing %s training epochs '
'with %s threads' % (epochs, no_threads))
for epoch in range(epochs):
if verbose:
print('Epoch %s' % epoch)
# Shuffle the coocurrence matrix
random_state.shuffle(shuffle_indices)
fit_vectors(self.word_vectors,
self.vectors_sum_gradients,
self.word_biases,
self.biases_sum_gradients,
matrix.row,
matrix.col,
matrix.data,
shuffle_indices,
self.learning_rate,
self.max_count,
self.alpha,
self.max_loss,
int(no_threads))
if not np.isfinite(self.word_vectors).all():
raise Exception('Non-finite values in word vectors. '
'Try reducing the learning rate or the '
'max_loss parameter.') | python | {
"resource": ""
} |
q26868 | Glove.add_dictionary | train | def add_dictionary(self, dictionary):
"""
Supply a word-id dictionary to allow similarity queries.
"""
if self.word_vectors is None:
raise Exception('Model must be fit before adding a dictionary')
if len(dictionary) > self.word_vectors.shape[0]:
raise Exception('Dictionary length must be smaller '
'or equal to the number of word vectors')
self.dictionary = dictionary
if hasattr(self.dictionary, 'iteritems'):
# Python 2 compat
items_iterator = self.dictionary.iteritems()
else:
items_iterator = self.dictionary.items()
self.inverse_dictionary = {v: k for k, v in items_iterator} | python | {
"resource": ""
} |
q26869 | Glove.save | train | def save(self, filename):
"""
Serialize model to filename.
"""
with open(filename, 'wb') as savefile:
pickle.dump(self.__dict__,
savefile,
protocol=pickle.HIGHEST_PROTOCOL) | python | {
"resource": ""
} |
q26870 | Glove.load | train | def load(cls, filename):
"""
Load model from filename.
"""
instance = Glove()
with open(filename, 'rb') as savefile:
instance.__dict__ = pickle.load(savefile)
return instance | python | {
"resource": ""
} |
q26871 | Glove.most_similar | train | def most_similar(self, word, number=5):
"""
Run a similarity query, retrieving number
most similar words.
"""
if self.word_vectors is None:
raise Exception('Model must be fit before querying')
if self.dictionary is None:
raise Exception('No word dictionary supplied')
try:
word_idx = self.dictionary[word]
except KeyError:
raise Exception('Word not in dictionary')
return self._similarity_query(self.word_vectors[word_idx], number)[1:] | python | {
"resource": ""
} |
q26872 | set_gcc | train | def set_gcc():
"""
Try to find and use GCC on OSX for OpenMP support.
"""
# For macports and homebrew
patterns = ['/opt/local/bin/gcc-mp-[0-9].[0-9]',
'/opt/local/bin/gcc-mp-[0-9]',
'/usr/local/bin/gcc-[0-9].[0-9]',
'/usr/local/bin/gcc-[0-9]']
if 'darwin' in platform.platform().lower():
gcc_binaries = []
for pattern in patterns:
gcc_binaries += glob.glob(pattern)
gcc_binaries.sort()
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
os.environ["CC"] = gcc
else:
raise Exception('No GCC available. Install gcc from Homebrew '
'using brew install gcc.') | python | {
"resource": ""
} |
q26873 | RandomizedSearchCV.fit | train | def fit(self, X, y=None, groups=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, groups, sampled_params) | python | {
"resource": ""
} |
q26874 | _new_java_obj | train | def _new_java_obj(sc, java_class, *args):
"""
Construct a new Java object.
"""
java_obj = _jvm()
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
java_args = [_py2java(sc, arg) for arg in args]
return java_obj(*java_args) | python | {
"resource": ""
} |
q26875 | _call_java | train | def _call_java(sc, java_obj, name, *args):
"""
Method copied from pyspark.ml.wrapper. Uses private Spark APIs.
"""
m = getattr(java_obj, name)
java_args = [_py2java(sc, arg) for arg in args]
return _java2py(sc, m(*java_args)) | python | {
"resource": ""
} |
q26876 | SAFEMSIMDXML.get_area_def | train | def get_area_def(self, dsid):
"""Get the area definition of the dataset."""
geocoding = self.root.find('.//Tile_Geocoding')
epsg = geocoding.find('HORIZONTAL_CS_CODE').text
rows = int(geocoding.find('Size[@resolution="' + str(dsid.resolution) + '"]/NROWS').text)
cols = int(geocoding.find('Size[@resolution="' + str(dsid.resolution) + '"]/NCOLS').text)
geoposition = geocoding.find('Geoposition[@resolution="' + str(dsid.resolution) + '"]')
ulx = float(geoposition.find('ULX').text)
uly = float(geoposition.find('ULY').text)
xdim = float(geoposition.find('XDIM').text)
ydim = float(geoposition.find('YDIM').text)
area_extent = (ulx, uly + rows * ydim, ulx + cols * xdim, uly)
area = geometry.AreaDefinition(
self.tile,
"On-the-fly area",
self.tile,
{'init': epsg},
cols,
rows,
area_extent)
return area | python | {
"resource": ""
} |
q26877 | SAFEMSIMDXML._get_coarse_dataset | train | def _get_coarse_dataset(self, key, info):
"""Get the coarse dataset refered to by `key` from the XML data."""
angles = self.root.find('.//Tile_Angles')
if key in ['solar_zenith_angle', 'solar_azimuth_angle']:
elts = angles.findall(info['xml_tag'] + '/Values_List/VALUES')
return np.array([[val for val in elt.text.split()] for elt in elts],
dtype=np.float)
elif key in ['satellite_zenith_angle', 'satellite_azimuth_angle']:
arrays = []
elts = angles.findall(info['xml_tag'] + '[@bandId="1"]')
for elt in elts:
items = elt.findall(info['xml_item'] + '/Values_List/VALUES')
arrays.append(np.array([[val for val in item.text.split()] for item in items],
dtype=np.float))
return np.nanmean(np.dstack(arrays), -1)
else:
return | python | {
"resource": ""
} |
q26878 | SAFEMSIMDXML.get_dataset | train | def get_dataset(self, key, info):
"""Get the dataset refered to by `key`."""
angles = self._get_coarse_dataset(key, info)
if angles is None:
return
# Fill gaps at edges of swath
darr = DataArray(angles, dims=['y', 'x'])
darr = darr.bfill('x')
darr = darr.ffill('x')
angles = darr.data
res = self.interpolate_angles(angles, key.resolution)
proj = DataArray(res, dims=['y', 'x'])
proj.attrs = info.copy()
proj.attrs['units'] = 'degrees'
proj.attrs['platform_name'] = self.platform_name
return proj | python | {
"resource": ""
} |
q26879 | AMSR2L1BFileHandler.get_shape | train | def get_shape(self, ds_id, ds_info):
"""Get output shape of specified dataset."""
var_path = ds_info['file_key']
shape = self[var_path + '/shape']
if ((ds_info.get('standard_name') == "longitude" or ds_info.get('standard_name') == "latitude") and
ds_id.resolution == 10000):
return shape[0], int(shape[1] / 2)
return shape | python | {
"resource": ""
} |
q26880 | AMSR2L1BFileHandler.get_dataset | train | def get_dataset(self, ds_id, ds_info):
"""Get output data and metadata of specified dataset."""
var_path = ds_info['file_key']
fill_value = ds_info.get('fill_value', 65535)
metadata = self.get_metadata(ds_id, ds_info)
data = self[var_path]
if ((ds_info.get('standard_name') == "longitude" or
ds_info.get('standard_name') == "latitude") and
ds_id.resolution == 10000):
# FIXME: Lower frequency channels need CoRegistration parameters applied
data = data[:, ::2] * self[var_path + "/attr/SCALE FACTOR"]
else:
data = data * self[var_path + "/attr/SCALE FACTOR"]
data = data.where(data != fill_value)
data.attrs.update(metadata)
return data | python | {
"resource": ""
} |
q26881 | find_coefficient_index | train | def find_coefficient_index(sensor, wavelength_range, resolution=0):
"""Return index in to coefficient arrays for this band's wavelength.
This function search through the `COEFF_INDEX_MAP` dictionary and
finds the first key where the nominal wavelength of `wavelength_range`
falls between the minimum wavelength and maximum wavelength of the key.
`wavelength_range` can also be the standard name of the band. For
example, "M05" for VIIRS or "1" for MODIS.
:param sensor: sensor of band to be corrected
:param wavelength_range: 3-element tuple of (min wavelength, nominal wavelength, max wavelength)
:param resolution: resolution of the band to be corrected
:return: index in to coefficient arrays like `aH2O`, `aO3`, etc.
None is returned if no matching wavelength is found
"""
index_map = COEFF_INDEX_MAP[sensor.lower()]
# Find the best resolution of coefficients
for res in sorted(index_map.keys()):
if resolution <= res:
index_map = index_map[res]
break
else:
raise ValueError("Unrecognized data resolution: {}", resolution)
# Find the best wavelength of coefficients
if isinstance(wavelength_range, str):
# wavelength range is actually a band name
return index_map[wavelength_range]
else:
for k, v in index_map.items():
if isinstance(k, str):
# we are analyzing wavelengths and ignoring dataset names
continue
if k[0] <= wavelength_range[1] <= k[2]:
return v | python | {
"resource": ""
} |
q26882 | run_crefl | train | def run_crefl(refl, coeffs,
lon,
lat,
sensor_azimuth,
sensor_zenith,
solar_azimuth,
solar_zenith,
avg_elevation=None,
percent=False,
use_abi=False):
"""Run main crefl algorithm.
All input parameters are per-pixel values meaning they are the same size
and shape as the input reflectance data, unless otherwise stated.
:param reflectance_bands: tuple of reflectance band arrays
:param coefficients: tuple of coefficients for each band (see `get_coefficients`)
:param lon: input swath longitude array
:param lat: input swath latitude array
:param sensor_azimuth: input swath sensor azimuth angle array
:param sensor_zenith: input swath sensor zenith angle array
:param solar_azimuth: input swath solar azimuth angle array
:param solar_zenith: input swath solar zenith angle array
:param avg_elevation: average elevation (usually pre-calculated and stored in CMGDEM.hdf)
:param percent: True if input reflectances are on a 0-100 scale instead of 0-1 scale (default: False)
"""
# FUTURE: Find a way to compute the average elevation before hand
# Get digital elevation map data for our granule, set ocean fill value to 0
if avg_elevation is None:
LOG.debug("No average elevation information provided in CREFL")
# height = np.zeros(lon.shape, dtype=np.float)
height = 0.
else:
LOG.debug("Using average elevation information provided to CREFL")
lat[(lat <= -90) | (lat >= 90)] = np.nan
lon[(lon <= -180) | (lon >= 180)] = np.nan
row = ((90.0 - lat) * avg_elevation.shape[0] / 180.0).astype(np.int32)
col = ((lon + 180.0) * avg_elevation.shape[1] / 360.0).astype(np.int32)
space_mask = da.isnull(lon) | da.isnull(lat)
row[space_mask] = 0
col[space_mask] = 0
height = da.map_blocks(_avg_elevation_index, avg_elevation, row, col, dtype=avg_elevation.dtype)
height = xr.DataArray(height, dims=['y', 'x'])
# negative heights aren't allowed, clip to 0
height = height.where((height >= 0.) & ~space_mask, 0.0)
del lat, lon, row, col
mus = da.cos(da.deg2rad(solar_zenith))
mus = mus.where(mus >= 0)
muv = da.cos(da.deg2rad(sensor_zenith))
phi = solar_azimuth - sensor_azimuth
if use_abi:
LOG.debug("Using ABI CREFL algorithm")
a_O3 = [268.45, 0.5, 115.42, -3.2922]
a_H2O = [0.0311, 0.1, 92.471, -1.3814]
a_O2 = [0.4567, 0.007, 96.4884, -1.6970]
G_O3 = G_calc(solar_zenith, a_O3) + G_calc(sensor_zenith, a_O3)
G_H2O = G_calc(solar_zenith, a_H2O) + G_calc(sensor_zenith, a_H2O)
G_O2 = G_calc(solar_zenith, a_O2) + G_calc(sensor_zenith, a_O2)
# Note: bh2o values are actually ao2 values for abi
sphalb, rhoray, TtotraytH2O, tOG = get_atm_variables_abi(mus, muv, phi, height, G_O3, G_H2O, G_O2, *coeffs)
else:
LOG.debug("Using original VIIRS CREFL algorithm")
sphalb, rhoray, TtotraytH2O, tOG = get_atm_variables(mus, muv, phi, height, *coeffs)
del solar_azimuth, solar_zenith, sensor_zenith, sensor_azimuth
# Note: Assume that fill/invalid values are either NaN or we are dealing
# with masked arrays
if percent:
corr_refl = ((refl / 100.) / tOG - rhoray) / TtotraytH2O
else:
corr_refl = (refl / tOG - rhoray) / TtotraytH2O
corr_refl /= (1.0 + corr_refl * sphalb)
return corr_refl.clip(REFLMIN, REFLMAX) | python | {
"resource": ""
} |
q26883 | make_day_night_masks | train | def make_day_night_masks(solarZenithAngle,
good_mask,
highAngleCutoff,
lowAngleCutoff,
stepsDegrees=None):
"""
given information on the solarZenithAngle for each point,
generate masks defining where the day, night, and mixed regions are
optionally provide the highAngleCutoff and lowAngleCutoff that define
the limits of the terminator region (if no cutoffs are given the
DEFAULT_HIGH_ANGLE and DEFAULT_LOW_ANGLE will be used)
optionally provide the stepsDegrees that define how many degrees each
"mixed" mask in the terminator region should be (if no stepsDegrees is
given, the whole terminator region will be one mask)
"""
# if the caller passes None, we're only doing one step
stepsDegrees = highAngleCutoff - lowAngleCutoff if stepsDegrees is None else stepsDegrees
night_mask = (solarZenithAngle > highAngleCutoff) & good_mask
day_mask = (solarZenithAngle <= lowAngleCutoff) & good_mask
mixed_mask = []
steps = list(range(lowAngleCutoff, highAngleCutoff + 1, stepsDegrees))
if steps[-1] >= highAngleCutoff:
steps[-1] = highAngleCutoff
steps = zip(steps, steps[1:])
for i, j in steps:
LOG.debug("Processing step %d to %d" % (i, j))
tmp = (solarZenithAngle > i) & (solarZenithAngle <= j) & good_mask
if tmp.any():
LOG.debug("Adding step %d to %d" % (i, j))
# log.debug("Points to process in this range: " + str(np.sum(tmp)))
mixed_mask.append(tmp)
del tmp
return day_mask, mixed_mask, night_mask | python | {
"resource": ""
} |
q26884 | _histogram_equalization_helper | train | def _histogram_equalization_helper(valid_data, number_of_bins, clip_limit=None, slope_limit=None):
"""Calculate the simplest possible histogram equalization, using only valid data.
Returns:
cumulative distribution function and bin information
"""
# bucket all the selected data using np's histogram function
temp_histogram, temp_bins = np.histogram(valid_data, number_of_bins)
# if we have a clip limit and we should do our clipping before building
# the cumulative distribution function, clip off our histogram
if clip_limit is not None:
# clip our histogram and remember how much we removed
pixels_to_clip_at = int(clip_limit *
(valid_data.size / float(number_of_bins)))
mask_to_clip = temp_histogram > clip_limit
# num_bins_clipped = sum(mask_to_clip)
# num_pixels_clipped = sum(temp_histogram[mask_to_clip]) - (num_bins_clipped * pixels_to_clip_at)
temp_histogram[mask_to_clip] = pixels_to_clip_at
# calculate the cumulative distribution function
cumulative_dist_function = temp_histogram.cumsum()
# if we have a clip limit and we should do our clipping after building the
# cumulative distribution function, clip off our cdf
if slope_limit is not None:
# clip our cdf and remember how much we removed
pixel_height_limit = int(slope_limit *
(valid_data.size / float(number_of_bins)))
cumulative_excess_height = 0
num_clipped_pixels = 0
weight_metric = np.zeros(cumulative_dist_function.shape, dtype=float)
for pixel_index in range(1, cumulative_dist_function.size):
current_pixel_count = cumulative_dist_function[pixel_index]
diff_from_acceptable = (
current_pixel_count - cumulative_dist_function[pixel_index - 1]
- pixel_height_limit - cumulative_excess_height)
if diff_from_acceptable < 0:
weight_metric[pixel_index] = abs(diff_from_acceptable)
cumulative_excess_height += max(diff_from_acceptable, 0)
cumulative_dist_function[
pixel_index] = current_pixel_count - cumulative_excess_height
num_clipped_pixels = num_clipped_pixels + cumulative_excess_height
# now normalize the overall distribution function
cumulative_dist_function = (number_of_bins - 1) * cumulative_dist_function / cumulative_dist_function[-1]
# return what someone else will need in order to apply the equalization later
return cumulative_dist_function, temp_bins | python | {
"resource": ""
} |
q26885 | _calculate_weights | train | def _calculate_weights(tile_size):
"""
calculate a weight array that will be used to quickly bilinearly-interpolate the histogram equalizations
tile size should be the width and height of a tile in pixels
returns a 4D weight array, where the first 2 dimensions correspond to the grid of where the tiles are
relative to the tile being interpolated
"""
# we are essentially making a set of weight masks for an ideal center tile
# that has all 8 surrounding tiles available
# create our empty template tiles
template_tile = np.zeros((3, 3, tile_size, tile_size), dtype=np.float32)
"""
# TEMP FOR TESTING, create a weight tile that does no interpolation
template_tile[1,1] = template_tile[1,1] + 1.0
"""
# for ease of calculation, figure out the index of the center pixel in a tile
# and how far that pixel is from the edge of the tile (in pixel units)
center_index = int(tile_size / 2)
center_dist = tile_size / 2.0
# loop through each pixel in the tile and calculate the 9 weights for that pixel
# were weights for a pixel are 0.0 they are not set (since the template_tile
# starts out as all zeros)
for row in range(tile_size):
for col in range(tile_size):
vertical_dist = abs(
center_dist - row
) # the distance from our pixel to the center of our tile, vertically
horizontal_dist = abs(
center_dist - col
) # the distance from our pixel to the center of our tile, horizontally
# pre-calculate which 3 adjacent tiles will affect our tile
# (note: these calculations aren't quite right if center_index equals the row or col)
horizontal_index = 0 if col < center_index else 2
vertical_index = 0 if row < center_index else 2
# if this is the center pixel, we only need to use it's own tile
# for it
if (row is center_index) and (col is center_index):
# all of the weight for this pixel comes from it's own tile
template_tile[1, 1][row, col] = 1.0
# if this pixel is in the center row, but is not the center pixel
# we're going to need to linearly interpolate it's tile and the
# tile that is horizontally nearest to it
elif (row is center_index) and (col is not center_index):
# linear interp horizontally
beside_weight = horizontal_dist / tile_size # the weight from the adjacent tile
local_weight = (
tile_size -
horizontal_dist) / tile_size # the weight from this tile
# set the weights for the two relevant tiles
template_tile[1, 1][row, col] = local_weight
template_tile[1, horizontal_index][row, col] = beside_weight
# if this pixel is in the center column, but is not the center pixel
# we're going to need to linearly interpolate it's tile and the
# tile that is vertically nearest to it
elif (row is not center_index) and (col is center_index):
# linear interp vertical
beside_weight = vertical_dist / tile_size # the weight from the adjacent tile
local_weight = (
tile_size -
vertical_dist) / tile_size # the weight from this tile
# set the weights for the two relevant tiles
template_tile[1, 1][row, col] = local_weight
template_tile[vertical_index, 1][row, col] = beside_weight
# if the pixel is in one of the four quadrants that are above or below the center
# row and column, we need to bilinearly interpolate it between the
# nearest four tiles
else:
# bilinear interpolation
local_weight = ((tile_size - vertical_dist) / tile_size) * (
(tile_size - horizontal_dist) /
tile_size) # the weight from this tile
vertical_weight = ((vertical_dist) / tile_size) * (
(tile_size - horizontal_dist) / tile_size
) # the weight from the vertically adjacent tile
horizontal_weight = (
(tile_size - vertical_dist) / tile_size) * (
(horizontal_dist) / tile_size
) # the weight from the horizontally adjacent tile
diagonal_weight = ((vertical_dist) / tile_size) * (
(horizontal_dist) / tile_size
) # the weight from the diagonally adjacent tile
# set the weights for the four relevant tiles
template_tile[1, 1, row, col] = local_weight
template_tile[vertical_index, 1, row, col] = vertical_weight
template_tile[1, horizontal_index, row,
col] = horizontal_weight
template_tile[vertical_index, horizontal_index, row,
col] = diagonal_weight
# return the weights for an ideal center tile
return template_tile | python | {
"resource": ""
} |
q26886 | _linear_normalization_from_0to1 | train | def _linear_normalization_from_0to1(
data,
mask,
theoretical_max,
theoretical_min=0,
message="normalizing equalized data to fit in 0 to 1 range"):
"""Do a linear normalization so all data is in the 0 to 1 range.
This is a sloppy but fast calculation that relies on parameters giving it
the correct theoretical current max and min so it can scale the data
accordingly.
"""
LOG.debug(message)
if theoretical_min != 0:
data[mask] = data[mask] - theoretical_min
theoretical_max = theoretical_max - theoretical_min
data[mask] = data[mask] / theoretical_max | python | {
"resource": ""
} |
q26887 | HistogramDNB._run_dnb_normalization | train | def _run_dnb_normalization(self, dnb_data, sza_data):
"""Scale the DNB data using a histogram equalization method.
Args:
dnb_data (ndarray): Day/Night Band data array
sza_data (ndarray): Solar Zenith Angle data array
"""
# convert dask arrays to DataArray objects
dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
sza_data = xr.DataArray(sza_data, dims=('y', 'x'))
good_mask = ~(dnb_data.isnull() | sza_data.isnull())
output_dataset = dnb_data.where(good_mask)
# we only need the numpy array
output_dataset = output_dataset.values.copy()
dnb_data = dnb_data.values
sza_data = sza_data.values
day_mask, mixed_mask, night_mask = make_day_night_masks(
sza_data,
good_mask.values,
self.high_angle_cutoff,
self.low_angle_cutoff,
stepsDegrees=self.mixed_degree_step)
did_equalize = False
if day_mask.any():
LOG.debug("Histogram equalizing DNB day data...")
histogram_equalization(dnb_data, day_mask, out=output_dataset)
did_equalize = True
if mixed_mask:
for mask in mixed_mask:
if mask.any():
LOG.debug("Histogram equalizing DNB mixed data...")
histogram_equalization(dnb_data, mask, out=output_dataset)
did_equalize = True
if night_mask.any():
LOG.debug("Histogram equalizing DNB night data...")
histogram_equalization(dnb_data, night_mask, out=output_dataset)
did_equalize = True
if not did_equalize:
raise RuntimeError("No valid data found to histogram equalize")
return output_dataset | python | {
"resource": ""
} |
q26888 | AdaptiveDNB._run_dnb_normalization | train | def _run_dnb_normalization(self, dnb_data, sza_data):
"""Scale the DNB data using a adaptive histogram equalization method.
Args:
dnb_data (ndarray): Day/Night Band data array
sza_data (ndarray): Solar Zenith Angle data array
"""
# convert dask arrays to DataArray objects
dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
sza_data = xr.DataArray(sza_data, dims=('y', 'x'))
good_mask = ~(dnb_data.isnull() | sza_data.isnull())
# good_mask = ~(dnb_data.mask | sza_data.mask)
output_dataset = dnb_data.where(good_mask)
# we only need the numpy array
output_dataset = output_dataset.values.copy()
dnb_data = dnb_data.values
sza_data = sza_data.values
day_mask, mixed_mask, night_mask = make_day_night_masks(
sza_data,
good_mask.values,
self.high_angle_cutoff,
self.low_angle_cutoff,
stepsDegrees=self.mixed_degree_step)
did_equalize = False
has_multi_times = len(mixed_mask) > 0
if day_mask.any():
did_equalize = True
if self.adaptive_day == "always" or (
has_multi_times and self.adaptive_day == "multiple"):
LOG.debug("Adaptive histogram equalizing DNB day data...")
local_histogram_equalization(
dnb_data,
day_mask,
valid_data_mask=good_mask.values,
local_radius_px=self.day_radius_pixels,
out=output_dataset)
else:
LOG.debug("Histogram equalizing DNB day data...")
histogram_equalization(dnb_data,
day_mask,
out=output_dataset)
if mixed_mask:
for mask in mixed_mask:
if mask.any():
did_equalize = True
if self.adaptive_mixed == "always" or (
has_multi_times and
self.adaptive_mixed == "multiple"):
LOG.debug(
"Adaptive histogram equalizing DNB mixed data...")
local_histogram_equalization(
dnb_data,
mask,
valid_data_mask=good_mask.values,
local_radius_px=self.mixed_radius_pixels,
out=output_dataset)
else:
LOG.debug("Histogram equalizing DNB mixed data...")
histogram_equalization(dnb_data,
day_mask,
out=output_dataset)
if night_mask.any():
did_equalize = True
if self.adaptive_night == "always" or (
has_multi_times and self.adaptive_night == "multiple"):
LOG.debug("Adaptive histogram equalizing DNB night data...")
local_histogram_equalization(
dnb_data,
night_mask,
valid_data_mask=good_mask.values,
local_radius_px=self.night_radius_pixels,
out=output_dataset)
else:
LOG.debug("Histogram equalizing DNB night data...")
histogram_equalization(dnb_data,
night_mask,
out=output_dataset)
if not did_equalize:
raise RuntimeError("No valid data found to histogram equalize")
return output_dataset | python | {
"resource": ""
} |
q26889 | CLAVRXFileHandler._read_pug_fixed_grid | train | def _read_pug_fixed_grid(projection, distance_multiplier=1.0):
"""Read from recent PUG format, where axes are in meters
"""
a = projection.semi_major_axis
h = projection.perspective_point_height
b = projection.semi_minor_axis
lon_0 = projection.longitude_of_projection_origin
sweep_axis = projection.sweep_angle_axis[0]
proj_dict = {'a': float(a) * distance_multiplier,
'b': float(b) * distance_multiplier,
'lon_0': float(lon_0),
'h': float(h) * distance_multiplier,
'proj': 'geos',
'units': 'm',
'sweep': sweep_axis}
return proj_dict | python | {
"resource": ""
} |
q26890 | CLAVRXFileHandler._read_axi_fixed_grid | train | def _read_axi_fixed_grid(self, l1b_attr):
"""CLAVR-x does not transcribe fixed grid parameters to its output
We have to recover that information from the original input file,
which is partially named as L1B attribute
example attributes found in L2 CLAVR-x files:
sensor = "AHI" ;
platform = "HIM8" ;
FILENAME = "clavrx_H08_20180719_1300.level2.hdf" ;
L1B = "clavrx_H08_20180719_1300" ;
"""
LOG.debug("looking for corresponding input file for {0}"
" to act as fixed grid navigation donor".format(l1b_attr))
l1b_path = self._find_input_nc(l1b_attr)
LOG.info("Since CLAVR-x does not include fixed-grid parameters,"
" using input file {0} as donor".format(l1b_path))
l1b = netCDF4.Dataset(l1b_path)
proj = None
proj_var = l1b.variables.get("Projection", None)
if proj_var is not None:
# hsd2nc input typically used by CLAVR-x uses old-form km for axes/height
LOG.debug("found hsd2nc-style draft PUG fixed grid specification")
proj = self._read_pug_fixed_grid(proj_var, 1000.0)
if proj is None: # most likely to come into play for ABI cases
proj_var = l1b.variables.get("goes_imager_projection", None)
if proj_var is not None:
LOG.debug("found cmip-style final PUG fixed grid specification")
proj = self._read_pug_fixed_grid(proj_var)
if not proj:
raise ValueError("Unable to recover projection information"
" for {0}".format(self.filename))
h = float(proj['h'])
x, y = l1b['x'], l1b['y']
area_extent, ncols, nlines = self._area_extent(x, y, h)
# LOG.debug(repr(proj))
# LOG.debug(repr(area_extent))
area = geometry.AreaDefinition(
'ahi_geos',
"AHI L2 file area",
'ahi_geos',
proj,
ncols,
nlines,
np.asarray(area_extent))
return area | python | {
"resource": ""
} |
q26891 | Plugin.load_yaml_config | train | def load_yaml_config(self, conf):
"""Load a YAML configuration file and recursively update the overall configuration."""
with open(conf) as fd:
self.config = recursive_dict_update(self.config, yaml.load(fd, Loader=UnsafeLoader)) | python | {
"resource": ""
} |
q26892 | VIIRSCompactFileHandler.read_geo | train | def read_geo(self, key, info):
"""Read angles.
"""
pairs = {('satellite_azimuth_angle', 'satellite_zenith_angle'):
("SatelliteAzimuthAngle", "SatelliteZenithAngle"),
('solar_azimuth_angle', 'solar_zenith_angle'):
("SolarAzimuthAngle", "SolarZenithAngle"),
('dnb_solar_azimuth_angle', 'dnb_solar_zenith_angle'):
("SolarAzimuthAngle", "SolarZenithAngle"),
('dnb_lunar_azimuth_angle', 'dnb_lunar_zenith_angle'):
("LunarAzimuthAngle", "LunarZenithAngle"),
}
for pair, fkeys in pairs.items():
if key.name in pair:
if (self.cache.get(pair[0]) is None or
self.cache.get(pair[1]) is None):
angles = self.angles(*fkeys)
self.cache[pair[0]], self.cache[pair[1]] = angles
if key.name == pair[0]:
return xr.DataArray(self.cache[pair[0]], name=key.name,
attrs=self.mda, dims=('y', 'x'))
else:
return xr.DataArray(self.cache[pair[1]], name=key.name,
attrs=self.mda, dims=('y', 'x'))
if info.get('standard_name') in ['latitude', 'longitude']:
if self.lons is None or self.lats is None:
self.lons, self.lats = self.navigate()
mda = self.mda.copy()
mda.update(info)
if info['standard_name'] == 'longitude':
return xr.DataArray(self.lons, attrs=mda, dims=('y', 'x'))
else:
return xr.DataArray(self.lats, attrs=mda, dims=('y', 'x'))
if key.name == 'dnb_moon_illumination_fraction':
mda = self.mda.copy()
mda.update(info)
return xr.DataArray(self.geostuff["MoonIllumFraction"].value,
attrs=info) | python | {
"resource": ""
} |
q26893 | get_cds_time | train | def get_cds_time(days, msecs):
"""Get the datetime object of the time since epoch given in days and
milliseconds of day
"""
return datetime(1958, 1, 1) + timedelta(days=float(days),
milliseconds=float(msecs)) | python | {
"resource": ""
} |
q26894 | dec10216 | train | def dec10216(inbuf):
"""Decode 10 bits data into 16 bits words.
::
/*
* pack 4 10-bit words in 5 bytes into 4 16-bit words
*
* 0 1 2 3 4 5
* 01234567890123456789012345678901234567890
* 0 1 2 3 4
*/
ip = &in_buffer[i];
op = &out_buffer[j];
op[0] = ip[0]*4 + ip[1]/64;
op[1] = (ip[1] & 0x3F)*16 + ip[2]/16;
op[2] = (ip[2] & 0x0F)*64 + ip[3]/4;
op[3] = (ip[3] & 0x03)*256 +ip[4];
"""
arr10 = inbuf.astype(np.uint16)
arr16_len = int(len(arr10) * 4 / 5)
arr10_len = int((arr16_len * 5) / 4)
arr10 = arr10[:arr10_len] # adjust size
# dask is slow with indexing
arr10_0 = arr10[::5]
arr10_1 = arr10[1::5]
arr10_2 = arr10[2::5]
arr10_3 = arr10[3::5]
arr10_4 = arr10[4::5]
arr16_0 = (arr10_0 << 2) + (arr10_1 >> 6)
arr16_1 = ((arr10_1 & 63) << 4) + (arr10_2 >> 4)
arr16_2 = ((arr10_2 & 15) << 6) + (arr10_3 >> 2)
arr16_3 = ((arr10_3 & 3) << 8) + arr10_4
arr16 = da.stack([arr16_0, arr16_1, arr16_2, arr16_3], axis=-1).ravel()
arr16 = da.rechunk(arr16, arr16.shape[0])
return arr16 | python | {
"resource": ""
} |
q26895 | chebyshev | train | def chebyshev(coefs, time, domain):
"""Evaluate a Chebyshev Polynomial
Args:
coefs (list, np.array): Coefficients defining the polynomial
time (int, float): Time where to evaluate the polynomial
domain (list, tuple): Domain (or time interval) for which the polynomial is defined: [left, right]
Reference: Appendix A in the MSG Level 1.5 Image Data Format Description.
"""
return Chebyshev(coefs, domain=domain)(time) - 0.5 * coefs[0] | python | {
"resource": ""
} |
q26896 | SEVIRICalibrationHandler._erads2bt | train | def _erads2bt(self, data, channel_name):
"""Computation based on effective radiance."""
cal_info = CALIB[self.platform_id][channel_name]
alpha = cal_info["ALPHA"]
beta = cal_info["BETA"]
wavenumber = CALIB[self.platform_id][channel_name]["VC"]
return (self._tl15(data, wavenumber) - beta) / alpha | python | {
"resource": ""
} |
q26897 | SEVIRICalibrationHandler._ir_calibrate | train | def _ir_calibrate(self, data, channel_name, cal_type):
"""Calibrate to brightness temperature."""
if cal_type == 1:
# spectral radiances
return self._srads2bt(data, channel_name)
elif cal_type == 2:
# effective radiances
return self._erads2bt(data, channel_name)
else:
raise NotImplementedError('Unknown calibration type') | python | {
"resource": ""
} |
q26898 | SEVIRICalibrationHandler._srads2bt | train | def _srads2bt(self, data, channel_name):
"""Computation based on spectral radiance."""
a__, b__, c__ = BTFIT[channel_name]
wavenumber = CALIB[self.platform_id][channel_name]["VC"]
temp = self._tl15(data, wavenumber)
return a__ * temp * temp + b__ * temp + c__ | python | {
"resource": ""
} |
q26899 | SEVIRICalibrationHandler._tl15 | train | def _tl15(self, data, wavenumber):
"""Compute the L15 temperature."""
return ((C2 * wavenumber) /
xu.log((1.0 / data) * C1 * wavenumber ** 3 + 1.0)) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.