Dataset schema:

| column | type |
|---|---|
| id | int32 (0 - 252k) |
| repo | string (7 - 55 chars) |
| path | string (4 - 127 chars) |
| func_name | string (1 - 88 chars) |
| original_string | string (75 - 19.8k chars) |
| language | string (1 class) |
| code | string (75 - 19.8k chars) |
| code_tokens | list |
| docstring | string (3 - 17.3k chars) |
| docstring_tokens | list |
| sha | string (40 chars) |
| url | string (87 - 242 chars) |

---
id: 18,000
repo: caktus/django-timepiece
path: timepiece/reports/views.py
func_name: BillableHours.get_hours_data
language: python

```python
def get_hours_data(self, entries, date_headers):
"""Sum billable and non-billable hours across all users."""
project_totals = get_project_totals(
entries, date_headers, total_column=False) if entries else []
data_map = {}
for rows, totals in project_totals:
for user, user_id, periods in rows:
for period in periods:
day = period['day']
if day not in data_map:
data_map[day] = {'billable': 0, 'nonbillable': 0}
data_map[day]['billable'] += period['billable']
data_map[day]['nonbillable'] += period['nonbillable']
return data_map
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/reports/views.py#L314-L329
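
The method reduces per-user period rows to per-day totals. A minimal runnable sketch of that aggregation, with `project_totals` mocked to the shape the loop expects (the real shape comes from `get_project_totals`, which is not shown in this row):

```python
from datetime import date

# Mocked shape of get_project_totals(...) output: an iterable of
# (rows, totals) pairs, where each row is (user, user_id, periods)
# and each period is a dict with 'day', 'billable', 'nonbillable'.
project_totals = [
    ([('Alice', 1, [{'day': date(2013, 1, 7), 'billable': 6, 'nonbillable': 2}]),
      ('Bob', 2, [{'day': date(2013, 1, 7), 'billable': 3, 'nonbillable': 1}])],
     None),  # the totals element is unused by the loop
]

data_map = {}
for rows, totals in project_totals:
    for user, user_id, periods in rows:
        for period in periods:
            day = period['day']
            if day not in data_map:
                data_map[day] = {'billable': 0, 'nonbillable': 0}
            data_map[day]['billable'] += period['billable']
            data_map[day]['nonbillable'] += period['nonbillable']

print(data_map)  # {datetime.date(2013, 1, 7): {'billable': 9, 'nonbillable': 3}}
```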

---
id: 18,001
repo: caktus/django-timepiece
path: timepiece/templatetags/timepiece_tags.py
func_name: add_parameters
language: python

```python
def add_parameters(url, parameters):
"""
Appends URL-encoded parameters to the base URL. It appends after '&' if
'?' is found in the URL; otherwise it appends using '?'. Keep in mind that
this tag does not take into account the value of existing params; it is
therefore possible to add another value for a pre-existing parameter.
For example::
{% url 'this_view' as current_url %}
{% with complete_url=current_url|add_parameters:request.GET %}
The <a href="{% url 'other' %}?next={{ complete_url|urlencode }}">
other page</a> will redirect back to the current page (including
any GET parameters).
{% endwith %}
"""
if parameters:
sep = '&' if '?' in url else '?'
return '{0}{1}{2}'.format(url, sep, urlencode(parameters))
return url
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/templatetags/timepiece_tags.py#L22-L41
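
A standalone sketch of the tag's behavior; `urlencode` here is `urllib.parse.urlencode` as a stand-in for the Django helper the module actually imports, which behaves the same for simple dicts:

```python
from urllib.parse import urlencode  # stand-in for the Django urlencode the module imports

def add_parameters(url, parameters):
    if parameters:
        sep = '&' if '?' in url else '?'
        return '{0}{1}{2}'.format(url, sep, urlencode(parameters))
    return url

print(add_parameters('/reports/', {'year': 2013}))         # /reports/?year=2013
print(add_parameters('/reports/?page=2', {'year': 2013}))  # /reports/?page=2&year=2013
print(add_parameters('/reports/', {}))                     # /reports/ (unchanged)
```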

---
id: 18,002
repo: caktus/django-timepiece
path: timepiece/templatetags/timepiece_tags.py
func_name: get_max_hours
language: python

```python
def get_max_hours(context):
"""Return the largest number of hours worked or assigned on any project."""
progress = context['project_progress']
return max([0] + [max(p['worked'], p['assigned']) for p in progress])
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/templatetags/timepiece_tags.py#L109-L112
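
Prepending `[0]` keeps `max()` from raising on an empty progress list. A quick demonstration with a mocked context:

```python
def get_max_hours(context):
    progress = context['project_progress']
    return max([0] + [max(p['worked'], p['assigned']) for p in progress])

context = {'project_progress': [{'worked': 12, 'assigned': 20},
                                {'worked': 35, 'assigned': 30}]}
print(get_max_hours(context))                   # 35
print(get_max_hours({'project_progress': []}))  # 0 -- the [0] guard avoids max() on empty
```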

---
id: 18,003
repo: caktus/django-timepiece
path: timepiece/templatetags/timepiece_tags.py
func_name: get_uninvoiced_hours
language: python

```python
def get_uninvoiced_hours(entries, billable=None):
"""Given an iterable of entries, return the total hours that have
not been invoiced. If billable is passed as 'billable' or 'nonbillable',
limit to the corresponding entries.
"""
statuses = ('invoiced', 'not-invoiced')
if billable is not None:
billable = (billable.lower() == u'billable')
entries = [e for e in entries if e.activity.billable == billable]
hours = sum([e.hours for e in entries if e.status not in statuses])
return '{0:.2f}'.format(hours)
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/templatetags/timepiece_tags.py#L116-L126
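
A runnable sketch with entries mocked as namedtuples; the status strings and `activity.billable` flag stand in for the real model fields:

```python
from collections import namedtuple
from decimal import Decimal

# Illustrative stand-ins for the Entry/Activity model fields the filter reads.
Activity = namedtuple('Activity', 'billable')
Entry = namedtuple('Entry', 'hours status activity')

def get_uninvoiced_hours(entries, billable=None):
    statuses = ('invoiced', 'not-invoiced')
    if billable is not None:
        billable = (billable.lower() == u'billable')
        entries = [e for e in entries if e.activity.billable == billable]
    hours = sum([e.hours for e in entries if e.status not in statuses])
    return '{0:.2f}'.format(hours)

entries = [
    Entry(Decimal('2.50'), 'approved', Activity(True)),
    Entry(Decimal('1.00'), 'invoiced', Activity(True)),   # excluded: already invoiced
    Entry(Decimal('3.25'), 'verified', Activity(False)),
]
print(get_uninvoiced_hours(entries))              # 5.75
print(get_uninvoiced_hours(entries, 'billable'))  # 2.50
```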

---
id: 18,004
repo: caktus/django-timepiece
path: timepiece/templatetags/timepiece_tags.py
func_name: humanize_hours
language: python

```python
def humanize_hours(total_hours, frmt='{hours:02d}:{minutes:02d}:{seconds:02d}',
negative_frmt=None):
"""Given time in hours, return a string representing the time."""
seconds = int(float(total_hours) * 3600)
return humanize_seconds(seconds, frmt, negative_frmt)
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/templatetags/timepiece_tags.py#L130-L134
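
`humanize_seconds` lives elsewhere in timepiece's utilities; the sketch below pairs the function with a rough stand-in so the call chain is runnable (the real helper's formatting details may differ):

```python
def humanize_seconds(total_seconds,
                     frmt='{hours:02d}:{minutes:02d}:{seconds:02d}',
                     negative_frmt=None):
    # Rough stand-in for timepiece's humanize_seconds helper; the real one
    # lives in timepiece.utils and may format negatives differently.
    if total_seconds < 0 and negative_frmt is not None:
        frmt = negative_frmt
    secs = abs(int(total_seconds))
    return frmt.format(hours=secs // 3600, minutes=(secs % 3600) // 60,
                       seconds=secs % 60)

def humanize_hours(total_hours, frmt='{hours:02d}:{minutes:02d}:{seconds:02d}',
                   negative_frmt=None):
    seconds = int(float(total_hours) * 3600)
    return humanize_seconds(seconds, frmt, negative_frmt)

print(humanize_hours('1.5'))   # 01:30:00
print(humanize_hours('0.25'))  # 00:15:00
```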

---
id: 18,005
repo: caktus/django-timepiece
path: timepiece/templatetags/timepiece_tags.py
func_name: _timesheet_url
language: python

```python
def _timesheet_url(url_name, pk, date=None):
"""Utility to create a time sheet URL with optional date parameters."""
url = reverse(url_name, args=(pk,))
if date:
params = {'month': date.month, 'year': date.year}
return '?'.join((url, urlencode(params)))
return url
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/templatetags/timepiece_tags.py#L222-L228

---
id: 18,006
repo: caktus/django-timepiece
path: timepiece/crm/views.py
func_name: reject_user_timesheet
language: python

```python
def reject_user_timesheet(request, user_id):
"""
This allows admins to reject all entries, instead of just one
"""
form = YearMonthForm(request.GET or request.POST)
user = User.objects.get(pk=user_id)
if form.is_valid():
from_date, to_date = form.save()
entries = Entry.no_join.filter(
status=Entry.VERIFIED, user=user, start_time__gte=from_date,
end_time__lte=to_date)
if request.POST.get('yes'):
if entries.exists():
count = entries.count()
Entry.no_join.filter(pk__in=entries).update(status=Entry.UNVERIFIED)
msg = 'You have rejected %d previously verified entries.' \
% count
else:
msg = 'There are no verified entries to reject.'
messages.info(request, msg)
else:
return render(request, 'timepiece/user/timesheet/reject.html', {
'date': from_date,
'timesheet_user': user
})
else:
msg = 'You must provide a month and year for entries to be rejected.'
messages.error(request, msg)
url = reverse('view_user_timesheet', args=(user_id,))
return HttpResponseRedirect(url)
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/crm/views.py#L47-L77

---
id: 18,007
repo: caktus/django-timepiece
path: setup.py
func_name: _is_requirement
language: python

```python
def _is_requirement(line):
"""Returns whether the line is a valid package requirement."""
line = line.strip()
return line and not (line.startswith("-r") or line.startswith("#"))
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/setup.py#L4-L7
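
A quick demonstration: blank lines, comments, and `-r` includes are filtered out of a requirements file:

```python
def _is_requirement(line):
    line = line.strip()
    return line and not (line.startswith("-r") or line.startswith("#"))

lines = ["Django>=1.4", "# a comment", "-r base.txt", "   "]
print([line for line in lines if _is_requirement(line)])  # ['Django>=1.4']
```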

---
id: 18,008
repo: caktus/django-timepiece
path: timepiece/utils/search.py
func_name: SearchMixin.render_to_response
language: python

```python
def render_to_response(self, context):
"""
When the user makes a search and there is only one result, redirect
to the result's detail page rather than rendering the list.
"""
if self.redirect_if_one_result:
if self.object_list.count() == 1 and self.form.is_bound:
return redirect(self.object_list.get().get_absolute_url())
return super(SearchMixin, self).render_to_response(context)
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/utils/search.py#L61-L69

---
id: 18,009
repo: caktus/django-timepiece
path: timepiece/entries/forms.py
func_name: ClockInForm.clean_start_time
language: python

```python
def clean_start_time(self):
"""
Make sure that the start time doesn't come before the active entry
"""
start = self.cleaned_data.get('start_time')
if not start:
return start
active_entries = self.user.timepiece_entries.filter(
start_time__gte=start, end_time__isnull=True)
for entry in active_entries:
output = ('The start time is on or before the current entry: '
'%s - %s starting at %s' % (entry.project, entry.activity,
entry.start_time.strftime('%H:%M:%S')))
raise forms.ValidationError(output)
return start
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/forms.py#L64-L78

---
id: 18,010
repo: caktus/django-timepiece
path: timepiece/entries/forms.py
func_name: AddUpdateEntryForm.clean
language: python

```python
def clean(self):
"""
If we're not editing the active entry, ensure that this entry doesn't
conflict with or come after the active entry.
"""
active = utils.get_active_entry(self.user)
start_time = self.cleaned_data.get('start_time', None)
end_time = self.cleaned_data.get('end_time', None)
if active and active.pk != self.instance.pk:
if (start_time and start_time > active.start_time) or \
(end_time and end_time > active.start_time):
raise forms.ValidationError(
'The start time or end time conflict with the active '
'entry: {activity} on {project} starting at '
'{start_time}.'.format(
project=active.project,
activity=active.activity,
start_time=active.start_time.strftime('%H:%M:%S'),
))
month_start = utils.get_month_start(start_time)
next_month = month_start + relativedelta(months=1)
entries = self.instance.user.timepiece_entries.filter(
Q(status=Entry.APPROVED) | Q(status=Entry.INVOICED),
start_time__gte=month_start,
end_time__lt=next_month
)
entry = self.instance
if not self.acting_user.is_superuser:
if (entries.exists() and not entry.id or entry.id and entry.status == Entry.INVOICED):
message = 'You cannot add/edit entries after a timesheet has been ' \
'approved or invoiced. Please correct the start and end times.'
raise forms.ValidationError(message)
return self.cleaned_data
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/forms.py#L145-L181
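
The unparenthesized condition near the end relies on `and` binding tighter than `or`; a small check of that reading:

```python
# `and` binds tighter than `or`, so the guard in clean() parses as:
#   (entries.exists() and not entry.id) or (entry.id and entry.status == Entry.INVOICED)
# i.e. block new entries in a closed month, and block edits to invoiced entries.
exists, has_id, invoiced = True, False, False   # adding a new entry to a closed month
lhs = exists and not has_id or has_id and invoiced
rhs = (exists and not has_id) or (has_id and invoiced)
assert lhs == rhs
print('blocked' if lhs else 'allowed')  # blocked
```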

---
id: 18,011
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: clock_in
language: python

```python
def clock_in(request):
"""For clocking the user into a project."""
user = request.user
# Lock the active entry for the duration of this transaction, to prevent
# creating multiple active entries.
active_entry = utils.get_active_entry(user, select_for_update=True)
initial = dict([(k, v) for k, v in request.GET.items()])
data = request.POST or None
form = ClockInForm(data, initial=initial, user=user, active=active_entry)
if form.is_valid():
entry = form.save()
message = 'You have clocked into {0} on {1}.'.format(
entry.activity.name, entry.project)
messages.info(request, message)
return HttpResponseRedirect(reverse('dashboard'))
return render(request, 'timepiece/entry/clock_in.html', {
'form': form,
'active': active_entry,
})
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L139-L159

---
id: 18,012
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: toggle_pause
language: python

```python
def toggle_pause(request):
"""Allow the user to pause and unpause the active entry."""
entry = utils.get_active_entry(request.user)
if not entry:
raise Http404
# toggle the paused state
entry.toggle_paused()
entry.save()
# create a message that can be displayed to the user
action = 'paused' if entry.is_paused else 'resumed'
message = 'Your entry, {0} on {1}, has been {2}.'.format(
entry.activity.name, entry.project, action)
messages.info(request, message)
# redirect to the log entry list
return HttpResponseRedirect(reverse('dashboard'))
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L189-L206

---
id: 18,013
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: reject_entry
language: python

```python
def reject_entry(request, entry_id):
"""
Admins can reject an entry that has been verified or approved but not
invoiced to set its status to 'unverified' for the user to fix.
"""
return_url = request.GET.get('next', reverse('dashboard'))
try:
entry = Entry.no_join.get(pk=entry_id)
except:
message = 'No such log entry.'
messages.error(request, message)
return redirect(return_url)
if entry.status == Entry.UNVERIFIED or entry.status == Entry.INVOICED:
msg_text = 'This entry is unverified or is already invoiced.'
messages.error(request, msg_text)
return redirect(return_url)
if request.POST.get('Yes'):
entry.status = Entry.UNVERIFIED
entry.save()
msg_text = 'The entry\'s status was set to unverified.'
messages.info(request, msg_text)
return redirect(return_url)
return render(request, 'timepiece/entry/reject.html', {
'entry': entry,
'next': request.GET.get('next'),
})
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L255-L282

---
id: 18,014
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: delete_entry
language: python

```python
def delete_entry(request, entry_id):
"""
Give the user the ability to delete a log entry, with a confirmation
beforehand. If this method is invoked via a GET request, a form asking
for a confirmation of intent will be presented to the user. If this method
is invoked via a POST request, the entry will be deleted.
"""
try:
entry = Entry.no_join.get(pk=entry_id, user=request.user)
except Entry.DoesNotExist:
message = 'No such entry found.'
messages.info(request, message)
url = request.GET.get('next', reverse('dashboard'))
return HttpResponseRedirect(url)
if request.method == 'POST':
key = request.POST.get('key', None)
if key and key == entry.delete_key:
entry.delete()
message = 'Deleted {0} for {1}.'.format(entry.activity.name, entry.project)
messages.info(request, message)
url = request.GET.get('next', reverse('dashboard'))
return HttpResponseRedirect(url)
else:
message = 'You are not authorized to delete this entry!'
messages.error(request, message)
return render(request, 'timepiece/entry/delete.html', {
'entry': entry,
})
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L286-L315

---
id: 18,015
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: Dashboard.get_hours_per_week
language: python

```python
def get_hours_per_week(self, user=None):
"""Retrieves the number of hours the user should work per week."""
try:
profile = UserProfile.objects.get(user=user or self.user)
except UserProfile.DoesNotExist:
profile = None
return profile.hours_per_week if profile else Decimal('40.00')
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L57-L63

---
id: 18,016
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: ScheduleMixin.get_hours_for_week
language: python

```python
def get_hours_for_week(self, week_start=None):
"""
Gets all ProjectHours entries in the 7-day period beginning on
week_start.
"""
week_start = week_start if week_start else self.week_start
week_end = week_start + relativedelta(days=7)
return ProjectHours.objects.filter(
week_start__gte=week_start, week_start__lt=week_end)
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L336-L345

---
id: 18,017
repo: caktus/django-timepiece
path: timepiece/entries/views.py
func_name: ScheduleView.get_users_from_project_hours
language: python

```python
def get_users_from_project_hours(self, project_hours):
"""
Gets a list of the distinct users included in the project hours
entries, ordered by name.
"""
name = ('user__first_name', 'user__last_name')
users = project_hours.values_list('user__id', *name).distinct()\
.order_by(*name)
return users
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L357-L365

---
id: 18,018
repo: caktus/django-timepiece
path: timepiece/management/commands/check_entries.py
func_name: Command.check_all
language: python

```python
def check_all(self, all_entries, *args, **kwargs):
"""
Go through lists of entries, find overlaps among each, return the total
"""
all_overlaps = 0
while True:
try:
user_entries = all_entries.next()
except StopIteration:
return all_overlaps
else:
user_total_overlaps = self.check_entry(
user_entries, *args, **kwargs)
all_overlaps += user_total_overlaps
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L69-L82
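
`all_entries.next()` is the Python 2 generator protocol; under Python 3 the equivalent is the `next()` builtin. A sketch of the same loop in Python 3 form, with `check_entry` swapped for `len` just to make it runnable:

```python
def check_all(all_entries, check_entry):
    # Same control flow in Python 3: the next() builtin replaces the
    # Python 2 generator method .next() used in the original.
    all_overlaps = 0
    while True:
        try:
            user_entries = next(all_entries)
        except StopIteration:
            return all_overlaps
        else:
            all_overlaps += check_entry(user_entries)

gen = iter([[1, 2], [3]])   # stand-in for the find_entries(...) generator
print(check_all(gen, len))  # 3 -- len stands in for the real overlap check
```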

---
id: 18,019
repo: caktus/django-timepiece
path: timepiece/management/commands/check_entries.py
func_name: Command.check_entry
language: python

```python
def check_entry(self, entries, *args, **kwargs):
"""
With a list of entries, check each entry against every other
"""
verbosity = kwargs.get('verbosity', 1)
user_total_overlaps = 0
user = ''
for index_a, entry_a in enumerate(entries):
# Show the name the first time through
if index_a == 0:
if args and verbosity >= 1 or verbosity >= 2:
self.show_name(entry_a.user)
user = entry_a.user
for index_b in range(index_a, len(entries)):
entry_b = entries[index_b]
if entry_a.check_overlap(entry_b):
user_total_overlaps += 1
self.show_overlap(entry_a, entry_b, verbosity=verbosity)
if user_total_overlaps and user and verbosity >= 1:
overlap_data = {
'first': user.first_name,
'last': user.last_name,
'total': user_total_overlaps,
}
self.stdout.write('Total overlapping entries for user ' +
'%(first)s %(last)s: %(total)d' % overlap_data)
return user_total_overlaps
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L84-L110
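
The nested loop is a quadratic pairwise scan that also pairs each entry with itself, so `Entry.check_overlap` must treat self-comparison as a non-overlap. An illustrative stand-in with plain intervals (the real method is on the Entry model and is not shown here):

```python
from collections import namedtuple

Entry = namedtuple('Entry', 'pk start end')

def check_overlap(a, b):
    # Stand-in for Entry.check_overlap: open-interval overlap that ignores
    # self-comparison, since the inner loop below pairs each entry with itself.
    return a.pk != b.pk and a.start < b.end and b.start < a.end

entries = [Entry(1, 9, 12), Entry(2, 11, 13), Entry(3, 14, 15)]
overlaps = 0
for index_a, entry_a in enumerate(entries):
    for entry_b in entries[index_a:]:
        if check_overlap(entry_a, entry_b):
            overlaps += 1
print(overlaps)  # 1 -- entries 1 and 2 overlap between hours 11 and 12
```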

---
id: 18,020
repo: caktus/django-timepiece
path: timepiece/management/commands/check_entries.py
func_name: Command.find_start
language: python

```python
def find_start(self, **kwargs):
"""
Determine the starting point of the query using CLI keyword arguments
"""
week = kwargs.get('week', False)
month = kwargs.get('month', False)
year = kwargs.get('year', False)
days = kwargs.get('days', 0)
# If no flags are True, set to the beginning of last billing window
# to assure we catch all recent violations
start = timezone.now() - relativedelta(months=1, day=1)
# Set the start date based on arguments provided through options
if week:
start = utils.get_week_start()
if month:
start = timezone.now() - relativedelta(day=1)
if year:
start = timezone.now() - relativedelta(day=1, month=1)
if days:
start = timezone.now() - relativedelta(days=days)
start -= relativedelta(hour=0, minute=0, second=0, microsecond=0)
return start
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L112-L133
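
The date arithmetic leans on dateutil's convention that plural keywords (`months=`, `days=`) shift a date while singular ones (`day=`, `month=`, `hour=`) set a field absolutely, even under subtraction. A demonstration (requires python-dateutil):

```python
from datetime import datetime
from dateutil.relativedelta import relativedelta

now = datetime(2013, 6, 18, 14, 30)
print(now - relativedelta(months=1, day=1))  # 2013-05-01 14:30:00 (first of last month)
print(now - relativedelta(day=1))            # 2013-06-01 14:30:00 (first of this month)
print(now - relativedelta(day=1, month=1))   # 2013-01-01 14:30:00 (first of the year)
# The trailing adjustment zeroes the time fields rather than subtracting anything:
print(now - relativedelta(hour=0, minute=0, second=0, microsecond=0))
# 2013-06-18 00:00:00
```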

---
id: 18,021
repo: caktus/django-timepiece
path: timepiece/management/commands/check_entries.py
func_name: Command.find_users
language: python

```python
def find_users(self, *args):
"""
Returns the users to search given names as args.
Return all users if there are no args provided.
"""
if args:
names = reduce(lambda query, arg: query |
(Q(first_name__icontains=arg) | Q(last_name__icontains=arg)),
args, Q()) # noqa
users = User.objects.filter(names)
# If no args given, check every user
else:
users = User.objects.all()
# Display errors if no user was found
if not users.count() and args:
if len(args) == 1:
raise CommandError('No user was found with the name %s' % args[0])
else:
arg_list = ', '.join(args)
raise CommandError('No users found with the names: %s' % arg_list)
return users
```

sha: 52515dec027664890efbc535429e1ba1ee152f40
url: https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L135-L155
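
The `reduce` ORs one pair of `Q` objects per name argument into a single filter, starting from an empty `Q()` as the identity element. A pure-Python analog of the same fold, with predicates in place of `Q` objects:

```python
from functools import reduce

# Predicates play the role of Q objects; `or` plays the role of `|`, and the
# always-false starting predicate plays the role of the empty Q().
def matches(user, arg):
    return arg in user['first_name'].lower() or arg in user['last_name'].lower()

args = ('ali', 'jon')
predicate = reduce(
    lambda query, arg: (lambda u, q=query, a=arg: q(u) or matches(u, a)),
    args,
    lambda u: False)

users = [{'first_name': 'Alice', 'last_name': 'Smith'},
         {'first_name': 'Bob', 'last_name': 'Jones'}]
print([u['first_name'] for u in users if predicate(u)])  # ['Alice', 'Bob']
```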

---
id: 18,022
repo: caktus/django-timepiece
path: timepiece/management/commands/check_entries.py
func_name: Command.find_entries
language: python

```python
def find_entries(self, users, start, *args, **kwargs):
"""
Find all entries for all users, from a given starting point.
If no starting point is provided, all entries are returned.
"""
forever = kwargs.get('all', False)
for user in users:
if forever:
entries = Entry.objects.filter(user=user).order_by('start_time')
else:
entries = Entry.objects.filter(
user=user, start_time__gte=start).order_by(
'start_time')
yield entries
```
52515dec027664890efbc535429e1ba1ee152f40
|
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/management/commands/check_entries.py#L157-L170
|
18,023
|
caktus/django-timepiece
|
timepiece/utils/views.py
|
cbv_decorator
|
def cbv_decorator(function_decorator):
"""Allows a function-based decorator to be used on a CBV."""
def class_decorator(View):
View.dispatch = method_decorator(function_decorator)(View.dispatch)
return View
return class_decorator
|
python
|
def cbv_decorator(function_decorator):
"""Allows a function-based decorator to be used on a CBV."""
def class_decorator(View):
View.dispatch = method_decorator(function_decorator)(View.dispatch)
return View
return class_decorator
|
[
"def",
"cbv_decorator",
"(",
"function_decorator",
")",
":",
"def",
"class_decorator",
"(",
"View",
")",
":",
"View",
".",
"dispatch",
"=",
"method_decorator",
"(",
"function_decorator",
")",
"(",
"View",
".",
"dispatch",
")",
"return",
"View",
"return",
"class_decorator"
] |
Allows a function-based decorator to be used on a CBV.
|
[
"Allows",
"a",
"function",
"-",
"based",
"decorator",
"to",
"be",
"used",
"on",
"a",
"CBV",
"."
] |
52515dec027664890efbc535429e1ba1ee152f40
|
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/utils/views.py#L4-L10
|
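The `cbv_decorator` entry above is the classic recipe for reusing a function-based decorator on a class-based view: wrap `View.dispatch` with `method_decorator`. A hedged usage sketch reusing the `cbv_decorator` defined above (the `DashboardView` name is illustrative, not from the repo):

from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView

@cbv_decorator(login_required)
class DashboardView(TemplateView):
    # Every HTTP method now passes through login_required via dispatch()
    template_name = 'dashboard.html'

Django 1.9 and later offer the same effect directly with `@method_decorator(login_required, name='dispatch')` on the class, which is why helpers like this have largely disappeared from newer codebases.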
18,024
|
caktus/django-timepiece
|
timepiece/reports/utils.py
|
date_totals
|
def date_totals(entries, by):
"""Yield a user's name and a dictionary of their hours"""
date_dict = {}
for date, date_entries in groupby(entries, lambda x: x['date']):
if isinstance(date, datetime.datetime):
date = date.date()
d_entries = list(date_entries)
if by == 'user':
name = ' '.join((d_entries[0]['user__first_name'],
d_entries[0]['user__last_name']))
elif by == 'project':
name = d_entries[0]['project__name']
else:
name = d_entries[0][by]
pk = d_entries[0][by]
hours = get_hours_summary(d_entries)
date_dict[date] = hours
return name, pk, date_dict
|
python
|
def date_totals(entries, by):
"""Yield a user's name and a dictionary of their hours"""
date_dict = {}
for date, date_entries in groupby(entries, lambda x: x['date']):
if isinstance(date, datetime.datetime):
date = date.date()
d_entries = list(date_entries)
if by == 'user':
name = ' '.join((d_entries[0]['user__first_name'],
d_entries[0]['user__last_name']))
elif by == 'project':
name = d_entries[0]['project__name']
else:
name = d_entries[0][by]
pk = d_entries[0][by]
hours = get_hours_summary(d_entries)
date_dict[date] = hours
return name, pk, date_dict
|
[
"def",
"date_totals",
"(",
"entries",
",",
"by",
")",
":",
"date_dict",
"=",
"{",
"}",
"for",
"date",
",",
"date_entries",
"in",
"groupby",
"(",
"entries",
",",
"lambda",
"x",
":",
"x",
"[",
"'date'",
"]",
")",
":",
"if",
"isinstance",
"(",
"date",
",",
"datetime",
".",
"datetime",
")",
":",
"date",
"=",
"date",
".",
"date",
"(",
")",
"d_entries",
"=",
"list",
"(",
"date_entries",
")",
"if",
"by",
"==",
"'user'",
":",
"name",
"=",
"' '",
".",
"join",
"(",
"(",
"d_entries",
"[",
"0",
"]",
"[",
"'user__first_name'",
"]",
",",
"d_entries",
"[",
"0",
"]",
"[",
"'user__last_name'",
"]",
")",
")",
"elif",
"by",
"==",
"'project'",
":",
"name",
"=",
"d_entries",
"[",
"0",
"]",
"[",
"'project__name'",
"]",
"else",
":",
"name",
"=",
"d_entries",
"[",
"0",
"]",
"[",
"by",
"]",
"pk",
"=",
"d_entries",
"[",
"0",
"]",
"[",
"by",
"]",
"hours",
"=",
"get_hours_summary",
"(",
"d_entries",
")",
"date_dict",
"[",
"date",
"]",
"=",
"hours",
"return",
"name",
",",
"pk",
",",
"date_dict"
] |
Yield a user's name and a dictionary of their hours
|
[
"Yield",
"a",
"user",
"s",
"name",
"and",
"a",
"dictionary",
"of",
"their",
"hours"
] |
52515dec027664890efbc535429e1ba1ee152f40
|
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/reports/utils.py#L12-L31
|
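Two caveats on the `date_totals` entry above. First, despite the docstring's "Yield", it is a plain function: it returns once, with `name` and `pk` taken from whichever date group ran last, so it is meant to receive entries for a single user or project (and raises `UnboundLocalError` on an empty sequence). Second, `itertools.groupby` only merges *consecutive* equal keys, so the entries must already be sorted by `'date'`. A minimal demonstration of that second contract:

import datetime
from itertools import groupby

entries = [
    {'date': datetime.date(2013, 1, 1), 'hours': 2},
    {'date': datetime.date(2013, 1, 2), 'hours': 3},
    {'date': datetime.date(2013, 1, 1), 'hours': 4},  # out of order
]

# Unsorted: January 1 shows up as two separate groups.
print([(d.day, len(list(g))) for d, g in groupby(entries, lambda x: x['date'])])
# [(1, 1), (2, 1), (1, 1)]

# Sorted first: one group per day, which is what date_totals expects.
entries.sort(key=lambda x: x['date'])
print([(d.day, len(list(g))) for d, g in groupby(entries, lambda x: x['date'])])
# [(1, 2), (2, 1)]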
18,025
|
caktus/django-timepiece
|
timepiece/reports/utils.py
|
get_project_totals
|
def get_project_totals(entries, date_headers, hour_type=None, overtime=False,
total_column=False, by='user'):
"""
Yield hour totals grouped by user and date. Optionally including overtime.
"""
totals = [0 for date in date_headers]
rows = []
for thing, thing_entries in groupby(entries, lambda x: x[by]):
name, thing_id, date_dict = date_totals(thing_entries, by)
dates = []
for index, day in enumerate(date_headers):
if isinstance(day, datetime.datetime):
day = day.date()
if hour_type:
total = date_dict.get(day, {}).get(hour_type, 0)
dates.append(total)
else:
billable = date_dict.get(day, {}).get('billable', 0)
nonbillable = date_dict.get(day, {}).get('non_billable', 0)
total = billable + nonbillable
dates.append({
'day': day,
'billable': billable,
'nonbillable': nonbillable,
'total': total
})
totals[index] += total
if total_column:
dates.append(sum(dates))
if overtime:
dates.append(find_overtime(dates))
dates = [date or '' for date in dates]
rows.append((name, thing_id, dates))
if total_column:
totals.append(sum(totals))
totals = [t or '' for t in totals]
yield (rows, totals)
|
python
|
def get_project_totals(entries, date_headers, hour_type=None, overtime=False,
total_column=False, by='user'):
"""
Yield hour totals grouped by user and date. Optionally including overtime.
"""
totals = [0 for date in date_headers]
rows = []
for thing, thing_entries in groupby(entries, lambda x: x[by]):
name, thing_id, date_dict = date_totals(thing_entries, by)
dates = []
for index, day in enumerate(date_headers):
if isinstance(day, datetime.datetime):
day = day.date()
if hour_type:
total = date_dict.get(day, {}).get(hour_type, 0)
dates.append(total)
else:
billable = date_dict.get(day, {}).get('billable', 0)
nonbillable = date_dict.get(day, {}).get('non_billable', 0)
total = billable + nonbillable
dates.append({
'day': day,
'billable': billable,
'nonbillable': nonbillable,
'total': total
})
totals[index] += total
if total_column:
dates.append(sum(dates))
if overtime:
dates.append(find_overtime(dates))
dates = [date or '' for date in dates]
rows.append((name, thing_id, dates))
if total_column:
totals.append(sum(totals))
totals = [t or '' for t in totals]
yield (rows, totals)
|
[
"def",
"get_project_totals",
"(",
"entries",
",",
"date_headers",
",",
"hour_type",
"=",
"None",
",",
"overtime",
"=",
"False",
",",
"total_column",
"=",
"False",
",",
"by",
"=",
"'user'",
")",
":",
"totals",
"=",
"[",
"0",
"for",
"date",
"in",
"date_headers",
"]",
"rows",
"=",
"[",
"]",
"for",
"thing",
",",
"thing_entries",
"in",
"groupby",
"(",
"entries",
",",
"lambda",
"x",
":",
"x",
"[",
"by",
"]",
")",
":",
"name",
",",
"thing_id",
",",
"date_dict",
"=",
"date_totals",
"(",
"thing_entries",
",",
"by",
")",
"dates",
"=",
"[",
"]",
"for",
"index",
",",
"day",
"in",
"enumerate",
"(",
"date_headers",
")",
":",
"if",
"isinstance",
"(",
"day",
",",
"datetime",
".",
"datetime",
")",
":",
"day",
"=",
"day",
".",
"date",
"(",
")",
"if",
"hour_type",
":",
"total",
"=",
"date_dict",
".",
"get",
"(",
"day",
",",
"{",
"}",
")",
".",
"get",
"(",
"hour_type",
",",
"0",
")",
"dates",
".",
"append",
"(",
"total",
")",
"else",
":",
"billable",
"=",
"date_dict",
".",
"get",
"(",
"day",
",",
"{",
"}",
")",
".",
"get",
"(",
"'billable'",
",",
"0",
")",
"nonbillable",
"=",
"date_dict",
".",
"get",
"(",
"day",
",",
"{",
"}",
")",
".",
"get",
"(",
"'non_billable'",
",",
"0",
")",
"total",
"=",
"billable",
"+",
"nonbillable",
"dates",
".",
"append",
"(",
"{",
"'day'",
":",
"day",
",",
"'billable'",
":",
"billable",
",",
"'nonbillable'",
":",
"nonbillable",
",",
"'total'",
":",
"total",
"}",
")",
"totals",
"[",
"index",
"]",
"+=",
"total",
"if",
"total_column",
":",
"dates",
".",
"append",
"(",
"sum",
"(",
"dates",
")",
")",
"if",
"overtime",
":",
"dates",
".",
"append",
"(",
"find_overtime",
"(",
"dates",
")",
")",
"dates",
"=",
"[",
"date",
"or",
"''",
"for",
"date",
"in",
"dates",
"]",
"rows",
".",
"append",
"(",
"(",
"name",
",",
"thing_id",
",",
"dates",
")",
")",
"if",
"total_column",
":",
"totals",
".",
"append",
"(",
"sum",
"(",
"totals",
")",
")",
"totals",
"=",
"[",
"t",
"or",
"''",
"for",
"t",
"in",
"totals",
"]",
"yield",
"(",
"rows",
",",
"totals",
")"
] |
Yield hour totals grouped by user and date. Optionally including overtime.
|
[
"Yield",
"hour",
"totals",
"grouped",
"by",
"user",
"and",
"date",
".",
"Optionally",
"including",
"overtime",
"."
] |
52515dec027664890efbc535429e1ba1ee152f40
|
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/reports/utils.py#L57-L93
|
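Note the key asymmetry in `get_project_totals` above: per-day hours are read out of `date_dict` under `'billable'` and `'non_billable'`, but re-emitted under `'billable'` and `'nonbillable'`. Also, `total_column=True` appears to be meaningful only together with `hour_type`, because in the other branch each cell of `dates` is a dict and `sum(dates)` cannot add dicts:

# When hour_type is None each cell is a dict, and sum() cannot add dicts:
cells = [{'day': None, 'billable': 1, 'nonbillable': 0, 'total': 1}]
try:
    sum(cells)
except TypeError as exc:
    print(exc)  # unsupported operand type(s) for +: 'int' and 'dict'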
18,026
|
stanfordnlp/stanza
|
stanza/research/learner.py
|
Learner.validate
|
def validate(self, validation_instances, metrics, iteration=None):
'''
Evaluate this model on `validation_instances` during training and
output a report.
:param validation_instances: The data to use to validate the model.
:type validation_instances: list(instance.Instance)
:param metrics: Functions like those found in the `metrics` module
for quantifying the performance of the learner.
:type metrics: list(function)
:param iteration: A label (anything with a sensible `str()` conversion)
identifying the current iteration in output.
'''
if not validation_instances or not metrics:
return {}
split_id = 'val%s' % iteration if iteration is not None else 'val'
train_results = evaluate.evaluate(self, validation_instances,
metrics=metrics, split_id=split_id)
output.output_results(train_results, split_id)
return train_results
|
python
|
def validate(self, validation_instances, metrics, iteration=None):
'''
Evaluate this model on `validation_instances` during training and
output a report.
:param validation_instances: The data to use to validate the model.
:type validation_instances: list(instance.Instance)
:param metrics: Functions like those found in the `metrics` module
for quantifying the performance of the learner.
:type metrics: list(function)
:param iteration: A label (anything with a sensible `str()` conversion)
identifying the current iteration in output.
'''
if not validation_instances or not metrics:
return {}
split_id = 'val%s' % iteration if iteration is not None else 'val'
train_results = evaluate.evaluate(self, validation_instances,
metrics=metrics, split_id=split_id)
output.output_results(train_results, split_id)
return train_results
|
[
"def",
"validate",
"(",
"self",
",",
"validation_instances",
",",
"metrics",
",",
"iteration",
"=",
"None",
")",
":",
"if",
"not",
"validation_instances",
"or",
"not",
"metrics",
":",
"return",
"{",
"}",
"split_id",
"=",
"'val%s'",
"%",
"iteration",
"if",
"iteration",
"is",
"not",
"None",
"else",
"'val'",
"train_results",
"=",
"evaluate",
".",
"evaluate",
"(",
"self",
",",
"validation_instances",
",",
"metrics",
"=",
"metrics",
",",
"split_id",
"=",
"split_id",
")",
"output",
".",
"output_results",
"(",
"train_results",
",",
"split_id",
")",
"return",
"train_results"
] |
Evaluate this model on `validation_instances` during training and
output a report.
:param validation_instances: The data to use to validate the model.
:type validation_instances: list(instance.Instance)
:param metrics: Functions like those found in the `metrics` module
for quantifying the performance of the learner.
:type metrics: list(function)
:param iteration: A label (anything with a sensible `str()` conversion)
identifying the current iteration in output.
|
[
"Evaluate",
"this",
"model",
"on",
"validation_instances",
"during",
"training",
"and",
"output",
"a",
"report",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/learner.py#L34-L55
|
18,027
|
stanfordnlp/stanza
|
stanza/research/learner.py
|
Learner.predict_and_score
|
def predict_and_score(self, eval_instances, random=False, verbosity=0):
'''
Return most likely outputs and scores for the particular set of
outputs given in `eval_instances`, as a tuple. Return value should
be equivalent to the default implementation of
return (self.predict(eval_instances), self.score(eval_instances))
but subclasses can override this to combine the two calls and reduce
duplicated work. Either the two separate methods or this one (or all
of them) should be overridden.
:param eval_instances: The data to use to evaluate the model.
Instances should have at least the `input` and `output` fields
populated. `output` is needed to define which score is to
be returned.
:param random: If `True`, sample from the probability distribution
defined by the classifier rather than output the most likely
prediction.
:param verbosity: The level of diagnostic output, relative to the
global --verbosity option. Used to adjust output when models
are composed of multiple sub-models.
:type eval_instances: list(instance.Instance)
:returns: tuple(list(output_type), list(float))
'''
if hasattr(self, '_using_default_separate') and self._using_default_separate:
raise NotImplementedError
self._using_default_combined = True
return (self.predict(eval_instances, random=random, verbosity=verbosity),
self.score(eval_instances, verbosity=verbosity))
|
python
|
def predict_and_score(self, eval_instances, random=False, verbosity=0):
'''
Return most likely outputs and scores for the particular set of
outputs given in `eval_instances`, as a tuple. Return value should
be equivalent to the default implementation of
return (self.predict(eval_instances), self.score(eval_instances))
but subclasses can override this to combine the two calls and reduce
duplicated work. Either the two separate methods or this one (or all
of them) should be overridden.
:param eval_instances: The data to use to evaluate the model.
Instances should have at least the `input` and `output` fields
populated. `output` is needed to define which score is to
be returned.
:param random: If `True`, sample from the probability distribution
defined by the classifier rather than output the most likely
prediction.
:param verbosity: The level of diagnostic output, relative to the
global --verbosity option. Used to adjust output when models
are composed of multiple sub-models.
:type eval_instances: list(instance.Instance)
:returns: tuple(list(output_type), list(float))
'''
if hasattr(self, '_using_default_separate') and self._using_default_separate:
raise NotImplementedError
self._using_default_combined = True
return (self.predict(eval_instances, random=random, verbosity=verbosity),
self.score(eval_instances, verbosity=verbosity))
|
[
"def",
"predict_and_score",
"(",
"self",
",",
"eval_instances",
",",
"random",
"=",
"False",
",",
"verbosity",
"=",
"0",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_using_default_separate'",
")",
"and",
"self",
".",
"_using_default_separate",
":",
"raise",
"NotImplementedError",
"self",
".",
"_using_default_combined",
"=",
"True",
"return",
"(",
"self",
".",
"predict",
"(",
"eval_instances",
",",
"random",
"=",
"random",
",",
"verbosity",
"=",
"verbosity",
")",
",",
"self",
".",
"score",
"(",
"eval_instances",
",",
"verbosity",
"=",
"verbosity",
")",
")"
] |
Return most likely outputs and scores for the particular set of
outputs given in `eval_instances`, as a tuple. Return value should
be equivalent to the default implementation of
return (self.predict(eval_instances), self.score(eval_instances))
but subclasses can override this to combine the two calls and reduce
duplicated work. Either the two separate methods or this one (or all
of them) should be overridden.
:param eval_instances: The data to use to evaluate the model.
Instances should have at least the `input` and `output` fields
populated. `output` is needed to define which score is to
be returned.
:param random: If `True`, sample from the probability distribution
defined by the classifier rather than output the most likely
prediction.
:param verbosity: The level of diagnostic output, relative to the
global --verbosity option. Used to adjust output when models
are composed of multiple sub-models.
:type eval_instances: list(instance.Instance)
:returns: tuple(list(output_type), list(float))
|
[
"Return",
"most",
"likely",
"outputs",
"and",
"scores",
"for",
"the",
"particular",
"set",
"of",
"outputs",
"given",
"in",
"eval_instances",
"as",
"a",
"tuple",
".",
"Return",
"value",
"should",
"be",
"equivalent",
"to",
"the",
"default",
"implementation",
"of"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/learner.py#L104-L135
|
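The `_using_default_separate` / `_using_default_combined` flags in the entry above guard against silent infinite recursion: if `predict` and `score` are themselves the base-class defaults (which presumably delegate back to `predict_and_score`), the combined default bails out with `NotImplementedError` instead of looping. A hedged sketch of the intended override, with an illustrative learner that is not from the repo:

class ConstantLearner(object):
    """Toy learner that overrides only the combined method."""

    def predict_and_score(self, eval_instances, random=False, verbosity=0):
        predictions = ['yes' for _ in eval_instances]
        scores = [0.0 for _ in eval_instances]  # placeholder log-probabilities
        return predictions, scores

preds, scores = ConstantLearner().predict_and_score(range(3))
print(preds)   # ['yes', 'yes', 'yes']
print(scores)  # [0.0, 0.0, 0.0]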
18,028
|
stanfordnlp/stanza
|
stanza/research/learner.py
|
Learner.load
|
def load(self, infile):
'''
Deserialize a model from a stored file.
By default, unpickle an entire object. If `dump` is overridden to
use a different storage format, `load` should be as well.
:param file outfile: A file-like object from which to retrieve the
serialized model.
'''
model = pickle.load(infile)
self.__dict__.update(model.__dict__)
|
python
|
def load(self, infile):
'''
Deserialize a model from a stored file.
By default, unpickle an entire object. If `dump` is overridden to
use a different storage format, `load` should be as well.
:param file outfile: A file-like object from which to retrieve the
serialized model.
'''
model = pickle.load(infile)
self.__dict__.update(model.__dict__)
|
[
"def",
"load",
"(",
"self",
",",
"infile",
")",
":",
"model",
"=",
"pickle",
".",
"load",
"(",
"infile",
")",
"self",
".",
"__dict__",
".",
"update",
"(",
"model",
".",
"__dict__",
")"
] |
Deserialize a model from a stored file.
By default, unpickle an entire object. If `dump` is overridden to
use a different storage format, `load` should be as well.
:param file outfile: A file-like object from which to retrieve the
serialized model.
|
[
"Deserialize",
"a",
"model",
"from",
"a",
"stored",
"file",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/learner.py#L153-L164
|
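Two notes on `Learner.load` above: the docstring's `:param file outfile:` looks like a copy-paste slip from the matching `dump` (the parameter is actually `infile`), and the `self.__dict__.update(...)` trick lets an already-constructed learner absorb a pickled one in place. A minimal round-trip sketch of the pattern, using a stand-in class:

import io
import pickle

class TinyModel(object):
    def dump(self, outfile):
        pickle.dump(self, outfile)

    def load(self, infile):
        model = pickle.load(infile)
        self.__dict__.update(model.__dict__)

m = TinyModel()
m.weights = [1, 2, 3]
buf = io.BytesIO()
m.dump(buf)
buf.seek(0)

m2 = TinyModel()   # construct first, then hydrate in place
m2.load(buf)
print(m2.weights)  # [1, 2, 3]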
18,029
|
stanfordnlp/stanza
|
stanza/research/iterators.py
|
iter_batches
|
def iter_batches(iterable, batch_size):
'''
Given a sequence or iterable, yield batches from that iterable until it
runs out. Note that this function returns a generator, and also each
batch will be a generator.
:param iterable: The sequence or iterable to split into batches
:param int batch_size: The number of elements of `iterable` to iterate over
in each batch
>>> batches = iter_batches('abcdefghijkl', batch_size=5)
>>> list(next(batches))
['a', 'b', 'c', 'd', 'e']
>>> list(next(batches))
['f', 'g', 'h', 'i', 'j']
>>> list(next(batches))
['k', 'l']
>>> list(next(batches))
Traceback (most recent call last):
...
StopIteration
Warning: It is important to iterate completely over each batch before
requesting the next, or batch sizes will be truncated to 1. For example,
making a list of all batches before asking for the contents of each
will not work:
>>> batches = list(iter_batches('abcdefghijkl', batch_size=5))
>>> len(batches)
12
>>> list(batches[0])
['a']
However, making a list of each individual batch as it is received will
produce expected behavior (as shown in the first example).
'''
# http://stackoverflow.com/a/8290514/4481448
sourceiter = iter(iterable)
while True:
batchiter = islice(sourceiter, batch_size)
yield chain([batchiter.next()], batchiter)
|
python
|
def iter_batches(iterable, batch_size):
'''
Given a sequence or iterable, yield batches from that iterable until it
runs out. Note that this function returns a generator, and also each
batch will be a generator.
:param iterable: The sequence or iterable to split into batches
:param int batch_size: The number of elements of `iterable` to iterate over
in each batch
>>> batches = iter_batches('abcdefghijkl', batch_size=5)
>>> list(next(batches))
['a', 'b', 'c', 'd', 'e']
>>> list(next(batches))
['f', 'g', 'h', 'i', 'j']
>>> list(next(batches))
['k', 'l']
>>> list(next(batches))
Traceback (most recent call last):
...
StopIteration
Warning: It is important to iterate completely over each batch before
requesting the next, or batch sizes will be truncated to 1. For example,
making a list of all batches before asking for the contents of each
will not work:
>>> batches = list(iter_batches('abcdefghijkl', batch_size=5))
>>> len(batches)
12
>>> list(batches[0])
['a']
However, making a list of each individual batch as it is received will
produce expected behavior (as shown in the first example).
'''
# http://stackoverflow.com/a/8290514/4481448
sourceiter = iter(iterable)
while True:
batchiter = islice(sourceiter, batch_size)
yield chain([batchiter.next()], batchiter)
|
[
"def",
"iter_batches",
"(",
"iterable",
",",
"batch_size",
")",
":",
"# http://stackoverflow.com/a/8290514/4481448",
"sourceiter",
"=",
"iter",
"(",
"iterable",
")",
"while",
"True",
":",
"batchiter",
"=",
"islice",
"(",
"sourceiter",
",",
"batch_size",
")",
"yield",
"chain",
"(",
"[",
"batchiter",
".",
"next",
"(",
")",
"]",
",",
"batchiter",
")"
] |
Given a sequence or iterable, yield batches from that iterable until it
runs out. Note that this function returns a generator, and also each
batch will be a generator.
:param iterable: The sequence or iterable to split into batches
:param int batch_size: The number of elements of `iterable` to iterate over
in each batch
>>> batches = iter_batches('abcdefghijkl', batch_size=5)
>>> list(next(batches))
['a', 'b', 'c', 'd', 'e']
>>> list(next(batches))
['f', 'g', 'h', 'i', 'j']
>>> list(next(batches))
['k', 'l']
>>> list(next(batches))
Traceback (most recent call last):
...
StopIteration
Warning: It is important to iterate completely over each batch before
requesting the next, or batch sizes will be truncated to 1. For example,
making a list of all batches before asking for the contents of each
will not work:
>>> batches = list(iter_batches('abcdefghijkl', batch_size=5))
>>> len(batches)
12
>>> list(batches[0])
['a']
However, making a list of each individual batch as it is received will
produce expected behavior (as shown in the first example).
|
[
"Given",
"a",
"sequence",
"or",
"iterable",
"yield",
"batches",
"from",
"that",
"iterable",
"until",
"it",
"runs",
"out",
".",
"Note",
"that",
"this",
"function",
"returns",
"a",
"generator",
"and",
"also",
"each",
"batch",
"will",
"be",
"a",
"generator",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/iterators.py#L4-L44
|
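The `iter_batches` entry above is Python 2 code: `batchiter.next()` does not exist on Python 3, and since PEP 479 (enforced from Python 3.7) a `StopIteration` escaping a generator is converted to `RuntimeError` rather than quietly ending it. A hedged Python 3 port that preserves the one-pass semantics and caveats documented above:

from itertools import chain, islice

def iter_batches_py3(iterable, batch_size):
    """Python 3 port of iter_batches: same one-pass semantics and caveats."""
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, batch_size)
        try:
            first = next(batchiter)  # Python 3 spelling of batchiter.next()
        except StopIteration:
            return                   # PEP 479: must not leak out of a generator
        yield chain([first], batchiter)

batches = iter_batches_py3('abcdefghijkl', batch_size=5)
print([list(b) for b in batches])
# [['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'j'], ['k', 'l']]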
18,030
|
stanfordnlp/stanza
|
stanza/research/iterators.py
|
gen_batches
|
def gen_batches(iterable, batch_size):
'''
Returns a generator object that yields batches from `iterable`.
See `iter_batches` for more details and caveats.
Note that `iter_batches` returns an iterator, which never supports `len()`,
`gen_batches` returns an iterable which supports `len()` if and only if
`iterable` does. This *may* be an iterator, but could be a `SizedGenerator`
object. To obtain an iterator (for example, to use the `next()` function),
call `iter()` on this iterable.
>>> batches = gen_batches('abcdefghijkl', batch_size=5)
>>> len(batches)
3
>>> for batch in batches:
... print(list(batch))
['a', 'b', 'c', 'd', 'e']
['f', 'g', 'h', 'i', 'j']
['k', 'l']
'''
def batches_thunk():
return iter_batches(iterable, batch_size)
try:
length = len(iterable)
except TypeError:
return batches_thunk()
num_batches = (length - 1) // batch_size + 1
return SizedGenerator(batches_thunk, length=num_batches)
|
python
|
def gen_batches(iterable, batch_size):
'''
Returns a generator object that yields batches from `iterable`.
See `iter_batches` for more details and caveats.
Note that `iter_batches` returns an iterator, which never supports `len()`,
`gen_batches` returns an iterable which supports `len()` if and only if
`iterable` does. This *may* be an iterator, but could be a `SizedGenerator`
object. To obtain an iterator (for example, to use the `next()` function),
call `iter()` on this iterable.
>>> batches = gen_batches('abcdefghijkl', batch_size=5)
>>> len(batches)
3
>>> for batch in batches:
... print(list(batch))
['a', 'b', 'c', 'd', 'e']
['f', 'g', 'h', 'i', 'j']
['k', 'l']
'''
def batches_thunk():
return iter_batches(iterable, batch_size)
try:
length = len(iterable)
except TypeError:
return batches_thunk()
num_batches = (length - 1) // batch_size + 1
return SizedGenerator(batches_thunk, length=num_batches)
|
[
"def",
"gen_batches",
"(",
"iterable",
",",
"batch_size",
")",
":",
"def",
"batches_thunk",
"(",
")",
":",
"return",
"iter_batches",
"(",
"iterable",
",",
"batch_size",
")",
"try",
":",
"length",
"=",
"len",
"(",
"iterable",
")",
"except",
"TypeError",
":",
"return",
"batches_thunk",
"(",
")",
"num_batches",
"=",
"(",
"length",
"-",
"1",
")",
"//",
"batch_size",
"+",
"1",
"return",
"SizedGenerator",
"(",
"batches_thunk",
",",
"length",
"=",
"num_batches",
")"
] |
Returns a generator object that yields batches from `iterable`.
See `iter_batches` for more details and caveats.
Note that `iter_batches` returns an iterator, which never supports `len()`,
`gen_batches` returns an iterable which supports `len()` if and only if
`iterable` does. This *may* be an iterator, but could be a `SizedGenerator`
object. To obtain an iterator (for example, to use the `next()` function),
call `iter()` on this iterable.
>>> batches = gen_batches('abcdefghijkl', batch_size=5)
>>> len(batches)
3
>>> for batch in batches:
... print(list(batch))
['a', 'b', 'c', 'd', 'e']
['f', 'g', 'h', 'i', 'j']
['k', 'l']
|
[
"Returns",
"a",
"generator",
"object",
"that",
"yields",
"batches",
"from",
"iterable",
".",
"See",
"iter_batches",
"for",
"more",
"details",
"and",
"caveats",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/iterators.py#L47-L76
|
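The batch count in `gen_batches` is ceiling division in disguise: `(length - 1) // batch_size + 1` rounds up without importing `math`. A quick check against the doctest above:

def num_batches(length, batch_size):
    return (length - 1) // batch_size + 1

print(num_batches(12, 5))  # 3 -- matches the doctest (5 + 5 + 2)
print(num_batches(10, 5))  # 2 -- exact multiple, no empty trailing batch
print(num_batches(1, 5))   # 1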
18,031
|
stanfordnlp/stanza
|
stanza/research/instance.py
|
Instance.inverted
|
def inverted(self):
'''
Return a version of this instance with inputs replaced by outputs and vice versa.
'''
return Instance(input=self.output, output=self.input,
annotated_input=self.annotated_output,
annotated_output=self.annotated_input,
alt_inputs=self.alt_outputs,
alt_outputs=self.alt_inputs,
source=self.source)
|
python
|
def inverted(self):
'''
Return a version of this instance with inputs replaced by outputs and vice versa.
'''
return Instance(input=self.output, output=self.input,
annotated_input=self.annotated_output,
annotated_output=self.annotated_input,
alt_inputs=self.alt_outputs,
alt_outputs=self.alt_inputs,
source=self.source)
|
[
"def",
"inverted",
"(",
"self",
")",
":",
"return",
"Instance",
"(",
"input",
"=",
"self",
".",
"output",
",",
"output",
"=",
"self",
".",
"input",
",",
"annotated_input",
"=",
"self",
".",
"annotated_output",
",",
"annotated_output",
"=",
"self",
".",
"annotated_input",
",",
"alt_inputs",
"=",
"self",
".",
"alt_outputs",
",",
"alt_outputs",
"=",
"self",
".",
"alt_inputs",
",",
"source",
"=",
"self",
".",
"source",
")"
] |
Return a version of this instance with inputs replaced by outputs and vice versa.
|
[
"Return",
"a",
"version",
"of",
"this",
"instance",
"with",
"inputs",
"replaced",
"by",
"outputs",
"and",
"vice",
"versa",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/instance.py#L36-L45
|
18,032
|
stanfordnlp/stanza
|
stanza/util/resource.py
|
get_data_or_download
|
def get_data_or_download(dir_name, file_name, url='', size='unknown'):
"""Returns the data. if the data hasn't been downloaded, then first download the data.
:param dir_name: directory to look in
:param file_name: file name to retrieve
:param url: if the file is not found, then download it from this url
:param size: the expected size
:return: path to the requested file
"""
dname = os.path.join(stanza.DATA_DIR, dir_name)
fname = os.path.join(dname, file_name)
if not os.path.isdir(dname):
assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(dname)
os.makedirs(dname)
if not os.path.isfile(fname):
assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(fname)
logging.warn('downloading from {}. This file could potentially be *very* large! Actual size ({})'.format(url, size))
with open(fname, 'wb') as f:
f.write(get_from_url(url))
return fname
|
python
|
def get_data_or_download(dir_name, file_name, url='', size='unknown'):
"""Returns the data. if the data hasn't been downloaded, then first download the data.
:param dir_name: directory to look in
:param file_name: file name to retrieve
:param url: if the file is not found, then download it from this url
:param size: the expected size
:return: path to the requested file
"""
dname = os.path.join(stanza.DATA_DIR, dir_name)
fname = os.path.join(dname, file_name)
if not os.path.isdir(dname):
assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(dname)
os.makedirs(dname)
if not os.path.isfile(fname):
assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(fname)
logging.warn('downloading from {}. This file could potentially be *very* large! Actual size ({})'.format(url, size))
with open(fname, 'wb') as f:
f.write(get_from_url(url))
return fname
|
[
"def",
"get_data_or_download",
"(",
"dir_name",
",",
"file_name",
",",
"url",
"=",
"''",
",",
"size",
"=",
"'unknown'",
")",
":",
"dname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"stanza",
".",
"DATA_DIR",
",",
"dir_name",
")",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dname",
",",
"file_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dname",
")",
":",
"assert",
"url",
",",
"'Could not locate data {}, and url was not specified. Cannot retrieve data.'",
".",
"format",
"(",
"dname",
")",
"os",
".",
"makedirs",
"(",
"dname",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fname",
")",
":",
"assert",
"url",
",",
"'Could not locate data {}, and url was not specified. Cannot retrieve data.'",
".",
"format",
"(",
"fname",
")",
"logging",
".",
"warn",
"(",
"'downloading from {}. This file could potentially be *very* large! Actual size ({})'",
".",
"format",
"(",
"url",
",",
"size",
")",
")",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"get_from_url",
"(",
"url",
")",
")",
"return",
"fname"
] |
Returns the data. if the data hasn't been downloaded, then first download the data.
:param dir_name: directory to look in
:param file_name: file name to retrieve
:param url: if the file is not found, then download it from this url
:param size: the expected size
:return: path to the requested file
|
[
"Returns",
"the",
"data",
".",
"if",
"the",
"data",
"hasn",
"t",
"been",
"downloaded",
"then",
"first",
"download",
"the",
"data",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/util/resource.py#L16-L35
|
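Two things worth flagging in `get_data_or_download` above: `logging.warn` is a deprecated alias for `logging.warning`, and `f.write(get_from_url(url))` buffers the entire download in memory even as the log line warns the file may be very large. A hedged streaming alternative using `requests` (a swapped-in library, not what the repo's `get_from_url` uses):

import requests

def download_streaming(url, fname, chunk_size=1 << 20):
    """Stream url to fname one chunk at a time instead of buffering it all."""
    with requests.get(url, stream=True) as resp:
        resp.raise_for_status()
        with open(fname, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                f.write(chunk)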
18,033
|
stanfordnlp/stanza
|
stanza/text/vocab.py
|
Vocab.add
|
def add(self, word, count=1):
"""Add a word to the vocabulary and return its index.
:param word: word to add to the dictionary.
:param count: how many times to add the word.
:return: index of the added word.
WARNING: this function assumes that if the Vocab currently has N words, then
there is a perfect bijection between these N words and the integers 0 through N-1.
"""
if word not in self:
super(Vocab, self).__setitem__(word, len(self))
self._counts[word] += count
return self[word]
|
python
|
def add(self, word, count=1):
"""Add a word to the vocabulary and return its index.
:param word: word to add to the dictionary.
:param count: how many times to add the word.
:return: index of the added word.
WARNING: this function assumes that if the Vocab currently has N words, then
there is a perfect bijection between these N words and the integers 0 through N-1.
"""
if word not in self:
super(Vocab, self).__setitem__(word, len(self))
self._counts[word] += count
return self[word]
|
[
"def",
"add",
"(",
"self",
",",
"word",
",",
"count",
"=",
"1",
")",
":",
"if",
"word",
"not",
"in",
"self",
":",
"super",
"(",
"Vocab",
",",
"self",
")",
".",
"__setitem__",
"(",
"word",
",",
"len",
"(",
"self",
")",
")",
"self",
".",
"_counts",
"[",
"word",
"]",
"+=",
"count",
"return",
"self",
"[",
"word",
"]"
] |
Add a word to the vocabulary and return its index.
:param word: word to add to the dictionary.
:param count: how many times to add the word.
:return: index of the added word.
WARNING: this function assumes that if the Vocab currently has N words, then
there is a perfect bijection between these N words and the integers 0 through N-1.
|
[
"Add",
"a",
"word",
"to",
"the",
"vocabulary",
"and",
"return",
"its",
"index",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L93-L108
|
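The index arithmetic in `Vocab.add` leans on the invariant called out in its warning: with N words mapped to 0 through N-1, the next fresh index is exactly `len(self)` (Vocab is an OrderedDict subclass, per the comment in `_index2word` further down); re-adding a word only bumps its count, never its index. The core of the scheme in isolation:

from collections import OrderedDict

v = OrderedDict()
for w in ['<unk>', 'cat', 'dog', 'cat']:  # re-adding 'cat' must not renumber
    if w not in v:
        v[w] = len(v)                     # N existing words -> next index is N
print(list(v.items()))  # [('<unk>', 0), ('cat', 1), ('dog', 2)]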
18,034
|
stanfordnlp/stanza
|
stanza/text/vocab.py
|
Vocab.subset
|
def subset(self, words):
"""Get a new Vocab containing only the specified subset of words.
If w is in words, but not in the original vocab, it will NOT be in the subset vocab.
Indices will be in the order of `words`. Counts from the original vocab are preserved.
:return (Vocab): a new Vocab object
"""
v = self.__class__(unk=self._unk)
unique = lambda seq: len(set(seq)) == len(seq)
assert unique(words)
for w in words:
if w in self:
v.add(w, count=self.count(w))
return v
|
python
|
def subset(self, words):
"""Get a new Vocab containing only the specified subset of words.
If w is in words, but not in the original vocab, it will NOT be in the subset vocab.
Indices will be in the order of `words`. Counts from the original vocab are preserved.
:return (Vocab): a new Vocab object
"""
v = self.__class__(unk=self._unk)
unique = lambda seq: len(set(seq)) == len(seq)
assert unique(words)
for w in words:
if w in self:
v.add(w, count=self.count(w))
return v
|
[
"def",
"subset",
"(",
"self",
",",
"words",
")",
":",
"v",
"=",
"self",
".",
"__class__",
"(",
"unk",
"=",
"self",
".",
"_unk",
")",
"unique",
"=",
"lambda",
"seq",
":",
"len",
"(",
"set",
"(",
"seq",
")",
")",
"==",
"len",
"(",
"seq",
")",
"assert",
"unique",
"(",
"words",
")",
"for",
"w",
"in",
"words",
":",
"if",
"w",
"in",
"self",
":",
"v",
".",
"add",
"(",
"w",
",",
"count",
"=",
"self",
".",
"count",
"(",
"w",
")",
")",
"return",
"v"
] |
Get a new Vocab containing only the specified subset of words.
If w is in words, but not in the original vocab, it will NOT be in the subset vocab.
Indices will be in the order of `words`. Counts from the original vocab are preserved.
:return (Vocab): a new Vocab object
|
[
"Get",
"a",
"new",
"Vocab",
"containing",
"only",
"the",
"specified",
"subset",
"of",
"words",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L136-L150
|
18,035
|
stanfordnlp/stanza
|
stanza/text/vocab.py
|
Vocab._index2word
|
def _index2word(self):
"""Mapping from indices to words.
WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.
:return: a list of strings
"""
# TODO(kelvinguu): it would be nice to just use `dict.viewkeys`, but unfortunately those are not indexable
compute_index2word = lambda: self.keys() # this works because self is an OrderedDict
# create if it doesn't exist
try:
self._index2word_cache
except AttributeError:
self._index2word_cache = compute_index2word()
# update if it is out of date
if len(self._index2word_cache) != len(self):
self._index2word_cache = compute_index2word()
return self._index2word_cache
|
python
|
def _index2word(self):
"""Mapping from indices to words.
WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.
:return: a list of strings
"""
# TODO(kelvinguu): it would be nice to just use `dict.viewkeys`, but unfortunately those are not indexable
compute_index2word = lambda: self.keys() # this works because self is an OrderedDict
# create if it doesn't exist
try:
self._index2word_cache
except AttributeError:
self._index2word_cache = compute_index2word()
# update if it is out of date
if len(self._index2word_cache) != len(self):
self._index2word_cache = compute_index2word()
return self._index2word_cache
|
[
"def",
"_index2word",
"(",
"self",
")",
":",
"# TODO(kelvinguu): it would be nice to just use `dict.viewkeys`, but unfortunately those are not indexable",
"compute_index2word",
"=",
"lambda",
":",
"self",
".",
"keys",
"(",
")",
"# this works because self is an OrderedDict",
"# create if it doesn't exist",
"try",
":",
"self",
".",
"_index2word_cache",
"except",
"AttributeError",
":",
"self",
".",
"_index2word_cache",
"=",
"compute_index2word",
"(",
")",
"# update if it is out of date",
"if",
"len",
"(",
"self",
".",
"_index2word_cache",
")",
"!=",
"len",
"(",
"self",
")",
":",
"self",
".",
"_index2word_cache",
"=",
"compute_index2word",
"(",
")",
"return",
"self",
".",
"_index2word_cache"
] |
Mapping from indices to words.
WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.
:return: a list of strings
|
[
"Mapping",
"from",
"indices",
"to",
"words",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L153-L174
|
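`compute_index2word = lambda: self.keys()` only satisfies the "indexable by position" contract on Python 2, where `keys()` returns a list. On Python 3 it returns a live view, which is not subscriptable and not a snapshot (so the length-based staleness check would never fire either); the fix there would be `list(self.keys())`. The difference in a few lines:

from collections import OrderedDict

d = OrderedDict([('<unk>', 0), ('cat', 1)])
keys = d.keys()
try:
    keys[1]               # a plain list on Python 2; a view on Python 3
except TypeError as exc:
    print(exc)            # 'odict_keys' object is not subscriptable
print(list(d.keys())[1])  # 'cat' -- the snapshot the cache actually needs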
18,036
|
stanfordnlp/stanza
|
stanza/text/vocab.py
|
Vocab.from_dict
|
def from_dict(cls, word2index, unk, counts=None):
"""Create Vocab from an existing string to integer dictionary.
All counts are set to 0.
:param word2index: a dictionary representing a bijection from N words to the integers 0 through N-1.
UNK must be assigned the 0 index.
:param unk: the string representing unk in word2index.
:param counts: (optional) a Counter object mapping words to counts
:return: a created vocab object.
"""
try:
if word2index[unk] != 0:
raise ValueError('unk must be assigned index 0')
except KeyError:
raise ValueError('word2index must have an entry for unk.')
# check that word2index is a bijection
vals = set(word2index.values()) # unique indices
n = len(vals)
bijection = (len(word2index) == n) and (vals == set(range(n)))
if not bijection:
raise ValueError('word2index is not a bijection between N words and the integers 0 through N-1.')
# reverse the dictionary
index2word = {idx: word for word, idx in word2index.iteritems()}
vocab = cls(unk=unk)
for i in xrange(n):
vocab.add(index2word[i])
if counts:
matching_entries = set(word2index.keys()) == set(counts.keys())
if not matching_entries:
raise ValueError('entries of word2index do not match counts (did you include UNK?)')
vocab._counts = counts
return vocab
|
python
|
def from_dict(cls, word2index, unk, counts=None):
"""Create Vocab from an existing string to integer dictionary.
All counts are set to 0.
:param word2index: a dictionary representing a bijection from N words to the integers 0 through N-1.
UNK must be assigned the 0 index.
:param unk: the string representing unk in word2index.
:param counts: (optional) a Counter object mapping words to counts
:return: a created vocab object.
"""
try:
if word2index[unk] != 0:
raise ValueError('unk must be assigned index 0')
except KeyError:
raise ValueError('word2index must have an entry for unk.')
# check that word2index is a bijection
vals = set(word2index.values()) # unique indices
n = len(vals)
bijection = (len(word2index) == n) and (vals == set(range(n)))
if not bijection:
raise ValueError('word2index is not a bijection between N words and the integers 0 through N-1.')
# reverse the dictionary
index2word = {idx: word for word, idx in word2index.iteritems()}
vocab = cls(unk=unk)
for i in xrange(n):
vocab.add(index2word[i])
if counts:
matching_entries = set(word2index.keys()) == set(counts.keys())
if not matching_entries:
raise ValueError('entries of word2index do not match counts (did you include UNK?)')
vocab._counts = counts
return vocab
|
[
"def",
"from_dict",
"(",
"cls",
",",
"word2index",
",",
"unk",
",",
"counts",
"=",
"None",
")",
":",
"try",
":",
"if",
"word2index",
"[",
"unk",
"]",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'unk must be assigned index 0'",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'word2index must have an entry for unk.'",
")",
"# check that word2index is a bijection",
"vals",
"=",
"set",
"(",
"word2index",
".",
"values",
"(",
")",
")",
"# unique indices",
"n",
"=",
"len",
"(",
"vals",
")",
"bijection",
"=",
"(",
"len",
"(",
"word2index",
")",
"==",
"n",
")",
"and",
"(",
"vals",
"==",
"set",
"(",
"range",
"(",
"n",
")",
")",
")",
"if",
"not",
"bijection",
":",
"raise",
"ValueError",
"(",
"'word2index is not a bijection between N words and the integers 0 through N-1.'",
")",
"# reverse the dictionary",
"index2word",
"=",
"{",
"idx",
":",
"word",
"for",
"word",
",",
"idx",
"in",
"word2index",
".",
"iteritems",
"(",
")",
"}",
"vocab",
"=",
"cls",
"(",
"unk",
"=",
"unk",
")",
"for",
"i",
"in",
"xrange",
"(",
"n",
")",
":",
"vocab",
".",
"add",
"(",
"index2word",
"[",
"i",
"]",
")",
"if",
"counts",
":",
"matching_entries",
"=",
"set",
"(",
"word2index",
".",
"keys",
"(",
")",
")",
"==",
"set",
"(",
"counts",
".",
"keys",
"(",
")",
")",
"if",
"not",
"matching_entries",
":",
"raise",
"ValueError",
"(",
"'entries of word2index do not match counts (did you include UNK?)'",
")",
"vocab",
".",
"_counts",
"=",
"counts",
"return",
"vocab"
] |
Create Vocab from an existing string to integer dictionary.
All counts are set to 0.
:param word2index: a dictionary representing a bijection from N words to the integers 0 through N-1.
UNK must be assigned the 0 index.
:param unk: the string representing unk in word2index.
:param counts: (optional) a Counter object mapping words to counts
:return: a created vocab object.
|
[
"Create",
"Vocab",
"from",
"an",
"existing",
"string",
"to",
"integer",
"dictionary",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L205-L246
|
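`word2index.iteritems()` and `xrange` pin `from_dict` above to Python 2 (`items()` and `range` on Python 3). The bijection check itself is version-neutral and easy to exercise on its own:

def is_bijection_onto_range(word2index):
    """True iff the values are exactly the integers 0..N-1 with no repeats."""
    vals = set(word2index.values())
    n = len(vals)
    return len(word2index) == n and vals == set(range(n))

print(is_bijection_onto_range({'<unk>': 0, 'cat': 1, 'dog': 2}))  # True
print(is_bijection_onto_range({'<unk>': 0, 'cat': 2}))            # False (gap)
print(is_bijection_onto_range({'<unk>': 0, 'cat': 0}))            # False (repeat)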
18,037
|
stanfordnlp/stanza
|
stanza/text/vocab.py
|
Vocab.to_file
|
def to_file(self, f):
"""Write vocab to a file.
:param (file) f: a file object, e.g. as returned by calling `open`
File format:
word0<TAB>count0
word1<TAB>count1
...
word with index 0 is on the 0th line and so on...
"""
for word in self._index2word:
count = self._counts[word]
f.write(u'{}\t{}\n'.format(word, count).encode('utf-8'))
|
python
|
def to_file(self, f):
"""Write vocab to a file.
:param (file) f: a file object, e.g. as returned by calling `open`
File format:
word0<TAB>count0
word1<TAB>count1
...
word with index 0 is on the 0th line and so on...
"""
for word in self._index2word:
count = self._counts[word]
f.write(u'{}\t{}\n'.format(word, count).encode('utf-8'))
|
[
"def",
"to_file",
"(",
"self",
",",
"f",
")",
":",
"for",
"word",
"in",
"self",
".",
"_index2word",
":",
"count",
"=",
"self",
".",
"_counts",
"[",
"word",
"]",
"f",
".",
"write",
"(",
"u'{}\\t{}\\n'",
".",
"format",
"(",
"word",
",",
"count",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] |
Write vocab to a file.
:param (file) f: a file object, e.g. as returned by calling `open`
File format:
word0<TAB>count0
word1<TAB>count1
...
word with index 0 is on the 0th line and so on...
|
[
"Write",
"vocab",
"to",
"a",
"file",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L248-L262
|
18,038
|
stanfordnlp/stanza
|
stanza/text/vocab.py
|
EmbeddedVocab.backfill_unk_emb
|
def backfill_unk_emb(self, E, filled_words):
""" Backfills an embedding matrix with the embedding for the unknown token.
:param E: original embedding matrix of dimensions `(vocab_size, emb_dim)`.
:param filled_words: these words will not be backfilled with unk.
NOTE: this function is for internal use.
"""
unk_emb = E[self[self._unk]]
for i, word in enumerate(self):
if word not in filled_words:
E[i] = unk_emb
|
python
|
def backfill_unk_emb(self, E, filled_words):
""" Backfills an embedding matrix with the embedding for the unknown token.
:param E: original embedding matrix of dimensions `(vocab_size, emb_dim)`.
:param filled_words: these words will not be backfilled with unk.
NOTE: this function is for internal use.
"""
unk_emb = E[self[self._unk]]
for i, word in enumerate(self):
if word not in filled_words:
E[i] = unk_emb
|
[
"def",
"backfill_unk_emb",
"(",
"self",
",",
"E",
",",
"filled_words",
")",
":",
"unk_emb",
"=",
"E",
"[",
"self",
"[",
"self",
".",
"_unk",
"]",
"]",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"word",
"not",
"in",
"filled_words",
":",
"E",
"[",
"i",
"]",
"=",
"unk_emb"
] |
Backfills an embedding matrix with the embedding for the unknown token.
:param E: original embedding matrix of dimensions `(vocab_size, emb_dim)`.
:param filled_words: these words will not be backfilled with unk.
NOTE: this function is for internal use.
|
[
"Backfills",
"an",
"embedding",
"matrix",
"with",
"the",
"embedding",
"for",
"the",
"unknown",
"token",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/vocab.py#L304-L315
|
18,039
|
stanfordnlp/stanza
|
stanza/cluster/pick_gpu.py
|
best_gpu
|
def best_gpu(max_usage=USAGE_THRESHOLD, verbose=False):
'''
Return the name of a device to use, either 'cpu' or 'gpu0', 'gpu1',...
The least-used GPU with usage under the constant threshold will be chosen;
ties are broken randomly.
'''
try:
proc = subprocess.Popen("nvidia-smi", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = proc.communicate()
if error:
raise Exception(error)
except Exception, e:
sys.stderr.write("Couldn't run nvidia-smi to find best GPU, using CPU: %s\n" % str(e))
sys.stderr.write("(This is normal if you have no GPU or haven't configured CUDA.)\n")
return "cpu"
usages = parse_output(output)
pct_usage = [max(u.mem, cpu_backoff(u)) for u in usages]
max_usage = min(max_usage, min(pct_usage))
open_gpus = [index for index, usage in enumerate(usages)
if max(usage.mem, cpu_backoff(usage)) <= max_usage]
if verbose:
print('Best GPUs:')
for index in open_gpus:
print('%d: %s fan, %s mem, %s cpu' %
(index, format_percent(usages[index].fan),
format_percent(usages[index].mem),
format_percent(usages[index].cpu)))
if open_gpus:
result = "gpu" + str(random.choice(open_gpus))
else:
result = "cpu"
if verbose:
print('Chosen: ' + result)
return result
|
python
|
def best_gpu(max_usage=USAGE_THRESHOLD, verbose=False):
'''
Return the name of a device to use, either 'cpu' or 'gpu0', 'gpu1',...
The least-used GPU with usage under the constant threshold will be chosen;
ties are broken randomly.
'''
try:
proc = subprocess.Popen("nvidia-smi", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = proc.communicate()
if error:
raise Exception(error)
except Exception, e:
sys.stderr.write("Couldn't run nvidia-smi to find best GPU, using CPU: %s\n" % str(e))
sys.stderr.write("(This is normal if you have no GPU or haven't configured CUDA.)\n")
return "cpu"
usages = parse_output(output)
pct_usage = [max(u.mem, cpu_backoff(u)) for u in usages]
max_usage = min(max_usage, min(pct_usage))
open_gpus = [index for index, usage in enumerate(usages)
if max(usage.mem, cpu_backoff(usage)) <= max_usage]
if verbose:
print('Best GPUs:')
for index in open_gpus:
print('%d: %s fan, %s mem, %s cpu' %
(index, format_percent(usages[index].fan),
format_percent(usages[index].mem),
format_percent(usages[index].cpu)))
if open_gpus:
result = "gpu" + str(random.choice(open_gpus))
else:
result = "cpu"
if verbose:
print('Chosen: ' + result)
return result
|
[
"def",
"best_gpu",
"(",
"max_usage",
"=",
"USAGE_THRESHOLD",
",",
"verbose",
"=",
"False",
")",
":",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"\"nvidia-smi\"",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"output",
",",
"error",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"error",
":",
"raise",
"Exception",
"(",
"error",
")",
"except",
"Exception",
",",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Couldn't run nvidia-smi to find best GPU, using CPU: %s\\n\"",
"%",
"str",
"(",
"e",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"(This is normal if you have no GPU or haven't configured CUDA.)\\n\"",
")",
"return",
"\"cpu\"",
"usages",
"=",
"parse_output",
"(",
"output",
")",
"pct_usage",
"=",
"[",
"max",
"(",
"u",
".",
"mem",
",",
"cpu_backoff",
"(",
"u",
")",
")",
"for",
"u",
"in",
"usages",
"]",
"max_usage",
"=",
"min",
"(",
"max_usage",
",",
"min",
"(",
"pct_usage",
")",
")",
"open_gpus",
"=",
"[",
"index",
"for",
"index",
",",
"usage",
"in",
"enumerate",
"(",
"usages",
")",
"if",
"max",
"(",
"usage",
".",
"mem",
",",
"cpu_backoff",
"(",
"usage",
")",
")",
"<=",
"max_usage",
"]",
"if",
"verbose",
":",
"print",
"(",
"'Best GPUs:'",
")",
"for",
"index",
"in",
"open_gpus",
":",
"print",
"(",
"'%d: %s fan, %s mem, %s cpu'",
"%",
"(",
"index",
",",
"format_percent",
"(",
"usages",
"[",
"index",
"]",
".",
"fan",
")",
",",
"format_percent",
"(",
"usages",
"[",
"index",
"]",
".",
"mem",
")",
",",
"format_percent",
"(",
"usages",
"[",
"index",
"]",
".",
"cpu",
")",
")",
")",
"if",
"open_gpus",
":",
"result",
"=",
"\"gpu\"",
"+",
"str",
"(",
"random",
".",
"choice",
"(",
"open_gpus",
")",
")",
"else",
":",
"result",
"=",
"\"cpu\"",
"if",
"verbose",
":",
"print",
"(",
"'Chosen: '",
"+",
"result",
")",
"return",
"result"
] |
Return the name of a device to use, either 'cpu' or 'gpu0', 'gpu1',...
The least-used GPU with usage under the constant threshold will be chosen;
ties are broken randomly.
|
[
"Return",
"the",
"name",
"of",
"a",
"device",
"to",
"use",
"either",
"cpu",
"or",
"gpu0",
"gpu1",
"...",
"The",
"least",
"-",
"used",
"GPU",
"with",
"usage",
"under",
"the",
"constant",
"threshold",
"will",
"be",
"chosen",
";",
"ties",
"are",
"broken",
"randomly",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/cluster/pick_gpu.py#L28-L67
|
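`except Exception, e:` in `best_gpu` above is Python 2-only syntax; under Python 3 the module fails to compile until it is spelled `except Exception as e:`. Note also what `max_usage = min(max_usage, min(pct_usage))` does: it tightens the threshold to the least-loaded GPU's usage, so `open_gpus` holds only the GPUs tied for least load, and it is empty (falling back to `'cpu'`) when even the least-loaded GPU exceeds `USAGE_THRESHOLD`. A quick check of that selection logic, with made-up usage numbers:

import random

USAGE_THRESHOLD = 0.8
pct_usage = [0.95, 0.40, 0.40, 0.90]              # hypothetical per-GPU load
max_usage = min(USAGE_THRESHOLD, min(pct_usage))  # 0.40: least-loaded tier only
open_gpus = [i for i, u in enumerate(pct_usage) if u <= max_usage]
print(open_gpus)                                  # [1, 2] -- ties both survive
print('gpu' + str(random.choice(open_gpus)))      # gpu1 or gpu2, at random

pct_usage = [0.95, 0.90]                          # everything above the threshold
print([i for i, u in enumerate(pct_usage)
       if u <= min(USAGE_THRESHOLD, min(pct_usage))])  # [] -- falls back to 'cpu'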
18,040
|
stanfordnlp/stanza
|
stanza/research/evaluate.py
|
evaluate
|
def evaluate(learner, eval_data, metrics, metric_names=None, split_id=None,
write_data=False):
'''
Evaluate `learner` on the instances in `eval_data` according to each
metric in `metric`, and return a dictionary summarizing the values of
the metrics.
Dump the predictions, scores, and metric summaries in JSON format
to "{predictions|scores|results}.`split_id`.json" in the run directory.
:param learner: The model to be evaluated.
:type learner: learner.Learner
:param eval_data: The data to use to evaluate the model.
:type eval_data: list(instance.Instance)
:param metrics: An iterable of functions that defines the standard by
which predictions are evaluated.
:type metrics: Iterable(function(eval_data: list(instance.Instance),
predictions: list(output_type),
scores: list(float)) -> list(float))
:param bool write_data: If `True`, write out the instances in `eval_data`
as JSON, one per line, to the file `data.<split_id>.jsons`.
'''
if metric_names is None:
metric_names = [
(metric.__name__ if hasattr(metric, '__name__')
else ('m%d' % i))
for i, metric in enumerate(metrics)
]
split_prefix = split_id + '.' if split_id else ''
if write_data:
config.dump([inst.__dict__ for inst in eval_data],
'data.%sjsons' % split_prefix,
default=json_default, lines=True)
results = {split_prefix + 'num_params': learner.num_params}
predictions, scores = learner.predict_and_score(eval_data)
config.dump(predictions, 'predictions.%sjsons' % split_prefix, lines=True)
config.dump(scores, 'scores.%sjsons' % split_prefix, lines=True)
for metric, metric_name in zip(metrics, metric_names):
prefix = split_prefix + (metric_name + '.' if metric_name else '')
inst_outputs = metric(eval_data, predictions, scores, learner)
if metric_name in ['data', 'predictions', 'scores']:
warnings.warn('not outputting metric scores for metric "%s" because it would shadow '
'another results file')
else:
config.dump(inst_outputs, '%s.%sjsons' % (metric_name, split_prefix), lines=True)
mean = np.mean(inst_outputs)
gmean = np.exp(np.log(inst_outputs).mean())
sum = np.sum(inst_outputs)
std = np.std(inst_outputs)
results.update({
prefix + 'mean': mean,
prefix + 'gmean': gmean,
prefix + 'sum': sum,
prefix + 'std': std,
# prefix + 'ci_lower': ci_lower,
# prefix + 'ci_upper': ci_upper,
})
config.dump_pretty(results, 'results.%sjson' % split_prefix)
return results
|
python
|
def evaluate(learner, eval_data, metrics, metric_names=None, split_id=None,
write_data=False):
'''
Evaluate `learner` on the instances in `eval_data` according to each
metric in `metric`, and return a dictionary summarizing the values of
the metrics.
Dump the predictions, scores, and metric summaries in JSON format
to "{predictions|scores|results}.`split_id`.json" in the run directory.
:param learner: The model to be evaluated.
:type learner: learner.Learner
:param eval_data: The data to use to evaluate the model.
:type eval_data: list(instance.Instance)
:param metrics: An iterable of functions that defines the standard by
which predictions are evaluated.
:type metrics: Iterable(function(eval_data: list(instance.Instance),
predictions: list(output_type),
scores: list(float)) -> list(float))
:param bool write_data: If `True`, write out the instances in `eval_data`
as JSON, one per line, to the file `data.<split_id>.jsons`.
'''
if metric_names is None:
metric_names = [
(metric.__name__ if hasattr(metric, '__name__')
else ('m%d' % i))
for i, metric in enumerate(metrics)
]
split_prefix = split_id + '.' if split_id else ''
if write_data:
config.dump([inst.__dict__ for inst in eval_data],
'data.%sjsons' % split_prefix,
default=json_default, lines=True)
results = {split_prefix + 'num_params': learner.num_params}
predictions, scores = learner.predict_and_score(eval_data)
config.dump(predictions, 'predictions.%sjsons' % split_prefix, lines=True)
config.dump(scores, 'scores.%sjsons' % split_prefix, lines=True)
for metric, metric_name in zip(metrics, metric_names):
prefix = split_prefix + (metric_name + '.' if metric_name else '')
inst_outputs = metric(eval_data, predictions, scores, learner)
if metric_name in ['data', 'predictions', 'scores']:
warnings.warn('not outputting metric scores for metric "%s" because it would shadow '
'another results file')
else:
config.dump(inst_outputs, '%s.%sjsons' % (metric_name, split_prefix), lines=True)
mean = np.mean(inst_outputs)
gmean = np.exp(np.log(inst_outputs).mean())
sum = np.sum(inst_outputs)
std = np.std(inst_outputs)
results.update({
prefix + 'mean': mean,
prefix + 'gmean': gmean,
prefix + 'sum': sum,
prefix + 'std': std,
# prefix + 'ci_lower': ci_lower,
# prefix + 'ci_upper': ci_upper,
})
config.dump_pretty(results, 'results.%sjson' % split_prefix)
return results
|
[
"def",
"evaluate",
"(",
"learner",
",",
"eval_data",
",",
"metrics",
",",
"metric_names",
"=",
"None",
",",
"split_id",
"=",
"None",
",",
"write_data",
"=",
"False",
")",
":",
"if",
"metric_names",
"is",
"None",
":",
"metric_names",
"=",
"[",
"(",
"metric",
".",
"__name__",
"if",
"hasattr",
"(",
"metric",
",",
"'__name__'",
")",
"else",
"(",
"'m%d'",
"%",
"i",
")",
")",
"for",
"i",
",",
"metric",
"in",
"enumerate",
"(",
"metrics",
")",
"]",
"split_prefix",
"=",
"split_id",
"+",
"'.'",
"if",
"split_id",
"else",
"''",
"if",
"write_data",
":",
"config",
".",
"dump",
"(",
"[",
"inst",
".",
"__dict__",
"for",
"inst",
"in",
"eval_data",
"]",
",",
"'data.%sjsons'",
"%",
"split_prefix",
",",
"default",
"=",
"json_default",
",",
"lines",
"=",
"True",
")",
"results",
"=",
"{",
"split_prefix",
"+",
"'num_params'",
":",
"learner",
".",
"num_params",
"}",
"predictions",
",",
"scores",
"=",
"learner",
".",
"predict_and_score",
"(",
"eval_data",
")",
"config",
".",
"dump",
"(",
"predictions",
",",
"'predictions.%sjsons'",
"%",
"split_prefix",
",",
"lines",
"=",
"True",
")",
"config",
".",
"dump",
"(",
"scores",
",",
"'scores.%sjsons'",
"%",
"split_prefix",
",",
"lines",
"=",
"True",
")",
"for",
"metric",
",",
"metric_name",
"in",
"zip",
"(",
"metrics",
",",
"metric_names",
")",
":",
"prefix",
"=",
"split_prefix",
"+",
"(",
"metric_name",
"+",
"'.'",
"if",
"metric_name",
"else",
"''",
")",
"inst_outputs",
"=",
"metric",
"(",
"eval_data",
",",
"predictions",
",",
"scores",
",",
"learner",
")",
"if",
"metric_name",
"in",
"[",
"'data'",
",",
"'predictions'",
",",
"'scores'",
"]",
":",
"warnings",
".",
"warn",
"(",
"'not outputting metric scores for metric \"%s\" because it would shadow '",
"'another results file'",
")",
"else",
":",
"config",
".",
"dump",
"(",
"inst_outputs",
",",
"'%s.%sjsons'",
"%",
"(",
"metric_name",
",",
"split_prefix",
")",
",",
"lines",
"=",
"True",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"inst_outputs",
")",
"gmean",
"=",
"np",
".",
"exp",
"(",
"np",
".",
"log",
"(",
"inst_outputs",
")",
".",
"mean",
"(",
")",
")",
"sum",
"=",
"np",
".",
"sum",
"(",
"inst_outputs",
")",
"std",
"=",
"np",
".",
"std",
"(",
"inst_outputs",
")",
"results",
".",
"update",
"(",
"{",
"prefix",
"+",
"'mean'",
":",
"mean",
",",
"prefix",
"+",
"'gmean'",
":",
"gmean",
",",
"prefix",
"+",
"'sum'",
":",
"sum",
",",
"prefix",
"+",
"'std'",
":",
"std",
",",
"# prefix + 'ci_lower': ci_lower,",
"# prefix + 'ci_upper': ci_upper,",
"}",
")",
"config",
".",
"dump_pretty",
"(",
"results",
",",
"'results.%sjson'",
"%",
"split_prefix",
")",
"return",
"results"
] |
Evaluate `learner` on the instances in `eval_data` according to each
metric in `metrics`, and return a dictionary summarizing the values of
the metrics.
Dump the predictions, scores, and metric summaries in JSON format
to "{predictions|scores|results}.`split_id`.json" in the run directory.
:param learner: The model to be evaluated.
:type learner: learner.Learner
:param eval_data: The data to use to evaluate the model.
:type eval_data: list(instance.Instance)
:param metrics: An iterable of functions that defines the standard by
which predictions are evaluated.
:type metrics: Iterable(function(eval_data: list(instance.Instance),
predictions: list(output_type),
scores: list(float)) -> list(float))
:param bool write_data: If `True`, write out the instances in `eval_data`
as JSON, one per line, to the file `data.<split_id>.jsons`.
|
[
"Evaluate",
"learner",
"on",
"the",
"instances",
"in",
"eval_data",
"according",
"to",
"each",
"metric",
"in",
"metric",
"and",
"return",
"a",
"dictionary",
"summarizing",
"the",
"values",
"of",
"the",
"metrics",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/evaluate.py#L7-L78
|
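A minimal usage sketch for the `evaluate` record above; `accuracy`, `dev_insts`, and the `inst.output` attribute are hypothetical stand-ins, since the record only fixes evaluate's signature and the four-argument metric interface:

# Minimal sketch, assuming a trained learner and a dev split already exist.
# evaluate() only touches learner.num_params and learner.predict_and_score(data).
def accuracy(eval_data, predictions, scores, learner):
    # one float per instance, as evaluate() expects from each metric
    return [float(pred == inst.output) for inst, pred in zip(eval_data, predictions)]

results = evaluate(learner, dev_insts, metrics=[accuracy], split_id='dev')
# results contains keys such as 'dev.num_params', 'dev.accuracy.mean', 'dev.accuracy.std'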
18,041
|
stanfordnlp/stanza
|
stanza/nlp/protobuf_json.py
|
json2pb
|
def json2pb(pb, js, useFieldNumber=False):
    ''' convert JSON dict to google.protobuf message instance '''
for field in pb.DESCRIPTOR.fields:
if useFieldNumber:
key = field.number
else:
key = field.name
if key not in js:
continue
if field.type == FD.TYPE_MESSAGE:
pass
elif field.type in _js2ftype:
ftype = _js2ftype[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
value = js[key]
if field.label == FD.LABEL_REPEATED:
pb_value = getattr(pb, field.name, None)
for v in value:
if field.type == FD.TYPE_MESSAGE:
json2pb(pb_value.add(), v, useFieldNumber=useFieldNumber)
else:
pb_value.append(ftype(v))
else:
if field.type == FD.TYPE_MESSAGE:
json2pb(getattr(pb, field.name, None), value, useFieldNumber=useFieldNumber)
else:
setattr(pb, field.name, ftype(value))
return pb
|
python
|
def json2pb(pb, js, useFieldNumber=False):
    ''' convert JSON dict to google.protobuf message instance '''
for field in pb.DESCRIPTOR.fields:
if useFieldNumber:
key = field.number
else:
key = field.name
if key not in js:
continue
if field.type == FD.TYPE_MESSAGE:
pass
elif field.type in _js2ftype:
ftype = _js2ftype[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
value = js[key]
if field.label == FD.LABEL_REPEATED:
pb_value = getattr(pb, field.name, None)
for v in value:
if field.type == FD.TYPE_MESSAGE:
json2pb(pb_value.add(), v, useFieldNumber=useFieldNumber)
else:
pb_value.append(ftype(v))
else:
if field.type == FD.TYPE_MESSAGE:
json2pb(getattr(pb, field.name, None), value, useFieldNumber=useFieldNumber)
else:
setattr(pb, field.name, ftype(value))
return pb
|
[
"def",
"json2pb",
"(",
"pb",
",",
"js",
",",
"useFieldNumber",
"=",
"False",
")",
":",
"for",
"field",
"in",
"pb",
".",
"DESCRIPTOR",
".",
"fields",
":",
"if",
"useFieldNumber",
":",
"key",
"=",
"field",
".",
"number",
"else",
":",
"key",
"=",
"field",
".",
"name",
"if",
"key",
"not",
"in",
"js",
":",
"continue",
"if",
"field",
".",
"type",
"==",
"FD",
".",
"TYPE_MESSAGE",
":",
"pass",
"elif",
"field",
".",
"type",
"in",
"_js2ftype",
":",
"ftype",
"=",
"_js2ftype",
"[",
"field",
".",
"type",
"]",
"else",
":",
"raise",
"ParseError",
"(",
"\"Field %s.%s of type '%d' is not supported\"",
"%",
"(",
"pb",
".",
"__class__",
".",
"__name__",
",",
"field",
".",
"name",
",",
"field",
".",
"type",
",",
")",
")",
"value",
"=",
"js",
"[",
"key",
"]",
"if",
"field",
".",
"label",
"==",
"FD",
".",
"LABEL_REPEATED",
":",
"pb_value",
"=",
"getattr",
"(",
"pb",
",",
"field",
".",
"name",
",",
"None",
")",
"for",
"v",
"in",
"value",
":",
"if",
"field",
".",
"type",
"==",
"FD",
".",
"TYPE_MESSAGE",
":",
"json2pb",
"(",
"pb_value",
".",
"add",
"(",
")",
",",
"v",
",",
"useFieldNumber",
"=",
"useFieldNumber",
")",
"else",
":",
"pb_value",
".",
"append",
"(",
"ftype",
"(",
"v",
")",
")",
"else",
":",
"if",
"field",
".",
"type",
"==",
"FD",
".",
"TYPE_MESSAGE",
":",
"json2pb",
"(",
"getattr",
"(",
"pb",
",",
"field",
".",
"name",
",",
"None",
")",
",",
"value",
",",
"useFieldNumber",
"=",
"useFieldNumber",
")",
"else",
":",
"setattr",
"(",
"pb",
",",
"field",
".",
"name",
",",
"ftype",
"(",
"value",
")",
")",
"return",
"pb"
] |
convert JSON dict to google.protobuf message instance
|
[
"convert",
"JSON",
"string",
"to",
"google",
".",
"protobuf",
".",
"descriptor",
"instance"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/protobuf_json.py#L51-L79
|
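A hypothetical sketch of the record above; `addressbook_pb2.Person` stands in for any compiled protobuf message, and `json2pb` fills it from an already-parsed JSON dict:

import json
from addressbook_pb2 import Person  # hypothetical compiled proto module

# field names in the dict match message field names (useFieldNumber=False)
person = json2pb(Person(), json.loads('{"name": "Ada", "id": 1}'))
print(person.name)  # -> Ada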
18,042
|
stanfordnlp/stanza
|
stanza/nlp/corenlp.py
|
CoreNLPClient.annotate_json
|
def annotate_json(self, text, annotators=None):
"""Return a JSON dict from the CoreNLP server, containing annotations of the text.
:param (str) text: Text to annotate.
:param (list[str]) annotators: a list of annotator names
:return (dict): a dict of annotations
"""
# WARN(chaganty): I'd like to deprecate this function -- we
# should just use annotate().json
#properties = {
# 'annotators': ','.join(annotators or self.default_annotators),
# 'outputFormat': 'json',
#}
#return self._request(text, properties).json(strict=False)
doc = self.annotate(text, annotators)
return doc.json
|
python
|
def annotate_json(self, text, annotators=None):
"""Return a JSON dict from the CoreNLP server, containing annotations of the text.
:param (str) text: Text to annotate.
:param (list[str]) annotators: a list of annotator names
:return (dict): a dict of annotations
"""
# WARN(chaganty): I'd like to deprecate this function -- we
# should just use annotate().json
#properties = {
# 'annotators': ','.join(annotators or self.default_annotators),
# 'outputFormat': 'json',
#}
#return self._request(text, properties).json(strict=False)
doc = self.annotate(text, annotators)
return doc.json
|
[
"def",
"annotate_json",
"(",
"self",
",",
"text",
",",
"annotators",
"=",
"None",
")",
":",
"# WARN(chaganty): I'd like to deprecate this function -- we",
"# should just use annotate().json",
"#properties = {",
"# 'annotators': ','.join(annotators or self.default_annotators),",
"# 'outputFormat': 'json',",
"#}",
"#return self._request(text, properties).json(strict=False)",
"doc",
"=",
"self",
".",
"annotate",
"(",
"text",
",",
"annotators",
")",
"return",
"doc",
".",
"json"
] |
Return a JSON dict from the CoreNLP server, containing annotations of the text.
:param (str) text: Text to annotate.
:param (list[str]) annotators: a list of annotator names
:return (dict): a dict of annotations
|
[
"Return",
"a",
"JSON",
"dict",
"from",
"the",
"CoreNLP",
"server",
"containing",
"annotations",
"of",
"the",
"text",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/corenlp.py#L79-L96
|
18,043
|
stanfordnlp/stanza
|
stanza/nlp/corenlp.py
|
CoreNLPClient.annotate_proto
|
def annotate_proto(self, text, annotators=None):
"""Return a Document protocol buffer from the CoreNLP server, containing annotations of the text.
:param (str) text: text to be annotated
:param (list[str]) annotators: a list of annotator names
:return (CoreNLP_pb2.Document): a Document protocol buffer
"""
properties = {
'annotators': ','.join(annotators or self.default_annotators),
'outputFormat': 'serialized',
'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
}
r = self._request(text, properties)
buffer = r.content # bytes
size, pos = _DecodeVarint(buffer, 0)
buffer = buffer[pos:(pos + size)]
doc = CoreNLP_pb2.Document()
doc.ParseFromString(buffer)
return doc
|
python
|
def annotate_proto(self, text, annotators=None):
"""Return a Document protocol buffer from the CoreNLP server, containing annotations of the text.
:param (str) text: text to be annotated
:param (list[str]) annotators: a list of annotator names
:return (CoreNLP_pb2.Document): a Document protocol buffer
"""
properties = {
'annotators': ','.join(annotators or self.default_annotators),
'outputFormat': 'serialized',
'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
}
r = self._request(text, properties)
buffer = r.content # bytes
size, pos = _DecodeVarint(buffer, 0)
buffer = buffer[pos:(pos + size)]
doc = CoreNLP_pb2.Document()
doc.ParseFromString(buffer)
return doc
|
[
"def",
"annotate_proto",
"(",
"self",
",",
"text",
",",
"annotators",
"=",
"None",
")",
":",
"properties",
"=",
"{",
"'annotators'",
":",
"','",
".",
"join",
"(",
"annotators",
"or",
"self",
".",
"default_annotators",
")",
",",
"'outputFormat'",
":",
"'serialized'",
",",
"'serializer'",
":",
"'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'",
"}",
"r",
"=",
"self",
".",
"_request",
"(",
"text",
",",
"properties",
")",
"buffer",
"=",
"r",
".",
"content",
"# bytes",
"size",
",",
"pos",
"=",
"_DecodeVarint",
"(",
"buffer",
",",
"0",
")",
"buffer",
"=",
"buffer",
"[",
"pos",
":",
"(",
"pos",
"+",
"size",
")",
"]",
"doc",
"=",
"CoreNLP_pb2",
".",
"Document",
"(",
")",
"doc",
".",
"ParseFromString",
"(",
"buffer",
")",
"return",
"doc"
] |
Return a Document protocol buffer from the CoreNLP server, containing annotations of the text.
:param (str) text: text to be annotated
:param (list[str]) annotators: a list of annotator names
:return (CoreNLP_pb2.Document): a Document protocol buffer
|
[
"Return",
"a",
"Document",
"protocol",
"buffer",
"from",
"the",
"CoreNLP",
"server",
"containing",
"annotations",
"of",
"the",
"text",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/corenlp.py#L98-L118
|
18,044
|
stanfordnlp/stanza
|
stanza/nlp/corenlp.py
|
CoreNLPClient.annotate
|
def annotate(self, text, annotators=None):
"""Return an AnnotatedDocument from the CoreNLP server.
:param (str) text: text to be annotated
:param (list[str]) annotators: a list of annotator names
See a list of valid annotator names here:
http://stanfordnlp.github.io/CoreNLP/annotators.html
:return (AnnotatedDocument): an annotated document
"""
doc_pb = self.annotate_proto(text, annotators)
return AnnotatedDocument.from_pb(doc_pb)
|
python
|
def annotate(self, text, annotators=None):
"""Return an AnnotatedDocument from the CoreNLP server.
:param (str) text: text to be annotated
:param (list[str]) annotators: a list of annotator names
See a list of valid annotator names here:
http://stanfordnlp.github.io/CoreNLP/annotators.html
:return (AnnotatedDocument): an annotated document
"""
doc_pb = self.annotate_proto(text, annotators)
return AnnotatedDocument.from_pb(doc_pb)
|
[
"def",
"annotate",
"(",
"self",
",",
"text",
",",
"annotators",
"=",
"None",
")",
":",
"doc_pb",
"=",
"self",
".",
"annotate_proto",
"(",
"text",
",",
"annotators",
")",
"return",
"AnnotatedDocument",
".",
"from_pb",
"(",
"doc_pb",
")"
] |
Return an AnnotatedDocument from the CoreNLP server.
:param (str) text: text to be annotated
:param (list[str]) annotators: a list of annotator names
See a list of valid annotator names here:
http://stanfordnlp.github.io/CoreNLP/annotators.html
:return (AnnotatedDocument): an annotated document
|
[
"Return",
"an",
"AnnotatedDocument",
"from",
"the",
"CoreNLP",
"server",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/corenlp.py#L120-L132
|
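A hedged usage sketch covering the three CoreNLPClient records above; the constructor is not shown in these records, so the server URL argument is an assumption, and a CoreNLP server must be running:

client = CoreNLPClient('http://localhost:9000')  # assumed constructor argument
doc = client.annotate('Stanford is in California.',
                      annotators=['tokenize', 'ssplit', 'pos'])
raw = client.annotate_json('Stanford is in California.')  # plain dict of annotations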
18,045
|
stanfordnlp/stanza
|
stanza/nlp/corenlp.py
|
ProtobufBacked.from_pb
|
def from_pb(cls, pb):
"""Instantiate the object from a protocol buffer.
Args:
pb (protobuf)
Save a reference to the protocol buffer on the object.
"""
obj = cls._from_pb(pb)
obj._pb = pb
return obj
|
python
|
def from_pb(cls, pb):
"""Instantiate the object from a protocol buffer.
Args:
pb (protobuf)
Save a reference to the protocol buffer on the object.
"""
obj = cls._from_pb(pb)
obj._pb = pb
return obj
|
[
"def",
"from_pb",
"(",
"cls",
",",
"pb",
")",
":",
"obj",
"=",
"cls",
".",
"_from_pb",
"(",
"pb",
")",
"obj",
".",
"_pb",
"=",
"pb",
"return",
"obj"
] |
Instantiate the object from a protocol buffer.
Args:
pb (protobuf)
Save a reference to the protocol buffer on the object.
|
[
"Instantiate",
"the",
"object",
"from",
"a",
"protocol",
"buffer",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/corenlp.py#L157-L167
|
18,046
|
stanfordnlp/stanza
|
stanza/nlp/corenlp.py
|
AnnotatedEntity.character_span
|
def character_span(self):
"""
Returns the character span of the token
"""
begin, end = self.token_span
return (self.sentence[begin].character_span[0], self.sentence[end-1].character_span[-1])
|
python
|
def character_span(self):
"""
Returns the character span of the token
"""
begin, end = self.token_span
return (self.sentence[begin].character_span[0], self.sentence[end-1].character_span[-1])
|
[
"def",
"character_span",
"(",
"self",
")",
":",
"begin",
",",
"end",
"=",
"self",
".",
"token_span",
"return",
"(",
"self",
".",
"sentence",
"[",
"begin",
"]",
".",
"character_span",
"[",
"0",
"]",
",",
"self",
".",
"sentence",
"[",
"end",
"-",
"1",
"]",
".",
"character_span",
"[",
"-",
"1",
"]",
")"
] |
Returns the character span of the token
|
[
"Returns",
"the",
"character",
"span",
"of",
"the",
"token"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/corenlp.py#L810-L815
|
18,047
|
stanfordnlp/stanza
|
stanza/research/summary_basic.py
|
TensorBoardLogger.log_proto
|
def log_proto(self, proto, step_num):
"""Log a Summary protobuf to the event file.
:param proto: a Summary protobuf
:param step_num: the iteration number at which this value was logged
"""
self.summ_writer.add_summary(proto, step_num)
return proto
|
python
|
def log_proto(self, proto, step_num):
"""Log a Summary protobuf to the event file.
:param proto: a Summary protobuf
:param step_num: the iteration number at which this value was logged
"""
self.summ_writer.add_summary(proto, step_num)
return proto
|
[
"def",
"log_proto",
"(",
"self",
",",
"proto",
",",
"step_num",
")",
":",
"self",
".",
"summ_writer",
".",
"add_summary",
"(",
"proto",
",",
"step_num",
")",
"return",
"proto"
] |
Log a Summary protobuf to the event file.
:param proto: a Summary protobuf
:param step_num: the iteration number at which this value was logged
|
[
"Log",
"a",
"Summary",
"protobuf",
"to",
"the",
"event",
"file",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/summary_basic.py#L23-L30
|
18,048
|
stanfordnlp/stanza
|
stanza/research/summary_basic.py
|
TensorBoardLogger.log
|
def log(self, key, val, step_num):
"""Directly log a scalar value to the event file.
:param string key: a name for the value
:param val: a float
:param step_num: the iteration number at which this value was logged
"""
try:
ph, summ = self.summaries[key]
except KeyError:
# if we haven't defined a variable for this key, define one
with self.g.as_default():
ph = tf.placeholder(tf.float32, (), name=key) # scalar
summ = tf.scalar_summary(key, ph)
self.summaries[key] = (ph, summ)
summary_str = self.sess.run(summ, {ph: val})
self.summ_writer.add_summary(summary_str, step_num)
return val
|
python
|
def log(self, key, val, step_num):
"""Directly log a scalar value to the event file.
:param string key: a name for the value
:param val: a float
:param step_num: the iteration number at which this value was logged
"""
try:
ph, summ = self.summaries[key]
except KeyError:
# if we haven't defined a variable for this key, define one
with self.g.as_default():
ph = tf.placeholder(tf.float32, (), name=key) # scalar
summ = tf.scalar_summary(key, ph)
self.summaries[key] = (ph, summ)
summary_str = self.sess.run(summ, {ph: val})
self.summ_writer.add_summary(summary_str, step_num)
return val
|
[
"def",
"log",
"(",
"self",
",",
"key",
",",
"val",
",",
"step_num",
")",
":",
"try",
":",
"ph",
",",
"summ",
"=",
"self",
".",
"summaries",
"[",
"key",
"]",
"except",
"KeyError",
":",
"# if we haven't defined a variable for this key, define one",
"with",
"self",
".",
"g",
".",
"as_default",
"(",
")",
":",
"ph",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"(",
")",
",",
"name",
"=",
"key",
")",
"# scalar",
"summ",
"=",
"tf",
".",
"scalar_summary",
"(",
"key",
",",
"ph",
")",
"self",
".",
"summaries",
"[",
"key",
"]",
"=",
"(",
"ph",
",",
"summ",
")",
"summary_str",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"summ",
",",
"{",
"ph",
":",
"val",
"}",
")",
"self",
".",
"summ_writer",
".",
"add_summary",
"(",
"summary_str",
",",
"step_num",
")",
"return",
"val"
] |
Directly log a scalar value to the event file.
:param string key: a name for the value
:param val: a float
:param step_num: the iteration number at which this value was logged
|
[
"Directly",
"log",
"a",
"scalar",
"value",
"to",
"the",
"event",
"file",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/summary_basic.py#L32-L50
|
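A hedged sketch of the two TensorBoardLogger records above; the constructor is not shown here, so the log-directory argument and `train_one_step` are assumptions:

logger = TensorBoardLogger('runs/exp1')  # assumed constructor argument
for step in range(100):
    loss = train_one_step()          # hypothetical training step
    logger.log('loss', loss, step)   # lazily builds one placeholder per key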
18,049
|
stanfordnlp/stanza
|
stanza/monitoring/summary.py
|
read_events
|
def read_events(stream):
'''
Read and return as a generator a sequence of Event protos from
file-like object `stream`.
'''
header_size = struct.calcsize('<QI')
len_size = struct.calcsize('<Q')
footer_size = struct.calcsize('<I')
while True:
header = stream.read(header_size)
if len(header) == 0:
break
elif len(header) < header_size:
raise SummaryReaderException('unexpected EOF (expected a %d-byte header, '
'got %d bytes)' % (header_size, len(header)))
data_len, len_crc = struct.unpack('<QI', header)
len_crc_actual = masked_crc(header[:len_size])
if len_crc_actual != len_crc:
raise SummaryReaderException('incorrect length CRC (%d != %d)' %
(len_crc_actual, len_crc))
data = stream.read(data_len)
if len(data) < data_len:
raise SummaryReaderException('unexpected EOF (expected %d bytes, got %d)' %
(data_len, len(data)))
yield Event.FromString(data)
footer = stream.read(footer_size)
if len(footer) < footer_size:
raise SummaryReaderException('unexpected EOF (expected a %d-byte footer, '
'got %d bytes)' % (footer_size, len(footer)))
data_crc, = struct.unpack('<I', footer)
data_crc_actual = masked_crc(data)
if data_crc_actual != data_crc:
raise SummaryReaderException('incorrect data CRC (%d != %d)' %
(data_crc_actual, data_crc))
|
python
|
def read_events(stream):
'''
Read and return as a generator a sequence of Event protos from
file-like object `stream`.
'''
header_size = struct.calcsize('<QI')
len_size = struct.calcsize('<Q')
footer_size = struct.calcsize('<I')
while True:
header = stream.read(header_size)
if len(header) == 0:
break
elif len(header) < header_size:
raise SummaryReaderException('unexpected EOF (expected a %d-byte header, '
'got %d bytes)' % (header_size, len(header)))
data_len, len_crc = struct.unpack('<QI', header)
len_crc_actual = masked_crc(header[:len_size])
if len_crc_actual != len_crc:
raise SummaryReaderException('incorrect length CRC (%d != %d)' %
(len_crc_actual, len_crc))
data = stream.read(data_len)
if len(data) < data_len:
raise SummaryReaderException('unexpected EOF (expected %d bytes, got %d)' %
(data_len, len(data)))
yield Event.FromString(data)
footer = stream.read(footer_size)
if len(footer) < footer_size:
raise SummaryReaderException('unexpected EOF (expected a %d-byte footer, '
'got %d bytes)' % (footer_size, len(footer)))
data_crc, = struct.unpack('<I', footer)
data_crc_actual = masked_crc(data)
if data_crc_actual != data_crc:
raise SummaryReaderException('incorrect data CRC (%d != %d)' %
(data_crc_actual, data_crc))
|
[
"def",
"read_events",
"(",
"stream",
")",
":",
"header_size",
"=",
"struct",
".",
"calcsize",
"(",
"'<QI'",
")",
"len_size",
"=",
"struct",
".",
"calcsize",
"(",
"'<Q'",
")",
"footer_size",
"=",
"struct",
".",
"calcsize",
"(",
"'<I'",
")",
"while",
"True",
":",
"header",
"=",
"stream",
".",
"read",
"(",
"header_size",
")",
"if",
"len",
"(",
"header",
")",
"==",
"0",
":",
"break",
"elif",
"len",
"(",
"header",
")",
"<",
"header_size",
":",
"raise",
"SummaryReaderException",
"(",
"'unexpected EOF (expected a %d-byte header, '",
"'got %d bytes)'",
"%",
"(",
"header_size",
",",
"len",
"(",
"header",
")",
")",
")",
"data_len",
",",
"len_crc",
"=",
"struct",
".",
"unpack",
"(",
"'<QI'",
",",
"header",
")",
"len_crc_actual",
"=",
"masked_crc",
"(",
"header",
"[",
":",
"len_size",
"]",
")",
"if",
"len_crc_actual",
"!=",
"len_crc",
":",
"raise",
"SummaryReaderException",
"(",
"'incorrect length CRC (%d != %d)'",
"%",
"(",
"len_crc_actual",
",",
"len_crc",
")",
")",
"data",
"=",
"stream",
".",
"read",
"(",
"data_len",
")",
"if",
"len",
"(",
"data",
")",
"<",
"data_len",
":",
"raise",
"SummaryReaderException",
"(",
"'unexpected EOF (expected %d bytes, got %d)'",
"%",
"(",
"data_len",
",",
"len",
"(",
"data",
")",
")",
")",
"yield",
"Event",
".",
"FromString",
"(",
"data",
")",
"footer",
"=",
"stream",
".",
"read",
"(",
"footer_size",
")",
"if",
"len",
"(",
"footer",
")",
"<",
"footer_size",
":",
"raise",
"SummaryReaderException",
"(",
"'unexpected EOF (expected a %d-byte footer, '",
"'got %d bytes)'",
"%",
"(",
"footer_size",
",",
"len",
"(",
"footer",
")",
")",
")",
"data_crc",
",",
"=",
"struct",
".",
"unpack",
"(",
"'<I'",
",",
"footer",
")",
"data_crc_actual",
"=",
"masked_crc",
"(",
"data",
")",
"if",
"data_crc_actual",
"!=",
"data_crc",
":",
"raise",
"SummaryReaderException",
"(",
"'incorrect data CRC (%d != %d)'",
"%",
"(",
"data_crc_actual",
",",
"data_crc",
")",
")"
] |
Read and return as a generator a sequence of Event protos from
file-like object `stream`.
|
[
"Read",
"and",
"return",
"as",
"a",
"generator",
"a",
"sequence",
"of",
"Event",
"protos",
"from",
"file",
"-",
"like",
"object",
"stream",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/summary.py#L286-L322
|
18,050
|
stanfordnlp/stanza
|
stanza/monitoring/summary.py
|
write_events
|
def write_events(stream, events):
'''
Write a sequence of Event protos to file-like object `stream`.
'''
for event in events:
data = event.SerializeToString()
len_field = struct.pack('<Q', len(data))
len_crc = struct.pack('<I', masked_crc(len_field))
data_crc = struct.pack('<I', masked_crc(data))
stream.write(len_field)
stream.write(len_crc)
stream.write(data)
stream.write(data_crc)
|
python
|
def write_events(stream, events):
'''
Write a sequence of Event protos to file-like object `stream`.
'''
for event in events:
data = event.SerializeToString()
len_field = struct.pack('<Q', len(data))
len_crc = struct.pack('<I', masked_crc(len_field))
data_crc = struct.pack('<I', masked_crc(data))
stream.write(len_field)
stream.write(len_crc)
stream.write(data)
stream.write(data_crc)
|
[
"def",
"write_events",
"(",
"stream",
",",
"events",
")",
":",
"for",
"event",
"in",
"events",
":",
"data",
"=",
"event",
".",
"SerializeToString",
"(",
")",
"len_field",
"=",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"len",
"(",
"data",
")",
")",
"len_crc",
"=",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"masked_crc",
"(",
"len_field",
")",
")",
"data_crc",
"=",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"masked_crc",
"(",
"data",
")",
")",
"stream",
".",
"write",
"(",
"len_field",
")",
"stream",
".",
"write",
"(",
"len_crc",
")",
"stream",
".",
"write",
"(",
"data",
")",
"stream",
".",
"write",
"(",
"data_crc",
")"
] |
Write a sequence of Event protos to file-like object `stream`.
|
[
"Write",
"a",
"sequence",
"of",
"Event",
"protos",
"to",
"file",
"-",
"like",
"object",
"stream",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/summary.py#L325-L337
|
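A round-trip sketch for the `read_events`/`write_events` pair above; `Event` is the TensorFlow event proto this module already works with, and keyword construction is standard for protobuf messages:

with open('events.out', 'wb') as f:
    write_events(f, [Event(wall_time=0.0, step=1)])
with open('events.out', 'rb') as f:
    for event in read_events(f):
        print(event.step)  # -> 1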
18,051
|
stanfordnlp/stanza
|
stanza/monitoring/summary.py
|
SummaryWriter.log_image
|
def log_image(self, step, tag, val):
'''
Write an image event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Image in RGB format with values from
0 to 255; a 3-D array with index order (row, column, channel).
`val.shape[-1] == 3`
'''
# TODO: support floating-point tensors, 4-D tensors, grayscale
if len(val.shape) != 3:
raise ValueError('`log_image` value should be a 3-D tensor, instead got shape %s' %
(val.shape,))
if val.shape[2] != 3:
raise ValueError('Last dimension of `log_image` value should be 3 (RGB), '
'instead got shape %s' %
(val.shape,))
fakefile = StringIO()
png.Writer(size=(val.shape[1], val.shape[0])).write(
fakefile, val.reshape(val.shape[0], val.shape[1] * val.shape[2]))
encoded = fakefile.getvalue()
# https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto
RGB = 3
image = Summary.Image(height=val.shape[0], width=val.shape[1],
colorspace=RGB, encoded_image_string=encoded)
summary = Summary(value=[Summary.Value(tag=tag, image=image)])
self._add_event(step, summary)
|
python
|
def log_image(self, step, tag, val):
'''
Write an image event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Image in RGB format with values from
0 to 255; a 3-D array with index order (row, column, channel).
`val.shape[-1] == 3`
'''
# TODO: support floating-point tensors, 4-D tensors, grayscale
if len(val.shape) != 3:
raise ValueError('`log_image` value should be a 3-D tensor, instead got shape %s' %
(val.shape,))
if val.shape[2] != 3:
raise ValueError('Last dimension of `log_image` value should be 3 (RGB), '
'instead got shape %s' %
(val.shape,))
fakefile = StringIO()
png.Writer(size=(val.shape[1], val.shape[0])).write(
fakefile, val.reshape(val.shape[0], val.shape[1] * val.shape[2]))
encoded = fakefile.getvalue()
# https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto
RGB = 3
image = Summary.Image(height=val.shape[0], width=val.shape[1],
colorspace=RGB, encoded_image_string=encoded)
summary = Summary(value=[Summary.Value(tag=tag, image=image)])
self._add_event(step, summary)
|
[
"def",
"log_image",
"(",
"self",
",",
"step",
",",
"tag",
",",
"val",
")",
":",
"# TODO: support floating-point tensors, 4-D tensors, grayscale",
"if",
"len",
"(",
"val",
".",
"shape",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'`log_image` value should be a 3-D tensor, instead got shape %s'",
"%",
"(",
"val",
".",
"shape",
",",
")",
")",
"if",
"val",
".",
"shape",
"[",
"2",
"]",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Last dimension of `log_image` value should be 3 (RGB), '",
"'instead got shape %s'",
"%",
"(",
"val",
".",
"shape",
",",
")",
")",
"fakefile",
"=",
"StringIO",
"(",
")",
"png",
".",
"Writer",
"(",
"size",
"=",
"(",
"val",
".",
"shape",
"[",
"1",
"]",
",",
"val",
".",
"shape",
"[",
"0",
"]",
")",
")",
".",
"write",
"(",
"fakefile",
",",
"val",
".",
"reshape",
"(",
"val",
".",
"shape",
"[",
"0",
"]",
",",
"val",
".",
"shape",
"[",
"1",
"]",
"*",
"val",
".",
"shape",
"[",
"2",
"]",
")",
")",
"encoded",
"=",
"fakefile",
".",
"getvalue",
"(",
")",
"# https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto",
"RGB",
"=",
"3",
"image",
"=",
"Summary",
".",
"Image",
"(",
"height",
"=",
"val",
".",
"shape",
"[",
"0",
"]",
",",
"width",
"=",
"val",
".",
"shape",
"[",
"1",
"]",
",",
"colorspace",
"=",
"RGB",
",",
"encoded_image_string",
"=",
"encoded",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"image",
"=",
"image",
")",
"]",
")",
"self",
".",
"_add_event",
"(",
"step",
",",
"summary",
")"
] |
Write an image event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Image in RGB format with values from
0 to 255; a 3-D array with index order (row, column, channel).
`val.shape[-1] == 3`
|
[
"Write",
"an",
"image",
"event",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/summary.py#L106-L133
|
18,052
|
stanfordnlp/stanza
|
stanza/monitoring/summary.py
|
SummaryWriter.log_scalar
|
def log_scalar(self, step, tag, val):
'''
Write a scalar event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param float val: Scalar to graph at this time step (y-axis)
'''
summary = Summary(value=[Summary.Value(tag=tag, simple_value=float(np.float32(val)))])
self._add_event(step, summary)
|
python
|
def log_scalar(self, step, tag, val):
'''
Write a scalar event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param float val: Scalar to graph at this time step (y-axis)
'''
summary = Summary(value=[Summary.Value(tag=tag, simple_value=float(np.float32(val)))])
self._add_event(step, summary)
|
[
"def",
"log_scalar",
"(",
"self",
",",
"step",
",",
"tag",
",",
"val",
")",
":",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"simple_value",
"=",
"float",
"(",
"np",
".",
"float32",
"(",
"val",
")",
")",
")",
"]",
")",
"self",
".",
"_add_event",
"(",
"step",
",",
"summary",
")"
] |
Write a scalar event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param float val: Scalar to graph at this time step (y-axis)
|
[
"Write",
"a",
"scalar",
"event",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/summary.py#L135-L144
|
18,053
|
stanfordnlp/stanza
|
stanza/monitoring/summary.py
|
SummaryWriter.log_histogram
|
def log_histogram(self, step, tag, val):
'''
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
'''
hist = Histogram()
hist.add(val)
summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())])
self._add_event(step, summary)
|
python
|
def log_histogram(self, step, tag, val):
'''
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
'''
hist = Histogram()
hist.add(val)
summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())])
self._add_event(step, summary)
|
[
"def",
"log_histogram",
"(",
"self",
",",
"step",
",",
"tag",
",",
"val",
")",
":",
"hist",
"=",
"Histogram",
"(",
")",
"hist",
".",
"add",
"(",
"val",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"histo",
"=",
"hist",
".",
"encode_to_proto",
"(",
")",
")",
"]",
")",
"self",
".",
"_add_event",
"(",
"step",
",",
"summary",
")"
] |
Write a histogram event.
:param int step: Time step (x-axis in TensorBoard graphs)
:param str tag: Label for this value
:param numpy.ndarray val: Arbitrary-dimensional array containing
values to be aggregated in the resulting histogram.
|
[
"Write",
"a",
"histogram",
"event",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/summary.py#L146-L158
|
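A hedged sketch covering the three SummaryWriter records above; the constructor is not shown in these records, so the output-file argument is an assumption:

import numpy as np

writer = SummaryWriter('events.out.tfevents.demo')  # assumed constructor argument
writer.log_scalar(step=0, tag='loss', val=1.25)
writer.log_histogram(step=0, tag='weights', val=np.random.randn(100))
writer.log_image(step=0, tag='input',
                 val=np.zeros((32, 32, 3), dtype=np.uint8))  # RGB, values 0-255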
18,054
|
stanfordnlp/stanza
|
stanza/research/config.py
|
options
|
def options(allow_partial=False, read=False):
'''
Get the object containing the values of the parsed command line options.
:param bool allow_partial: If `True`, ignore unrecognized arguments and allow
the options to be re-parsed next time `options` is called. This
also suppresses overwrite checking (the check is performed the first
time `options` is called with `allow_partial=False`).
:param bool read: If `True`, do not create or overwrite a `config.json`
    file, and do not check whether such a file already exists. Use for scripts
that read from the run directory rather than/in addition to writing to it.
:return argparse.Namespace: An object storing the values of the options specified
to the parser returned by `get_options_parser()`.
'''
global _options
if allow_partial:
opts, extras = _options_parser.parse_known_args()
if opts.run_dir:
mkdirp(opts.run_dir)
return opts
if _options is None:
# Add back in the help option (only show help and quit once arguments are finalized)
_options_parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
_options = _options_parser.parse_args()
if _options.run_dir:
mkdirp(_options.run_dir, overwrite=_options.overwrite or read)
if not read:
options_dump = vars(_options)
# People should be able to rerun an experiment with -C config.json safely.
# Don't include the overwrite option, since using a config from an experiment
# done with -O should still require passing -O for it to be overwritten again.
del options_dump['overwrite']
# And don't write the name of the other config file in this new one! It's
# probably harmless (config file interpretation can't be chained with the
# config option), but still confusing.
del options_dump['config']
dump_pretty(options_dump, 'config.json')
return _options
|
python
|
def options(allow_partial=False, read=False):
'''
Get the object containing the values of the parsed command line options.
:param bool allow_partial: If `True`, ignore unrecognized arguments and allow
the options to be re-parsed next time `options` is called. This
also suppresses overwrite checking (the check is performed the first
time `options` is called with `allow_partial=False`).
:param bool read: If `True`, do not create or overwrite a `config.json`
    file, and do not check whether such a file already exists. Use for scripts
that read from the run directory rather than/in addition to writing to it.
:return argparse.Namespace: An object storing the values of the options specified
to the parser returned by `get_options_parser()`.
'''
global _options
if allow_partial:
opts, extras = _options_parser.parse_known_args()
if opts.run_dir:
mkdirp(opts.run_dir)
return opts
if _options is None:
# Add back in the help option (only show help and quit once arguments are finalized)
_options_parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
_options = _options_parser.parse_args()
if _options.run_dir:
mkdirp(_options.run_dir, overwrite=_options.overwrite or read)
if not read:
options_dump = vars(_options)
# People should be able to rerun an experiment with -C config.json safely.
# Don't include the overwrite option, since using a config from an experiment
# done with -O should still require passing -O for it to be overwritten again.
del options_dump['overwrite']
# And don't write the name of the other config file in this new one! It's
# probably harmless (config file interpretation can't be chained with the
# config option), but still confusing.
del options_dump['config']
dump_pretty(options_dump, 'config.json')
return _options
|
[
"def",
"options",
"(",
"allow_partial",
"=",
"False",
",",
"read",
"=",
"False",
")",
":",
"global",
"_options",
"if",
"allow_partial",
":",
"opts",
",",
"extras",
"=",
"_options_parser",
".",
"parse_known_args",
"(",
")",
"if",
"opts",
".",
"run_dir",
":",
"mkdirp",
"(",
"opts",
".",
"run_dir",
")",
"return",
"opts",
"if",
"_options",
"is",
"None",
":",
"# Add back in the help option (only show help and quit once arguments are finalized)",
"_options_parser",
".",
"add_argument",
"(",
"'-h'",
",",
"'--help'",
",",
"action",
"=",
"'help'",
",",
"default",
"=",
"argparse",
".",
"SUPPRESS",
",",
"help",
"=",
"'show this help message and exit'",
")",
"_options",
"=",
"_options_parser",
".",
"parse_args",
"(",
")",
"if",
"_options",
".",
"run_dir",
":",
"mkdirp",
"(",
"_options",
".",
"run_dir",
",",
"overwrite",
"=",
"_options",
".",
"overwrite",
"or",
"read",
")",
"if",
"not",
"read",
":",
"options_dump",
"=",
"vars",
"(",
"_options",
")",
"# People should be able to rerun an experiment with -C config.json safely.",
"# Don't include the overwrite option, since using a config from an experiment",
"# done with -O should still require passing -O for it to be overwritten again.",
"del",
"options_dump",
"[",
"'overwrite'",
"]",
"# And don't write the name of the other config file in this new one! It's",
"# probably harmless (config file interpretation can't be chained with the",
"# config option), but still confusing.",
"del",
"options_dump",
"[",
"'config'",
"]",
"dump_pretty",
"(",
"options_dump",
",",
"'config.json'",
")",
"return",
"_options"
] |
Get the object containing the values of the parsed command line options.
:param bool allow_partial: If `True`, ignore unrecognized arguments and allow
the options to be re-parsed next time `options` is called. This
also suppresses overwrite checking (the check is performed the first
time `options` is called with `allow_partial=False`).
:param bool read: If `True`, do not create or overwrite a `config.json`
file, and do not check whether such a file already exists. Use for scripts
that read from the run directory rather than/in addition to writing to it.
:return argparse.Namespace: An object storing the values of the options specified
to the parser returned by `get_options_parser()`.
|
[
"Get",
"the",
"object",
"containing",
"the",
"values",
"of",
"the",
"parsed",
"command",
"line",
"options",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/config.py#L88-L130
|
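A sketch of the intended call pattern for `options`, assuming arguments such as `--run_dir` were registered on the parser beforehand:

opts = options(allow_partial=True)  # peek at known args; no overwrite check yet
# ... register any remaining arguments on the parser ...
opts = options()                    # final parse; writes config.json to run_dir
print(opts.run_dir)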
18,055
|
stanfordnlp/stanza
|
stanza/ml/embeddings.py
|
Embeddings.inner_products
|
def inner_products(self, vec):
"""Get the inner product of a vector with every embedding.
        :param (np.array) vec: the query vector
        :return (dict[unicode, float]): a map from each word to its inner product with the query
"""
products = self.array.dot(vec)
return self._word_to_score(np.arange(len(products)), products)
|
python
|
def inner_products(self, vec):
"""Get the inner product of a vector with every embedding.
        :param (np.array) vec: the query vector
        :return (dict[unicode, float]): a map from each word to its inner product with the query
"""
products = self.array.dot(vec)
return self._word_to_score(np.arange(len(products)), products)
|
[
"def",
"inner_products",
"(",
"self",
",",
"vec",
")",
":",
"products",
"=",
"self",
".",
"array",
".",
"dot",
"(",
"vec",
")",
"return",
"self",
".",
"_word_to_score",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"products",
")",
")",
",",
"products",
")"
] |
Get the inner product of a vector with every embedding.
:param (np.array) vec: the query vector
:return (dict[unicode, float]): a map from each word to its inner product with the query
|
[
"Get",
"the",
"inner",
"product",
"of",
"a",
"vector",
"with",
"every",
"embedding",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/embeddings.py#L50-L58
|
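A hedged sketch of the record above; `embeddings` is assumed to be an `Embeddings` instance whose rows are word vectors:

import numpy as np

query = np.random.randn(embeddings.array.shape[1])  # match the embedding dim
w2s = embeddings.inner_products(query)
best = max(w2s, key=w2s.get)  # word with the largest inner product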
18,056
|
stanfordnlp/stanza
|
stanza/ml/embeddings.py
|
Embeddings._word_to_score
|
def _word_to_score(self, ids, scores):
"""Return a map from each word to its score.
:param (np.array) ids: a vector of word ids
:param (np.array) scores: a vector of scores
:return (dict[unicode, float]): a map from each word (unicode) to its score (float)
"""
# should be 1-D vectors
assert len(ids.shape) == 1
assert ids.shape == scores.shape
w2s = {}
for i in range(len(ids)):
w2s[self.vocab.index2word(ids[i])] = scores[i]
return w2s
|
python
|
def _word_to_score(self, ids, scores):
"""Return a map from each word to its score.
:param (np.array) ids: a vector of word ids
:param (np.array) scores: a vector of scores
:return (dict[unicode, float]): a map from each word (unicode) to its score (float)
"""
# should be 1-D vectors
assert len(ids.shape) == 1
assert ids.shape == scores.shape
w2s = {}
for i in range(len(ids)):
w2s[self.vocab.index2word(ids[i])] = scores[i]
return w2s
|
[
"def",
"_word_to_score",
"(",
"self",
",",
"ids",
",",
"scores",
")",
":",
"# should be 1-D vectors",
"assert",
"len",
"(",
"ids",
".",
"shape",
")",
"==",
"1",
"assert",
"ids",
".",
"shape",
"==",
"scores",
".",
"shape",
"w2s",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ids",
")",
")",
":",
"w2s",
"[",
"self",
".",
"vocab",
".",
"index2word",
"(",
"ids",
"[",
"i",
"]",
")",
"]",
"=",
"scores",
"[",
"i",
"]",
"return",
"w2s"
] |
Return a map from each word to its score.
:param (np.array) ids: a vector of word ids
:param (np.array) scores: a vector of scores
:return (dict[unicode, float]): a map from each word (unicode) to its score (float)
|
[
"Return",
"a",
"map",
"from",
"each",
"word",
"to",
"its",
"score",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/embeddings.py#L60-L75
|
18,057
|
stanfordnlp/stanza
|
stanza/ml/embeddings.py
|
Embeddings._init_lsh_forest
|
def _init_lsh_forest(self):
"""Construct an LSH forest for nearest neighbor search."""
import sklearn.neighbors
lshf = sklearn.neighbors.LSHForest()
lshf.fit(self.array)
return lshf
|
python
|
def _init_lsh_forest(self):
"""Construct an LSH forest for nearest neighbor search."""
import sklearn.neighbors
lshf = sklearn.neighbors.LSHForest()
lshf.fit(self.array)
return lshf
|
[
"def",
"_init_lsh_forest",
"(",
"self",
")",
":",
"import",
"sklearn",
".",
"neighbors",
"lshf",
"=",
"sklearn",
".",
"neighbors",
".",
"LSHForest",
"(",
")",
"lshf",
".",
"fit",
"(",
"self",
".",
"array",
")",
"return",
"lshf"
] |
Construct an LSH forest for nearest neighbor search.
|
[
"Construct",
"an",
"LSH",
"forest",
"for",
"nearest",
"neighbor",
"search",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/embeddings.py#L88-L93
|
18,058
|
stanfordnlp/stanza
|
stanza/ml/embeddings.py
|
Embeddings.to_dict
|
def to_dict(self):
"""Convert to dictionary.
:return (dict): A dict mapping from strings to vectors.
"""
d = {}
for word, idx in self.vocab.iteritems():
d[word] = self.array[idx].tolist()
return d
|
python
|
def to_dict(self):
"""Convert to dictionary.
:return (dict): A dict mapping from strings to vectors.
"""
d = {}
for word, idx in self.vocab.iteritems():
d[word] = self.array[idx].tolist()
return d
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"}",
"for",
"word",
",",
"idx",
"in",
"self",
".",
"vocab",
".",
"iteritems",
"(",
")",
":",
"d",
"[",
"word",
"]",
"=",
"self",
".",
"array",
"[",
"idx",
"]",
".",
"tolist",
"(",
")",
"return",
"d"
] |
Convert to dictionary.
:return (dict): A dict mapping from strings to vectors.
|
[
"Convert",
"to",
"dictionary",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/embeddings.py#L113-L121
|
18,059
|
stanfordnlp/stanza
|
stanza/ml/embeddings.py
|
Embeddings.to_files
|
def to_files(self, array_file, vocab_file):
"""Write the embedding matrix and the vocab to files.
:param (file) array_file: file to write array to
:param (file) vocab_file: file to write vocab to
"""
logging.info('Writing array...')
np.save(array_file, self.array)
logging.info('Writing vocab...')
self.vocab.to_file(vocab_file)
|
python
|
def to_files(self, array_file, vocab_file):
"""Write the embedding matrix and the vocab to files.
:param (file) array_file: file to write array to
:param (file) vocab_file: file to write vocab to
"""
logging.info('Writing array...')
np.save(array_file, self.array)
logging.info('Writing vocab...')
self.vocab.to_file(vocab_file)
|
[
"def",
"to_files",
"(",
"self",
",",
"array_file",
",",
"vocab_file",
")",
":",
"logging",
".",
"info",
"(",
"'Writing array...'",
")",
"np",
".",
"save",
"(",
"array_file",
",",
"self",
".",
"array",
")",
"logging",
".",
"info",
"(",
"'Writing vocab...'",
")",
"self",
".",
"vocab",
".",
"to_file",
"(",
"vocab_file",
")"
] |
Write the embedding matrix and the vocab to files.
:param (file) array_file: file to write array to
:param (file) vocab_file: file to write vocab to
|
[
"Write",
"the",
"embedding",
"matrix",
"and",
"the",
"vocab",
"to",
"files",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/embeddings.py#L136-L145
|
18,060
|
stanfordnlp/stanza
|
stanza/ml/embeddings.py
|
Embeddings.from_files
|
def from_files(cls, array_file, vocab_file):
"""Load the embedding matrix and the vocab from files.
:param (file) array_file: file to read array from
:param (file) vocab_file: file to read vocab from
:return (Embeddings): an Embeddings object
"""
logging.info('Loading array...')
array = np.load(array_file)
logging.info('Loading vocab...')
vocab = Vocab.from_file(vocab_file)
return cls(array, vocab)
|
python
|
def from_files(cls, array_file, vocab_file):
"""Load the embedding matrix and the vocab from files.
:param (file) array_file: file to read array from
:param (file) vocab_file: file to read vocab from
:return (Embeddings): an Embeddings object
"""
logging.info('Loading array...')
array = np.load(array_file)
logging.info('Loading vocab...')
vocab = Vocab.from_file(vocab_file)
return cls(array, vocab)
|
[
"def",
"from_files",
"(",
"cls",
",",
"array_file",
",",
"vocab_file",
")",
":",
"logging",
".",
"info",
"(",
"'Loading array...'",
")",
"array",
"=",
"np",
".",
"load",
"(",
"array_file",
")",
"logging",
".",
"info",
"(",
"'Loading vocab...'",
")",
"vocab",
"=",
"Vocab",
".",
"from_file",
"(",
"vocab_file",
")",
"return",
"cls",
"(",
"array",
",",
"vocab",
")"
] |
Load the embedding matrix and the vocab from files.
:param (file) array_file: file to read array from
:param (file) vocab_file: file to read vocab from
:return (Embeddings): an Embeddings object
|
[
"Load",
"the",
"embedding",
"matrix",
"and",
"the",
"vocab",
"from",
"files",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/embeddings.py#L148-L160
|
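A save/load round-trip sketch for the `to_files`/`from_files` pair above; the file names and the mode of the vocab file are assumptions, since the vocab format is not shown in these records:

with open('emb.npy', 'wb') as arr_f, open('vocab.txt', 'w') as voc_f:
    embeddings.to_files(arr_f, voc_f)
with open('emb.npy', 'rb') as arr_f, open('vocab.txt') as voc_f:
    restored = Embeddings.from_files(arr_f, voc_f)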
18,061
|
stanfordnlp/stanza
|
stanza/research/codalab.py
|
get_uuids
|
def get_uuids():
"""List all bundle UUIDs in the worksheet."""
result = shell('cl ls -w {} -u'.format(worksheet))
uuids = result.split('\n')
uuids = uuids[1:-1] # trim non uuids
return uuids
|
python
|
def get_uuids():
"""List all bundle UUIDs in the worksheet."""
result = shell('cl ls -w {} -u'.format(worksheet))
uuids = result.split('\n')
uuids = uuids[1:-1] # trim non uuids
return uuids
|
[
"def",
"get_uuids",
"(",
")",
":",
"result",
"=",
"shell",
"(",
"'cl ls -w {} -u'",
".",
"format",
"(",
"worksheet",
")",
")",
"uuids",
"=",
"result",
".",
"split",
"(",
"'\\n'",
")",
"uuids",
"=",
"uuids",
"[",
"1",
":",
"-",
"1",
"]",
"# trim non uuids",
"return",
"uuids"
] |
List all bundle UUIDs in the worksheet.
|
[
"List",
"all",
"bundle",
"UUIDs",
"in",
"the",
"worksheet",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/codalab.py#L48-L53
|
18,062
|
stanfordnlp/stanza
|
stanza/research/codalab.py
|
open_file
|
def open_file(uuid, path):
"""Get the raw file content within a particular bundle at a particular path.
    Paths have no leading slash.
"""
# create temporary file just so we can get an unused file path
f = tempfile.NamedTemporaryFile()
f.close() # close and delete right away
fname = f.name
# download file to temporary path
    cmd = 'cl down -o {} -w {} {}/{}'.format(fname, worksheet, uuid, path)
try:
shell(cmd)
except RuntimeError:
try:
os.remove(fname) # if file exists, remove it
except OSError:
pass
raise IOError('Failed to open file {}/{}'.format(uuid, path))
f = open(fname)
yield f
f.close()
os.remove(fname)
|
python
|
def open_file(uuid, path):
"""Get the raw file content within a particular bundle at a particular path.
    Paths have no leading slash.
"""
# create temporary file just so we can get an unused file path
f = tempfile.NamedTemporaryFile()
f.close() # close and delete right away
fname = f.name
# download file to temporary path
    cmd = 'cl down -o {} -w {} {}/{}'.format(fname, worksheet, uuid, path)
try:
shell(cmd)
except RuntimeError:
try:
os.remove(fname) # if file exists, remove it
except OSError:
pass
raise IOError('Failed to open file {}/{}'.format(uuid, path))
f = open(fname)
yield f
f.close()
os.remove(fname)
|
[
"def",
"open_file",
"(",
"uuid",
",",
"path",
")",
":",
"# create temporary file just so we can get an unused file path",
"f",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"f",
".",
"close",
"(",
")",
"# close and delete right away",
"fname",
"=",
"f",
".",
"name",
"# download file to temporary path",
"cmd",
"=",
"'cl down -o {} -w {} {}/{}'",
".",
"format",
"(",
"fname",
",",
"worksheet",
",",
"uuid",
",",
"path",
")",
"try",
":",
"shell",
"(",
"cmd",
")",
"except",
"RuntimeError",
":",
"try",
":",
"os",
".",
"remove",
"(",
"fname",
")",
"# if file exists, remove it",
"except",
"OSError",
":",
"pass",
"raise",
"IOError",
"(",
"'Failed to open file {}/{}'",
".",
"format",
"(",
"uuid",
",",
"path",
")",
")",
"f",
"=",
"open",
"(",
"fname",
")",
"yield",
"f",
"f",
".",
"close",
"(",
")",
"os",
".",
"remove",
"(",
"fname",
")"
] |
Get the raw file content within a particular bundle at a particular path.
Paths have no leading slash.
|
[
"Get",
"the",
"raw",
"file",
"content",
"within",
"a",
"particular",
"bundle",
"at",
"a",
"particular",
"path",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/codalab.py#L57-L81
|
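A usage sketch for the record above: `open_file` yields the downloaded file, so it is presumably wrapped with `@contextmanager` at the definition site (not shown in this record); the uuid and path below are hypothetical:

with open_file('0x1234abcd', 'stdout') as f:
    print(f.read())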
18,063
|
stanfordnlp/stanza
|
stanza/research/codalab.py
|
Bundle.load_img
|
def load_img(self, img_path):
"""
Return an image object that can be immediately plotted with matplotlib
"""
with open_file(self.uuid, img_path) as f:
return mpimg.imread(f)
|
python
|
def load_img(self, img_path):
"""
Return an image object that can be immediately plotted with matplotlib
"""
with open_file(self.uuid, img_path) as f:
return mpimg.imread(f)
|
[
"def",
"load_img",
"(",
"self",
",",
"img_path",
")",
":",
"with",
"open_file",
"(",
"self",
".",
"uuid",
",",
"img_path",
")",
"as",
"f",
":",
"return",
"mpimg",
".",
"imread",
"(",
"f",
")"
] |
Return an image object that can be immediately plotted with matplotlib
|
[
"Return",
"an",
"image",
"object",
"that",
"can",
"be",
"immediately",
"plotted",
"with",
"matplotlib"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/codalab.py#L126-L131
|
18,064
|
stanfordnlp/stanza
|
stanza/research/output.py
|
output_results
|
def output_results(results, split_id='results', output_stream=None):
'''
Log `results` readably to `output_stream`, with a header
containing `split_id`.
:param results: a dictionary of summary statistics from an evaluation
:type results: dict(str -> object)
:param str split_id: an identifier for the source of `results` (e.g. 'dev')
:param file output_stream: the file-like object to which to log the results
(default: stdout)
:type split_id: str
'''
if output_stream is None:
output_stream = sys.stdout
output_stream.write('----- %s -----\n' % split_id)
for name in sorted(results.keys()):
output_stream.write('%s: %s\n' % (name, repr(results[name])))
output_stream.flush()
|
python
|
def output_results(results, split_id='results', output_stream=None):
'''
Log `results` readably to `output_stream`, with a header
containing `split_id`.
:param results: a dictionary of summary statistics from an evaluation
:type results: dict(str -> object)
:param str split_id: an identifier for the source of `results` (e.g. 'dev')
:param file output_stream: the file-like object to which to log the results
(default: stdout)
:type split_id: str
'''
if output_stream is None:
output_stream = sys.stdout
output_stream.write('----- %s -----\n' % split_id)
for name in sorted(results.keys()):
output_stream.write('%s: %s\n' % (name, repr(results[name])))
output_stream.flush()
|
[
"def",
"output_results",
"(",
"results",
",",
"split_id",
"=",
"'results'",
",",
"output_stream",
"=",
"None",
")",
":",
"if",
"output_stream",
"is",
"None",
":",
"output_stream",
"=",
"sys",
".",
"stdout",
"output_stream",
".",
"write",
"(",
"'----- %s -----\\n'",
"%",
"split_id",
")",
"for",
"name",
"in",
"sorted",
"(",
"results",
".",
"keys",
"(",
")",
")",
":",
"output_stream",
".",
"write",
"(",
"'%s: %s\\n'",
"%",
"(",
"name",
",",
"repr",
"(",
"results",
"[",
"name",
"]",
")",
")",
")",
"output_stream",
".",
"flush",
"(",
")"
] |
Log `results` readably to `output_stream`, with a header
containing `split_id`.
:param results: a dictionary of summary statistics from an evaluation
:type results: dict(str -> object)
:param str split_id: an identifier for the source of `results` (e.g. 'dev')
:param file output_stream: the file-like object to which to log the results
(default: stdout)
:type split_id: str
|
[
"Log",
"results",
"readably",
"to",
"output_stream",
"with",
"a",
"header",
"containing",
"split_id",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/output.py#L4-L25
|
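A minimal sketch of the helper above; keys are printed in sorted order:

output_results({'accuracy.mean': 0.91, 'num_params': 12345}, split_id='dev')
# ----- dev -----
# accuracy.mean: 0.91
# num_params: 12345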
18,065
|
stanfordnlp/stanza
|
stanza/ml/tensorflow_utils.py
|
labels_to_onehots
|
def labels_to_onehots(labels, num_classes):
"""Convert a vector of integer class labels to a matrix of one-hot target vectors.
    :param labels: a vector of integer labels, 0 to num_classes - 1. Has shape (batch_size,).
:param num_classes: the total number of classes
:return: has shape (batch_size, num_classes)
"""
batch_size = labels.get_shape().as_list()[0]
with tf.name_scope("one_hot"):
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
sparse_ptrs = tf.concat(1, [indices, labels], name="ptrs")
onehots = tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes],
1.0, 0.0)
return onehots
|
python
|
def labels_to_onehots(labels, num_classes):
"""Convert a vector of integer class labels to a matrix of one-hot target vectors.
    :param labels: a vector of integer labels, 0 to num_classes - 1. Has shape (batch_size,).
:param num_classes: the total number of classes
:return: has shape (batch_size, num_classes)
"""
batch_size = labels.get_shape().as_list()[0]
with tf.name_scope("one_hot"):
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
sparse_ptrs = tf.concat(1, [indices, labels], name="ptrs")
onehots = tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes],
1.0, 0.0)
return onehots
|
[
"def",
"labels_to_onehots",
"(",
"labels",
",",
"num_classes",
")",
":",
"batch_size",
"=",
"labels",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"0",
"]",
"with",
"tf",
".",
"name_scope",
"(",
"\"one_hot\"",
")",
":",
"labels",
"=",
"tf",
".",
"expand_dims",
"(",
"labels",
",",
"1",
")",
"indices",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"range",
"(",
"0",
",",
"batch_size",
",",
"1",
")",
",",
"1",
")",
"sparse_ptrs",
"=",
"tf",
".",
"concat",
"(",
"1",
",",
"[",
"indices",
",",
"labels",
"]",
",",
"name",
"=",
"\"ptrs\"",
")",
"onehots",
"=",
"tf",
".",
"sparse_to_dense",
"(",
"sparse_ptrs",
",",
"[",
"batch_size",
",",
"num_classes",
"]",
",",
"1.0",
",",
"0.0",
")",
"return",
"onehots"
] |
Convert a vector of integer class labels to a matrix of one-hot target vectors.
:param labels: a vector of integer labels, 0 to num_classes - 1. Has shape (batch_size,).
:param num_classes: the total number of classes
:return: has shape (batch_size, num_classes)
|
[
"Convert",
"a",
"vector",
"of",
"integer",
"class",
"labels",
"to",
"a",
"matrix",
"of",
"one",
"-",
"hot",
"target",
"vectors",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/ml/tensorflow_utils.py#L6-L21
|
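The row above targets the pre-1.0 TensorFlow API: this tf.concat takes the axis as its first argument, and tf.sparse_to_dense has long been deprecated; in later TF releases the whole helper reduces to tf.one_hot(labels, num_classes). A dependency-light NumPy sketch of the same mapping (illustrative, not from the repo):

import numpy as np

def labels_to_onehots_np(labels, num_classes):
    # labels: shape (batch_size,) of ints in [0, num_classes)
    onehots = np.zeros((len(labels), num_classes), dtype=np.float32)
    onehots[np.arange(len(labels)), labels] = 1.0
    return onehots

print(labels_to_onehots_np(np.array([0, 2, 1]), 3))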
18,066
|
stanfordnlp/stanza
|
stanza/monitoring/progress.py
|
ProgressMonitor.start_task
|
def start_task(self, name, size):
'''
Add a task to the stack. If, for example, `name` is `'Iteration'` and
`size` is 10, progress on that task will be shown as
..., Iteration <p> of 10, ...
:param str name: A descriptive name for the type of subtask that is
being completed.
:param int size: The total number of subtasks to complete.
'''
if len(self.task_stack) == 0:
self.start_time = datetime.datetime.now()
self.task_stack.append(Task(name, size, 0))
|
python
|
def start_task(self, name, size):
'''
Add a task to the stack. If, for example, `name` is `'Iteration'` and
`size` is 10, progress on that task will be shown as
..., Iteration <p> of 10, ...
:param str name: A descriptive name for the type of subtask that is
being completed.
:param int size: The total number of subtasks to complete.
'''
if len(self.task_stack) == 0:
self.start_time = datetime.datetime.now()
self.task_stack.append(Task(name, size, 0))
|
[
"def",
"start_task",
"(",
"self",
",",
"name",
",",
"size",
")",
":",
"if",
"len",
"(",
"self",
".",
"task_stack",
")",
"==",
"0",
":",
"self",
".",
"start_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"task_stack",
".",
"append",
"(",
"Task",
"(",
"name",
",",
"size",
",",
"0",
")",
")"
] |
Add a task to the stack. If, for example, `name` is `'Iteration'` and
`size` is 10, progress on that task will be shown as
..., Iteration <p> of 10, ...
:param str name: A descriptive name for the type of subtask that is
being completed.
:param int size: The total number of subtasks to complete.
|
[
"Add",
"a",
"task",
"to",
"the",
"stack",
".",
"If",
"for",
"example",
"name",
"is",
"Iteration",
"and",
"size",
"is",
"10",
"progress",
"on",
"that",
"task",
"will",
"be",
"shown",
"as"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/progress.py#L58-L71
|
18,067
|
stanfordnlp/stanza
|
stanza/monitoring/progress.py
|
ProgressMonitor.progress
|
def progress(self, p):
'''
Update the current progress on the task at the top of the stack.
:param int p: The current subtask number, between 0 and `size`
(passed to `start_task`), inclusive.
'''
self.task_stack[-1] = self.task_stack[-1]._replace(progress=p)
self.progress_report()
|
python
|
def progress(self, p):
'''
Update the current progress on the task at the top of the stack.
:param int p: The current subtask number, between 0 and `size`
(passed to `start_task`), inclusive.
'''
self.task_stack[-1] = self.task_stack[-1]._replace(progress=p)
self.progress_report()
|
[
"def",
"progress",
"(",
"self",
",",
"p",
")",
":",
"self",
".",
"task_stack",
"[",
"-",
"1",
"]",
"=",
"self",
".",
"task_stack",
"[",
"-",
"1",
"]",
".",
"_replace",
"(",
"progress",
"=",
"p",
")",
"self",
".",
"progress_report",
"(",
")"
] |
Update the current progress on the task at the top of the stack.
:param int p: The current subtask number, between 0 and `size`
(passed to `start_task`), inclusive.
|
[
"Update",
"the",
"current",
"progress",
"on",
"the",
"task",
"at",
"the",
"top",
"of",
"the",
"stack",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/progress.py#L73-L81
|
18,068
|
stanfordnlp/stanza
|
stanza/monitoring/progress.py
|
ProgressMonitor.end_task
|
def end_task(self):
'''
Remove the current task from the stack.
'''
self.progress(self.task_stack[-1].size)
self.task_stack.pop()
|
python
|
def end_task(self):
'''
Remove the current task from the stack.
'''
self.progress(self.task_stack[-1].size)
self.task_stack.pop()
|
[
"def",
"end_task",
"(",
"self",
")",
":",
"self",
".",
"progress",
"(",
"self",
".",
"task_stack",
"[",
"-",
"1",
"]",
".",
"size",
")",
"self",
".",
"task_stack",
".",
"pop",
"(",
")"
] |
Remove the current task from the stack.
|
[
"Remove",
"the",
"current",
"task",
"from",
"the",
"stack",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/progress.py#L83-L88
|
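Taken together, the three ProgressMonitor rows above (start_task, progress, end_task) form a push/pop API for nested progress reporting. A minimal usage sketch (constructor arguments are assumed, since the class definition is not quoted here):

monitor = ProgressMonitor()          # constructor signature assumed
monitor.start_task('Epoch', 3)
for epoch in range(3):
    monitor.progress(epoch)
    monitor.start_task('Iteration', 100)
    for it in range(100):
        monitor.progress(it)
    monitor.end_task()
monitor.end_task()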
18,069
|
stanfordnlp/stanza
|
stanza/monitoring/progress.py
|
ProgressMonitor.progress_report
|
def progress_report(self, force=False):
'''
Print the current progress.
:param bool force: If `True`, print the report regardless of the
elapsed time since the last progress report.
'''
now = datetime.datetime.now()
if (len(self.task_stack) > 1 or self.task_stack[0] > 0) and \
now - self.last_report < self.resolution and not force:
return
stack_printout = ', '.join('%s %s of %s' % (t.name, t.progress, t.size)
for t in self.task_stack)
frac_done = self.fraction_done()
if frac_done == 0.0:
now_str = now.strftime('%c')
eta_str = 'unknown on %s' % now_str
else:
elapsed = (now - self.start_time)
estimated_length = elapsed.total_seconds() / frac_done
eta = self.start_time + datetime.timedelta(seconds=estimated_length)
eta_str = eta.strftime('%c')
print '%s (~%d%% done, ETA %s)' % (stack_printout,
round(frac_done * 100.0),
eta_str)
self.last_report = datetime.datetime.now()
|
python
|
def progress_report(self, force=False):
'''
Print the current progress.
:param bool force: If `True`, print the report regardless of the
elapsed time since the last progress report.
'''
now = datetime.datetime.now()
if (len(self.task_stack) > 1 or self.task_stack[0] > 0) and \
now - self.last_report < self.resolution and not force:
return
stack_printout = ', '.join('%s %s of %s' % (t.name, t.progress, t.size)
for t in self.task_stack)
frac_done = self.fraction_done()
if frac_done == 0.0:
now_str = now.strftime('%c')
eta_str = 'unknown on %s' % now_str
else:
elapsed = (now - self.start_time)
estimated_length = elapsed.total_seconds() / frac_done
eta = self.start_time + datetime.timedelta(seconds=estimated_length)
eta_str = eta.strftime('%c')
print '%s (~%d%% done, ETA %s)' % (stack_printout,
round(frac_done * 100.0),
eta_str)
self.last_report = datetime.datetime.now()
|
[
"def",
"progress_report",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"if",
"(",
"len",
"(",
"self",
".",
"task_stack",
")",
">",
"1",
"or",
"self",
".",
"task_stack",
"[",
"0",
"]",
">",
"0",
")",
"and",
"now",
"-",
"self",
".",
"last_report",
"<",
"self",
".",
"resolution",
"and",
"not",
"force",
":",
"return",
"stack_printout",
"=",
"', '",
".",
"join",
"(",
"'%s %s of %s'",
"%",
"(",
"t",
".",
"name",
",",
"t",
".",
"progress",
",",
"t",
".",
"size",
")",
"for",
"t",
"in",
"self",
".",
"task_stack",
")",
"frac_done",
"=",
"self",
".",
"fraction_done",
"(",
")",
"if",
"frac_done",
"==",
"0.0",
":",
"now_str",
"=",
"now",
".",
"strftime",
"(",
"'%c'",
")",
"eta_str",
"=",
"'unknown on %s'",
"%",
"now_str",
"else",
":",
"elapsed",
"=",
"(",
"now",
"-",
"self",
".",
"start_time",
")",
"estimated_length",
"=",
"elapsed",
".",
"total_seconds",
"(",
")",
"/",
"frac_done",
"eta",
"=",
"self",
".",
"start_time",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"estimated_length",
")",
"eta_str",
"=",
"eta",
".",
"strftime",
"(",
"'%c'",
")",
"print",
"'%s (~%d%% done, ETA %s)'",
"%",
"(",
"stack_printout",
",",
"round",
"(",
"frac_done",
"*",
"100.0",
")",
",",
"eta_str",
")",
"self",
".",
"last_report",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")"
] |
Print the current progress.
:param bool force: If `True`, print the report regardless of the
elapsed time since the last progress report.
|
[
"Print",
"the",
"current",
"progress",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/progress.py#L90-L118
|
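Two caveats on the progress_report row above: the bare print '...' statement is Python 2 syntax, and the throttle condition compares the Task namedtuple self.task_stack[0] directly to 0, where self.task_stack[0].progress appears to be the intent. A hedged Python 3 rendering of just those two lines (a sketch, not upstream code):

# inside progress_report, after computing `now`
if (len(self.task_stack) > 1 or self.task_stack[0].progress > 0) and \
        now - self.last_report < self.resolution and not force:
    return
# ... and the final report line:
print('%s (~%d%% done, ETA %s)' % (stack_printout, round(frac_done * 100.0), eta_str))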
18,070
|
stanfordnlp/stanza
|
stanza/text/dataset.py
|
Dataset.write_conll
|
def write_conll(self, fname):
"""
Serializes the dataset in CONLL format to fname
"""
if 'label' not in self.fields:
raise InvalidFieldsException("dataset is not in CONLL format: missing label field")
def instance_to_conll(inst):
tab = [v for k, v in inst.items() if k != 'label']
return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)]))
with open(fname, 'wb') as f:
f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label'])))
for i, d in enumerate(self):
f.write('\n{}'.format(instance_to_conll(d)))
if i != len(self) - 1:
f.write('\n')
|
python
|
def write_conll(self, fname):
"""
Serializes the dataset in CONLL format to fname
"""
if 'label' not in self.fields:
raise InvalidFieldsException("dataset is not in CONLL format: missing label field")
def instance_to_conll(inst):
tab = [v for k, v in inst.items() if k != 'label']
return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)]))
with open(fname, 'wb') as f:
f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label'])))
for i, d in enumerate(self):
f.write('\n{}'.format(instance_to_conll(d)))
if i != len(self) - 1:
f.write('\n')
|
[
"def",
"write_conll",
"(",
"self",
",",
"fname",
")",
":",
"if",
"'label'",
"not",
"in",
"self",
".",
"fields",
":",
"raise",
"InvalidFieldsException",
"(",
"\"dataset is not in CONLL format: missing label field\"",
")",
"def",
"instance_to_conll",
"(",
"inst",
")",
":",
"tab",
"=",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"inst",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"'label'",
"]",
"return",
"'{}\\n{}'",
".",
"format",
"(",
"inst",
"[",
"'label'",
"]",
",",
"'\\n'",
".",
"join",
"(",
"[",
"'\\t'",
".",
"join",
"(",
"[",
"'-'",
"if",
"e",
"is",
"None",
"else",
"str",
"(",
"e",
")",
"for",
"e",
"in",
"row",
"]",
")",
"for",
"row",
"in",
"zip",
"(",
"*",
"tab",
")",
"]",
")",
")",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'# {}'",
".",
"format",
"(",
"'\\t'",
".",
"join",
"(",
"[",
"k",
"for",
"k",
"in",
"self",
".",
"fields",
"if",
"k",
"!=",
"'label'",
"]",
")",
")",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"self",
")",
":",
"f",
".",
"write",
"(",
"'\\n{}'",
".",
"format",
"(",
"instance_to_conll",
"(",
"d",
")",
")",
")",
"if",
"i",
"!=",
"len",
"(",
"self",
")",
"-",
"1",
":",
"f",
".",
"write",
"(",
"'\\n'",
")"
] |
Serializes the dataset in CONLL format to fname
|
[
"Serializes",
"the",
"dataset",
"in",
"CONLL",
"format",
"to",
"fname"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/dataset.py#L122-L138
|
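write_conll above is Python 2 code: the file is opened in 'wb' mode and then written with str objects, which raises TypeError on Python 3 (open in text mode there instead). A usage sketch (the OrderedDict constructor form is inferred from the convert row below; field names are placeholders):

from collections import OrderedDict

ds = Dataset(OrderedDict([
    ('word', [['The', 'dog']]),   # one instance, one token column
    ('label', ['animal']),        # required 'label' field
]))
ds.write_conll('out.conll')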
18,071
|
stanfordnlp/stanza
|
stanza/text/dataset.py
|
Dataset.convert
|
def convert(self, converters, in_place=False):
"""
Applies transformations to the dataset.
:param converters: A dictionary specifying the function to apply to each field. If a field is missing from the dictionary, then it will not be transformed.
:param in_place: Whether to perform the transformation in place or create a new dataset instance
:return: the transformed dataset instance
"""
dataset = self if in_place else self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items()]))
for name, convert in converters.items():
if name not in self.fields.keys():
raise InvalidFieldsException('Converter specified for non-existent field {}'.format(name))
for i, d in enumerate(dataset.fields[name]):
dataset.fields[name][i] = convert(d)
return dataset
|
python
|
def convert(self, converters, in_place=False):
"""
Applies transformations to the dataset.
:param converters: A dictionary specifying the function to apply to each field. If a field is missing from the dictionary, then it will not be transformed.
:param in_place: Whether to perform the transformation in place or create a new dataset instance
:return: the transformed dataset instance
"""
dataset = self if in_place else self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items()]))
for name, convert in converters.items():
if name not in self.fields.keys():
raise InvalidFieldsException('Converter specified for non-existent field {}'.format(name))
for i, d in enumerate(dataset.fields[name]):
dataset.fields[name][i] = convert(d)
return dataset
|
[
"def",
"convert",
"(",
"self",
",",
"converters",
",",
"in_place",
"=",
"False",
")",
":",
"dataset",
"=",
"self",
"if",
"in_place",
"else",
"self",
".",
"__class__",
"(",
"OrderedDict",
"(",
"[",
"(",
"name",
",",
"data",
"[",
":",
"]",
")",
"for",
"name",
",",
"data",
"in",
"self",
".",
"fields",
".",
"items",
"(",
")",
"]",
")",
")",
"for",
"name",
",",
"convert",
"in",
"converters",
".",
"items",
"(",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"fields",
".",
"keys",
"(",
")",
":",
"raise",
"InvalidFieldsException",
"(",
"'Converter specified for non-existent field {}'",
".",
"format",
"(",
"name",
")",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dataset",
".",
"fields",
"[",
"name",
"]",
")",
":",
"dataset",
".",
"fields",
"[",
"name",
"]",
"[",
"i",
"]",
"=",
"convert",
"(",
"d",
")",
"return",
"dataset"
] |
Applies transformations to the dataset.
:param converters: A dictionary specifying the function to apply to each field. If a field is missing from the dictionary, then it will not be transformed.
:param in_place: Whether to perform the transformation in place or create a new dataset instance
:return: the transformed dataset instance
|
[
"Applies",
"transformations",
"to",
"the",
"dataset",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/dataset.py#L140-L156
|
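A usage sketch for convert (the 'word' field name is a placeholder): lower-case every token without mutating the original dataset, or do the same in place:

lowered = ds.convert({'word': lambda toks: [t.lower() for t in toks]})
ds.convert({'word': lambda toks: [t.lower() for t in toks]}, in_place=True)
# a converter for a field that does not exist raises InvalidFieldsException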
18,072
|
stanfordnlp/stanza
|
stanza/text/dataset.py
|
Dataset.shuffle
|
def shuffle(self):
"""
Re-indexes the dataset in random order
:return: the shuffled dataset instance
"""
order = range(len(self))
random.shuffle(order)
for name, data in self.fields.items():
reindexed = []
for _, i in enumerate(order):
reindexed.append(data[i])
self.fields[name] = reindexed
return self
|
python
|
def shuffle(self):
"""
Re-indexes the dataset in random order
:return: the shuffled dataset instance
"""
order = range(len(self))
random.shuffle(order)
for name, data in self.fields.items():
reindexed = []
for _, i in enumerate(order):
reindexed.append(data[i])
self.fields[name] = reindexed
return self
|
[
"def",
"shuffle",
"(",
"self",
")",
":",
"order",
"=",
"range",
"(",
"len",
"(",
"self",
")",
")",
"random",
".",
"shuffle",
"(",
"order",
")",
"for",
"name",
",",
"data",
"in",
"self",
".",
"fields",
".",
"items",
"(",
")",
":",
"reindexed",
"=",
"[",
"]",
"for",
"_",
",",
"i",
"in",
"enumerate",
"(",
"order",
")",
":",
"reindexed",
".",
"append",
"(",
"data",
"[",
"i",
"]",
")",
"self",
".",
"fields",
"[",
"name",
"]",
"=",
"reindexed",
"return",
"self"
] |
Re-indexes the dataset in random order
:return: the shuffled dataset instance
|
[
"Re",
"-",
"indexes",
"the",
"dataset",
"in",
"random",
"order"
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/dataset.py#L158-L171
|
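shuffle above is also Python 2-only: random.shuffle needs a mutable sequence, and on Python 3 range(len(self)) is an immutable range object; note too that enumerate(order) is used only for its values. A sketch of a Python 3-safe core for the same method body:

import random

order = list(range(len(self)))      # list() is required on Python 3
random.shuffle(order)
for name, data in self.fields.items():
    self.fields[name] = [data[i] for i in order]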
18,073
|
stanfordnlp/stanza
|
stanza/text/dataset.py
|
Dataset.pad
|
def pad(cls, sequences, padding, pad_len=None):
"""
Pads a list of sequences such that they form a matrix.
:param sequences: a list of sequences of varying lengths.
:param padding: the value of padded cells.
:param pad_len: the length of the maximum padded sequence.
"""
max_len = max([len(s) for s in sequences])
pad_len = pad_len or max_len
assert pad_len >= max_len, 'pad_len {} must be greater or equal to the longest sequence {}'.format(pad_len, max_len)
for i, s in enumerate(sequences):
sequences[i] = [padding] * (pad_len - len(s)) + s
return np.array(sequences)
|
python
|
def pad(cls, sequences, padding, pad_len=None):
"""
Pads a list of sequences such that they form a matrix.
:param sequences: a list of sequences of varying lengths.
:param padding: the value of padded cells.
:param pad_len: the length of the maximum padded sequence.
"""
max_len = max([len(s) for s in sequences])
pad_len = pad_len or max_len
assert pad_len >= max_len, 'pad_len {} must be greater or equal to the longest sequence {}'.format(pad_len, max_len)
for i, s in enumerate(sequences):
sequences[i] = [padding] * (pad_len - len(s)) + s
return np.array(sequences)
|
[
"def",
"pad",
"(",
"cls",
",",
"sequences",
",",
"padding",
",",
"pad_len",
"=",
"None",
")",
":",
"max_len",
"=",
"max",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"sequences",
"]",
")",
"pad_len",
"=",
"pad_len",
"or",
"max_len",
"assert",
"pad_len",
">=",
"max_len",
",",
"'pad_len {} must be greater or equal to the longest sequence {}'",
".",
"format",
"(",
"pad_len",
",",
"max_len",
")",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"sequences",
")",
":",
"sequences",
"[",
"i",
"]",
"=",
"[",
"padding",
"]",
"*",
"(",
"pad_len",
"-",
"len",
"(",
"s",
")",
")",
"+",
"s",
"return",
"np",
".",
"array",
"(",
"sequences",
")"
] |
Pads a list of sequences such that they form a matrix.
:param sequences: a list of sequences of varying lengths.
:param padding: the value of padded cells.
:param pad_len: the length of the maximum padded sequence.
|
[
"Pads",
"a",
"list",
"of",
"sequences",
"such",
"that",
"they",
"form",
"a",
"matrix",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/text/dataset.py#L208-L221
|
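pad left-pads each sequence to a common length and, as written, also rebinds the elements of the list passed in, so the caller's list is modified. A quick sketch:

import numpy as np  # pad returns an np.array

padded = Dataset.pad([[1, 2, 3], [4]], padding=0)
# -> array([[1, 2, 3],
#           [0, 0, 4]])   shorter sequences are left-padded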
18,074
|
stanfordnlp/stanza
|
stanza/research/metrics.py
|
bleu
|
def bleu(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return corpus-level BLEU score of `predictions` using the `output`
field of the instances in `eval_data` as references. This is returned
as a length-1 list of floats.
This uses the NLTK unsmoothed implementation, which has been known
to have some bugs. This function patches over the biggest bug, which
is that NLTK ignores n-gram overlap counts of 0 (this should result
in a zero BLEU score).
>>> data = [Instance('input', 'this is the good'),
... Instance('input', 'the bad'),
... Instance('input', 'and the ugly')]
>>> bleu(data, ['this is the good', 'the good', 'seriously really good']) # doctest: +ELLIPSIS
[0.65599...]
>>> np.exp(np.mean([np.log(5. / 9.), np.log(3. / 6.),
... np.log(2. / 3.), np.log(1. / 1.)])) # doctest: +ELLIPSIS
0.65599...
'''
ref_groups = ([inst.output.split()]
if isinstance(inst.output, basestring) else
[_maybe_tokenize(r) for r in inst.output]
for inst in eval_data)
return [corpus_bleu(ref_groups, [p.split() for p in predictions])]
|
python
|
def bleu(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return corpus-level BLEU score of `predictions` using the `output`
field of the instances in `eval_data` as references. This is returned
as a length-1 list of floats.
This uses the NLTK unsmoothed implementation, which has been known
to have some bugs. This function patches over the biggest bug, which
is that NLTK ignores n-gram overlap counts of 0 (this should result
in a zero BLEU score).
>>> data = [Instance('input', 'this is the good'),
... Instance('input', 'the bad'),
... Instance('input', 'and the ugly')]
>>> bleu(data, ['this is the good', 'the good', 'seriously really good']) # doctest: +ELLIPSIS
[0.65599...]
>>> np.exp(np.mean([np.log(5. / 9.), np.log(3. / 6.),
... np.log(2. / 3.), np.log(1. / 1.)])) # doctest: +ELLIPSIS
0.65599...
'''
ref_groups = ([inst.output.split()]
if isinstance(inst.output, basestring) else
[_maybe_tokenize(r) for r in inst.output]
for inst in eval_data)
return [corpus_bleu(ref_groups, [p.split() for p in predictions])]
|
[
"def",
"bleu",
"(",
"eval_data",
",",
"predictions",
",",
"scores",
"=",
"'ignored'",
",",
"learner",
"=",
"'ignored'",
")",
":",
"ref_groups",
"=",
"(",
"[",
"inst",
".",
"output",
".",
"split",
"(",
")",
"]",
"if",
"isinstance",
"(",
"inst",
".",
"output",
",",
"basestring",
")",
"else",
"[",
"_maybe_tokenize",
"(",
"r",
")",
"for",
"r",
"in",
"inst",
".",
"output",
"]",
"for",
"inst",
"in",
"eval_data",
")",
"return",
"[",
"corpus_bleu",
"(",
"ref_groups",
",",
"[",
"p",
".",
"split",
"(",
")",
"for",
"p",
"in",
"predictions",
"]",
")",
"]"
] |
Return corpus-level BLEU score of `predictions` using the `output`
field of the instances in `eval_data` as references. This is returned
as a length-1 list of floats.
This uses the NLTK unsmoothed implementation, which has been known
to have some bugs. This function patches over the biggest bug, which
is that NLTK ignores n-gram overlap counts of 0 (this should result
in a zero BLEU score).
>>> data = [Instance('input', 'this is the good'),
... Instance('input', 'the bad'),
... Instance('input', 'and the ugly')]
>>> bleu(data, ['this is the good', 'the good', 'seriously really good']) # doctest: +ELLIPSIS
[0.65599...]
>>> np.exp(np.mean([np.log(5. / 9.), np.log(3. / 6.),
... np.log(2. / 3.), np.log(1. / 1.)])) # doctest: +ELLIPSIS
0.65599...
|
[
"Return",
"corpus",
"-",
"level",
"BLEU",
"score",
"of",
"predictions",
"using",
"the",
"output",
"field",
"of",
"the",
"instances",
"in",
"eval_data",
"as",
"references",
".",
"This",
"is",
"returned",
"as",
"a",
"length",
"-",
"1",
"list",
"of",
"floats",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/metrics.py#L70-L94
|
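bleu above relies on basestring, which exists only on Python 2. A common forward-compatibility shim (a sketch, not from the repo):

try:
    basestring
except NameError:       # Python 3
    basestring = str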
18,075
|
stanfordnlp/stanza
|
stanza/research/metrics.py
|
squared_error
|
def squared_error(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return the squared error of each prediction in `predictions` with respect
to the correct output in `eval_data`.
>>> data = [Instance('input', (0., 0., 1.)),
... Instance('input', (0., 1., 1.)),
... Instance('input', (1., 0., 0.))]
>>> squared_error(data, [(0., 1., 1.), (0., 1., 1.), (-1., 1., 0.)])
[1.0, 0.0, 5.0]
'''
return [np.sum((np.array(pred) - np.array(inst.output)) ** 2)
for inst, pred in zip(eval_data, predictions)]
|
python
|
def squared_error(eval_data, predictions, scores='ignored', learner='ignored'):
'''
Return the squared error of each prediction in `predictions` with respect
to the correct output in `eval_data`.
>>> data = [Instance('input', (0., 0., 1.)),
... Instance('input', (0., 1., 1.)),
... Instance('input', (1., 0., 0.))]
>>> squared_error(data, [(0., 1., 1.), (0., 1., 1.), (-1., 1., 0.)])
[1.0, 0.0, 5.0]
'''
return [np.sum((np.array(pred) - np.array(inst.output)) ** 2)
for inst, pred in zip(eval_data, predictions)]
|
[
"def",
"squared_error",
"(",
"eval_data",
",",
"predictions",
",",
"scores",
"=",
"'ignored'",
",",
"learner",
"=",
"'ignored'",
")",
":",
"return",
"[",
"np",
".",
"sum",
"(",
"(",
"np",
".",
"array",
"(",
"pred",
")",
"-",
"np",
".",
"array",
"(",
"inst",
".",
"output",
")",
")",
"**",
"2",
")",
"for",
"inst",
",",
"pred",
"in",
"zip",
"(",
"eval_data",
",",
"predictions",
")",
"]"
] |
Return the squared error of each prediction in `predictions` with respect
to the correct output in `eval_data`.
>>> data = [Instance('input', (0., 0., 1.)),
... Instance('input', (0., 1., 1.)),
... Instance('input', (1., 0., 0.))]
>>> squared_error(data, [(0., 1., 1.), (0., 1., 1.), (-1., 1., 0.)])
[1.0, 0.0, 5.0]
|
[
"Return",
"the",
"squared",
"error",
"of",
"each",
"prediction",
"in",
"predictions",
"with",
"respect",
"to",
"the",
"correct",
"output",
"in",
"eval_data",
"."
] |
920c55d8eaa1e7105971059c66eb448a74c100d6
|
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/metrics.py#L122-L134
|
18,076
|
drdoctr/doctr
|
doctr/local.py
|
encrypt_variable
|
def encrypt_variable(variable, build_repo, *, tld='.org', public_key=None,
travis_token=None, **login_kwargs):
"""
Encrypt an environment variable for ``build_repo`` for Travis
``variable`` should be a bytes object, of the form ``b'ENV=value'``.
``build_repo`` is the repo that ``doctr deploy`` will be run from. It
should be like 'drdoctr/doctr'.
``tld`` should be ``'.org'`` for travis-ci.org and ``'.com'`` for
travis-ci.com.
``public_key`` should be a pem format public key, obtained from Travis if
not provided.
If the repo is private, travis_token should be as returned by
``get_temporary_token(**login_kwargs)``. A token being present
automatically implies ``tld='.com'``.
"""
if not isinstance(variable, bytes):
raise TypeError("variable should be bytes")
if not b"=" in variable:
raise ValueError("variable should be of the form 'VARIABLE=value'")
if not public_key:
_headers = {
'Content-Type': 'application/json',
'User-Agent': 'MyClient/1.0.0',
}
headersv2 = {**_headers, **Travis_APIv2}
headersv3 = {**_headers, **Travis_APIv3}
if travis_token:
headersv3['Authorization'] = 'token {}'.format(travis_token)
res = requests.get('https://api.travis-ci.com/repo/{build_repo}/key_pair/generated'.format(build_repo=urllib.parse.quote(build_repo,
safe='')), headers=headersv3)
if res.json().get('file') == 'not found':
raise RuntimeError("Could not find the Travis public key for %s" % build_repo)
public_key = res.json()['public_key']
else:
res = requests.get('https://api.travis-ci{tld}/repos/{build_repo}/key'.format(build_repo=build_repo,
tld=tld),
headers=headersv2)
public_key = res.json()['key']
if res.status_code == requests.codes.not_found:
raise RuntimeError('Could not find requested repo on Travis. Is Travis enabled?')
res.raise_for_status()
public_key = public_key.replace("RSA PUBLIC KEY", "PUBLIC KEY").encode('utf-8')
key = serialization.load_pem_public_key(public_key, backend=default_backend())
pad = padding.PKCS1v15()
return base64.b64encode(key.encrypt(variable, pad))
|
python
|
def encrypt_variable(variable, build_repo, *, tld='.org', public_key=None,
travis_token=None, **login_kwargs):
"""
Encrypt an environment variable for ``build_repo`` for Travis
``variable`` should be a bytes object, of the form ``b'ENV=value'``.
``build_repo`` is the repo that ``doctr deploy`` will be run from. It
should be like 'drdoctr/doctr'.
``tld`` should be ``'.org'`` for travis-ci.org and ``'.com'`` for
travis-ci.com.
``public_key`` should be a pem format public key, obtained from Travis if
not provided.
If the repo is private, travis_token should be as returned by
``get_temporary_token(**login_kwargs)``. A token being present
automatically implies ``tld='.com'``.
"""
if not isinstance(variable, bytes):
raise TypeError("variable should be bytes")
if not b"=" in variable:
raise ValueError("variable should be of the form 'VARIABLE=value'")
if not public_key:
_headers = {
'Content-Type': 'application/json',
'User-Agent': 'MyClient/1.0.0',
}
headersv2 = {**_headers, **Travis_APIv2}
headersv3 = {**_headers, **Travis_APIv3}
if travis_token:
headersv3['Authorization'] = 'token {}'.format(travis_token)
res = requests.get('https://api.travis-ci.com/repo/{build_repo}/key_pair/generated'.format(build_repo=urllib.parse.quote(build_repo,
safe='')), headers=headersv3)
if res.json().get('file') == 'not found':
raise RuntimeError("Could not find the Travis public key for %s" % build_repo)
public_key = res.json()['public_key']
else:
res = requests.get('https://api.travis-ci{tld}/repos/{build_repo}/key'.format(build_repo=build_repo,
tld=tld),
headers=headersv2)
public_key = res.json()['key']
if res.status_code == requests.codes.not_found:
raise RuntimeError('Could not find requested repo on Travis. Is Travis enabled?')
res.raise_for_status()
public_key = public_key.replace("RSA PUBLIC KEY", "PUBLIC KEY").encode('utf-8')
key = serialization.load_pem_public_key(public_key, backend=default_backend())
pad = padding.PKCS1v15()
return base64.b64encode(key.encrypt(variable, pad))
|
[
"def",
"encrypt_variable",
"(",
"variable",
",",
"build_repo",
",",
"*",
",",
"tld",
"=",
"'.org'",
",",
"public_key",
"=",
"None",
",",
"travis_token",
"=",
"None",
",",
"*",
"*",
"login_kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"variable",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"\"variable should be bytes\"",
")",
"if",
"not",
"b\"=\"",
"in",
"variable",
":",
"raise",
"ValueError",
"(",
"\"variable should be of the form 'VARIABLE=value'\"",
")",
"if",
"not",
"public_key",
":",
"_headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
",",
"'User-Agent'",
":",
"'MyClient/1.0.0'",
",",
"}",
"headersv2",
"=",
"{",
"*",
"*",
"_headers",
",",
"*",
"*",
"Travis_APIv2",
"}",
"headersv3",
"=",
"{",
"*",
"*",
"_headers",
",",
"*",
"*",
"Travis_APIv3",
"}",
"if",
"travis_token",
":",
"headersv3",
"[",
"'Authorization'",
"]",
"=",
"'token {}'",
".",
"format",
"(",
"travis_token",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"'https://api.travis-ci.com/repo/{build_repo}/key_pair/generated'",
".",
"format",
"(",
"build_repo",
"=",
"urllib",
".",
"parse",
".",
"quote",
"(",
"build_repo",
",",
"safe",
"=",
"''",
")",
")",
",",
"headers",
"=",
"headersv3",
")",
"if",
"res",
".",
"json",
"(",
")",
".",
"get",
"(",
"'file'",
")",
"==",
"'not found'",
":",
"raise",
"RuntimeError",
"(",
"\"Could not find the Travis public key for %s\"",
"%",
"build_repo",
")",
"public_key",
"=",
"res",
".",
"json",
"(",
")",
"[",
"'public_key'",
"]",
"else",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"'https://api.travis-ci{tld}/repos/{build_repo}/key'",
".",
"format",
"(",
"build_repo",
"=",
"build_repo",
",",
"tld",
"=",
"tld",
")",
",",
"headers",
"=",
"headersv2",
")",
"public_key",
"=",
"res",
".",
"json",
"(",
")",
"[",
"'key'",
"]",
"if",
"res",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"not_found",
":",
"raise",
"RuntimeError",
"(",
"'Could not find requested repo on Travis. Is Travis enabled?'",
")",
"res",
".",
"raise_for_status",
"(",
")",
"public_key",
"=",
"public_key",
".",
"replace",
"(",
"\"RSA PUBLIC KEY\"",
",",
"\"PUBLIC KEY\"",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"key",
"=",
"serialization",
".",
"load_pem_public_key",
"(",
"public_key",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"pad",
"=",
"padding",
".",
"PKCS1v15",
"(",
")",
"return",
"base64",
".",
"b64encode",
"(",
"key",
".",
"encrypt",
"(",
"variable",
",",
"pad",
")",
")"
] |
Encrypt an environment variable for ``build_repo`` for Travis
``variable`` should be a bytes object, of the form ``b'ENV=value'``.
``build_repo`` is the repo that ``doctr deploy`` will be run from. It
should be like 'drdoctr/doctr'.
``tld`` should be ``'.org'`` for travis-ci.org and ``'.com'`` for
travis-ci.com.
``public_key`` should be a pem format public key, obtained from Travis if
not provided.
If the repo is private, travis_token should be as returned by
``get_temporary_token(**login_kwargs)``. A token being present
automatically implies ``tld='.com'``.
|
[
"Encrypt",
"an",
"environment",
"variable",
"for",
"build_repo",
"for",
"Travis"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L28-L84
|
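A usage sketch for encrypt_variable (the variable value is a placeholder); the result is base64-encoded RSA/PKCS1v15 ciphertext suitable for a Travis secure env entry. Note that in the .org/.com branch the row indexes res.json()['key'] before checking res.status_code, so a missing repo can surface as a KeyError before the intended RuntimeError:

encrypted = encrypt_variable(b'GH_TOKEN=abc123', build_repo='drdoctr/doctr')
print(encrypted.decode('utf-8'))   # paste into .travis.yml as a secure variable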
18,077
|
drdoctr/doctr
|
doctr/local.py
|
encrypt_to_file
|
def encrypt_to_file(contents, filename):
"""
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
"""
if not filename.endswith('.enc'):
raise ValueError("%s does not end with .enc" % filename)
key = Fernet.generate_key()
fer = Fernet(key)
encrypted_file = fer.encrypt(contents)
with open(filename, 'wb') as f:
f.write(encrypted_file)
return key
|
python
|
def encrypt_to_file(contents, filename):
"""
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
"""
if not filename.endswith('.enc'):
raise ValueError("%s does not end with .enc" % filename)
key = Fernet.generate_key()
fer = Fernet(key)
encrypted_file = fer.encrypt(contents)
with open(filename, 'wb') as f:
f.write(encrypted_file)
return key
|
[
"def",
"encrypt_to_file",
"(",
"contents",
",",
"filename",
")",
":",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.enc'",
")",
":",
"raise",
"ValueError",
"(",
"\"%s does not end with .enc\"",
"%",
"filename",
")",
"key",
"=",
"Fernet",
".",
"generate_key",
"(",
")",
"fer",
"=",
"Fernet",
"(",
"key",
")",
"encrypted_file",
"=",
"fer",
".",
"encrypt",
"(",
"contents",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"encrypted_file",
")",
"return",
"key"
] |
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
|
[
"Encrypts",
"contents",
"and",
"writes",
"it",
"to",
"filename",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L86-L109
|
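encrypt_to_file pairs with doctr.travis.decrypt_file (quoted in a later row of this excerpt); a round-trip sketch with hypothetical paths:

key = encrypt_to_file(b'secret deploy key bytes', 'github_deploy_key.enc')
# later, on Travis:
# decrypt_file('github_deploy_key.enc', key)   # writes github_deploy_key, mode 0o600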
18,078
|
drdoctr/doctr
|
doctr/local.py
|
GitHub_login
|
def GitHub_login(*, username=None, password=None, OTP=None, headers=None):
"""
Login to GitHub.
If no username, password, or OTP (2-factor authentication code) are
provided, they will be requested from the command line.
Returns a dict of kwargs that can be passed to functions that require
authenticated connections to GitHub.
"""
if not username:
username = input("What is your GitHub username? ")
if not password:
password = getpass("Enter the GitHub password for {username}: ".format(username=username))
headers = headers or {}
if OTP:
headers['X-GitHub-OTP'] = OTP
auth = HTTPBasicAuth(username, password)
r = requests.get('https://api.github.com/', auth=auth, headers=headers)
if r.status_code == 401:
two_factor = r.headers.get('X-GitHub-OTP')
if two_factor:
if OTP:
print(red("Invalid authentication code"))
# For SMS, we have to make a fake request (that will fail without
# the OTP) to get GitHub to send it. See https://github.com/drdoctr/doctr/pull/203
auth_header = base64.urlsafe_b64encode(bytes(username + ':' + password, 'utf8')).decode()
login_kwargs = {'auth': None, 'headers': {'Authorization': 'Basic {}'.format(auth_header)}}
try:
generate_GitHub_token(**login_kwargs)
except (requests.exceptions.HTTPError, GitHubError):
pass
print("A two-factor authentication code is required:", two_factor.split(';')[1].strip())
OTP = input("Authentication code: ")
return GitHub_login(username=username, password=password, OTP=OTP, headers=headers)
raise AuthenticationFailed("invalid username or password")
GitHub_raise_for_status(r)
return {'auth': auth, 'headers': headers}
|
python
|
def GitHub_login(*, username=None, password=None, OTP=None, headers=None):
"""
Login to GitHub.
If no username, password, or OTP (2-factor authentication code) are
provided, they will be requested from the command line.
Returns a dict of kwargs that can be passed to functions that require
authenticated connections to GitHub.
"""
if not username:
username = input("What is your GitHub username? ")
if not password:
password = getpass("Enter the GitHub password for {username}: ".format(username=username))
headers = headers or {}
if OTP:
headers['X-GitHub-OTP'] = OTP
auth = HTTPBasicAuth(username, password)
r = requests.get('https://api.github.com/', auth=auth, headers=headers)
if r.status_code == 401:
two_factor = r.headers.get('X-GitHub-OTP')
if two_factor:
if OTP:
print(red("Invalid authentication code"))
# For SMS, we have to make a fake request (that will fail without
# the OTP) to get GitHub to send it. See https://github.com/drdoctr/doctr/pull/203
auth_header = base64.urlsafe_b64encode(bytes(username + ':' + password, 'utf8')).decode()
login_kwargs = {'auth': None, 'headers': {'Authorization': 'Basic {}'.format(auth_header)}}
try:
generate_GitHub_token(**login_kwargs)
except (requests.exceptions.HTTPError, GitHubError):
pass
print("A two-factor authentication code is required:", two_factor.split(';')[1].strip())
OTP = input("Authentication code: ")
return GitHub_login(username=username, password=password, OTP=OTP, headers=headers)
raise AuthenticationFailed("invalid username or password")
GitHub_raise_for_status(r)
return {'auth': auth, 'headers': headers}
|
[
"def",
"GitHub_login",
"(",
"*",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"OTP",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"if",
"not",
"username",
":",
"username",
"=",
"input",
"(",
"\"What is your GitHub username? \"",
")",
"if",
"not",
"password",
":",
"password",
"=",
"getpass",
"(",
"\"Enter the GitHub password for {username}: \"",
".",
"format",
"(",
"username",
"=",
"username",
")",
")",
"headers",
"=",
"headers",
"or",
"{",
"}",
"if",
"OTP",
":",
"headers",
"[",
"'X-GitHub-OTP'",
"]",
"=",
"OTP",
"auth",
"=",
"HTTPBasicAuth",
"(",
"username",
",",
"password",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"'https://api.github.com/'",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
")",
"if",
"r",
".",
"status_code",
"==",
"401",
":",
"two_factor",
"=",
"r",
".",
"headers",
".",
"get",
"(",
"'X-GitHub-OTP'",
")",
"if",
"two_factor",
":",
"if",
"OTP",
":",
"print",
"(",
"red",
"(",
"\"Invalid authentication code\"",
")",
")",
"# For SMS, we have to make a fake request (that will fail without",
"# the OTP) to get GitHub to send it. See https://github.com/drdoctr/doctr/pull/203",
"auth_header",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"bytes",
"(",
"username",
"+",
"':'",
"+",
"password",
",",
"'utf8'",
")",
")",
".",
"decode",
"(",
")",
"login_kwargs",
"=",
"{",
"'auth'",
":",
"None",
",",
"'headers'",
":",
"{",
"'Authorization'",
":",
"'Basic {}'",
".",
"format",
"(",
"auth_header",
")",
"}",
"}",
"try",
":",
"generate_GitHub_token",
"(",
"*",
"*",
"login_kwargs",
")",
"except",
"(",
"requests",
".",
"exceptions",
".",
"HTTPError",
",",
"GitHubError",
")",
":",
"pass",
"print",
"(",
"\"A two-factor authentication code is required:\"",
",",
"two_factor",
".",
"split",
"(",
"';'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"OTP",
"=",
"input",
"(",
"\"Authentication code: \"",
")",
"return",
"GitHub_login",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"OTP",
"=",
"OTP",
",",
"headers",
"=",
"headers",
")",
"raise",
"AuthenticationFailed",
"(",
"\"invalid username or password\"",
")",
"GitHub_raise_for_status",
"(",
"r",
")",
"return",
"{",
"'auth'",
":",
"auth",
",",
"'headers'",
":",
"headers",
"}"
] |
Login to GitHub.
If no username, password, or OTP (2-factor authentication code) are
provided, they will be requested from the command line.
Returns a dict of kwargs that can be passed to functions that require
authenticated connections to GitHub.
|
[
"Login",
"to",
"GitHub",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L114-L158
|
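GitHub_login returns kwargs that thread through the other GitHub helpers in this file; a usage sketch (credentials are prompted on the command line when omitted):

login_kwargs = GitHub_login()                        # prompts for username/password/OTP
token_info = generate_GitHub_token(**login_kwargs)   # quoted in a later row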
18,079
|
drdoctr/doctr
|
doctr/local.py
|
GitHub_post
|
def GitHub_post(data, url, *, auth, headers):
"""
POST the data ``data`` to GitHub.
Returns the json response from the server, or raises on error status.
"""
r = requests.post(url, auth=auth, headers=headers, data=json.dumps(data))
GitHub_raise_for_status(r)
return r.json()
|
python
|
def GitHub_post(data, url, *, auth, headers):
"""
POST the data ``data`` to GitHub.
Returns the json response from the server, or raises on error status.
"""
r = requests.post(url, auth=auth, headers=headers, data=json.dumps(data))
GitHub_raise_for_status(r)
return r.json()
|
[
"def",
"GitHub_post",
"(",
"data",
",",
"url",
",",
"*",
",",
"auth",
",",
"headers",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"GitHub_raise_for_status",
"(",
"r",
")",
"return",
"r",
".",
"json",
"(",
")"
] |
POST the data ``data`` to GitHub.
Returns the json response from the server, or raises on error status.
|
[
"POST",
"the",
"data",
"data",
"to",
"GitHub",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L215-L224
|
18,080
|
drdoctr/doctr
|
doctr/local.py
|
get_travis_token
|
def get_travis_token(*, GitHub_token=None, **login_kwargs):
"""
Generate a temporary token for authenticating with Travis
The GitHub token can be passed in to the ``GitHub_token`` keyword
argument. If no token is passed in, a GitHub token is generated
temporarily, and then immediately deleted.
This is needed to activate a private repo
Returns the secret token. It should be added to the headers like
headers['Authorization'] = "token {}".format(token)
"""
_headers = {
'Content-Type': 'application/json',
'User-Agent': 'MyClient/1.0.0',
}
headersv2 = {**_headers, **Travis_APIv2}
token_id = None
try:
if not GitHub_token:
print(green("I need to generate a temporary token with GitHub to authenticate with Travis. You may get a warning email from GitHub about this."))
print(green("It will be deleted immediately. If you still see it after this at https://github.com/settings/tokens after please delete it manually."))
# /auth/github doesn't seem to exist in the Travis API v3.
tok_dict = generate_GitHub_token(scopes=["read:org", "user:email", "repo"],
note="temporary token for doctr to auth against travis (delete me)",
**login_kwargs)
GitHub_token = tok_dict['token']
token_id = tok_dict['id']
data = {'github_token': GitHub_token}
res = requests.post('https://api.travis-ci.com/auth/github', data=json.dumps(data), headers=headersv2)
return res.json()['access_token']
finally:
if token_id:
delete_GitHub_token(token_id, **login_kwargs)
|
python
|
def get_travis_token(*, GitHub_token=None, **login_kwargs):
"""
Generate a temporary token for authenticating with Travis
The GitHub token can be passed in to the ``GitHub_token`` keyword
argument. If no token is passed in, a GitHub token is generated
temporarily, and then immediately deleted.
This is needed to activate a private repo
Returns the secret token. It should be added to the headers like
headers['Authorization'] = "token {}".format(token)
"""
_headers = {
'Content-Type': 'application/json',
'User-Agent': 'MyClient/1.0.0',
}
headersv2 = {**_headers, **Travis_APIv2}
token_id = None
try:
if not GitHub_token:
print(green("I need to generate a temporary token with GitHub to authenticate with Travis. You may get a warning email from GitHub about this."))
print(green("It will be deleted immediately. If you still see it after this at https://github.com/settings/tokens after please delete it manually."))
# /auth/github doesn't seem to exist in the Travis API v3.
tok_dict = generate_GitHub_token(scopes=["read:org", "user:email", "repo"],
note="temporary token for doctr to auth against travis (delete me)",
**login_kwargs)
GitHub_token = tok_dict['token']
token_id = tok_dict['id']
data = {'github_token': GitHub_token}
res = requests.post('https://api.travis-ci.com/auth/github', data=json.dumps(data), headers=headersv2)
return res.json()['access_token']
finally:
if token_id:
delete_GitHub_token(token_id, **login_kwargs)
|
[
"def",
"get_travis_token",
"(",
"*",
",",
"GitHub_token",
"=",
"None",
",",
"*",
"*",
"login_kwargs",
")",
":",
"_headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
",",
"'User-Agent'",
":",
"'MyClient/1.0.0'",
",",
"}",
"headersv2",
"=",
"{",
"*",
"*",
"_headers",
",",
"*",
"*",
"Travis_APIv2",
"}",
"token_id",
"=",
"None",
"try",
":",
"if",
"not",
"GitHub_token",
":",
"print",
"(",
"green",
"(",
"\"I need to generate a temporary token with GitHub to authenticate with Travis. You may get a warning email from GitHub about this.\"",
")",
")",
"print",
"(",
"green",
"(",
"\"It will be deleted immediately. If you still see it after this at https://github.com/settings/tokens after please delete it manually.\"",
")",
")",
"# /auth/github doesn't seem to exist in the Travis API v3.",
"tok_dict",
"=",
"generate_GitHub_token",
"(",
"scopes",
"=",
"[",
"\"read:org\"",
",",
"\"user:email\"",
",",
"\"repo\"",
"]",
",",
"note",
"=",
"\"temporary token for doctr to auth against travis (delete me)\"",
",",
"*",
"*",
"login_kwargs",
")",
"GitHub_token",
"=",
"tok_dict",
"[",
"'token'",
"]",
"token_id",
"=",
"tok_dict",
"[",
"'id'",
"]",
"data",
"=",
"{",
"'github_token'",
":",
"GitHub_token",
"}",
"res",
"=",
"requests",
".",
"post",
"(",
"'https://api.travis-ci.com/auth/github'",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"headers",
"=",
"headersv2",
")",
"return",
"res",
".",
"json",
"(",
")",
"[",
"'access_token'",
"]",
"finally",
":",
"if",
"token_id",
":",
"delete_GitHub_token",
"(",
"token_id",
",",
"*",
"*",
"login_kwargs",
")"
] |
Generate a temporary token for authenticating with Travis
The GitHub token can be passed in to the ``GitHub_token`` keyword
argument. If no token is passed in, a GitHub token is generated
temporarily, and then immediately deleted.
This is needed to activate a private repo
Returns the secret token. It should be added to the headers like
headers['Authorization'] = "token {}".format(token)
|
[
"Generate",
"a",
"temporary",
"token",
"for",
"authenticating",
"with",
"Travis"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L227-L264
|
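Per its docstring, get_travis_token's return value belongs in a Travis API Authorization header; a sketch:

token = get_travis_token(**GitHub_login())
headers = {'Authorization': 'token {}'.format(token)}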
18,081
|
drdoctr/doctr
|
doctr/local.py
|
generate_GitHub_token
|
def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs):
"""
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
"""
if scopes is None:
scopes = ['public_repo']
AUTH_URL = "https://api.github.com/authorizations"
data = {
"scopes": scopes,
"note": note,
"note_url": "https://github.com/drdoctr/doctr",
"fingerprint": str(uuid.uuid4()),
}
return GitHub_post(data, AUTH_URL, **login_kwargs)
|
python
|
def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs):
"""
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
"""
if scopes is None:
scopes = ['public_repo']
AUTH_URL = "https://api.github.com/authorizations"
data = {
"scopes": scopes,
"note": note,
"note_url": "https://github.com/drdoctr/doctr",
"fingerprint": str(uuid.uuid4()),
}
return GitHub_post(data, AUTH_URL, **login_kwargs)
|
[
"def",
"generate_GitHub_token",
"(",
"*",
",",
"note",
"=",
"\"Doctr token for pushing to gh-pages from Travis\"",
",",
"scopes",
"=",
"None",
",",
"*",
"*",
"login_kwargs",
")",
":",
"if",
"scopes",
"is",
"None",
":",
"scopes",
"=",
"[",
"'public_repo'",
"]",
"AUTH_URL",
"=",
"\"https://api.github.com/authorizations\"",
"data",
"=",
"{",
"\"scopes\"",
":",
"scopes",
",",
"\"note\"",
":",
"note",
",",
"\"note_url\"",
":",
"\"https://github.com/drdoctr/doctr\"",
",",
"\"fingerprint\"",
":",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
",",
"}",
"return",
"GitHub_post",
"(",
"data",
",",
"AUTH_URL",
",",
"*",
"*",
"login_kwargs",
")"
] |
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
|
[
"Generate",
"a",
"GitHub",
"token",
"for",
"pushing",
"from",
"Travis"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L267-L288
|
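A sketch for generate_GitHub_token with a non-default scope. (Caveat: GitHub retired the OAuth Authorizations API used here in late 2020, so this call fails against current GitHub; it is shown only to illustrate the row above.)

tok = generate_GitHub_token(scopes=['repo'], note='example token (delete me)',
                            **GitHub_login())
print(tok['token'], tok['id'])   # the keys consumed by get_travis_token above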
18,082
|
drdoctr/doctr
|
doctr/local.py
|
delete_GitHub_token
|
def delete_GitHub_token(token_id, *, auth, headers):
"""Delete a temporary GitHub token"""
r = requests.delete('https://api.github.com/authorizations/{id}'.format(id=token_id), auth=auth, headers=headers)
GitHub_raise_for_status(r)
|
python
|
def delete_GitHub_token(token_id, *, auth, headers):
"""Delete a temporary GitHub token"""
r = requests.delete('https://api.github.com/authorizations/{id}'.format(id=token_id), auth=auth, headers=headers)
GitHub_raise_for_status(r)
|
[
"def",
"delete_GitHub_token",
"(",
"token_id",
",",
"*",
",",
"auth",
",",
"headers",
")",
":",
"r",
"=",
"requests",
".",
"delete",
"(",
"'https://api.github.com/authorizations/{id}'",
".",
"format",
"(",
"id",
"=",
"token_id",
")",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
")",
"GitHub_raise_for_status",
"(",
"r",
")"
] |
Delete a temporary GitHub token
|
[
"Delete",
"a",
"temporary",
"GitHub",
"token"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L291-L294
|
18,083
|
drdoctr/doctr
|
doctr/local.py
|
upload_GitHub_deploy_key
|
def upload_GitHub_deploy_key(deploy_repo, ssh_key, *, read_only=False,
title="Doctr deploy key for pushing to gh-pages from Travis", **login_kwargs):
"""
Uploads a GitHub deploy key to ``deploy_repo``.
If ``read_only=True``, the deploy_key will not be able to write to the
repo.
"""
DEPLOY_KEY_URL = "https://api.github.com/repos/{deploy_repo}/keys".format(deploy_repo=deploy_repo)
data = {
"title": title,
"key": ssh_key,
"read_only": read_only,
}
return GitHub_post(data, DEPLOY_KEY_URL, **login_kwargs)
|
python
|
def upload_GitHub_deploy_key(deploy_repo, ssh_key, *, read_only=False,
title="Doctr deploy key for pushing to gh-pages from Travis", **login_kwargs):
"""
Uploads a GitHub deploy key to ``deploy_repo``.
If ``read_only=True``, the deploy_key will not be able to write to the
repo.
"""
DEPLOY_KEY_URL = "https://api.github.com/repos/{deploy_repo}/keys".format(deploy_repo=deploy_repo)
data = {
"title": title,
"key": ssh_key,
"read_only": read_only,
}
return GitHub_post(data, DEPLOY_KEY_URL, **login_kwargs)
|
[
"def",
"upload_GitHub_deploy_key",
"(",
"deploy_repo",
",",
"ssh_key",
",",
"*",
",",
"read_only",
"=",
"False",
",",
"title",
"=",
"\"Doctr deploy key for pushing to gh-pages from Travis\"",
",",
"*",
"*",
"login_kwargs",
")",
":",
"DEPLOY_KEY_URL",
"=",
"\"https://api.github.com/repos/{deploy_repo}/keys\"",
".",
"format",
"(",
"deploy_repo",
"=",
"deploy_repo",
")",
"data",
"=",
"{",
"\"title\"",
":",
"title",
",",
"\"key\"",
":",
"ssh_key",
",",
"\"read_only\"",
":",
"read_only",
",",
"}",
"return",
"GitHub_post",
"(",
"data",
",",
"DEPLOY_KEY_URL",
",",
"*",
"*",
"login_kwargs",
")"
] |
Uploads a GitHub deploy key to ``deploy_repo``.
If ``read_only=True``, the deploy_key will not be able to write to the
repo.
|
[
"Uploads",
"a",
"GitHub",
"deploy",
"key",
"to",
"deploy_repo",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L297-L312
|
18,084
|
drdoctr/doctr
|
doctr/local.py
|
generate_ssh_key
|
def generate_ssh_key():
"""
Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings.
"""
key = rsa.generate_private_key(
backend=default_backend(),
public_exponent=65537,
key_size=4096
)
private_key = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption())
public_key = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
)
return private_key, public_key
|
python
|
def generate_ssh_key():
"""
Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings.
"""
key = rsa.generate_private_key(
backend=default_backend(),
public_exponent=65537,
key_size=4096
)
private_key = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption())
public_key = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
)
return private_key, public_key
|
[
"def",
"generate_ssh_key",
"(",
")",
":",
"key",
"=",
"rsa",
".",
"generate_private_key",
"(",
"backend",
"=",
"default_backend",
"(",
")",
",",
"public_exponent",
"=",
"65537",
",",
"key_size",
"=",
"4096",
")",
"private_key",
"=",
"key",
".",
"private_bytes",
"(",
"serialization",
".",
"Encoding",
".",
"PEM",
",",
"serialization",
".",
"PrivateFormat",
".",
"PKCS8",
",",
"serialization",
".",
"NoEncryption",
"(",
")",
")",
"public_key",
"=",
"key",
".",
"public_key",
"(",
")",
".",
"public_bytes",
"(",
"serialization",
".",
"Encoding",
".",
"OpenSSH",
",",
"serialization",
".",
"PublicFormat",
".",
"OpenSSH",
")",
"return",
"private_key",
",",
"public_key"
] |
Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings.
|
[
"Generates",
"an",
"SSH",
"deploy",
"public",
"and",
"private",
"key",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L314-L335
|
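generate_ssh_key and upload_GitHub_deploy_key (the previous row) are used together: upload the public half, encrypt and commit the private half. A sketch (the repo name is a placeholder; note the key is returned as bytes and json.dumps in GitHub_post needs str):

private_key, public_key = generate_ssh_key()          # both are byte strings
upload_GitHub_deploy_key('drdoctr/doctr',
                         public_key.decode('utf-8'),
                         **GitHub_login())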
18,085
|
drdoctr/doctr
|
doctr/local.py
|
guess_github_repo
|
def guess_github_repo():
"""
Guesses the github repo for the current directory
Returns False if no guess can be made.
"""
p = subprocess.run(['git', 'ls-remote', '--get-url', 'origin'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
if p.stderr or p.returncode:
return False
url = p.stdout.decode('utf-8').strip()
m = GIT_URL.fullmatch(url)
if not m:
return False
return m.group(1)
|
python
|
def guess_github_repo():
"""
Guesses the github repo for the current directory
Returns False if no guess can be made.
"""
p = subprocess.run(['git', 'ls-remote', '--get-url', 'origin'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
if p.stderr or p.returncode:
return False
url = p.stdout.decode('utf-8').strip()
m = GIT_URL.fullmatch(url)
if not m:
return False
return m.group(1)
|
[
"def",
"guess_github_repo",
"(",
")",
":",
"p",
"=",
"subprocess",
".",
"run",
"(",
"[",
"'git'",
",",
"'ls-remote'",
",",
"'--get-url'",
",",
"'origin'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"check",
"=",
"False",
")",
"if",
"p",
".",
"stderr",
"or",
"p",
".",
"returncode",
":",
"return",
"False",
"url",
"=",
"p",
".",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"m",
"=",
"GIT_URL",
".",
"fullmatch",
"(",
"url",
")",
"if",
"not",
"m",
":",
"return",
"False",
"return",
"m",
".",
"group",
"(",
"1",
")"
] |
Guesses the github repo for the current directory
Returns False if no guess can be made.
|
[
"Guesses",
"the",
"github",
"repo",
"for",
"the",
"current",
"directory"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L453-L468
|
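guess_github_repo returns either the repo slug or False, so callers must test before use; a sketch:

repo = guess_github_repo()
if not repo:
    raise SystemExit('could not guess the GitHub repo from the origin remote')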
18,086
|
drdoctr/doctr
|
doctr/__main__.py
|
get_config
|
def get_config():
"""
This loads some configuration from ``.travis.yml``, if the file is present,
using the ``doctr`` key if present.
"""
p = Path('.travis.yml')
if not p.exists():
return {}
with p.open() as f:
travis_config = yaml.safe_load(f.read())
config = travis_config.get('doctr', {})
if not isinstance(config, dict):
raise ValueError('config is not a dict: {}'.format(config))
return config
|
python
|
def get_config():
"""
This loads some configuration from ``.travis.yml``, if the file is present,
using the ``doctr`` key if present.
"""
p = Path('.travis.yml')
if not p.exists():
return {}
with p.open() as f:
travis_config = yaml.safe_load(f.read())
config = travis_config.get('doctr', {})
if not isinstance(config, dict):
raise ValueError('config is not a dict: {}'.format(config))
return config
|
[
"def",
"get_config",
"(",
")",
":",
"p",
"=",
"Path",
"(",
"'.travis.yml'",
")",
"if",
"not",
"p",
".",
"exists",
"(",
")",
":",
"return",
"{",
"}",
"with",
"p",
".",
"open",
"(",
")",
"as",
"f",
":",
"travis_config",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
".",
"read",
"(",
")",
")",
"config",
"=",
"travis_config",
".",
"get",
"(",
"'doctr'",
",",
"{",
"}",
")",
"if",
"not",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'config is not a dict: {}'",
".",
"format",
"(",
"config",
")",
")",
"return",
"config"
] |
This loads some configuration from ``.travis.yml``, if the file is present,
using the ``doctr`` key if present.
|
[
"This",
"load",
"some",
"configuration",
"from",
"the",
".",
"travis",
".",
"yml",
"if",
"file",
"is",
"present",
"doctr",
"key",
"if",
"present",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/__main__.py#L219-L234
|
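get_config reads only the top-level doctr key of .travis.yml; a sketch of a matching file and call (the key name under doctr is a placeholder):

# .travis.yml (illustrative):
#   doctr:
#     require-master: true
config = get_config()                        # {} when .travis.yml is absent
print(config.get('require-master', False))   # key name is a placeholder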
18,087
|
drdoctr/doctr
|
doctr/travis.py
|
decrypt_file
|
def decrypt_file(file, key):
"""
Decrypts the file ``file``.
The encrypted file is assumed to end with the ``.enc`` extension. The
decrypted file is saved to the same location without the ``.enc``
extension.
The permissions on the decrypted file are automatically set to 0o600.
See also :func:`doctr.local.encrypt_file`.
"""
if not file.endswith('.enc'):
raise ValueError("%s does not end with .enc" % file)
fer = Fernet(key)
with open(file, 'rb') as f:
decrypted_file = fer.decrypt(f.read())
with open(file[:-4], 'wb') as f:
f.write(decrypted_file)
os.chmod(file[:-4], 0o600)
|
python
|
def decrypt_file(file, key):
"""
Decrypts the file ``file``.
The encrypted file is assumed to end with the ``.enc`` extension. The
decrypted file is saved to the same location without the ``.enc``
extension.
The permissions on the decrypted file are automatically set to 0o600.
See also :func:`doctr.local.encrypt_file`.
"""
if not file.endswith('.enc'):
raise ValueError("%s does not end with .enc" % file)
fer = Fernet(key)
with open(file, 'rb') as f:
decrypted_file = fer.decrypt(f.read())
with open(file[:-4], 'wb') as f:
f.write(decrypted_file)
os.chmod(file[:-4], 0o600)
|
[
"def",
"decrypt_file",
"(",
"file",
",",
"key",
")",
":",
"if",
"not",
"file",
".",
"endswith",
"(",
"'.enc'",
")",
":",
"raise",
"ValueError",
"(",
"\"%s does not end with .enc\"",
"%",
"file",
")",
"fer",
"=",
"Fernet",
"(",
"key",
")",
"with",
"open",
"(",
"file",
",",
"'rb'",
")",
"as",
"f",
":",
"decrypted_file",
"=",
"fer",
".",
"decrypt",
"(",
"f",
".",
"read",
"(",
")",
")",
"with",
"open",
"(",
"file",
"[",
":",
"-",
"4",
"]",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"decrypted_file",
")",
"os",
".",
"chmod",
"(",
"file",
"[",
":",
"-",
"4",
"]",
",",
"0o600",
")"
] |
Decrypts the file ``file``.
The encrypted file is assumed to end with the ``.enc`` extension. The
decrypted file is saved to the same location without the ``.enc``
extension.
The permissions on the decrypted file are automatically set to 0o600.
See also :func:`doctr.local.encrypt_file`.
|
[
"Decrypts",
"the",
"file",
"file",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L23-L47
|
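Editor's sketch (not a dataset row): round-tripping decrypt_file from the row above with a Fernet key, assuming doctr and the cryptography package are installed; all filenames and key material are invented.

import os
from cryptography.fernet import Fernet
from doctr.travis import decrypt_file

key = Fernet.generate_key()  # symmetric key, bytes, as the function expects
with open('github_deploy_key.enc', 'wb') as f:
    f.write(Fernet(key).encrypt(b'fake private key material'))

decrypt_file('github_deploy_key.enc', key)  # writes ./github_deploy_key

# The decrypted file drops the .enc suffix and is chmod'ed to 0o600.
assert oct(os.stat('github_deploy_key').st_mode & 0o777) == '0o600'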
18,088
|
drdoctr/doctr
|
doctr/travis.py
|
setup_deploy_key
|
def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'):
"""
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility.
"""
key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None))
if not key:
raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error."
.format(env_name=env_name))
# Legacy keyfile name
if (not os.path.isfile(keypath + key_ext) and
os.path.isfile('github_deploy_key' + key_ext)):
keypath = 'github_deploy_key'
key_filename = os.path.basename(keypath)
key = key.encode('utf-8')
decrypt_file(keypath + key_ext, key)
key_path = os.path.expanduser("~/.ssh/" + key_filename)
os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True)
os.rename(keypath, key_path)
with open(os.path.expanduser("~/.ssh/config"), 'a') as f:
f.write("Host github.com"
' IdentityFile "%s"'
" LogLevel ERROR\n" % key_path)
# start ssh-agent and add key to it
# info from SSH agent has to be put into the environment
agent_info = subprocess.check_output(['ssh-agent', '-s'])
agent_info = agent_info.decode('utf-8')
agent_info = agent_info.split()
AUTH_SOCK = agent_info[0].split('=')[1][:-1]
AGENT_PID = agent_info[3].split('=')[1][:-1]
os.putenv('SSH_AUTH_SOCK', AUTH_SOCK)
os.putenv('SSH_AGENT_PID', AGENT_PID)
run(['ssh-add', os.path.expanduser('~/.ssh/' + key_filename)])
|
python
|
def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'):
"""
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility.
"""
key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None))
if not key:
raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error."
.format(env_name=env_name))
# Legacy keyfile name
if (not os.path.isfile(keypath + key_ext) and
os.path.isfile('github_deploy_key' + key_ext)):
keypath = 'github_deploy_key'
key_filename = os.path.basename(keypath)
key = key.encode('utf-8')
decrypt_file(keypath + key_ext, key)
key_path = os.path.expanduser("~/.ssh/" + key_filename)
os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True)
os.rename(keypath, key_path)
with open(os.path.expanduser("~/.ssh/config"), 'a') as f:
f.write("Host github.com"
' IdentityFile "%s"'
" LogLevel ERROR\n" % key_path)
# start ssh-agent and add key to it
# info from SSH agent has to be put into the environment
agent_info = subprocess.check_output(['ssh-agent', '-s'])
agent_info = agent_info.decode('utf-8')
agent_info = agent_info.split()
AUTH_SOCK = agent_info[0].split('=')[1][:-1]
AGENT_PID = agent_info[3].split('=')[1][:-1]
os.putenv('SSH_AUTH_SOCK', AUTH_SOCK)
os.putenv('SSH_AGENT_PID', AGENT_PID)
run(['ssh-add', os.path.expanduser('~/.ssh/' + key_filename)])
|
[
"def",
"setup_deploy_key",
"(",
"keypath",
"=",
"'github_deploy_key'",
",",
"key_ext",
"=",
"'.enc'",
",",
"env_name",
"=",
"'DOCTR_DEPLOY_ENCRYPTION_KEY'",
")",
":",
"key",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"env_name",
",",
"os",
".",
"environ",
".",
"get",
"(",
"\"DOCTR_DEPLOY_ENCRYPTION_KEY\"",
",",
"None",
")",
")",
"if",
"not",
"key",
":",
"raise",
"RuntimeError",
"(",
"\"{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error.\"",
".",
"format",
"(",
"env_name",
"=",
"env_name",
")",
")",
"# Legacy keyfile name",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"keypath",
"+",
"key_ext",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"'github_deploy_key'",
"+",
"key_ext",
")",
")",
":",
"keypath",
"=",
"'github_deploy_key'",
"key_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"keypath",
")",
"key",
"=",
"key",
".",
"encode",
"(",
"'utf-8'",
")",
"decrypt_file",
"(",
"keypath",
"+",
"key_ext",
",",
"key",
")",
"key_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh/\"",
"+",
"key_filename",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh\"",
")",
",",
"exist_ok",
"=",
"True",
")",
"os",
".",
"rename",
"(",
"keypath",
",",
"key_path",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh/config\"",
")",
",",
"'a'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"Host github.com\"",
"' IdentityFile \"%s\"'",
"\" LogLevel ERROR\\n\"",
"%",
"key_path",
")",
"# start ssh-agent and add key to it",
"# info from SSH agent has to be put into the environment",
"agent_info",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'ssh-agent'",
",",
"'-s'",
"]",
")",
"agent_info",
"=",
"agent_info",
".",
"decode",
"(",
"'utf-8'",
")",
"agent_info",
"=",
"agent_info",
".",
"split",
"(",
")",
"AUTH_SOCK",
"=",
"agent_info",
"[",
"0",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"AGENT_PID",
"=",
"agent_info",
"[",
"3",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"os",
".",
"putenv",
"(",
"'SSH_AUTH_SOCK'",
",",
"AUTH_SOCK",
")",
"os",
".",
"putenv",
"(",
"'SSH_AGENT_PID'",
",",
"AGENT_PID",
")",
"run",
"(",
"[",
"'ssh-add'",
",",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.ssh/'",
"+",
"key_filename",
")",
"]",
")"
] |
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility.
|
[
"Decrypts",
"the",
"deploy",
"key",
"and",
"configures",
"it",
"with",
"ssh"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L49-L95
|
18,089
|
drdoctr/doctr
|
doctr/travis.py
|
get_token
|
def get_token():
"""
Get the encrypted GitHub token in Travis.
Make sure the contents this variable do not leak. The ``run()`` function
will remove this from the output, so always use it.
"""
token = os.environ.get("GH_TOKEN", None)
if not token:
token = "GH_TOKEN environment variable not set"
token = token.encode('utf-8')
return token
|
python
|
def get_token():
"""
Get the encrypted GitHub token in Travis.
Make sure the contents this variable do not leak. The ``run()`` function
will remove this from the output, so always use it.
"""
token = os.environ.get("GH_TOKEN", None)
if not token:
token = "GH_TOKEN environment variable not set"
token = token.encode('utf-8')
return token
|
[
"def",
"get_token",
"(",
")",
":",
"token",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"GH_TOKEN\"",
",",
"None",
")",
"if",
"not",
"token",
":",
"token",
"=",
"\"GH_TOKEN environment variable not set\"",
"token",
"=",
"token",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"token"
] |
Get the encrypted GitHub token in Travis.
Make sure the contents this variable do not leak. The ``run()`` function
will remove this from the output, so always use it.
|
[
"Get",
"the",
"encrypted",
"GitHub",
"token",
"in",
"Travis",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L114-L125
|
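Editor's sketch (not a dataset row): get_token from the row above never raises; it substitutes a placeholder string when GH_TOKEN is unset and always returns bytes. The environment values here are invented.

import os
from doctr.travis import get_token

os.environ.pop('GH_TOKEN', None)
assert get_token() == b'GH_TOKEN environment variable not set'

os.environ['GH_TOKEN'] = 'ghp_example'  # invented token
assert get_token() == b'ghp_example'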
18,090
|
drdoctr/doctr
|
doctr/travis.py
|
run
|
def run(args, shell=False, exit=True):
"""
Run the command ``args``.
Automatically hides the secret GitHub token from the output.
If shell=False (recommended for most commands), args should be a list of
strings. If shell=True, args should be a string of the command to run.
If exit=True, it exits on nonzero returncode. Otherwise it returns the
returncode.
"""
if "GH_TOKEN" in os.environ:
token = get_token()
else:
token = b''
if not shell:
command = ' '.join(map(shlex.quote, args))
else:
command = args
command = command.replace(token.decode('utf-8'), '~'*len(token))
print(blue(command))
sys.stdout.flush()
returncode = run_command_hiding_token(args, token, shell=shell)
if exit and returncode != 0:
sys.exit(red("%s failed: %s" % (command, returncode)))
return returncode
|
python
|
def run(args, shell=False, exit=True):
"""
Run the command ``args``.
Automatically hides the secret GitHub token from the output.
If shell=False (recommended for most commands), args should be a list of
strings. If shell=True, args should be a string of the command to run.
If exit=True, it exits on nonzero returncode. Otherwise it returns the
returncode.
"""
if "GH_TOKEN" in os.environ:
token = get_token()
else:
token = b''
if not shell:
command = ' '.join(map(shlex.quote, args))
else:
command = args
command = command.replace(token.decode('utf-8'), '~'*len(token))
print(blue(command))
sys.stdout.flush()
returncode = run_command_hiding_token(args, token, shell=shell)
if exit and returncode != 0:
sys.exit(red("%s failed: %s" % (command, returncode)))
return returncode
|
[
"def",
"run",
"(",
"args",
",",
"shell",
"=",
"False",
",",
"exit",
"=",
"True",
")",
":",
"if",
"\"GH_TOKEN\"",
"in",
"os",
".",
"environ",
":",
"token",
"=",
"get_token",
"(",
")",
"else",
":",
"token",
"=",
"b''",
"if",
"not",
"shell",
":",
"command",
"=",
"' '",
".",
"join",
"(",
"map",
"(",
"shlex",
".",
"quote",
",",
"args",
")",
")",
"else",
":",
"command",
"=",
"args",
"command",
"=",
"command",
".",
"replace",
"(",
"token",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"'~'",
"*",
"len",
"(",
"token",
")",
")",
"print",
"(",
"blue",
"(",
"command",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"returncode",
"=",
"run_command_hiding_token",
"(",
"args",
",",
"token",
",",
"shell",
"=",
"shell",
")",
"if",
"exit",
"and",
"returncode",
"!=",
"0",
":",
"sys",
".",
"exit",
"(",
"red",
"(",
"\"%s failed: %s\"",
"%",
"(",
"command",
",",
"returncode",
")",
")",
")",
"return",
"returncode"
] |
Run the command ``args``.
Automatically hides the secret GitHub token from the output.
If shell=False (recommended for most commands), args should be a list of
strings. If shell=True, args should be a string of the command to run.
If exit=True, it exits on nonzero returncode. Otherwise it returns the
returncode.
|
[
"Run",
"the",
"command",
"args",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L127-L156
|
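Editor's sketch (not a dataset row): hypothetical calls against run from the row above, which masks the GitHub token in echoed commands and, by default, exits the process on a nonzero return code.

import os
from doctr.travis import run

os.environ['GH_TOKEN'] = 'ghp_example'  # invented; echoed commands mask it as ~~~

run(['git', 'status'])                  # list form, shell=False (the default)
rc = run('git diff --quiet', shell=True, exit=False)  # exit=False: return the code
print('dirty tree' if rc else 'clean tree')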
18,091
|
drdoctr/doctr
|
doctr/travis.py
|
get_current_repo
|
def get_current_repo():
"""
Get the GitHub repo name for the current directory.
Assumes that the repo is in the ``origin`` remote.
"""
remote_url = subprocess.check_output(['git', 'config', '--get',
'remote.origin.url']).decode('utf-8')
# Travis uses the https clone url
_, org, git_repo = remote_url.rsplit('.git', 1)[0].rsplit('/', 2)
return (org + '/' + git_repo)
|
python
|
def get_current_repo():
"""
Get the GitHub repo name for the current directory.
Assumes that the repo is in the ``origin`` remote.
"""
remote_url = subprocess.check_output(['git', 'config', '--get',
'remote.origin.url']).decode('utf-8')
# Travis uses the https clone url
_, org, git_repo = remote_url.rsplit('.git', 1)[0].rsplit('/', 2)
return (org + '/' + git_repo)
|
[
"def",
"get_current_repo",
"(",
")",
":",
"remote_url",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'--get'",
",",
"'remote.origin.url'",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"# Travis uses the https clone url",
"_",
",",
"org",
",",
"git_repo",
"=",
"remote_url",
".",
"rsplit",
"(",
"'.git'",
",",
"1",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'/'",
",",
"2",
")",
"return",
"(",
"org",
"+",
"'/'",
"+",
"git_repo",
")"
] |
Get the GitHub repo name for the current directory.
Assumes that the repo is in the ``origin`` remote.
|
[
"Get",
"the",
"GitHub",
"repo",
"name",
"for",
"the",
"current",
"directory",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L158-L169
|
18,092
|
drdoctr/doctr
|
doctr/travis.py
|
get_travis_branch
|
def get_travis_branch():
"""Get the name of the branch that the PR is from.
Note that this is not simply ``$TRAVIS_BRANCH``. the ``push`` build will
use the correct branch (the branch that the PR is from) but the ``pr``
build will use the _target_ of the PR (usually master). So instead, we ask
for ``$TRAVIS_PULL_REQUEST_BRANCH`` if it's a PR build, and
``$TRAVIS_BRANCH`` if it's a push build.
"""
if os.environ.get("TRAVIS_PULL_REQUEST", "") == "true":
return os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "")
else:
return os.environ.get("TRAVIS_BRANCH", "")
|
python
|
def get_travis_branch():
"""Get the name of the branch that the PR is from.
Note that this is not simply ``$TRAVIS_BRANCH``. the ``push`` build will
use the correct branch (the branch that the PR is from) but the ``pr``
build will use the _target_ of the PR (usually master). So instead, we ask
for ``$TRAVIS_PULL_REQUEST_BRANCH`` if it's a PR build, and
``$TRAVIS_BRANCH`` if it's a push build.
"""
if os.environ.get("TRAVIS_PULL_REQUEST", "") == "true":
return os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "")
else:
return os.environ.get("TRAVIS_BRANCH", "")
|
[
"def",
"get_travis_branch",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_PULL_REQUEST\"",
",",
"\"\"",
")",
"==",
"\"true\"",
":",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_PULL_REQUEST_BRANCH\"",
",",
"\"\"",
")",
"else",
":",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_BRANCH\"",
",",
"\"\"",
")"
] |
Get the name of the branch that the PR is from.
Note that this is not simply ``$TRAVIS_BRANCH``. the ``push`` build will
use the correct branch (the branch that the PR is from) but the ``pr``
build will use the _target_ of the PR (usually master). So instead, we ask
for ``$TRAVIS_PULL_REQUEST_BRANCH`` if it's a PR build, and
``$TRAVIS_BRANCH`` if it's a push build.
|
[
"Get",
"the",
"name",
"of",
"the",
"branch",
"that",
"the",
"PR",
"is",
"from",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L171-L183
|
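Editor's sketch (not a dataset row): simulating the two Travis build types that get_travis_branch in the row above distinguishes; all environment values are invented.

import os
from doctr.travis import get_travis_branch

os.environ['TRAVIS_PULL_REQUEST'] = 'true'
os.environ['TRAVIS_PULL_REQUEST_BRANCH'] = 'feature-x'
assert get_travis_branch() == 'feature-x'  # pr build: the PR's source branch

os.environ['TRAVIS_PULL_REQUEST'] = 'false'
os.environ['TRAVIS_BRANCH'] = 'master'
assert get_travis_branch() == 'master'     # push build: the pushed branch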
18,093
|
drdoctr/doctr
|
doctr/travis.py
|
set_git_user_email
|
def set_git_user_email():
"""
Set global user and email for git user if not already present on system
"""
username = subprocess.run(shlex.split('git config user.name'), stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
if not username or username == "Travis CI User":
run(['git', 'config', '--global', 'user.name', "Doctr (Travis CI)"])
else:
print("Not setting git user name, as it's already set to %r" % username)
email = subprocess.run(shlex.split('git config user.email'), stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
if not email or email == "travis@example.org":
# We need a dummy email or git will fail. We use this one as per
# https://help.github.com/articles/keeping-your-email-address-private/.
run(['git', 'config', '--global', 'user.email', 'drdoctr@users.noreply.github.com'])
else:
print("Not setting git user email, as it's already set to %r" % email)
|
python
|
def set_git_user_email():
"""
Set global user and email for git user if not already present on system
"""
username = subprocess.run(shlex.split('git config user.name'), stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
if not username or username == "Travis CI User":
run(['git', 'config', '--global', 'user.name', "Doctr (Travis CI)"])
else:
print("Not setting git user name, as it's already set to %r" % username)
email = subprocess.run(shlex.split('git config user.email'), stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
if not email or email == "travis@example.org":
# We need a dummy email or git will fail. We use this one as per
# https://help.github.com/articles/keeping-your-email-address-private/.
run(['git', 'config', '--global', 'user.email', 'drdoctr@users.noreply.github.com'])
else:
print("Not setting git user email, as it's already set to %r" % email)
|
[
"def",
"set_git_user_email",
"(",
")",
":",
"username",
"=",
"subprocess",
".",
"run",
"(",
"shlex",
".",
"split",
"(",
"'git config user.name'",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"stdout",
".",
"strip",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"not",
"username",
"or",
"username",
"==",
"\"Travis CI User\"",
":",
"run",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'--global'",
",",
"'user.name'",
",",
"\"Doctr (Travis CI)\"",
"]",
")",
"else",
":",
"print",
"(",
"\"Not setting git user name, as it's already set to %r\"",
"%",
"username",
")",
"email",
"=",
"subprocess",
".",
"run",
"(",
"shlex",
".",
"split",
"(",
"'git config user.email'",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"stdout",
".",
"strip",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"not",
"email",
"or",
"email",
"==",
"\"travis@example.org\"",
":",
"# We need a dummy email or git will fail. We use this one as per",
"# https://help.github.com/articles/keeping-your-email-address-private/.",
"run",
"(",
"[",
"'git'",
",",
"'config'",
",",
"'--global'",
",",
"'user.email'",
",",
"'drdoctr@users.noreply.github.com'",
"]",
")",
"else",
":",
"print",
"(",
"\"Not setting git user email, as it's already set to %r\"",
"%",
"email",
")"
] |
Set global user and email for git user if not already present on system
|
[
"Set",
"global",
"user",
"and",
"email",
"for",
"git",
"user",
"if",
"not",
"already",
"present",
"on",
"system"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L272-L288
|
18,094
|
drdoctr/doctr
|
doctr/travis.py
|
checkout_deploy_branch
|
def checkout_deploy_branch(deploy_branch, canpush=True):
"""
Checkout the deploy branch, creating it if it doesn't exist.
"""
# Create an empty branch with .nojekyll if it doesn't already exist
create_deploy_branch(deploy_branch, push=canpush)
remote_branch = "doctr_remote/{}".format(deploy_branch)
print("Checking out doctr working branch tracking", remote_branch)
clear_working_branch()
# If gh-pages doesn't exist the above create_deploy_branch() will create
# it we can push, but if we can't, it won't and the --track would fail.
if run(['git', 'rev-parse', '--verify', remote_branch], exit=False) == 0:
extra_args = ['--track', remote_branch]
else:
extra_args = []
run(['git', 'checkout', '-b', DOCTR_WORKING_BRANCH] + extra_args)
print("Done")
return canpush
|
python
|
def checkout_deploy_branch(deploy_branch, canpush=True):
"""
Checkout the deploy branch, creating it if it doesn't exist.
"""
# Create an empty branch with .nojekyll if it doesn't already exist
create_deploy_branch(deploy_branch, push=canpush)
remote_branch = "doctr_remote/{}".format(deploy_branch)
print("Checking out doctr working branch tracking", remote_branch)
clear_working_branch()
# If gh-pages doesn't exist the above create_deploy_branch() will create
# it we can push, but if we can't, it won't and the --track would fail.
if run(['git', 'rev-parse', '--verify', remote_branch], exit=False) == 0:
extra_args = ['--track', remote_branch]
else:
extra_args = []
run(['git', 'checkout', '-b', DOCTR_WORKING_BRANCH] + extra_args)
print("Done")
return canpush
|
[
"def",
"checkout_deploy_branch",
"(",
"deploy_branch",
",",
"canpush",
"=",
"True",
")",
":",
"# Create an empty branch with .nojekyll if it doesn't already exist",
"create_deploy_branch",
"(",
"deploy_branch",
",",
"push",
"=",
"canpush",
")",
"remote_branch",
"=",
"\"doctr_remote/{}\"",
".",
"format",
"(",
"deploy_branch",
")",
"print",
"(",
"\"Checking out doctr working branch tracking\"",
",",
"remote_branch",
")",
"clear_working_branch",
"(",
")",
"# If gh-pages doesn't exist the above create_deploy_branch() will create",
"# it we can push, but if we can't, it won't and the --track would fail.",
"if",
"run",
"(",
"[",
"'git'",
",",
"'rev-parse'",
",",
"'--verify'",
",",
"remote_branch",
"]",
",",
"exit",
"=",
"False",
")",
"==",
"0",
":",
"extra_args",
"=",
"[",
"'--track'",
",",
"remote_branch",
"]",
"else",
":",
"extra_args",
"=",
"[",
"]",
"run",
"(",
"[",
"'git'",
",",
"'checkout'",
",",
"'-b'",
",",
"DOCTR_WORKING_BRANCH",
"]",
"+",
"extra_args",
")",
"print",
"(",
"\"Done\"",
")",
"return",
"canpush"
] |
Checkout the deploy branch, creating it if it doesn't exist.
|
[
"Checkout",
"the",
"deploy",
"branch",
"creating",
"it",
"if",
"it",
"doesn",
"t",
"exist",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L290-L308
|
18,095
|
drdoctr/doctr
|
doctr/travis.py
|
deploy_branch_exists
|
def deploy_branch_exists(deploy_branch):
"""
Check if there is a remote branch with name specified in ``deploy_branch``.
Note that default ``deploy_branch`` is ``gh-pages`` for regular repos and
``master`` for ``github.io`` repos.
This isn't completely robust. If there are multiple remotes and you have a
``deploy_branch`` branch on the non-default remote, this won't see it.
"""
remote_name = 'doctr_remote'
branch_names = subprocess.check_output(['git', 'branch', '-r']).decode('utf-8').split()
return '{}/{}'.format(remote_name, deploy_branch) in branch_names
|
python
|
def deploy_branch_exists(deploy_branch):
"""
Check if there is a remote branch with name specified in ``deploy_branch``.
Note that default ``deploy_branch`` is ``gh-pages`` for regular repos and
``master`` for ``github.io`` repos.
This isn't completely robust. If there are multiple remotes and you have a
``deploy_branch`` branch on the non-default remote, this won't see it.
"""
remote_name = 'doctr_remote'
branch_names = subprocess.check_output(['git', 'branch', '-r']).decode('utf-8').split()
return '{}/{}'.format(remote_name, deploy_branch) in branch_names
|
[
"def",
"deploy_branch_exists",
"(",
"deploy_branch",
")",
":",
"remote_name",
"=",
"'doctr_remote'",
"branch_names",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'branch'",
",",
"'-r'",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
")",
"return",
"'{}/{}'",
".",
"format",
"(",
"remote_name",
",",
"deploy_branch",
")",
"in",
"branch_names"
] |
Check if there is a remote branch with name specified in ``deploy_branch``.
Note that default ``deploy_branch`` is ``gh-pages`` for regular repos and
``master`` for ``github.io`` repos.
This isn't completely robust. If there are multiple remotes and you have a
``deploy_branch`` branch on the non-default remote, this won't see it.
|
[
"Check",
"if",
"there",
"is",
"a",
"remote",
"branch",
"with",
"name",
"specified",
"in",
"deploy_branch",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L315-L328
|
18,096
|
drdoctr/doctr
|
doctr/travis.py
|
create_deploy_branch
|
def create_deploy_branch(deploy_branch, push=True):
"""
If there is no remote branch with name specified in ``deploy_branch``,
create one.
Note that default ``deploy_branch`` is ``gh-pages`` for regular
repos and ``master`` for ``github.io`` repos.
Return True if ``deploy_branch`` was created, False if not.
"""
if not deploy_branch_exists(deploy_branch):
print("Creating {} branch on doctr_remote".format(deploy_branch))
clear_working_branch()
run(['git', 'checkout', '--orphan', DOCTR_WORKING_BRANCH])
# delete everything in the new ref. this is non-destructive to existing
# refs/branches, etc...
run(['git', 'rm', '-rf', '.'])
print("Adding .nojekyll file to working branch")
run(['touch', '.nojekyll'])
run(['git', 'add', '.nojekyll'])
run(['git', 'commit', '-m', 'Create new {} branch with .nojekyll'.format(deploy_branch)])
if push:
print("Pushing working branch to remote {} branch".format(deploy_branch))
run(['git', 'push', '-u', 'doctr_remote', '{}:{}'.format(DOCTR_WORKING_BRANCH, deploy_branch)])
# return to master branch and clear the working branch
run(['git', 'checkout', 'master'])
run(['git', 'branch', '-D', DOCTR_WORKING_BRANCH])
# fetch the remote so that doctr_remote/{deploy_branch} is resolved
run(['git', 'fetch', 'doctr_remote'])
return True
return False
|
python
|
def create_deploy_branch(deploy_branch, push=True):
"""
If there is no remote branch with name specified in ``deploy_branch``,
create one.
Note that default ``deploy_branch`` is ``gh-pages`` for regular
repos and ``master`` for ``github.io`` repos.
Return True if ``deploy_branch`` was created, False if not.
"""
if not deploy_branch_exists(deploy_branch):
print("Creating {} branch on doctr_remote".format(deploy_branch))
clear_working_branch()
run(['git', 'checkout', '--orphan', DOCTR_WORKING_BRANCH])
# delete everything in the new ref. this is non-destructive to existing
# refs/branches, etc...
run(['git', 'rm', '-rf', '.'])
print("Adding .nojekyll file to working branch")
run(['touch', '.nojekyll'])
run(['git', 'add', '.nojekyll'])
run(['git', 'commit', '-m', 'Create new {} branch with .nojekyll'.format(deploy_branch)])
if push:
print("Pushing working branch to remote {} branch".format(deploy_branch))
run(['git', 'push', '-u', 'doctr_remote', '{}:{}'.format(DOCTR_WORKING_BRANCH, deploy_branch)])
# return to master branch and clear the working branch
run(['git', 'checkout', 'master'])
run(['git', 'branch', '-D', DOCTR_WORKING_BRANCH])
# fetch the remote so that doctr_remote/{deploy_branch} is resolved
run(['git', 'fetch', 'doctr_remote'])
return True
return False
|
[
"def",
"create_deploy_branch",
"(",
"deploy_branch",
",",
"push",
"=",
"True",
")",
":",
"if",
"not",
"deploy_branch_exists",
"(",
"deploy_branch",
")",
":",
"print",
"(",
"\"Creating {} branch on doctr_remote\"",
".",
"format",
"(",
"deploy_branch",
")",
")",
"clear_working_branch",
"(",
")",
"run",
"(",
"[",
"'git'",
",",
"'checkout'",
",",
"'--orphan'",
",",
"DOCTR_WORKING_BRANCH",
"]",
")",
"# delete everything in the new ref. this is non-destructive to existing",
"# refs/branches, etc...",
"run",
"(",
"[",
"'git'",
",",
"'rm'",
",",
"'-rf'",
",",
"'.'",
"]",
")",
"print",
"(",
"\"Adding .nojekyll file to working branch\"",
")",
"run",
"(",
"[",
"'touch'",
",",
"'.nojekyll'",
"]",
")",
"run",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'.nojekyll'",
"]",
")",
"run",
"(",
"[",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"'Create new {} branch with .nojekyll'",
".",
"format",
"(",
"deploy_branch",
")",
"]",
")",
"if",
"push",
":",
"print",
"(",
"\"Pushing working branch to remote {} branch\"",
".",
"format",
"(",
"deploy_branch",
")",
")",
"run",
"(",
"[",
"'git'",
",",
"'push'",
",",
"'-u'",
",",
"'doctr_remote'",
",",
"'{}:{}'",
".",
"format",
"(",
"DOCTR_WORKING_BRANCH",
",",
"deploy_branch",
")",
"]",
")",
"# return to master branch and clear the working branch",
"run",
"(",
"[",
"'git'",
",",
"'checkout'",
",",
"'master'",
"]",
")",
"run",
"(",
"[",
"'git'",
",",
"'branch'",
",",
"'-D'",
",",
"DOCTR_WORKING_BRANCH",
"]",
")",
"# fetch the remote so that doctr_remote/{deploy_branch} is resolved",
"run",
"(",
"[",
"'git'",
",",
"'fetch'",
",",
"'doctr_remote'",
"]",
")",
"return",
"True",
"return",
"False"
] |
If there is no remote branch with name specified in ``deploy_branch``,
create one.
Note that default ``deploy_branch`` is ``gh-pages`` for regular
repos and ``master`` for ``github.io`` repos.
Return True if ``deploy_branch`` was created, False if not.
|
[
"If",
"there",
"is",
"no",
"remote",
"branch",
"with",
"name",
"specified",
"in",
"deploy_branch",
"create",
"one",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L330-L361
|
18,097
|
drdoctr/doctr
|
doctr/travis.py
|
find_sphinx_build_dir
|
def find_sphinx_build_dir():
"""
Find build subfolder within sphinx docs directory.
This is called by :func:`commit_docs` if keyword arg ``built_docs`` is not
specified on the command line.
"""
build = glob.glob('**/*build/html', recursive=True)
if not build:
raise RuntimeError("Could not find Sphinx build directory automatically")
build_folder = build[0]
return build_folder
|
python
|
def find_sphinx_build_dir():
"""
Find build subfolder within sphinx docs directory.
This is called by :func:`commit_docs` if keyword arg ``built_docs`` is not
specified on the command line.
"""
build = glob.glob('**/*build/html', recursive=True)
if not build:
raise RuntimeError("Could not find Sphinx build directory automatically")
build_folder = build[0]
return build_folder
|
[
"def",
"find_sphinx_build_dir",
"(",
")",
":",
"build",
"=",
"glob",
".",
"glob",
"(",
"'**/*build/html'",
",",
"recursive",
"=",
"True",
")",
"if",
"not",
"build",
":",
"raise",
"RuntimeError",
"(",
"\"Could not find Sphinx build directory automatically\"",
")",
"build_folder",
"=",
"build",
"[",
"0",
"]",
"return",
"build_folder"
] |
Find build subfolder within sphinx docs directory.
This is called by :func:`commit_docs` if keyword arg ``built_docs`` is not
specified on the command line.
|
[
"Find",
"build",
"subfolder",
"within",
"sphinx",
"docs",
"directory",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L363-L375
|
18,098
|
drdoctr/doctr
|
doctr/travis.py
|
copy_to_tmp
|
def copy_to_tmp(source):
"""
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
"""
tmp_dir = tempfile.mkdtemp()
# Use pathlib because os.path.basename is different depending on whether
# the path ends in a /
p = pathlib.Path(source)
dirname = p.name or 'temp'
new_dir = os.path.join(tmp_dir, dirname)
if os.path.isdir(source):
shutil.copytree(source, new_dir)
else:
shutil.copy2(source, new_dir)
return new_dir
|
python
|
def copy_to_tmp(source):
"""
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
"""
tmp_dir = tempfile.mkdtemp()
# Use pathlib because os.path.basename is different depending on whether
# the path ends in a /
p = pathlib.Path(source)
dirname = p.name or 'temp'
new_dir = os.path.join(tmp_dir, dirname)
if os.path.isdir(source):
shutil.copytree(source, new_dir)
else:
shutil.copy2(source, new_dir)
return new_dir
|
[
"def",
"copy_to_tmp",
"(",
"source",
")",
":",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"# Use pathlib because os.path.basename is different depending on whether",
"# the path ends in a /",
"p",
"=",
"pathlib",
".",
"Path",
"(",
"source",
")",
"dirname",
"=",
"p",
".",
"name",
"or",
"'temp'",
"new_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"dirname",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"source",
")",
":",
"shutil",
".",
"copytree",
"(",
"source",
",",
"new_dir",
")",
"else",
":",
"shutil",
".",
"copy2",
"(",
"source",
",",
"new_dir",
")",
"return",
"new_dir"
] |
Copies ``source`` to a temporary directory, and returns the copied
location.
If source is a file, the copied location is also a file.
|
[
"Copies",
"source",
"to",
"a",
"temporary",
"directory",
"and",
"returns",
"the",
"copied",
"location",
"."
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L383-L400
|
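Editor's sketch (not a dataset row): copy_to_tmp from the row above copies a file or directory into a fresh temporary directory and returns the new location; the paths here are illustrative.

import pathlib
import tempfile
from doctr.travis import copy_to_tmp

src = pathlib.Path(tempfile.mkdtemp()) / 'docs'
src.mkdir()
(src / 'index.html').write_text('<html></html>')

copied = copy_to_tmp(str(src))  # the whole tree lands under a new tmp dir
assert pathlib.Path(copied, 'index.html').exists()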
18,099
|
drdoctr/doctr
|
doctr/travis.py
|
is_subdir
|
def is_subdir(a, b):
"""
Return true if a is a subdirectory of b
"""
a, b = map(os.path.abspath, [a, b])
return os.path.commonpath([a, b]) == b
|
python
|
def is_subdir(a, b):
"""
Return true if a is a subdirectory of b
"""
a, b = map(os.path.abspath, [a, b])
return os.path.commonpath([a, b]) == b
|
[
"def",
"is_subdir",
"(",
"a",
",",
"b",
")",
":",
"a",
",",
"b",
"=",
"map",
"(",
"os",
".",
"path",
".",
"abspath",
",",
"[",
"a",
",",
"b",
"]",
")",
"return",
"os",
".",
"path",
".",
"commonpath",
"(",
"[",
"a",
",",
"b",
"]",
")",
"==",
"b"
] |
Return true if a is a subdirectory of b
|
[
"Return",
"true",
"if",
"a",
"is",
"a",
"subdirectory",
"of",
"b"
] |
0f19ff78c8239efcc98d417f36b0a31d9be01ba5
|
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L402-L408
|
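Editor's sketch (not a dataset row): is_subdir from the row above compares os.path.abspath-normalized paths via os.path.commonpath, so a path counts as its own subdirectory and ``..`` segments are resolved first; the example paths are hypothetical (POSIX-style).

from doctr.travis import is_subdir

assert is_subdir('/tmp/build/html', '/tmp/build')
assert is_subdir('/tmp/build', '/tmp/build')               # a path is its own subdir
assert not is_subdir('/tmp/build/../other', '/tmp/build')  # normalizes to /tmp/other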