_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def get_tasks_changed_since(self, since):
    """Return pending tasks modified (or, failing that, entered) at or after *since*.

    Tasks that carry neither a 'modified' nor an 'entry' timestamp fall back
    to an early epoch (2000-01-01 UTC) and are therefore excluded for any
    realistic *since* value.

    :param since: timezone-aware datetime to compare task timestamps against.
    :return: list of task dicts changed since *since*.
    """
    # The fallback timestamp is identical for every task, so build it once
    # instead of once per iteration. datetime.timezone.utc is equivalent to
    # pytz.utc here and avoids the third-party dependency in this code path.
    epoch = datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc)
    return [
        task
        for task in self.client.filter_tasks({'status': 'pending'})
        if task.get('modified', task.get('entry', epoch)) >= since
    ]
def evergreen(self, included_channel_ids=None, excluded_channel_ids=None, **kwargs):
    """Build a search restricted to evergreen pieces of Content.

    :param included_channel_ids: channel ids the query is limited to.
    :param excluded_channel_ids: channel ids dropped from the query.
    :param kwargs: forwarded unchanged to :meth:`search`.
    """
    queryset = self.search(**kwargs).filter(Evergreen())
    if included_channel_ids:
        queryset = queryset.filter(VideohubChannel(included_ids=included_channel_ids))
    if excluded_channel_ids:
        queryset = queryset.filter(VideohubChannel(excluded_ids=excluded_channel_ids))
    return queryset
def evergreen_video(self, **kwargs):
    """Narrow the evergreen search down to video content only."""
    return self.evergreen(**kwargs).filter(VideohubVideo())
def instant_articles(self, **kwargs):
    """QuerySet of published content approved for instant articles.

    Approval is configured per FeatureType (FeatureType.instant_article = True).
    Results are ordered newest-modified first, then newest-published.
    """
    ordered = self.search(**kwargs).sort('-last_modified', '-published')
    return ordered.filter(InstantArticle())
def sponsored(self, **kwargs):
    """Search spanning every sponsored piece of Content.

    When settings.RECENT_SPONSORED_OFFSET_HOURS is configured, results are
    additionally limited to content published within that many hours of now.
    """
    queryset = self.search(**kwargs).filter(AllSponsored())
    offset_hours = getattr(settings, "RECENT_SPONSORED_OFFSET_HOURS", None)
    if offset_hours:
        now = timezone.now()
        recent_window = Published(
            after=now - timezone.timedelta(hours=offset_hours),
            before=now,
        )
        queryset = queryset.filter(recent_window)
    return queryset
def search(self, **kwargs):
    """Query ElasticSearch, returning an elasticsearch queryset.

    :param kwargs: keyword arguments (optional)
        * query : ES Query spec
        * tags : content tags
        * types : content types
        * feature_types : featured types
        * published : date range
    """
    qs = super(ContentManager, self).search()
    if "query" in kwargs:
        qs = qs.query("match", _all=kwargs.get("query"))
    else:
        qs = qs.sort('-published', '-last_modified')
    # "before"/"after" are datetimes while "published" is a boolean toggle;
    # this trio should be simplified in the future.
    if "before" in kwargs or "after" in kwargs:
        qs = qs.filter(Published(before=kwargs.get("before"), after=kwargs.get("after")))
    elif kwargs.get("published", True) and "status" not in kwargs:
        # TODO: kill this "published" param. it sucks
        qs = qs.filter(Published())
    if "status" in kwargs:
        qs = qs.filter(Status(kwargs["status"]))
    if "excluded_ids" in kwargs:
        qs = qs.filter(~es_filter.Ids(values=kwargs.get("excluded_ids", [])))
    # These three filters always apply, defaulting to an empty selection.
    for filter_cls, key in ((Tags, "tags"), (Authors, "authors"), (FeatureTypes, "feature_types")):
        qs = qs.filter(filter_cls(kwargs.get(key, [])))
    # Is this good enough? Are we even using this feature at all?
    doc_types = kwargs.pop("types", [])
    if doc_types:
        qs._doc_type = doc_types
    return qs
def get_obj_class(self, obj_type):
    """Resolve the Python class that models *obj_type*.

    Usually the object type alone determines the class, but some type names
    are reused under different parents (e.g. 'interface' can be a child of
    vport or of router), so the parent — and effectively grandparent — type
    is consulted to disambiguate.

    :param obj_type: IXN object type.
    :return: the specific registered class when one exists, else IxnObject.
    """
    if obj_type not in IxnObject.str_2_class:
        return IxnObject
    entry = IxnObject.str_2_class[obj_type]
    if type(entry) is not dict:
        return entry
    # Ambiguous type name: disambiguate by this object's own type first,
    # then by its parent's type.
    own_type = self.obj_type()
    if own_type in entry:
        return entry[own_type]
    parent_type = self.obj_parent().obj_type()
    if parent_type in entry:
        return entry[parent_type]
    return IxnObject
def get_child_static(self, objType, seq_number=None):
    """Build an IxnObject for the requested child without reading it from IXN.

    The child's object reference is assembled statically from this object's
    reference, the requested type and an optional sequence number. Reading
    children dynamically would be preferable, but this static path is kept
    for its large performance advantage.
    """
    ref = self.obj_ref() + '/' + objType
    if seq_number:
        ref += ':' + str(seq_number)
    existing = self.get_object_by_ref(ref)
    child_cls = self.get_obj_class(objType)
    if existing:
        return existing
    return child_cls(parent=self, objType=objType, objRef=ref)
def list_conversations(self, filter=None, filter_mode=None, include=None, include_all_conversation_ids=None, interleave_submissions=None, scope=None):
    """List the current user's conversations, most recent first.

    :param scope: restrict to "unread", "starred" or "archived"; the default
        returns all non-archived (read and unread) conversations.
    :param filter: course/group/user context(s), e.g. "user_123" or
        "course_456"; may be a single value or an array.
    :param filter_mode: how multiple filters combine — "and", "or" or
        "default or".
    :param interleave_submissions: obsolete; ignored by Canvas.
    :param include_all_conversation_ids: if true, the response becomes an
        object carrying both the paged conversations and all matching ids.
    :param include: extra data per conversation, e.g. "participant_avatars".
    """
    path = {}
    data = {}
    params = {}
    # Parameters whose values must come from a fixed set.
    enum_values = {
        "scope": ["unread", "starred", "archived"],
        "filter_mode": ["and", "or", "default or"],
        "include": ["participant_avatars"],
    }
    for name, value in (
        ("scope", scope),
        ("filter", filter),
        ("filter_mode", filter_mode),
        ("interleave_submissions", interleave_submissions),
        ("include_all_conversation_ids", include_all_conversation_ids),
        ("include", include),
    ):
        if value is None:
            continue
        if name in enum_values:
            self._validate_enum(value, enum_values[name])
        params[name] = value
    self.logger.debug("GET /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/conversations".format(**path), data=data, params=params, all_pages=True)
def create_conversation(self, body, recipients, attachment_ids=None, context_code=None, filter=None, filter_mode=None, group_conversation=None, media_comment_id=None, media_comment_type=None, mode=None, scope=None, subject=None, user_note=None):
    """Create a new conversation with one or more recipients.

    An existing private conversation with the same recipients is reused.

    :param body: the message text to send (required).
    :param recipients: user ids or "course_"/"group_"-prefixed context ids
        (required).
    :param subject: conversation subject, max 255 chars; ignored on reuse.
    :param group_conversation: if true, one shared conversation; otherwise a
        private conversation is started per recipient.
    :param attachment_ids: ids of files previously uploaded to the sender's
        "conversation attachments" folder.
    :param media_comment_id: id of an associated audio/video comment.
    :param media_comment_type: "audio" or "video".
    :param user_note: add a faculty journal entry per eligible recipient.
    :param mode: "sync" or "async" delivery; only relevant for bulk private
        messages.
    :param scope: affects the "visible" field of the response.
    :param filter: affects the "visible" field of the response.
    :param filter_mode: "and", "or" or "default or".
    :param context_code: course or group that is the conversation's context.
    """
    path = {}
    data = {}
    params = {}
    # (form key, value, allowed enum values or None, required?)
    field_spec = (
        ("recipients", recipients, None, True),
        ("subject", subject, None, False),
        ("body", body, None, True),
        ("group_conversation", group_conversation, None, False),
        ("attachment_ids", attachment_ids, None, False),
        ("media_comment_id", media_comment_id, None, False),
        ("media_comment_type", media_comment_type, ["audio", "video"], False),
        ("user_note", user_note, None, False),
        ("mode", mode, ["sync", "async"], False),
        ("scope", scope, ["unread", "starred", "archived"], False),
        ("filter", filter, None, False),
        ("filter_mode", filter_mode, ["and", "or", "default or"], False),
        ("context_code", context_code, None, False),
    )
    for key, value, allowed, required in field_spec:
        if not required and value is None:
            continue
        if allowed is not None:
            self._validate_enum(value, allowed)
        data[key] = value
    self.logger.debug("POST /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/conversations".format(**path), data=data, params=params, no_data=True)
def edit_conversation(self, id, conversation_starred=None, conversation_subscribed=None, conversation_workflow_state=None, filter=None, filter_mode=None, scope=None):
    """Update attributes of a single conversation.

    :param id: conversation id (path parameter).
    :param conversation_workflow_state: "read", "unread" or "archived".
    :param conversation_subscribed: toggle the current user's subscription
        (group conversations only).
    :param conversation_starred: toggle the starred flag for this user.
    :param scope: affects the "visible" field of the response.
    :param filter: affects the "visible" field of the response.
    :param filter_mode: "and", "or" or "default or".
    """
    path = {"id": id}
    data = {}
    params = {}
    # Parameters whose values must come from a fixed set.
    enum_values = {
        "conversation[workflow_state]": ["read", "unread", "archived"],
        "scope": ["unread", "starred", "archived"],
        "filter_mode": ["and", "or", "default or"],
    }
    for key, value in (
        ("conversation[workflow_state]", conversation_workflow_state),
        ("conversation[subscribed]", conversation_subscribed),
        ("conversation[starred]", conversation_starred),
        ("scope", scope),
        ("filter", filter),
        ("filter_mode", filter_mode),
    ):
        if value is None:
            continue
        if key in enum_values:
            self._validate_enum(value, enum_values[key])
        data[key] = value
    self.logger.debug("PUT /api/v1/conversations/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/conversations/{id}".format(**path), data=data, params=params, no_data=True)
def update_preferences_by_category(self, category, communication_channel_id, notification_preferences_frequency):
    """Update the preferences for every notification in a category.

    Applies to a single communication channel.

    :param category: parameterized category name — e.g. "Course Content"
        must be passed as "course_content".
    :param communication_channel_id: the communication channel to update.
    :param notification_preferences_frequency: desired frequency for each
        notification in the category.
    """
    path = {
        "communication_channel_id": communication_channel_id,
        "category": category,
    }
    data = {"notification_preferences[frequency]": notification_preferences_frequency}
    params = {}
    self.logger.debug("PUT /api/v1/users/self/communication_channels/{communication_channel_id}/notification_preference_categories/{category} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    url = "/api/v1/users/self/communication_channels/{communication_channel_id}/notification_preference_categories/{category}".format(**path)
    return self.generic_request("PUT", url, data=data, params=params, no_data=True)
def list_users_in_account(self, account_id, search_term=None):
    """Retrieve the list of users associated with the given account.

    :param account_id: the account to list users for (path parameter).
    :param search_term: partial name or full id, at least 3 characters.
        Numeric values prefer a canonical user-id match; administrative
        callers also match SIS id, name or email, others match name only.
    """
    path = {"account_id": account_id}
    data = {}
    params = {}
    if search_term is not None:
        params["search_term"] = search_term
    self.logger.debug("GET /api/v1/accounts/{account_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/users".format(**path), data=data, params=params, all_pages=True)
def self_register_user(self, user_name, account_id, user_terms_of_use, pseudonym_unique_id, communication_channel_address=None, communication_channel_type=None, user_birthdate=None, user_locale=None, user_short_name=None, user_sortable_name=None, user_time_zone=None):
    """Self-register a new user and pseudonym for an account.

    Only usable when self-registration is enabled on the account.

    :param user_name: full name, used by teachers for grading (required).
    :param account_id: target account id (required, path parameter).
    :param user_terms_of_use: whether the user accepts the terms (required).
    :param pseudonym_unique_id: login id; must be a valid email (required).
    :param communication_channel_address: e.g. the user's email address.
    :param communication_channel_type: e.g. 'email' or 'sms'.
    :param user_birthdate: the user's birth date.
    :param user_locale: preferred language in RFC-5646 format.
    :param user_short_name: display name for discussions/messages/comments.
    :param user_sortable_name: name used for alphabetical sorting in lists.
    :param user_time_zone: IANA or Rails-friendly time zone name.
    """
    path = {"account_id": account_id}
    params = {}
    data = {"user[name]": user_name}
    # Optional user attributes, added only when supplied.
    for key, value in (
        ("user[short_name]", user_short_name),
        ("user[sortable_name]", user_sortable_name),
        ("user[time_zone]", user_time_zone),
        ("user[locale]", user_locale),
        ("user[birthdate]", user_birthdate),
    ):
        if value is not None:
            data[key] = value
    data["user[terms_of_use]"] = user_terms_of_use
    data["pseudonym[unique_id]"] = pseudonym_unique_id
    # Optional communication channel, added only when supplied.
    for key, value in (
        ("communication_channel[type]", communication_channel_type),
        ("communication_channel[address]", communication_channel_address),
    ):
        if value is not None:
            data[key] = value
    self.logger.debug("POST /api/v1/accounts/{account_id}/self_registration with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/self_registration".format(**path), data=data, params=params, single_item=True)
def update_user_settings(self, id, collapse_global_nav=None, manual_mark_as_read=None):
    """Update an existing user's settings.

    :param id: user id (path parameter).
    :param manual_mark_as_read: if true, discussion posts must be marked as
        read manually instead of automatically.
    :param collapse_global_nav: if true, pages load with the global
        navigation collapsed.
    """
    path = {"id": id}
    data = {}
    params = {}
    for key, value in (
        ("manual_mark_as_read", manual_mark_as_read),
        ("collapse_global_nav", collapse_global_nav),
    ):
        if value is not None:
            params[key] = value
    self.logger.debug("GET /api/v1/users/{id}/settings with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{id}/settings".format(**path), data=data, params=params, no_data=True)
def get_custom_color(self, id, asset_string):
    """Return the custom colors a user saved for a given context.

    :param id: user id (path parameter).
    :param asset_string: context identifier in 'context_id' form,
        for example 'course_42'.
    """
    path = {"id": id, "asset_string": asset_string}
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/users/{id}/colors/{asset_string} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{id}/colors/{asset_string}".format(**path), data=data, params=params, no_data=True)
def edit_user(self, id, user_avatar_token=None, user_avatar_url=None, user_email=None, user_locale=None, user_name=None, user_short_name=None, user_sortable_name=None, user_time_zone=None):
    """Modify an existing user (to modify a login, see the logins API).

    :param id: user id (path parameter).
    :param user_name: full name, used by teachers for grading.
    :param user_short_name: display name for discussions/messages/comments.
    :param user_sortable_name: name used for alphabetical sorting in lists.
    :param user_time_zone: IANA or Rails-friendly time zone name.
    :param user_email: the user's default email address.
    :param user_locale: preferred language in RFC-5646 format.
    :param user_avatar_token: opaque avatar token obtained from the user
        avatars endpoint; supersedes user_avatar_url when both are given.
    :param user_avatar_url: external avatar url (128px square recommended);
        ignored when a token is also supplied.
    """
    path = {"id": id}
    data = {}
    params = {}
    for key, value in (
        ("user[name]", user_name),
        ("user[short_name]", user_short_name),
        ("user[sortable_name]", user_sortable_name),
        ("user[time_zone]", user_time_zone),
        ("user[email]", user_email),
        ("user[locale]", user_locale),
        ("user[avatar][token]", user_avatar_token),
        ("user[avatar][url]", user_avatar_url),
    ):
        if value is not None:
            data[key] = value
    self.logger.debug("PUT /api/v1/users/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/{id}".format(**path), data=data, params=params, single_item=True)
def merge_user_into_another_user_destination_user_id(self, id, destination_user_id):
    """Merge one user into another user. Considered irreversible.

    The caller must have permission to manage both users; the source user is
    deleted and all of its data moves to the destination user. When finding
    users by SIS id across accounts, destination_account_id is required and
    may also be identified by passing a domain.

    :param id: user to merge and delete (path parameter).
    :param destination_user_id: user receiving the data (path parameter).
    """
    path = {"id": id, "destination_user_id": destination_user_id}
    data = {}
    params = {}
    self.logger.debug("PUT /api/v1/users/{id}/merge_into/{destination_user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/{id}/merge_into/{destination_user_id}".format(**path), data=data, params=params, single_item=True)
def get_user_profile(self, user_id):
    """Return a user's profile data (id, name, profile pic, ...).

    When the profile requested is the API caller's own, the user's calendar
    feed URL is returned as well.

    :param user_id: user id (path parameter).
    """
    path = {"user_id": user_id}
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/users/{user_id}/profile with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{user_id}/profile".format(**path), data=data, params=params, single_item=True)
def list_user_page_views(self, user_id, end_time=None, start_time=None):
    """Return the user's page-view history, newest first (paginated JSON).

    Similar to the available CSV download; pagination follows the API
    basics section.

    :param user_id: user id (path parameter).
    :param start_time: beginning of the requested time range.
    :param end_time: end of the requested time range.
    """
    path = {"user_id": user_id}
    data = {}
    params = {}
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/users/{user_id}/page_views with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{user_id}/page_views".format(**path), data=data, params=params, all_pages=True)
def list_course_nicknames(self):
    """Return every course nickname the current user has set (paginated)."""
    path = {}
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/users/self/course_nicknames with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/self/course_nicknames".format(**path), data=data, params=params, all_pages=True)
def set_course_nickname(self, nickname, course_id):
    """Set a nickname for the given course.

    The nickname replaces the course's name in subsequent API output and in
    selected places of the Canvas web UI.

    :param nickname: non-empty nickname shorter than 60 characters.
    :param course_id: the course to nickname (path parameter).
    """
    path = {"course_id": course_id}
    data = {"nickname": nickname}
    params = {}
    self.logger.debug("PUT /api/v1/users/self/course_nicknames/{course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/self/course_nicknames/{course_id}".format(**path), data=data, params=params, single_item=True)
q41922 | SisImportsAPI.import_sis_data | train | def import_sis_data(self, account_id, add_sis_stickiness=None, attachment=None, batch_mode=None, batch_mode_term_id=None, clear_sis_stickiness=None, diffing_data_set_identifier=None, diffing_remaster_data_set=None, extension=None, import_type=None, override_sis_stickiness=None):
"""
Import SIS data.
Import SIS data into Canvas. Must be on a root account with SIS imports
enabled.
For more information on the format that's expected here, please see the
"SIS CSV" section in the API docs.
"""
path = {}
data = {}
params = {}
files = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - import_type
"""Choose the data format for reading SIS data. With a standard Canvas
install, this option can only be 'instructure_csv', and if unprovided,
will be assumed to be so. Can be part of the query string."""
if import_type is not None:
data["import_type"] = import_type
# OPTIONAL - attachment
"""There are two ways to post SIS import data - either via a
multipart/form-data form-field-style attachment, or via a non-multipart
raw post request.
'attachment' is required for multipart/form-data style posts. Assumed to
be SIS data from a file upload form field named 'attachment'.
Examples:
curl -F attachment=@<filename> -H "Authorization: Bearer <token>" \
'https://<canvas>/api/v1/accounts/<account_id>/sis_imports.json?import_type=instructure_csv'
If you decide to do a raw post, you can skip the 'attachment' argument,
but you will then be required to provide a suitable Content-Type header.
You are encouraged to also provide the 'extension' argument.
Examples:
curl -H 'Content-Type: application/octet-stream' --data-binary @<filename>.zip \
-H "Authorization: Bearer <token>" \
'https://<canvas>/api/v1/accounts/<account_id>/sis_imports.json?import_type=instructure_csv&extension=zip'
curl -H 'Content-Type: application/zip' --data-binary @<filename>.zip \
-H "Authorization: Bearer <token>" \
'https://<canvas>/api/v1/accounts/<account_id>/sis_imports.json?import_type=instructure_csv'
curl -H 'Content-Type: text/csv' --data-binary @<filename>.csv \
-H "Authorization: Bearer <token>" \
'https://<canvas>/api/v1/accounts/<account_id>/sis_imports.json?import_type=instructure_csv'
curl -H 'Content-Type: text/csv' --data-binary @<filename>.csv \
-H "Authorization: Bearer <token>" \
'https://<canvas>/api/v1/accounts/<account_id>/sis_imports.json?import_type=instructure_csv&batch_mode=1&batch_mode_term_id=15'"""
if attachment is not None:
if type(attachment) is file:
files['attachment'] = (os.path.basename(attachment.name), attachment)
elif os.path.exists(attachment):
files['attachment'] = (os.path.basename(attachment), open(attachment, 'rb'))
else:
raise ValueError('The attachment must be an open file or a path to a readable file.')
# OPTIONAL - extension
"""Recommended for raw post request style imports. This field will be used to
distinguish between zip, xml, csv, and other file format extensions that
would usually be provided with the filename in the multipart post request
scenario. If not provided, this value will be inferred from the
Content-Type, falling back to zip-file format if all else fails."""
if extension is not None:
data["extension"] = extension
# OPTIONAL - batch_mode
"""If set, this SIS import will be run in batch mode, deleting any data
previously imported via SIS that is not present in this latest import.
See the SIS CSV Format page for details."""
if batch_mode is not None:
data["batch_mode"] = batch_mode
# OPTIONAL - batch_mode_term_id
"""Limit deletions to only this term. Required if batch mode is enabled."""
if batch_mode_term_id is not None:
data["batch_mode_term_id"] = batch_mode_term_id
# OPTIONAL - override_sis_stickiness
"""Many fields on records in Canvas can be marked "sticky," which means that
when something changes in the UI apart from the SIS, that field gets
"stuck." In this way, by default, SIS imports do not override UI changes.
If this field is present, however, it will tell the SIS import to ignore
"stickiness" and override all fields."""
if override_sis_stickiness is not None:
data["override_sis_stickiness"] = override_sis_stickiness
# OPTIONAL - add_sis_stickiness
"""This option, if present, will process all changes as if they were UI
changes. This means that "stickiness" will be added to changed fields.
This option is only processed if 'override_sis_stickiness' is also provided."""
if add_sis_stickiness is not None:
data["add_sis_stickiness"] = add_sis_stickiness
# OPTIONAL - clear_sis_stickiness
"""This option, if present, will clear "stickiness" from all fields touched
by this import. Requires that 'override_sis_stickiness' is also provided.
If 'add_sis_stickiness' is also provided, 'clear_sis_stickiness' will
overrule the behavior of 'add_sis_stickiness'"""
if clear_sis_stickiness is not None:
data["clear_sis_stickiness"] = clear_sis_stickiness
# OPTIONAL - diffing_data_set_identifier
"""If set on a CSV import, Canvas will attempt to optimize the SIS import by
comparing this set of CSVs to the previous set that has the same data set
identifier, and only appliying the difference between the two. See the
SIS CSV Format documentation for more details."""
if diffing_data_set_identifier is not None:
data["diffing_data_set_identifier"] = diffing_data_set_identifier
# OPTIONAL - diffing_remaster_data_set
"""If true, and diffing_data_set_identifier is sent, this SIS import will be
part of the data set, but diffing will not be performed. See the SIS CSV
Format documentation for details."""
if diffing_remaster_data_set is not None:
data["diffing_remaster_data_set"] = diffing_remaster_data_set
self.logger.debug("POST /api/v1/accounts/{account_id}/sis_imports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/accounts/{account_id}/sis_imports".format(**path), data=data, params=params, files=files, single_item=True) | python | {
"resource": ""
} |
def abort_all_pending_sis_imports(self, account_id):
    """
    Abort all pending SIS imports.

    Issues PUT /api/v1/accounts/{account_id}/sis_imports/abort_all_pending,
    cancelling SIS imports that were created but are not yet processed or
    processing.

    :param account_id: id of the account whose pending imports to abort.
    :return: result of ``self.generic_request`` for the PUT call.
    """
    path = {"account_id": account_id}  # REQUIRED path parameter
    data = {}    # endpoint takes no form fields
    params = {}  # endpoint takes no query parameters
    self.logger.debug("PUT /api/v1/accounts/{account_id}/sis_imports/abort_all_pending with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/accounts/{account_id}/sis_imports/abort_all_pending".format(**path), data=data, params=params)
"resource": ""
} |
def patch_egg_info(force_hg_version=False):
    """
    A hack to replace egg_info.tagged_version with a wrapped version
    that will use the mercurial version if indicated.
    `force_hg_version` is used for hgtools itself.

    :param force_hg_version: when True, the Mercurial-derived version is
        used unconditionally and the patch removes itself after its first
        invocation (one-shot behaviour, see below).
    """
    # Deferred imports: pulling in setuptools/pkg_resources only happens
    # when the patch is actually installed.
    from setuptools.command.egg_info import egg_info
    from pkg_resources import safe_version
    import functools
    # Keep the original implementation so it can be delegated to and,
    # in the force_hg_version case, restored.
    orig_ver = egg_info.tagged_version
    @functools.wraps(orig_ver)
    def tagged_version(self):
        # The distribution opts in through either flag; use_vcs_version is
        # the generic spelling, use_hg_version the Mercurial-specific one.
        vcs_param = (
            getattr(self.distribution, 'use_vcs_version', False)
            or getattr(self.distribution, 'use_hg_version', False)
        )
        using_hg_version = force_hg_version or vcs_param
        if force_hg_version:
            # disable patched `tagged_version` to avoid affecting
            # subsequent installs in the same interpreter instance.
            egg_info.tagged_version = orig_ver
        if using_hg_version:
            # Ask the distribution for its (VCS-derived) version and pass it
            # through safe_version for normalization.
            result = safe_version(self.distribution.get_version())
        else:
            # Fall back to the stock setuptools behaviour.
            result = orig_ver(self)
        # Mirror the computed version into tag_build before returning it.
        # NOTE(review): presumably needed so egg_info does not append a
        # further tag -- confirm against setuptools' egg_info internals.
        self.tag_build = result
        return result
    # Module-level side effect: install the wrapper on the egg_info
    # command class for the whole interpreter.
    egg_info.tagged_version = tagged_version
"resource": ""
} |
def get_or_create(name, value):
    """
    Return the storage space registered under *name*.

    If no space exists yet, it is first created with *value*; the
    (possibly pre-existing) entry is then returned.
    """
    # dict.setdefault performs "insert if missing, then read" atomically,
    # matching the original membership-test-then-assign sequence.
    return storage.setdefault(name, value)
"resource": ""
} |
def parse_module(module):
    '''Parse a module's attributes and generate a markdown document.'''
    def _is_documentable(member):
        # Public classes/functions that are defined in this very module
        # (not merely imported into it).
        return (
            (isclass(member) or isfunction(member))
            and member.__module__ == module.__name__
            and not member.__name__.startswith('_')
        )

    # Document header: "## <module name>" followed by a blank line.
    doc_lines = ['## {0}'.format(module.__name__), '']
    if module.__doc__:
        # First element of the parsed docstring pair is the rendered text.
        docstring, _ = _parse_docstring(module.__doc__)
        doc_lines.append(docstring)
    if hasattr(module, '__all__'):
        # Link each exported name to its own markdown page.
        doc_lines.extend(
            '+ [{0}](./{0}.md)'.format(name) for name in module.__all__
        )
    for name, member in getmembers(module):
        if not _is_documentable(member):
            continue
        if isfunction(member):
            doc_lines.append(_parse_function(module, name, member))
        else:
            doc_lines.append(_parse_class(module, name, member))
    # Join, strip stray surrounding whitespace, end with one newline.
    return u'{0}\n'.format(u'\n'.join(doc_lines).strip())
"resource": ""
} |
def Retry(target=None, args=None, kwargs=None, options=None):
    """
    Call *target* repeatedly until it returns a response with status 200.

    options
        retry
            True, infinity retries
            False, no retries
            Number, retries times
        interval
            time period for retry
    return
        None if no success
        Message if success

    :param target: callable performing the request; must return an object
        with a ``code`` attribute.
    :param args: positional arguments for *target* (default: none).
    :param kwargs: keyword arguments for *target* (default: none).
    :param options: dict with ``retry`` and ``interval`` keys
        (default: ``{"retry": True, "interval": 1}``).
    """
    # Fix: the previous defaults were mutable objects ([], {}, a dict)
    # shared across calls; build fresh containers per invocation instead.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    if options is None:
        options = {"retry": True, "interval": 1}
    retry = options["retry"]
    interval = options["interval"]
    while True:
        try:
            resp = target(*args, **kwargs)
            # status 200 is the only success condition
            if resp.code == 200:
                return resp
            _logger.debug("Request got response status: %s"
                          % (resp.code,) + " retry: %s" % (retry,))
        except TimeoutError:
            _logger.debug("Request message is timeout")
            _logger.debug(args)
            _logger.debug(kwargs)
        # register unsuccessful goes here
        # infinity retry
        if retry is True:
            sleep(interval)
            continue
        # no retry
        if retry is False:
            return None
        # counted retries; a non-numeric `retry` raises TypeError to the
        # caller (the old `except TypeError as e: raise e` was a no-op
        # re-raise and has been dropped).
        retry = retry - 1
        if retry <= 0:
            return None
        sleep(interval)
"resource": ""
} |
def create_response(self, message, sign):
    """
    return function for response

    Build a responder closure for *message*: when invoked, it publishes
    a response (derived from *message*, signed with *sign*) on the
    "/controller" topic and waits for the publish to complete.

    :param message: request message providing ``to_response``.
    :param sign: signature passed through to ``message.to_response``.
    :return: callable ``_response(code=200, data=None)``.
    """
    def _response(code=200, data=None):
        """
        _response

        Publish a response carrying *code* and *data*; returns whatever
        ``self._wait_published`` yields for the tracked session.
        """
        resp_msg = message.to_response(code=code, data=data, sign=sign)
        # Publish and session bookkeeping are serialized under the
        # session lock so the mid -> session mapping stays consistent.
        with self._session.session_lock:
            mid = self._conn.publish(topic="/controller",
                                     qos=0, payload=resp_msg.to_dict())
            # Register the outgoing message under its publish mid.
            # NOTE(review): age=10 looks like a time-to-live -- confirm
            # against the session implementation.
            session = self._session.create(resp_msg, mid=mid, age=10)
        logging.debug("sending response as mid: %s" % mid)
        # no_response=True: only wait for the publish acknowledgement,
        # not for a reply message.
        return self._wait_published(session, no_response=True)
    return _response
"resource": ""
} |
def list_assignment_groups(self, course_id, exclude_assignment_submission_types=None, grading_period_id=None, include=None, override_assignment_dates=None, scope_assignments_to_student=None):
    """
    List assignment groups.

    Returns the list of assignment groups for the given course, sorted by
    their position field (GET /api/v1/courses/{course_id}/assignment_groups).
    """
    path = {"course_id": course_id}  # REQUIRED path parameter
    data = {}

    # include: associations to include with each group.  "discussion_topic",
    # "all_dates", "assignment_visibility" and "submission" are only valid
    # when "assignments" is also included; "assignment_visibility" further
    # requires the Differentiated Assignments course feature.
    if include is not None:
        self._validate_enum(include, ["assignments", "discussion_topic", "all_dates", "assignment_visibility", "overrides", "submission"])
    # exclude_assignment_submission_types: when "assignments" are included,
    # groups drop assignments with these submission types.
    if exclude_assignment_submission_types is not None:
        self._validate_enum(exclude_assignment_submission_types, ["online_quiz", "discussion_topic", "wiki_page", "external_tool"])

    optional_query = (
        ("include", include),
        ("exclude_assignment_submission_types", exclude_assignment_submission_types),
        # apply assignment overrides per assignment; server default is true
        ("override_assignment_dates", override_assignment_dates),
        # grading period to scope the request to (requires the Multiple
        # Grading Periods feature)
        ("grading_period_id", grading_period_id),
        # restrict assignments to those applying to the current user in the
        # grading period (requires grading_period_id; user must be a student)
        ("scope_assignments_to_student", scope_assignments_to_student),
    )
    params = {key: value for key, value in optional_query if value is not None}

    self.logger.debug("GET /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/assignment_groups".format(**path), data=data, params=params, all_pages=True)
"resource": ""
} |
def get_assignment_group(self, course_id, assignment_group_id, grading_period_id=None, include=None, override_assignment_dates=None):
    """
    Get an Assignment Group.
    Returns the assignment group with the given id.

    GET /api/v1/courses/{course_id}/assignment_groups/{assignment_group_id}.
    The bare triple-quoted strings below are the generated per-parameter
    notes carried over from the Canvas API documentation.

    :param course_id: course id (REQUIRED path parameter).
    :param assignment_group_id: assignment group id (REQUIRED path parameter).
    :param include: optional associations to include (validated below).
    :param override_assignment_dates: apply assignment overrides per
        assignment; server default is true.
    :param grading_period_id: grading period to scope the request to.
    """
    path = {}
    data = {}
    params = {}
    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id
    # REQUIRED - PATH - assignment_group_id
    """ID"""
    path["assignment_group_id"] = assignment_group_id
    # OPTIONAL - include
    """Associations to include with the group. "discussion_topic" and "assignment_visibility" and "submission"
    are only valid if "assignments" is also included. The "assignment_visibility" option additionally
    requires that the Differentiated Assignments course feature be turned on."""
    if include is not None:
        # raises if any entry is outside the allowed set
        self._validate_enum(include, ["assignments", "discussion_topic", "assignment_visibility", "submission"])
        params["include"] = include
    # OPTIONAL - override_assignment_dates
    """Apply assignment overrides for each assignment, defaults to true."""
    if override_assignment_dates is not None:
        params["override_assignment_dates"] = override_assignment_dates
    # OPTIONAL - grading_period_id
    """The id of the grading period in which assignment groups are being requested
    (Requires the Multiple Grading Periods account feature turned on)"""
    if grading_period_id is not None:
        params["grading_period_id"] = grading_period_id
    self.logger.debug("GET /api/v1/courses/{course_id}/assignment_groups/{assignment_group_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/assignment_groups/{assignment_group_id}".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def create_assignment_group(self, course_id, group_weight=None, integration_data=None, name=None, position=None, rules=None, sis_source_id=None):
    """
    Create an Assignment Group.

    POSTs a new assignment group to the given course
    (POST /api/v1/courses/{course_id}/assignment_groups) and returns
    the created group.
    """
    path = {"course_id": course_id}  # REQUIRED path parameter
    params = {}

    # Optional form fields; only values the caller supplied are sent.
    form_fields = (
        ("name", name),                          # the assignment group's name
        ("position", position),                  # position relative to the other groups
        ("group_weight", group_weight),          # percent of the total grade this group represents
        ("sis_source_id", sis_source_id),        # SIS source id of the group
        ("integration_data", integration_data),  # integration data of the group
        ("rules", rules),                        # grading rules applied within this group
                                                 # (see the Assignment Group object definition)
    )
    data = {field: value for field, value in form_fields if value is not None}

    self.logger.debug("POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/assignment_groups".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def edit_assignment_group(self, course_id, assignment_group_id):
    """
    Edit an Assignment Group.

    Modify an existing assignment group
    (PUT /api/v1/courses/{course_id}/assignment_groups/{assignment_group_id});
    accepts the same parameters as assignment group creation.
    """
    # Both ids are REQUIRED path parameters.
    path = {
        "course_id": course_id,
        "assignment_group_id": assignment_group_id,
    }
    data = {}    # no form fields handled by this wrapper
    params = {}  # no query parameters handled by this wrapper
    self.logger.debug("PUT /api/v1/courses/{course_id}/assignment_groups/{assignment_group_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignment_groups/{assignment_group_id}".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def update_question_group(self, id, quiz_id, course_id, quiz_groups_name=None, quiz_groups_pick_count=None, quiz_groups_question_points=None):
    """
    Update a question group.

    PUTs the supplied quiz_groups[...] fields for the group identified by
    *course_id* / *quiz_id* / *id*
    (PUT /api/v1/courses/{course_id}/quizzes/{quiz_id}/groups/{id}).
    """
    # REQUIRED path parameters.
    path = {
        "course_id": course_id,
        "quiz_id": quiz_id,
        "id": id,
    }
    params = {}

    # Optional quiz_groups[...] form fields; only supplied values are sent.
    form_fields = (
        ("quiz_groups[name]", quiz_groups_name),                        # the name of the question group
        ("quiz_groups[pick_count]", quiz_groups_pick_count),            # number of questions randomly selected
        ("quiz_groups[question_points]", quiz_groups_question_points),  # points per question in the group
    )
    data = {field: value for field, value in form_fields if value is not None}

    self.logger.debug("PUT /api/v1/courses/{course_id}/quizzes/{quiz_id}/groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/groups/{id}".format(**path), data=data, params=params, no_data=True)
"resource": ""
} |
def list_your_courses(self, enrollment_role=None, enrollment_role_id=None, enrollment_state=None, enrollment_type=None, include=None, state=None):
    """
    List your courses.
    Returns the list of active courses for the current user.

    GET /api/v1/courses.  Every argument is an optional filter; the bare
    triple-quoted strings below are the generated per-parameter notes
    carried over from the Canvas API documentation.  Enum-valued
    arguments are checked with ``self._validate_enum`` and raise on
    values outside the allowed set.
    """
    path = {}
    data = {}
    params = {}
    # OPTIONAL - enrollment_type
    """When set, only return courses where the user is enrolled as this type. For
    example, set to "teacher" to return only courses where the user is
    enrolled as a Teacher. This argument is ignored if enrollment_role is given."""
    if enrollment_type is not None:
        self._validate_enum(enrollment_type, ["teacher", "student", "ta", "observer", "designer"])
        params["enrollment_type"] = enrollment_type
    # OPTIONAL - enrollment_role
    """Deprecated
    When set, only return courses where the user is enrolled with the specified
    course-level role. This can be a role created with the
    {api:RoleOverridesController#add_role Add Role API} or a base role type of
    'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment', 'ObserverEnrollment',
    or 'DesignerEnrollment'."""
    if enrollment_role is not None:
        params["enrollment_role"] = enrollment_role
    # OPTIONAL - enrollment_role_id
    """When set, only return courses where the user is enrolled with the specified
    course-level role. This can be a role created with the
    {api:RoleOverridesController#add_role Add Role API} or a built_in role type of
    'StudentEnrollment', 'TeacherEnrollment', 'TaEnrollment', 'ObserverEnrollment',
    or 'DesignerEnrollment'."""
    if enrollment_role_id is not None:
        params["enrollment_role_id"] = enrollment_role_id
    # OPTIONAL - enrollment_state
    """When set, only return courses where the user has an enrollment with the given state.
    This will respect section/course/term date overrides."""
    if enrollment_state is not None:
        self._validate_enum(enrollment_state, ["active", "invited_or_pending", "completed"])
        params["enrollment_state"] = enrollment_state
    # OPTIONAL - include
    """- "needs_grading_count": Optional information to include with each Course.
    When needs_grading_count is given, and the current user has grading
    rights, the total number of submissions needing grading for all
    assignments is returned.
    - "syllabus_body": Optional information to include with each Course.
    When syllabus_body is given the user-generated html for the course
    syllabus is returned.
    - "public_description": Optional information to include with each Course.
    When public_description is given the user-generated text for the course
    public description is returned.
    - "total_scores": Optional information to include with each Course.
    When total_scores is given, any student enrollments will also
    include the fields 'computed_current_score', 'computed_final_score',
    'computed_current_grade', and 'computed_final_grade' (see Enrollment
    documentation for more information on these fields). This argument
    is ignored if the course is configured to hide final grades.
    - "current_grading_period_scores": Optional information to include with
    each Course. When current_grading_period_scores is given and total_scores
    is given, any student enrollments will also include the fields
    'multiple_grading_periods_enabled',
    'totals_for_all_grading_periods_option', 'current_grading_period_title',
    'current_grading_period_id', current_period_computed_current_score',
    'current_period_computed_final_score',
    'current_period_computed_current_grade', and
    'current_period_computed_final_grade' (see Enrollment documentation for
    more information on these fields). In addition, when this argument is
    passed, the course will have a 'multiple_grading_periods_enabled' attribute
    on it. This argument is ignored if the course is configured to hide final
    grades or if the total_scores argument is not included.
    - "term": Optional information to include with each Course. When
    term is given, the information for the enrollment term for each course
    is returned.
    - "course_progress": Optional information to include with each Course.
    When course_progress is given, each course will include a
    'course_progress' object with the fields: 'requirement_count', an integer
    specifying the total number of requirements in the course,
    'requirement_completed_count', an integer specifying the total number of
    requirements in this course that have been completed, and
    'next_requirement_url', a string url to the next requirement item, and
    'completed_at', the date the course was completed (null if incomplete).
    'next_requirement_url' will be null if all requirements have been
    completed or the current module does not require sequential progress.
    "course_progress" will return an error message if the course is not
    module based or the user is not enrolled as a student in the course.
    - "sections": Section enrollment information to include with each Course.
    Returns an array of hashes containing the section ID (id), section name
    (name), start and end dates (start_at, end_at), as well as the enrollment
    type (enrollment_role, e.g. 'StudentEnrollment').
    - "storage_quota_used_mb": The amount of storage space used by the files in this course
    - "total_students": Optional information to include with each Course.
    Returns an integer for the total amount of active and invited students.
    - "passback_status": Include the grade passback_status
    - "favorites": Optional information to include with each Course.
    Indicates if the user has marked the course as a favorite course.
    - "teachers": Teacher information to include with each Course.
    Returns an array of hashes containing the {api:Users:UserDisplay UserDisplay} information
    for each teacher in the course.
    - "observed_users": Optional information to include with each Course.
    Will include data for observed users if the current user has an
    observer enrollment.
    - "tabs": Optional information to include with each Course.
    Will include the list of tabs configured for each course. See the
    {api:TabsController#index List available tabs API} for more information."""
    if include is not None:
        self._validate_enum(include, ["needs_grading_count", "syllabus_body", "public_description", "total_scores", "current_grading_period_scores", "term", "course_progress", "sections", "storage_quota_used_mb", "total_students", "passback_status", "favorites", "teachers", "observed_users"])
        params["include"] = include
    # OPTIONAL - state
    """If set, only return courses that are in the given state(s).
    By default, "available" is returned for students and observers, and
    anything except "deleted", for all other enrollment types"""
    if state is not None:
        self._validate_enum(state, ["unpublished", "available", "completed", "deleted"])
        params["state"] = state
    self.logger.debug("GET /api/v1/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses".format(**path), data=data, params=params, all_pages=True)
"resource": ""
} |
q41935 | CoursesAPI.list_courses_for_user | train | def list_courses_for_user(self, user_id, enrollment_state=None, include=None, state=None):
"""
List courses for a user.
Returns a list of active courses for this user. To view the course list for a user other than yourself, you must be either an observer of that user or an administrator.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - include
"""- "needs_grading_count": Optional information to include with each Course.
When needs_grading_count is given, and the current user has grading
rights, the total number of submissions needing grading for all
assignments is returned.
- "syllabus_body": Optional information to include with each Course.
When syllabus_body is given the user-generated html for the course
syllabus is returned.
- "public_description": Optional information to include with each Course.
When public_description is given the user-generated text for the course
public description is returned.
- "total_scores": Optional information to include with each Course.
When total_scores is given, any student enrollments will also
include the fields 'computed_current_score', 'computed_final_score',
'computed_current_grade', and 'computed_final_grade' (see Enrollment
documentation for more information on these fields). This argument
is ignored if the course is configured to hide final grades.
- "current_grading_period_scores": Optional information to include with
each Course. When current_grading_period_scores is given and total_scores
is given, any student enrollments will also include the fields
'multiple_grading_periods_enabled',
'totals_for_all_grading_periods_option', 'current_grading_period_title',
'current_grading_period_id', current_period_computed_current_score',
'current_period_computed_final_score',
'current_period_computed_current_grade', and
'current_period_computed_final_grade' (see Enrollment documentation for
more information on these fields). In addition, when this argument is
passed, the course will have a 'multiple_grading_periods_enabled' attribute
on it. This argument is ignored if the course is configured to hide final
grades or if the total_scores argument is not included.
- "term": Optional information to include with each Course. When
term is given, the information for the enrollment term for each course
is returned.
- "course_progress": Optional information to include with each Course.
When course_progress is given, each course will include a
'course_progress' object with the fields: 'requirement_count', an integer
specifying the total number of requirements in the course,
'requirement_completed_count', an integer specifying the total number of
requirements in this course that have been completed, and
'next_requirement_url', a string url to the next requirement item, and
'completed_at', the date the course was completed (null if incomplete).
'next_requirement_url' will be null if all requirements have been
completed or the current module does not require sequential progress.
"course_progress" will return an error message if the course is not
module based or the user is not enrolled as a student in the course.
- "sections": Section enrollment information to include with each Course.
Returns an array of hashes containing the section ID (id), section name
(name), start and end dates (start_at, end_at), as well as the enrollment
type (enrollment_role, e.g. 'StudentEnrollment').
- "storage_quota_used_mb": The amount of storage space used by the files in this course
- "total_students": Optional information to include with each Course.
Returns an integer for the total amount of active and invited students.
- "passback_status": Include the grade passback_status
- "favorites": Optional information to include with each Course.
Indicates if the user has marked the course as a favorite course.
- "teachers": Teacher information to include with each Course.
Returns an array of hashes containing the {api:Users:UserDisplay UserDisplay} information
for each teacher in the course.
- "observed_users": Optional information to include with each Course.
Will include data for observed users if the current user has an
observer enrollment.
- "tabs": Optional information to include with each Course.
Will include the list of tabs configured for each course. See the
{api:TabsController#index List available tabs API} for more information."""
if include is not None:
self._validate_enum(include, ["needs_grading_count", "syllabus_body", "public_description", "total_scores", "current_grading_period_scores", "term", "course_progress", "sections", "storage_quota_used_mb", "total_students", "passback_status", "favorites", "teachers", "observed_users"])
params["include"] = include
# OPTIONAL - state
"""If set, only return courses that are in the given state(s).
By default, "available" is returned for students and observers, and
anything except "deleted", for all other enrollment types"""
if state is not None:
self._validate_enum(state, ["unpublished", "available", "completed", "deleted"])
params["state"] = state
# OPTIONAL - enrollment_state
"""When set, only return courses where the user has an enrollment with the given state.
This will respect section/course/term date overrides."""
if enrollment_state is not None:
self._validate_enum(enrollment_state, ["active", "invited_or_pending", "completed"])
params["enrollment_state"] = enrollment_state
self.logger.debug("GET /api/v1/users/{user_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/courses".format(**path), data=data, params=params, all_pages=True) | python | {
"resource": ""
} |
def list_users_in_course_users(self, course_id, enrollment_role=None, enrollment_role_id=None, enrollment_state=None, enrollment_type=None, include=None, search_term=None, user_id=None, user_ids=None):
    """List users in course.

    Returns the list of users in this course, optionally with each user's
    enrollments in the course. All keyword arguments are optional query
    parameters; each is sent to the API only when supplied.
    """
    data = {}
    # Required path parameter.
    path = {"course_id": course_id}
    params = {}
    if search_term is not None:
        # Partial name or full ID of the users to match.
        params["search_term"] = search_term
    if enrollment_type is not None:
        # "student_view" implies include[]=test_student; ignored when
        # enrollment_role is given.
        self._validate_enum(enrollment_type, ["teacher", "student", "student_view", "ta", "observer", "designer"])
        params["enrollment_type"] = enrollment_type
    if enrollment_role is not None:
        # Deprecated in favour of enrollment_role_id.
        params["enrollment_role"] = enrollment_role
    if enrollment_role_id is not None:
        # Course-level role id (custom role or built-in enrollment type).
        params["enrollment_role_id"] = enrollment_role_id
    if include is not None:
        # Extra associations to embed with each user (email, enrollments,
        # locked, avatar_url, bio, test_student, custom_links).
        self._validate_enum(include, ["email", "enrollments", "locked", "avatar_url", "test_student", "bio", "custom_links"])
        params["include"] = include
    if user_id is not None:
        # When given, the page containing this user is returned and the
        # +page+ parameter is ignored.
        params["user_id"] = user_id
    if user_ids is not None:
        # Restrict the result set to these user IDs.
        params["user_ids"] = user_ids
    if enrollment_state is not None:
        # "active" and "invited" enrollments are returned by default.
        self._validate_enum(enrollment_state, ["active", "invited", "rejected", "completed", "inactive"])
        params["enrollment_state"] = enrollment_state
    self.logger.debug("GET /api/v1/courses/{course_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/users".format(**path), data=data, params=params, all_pages=True)
def conclude_course(self, id, event):
    """Conclude a course.

    Delete or conclude an existing course.

    :param id: course ID (path parameter)
    :param event: action to take; must be "delete" or "conclude"
    """
    data = {}
    path = {"id": id}
    # Only the two supported events are accepted by the API.
    self._validate_enum(event, ["delete", "conclude"])
    params = {"event": event}
    self.logger.debug("DELETE /api/v1/courses/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/courses/{id}".format(**path), data=data, params=params, no_data=True)
def reset_course(self, course_id):
    """Reset a course.

    Deletes the current course and creates a new equivalent course with
    no content, but all sections and users moved over.
    """
    data = {}
    params = {}
    # The course id is the only input; it is interpolated into the path.
    path = {"course_id": course_id}
    self.logger.debug("POST /api/v1/courses/{course_id}/reset_content with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/reset_content".format(**path), data=data, params=params, single_item=True)
def permissions(self, course_id, permissions=None):
    """Permissions.

    Returns permission information for the provided course and the
    current user.

    :param course_id: course ID (path parameter)
    :param permissions: optional list of permissions to check against the
        authenticated user
    """
    data = {}
    path = {"course_id": course_id}
    params = {}
    if permissions is not None:
        params["permissions"] = permissions
    self.logger.debug("GET /api/v1/courses/{course_id}/permissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/permissions".format(**path), data=data, params=params, no_data=True)
def page(title, description, element_list=None, tab_list=None):
    """
    Return a dictionary representing a new page to display elements.

    A simple container for multiple kinds of information; tabs can be
    used to create separate sections within the page.

    Args:
        title: The title to display
        description: A description of the section
        element_list: The list of elements to display. A single element is
            wrapped in a list.
        tab_list: A list of tabs to display. A single tab is wrapped in a
            list.

    Returns:
        A dictionary with metadata specifying that it is to be rendered
        as a page containing multiple elements and/or tabs.
    """
    data = {}
    if element_list is not None:
        # Normalize a lone element into a one-item list.
        data['Elements'] = element_list if isinstance(element_list, list) else [element_list]
    if tab_list is not None:
        data['Tabs'] = tab_list if isinstance(tab_list, list) else [tab_list]
    return {
        'Type': 'Page',
        'Title': title,
        'Description': description,
        'Data': data,
    }
def tab(tab_name, element_list=None, section_list=None):
    """
    Return a dictionary representing a new tab to display elements.

    A simple container for displaying multiple types of information.

    Args:
        tab_name: The title to display
        element_list: The list of elements to display. A single element is
            wrapped in a list.
        section_list: A list of sections to display. A single section is
            wrapped in a list.

    Returns:
        A dictionary with metadata specifying that it is to be rendered
        as a tab containing multiple elements and/or sections.
    """
    _tab = {
        'Type': 'Tab',
        'Title': tab_name,
    }
    if element_list is not None:
        if isinstance(element_list, list):
            _tab['Elements'] = element_list
        else:
            _tab['Elements'] = [element_list]
    if section_list is not None:
        # BUG FIX: the non-list branch previously manipulated 'Elements'
        # using element_list (and could KeyError when 'Elements' was unset);
        # a single section is now wrapped and stored under 'Sections',
        # mirroring the element_list handling above.
        if isinstance(section_list, list):
            _tab['Sections'] = section_list
        else:
            _tab['Sections'] = [section_list]
    return _tab
def section(title, element_list):
    """
    Return a dictionary representing a new section.

    Sections contain a list of elements that are displayed separately
    from the global elements on the page.

    Args:
        title: The title of the section to be displayed
        element_list: The list of elements to display within the section;
            a single element is wrapped in a list

    Returns:
        A dictionary with metadata specifying that it is to be rendered
        as a section containing multiple elements
    """
    elements = element_list if isinstance(element_list, list) else [element_list]
    return {
        'Type': 'Section',
        'Title': title,
        'Elements': elements,
    }
def image(title, desc, image_name, group=None, height=None):
    """
    Build an image element.

    Image elements are primarily created and then wrapped into an image
    gallery element, though independent usage is allowed. The Javascript
    looks up `image_name` in the component's `imgs` directory when
    rendering.

    Args:
        title: The title to display
        desc: A description of the image or plot
        image_name: The filename of the image
        group: (optional) Title of lightbox group to join
        height: (optional) Height of image thumbnail to draw

    Returns:
        A dictionary with the metadata specifying that it is to be
        rendered as an image element
    """
    element = {
        'Type': 'Image',
        'Title': title,
        'Description': desc,
        'Plot File': image_name,
    }
    # Optional attributes are only attached when truthy, matching the
    # original behavior (empty strings/0 are omitted).
    for key, value in (('Group', group), ('Height', height)):
        if value:
            element[key] = value
    return element
def commands(self):
    """
    Fetch individual SQL commands from a SQL script containing many commands.

    The splitter algorithm is chosen by self.split_algo; unknown values
    fall back to the character-level 'sql_split' parser.

    :return: List of commands with 'dbo.' table prefixes removed
    """
    print('\tRetrieving commands from', self.sql_script)
    print('\tUsing command splitter algorithm {0}'.format(self.split_algo))
    with Timer('\tRetrieved commands in'):
        # BUG FIX: the original compared strings with 'is', which tests
        # object identity and only works by accident of CPython string
        # interning; '==' is the correct comparison.
        if self.split_algo == 'sql_parse':
            # sqlparse package's split combined with sql_split.
            commands = SplitCommands(self.sql_script).sql_parse
        elif self.split_algo == 'simple_split':
            # Split on every ';' (unreliable).
            commands = SplitCommands(self.sql_script).simple_split()
        elif self.split_algo == 'sql_parse_nosplit':
            # sqlparse package without additional splitting.
            commands = SplitCommands(self.sql_script).sql_parse_nosplit
        else:
            # 'sql_split' (and any unknown value): parse every char of the
            # SQL commands and determine breakpoints.
            commands = SplitCommands(self.sql_script).sql_split(disable_tqdm=False)
    # Remove dbo. prefixes from table names.
    return [com.replace("dbo.", '') for com in commands]
def execute(self, commands=None, ignored_commands=('DROP', 'UNLOCK', 'LOCK'), execute_fails=True,
            max_executions=MAX_EXECUTION_ATTEMPTS):
    """
    Sequentially execute a list of SQL commands.

    Check if commands property has already been fetched, if so use the
    fetched_commands rather than getting them again.

    :param commands: List of SQL commands (falls back to self.commands)
    :param ignored_commands: Command prefixes to filter out before execution
    :param execute_fails: Boolean, attempt to execute failed commands again
    :param max_executions: Int, max number of attempted executions
    :return: (failed commands, count of successful commands)
    """
    # Break connection
    self._MySQL.disconnect()
    self._execute_iters += 1
    # NOTE(review): after the increment above this condition is always
    # true — possibly intended to suppress the message on the first pass;
    # confirm before changing.
    if self._execute_iters > 0:
        print('\tExecuting commands attempt #{0}'.format(self._execute_iters))
    # Retrieve commands from sql_script if no commands are provided
    commands = self.commands if not commands else commands
    # Remove 'DROP' (and other ignored) commands
    if ignored_commands:
        commands = filter_commands(commands, ignored_commands)
    # Reestablish connection
    self._MySQL.reconnect()
    # Execute list of commands
    fail, success = self._execute_commands(commands)
    # Dump failed commands to text files
    print('\t' + str(success), 'successful commands')
    # NOTE(review): 'len(fail) > 1' skips the retry/dump path when exactly
    # one command failed — possibly intended '>= 1'; confirm.
    if len(fail) > 1 and self._dump_fails:
        # Dump failed commands
        dump_dir = self.dump_commands(fail)
        # Execute failed commands (recurses until max_executions is hit)
        if execute_fails and self._execute_iters < max_executions:
            return self._execute_commands_from_dir(dump_dir)
    return fail, success
def _execute_commands(self, commands, fails=False):
    """
    Execute commands and return failed commands plus a success count.

    :param commands: Iterable of SQL command strings
    :param fails: True when re-running previously failed commands
        (only changes the progress-bar label)
    :return: (list of failed commands, int count of successes)
    """
    # Optionally run each command through prepare_sql first.
    if self._prep_statements:
        prepared_commands = [prepare_sql(c) for c in tqdm(commands, total=len(commands),
                                                          desc='Prepping SQL Commands')]
        print('\tCommands prepared', len(prepared_commands))
    else:
        prepared_commands = commands
    desc = 'Executing SQL Commands' if not fails else 'Executing Failed SQL Commands'
    fail, success = [], 0
    for command in tqdm(prepared_commands, total=len(prepared_commands), desc=desc):
        # Attempt to execute the command; collect it on failure instead of
        # aborting the whole batch.
        # BUG FIX: the bare 'except:' also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to 'except Exception'.
        try:
            self._MySQL.executemore(command)
            success += 1
        except Exception:
            fail.append(command)
    self._MySQL._commit()
    return fail, success
def _execute_commands_from_dir(self, directory):
    """Re-attempt to split and execute the failed commands dumped to *directory*."""
    # Load the previously dumped command files back into memory.
    failed_commands = get_commands_from_dir(directory)
    print('\tAttempting to execute {0} failed commands'.format(len(failed_commands)))
    # Filtering was already applied on the first pass, so it is disabled here.
    return self.execute(failed_commands, ignored_commands=None, execute_fails=True)
def dump_commands(self, commands):
    """Dump commands wrapper for external access."""
    # Failed commands are written next to the source script under 'fails/'.
    directory = os.path.join(os.path.dirname(self.sql_script), 'fails')
    # The script's base name (text before the first '.') names the folder.
    script_name = os.path.basename(self.sql_script.rsplit('.')[0])
    # Delegates to the module-level dump_commands helper.
    return dump_commands(commands, directory, script_name)
def get_unresolved_variables(f):
    """
    Collect unresolved variables from file *f*.

    Runs pyflakes' checkPath with a collecting reporter and returns the
    reported messages as a dict.
    """
    collector = RReporter()
    checkPath(f, reporter=collector)
    return dict(collector.messages)
def index_modules(idx=None, path=None):
    """
    Index objects exported by all importable modules.

    :param idx: Existing index dict to merge the results into (may be None)
    :param path: Optional package path list restricting the walk
    :return: Mapping of object name -> list of module names providing it,
        merged with *idx*
    """
    suppress_output()
    modules = defaultdict(list)
    # BUG FIX: removed a leftover debug print(pkglist) and the redundant
    # unconditional walk that was discarded whenever *path* was given.
    if path:
        pkglist = pkgutil.walk_packages(path, onerror=lambda x: True)
    else:
        pkglist = pkgutil.walk_packages(onerror=lambda x: True)
    for modl, name, ispkg in pkglist:
        try:
            mod_path = os.path.join(modl.path, name.split('.')[-1])
        except AttributeError:
            # Triggered on zipimport.zipimporter
            continue
        if os.path.isdir(mod_path):
            mod_path = os.path.join(mod_path, '__init__')
        mod_path += '.py'
        objs = []
        if os.path.exists(mod_path):
            # Prefer static extraction from the source file.
            # BUG FIX: narrowed bare 'except:' clauses to 'except Exception'
            # so KeyboardInterrupt/SystemExit are not swallowed.
            try:
                objs = read_objs_from_path(mod_path)
            except Exception:
                continue
        elif not re.search(MODULE_BLACKLIST, name):
            # Fall back to importing the module and inspecting dir().
            try:
                mod = __import__(name)
                objs = [k for k in dir(mod) if not k.startswith('__')]
            except Exception:
                continue
        else:
            continue
        for obj in objs:
            if name not in modules[obj]:
                modules[obj].append(name)
    suppress_output(True)
    return merge_dicts(idx, dict(modules))
def includes(self):
    """ return includes from config """
    # For each include group, deep-copy the entries and sort them by their
    # optional "order" key (a missing order sorts as 0).
    r = dict([(k, sorted(copy.deepcopy(v).values(), key=lambda x:x.get("order",0))) for k,v in self.get_config("includes").items()])
    # When a version is configured, rewrite each include's path through
    # versioned_url so versioned (cache-busting) URLs are served.
    if self.version is not None:
        for k,v in r.items():
            for j in v:
                j["path"] = self.versioned_url(j["path"])
    return r
def render(self, tmpl_name, request_env):
    """
    Render the specified template and return the output.

    Args:
        tmpl_name (str): file name of the template
        request_env (dict): request environment

    Returns:
        str - the rendered template
    """
    # Delegate straight to the parent class implementation.
    rendered = super(WebApplication, self).render(tmpl_name, request_env)
    return rendered
def list_submissions_for_multiple_assignments_courses(self, course_id, assignment_ids=None, grading_period_id=None, grouped=None, include=None, order=None, order_direction=None, student_ids=None):
    """List submissions for multiple assignments.

    Get all existing submissions for a given set of students and
    assignments. All keyword arguments are optional query parameters and
    are sent only when supplied.
    """
    data = {}
    # Required path parameter.
    path = {"course_id": course_id}
    params = {}
    if student_ids is not None:
        # The special id "all" returns submissions for all students in the
        # course/section as appropriate.
        params["student_ids"] = student_ids
    if assignment_ids is not None:
        # Without this, submissions for all assignments are returned.
        params["assignment_ids"] = assignment_ids
    if grouped is not None:
        # Group the response by student instead of a flat submission array.
        params["grouped"] = grouped
    if grading_period_id is not None:
        params["grading_period_id"] = grading_period_id
    if order is not None:
        # Ordering defaults to "id" and is ignored in "grouped" mode.
        self._validate_enum(order, ["id", "graded_at"])
        params["order"] = order
    if order_direction is not None:
        self._validate_enum(order_direction, ["ascending", "descending"])
        params["order_direction"] = order_direction
    if include is not None:
        # `total_scores` requires the `grouped` argument.
        self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "total_scores", "visibility", "course", "user"])
        params["include"] = include
    self.logger.debug("GET /api/v1/courses/{course_id}/students/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/students/submissions".format(**path), data=data, params=params, no_data=True)
def parse(self, data):
    """Parse operator and value from filter's data.

    Returns a tuple of (operator_callable, deserialized_value) pairs.
    A plain (non-dict) value is shorthand for an equality filter; a dict
    maps operator names (e.g. '$in') to raw values. Operator names not in
    self.operators are silently ignored.
    """
    # `missing` is the sentinel used when the filter name is absent.
    val = data.get(self.name, missing)
    if not isinstance(val, dict):
        # Shorthand form: {name: value} means "$eq".
        return (self.operators['$eq'], self.field.deserialize(val)),
    # Operators listed in self.list_ops take a list of values, so each
    # item is deserialized individually; other operators take one value.
    return tuple(
        (
            self.operators[op],
            (self.field.deserialize(val)) if op not in self.list_ops else [
                self.field.deserialize(v) for v in val])
        for (op, val) in val.items() if op in self.operators
    )
def apply(self, collection, ops, **kwargs):
    """Apply the filter to collection.

    :param collection: Iterable of objects to filter
    :param ops: Iterable of (operator, value) pairs; an object is kept only
        when every operator(obj, value) is truthy
    :return: List of objects that pass all operations
    """
    # IDIOM FIX: replaced a lambda assignment (PEP 8 E731) with a named
    # inner function; behavior is unchanged.
    def matches(obj):
        return all(op(obj, val) for (op, val) in ops)
    return [o for o in collection if matches(o)]
def pages():
    """Load pages."""
    # Two demo pages: one rendered with the package's default template and
    # one with an application-local template.
    default_page = Page(
        url='/example1',
        title='My page with default template',
        description='my description',
        content='hello default page',
        template_name='invenio_pages/default.html',
    )
    custom_page = Page(
        url='/example2',
        title='My page with my template',
        description='my description',
        content='hello my page',
        template_name='app/mytemplate.html',
    )
    with db.session.begin_nested():
        db.session.add(default_page)
        db.session.add(custom_page)
    db.session.commit()
def list_poll_choices_in_poll(self, poll_id):
    """List poll choices in a poll.

    Returns the list of PollChoices in this poll.
    """
    data = {}
    params = {}
    # The poll id is the only input; it is interpolated into the path.
    path = {"poll_id": poll_id}
    self.logger.debug("GET /api/v1/polls/{poll_id}/poll_choices with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/polls/{poll_id}/poll_choices".format(**path), data=data, params=params, no_data=True)
def create_single_poll_choice(self, poll_id, poll_choices_text, poll_choices_is_correct=None, poll_choices_position=None):
    """Create a single poll choice.

    Create a new poll choice for this poll.
    """
    params = {}
    path = {"poll_id": poll_id}
    # The descriptive text of the poll choice is required.
    data = {"poll_choices[text]": poll_choices_text}
    if poll_choices_is_correct is not None:
        # Whether this choice is considered correct (server default: false).
        data["poll_choices[is_correct]"] = poll_choices_is_correct
    if poll_choices_position is not None:
        # Ordering relative to sibling poll choices.
        data["poll_choices[position]"] = poll_choices_position
    self.logger.debug("POST /api/v1/polls/{poll_id}/poll_choices with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_choices".format(**path), data=data, params=params, no_data=True)
def select_token(request, scopes='', new=False):
    """
    Presents the user with a selection of applicable tokens for the requested view.

    :param request: the incoming Django request
    :param scopes: ESI scopes the tokens must carry
    :param new: force acquisition of a new token when True
    """
    # The tokens_required decorator resolves the user's matching tokens
    # and passes them to the wrapped view as the second argument.
    @tokens_required(scopes=scopes, new=new)
    def _token_list(r, tokens):
        context = {
            'tokens': tokens,
            'base_template': app_settings.ESI_BASE_TEMPLATE,
        }
        return render(r, 'esi/select_token.html', context=context)
    # Invoke the decorated inner view immediately with the original request.
    return _token_list(request)
def get_primary_key(self, table):
    """Retrieve the column which is the primary key for a table.

    Returns None when the table has no primary key.
    """
    # Schema rows expose the key flag at index 3; 'PRI' (case-insensitive)
    # marks the primary-key column. Index 0 is the column name.
    matches = (col[0] for col in self.get_schema(table)
               if len(col) > 3 and 'pri' in col[3].lower())
    return next(matches, None)
def set_primary_key(self, table, column):
    """Create a Primary Key constraint on a specific column when the table is already created."""
    statement = 'ALTER TABLE {0} ADD PRIMARY KEY ({1})'.format(wrap(table), column)
    self.execute(statement)
    # Report the change through the configured printer.
    self._printer('\tAdded primary key to {0} on column {1}'.format(wrap(table), column))
def set_primary_keys_auto(self, tables=None):
    """
    Create primary keys for every table in the connected database.

    Checks that each table has a primary key. If a table does not have a
    key then each column is analyzed to determine if it contains only
    unique values. If no such column exists a new 'ID' column is created
    to serve as an auto-incrementing primary key.

    :return: List of (table, primary_key) tuples
    """
    # Fall back to every table in the database when none are supplied.
    if not tables:
        tables = self.tables
    results = []
    for table in tables:
        results.append((table, self.set_primary_key_auto(table)))
    return results
def set_primary_key_auto(self, table):
    """
    Analyze a table and set a primary key.

    Determine the primary key by identifying a column with unique values
    or creating a new column.

    :param table: Table to alter
    :return: Primary Key column
    """
    # Nothing to do when the table already has a primary key.
    existing = self.get_primary_key(table)
    if existing:
        return existing
    # Promote an existing all-unique column when one can be found.
    unique_col = self.get_unique_column(table)
    if unique_col:
        self.set_primary_key(table, unique_col)
        return unique_col
    # Otherwise create a new auto-incrementing 'ID' column.
    return self.add_column(table, primary_key=True)
def drop_primary_key(self, table):
    """Drop a Primary Key constraint for a specific table."""
    # Only attempt the drop when the table actually has a primary key.
    if not self.get_primary_key(table):
        return
    self.execute('ALTER TABLE {0} DROP PRIMARY KEY'.format(wrap(table)))
def set_foreign_key(self, parent_table, parent_column, child_table, child_column):
    """Create a Foreign Key constraint on a column from a table."""
    statement = 'ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'.format(
        parent_table, parent_column, child_table, child_column)
    self.execute(statement)
def _is_api_key_correct(request):
    """Return whether the Geckoboard API key on the request is correct.

    The key is expected as the username of an HTTP Basic Authorization
    header. When no GECKOBOARD_API_KEY is configured, every request is
    accepted.
    """
    api_key = getattr(settings, 'GECKOBOARD_API_KEY', None)
    if api_key is None:
        return True
    auth = request.META.get('HTTP_AUTHORIZATION', '').split()
    if len(auth) == 2:
        # BUG FIX: on Python 3 the header value is str, so the original
        # comparison against the bytes literal b'basic' was always False,
        # and the decoded key (bytes) could never equal a str api_key.
        # Decode after base64 so both sides are str.
        if auth[0].lower() == 'basic':
            decoded = base64.b64decode(auth[1]).decode('utf-8')
            request_key = decoded.split(':')[0]
            return request_key == api_key
    return False
def _encrypt(data):
    """Equivalent to OpenSSL using 256 bit AES in CBC mode"""
    BS = AES.block_size
    # PKCS#7-style padding: append n bytes, each of value n, so the
    # plaintext length is a multiple of the block size.
    def pad(s):
        n = BS - len(s) % BS
        char = chr(n).encode('utf8')
        return s + n * char
    password = settings.GECKOBOARD_PASSWORD
    # Mirror OpenSSL's salted format: b'Salted__' + 8-byte salt prefix.
    salt = Random.new().read(BS - len('Salted__'))
    # Derive a 256-bit key and a block-sized IV from password + salt.
    key, iv = _derive_key_and_iv(password, salt, 32, BS)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    encrypted = b'Salted__' + salt + cipher.encrypt(pad(data))
    return base64.b64encode(encrypted)
def _render(request, data, encrypted, format=None):
    """
    Render the data to Geckoboard. If the `format` parameter is passed
    to the widget it defines the output format. Otherwise the output
    format is based on the `format` request parameter.

    A `format` parameter of ``json`` or ``2`` renders JSON output, any
    other value renders XML.
    """
    # Fall back to the request's POST, then GET, 'format' parameter.
    if not format:
        format = request.POST.get('format', '') or request.GET.get('format', '')
    if format in ('json', '2'):
        return _render_json(data, encrypted)
    return _render_xml(data, encrypted)
def print_report(runner_results):
    """
    Print collated report with output and errors if any
    """
    # Bucket each host by its error text, then by its output text,
    # otherwise count it as a success.
    errors = collections.defaultdict(list)
    outputs = collections.defaultdict(list)
    succeeded = []
    for info in runner_results:
        host = info['console']
        if info['error']:
            errors[info['error']].append(host)
        elif info['output']:
            outputs[info['output']].append(host)
        else:
            succeeded.append(host)
    if errors:
        print("Errors : ")
        for err, hosts in errors.items():
            print("{0} -- [{1}] {2}".format(err.strip(), len(hosts), ", ".join(hosts)))
        print()
    if outputs:
        for out, hosts in outputs.items():
            print("{0} -- [{1}] {2}".format(out, len(hosts), ", ".join(hosts)))
    if succeeded:
        print("Completed config on {0} hosts".format(len(succeeded)))
def ipmi_method(self, command):
    """Use ipmitool to run commands with ipmi protocol

    Stores the result on self.output / self.error and self.status.
    """
    ipmi = ipmitool(self.console, self.password, self.username)
    # A "reboot" of a powered-off chassis is really a power-on: probe the
    # chassis status first (recursively) and swap the command accordingly.
    if command == "reboot":
        self.ipmi_method(command="status")
        if self.output == "Chassis Power is off":
            command = "on"
    # Translate the logical command into the concrete ipmitool arguments.
    ipmi.execute(self.ipmi_map[command])
    if ipmi.status:
        # Non-zero exit status: record the stripped error text.
        self.error = ipmi.error.strip()
    else:
        self.output = ipmi.output.strip()
    self.status = ipmi.status
def run(self):
    """Start thread run here

    Executes the configured IPMI command; any exception is captured into
    self.error instead of propagating out of the thread.
    """
    try:
        if self.command == "pxer":
            # 'pxer' = set PXE as the boot device, then reboot on success.
            self.ipmi_method(command="pxe")
            # IDIOM FIX: compare against None with 'is', not '=='.
            if self.status == 0 or self.status is None:
                self.command = "reboot"
            else:
                return
        self.ipmi_method(self.command)
    except Exception as e:
        self.error = str(e)
def _create(self):
    """ Create new object on IxNetwork.

    :return: IXN object reference.
    """
    # Pass the configured name through only when one exists.
    add_kwargs = {}
    if 'name' in self._data:
        add_kwargs['name'] = self.obj_name()
    obj_ref = self.api.add(self.obj_parent(), self.obj_type(), **add_kwargs)
    # Commit the pending change, then resolve the temporary reference to
    # its final id.
    self.api.commit()
    return self.api.remapIds(obj_ref)
def merge(self, *args):
    """
    Merge multiple dictionary objects into one.

    Later dictionaries win on duplicate keys, matching the original
    list-concatenation behavior.

    :param variadic args: Multiple dictionary items
    :return dict
    """
    merged = {}
    for entry in args:
        merged.update(entry)
    return merged
def parseLegacy(self, response):
    """
    Parse a legacy response and try and catch any errors. If we have multiple
    responses we wont catch any exceptions, we will return the errors
    row by row

    :param dict response: The response string returned from request()
    :return Returns a dictionary or a list (list for multiple responses)
    """
    lines = response.splitlines()
    result = []
    # Each line is a series of "KEY: value" tokens; the negative lookahead
    # stops a value just before the next "KEY:" token begins.
    pattern = re.compile('([A-Za-z]+):((.(?![A-Za-z]+:))*)')
    for line in lines:
        matches = pattern.findall(line)
        row = {}
        for match in matches:
            row[match[0]] = match[1].strip()
        try:
            # 'ERR' values look like "code, description" or just "description".
            error = row['ERR'].split(',')
        except KeyError:
            pass
        else:
            # Normalize into 'code'/'error' fields and drop the raw 'ERR'.
            row['code'] = error[0] if len(error) == 2 else 0
            row['error'] = error[1].strip() if len(error) == 2 else error[0]
            del row['ERR']
            # If this response is a single row response, then we will throw
            # an exception to alert the user of any failures.
            if (len(lines) == 1):
                raise ClickatellError(row['error'], row['code'])
        finally:
            # The row is appended even on the single-row raise above.
            result.append(row)
    return result if len(result) > 1 else result[0]
def parseRest(self, response):
    """
    Parse a REST response. If the response contains an error field, we will
    raise it as an exception.
    """
    body = json.loads(response)
    # EAFP: probing for the error entry; a missing/malformed 'error' key
    # means success, in which case the payload is returned.
    try:
        description = body['error']['description']
        code = body['error']['code']
    except Exception:
        return body['data']
    raise ClickatellError(description, code)
def request(self, action, data={}, headers={}, method='GET'):
    """
    Run the HTTP request against the Clickatell API

    :param str action: The API action
    :param dict data: The request parameters
    :param dict headers: The request headers (if any)
    :param str method: The HTTP method
    :return: The request response
    """
    # NOTE(review): mutable default arguments ({}) are shared across
    # calls; safe only while callers never mutate them — confirm.
    url = ('https' if self.secure else 'http') + '://' + self.endpoint
    url = url + '/' + action
    # Set the User-Agent
    userAgent = "".join(["ClickatellPython/0.1.2", " ", "Python/", platform.python_version()])
    headers = self.merge({ "User-Agent": userAgent }, headers)
    # Resolve the requests function (get/post/...) matching the HTTP verb.
    try:
        func = getattr(requests, method.lower())
    except AttributeError:
        raise Exception('HTTP method ' + method + ' unsupported.')
    # The payload is sent both as query params and as a JSON body.
    resp = func(url, params=data, data=json.dumps(data), headers=headers)
    # Set the coding before unwrapping the text
    resp.encoding = 'utf-8'
    content = resp.text
    return content
def serialize_operations(self, operations):
    """Serialize a list of operations into JSON-ready dicts, picking the
    serializer that matches each operation's concrete class."""
    return [
        self.get_serializer_class(op.__class__)(op).data
        for op in operations
    ]
def get(self, request, pzone_pk):
    """Get all the operations for a given pzone.

    Optional ``from``/``to`` ISO-datetime query params bound the returned
    operations (``from`` inclusive, ``to`` exclusive); unparsable values
    are silently ignored.
    """
    # attempt to get given pzone
    try:
        pzone = PZone.objects.get(pk=pzone_pk)
    except PZone.DoesNotExist:
        raise Http404("Cannot find given pzone.")
    # build queryset filters from the optional time-window params
    filters = {"pzone": pzone}
    if "from" in request.GET:
        parsed = dateparse.parse_datetime(request.GET["from"])
        if parsed is not None:
            filters["when__gte"] = parsed
    if "to" in request.GET:
        parsed = dateparse.parse_datetime(request.GET["to"])
        if parsed is not None:
            filters["when__lt"] = parsed
    # get operations and serialize them
    operations = PZoneOperation.objects.filter(**filters)
    # return a json response with serialized operations
    return Response(self.serialize_operations(operations), content_type="application/json")
def post(self, request, pzone_pk):
    """Add new operation(s) to the given pzone, return json of what was added.

    Accepts either a single JSON object or a list of them; processing stops
    at the first invalid entry and its errors are returned with a 400.
    """
    # attempt to get given content list
    pzone = None
    try:
        pzone = PZone.objects.get(pk=pzone_pk)
    except PZone.DoesNotExist:
        raise Http404("Cannot find given pzone.")
    json_obj = []
    http_status = 500
    json_op = json.loads(request.body.decode("utf8"))
    # normalise a single operation into a one-element list
    if not isinstance(json_op, list):
        json_op = [json_op]
    for data in json_op:
        try:
            # look up the serializer via the operation's polymorphic type name
            serializer = self.get_serializer_class_by_name(data["type_name"])
        except ContentType.DoesNotExist as e:
            json_obj = {"errors": [str(e)]}
            http_status = 400
            break
        serialized = serializer(data=data)
        if serialized.is_valid():
            # object is valid, save it
            serialized.save()
            # set response data
            json_obj.append(serialized.data)
            http_status = 200
        else:
            # object is not valid, return errors in a 400 response
            json_obj = serialized.errors
            http_status = 400
            break
    # single-item requests get a single object back, not a list
    if http_status == 200 and len(json_obj) == 1:
        json_obj = json_obj[0]
    # cache the time in seconds until the next operation occurs
    # NOTE(review): the filter below uses when__lte=now, which selects
    # *past* operations despite the comments talking about the next one —
    # confirm the intended direction of this filter.
    next_ops = PZoneOperation.objects.filter(when__lte=timezone.now())
    if len(next_ops) > 0:
        # we have at least one operation, ordered soonest first
        next_op = next_ops[0]
        # cache with expiry number of seconds until op should exec
        cache.set('pzone-operation-expiry-' + pzone.name, next_op.when, 60 * 60 * 5)
    return Response(
        json_obj,
        status=http_status,
        content_type="application/json"
    )
def delete(self, request, pzone_pk, operation_pk):
    """Remove an operation from the given pzone.

    The operation is looked up by its own pk alone; pzone_pk is accepted
    only to keep the url scheme consistent.
    """
    try:
        operation = PZoneOperation.objects.get(pk=operation_pk)
    except PZoneOperation.DoesNotExist:
        raise Http404("Cannot find given operation.")
    operation.delete()
    # successful delete, return 204
    return Response("", 204)
def perform_update(self, serializer):
    """Save the pzone and record a snapshot in `bulbs.promotion.PZoneHistory`.

    :param serializer: the serializer holding the validated pzone data;
        its ``save()`` returns the persisted instance
    """
    instance = serializer.save()
    # create history object so every update leaves an audit trail
    instance.history.create(data=instance.data)
def retrieve(self, request, *args, **kwargs):
    """Retrieve pzone as a preview or applied if no preview is provided.

    The optional ``preview`` query param is a datetime string; if it is
    missing or unparsable the currently-applied pzone is returned.
    """
    when_param = get_query_params(self.request).get("preview", None)
    pk = self.kwargs["pk"]
    when = None
    if when_param:
        try:
            when = parse_date(when_param)
        except ValueError:
            # invalid format, set back to None
            when = None
    pzone = None
    if when:
        # we have a date, use it
        pzone = PZone.objects.preview(pk=pk, when=when)
    else:
        # we have no date, just get the pzone
        pzone = PZone.objects.applied(pk=pk)
    # turn content list into json
    return Response(PZoneSerializer(pzone).data, content_type="application/json")
def current(cls):
    """Get the current event loop singleton object.

    The loop is stored in thread-local storage; it is lazily created on
    first access, but only for the main thread.
    """
    if hasattr(_tls, 'loop'):
        return _tls.loop
    # create loop only for main thread
    if threading.current_thread().name == 'MainThread':
        _tls.loop = cls()
        return _tls.loop
    raise RuntimeError('there is no event loop created in the current thread')
def set_volume(percentage):
    '''Set the volume.

    Sets the system volume to a given percentage (integer between 0 and 100).

    Args:
        percentage (int): The percentage (as a 0 to 100 integer) to set the volume to.
    Raises:
        ValueError: if the percentage is >100 or <0.
    '''
    if not 0 <= percentage <= 100:
        raise ValueError('percentage must be an integer between 0 and 100')
    name = system.get_name()
    if name == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif name == 'mac':
        # OS X "set Volume" takes a 0-10 value instead of a percentage
        sp.Popen(['osascript', '-e', 'set Volume %d' % (percentage / 10)]).wait()
    else:
        # Linux/Unix via amixer
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', str(percentage) + '%']).wait()
def get_volume():
    '''Get the volume.

    Get the current volume.

    Returns:
        int: The current volume (percentage, between 0 and 100).
    '''
    if system.get_name() == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif system.get_name() == 'mac':
        # NOTE(review): "output volume of (get volume settings)" reports
        # 0-100 on OS X, so the * 10 below looks like it can yield values
        # up to 1000; also "quoted form of" an integer may error in
        # AppleScript — confirm this branch on a real mac.
        volume = system.get_cmd_out(
            ['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol'])
        return int(volume) * 10
    else:
        # Linux/Unix: scrape the Master channel level out of amixer output,
        # e.g. "[42%]" -> "42%" -> 42.
        volume = system.get_cmd_out(
            ('amixer get Master |grep % |awk \'{print $5}\'|'
            'sed -e \'s/\[//\' -e \'s/\]//\' | head -n1'))
        return int(volume.replace('%', ''))
def increase_volume(percentage):
    '''Increase the volume.

    Increase the volume by a given percentage.

    Args:
        percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by.
    Raises:
        ValueError: if the percentage is >100 or <0.
    '''
    if percentage > 100 or percentage < 0:
        raise ValueError('percentage must be an integer between 0 and 100')
    if system.get_name() == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif system.get_name() == 'mac':
        # OS X works on a 0-10 scale, so convert both the requested delta
        # and the current level before adding them.
        volume_int = percentage / 10
        # BUGFIX: this used to call the undefined name get() (NameError on
        # the mac path); use get_volume(), a percentage, scaled to 0-10.
        old_volume = get_volume() / 10
        new_volume = old_volume + volume_int
        if new_volume > 10:
            new_volume = 10
        set_volume(new_volume * 10)
    else:
        # Linux/Unix: a trailing "+" tells amixer to increase by the amount
        formatted = '%d%%+' % percentage
        # + or - increases/decreases in amixer
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
def mute():
    '''Mute the volume.

    Mutes the system volume. Note that muting is distinct from setting
    the volume to 0.
    '''
    name = system.get_name()
    if name == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif name == 'mac':
        sp.Popen(['osascript', '-e', 'set volume output muted true']).wait()
    elif unix_is_pulseaudio_server():
        # PulseAudio must be addressed explicitly via the pulse device
        sp.Popen(['amixer', '--quiet', '-D', 'pulse', 'sset',
                  'Master', 'mute']).wait()  # sset is *not* a typo
    else:
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', 'mute']).wait()
def unmute():
    '''Unmute the volume.

    Unmutes the system volume.

    Note:
        On some systems, volume is restored to its previous level after
        unmute, or set to 100.
    '''
    name = system.get_name()
    if name == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif name == 'mac':
        sp.Popen(['osascript', '-e', 'set volume output muted false']).wait()
    elif unix_is_pulseaudio_server():
        # PulseAudio must be addressed explicitly via the pulse device
        sp.Popen(['amixer', '--quiet', '-D', 'pulse', 'sset',
                  'Master', 'unmute']).wait()  # sset is *not* a typo
    else:
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', 'unmute']).wait()
def query_by_login(self, login_id, end_time=None, start_time=None):
    """
    Query by login.

    List authentication events for a given login.
    """
    # REQUIRED path parameter: the login's ID
    path = {"login_id": login_id}
    data = {}
    params = {}
    # Optional time-window bounds for the events to return.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/audit/authentication/logins/{login_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/authentication/logins/{login_id}".format(**path), data=data, params=params, no_data=True)
def query_by_account(self, account_id, end_time=None, start_time=None):
    """
    Query by account.

    List authentication events for a given account.
    """
    # REQUIRED path parameter: the account's ID
    path = {"account_id": account_id}
    data = {}
    params = {}
    # Optional time-window bounds for the events to return.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/authentication/accounts/{account_id}".format(**path), data=data, params=params, no_data=True)
def query_by_user(self, user_id, end_time=None, start_time=None):
    """
    Query by user.

    List authentication events for a given user.
    """
    # REQUIRED path parameter: the user's ID
    path = {"user_id": user_id}
    data = {}
    params = {}
    # Optional time-window bounds for the events to return.
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/audit/authentication/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/authentication/users/{user_id}".format(**path), data=data, params=params, no_data=True)
def _sorted_keys(self):
    """
    Return list of keys sorted by version.

    Sorting is done based on :py:func:`pkg_resources.parse_version`; the
    result is memoised in ``self._cache['sorted_keys']``.
    """
    if 'sorted_keys' not in self._cache:
        self._cache['sorted_keys'] = sorted(self.keys(), key=parse_version)
    return self._cache['sorted_keys']
def _process_blacklist(self, blacklist):
    """
    Process blacklist into set of excluded versions.

    Per-entry results are memoised in ``self._cache['blacklist']`` keyed
    by ``(version, operator)`` so unchanged entries are not re-evaluated
    on subsequent calls.
    """
    # Assume blacklist is correct format since it is checked by PluginLoader
    blacklist_cache = {}
    blacklist_cache_old = self._cache.get('blacklist', {})
    for entry in blacklist:
        blackkey = (entry.version, entry.operator)
        if blackkey in blacklist_cache:
            # duplicate entry within this blacklist; already evaluated
            continue
        elif blackkey in blacklist_cache_old:
            # reuse the set computed on a previous call
            blacklist_cache[blackkey] = blacklist_cache_old[blackkey]
        else:
            entry_cache = blacklist_cache[blackkey] = set()
            # a missing version means "compare against version 0"
            blackversion = parse_version(entry.version or '0')
            # OPERATORS maps the textual operator to a comparison callable
            blackop = OPERATORS[entry.operator]
            for key in self:
                if blackop(parse_version(key), blackversion):
                    entry_cache.add(key)
    # replace the cache wholesale so stale entries are dropped
    self._cache['blacklist'] = blacklist_cache
    return set().union(*blacklist_cache.values())
def find(pred, items):
    """
    Find the index of the first element in items for which pred returns
    True, or None when no element matches.

    >>> find(lambda x: x > 3, range(100))
    4
    >>> find(lambda x: x < -3, range(100)) is None
    True
    """
    return next(
        (index for index, value in enumerate(items) if pred(value)),
        None,
    )
def reset_less_significant(self, significant_version):
    """
    Reset to zero all version info less significant than the
    indicated version.

    >>> ver = SummableVersion('3.1.2')
    >>> ver.reset_less_significant(SummableVersion('0.1'))
    >>> str(ver)
    '3.1'
    """
    def nonzero(x):
        return x != 0
    version_len = 3  # strict versions are always a tuple of 3
    # rfind is a module-level helper; presumably it returns the (negative,
    # from-the-end) index of the last non-zero component — confirm against
    # the module's rfind definition.
    significant_pos = rfind(nonzero, significant_version.version)
    # turn the from-the-end index into a positive slice bound
    significant_pos = version_len + significant_pos + 1
    # keep components up to the significant position, zero-fill the rest
    self.version = (
        self.version[:significant_pos]
        + (0,) * (version_len - significant_pos))
def get_tagged_version(self):
    """
    Get the version of the local working set as a StrictVersion or
    None if no viable tag exists. If the local working set is itself
    the tagged commit and the tip and there are no local
    modifications, use the tag on the parent changeset.
    """
    tags = list(self.get_tags())
    if 'tip' in tags and not self.is_modified():
        tags = self.get_parent_tags('tip')
    return self.__best_version(self.__versions_from_tags(tags))
def get_next_version(self, increment=None):
    """
    Return the next version based on prior tagged releases.

    Falls back to ``self.increment`` when no increment is supplied.
    """
    step = increment or self.increment
    latest = self.get_latest_version()
    return self.infer_next_version(latest, step)
def create_global_notification(self, account_id, account_notification_end_at, account_notification_subject, account_notification_message, account_notification_start_at, account_notification_icon=None, account_notification_roles=None):
    """
    Create a global notification.

    Create and return a new global notification for an account.

    :param account_id: the account to notify
    :param account_notification_subject: the notification subject line
    :param account_notification_message: the notification message body
    :param account_notification_start_at: ISO8601 start datetime
    :param account_notification_end_at: ISO8601 end datetime
    :param account_notification_icon: optional icon name (defaults to warning)
    :param account_notification_roles: optional list of role names to target
    """
    path = {}
    data = {}
    params = {}
    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id
    # REQUIRED - account_notification[subject]
    """The subject of the notification."""
    data["account_notification[subject]"] = account_notification_subject
    # REQUIRED - account_notification[message]
    """The message body of the notification."""
    data["account_notification[message]"] = account_notification_message
    # REQUIRED - account_notification[start_at]
    """The start date and time of the notification in ISO8601 format.
    e.g. 2014-01-01T01:00Z"""
    data["account_notification[start_at]"] = account_notification_start_at
    # REQUIRED - account_notification[end_at]
    """The end date and time of the notification in ISO8601 format.
    e.g. 2014-01-01T01:00Z"""
    data["account_notification[end_at]"] = account_notification_end_at
    # OPTIONAL - account_notification[icon]
    """The icon to display with the notification.
    Note: Defaults to warning."""
    if account_notification_icon is not None:
        self._validate_enum(account_notification_icon, ["warning", "information", "question", "error", "calendar"])
        data["account_notification[icon]"] = account_notification_icon
    # OPTIONAL - account_notification_roles
    """The role(s) to send global notification to. Note: ommitting this field will send to everyone
    Example:
    account_notification_roles: ["StudentEnrollment", "TeacherEnrollment"]"""
    if account_notification_roles is not None:
        data["account_notification_roles"] = account_notification_roles
    self.logger.debug("POST /api/v1/accounts/{account_id}/account_notifications with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/account_notifications".format(**path), data=data, params=params, no_data=True)
def adapt_persistent_instance(persistent_object, target_rest_class=None, attribute_filter=None):
    """
    Adapts a single persistent instance to a REST model; at present this is a
    common method for all persistent backends.

    :param persistent_object: the backend model instance to adapt
    :param target_rest_class: optional REST model class (or instance); when
        omitted the adapter registry picks one for the persistent model
    :param attribute_filter: optional parser.AttributeFilter restricting the
        attributes adapted across

    Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as
    for discussion on this feature
    """
    # try and get the adapter and the REST class for the persistent object
    if target_rest_class is None:
        adapter_instance = registry.get_adapter_for_persistent_model(persistent_object)
    else:
        # accept a class and instantiate it on the caller's behalf
        if inspect.isclass(target_rest_class):
            target_rest_class = target_rest_class()
        adapter_instance = registry.get_adapter_for_persistent_model(persistent_object, target_rest_class)
    # would raise an exception if the attribute_filter differs from the target_rest_class
    # NOTE(review): when target_rest_class is None but a filter is given,
    # from_model(None) is called here — confirm that is intended.
    if attribute_filter is not None and isinstance(attribute_filter, parser.AttributeFilter):
        parser.AttributeFilter.from_model(target_rest_class).conforms_to_template_filter(attribute_filter)
    # convert filter to immutable if it isn't already
    if isinstance(attribute_filter, parser.AttributeFilter):
        attribute_filter = attribute_filter.as_immutable()
    return adapter_instance.adapt_persistent_to_rest(persistent_object, attribute_filter)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.