_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def same_page_choosen(form, field):
    """Validate that the list page itself is not being assigned as a child.

    Raises ValidationError when the selected page's id matches the form's
    own list page id; does nothing when no list object is bound.
    """
    bound_obj = form._obj
    if bound_obj is not None and field.data.id == bound_obj.list_id:
        raise ValidationError(
            _('You cannot assign list page itself as a child.'))
"resource": ""
} |
def cols_str(columns):
    """Concatenate a list of columns into a comma-separated string.

    Each column is passed through ``wrap`` (presumably adds SQL identifier
    quoting -- TODO confirm against the helper's definition).

    :param columns: iterable of column names
    :return: ``str`` such as "`a`, `b`" (empty string for no columns)
    """
    # The original built the string with += in a loop and sliced off the
    # trailing ", " -- str.join is linear and needs no trim.
    return ', '.join(wrap(c) for c in columns)
"resource": ""
} |
def join_cols(cols):
    """Join a collection of columns into a string for a SQL query.

    :param cols: list/tuple/set of column names, or a single string
    :return: comma-separated string, or ``cols`` unchanged when it is not
        a list/tuple/set
    """
    # The original wrapped cols in an identity comprehension; str.join
    # accepts any iterable directly.
    return ", ".join(cols) if isinstance(cols, (list, tuple, set)) else cols
"resource": ""
} |
def create_order_classes(model_label, order_field_names):
    """
    Create order model and admin class.
    Add order model to order.models module and register admin class.
    Connect ordered_objects manager to related model.

    :param model_label: dotted "app.Model" label of the model to order
    :param order_field_names: iterable of integer order-field names to add
    :return: the dynamically created order model class
    """
    # Seperate model_label into parts.
    labels = resolve_labels(model_label)
    # Get model class for model_label string.
    model = get_model(labels['app'], labels['model'])

    # Dynamic Classes
    class OrderItemBase(models.Model):
        """
        Dynamic order class base.
        """
        item = models.ForeignKey(model_label)
        timestamp = models.DateTimeField(auto_now=True)

        class Meta:
            abstract = True
            app_label = 'order'

    class Admin(admin.ModelAdmin):
        """
        Dynamic order admin class.
        """
        list_display = ('item_link',) + tuple(order_field_names)
        list_editable = order_field_names

        def get_model_perms(self, request):
            """
            Return empty perms dict thus hiding the model from admin index.
            """
            return {}

        @csrf_protect_m
        def changelist_view(self, request, extra_context=None):
            # Build add/list URLs for the *related* model so the change list
            # can link back to it.
            list_url = reverse('admin:%s_%s_changelist' % (labels['app'], \
                labels['model'].lower()))
            add_url = reverse('admin:%s_%s_add' % (labels['app'], \
                labels['model'].lower()))
            result = super(Admin, self).changelist_view(
                request,
                extra_context={
                    'add_url': add_url,
                    'list_url': list_url,
                    'related_opts': model._meta,
                }
            )
            # XXX: Sanitize order on list save.
            # if (request.method == "POST" and self.list_editable and \
            #        '_save' in request.POST):
            #    sanitize_order(self.model)
            return result

        def item_link(self, obj):
            # Render the ordered item as a link to its own admin change page.
            url = reverse('admin:%s_%s_change' % (labels['app'], \
                labels['model'].lower()), args=(obj.item.id,))
            return '<a href="%s">%s</a>' % (url, str(obj.item))
        item_link.allow_tags = True
        item_link.short_description = 'Item'

    # Set up a dictionary to simulate declarations within a class.
    attrs = {
        '__module__': 'order.models',
    }
    # Create provided order fields and add to attrs.
    fields = {}
    for field in order_field_names:
        fields[field] = models.IntegerField()
    attrs.update(fields)
    # Create the class which automatically triggers Django model processing.
    order_item_class_name = resolve_order_item_class_name(labels)
    order_model = type(order_item_class_name, (OrderItemBase, ), attrs)
    # Register admin model.
    admin.site.register(order_model, Admin)
    # Add user_order_by method to base QuerySet.
    from order import managers
    setattr(QuerySet, 'user_order_by', managers.user_order_by)
    # Return created model class.
    return order_model
"resource": ""
} |
def create_order_objects(model, order_fields):
    """
    Create order items for objects already present in the database.

    For each order model related to ``model`` (i.e. models living in
    order.models), find the current maximum of each order field and create
    missing order items with incrementing values.
    """
    for rel in model._meta.get_all_related_objects():
        rel_model = rel.model
        # Only process the dynamically created order models.
        if rel_model.__module__ == 'order.models':
            objs = model.objects.all()
            values = {}
            # Seed each order field's starting value to max existing + 1
            # (or 1 when no order items exist yet).
            for order_field in order_fields:
                order_objs = rel_model.objects.all().order_by('-%s' \
                    % order_field)
                try:
                    values[order_field] = getattr(order_objs[0], \
                        order_field) + 1
                except IndexError:
                    values[order_field] = 1
            for obj in objs:
                try:
                    rel_model.objects.get(item=obj)
                except rel_model.DoesNotExist:
                    rel_model.objects.create(item=obj, **values)
                # NOTE(review): increments happen per object regardless of
                # whether an item was created -- original indentation was
                # lost in extraction; confirm whether this should live
                # inside the except branch instead.
                for key in values:
                    values[key] += 1
"resource": ""
} |
def is_orderable(cls):
    """
    Return the orderable settings for ``cls`` if it is registered in
    settings.ORDERABLE_MODELS, otherwise False.
    """
    orderable_models = getattr(settings, 'ORDERABLE_MODELS', None)
    if not orderable_models:
        return False
    app_model = resolve_labels(cls)['app_model']
    if app_model in orderable_models:
        return orderable_models[app_model]
    return False
"resource": ""
} |
def resolve_labels(model_label):
    """
    Split a dotted model label into its parts.

    :param model_label: label such as "app.Model" (intermediate segments
        are ignored: "app.module.Model" yields app="app", model="Model")
    :return: dict with 'app', 'model' and 'app_model' strings
    """
    segments = model_label.split('.')
    app, model = segments[0], segments[-1]
    return {
        'app': app,
        'model': model,
        'app_model': '%s.%s' % (app, model),
    }
"resource": ""
} |
def get_serializer_class(self):
    """gets the class type of the serializer

    Detail requests use the concrete class of the fetched object; list
    requests may select a class via the "doctype" query parameter.

    :return: `rest_framework.Serializer`
    :raises Http404: when an unknown doctype is requested
    """
    klass = None
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    if lookup_url_kwarg in self.kwargs:
        # Looks like this is a detail...
        klass = self.get_object().__class__
    elif "doctype" in self.request.REQUEST:
        # NOTE(review): request.REQUEST was removed in newer Django
        # versions -- this code targets an older release.
        base = self.model.get_base_class()
        doctypes = indexable_registry.families[base]
        try:
            klass = doctypes[self.request.REQUEST["doctype"]]
        except KeyError:
            raise Http404
    if hasattr(klass, "get_serializer_class"):
        return klass.get_serializer_class()
    # TODO: fix deprecation warning here -- `get_serializer_class` is going away soon!
    return super(ContentViewSet, self).get_serializer_class()
"resource": ""
} |
def publish(self, request, **kwargs):
    """sets the `published` value of the `Content`

    A falsy "published" value unpublishes; a datetime string schedules
    publication at that (UTC-normalized) time; omitting "published"
    publishes immediately.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    content = self.get_object()
    if "published" in get_request_data(request):
        if not get_request_data(request)["published"]:
            # Falsy value: unpublish the content.
            content.published = None
        else:
            publish_dt = parse_datetime(get_request_data(request)["published"])
            if publish_dt:
                # Normalize to UTC before storing.
                publish_dt = publish_dt.astimezone(timezone.utc)
            else:
                # Unparseable datetime string: treat as unpublished.
                publish_dt = None
            content.published = publish_dt
    else:
        # No explicit value: publish right now.
        content.published = timezone.now()
    content.save()
    LogEntry.objects.log(request.user, content, content.get_status())
    return Response({"status": content.get_status(), "published": content.published})
"resource": ""
} |
def trash(self, request, **kwargs):
    """Pseudo-delete a `Content` instance.

    The row stays in the database; it is merely flagged so it is removed
    from the ElasticSearch index.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    instance = self.get_object()
    # Flip the flag instead of deleting; saving propagates the change.
    instance.indexed = False
    instance.save()
    LogEntry.objects.log(request.user, instance, "Trashed")
    return Response({"status": "Trashed"})
"resource": ""
} |
def contributions(self, request, **kwargs):
    """gets or adds contributions

    POST validates and saves a list of contributions; GET searches the
    contribution index for entries matching this content's pk.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    # Check if the contribution app is installed
    if Contribution not in get_models():
        return Response([])
    if request.method == "POST":
        serializer = ContributionSerializer(data=get_request_data(request), many=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    else:
        content_pk = kwargs.get('pk', None)
        if content_pk is None:
            return Response([], status=status.HTTP_404_NOT_FOUND)
        queryset = Contribution.search_objects.search().filter(
            es_filter.Term(**{'content.id': content_pk})
        )
        # Slice to the full result count so all hits (not just the default
        # ES page) are serialized, sorted by id.
        serializer = ContributionSerializer(queryset[:queryset.count()].sort('id'), many=True)
        return Response(serializer.data)
"resource": ""
} |
def create_token(self, request, **kwargs):
    """Create a new obfuscated url info to use for accessing unpublished content.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    request_data = get_request_data(request)
    payload = {
        "content": self.get_object().id,
        "create_date": request_data["create_date"],
        "expire_date": request_data["expire_date"],
    }
    serializer = ObfuscatedUrlInfoSerializer(data=payload)
    if serializer.is_valid():
        serializer.save()
        return Response(
            serializer.data,
            status=status.HTTP_200_OK,
            content_type="application/json",
        )
    return Response(
        serializer.errors,
        status=status.HTTP_400_BAD_REQUEST,
        content_type="application/json",
    )
"resource": ""
} |
def list_tokens(self, request, **kwargs):
    """List all tokens for this content instance.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    # No date checking is done here so any issue with the number of
    # records stays visible; date filtering happens on the frontend.
    serialized = []
    for info in ObfuscatedUrlInfo.objects.filter(content=self.get_object()):
        serialized.append(ObfuscatedUrlInfoSerializer(info).data)
    return Response(serialized, status=status.HTTP_200_OK, content_type="application/json")
"resource": ""
} |
def get_queryset(self):
    """Build the base queryset, optionally narrowed to one content item
    via the "content" query parameter.

    :return: an instance of `django.db.models.QuerySet`
    """
    queryset = LogEntry.objects.all()
    content_id = get_query_params(self.request).get("content", None)
    if not content_id:
        return queryset
    return queryset.filter(object_id=content_id)
"resource": ""
} |
def get_queryset(self):
    """Base queryset limited to users matching the configured author
    filter (defaults to staff users).

    :return: `django.db.models.QuerySet`
    """
    criteria = getattr(settings, "BULBS_AUTHOR_FILTER", {"is_staff": True})
    return self.model.objects.filter(**criteria).distinct()
"resource": ""
} |
def retrieve(self, request, *args, **kwargs):
    """gets basic information about the user

    :param request: a WSGI request object
    :param args: inline arguments (optional)
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    data = UserSerializer().to_representation(request.user)
    # add superuser flag only if user is a superuser, putting it here so users can only
    # tell if they are themselves superusers
    if request.user.is_superuser:
        data['is_superuser'] = True
    # attempt to add a firebase token if we have a firebase secret
    secret = getattr(settings, 'FIREBASE_SECRET', None)
    if secret:
        # use firebase auth to provide auth variables to firebase security api
        firebase_auth_payload = {
            'id': request.user.pk,
            'username': request.user.username,
            'email': request.user.email,
            'is_staff': request.user.is_staff
        }
        data['firebase_token'] = create_token(secret, firebase_auth_payload)
    return Response(data)
"resource": ""
} |
def list(self, request):
    """Search the registered doctypes for this model by (case-insensitive)
    verbose name and return the matches sorted by name."""
    search_term = get_query_params(request).get("search", "").lower()
    base = self.model.get_base_class()
    matches = []
    for doctype, klass in indexable_registry.families[base].items():
        title = klass._meta.verbose_name.title()
        if search_term in title.lower():
            matches.append({"name": title, "doctype": doctype})
    matches.sort(key=lambda entry: entry["name"])
    return Response({"results": matches})
"resource": ""
} |
def query_by_course(self, course_id, end_time=None, start_time=None):
    """
    Query by course.
    List course change events for a given course.

    :param course_id: ID of the course whose audit events to list
    :param start_time: optional beginning of the time range
    :param end_time: optional end of the time range
    :return: all pages of the GET request's results
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # OPTIONAL - start_time
    """The beginning of the time range from which you want events."""
    if start_time is not None:
        params["start_time"] = start_time

    # OPTIONAL - end_time
    """The end of the time range from which you want events."""
    if end_time is not None:
        params["end_time"] = end_time

    self.logger.debug("GET /api/v1/audit/course/courses/{course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/course/courses/{course_id}".format(**path), data=data, params=params, all_pages=True)
"resource": ""
} |
def vary_radius(dt):
    """Vary the disc radius over time.

    :param dt: elapsed seconds since the last tick (scheduler callback arg)
    """
    global time
    # Accumulate elapsed time, then oscillate the radius between 1.0 and
    # 4.0 with a sine wave (period ~4*pi seconds).
    time += dt
    disc.inner_radius = disc.outer_radius = 2.5 + math.sin(time / 2.0) * 1.5
"resource": ""
} |
def to_str(delta, extended=False):
    """Format a datetime.timedelta to a duration string.

    :param delta: the ``datetime.timedelta`` to format
    :param extended: passed through to the underlying formatter
    :return: duration string, prefixed with "-" for negative durations
    """
    total_seconds = delta.total_seconds()
    sign = "-" if total_seconds < 0 else ""
    nanoseconds = abs(total_seconds * _second_size)
    # Branch on the *magnitude* of the duration. The original compared the
    # signed value (total_seconds < 1), which sent every negative duration
    # -- e.g. -2s -- down the sub-second formatter.
    if abs(total_seconds) < 1:
        result_str = _to_str_small(nanoseconds, extended)
    else:
        result_str = _to_str_large(nanoseconds, extended)
    return "{}{}".format(sign, result_str)
"resource": ""
} |
def create_database(self, name):
    """Create a new database with latin1 / latin1_swedish_ci defaults.

    :param name: database name (quoted via ``wrap``)
    :return: result of executing the CREATE DATABASE statement
    """
    sql = "CREATE DATABASE {0} DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci"
    return self.execute(sql.format(wrap(name)))
"resource": ""
} |
def create_table(self, name, data, columns=None, add_pk=True):
    """Generate and execute a create table query by parsing a 2D dataset

    :param name: table name
    :param data: 2D sequence of rows; row 0 is used as the header when
        ``columns`` is not given
    :param columns: optional explicit column names
    :param add_pk: when True, add an auto-increment primary key afterwards
    :return: True on completion
    """
    # TODO: Issue occurs when bool values exist in data
    # Remove if the table exists
    if name in self.tables:
        self.drop(name)
    # Set headers list
    if not columns:
        # NOTE(review): when the header is taken from data[0], the header
        # row still participates in the type inference below -- confirm
        # whether callers pass header-less data in that case.
        columns = data[0]
    # Validate data shape
    for row in data:
        assert len(row) == len(columns)
    # Create dictionary of column types
    col_types = {columns[i]: sql_column_type([d[i] for d in data], prefer_int=True, prefer_varchar=True)
                 for i in range(0, len(columns))}
    # Join column types into SQL string
    # NOTE(review): column names are not passed through wrap() here,
    # unlike other helpers in this module -- verify intended.
    cols = ''.join(['\t{0} {1},\n'.format(name, type_) for name, type_ in col_types.items()])[:-2] + '\n'
    statement = 'CREATE TABLE {0} ({1}{2})'.format(name, '\n', cols)
    self.execute(statement)
    if add_pk:
        self.set_primary_key_auto()
    return True
"resource": ""
} |
def _get_notification(self, email, token):
    ''' Query the payment transaction status from the PagSeguro
    notification API.

    :param email: account email used for API authentication
    :param token: account token used for API authentication
    :raises PagSeguroApiException: when the API returns a non-200 status
    '''
    url = u'{notification_url}{notification_code}?email={email}&token={token}'.format(
        notification_url=self.notification_url,
        notification_code=self.notification_code,
        email=email,
        token=token)
    req = requests.get(url)
    if req.status_code == 200:
        self.xml = req.text
        logger.debug( u'XML com informacoes da transacao recebido: {0}'.format(self.xml) )
        transaction_dict = xmltodict.parse(self.xml)
        # Validate the received payload against the transaction schema.
        transaction_schema(transaction_dict)
        self.transaction = transaction_dict.get('transaction')
    else:
        raise PagSeguroApiException(
            u'Erro ao fazer request para a API de notificacao:' +
            ' HTTP Status=%s - Response: %s' % (req.status_code, req.text))
"resource": ""
} |
def items(self):
    ''' Return the payment's items as a list.

    The XML-to-dict conversion yields a list when the transaction has
    several items but a single dict when it has exactly one; normalize
    both cases to a list.
    '''
    item = self.transaction['items']['item']
    # isinstance instead of the original type(...) == list comparison.
    if isinstance(item, list):
        return item
    return [item]
"resource": ""
} |
def create_single_poll(self, polls_question, polls_description=None):
    """
    Create a single poll.
    Create a new poll for the current user

    :param polls_question: title of the poll (required)
    :param polls_description: optional description / instructions
    :return: result of the POST request
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - polls[question]
    """The title of the poll."""
    data["polls[question]"] = polls_question

    # OPTIONAL - polls[description]
    """A brief description or instructions for the poll."""
    if polls_description is not None:
        data["polls[description]"] = polls_description

    self.logger.debug("POST /api/v1/polls with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/polls".format(**path), data=data, params=params, no_data=True)
"resource": ""
} |
def content_deleted(sender, instance=None, **kwargs):
    """Signal handler: remove content from the ES index when it is
    deleted from the database (unless indexing is disabled on it)."""
    if not getattr(instance, "_index", True):
        return
    real_cls = instance.get_real_instance_class()
    mapping = real_cls.search_objects.mapping
    # Ignore 404 so deleting content that was never indexed is a no-op.
    real_cls.search_objects.client.delete(
        mapping.index, mapping.doc_type, instance.id, ignore=[404])
"resource": ""
} |
def thumbnail(self):
    """Read-only attribute that provides the value of the thumbnail to display.

    Prefers an explicit override, then the first populated image; when
    neither exists, returns the (blank) override field.
    """
    override = self.thumbnail_override
    if override.id is not None:
        return override
    image = self.first_image
    if image is not None:
        return image
    # No override and no populated image field: the blank override image
    # field is the fallback.
    return override
"resource": ""
} |
def first_image(self):
    """Read-only attribute that provides the value of the first non-none
    image field that is not the thumbnail override field.

    :return: the first populated image field value, or None
    """
    # loop through image fields and grab the first non-none one
    for model_field in self._meta.fields:
        if isinstance(model_field, ImageField):
            # The original used "is not" here, which compares object
            # identity and only worked via CPython string interning;
            # != is the correct string comparison.
            if model_field.name != 'thumbnail_override':
                field_value = getattr(self, model_field.name)
                if field_value.id is not None:
                    return field_value
    # no non-none images, return None
    return None
"resource": ""
} |
def get_absolute_url(self):
    """Produce a URL linking directly to this instance via the URL config.

    :return: `str` URL, or None when no route matches
    """
    try:
        return reverse("content-detail-view",
                       kwargs={"pk": self.pk, "slug": self.slug})
    except NoReverseMatch:
        return None
"resource": ""
} |
def ordered_tags(self):
    """Related tags sorted descending by key: tag subclasses (anything
    whose exact type is not `Tag`) receive a large bonus and so come
    first, followed by plain `Tag` instances by descending count.

    :return: `list` of `Tag` instances
    """
    def sort_key(tag):
        # Exact-type check is intentional: subclassed tags outrank
        # plain Tags regardless of count.
        bonus = 100000 if type(tag) != Tag else 0
        return bonus + tag.count()

    return sorted(self.tags.all(), key=sort_key, reverse=True)
"resource": ""
} |
def save(self, *args, **kwargs):
    """creates the slug, queues up for indexing and saves the instance

    :param args: inline arguments (optional)
    :param kwargs: keyword arguments
    :return: `bulbs.content.Content`
    """
    if not self.slug:
        # Slug derived from build_slug(), truncated to the field's max length.
        self.slug = slugify(self.build_slug())[:self._meta.get_field("slug").max_length]
    if not self.is_indexed:
        # NOTE(review): kwargs is always a dict here, so this None check
        # is dead code -- harmless but never taken.
        if kwargs is None:
            kwargs = {}
        kwargs["index"] = False
    content = super(Content, self).save(*args, **kwargs)
    # Fire async (celery .delay) tasks to refresh derived indexes/feeds.
    index_content_contributions.delay(self.id)
    index_content_report_content_proxy.delay(self.id)
    post_to_instant_articles_api.delay(self.id)
    return content
"resource": ""
} |
def log(self, user, content, message):
    """Create a new log record for ``content``.

    :param user: the acting user
    :param content: content instance the entry refers to
    :param message: change information
    :return: the created log entry
    """
    content_type = ContentType.objects.get_for_model(content)
    return self.create(user=user,
                       content_type=content_type,
                       object_id=content.pk,
                       change_message=message)
"resource": ""
} |
def save(self, *args, **kwargs):
    """Assign the obfuscating uuid on first save.

    :param args: inline arguments (optional)
    :param kwargs: keyword arguments (optional)
    :return: `super.save()`
    """
    if not self.id:
        # Totally new instance: generate the uuid (hex form, no dashes --
        # identical to str(uuid4()).replace("-", "")).
        self.url_uuid = uuid.uuid4().hex
    super(ObfuscatedUrlInfo, self).save(*args, **kwargs)
"resource": ""
} |
def write_playlist_file(self, localdir):
    """Write this playlist's tracks to a playlist file under ``localdir``.

    Creates ``<localdir>/playlists`` when missing, then writes one
    absolute ``.mp3`` path per track. (The original docstring claimed it
    only checked for existence, which was inaccurate.)

    :param localdir: base local directory holding playlists and tracks
    """
    path = "{0}/playlists".format(localdir)
    if not os.path.exists(path):
        os.makedirs(path)
    filepath = "{0}/{1}".format(path, self.gen_filename())
    # Context manager guarantees the file is closed even if a track
    # entry raises -- the original leaked the handle on error.
    with open(filepath, "w") as playlist:
        for track in self.get_tracks():
            playlist.write("{0}/{1}.mp3\n".format(
                os.path.abspath(track.gen_localdir(localdir)),
                track.gen_filename()))
"resource": ""
} |
def _create_instance_attributes(self, arguments):
    """
    Copies class level attribute templates and makes instance placeholders

    This step is required for direct uses of Model classes. This creates a
    copy of attribute_names, ignores methods and private variables.
    DataCollection types are deep copied to ignore memory reference conflicts.
    DataType instances are initialized to None or default value.

    :param arguments: mapping of attribute name -> initial value
    """
    for attribute_name, type_instance in self.getmembers():
        if isinstance(type_instance, DataType):
            # Keep the class-level template around for later validation.
            self._templates[attribute_name] = type_instance
            value = None
            if attribute_name in arguments:
                value = arguments[attribute_name]
            try:
                self._attributes[attribute_name] = type_instance.validate(value)
            # we can safely ignore required warnings during initialization
            except exception.RequiredAttributeError:
                self._attributes[attribute_name] = None
"resource": ""
} |
def get_attribute_keys(self):
    """
    Return the list of managed (DataType) attribute names for this Model.

    Implemented for use with data adapters; gives a quick list of the
    attribute names in a prestans model.
    """
    return [name for name, member in self.getmembers()
            if isinstance(member, DataType)]
"resource": ""
} |
def as_serializable(self, attribute_filter=None, minified=False):
    """
    Returns a dictionary with attributes and pure python representation of
    the data instances. If an attribute filter is provided as_serializable
    will respect the visibility.

    The response is used by serializers to return data to client

    :param attribute_filter:
    :type attribute_filter: prestans.parser.AttributeFilter
    :param minified: when True, keys are rewritten via attribute_rewrite_map
    :type minified: bool
    :return: dict of serialized attribute values
    """
    from prestans.parser import AttributeFilter
    from prestans.parser import AttributeFilterImmutable
    from prestans.types import Array

    model_dictionary = dict()
    rewrite_map = self.attribute_rewrite_map()

    # convert filter to immutable if it isn't already
    if isinstance(attribute_filter, AttributeFilter):
        attribute_filter = attribute_filter.as_immutable()

    for attribute_name, type_instance in self.getmembers():
        # Skip attributes the filter marks as hidden.
        if isinstance(attribute_filter, (AttributeFilter, AttributeFilterImmutable)) and \
                not attribute_filter.is_attribute_visible(attribute_name):
            continue

        # support minification
        serialized_attribute_name = attribute_name
        if minified is True:
            serialized_attribute_name = rewrite_map[attribute_name]

        # Unset/None attributes serialize as [] for Arrays, None otherwise.
        if attribute_name not in self._attributes or self._attributes[attribute_name] is None:
            if isinstance(type_instance, Array):
                model_dictionary[serialized_attribute_name] = []
            else:
                model_dictionary[serialized_attribute_name] = None
            continue

        if isinstance(type_instance, DataCollection):
            # Recurse with the sub-filter for this attribute, if any.
            sub_attribute_filter = None
            if isinstance(attribute_filter, (AttributeFilter, AttributeFilterImmutable)) and attribute_name in attribute_filter:
                sub_attribute_filter = getattr(attribute_filter, attribute_name)
            model_dictionary[serialized_attribute_name] = self._attributes[attribute_name].as_serializable(sub_attribute_filter, minified)
        elif isinstance(type_instance, DataStructure):
            python_value = self._attributes[attribute_name]
            serializable_value = type_instance.as_serializable(python_value)
            model_dictionary[serialized_attribute_name] = serializable_value
        elif isinstance(type_instance, DataType):
            model_dictionary[serialized_attribute_name] = self._attributes[attribute_name]

    return model_dictionary
"resource": ""
} |
def enroll_user_courses(self, course_id, enrollment_type, enrollment_user_id, enrollment_associated_user_id=None, enrollment_course_section_id=None, enrollment_enrollment_state=None, enrollment_limit_privileges_to_course_section=None, enrollment_notify=None, enrollment_role=None, enrollment_role_id=None, enrollment_self_enrolled=None, enrollment_self_enrollment_code=None):
    """
    Enroll a user.
    Create a new user enrollment for a course or section.

    :param course_id: ID of the course to enroll into (required path param)
    :param enrollment_type: one of the enrollment type enum values (required)
    :param enrollment_user_id: ID of the user to enroll (required)
    :return: single-item result of the POST request
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - enrollment[user_id]
    """The ID of the user to be enrolled in the course."""
    data["enrollment[user_id]"] = enrollment_user_id

    # REQUIRED - enrollment[type]
    """Enroll the user as a student, teacher, TA, observer, or designer. If no
    value is given, the type will be inferred by enrollment[role] if supplied,
    otherwise 'StudentEnrollment' will be used."""
    self._validate_enum(enrollment_type, ["StudentEnrollment", "TeacherEnrollment", "TaEnrollment", "ObserverEnrollment", "DesignerEnrollment"])
    data["enrollment[type]"] = enrollment_type

    # OPTIONAL - enrollment[role]
    """Assigns a custom course-level role to the user."""
    if enrollment_role is not None:
        data["enrollment[role]"] = enrollment_role

    # OPTIONAL - enrollment[role_id]
    """Assigns a custom course-level role to the user."""
    if enrollment_role_id is not None:
        data["enrollment[role_id]"] = enrollment_role_id

    # OPTIONAL - enrollment[enrollment_state]
    """If set to 'active,' student will be immediately enrolled in the course.
    Otherwise they will be required to accept a course invitation. Default is
    'invited.'.

    If set to 'inactive', student will be listed in the course roster for
    teachers, but will not be able to participate in the course until
    their enrollment is activated."""
    if enrollment_enrollment_state is not None:
        self._validate_enum(enrollment_enrollment_state, ["active", "invited", "inactive"])
        data["enrollment[enrollment_state]"] = enrollment_enrollment_state

    # OPTIONAL - enrollment[course_section_id]
    """The ID of the course section to enroll the student in. If the
    section-specific URL is used, this argument is redundant and will be
    ignored."""
    if enrollment_course_section_id is not None:
        data["enrollment[course_section_id]"] = enrollment_course_section_id

    # OPTIONAL - enrollment[limit_privileges_to_course_section]
    """If set, the enrollment will only allow the user to see and interact with
    users enrolled in the section given by course_section_id.

    * For teachers and TAs, this includes grading privileges.
    * Section-limited students will not see any users (including teachers
      and TAs) not enrolled in their sections.
    * Users may have other enrollments that grant privileges to
      multiple sections in the same course."""
    if enrollment_limit_privileges_to_course_section is not None:
        data["enrollment[limit_privileges_to_course_section]"] = enrollment_limit_privileges_to_course_section

    # OPTIONAL - enrollment[notify]
    """If true, a notification will be sent to the enrolled user.
    Notifications are not sent by default."""
    if enrollment_notify is not None:
        data["enrollment[notify]"] = enrollment_notify

    # OPTIONAL - enrollment[self_enrollment_code]
    """If the current user is not allowed to manage enrollments in this
    course, but the course allows self-enrollment, the user can self-
    enroll as a student in the default section by passing in a valid
    code. When self-enrolling, the user_id must be 'self'. The
    enrollment_state will be set to 'active' and all other arguments
    will be ignored."""
    if enrollment_self_enrollment_code is not None:
        data["enrollment[self_enrollment_code]"] = enrollment_self_enrollment_code

    # OPTIONAL - enrollment[self_enrolled]
    """If true, marks the enrollment as a self-enrollment, which gives
    students the ability to drop the course if desired. Defaults to false."""
    if enrollment_self_enrolled is not None:
        data["enrollment[self_enrolled]"] = enrollment_self_enrolled

    # OPTIONAL - enrollment[associated_user_id]
    """For an observer enrollment, the ID of a student to observe. The
    caller must have +manage_students+ permission in the course.
    This is a one-off operation; to automatically observe all a
    student's enrollments (for example, as a parent), please use
    the {api:UserObserveesController#create User Observees API}."""
    if enrollment_associated_user_id is not None:
        data["enrollment[associated_user_id]"] = enrollment_associated_user_id

    self.logger.debug("POST /api/v1/courses/{course_id}/enrollments with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/enrollments".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def conclude_deactivate_or_delete_enrollment(self, id, course_id, task=None):
    """
    Conclude, deactivate, or delete an enrollment.
    Conclude, deactivate, or delete an enrollment. If the +task+ argument isn't given, the enrollment
    will be concluded.

    :param id: enrollment ID (required path param)
    :param course_id: course ID (required path param)
    :param task: optional action; one of conclude/delete/inactivate/deactivate
    :return: single-item result of the DELETE request
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - PATH - id
    """ID"""
    path["id"] = id

    # OPTIONAL - task
    """The action to take on the enrollment.
    When inactive, a user will still appear in the course roster to admins, but be unable to participate.
    ("inactivate" and "deactivate" are equivalent tasks)"""
    if task is not None:
        self._validate_enum(task, ["conclude", "delete", "inactivate", "deactivate"])
        params["task"] = task

    self.logger.debug("DELETE /api/v1/courses/{course_id}/enrollments/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/courses/{course_id}/enrollments/{id}".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def filter_ints_based_on_vlan(interfaces, vlan, count=1):
    """ Filter list of interfaces based on VLAN presence or absence criteria.

    :param interfaces: list of interfaces to filter.
    :param vlan: boolean indicating whether to filter interfaces with or without VLAN.
    :param count: number of expected VLANs (note that when vlanEnable == False, vlanCount == 1)
    :return: interfaces with VLAN(s) if vlan == True and vlanCount == count else interfaces without
        VLAN(s).
    :todo: add vlanEnable and vlanCount to interface/range/deviceGroup classes.
    """
    filtered_interfaces = []
    for interface in interfaces:
        # The VLAN attributes live on a different child object depending on
        # the interface's IxNetwork object type.
        if interface.obj_type() == 'interface':
            ixn_vlan = interface.get_object_by_type('vlan')
            vlanEnable = is_true(ixn_vlan.get_attribute('vlanEnable'))
            vlanCount = int(ixn_vlan.get_attribute('vlanCount'))
        elif interface.obj_type() == 'range':
            ixn_vlan = interface.get_object_by_type('vlanRange')
            vlanEnable = is_true(ixn_vlan.get_attribute('enabled'))
            vlanCount = len(ixn_vlan.get_objects_by_type('vlanIdInfo'))
        else:
            ixn_vlan = interface.get_object_by_type('ethernet')
            vlanEnable = is_true(ixn_vlan.get_attribute('useVlans'))
            vlanCount = int(ixn_vlan.get_attribute('vlanCount'))
        # XNOR: keep the interface when its VLAN-enabled state matches the
        # requested `vlan` flag AND the VLAN count matches `count`.
        if not (vlanEnable ^ vlan) and vlanCount == count:
            filtered_interfaces.append(interface)
    return filtered_interfaces
"resource": ""
} |
def _create(self, **attributes):
    """ Create new interface on IxNetwork.

    Set enabled and description (==name).

    :return: interface object reference.
    """
    # Interfaces are always created enabled; a configured name (if any)
    # becomes the IxNetwork 'description' attribute.
    attributes['enabled'] = True
    if 'name' in self._data:
        attributes['description'] = self._data['name']
    obj_ref = self.api.add(self.obj_parent(), self.obj_type(), **attributes)
    self.api.commit()
    return self.api.remapIds(obj_ref)
"resource": ""
} |
def _constraint_names(self, model, column_names=None, unique=None,
                      primary_key=None, index=None, foreign_key=None,
                      check=None):
    """
    Returns all constraint names matching the columns and conditions

    :param model: model whose table's constraints are introspected
    :param column_names: when given, only constraints covering exactly
        these columns (in order) are considered
    :param unique/primary_key/index/foreign_key/check: tri-state filters;
        None means "don't care"
    :return: list of matching constraint names
    """
    column_names = list(column_names) if column_names else None
    with self.connection.cursor() as cursor:
        constraints = get_constraints(cursor, model._meta.db_table)
    result = []
    for name, infodict in constraints.items():
        if column_names is None or column_names == infodict['columns']:
            if unique is not None and infodict['unique'] != unique:
                continue
            if primary_key is not None and infodict['primary_key'] != primary_key:
                continue
            if index is not None and infodict['index'] != index:
                continue
            if check is not None and infodict['check'] != check:
                continue
            # foreign_key filter only requires the constraint to *be* a
            # foreign key; it does not compare against a specific target.
            if foreign_key is not None and not infodict['foreign_key']:
                continue
            result.append(name)
    return result
"resource": ""
} |
def login_required(http_method_handler):
    """
    provides a decorator for RESTRequestHandler methods to check for authenticated users

    RESTRequestHandler subclass must have a auth_context instance, refer to prestans.auth
    for the parent class definition.

    If decorator is used and no auth_context is provided the client will be denied access.
    Handler will return a 401 Unauthorized if the user is not logged in, the service does
    not redirect to login handler page, this is the client's responsibility.

    auth_context_handler instance provides a message called get_current_user, use this
    to obtain a reference to an authenticated user profile.

    If all goes well, the original handler definition is executed.
    """
    @wraps(http_method_handler)
    def secure_http_method_handler(self, *args, **kwargs):
        # No authentication provider configured at all: deny access.
        if not self.__provider_config__.authentication:
            _message = "Service available to authenticated users only, no auth context provider set in handler"
            authentication_error = prestans.exception.AuthenticationError(_message)
            authentication_error.request = self.request
            raise authentication_error
        # Provider present but no authenticated user: deny access.
        if not self.__provider_config__.authentication.is_authenticated_user():
            authentication_error = prestans.exception.AuthenticationError()
            authentication_error.request = self.request
            raise authentication_error
        http_method_handler(self, *args, **kwargs)
    return secure_http_method_handler
"resource": ""
} |
def role_required(role_name=None):
    """
    Decorator factory restricting a HTTP method handler to users holding
    *role_name*.

    Checks run at request time against the handler's configured
    authentication provider; failures raise prestans AuthorizationError /
    AuthenticationError with the current request attached.

    With a little help from Peter Cole's Blog
    http://mrcoles.com/blog/3-decorator-examples-and-awesome-python/
    """
    def _role_required(http_method_handler):
        @wraps(http_method_handler)
        def secure_http_method_handler(self, *args, **kwargs):
            # role name must be provided
            if role_name is None:
                _message = "Role name must be provided"
                authorization_error = prestans.exception.AuthorizationError(_message)
                authorization_error.request = self.request
                raise authorization_error
            # authentication context must be set
            if not self.__provider_config__.authentication:
                _message = "Service available to authenticated users only, no auth context provider set in handler"
                authentication_error = prestans.exception.AuthenticationError(_message)
                authentication_error.request = self.request
                raise authentication_error
            # check for the role by calling current_user_has_role
            if not self.__provider_config__.authentication.current_user_has_role(role_name):
                authorization_error = prestans.exception.AuthorizationError(role_name)
                authorization_error.request = self.request
                raise authorization_error
            http_method_handler(self, *args, **kwargs)
        # fix: @wraps is already applied above; the original additionally
        # returned wraps(http_method_handler)(secure_http_method_handler),
        # applying functools.wraps a second time for no effect.
        return secure_http_method_handler
    return _role_required
"resource": ""
} |
def access_required(config=None):
    """
    Decorator factory authorizing a HTTP method handler based on a custom
    *config* object understood by the handler's authentication provider
    (via ``is_authorized_user(config)``).

    Failures raise prestans AuthenticationError / AuthorizationError with
    the current request attached.
    """
    def _access_required(http_method_handler):
        # consistency fix: apply functools.wraps once, at definition time,
        # matching login_required / role_required (the original applied it
        # only on the return expression).
        @wraps(http_method_handler)
        def secure_http_method_handler(self, *args, **kwargs):
            # authentication context must be set
            if not self.__provider_config__.authentication:
                _message = "Service available to authenticated users only, no auth context provider set in handler"
                authentication_error = prestans.exception.AuthenticationError(_message)
                authentication_error.request = self.request
                raise authentication_error
            # check for access by calling is_authorized_user
            if not self.__provider_config__.authentication.is_authorized_user(config):
                _message = "Service available to authorized users only"
                authorization_error = prestans.exception.AuthorizationError(_message)
                authorization_error.request = self.request
                raise authorization_error
            http_method_handler(self, *args, **kwargs)
        return secure_http_method_handler
    return _access_required
"resource": ""
} |
def gen_cmake_command(config):
    """
    Generate the source of a ``gen_cmake_command(options, arguments)`` helper
    based on the 'export' (environment variables) and 'define' (-D options)
    entries of *config*.

    :param config: mapping with 'export' and 'define' lists of code snippets.
    :return: generated Python source as a single string.
    """
    # fix: removed dead local import `from autocmake.extract import
    # extract_list` — it was never used and made the function unusable
    # without the autocmake package installed.
    s = []
    s.append("\n\ndef gen_cmake_command(options, arguments):")
    s.append(' """')
    s.append(" Generate CMake command based on options and arguments.")
    s.append(' """')
    s.append(" command = []")
    # environment variables exported before invoking cmake
    for env in config['export']:
        s.append(' command.append({0})'.format(env))
    s.append(" command.append(arguments['--cmake-executable'])")
    # -D definitions forwarded to cmake
    for definition in config['define']:
        s.append(' command.append({0})'.format(definition))
    s.append(" command.append('-DCMAKE_BUILD_TYPE={0}'.format(arguments['--type']))")
    s.append(" command.append('-G\"{0}\"'.format(arguments['--generator']))")
    s.append(" if arguments['--cmake-options'] != \"''\":")
    s.append(" command.append(arguments['--cmake-options'])")
    s.append(" if arguments['--prefix']:")
    s.append(" command.append('-DCMAKE_INSTALL_PREFIX=\"{0}\"'.format(arguments['--prefix']))")
    s.append("\n return ' '.join(command)")
    return '\n'.join(s)
"resource": ""
} |
def gen_setup(config, default_build_type, relative_path, setup_script_name):
    """
    Generate the front-end setup script as a list of source lines.

    The generated script parses docopt options, builds the CMake command via
    the generated gen_cmake_command(), and configures the build directory.

    :param config: autocmake configuration mapping (uses 'docopt', 'export',
        'define' entries).
    :param default_build_type: build type used in the --type default.
    :param relative_path: path from the setup script to the autocmake dir.
    :param setup_script_name: file name shown in the usage text.
    :return: list of source lines.
    """
    # fix: removed dead local import `from autocmake.extract import
    # extract_list` — it was never used.
    s = []
    s.append('#!/usr/bin/env python')
    s.append('\n{0}'.format(autogenerated_notice()))
    s.append('\nimport os')
    s.append('import sys')
    s.append('assert sys.version_info >= (2, 6), \'Python >= 2.6 is required\'')
    s.append("\nsys.path.insert(0, '{0}')".format(relative_path))
    s.append('from autocmake import configure')
    s.append('from autocmake.external import docopt')
    s.append('\n\noptions = """')
    s.append('Usage:')
    s.append(' ./{0} [options] [<builddir>]'.format(setup_script_name))
    s.append(' ./{0} (-h | --help)'.format(setup_script_name))
    s.append('\nOptions:')
    # project-specific docopt flags first, then the standard autocmake flags
    options = []
    for opt in config['docopt']:
        first = opt.split()[0].strip()
        rest = ' '.join(opt.split()[1:]).strip()
        options.append([first, rest])
    options.append(['--type=<TYPE>', 'Set the CMake build type (debug, release, relwithdebinfo, minsizerel) [default: {0}].'.format(default_build_type)])
    options.append(['--generator=<STRING>', 'Set the CMake build system generator [default: Unix Makefiles].'])
    options.append(['--show', 'Show CMake command and exit.'])
    options.append(['--cmake-executable=<CMAKE_EXECUTABLE>', 'Set the CMake executable [default: cmake].'])
    options.append(['--cmake-options=<STRING>', "Define options to CMake [default: '']."])
    options.append(['--prefix=<PATH>', 'Set the install path for make install.'])
    options.append(['<builddir>', 'Build directory.'])
    options.append(['-h --help', 'Show this screen.'])
    s.append(align_options(options))
    s.append('"""')
    s.append(gen_cmake_command(config))
    s.append("\n")
    s.append("# parse command line args")
    s.append("try:")
    s.append(" arguments = docopt.docopt(options, argv=None)")
    s.append("except docopt.DocoptExit:")
    s.append(r" sys.stderr.write('ERROR: bad input to {0}\n'.format(sys.argv[0]))")
    s.append(" sys.stderr.write(options)")
    s.append(" sys.exit(-1)")
    s.append("\n")
    s.append("# use extensions to validate/post-process args")
    s.append("if configure.module_exists('extensions'):")
    s.append(" import extensions")
    s.append(" arguments = extensions.postprocess_args(sys.argv, arguments)")
    s.append("\n")
    s.append("root_directory = os.path.dirname(os.path.realpath(__file__))")
    s.append("\n")
    s.append("build_path = arguments['<builddir>']")
    s.append("\n")
    s.append("# create cmake command")
    s.append("cmake_command = '{0} -H{1}'.format(gen_cmake_command(options, arguments), root_directory)")
    s.append("\n")
    s.append("# run cmake")
    s.append("configure.configure(root_directory, build_path, cmake_command, arguments['--show'])")
    return s
"resource": ""
} |
def gen_cmakelists(project_name, project_language, min_cmake_version, default_build_type, relative_path, modules):
    """
    Generate CMakeLists.txt as a list of source lines.
    """
    import os
    lines = [autogenerated_notice()]
    lines.append('\n# set minimum cmake version')
    lines.append('cmake_minimum_required(VERSION {0} FATAL_ERROR)'.format(min_cmake_version))
    lines.append('\n# project name')
    lines.append('project({0} {1})'.format(project_name, project_language))
    lines.append('\n# do not rebuild if rules (compiler flags) change')
    lines.append('set(CMAKE_SKIP_RULE_DEPENDENCY TRUE)')
    _build_type = {'debug': 'Debug',
                   'release': 'Release',
                   'relwithdebinfo': 'RelWithDebInfo',
                   'minsizerel': 'MinSizeRel'}[default_build_type]
    lines.append('\n# if CMAKE_BUILD_TYPE undefined, we set it to {0}'.format(_build_type))
    lines.append('if(NOT CMAKE_BUILD_TYPE)')
    lines.append(' set(CMAKE_BUILD_TYPE "{0}")'.format(_build_type))
    lines.append('endif()')
    if modules:
        lines.append('\n# directories which hold included cmake modules')
        # 'downloaded' is added so fetched modules can be found when testing;
        # sorting a de-duplicated list gives a stable order and minimal diffs.
        module_dirs = sorted(set([module.path for module in modules] + ['downloaded']))
        for directory in module_dirs:
            rel_cmake_module_path = os.path.join(relative_path, directory)
            # on windows cmake corrects this so we have to make it wrong again
            rel_cmake_module_path = rel_cmake_module_path.replace('\\', '/')
            lines.append('set(CMAKE_MODULE_PATH ${{CMAKE_MODULE_PATH}} ${{PROJECT_SOURCE_DIR}}/{0})'.format(rel_cmake_module_path))
        lines.append('\n# included cmake modules')
        for module in modules:
            lines.append('include({0})'.format(os.path.splitext(module.name)[0]))
    return lines
"resource": ""
} |
def align_options(options):
    """
    Indent each flag and align the help texts into a single column.

    :param options: sequence of [flag, help_text] pairs.
    :return: the formatted lines joined with newlines.
    """
    if not options:
        return ''
    width = max(len(opt[0]) for opt in options)
    return '\n'.join(
        ' {0}{1} {2}'.format(opt[0], ' ' * (width - len(opt[0])), opt[1])
        for opt in options)
"resource": ""
} |
def format_exception(etype, value, tback, limit=None):
    """
    Python 2 compatible version of traceback.format_exception.
    Accepts negative limits like the Python 3 version.
    """
    lines = ['Traceback (most recent call last):\n']
    if limit is not None and limit < 0:
        # Negative limit: keep only the innermost -limit frames.
        lines.extend(traceback.format_list(traceback.extract_tb(tback)[limit:]))
    else:
        lines.extend(traceback.format_tb(tback, limit))
    lines.extend(traceback.format_exception_only(etype, value))
    return lines
"resource": ""
} |
def load_modules(self):
    """
    Locate and import modules from locations specified during initialization.
    Locations include:
    - Program's standard library (``library``)
    - `Entry points <Entry point_>`_ (``entry_point``)
    - Specified modules (``modules``)
    - Specified paths (``paths``)
    If a malformed child plugin class is imported, a :py:exc:`PluginWarning` will be issued,
    the class is skipped, and loading operations continue.
    If an invalid `entry point <Entry point_>`_ is specified, an :py:exc:`EntryPointWarning`
    is issued and loading operations continue.
    """
    # Start with standard library
    if self.library:
        LOGGER.info('Loading plugins from standard library')
        libmod = _import_module(self.library)
        _recursive_import(libmod)
    # Get entry points
    if self.entry_point:
        LOGGER.info('Loading plugins from entry points group %s', self.entry_point)
        for epoint in iter_entry_points(group=self.entry_point):
            try:
                mod = _import_module(epoint)
            except PluginImportError as e:
                # A broken entry point is reported but does not abort loading.
                warnings.warn("Module %s can not be loaded for entry point %s: %s" %
                              (epoint.module_name, epoint.name, e), EntryPointWarning)
                continue
            # If we have a package, walk it
            if ismodule(mod):
                _recursive_import(mod)
            else:
                warnings.warn("Entry point '%s' is not a module or package" % epoint.name,
                              EntryPointWarning)
    # Load auxiliary modules
    if self.modules:
        for mod in self.modules:
            LOGGER.info('Loading plugins from %s', mod)
            _recursive_import(_import_module(mod))
    # Load auxiliary paths
    if self.paths:
        # Plugins found on extra paths are imported under self.prefix_package
        # by temporarily extending that package's __path__.
        auth_paths_mod = importlib.import_module(self.prefix_package)
        initial_path = auth_paths_mod.__path__[:]
        # Append each path to module path
        for path in self.paths:
            modpath = os.path.realpath(path)
            if os.path.isdir(modpath):
                LOGGER.info('Adding %s as a plugin search path', path)
                if modpath not in auth_paths_mod.__path__:
                    auth_paths_mod.__path__.append(modpath)
            else:
                LOGGER.info("Configured plugin path '%s' is not a valid directory", path)
        # Walk packages
        try:
            _recursive_import(auth_paths_mod)
        finally:
            # Restore Path
            # (always undo the temporary __path__ extension, even if a
            # plugin import raised, so later imports are unaffected)
            auth_paths_mod.__path__[:] = initial_path
    self.loaded = True
"resource": ""
} |
def plugins(self):
    """
    Newest version of all plugins in the group filtered by ``blacklist``

    Returns:
        dict: Nested dictionary of plugins accessible through dot-notation.

    The result is keyed by plugin type and name and also supports
    dot-notation access; missing types or plugins raise :py:exc:`KeyError`,
    just like an undefined dictionary key. Parent types are always present;
    child plugins appear only when a valid, non-blacklisted plugin exists.
    """
    if not self.loaded:
        self.load_modules()
    group_plugins = get_plugins()[self.group]
    # pylint: disable=protected-access
    return group_plugins._filter(blacklist=self.blacklist,
                                 newest_only=True,
                                 type_filter=self.type_filter)
"resource": ""
} |
def plugins_all(self):
    """
    All resulting versions of all plugins in the group filtered by ``blacklist``

    Returns:
        dict: Nested dictionary of plugins accessible through dot-notation.

    Like :py:attr:`plugins`, except the lowest level is a plain dictionary of
    every unfiltered plugin version for the given type and name. Parent types
    are always present; child plugins appear only when at least one valid,
    non-blacklisted plugin exists.
    """
    if not self.loaded:
        self.load_modules()
    group_plugins = get_plugins()[self.group]
    # pylint: disable=protected-access
    return group_plugins._filter(blacklist=self.blacklist,
                                 type_filter=self.type_filter)
"resource": ""
} |
def insert_uniques(self, table, columns, values):
    """
    Insert multiple rows into a table that do not already exist.

    Rows whose primary key already exists are updated in place; rows with a
    new primary key are inserted. If nothing was inserted or updated, a
    message is printed.
    """
    # Rows that exist in the table
    existing_rows = self.select(table, columns)
    # Rows that DO NOT exist in the table
    unique = diff(existing_rows, values, y_only=True)  # Get values that are not in existing_rows
    # Keys that exist in the table
    keys = self.get_primary_key_vals(table)
    # Primary key's column index
    pk_col = self.get_primary_key(table)
    pk_index = columns.index(pk_col)
    # Split list of unique rows into rows to update and rows to insert
    to_insert, to_update = [], []
    for row in unique:
        if row[pk_index] in keys:
            # Primary key exists, update row rather than insert
            to_update.append(row)
        else:
            # Primary key is not in list of pk values, insert new row
            to_insert.append(row)
    # Insert new rows
    if to_insert:
        self.insert_many(table, columns, to_insert)
    # Update existing rows
    if to_update:
        self.update_many(table, columns, to_update, pk_col, 0)
    # fix: the original condition was `len(to_update) < 0`, which is never
    # true, so this message could never be printed.
    if not to_insert and not to_update:
        self._printer('No rows added to', table)
"resource": ""
} |
def insert(self, table, columns, values, execute=True):
    """Insert a single row into a table, or return the SQL when execute=False."""
    # TODO: Cant accept lists?
    # Build the parameterized INSERT statement.
    cols, vals = get_col_val_str(columns)
    statement = "INSERT INTO {0} ({1}) VALUES ({2})".format(wrap(table), cols, vals)
    if not execute:
        # Caller only wants the SQL text.
        return statement
    self._cursor.execute(statement, values)
    self._commit()
    self._printer('\tMySQL row successfully inserted into {0}'.format(table))
"resource": ""
} |
def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True):
    """
    Insert multiple rows into a table.

    `values` is normally a list of row sequences. A flat list is normalized
    so each element becomes a single-column row; empty or None elements
    become [None]. Inserts are chunked at `limit` rows per query.
    """
    # Make values a list of lists if it is a flat list
    # fix: the original assigned `values = []` and then iterated over the
    # now-empty `values`, so flat input was silently discarded.
    if not isinstance(values[0], (list, set, tuple)):
        flat_values, values = values, []
        for v in flat_values:
            # NOTE(review): assumes flat elements are sized (e.g. strings);
            # a bare int would raise TypeError on len() — confirm callers.
            if v is not None and len(v) > 0:
                values.append([v])
            else:
                values.append([None])
    # Concatenate statement
    cols, vals = get_col_val_str(columns)
    statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)
    if execute and len(values) > limit:
        # Execute in chunks of at most `limit` rows per query.
        while len(values) > 0:
            chunk = [values.pop(0) for i in range(0, min(limit, len(values)))]
            self._cursor.executemany(statement, chunk)
            self._commit()
    elif execute:
        # Execute statement
        self._cursor.executemany(statement, values)
        self._commit()
        self._printer('\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')
    # Only return statement
    else:
        return statement
"resource": ""
} |
def list_quizzes_in_course(self, course_id, search_term=None):
    """
    List quizzes in a course.
    Returns the list of Quizzes in this course.
    """
    # REQUIRED path parameter
    path = {"course_id": course_id}
    data = {}
    params = {}
    # Only send the search filter when the caller provided one.
    if search_term is not None:
        params["search_term"] = search_term
    self.logger.debug("GET /api/v1/courses/{course_id}/quizzes with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes".format(**path), data=data, params=params, all_pages=True)
"resource": ""
} |
def create_quiz(self, course_id, quiz_title, quiz_access_code=None, quiz_allowed_attempts=None, quiz_assignment_group_id=None, quiz_cant_go_back=None, quiz_description=None, quiz_due_at=None, quiz_hide_correct_answers_at=None, quiz_hide_results=None, quiz_ip_filter=None, quiz_lock_at=None, quiz_one_question_at_a_time=None, quiz_one_time_results=None, quiz_only_visible_to_overrides=None, quiz_published=None, quiz_quiz_type=None, quiz_scoring_policy=None, quiz_show_correct_answers=None, quiz_show_correct_answers_at=None, quiz_show_correct_answers_last_attempt=None, quiz_shuffle_answers=None, quiz_time_limit=None, quiz_unlock_at=None):
    """
    Create a quiz.
    Create a new quiz for this course.

    Only ``course_id`` and ``quiz_title`` are required; every other keyword
    maps to the corresponding ``quiz[...]`` form field and is sent only when
    not None. Notable semantics (per the Canvas API):

    - quiz_quiz_type: one of practice_quiz, assignment, graded_survey, survey.
    - quiz_hide_results: null, "always", or "until_after_last_attempt"
      (the latter only valid if allowed_attempts > 1).
    - quiz_scoring_policy: "keep_highest" or "keep_latest"; required and only
      valid if allowed_attempts > 1.
    - quiz_allowed_attempts: -1 means unlimited; defaults to 1.
    - Date/time fields (due_at, lock_at, unlock_at, show/hide_correct_answers_at)
      accept ISO 8601 strings, e.g. 2011-10-21T18:48Z.
    - quiz_published: unpublishing fails once students have submissions.
    """
    path = {"course_id": course_id}
    params = {}
    # REQUIRED - quiz[title]
    data = {"quiz[title]": quiz_title}
    # Optional form fields, in the order the API request historically built
    # them; the third element is the list of allowed enum values (or None).
    optional_fields = (
        ("quiz[description]", quiz_description, None),
        ("quiz[quiz_type]", quiz_quiz_type,
         ["practice_quiz", "assignment", "graded_survey", "survey"]),
        ("quiz[assignment_group_id]", quiz_assignment_group_id, None),
        ("quiz[time_limit]", quiz_time_limit, None),
        ("quiz[shuffle_answers]", quiz_shuffle_answers, None),
        ("quiz[hide_results]", quiz_hide_results,
         ["always", "until_after_last_attempt"]),
        ("quiz[show_correct_answers]", quiz_show_correct_answers, None),
        ("quiz[show_correct_answers_last_attempt]", quiz_show_correct_answers_last_attempt, None),
        ("quiz[show_correct_answers_at]", quiz_show_correct_answers_at, None),
        ("quiz[hide_correct_answers_at]", quiz_hide_correct_answers_at, None),
        ("quiz[allowed_attempts]", quiz_allowed_attempts, None),
        ("quiz[scoring_policy]", quiz_scoring_policy,
         ["keep_highest", "keep_latest"]),
        ("quiz[one_question_at_a_time]", quiz_one_question_at_a_time, None),
        ("quiz[cant_go_back]", quiz_cant_go_back, None),
        ("quiz[access_code]", quiz_access_code, None),
        ("quiz[ip_filter]", quiz_ip_filter, None),
        ("quiz[due_at]", quiz_due_at, None),
        ("quiz[lock_at]", quiz_lock_at, None),
        ("quiz[unlock_at]", quiz_unlock_at, None),
        ("quiz[published]", quiz_published, None),
        ("quiz[one_time_results]", quiz_one_time_results, None),
        ("quiz[only_visible_to_overrides]", quiz_only_visible_to_overrides, None),
    )
    for key, value, allowed in optional_fields:
        if value is None:
            continue
        if allowed is not None:
            # Same validation the original performed per-field.
            self._validate_enum(value, allowed)
        data[key] = value
    self.logger.debug("POST /api/v1/courses/{course_id}/quizzes with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def validate_quiz_access_code(self, id, course_id, access_code):
    """
    Validate quiz access code.
    Accepts an access code and returns a boolean indicating whether that access code is correct
    """
    # REQUIRED path parameters
    path = {"course_id": course_id, "id": id}
    # REQUIRED form field: the access code being validated
    data = {"access_code": access_code}
    params = {}
    self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{id}/validate_access_code with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{id}/validate_access_code".format(**path), data=data, params=params)
"resource": ""
} |
def run_suite(case, config, summary):
    """ Run the full suite of validation tests """
    # Load the case-specific validation module and run it.
    m = _load_case_module(case, config)
    result = m.run(case, config)
    # Record a condensed summary for the index page and echo it to the log.
    summary[case] = _summarize_result(m, result)
    _print_summary(m, case, summary)
    if result['Type'] == 'Book':
        # A 'Book' result bundles several pages: write one HTML page and one
        # JSON blob per entry in result['Data'].
        for name, page in six.iteritems(result['Data']):
            functions.create_page_from_template("validation.html",
                                                os.path.join(livvkit.index_dir, "validation", name + ".html"))
            functions.write_json(page, os.path.join(livvkit.output_dir, "validation"), name + ".json")
    else:
        # Single-page result: one HTML page and one JSON blob named after the case.
        functions.create_page_from_template("validation.html",
                                            os.path.join(livvkit.index_dir, "validation", case + ".html"))
        functions.write_json(result, os.path.join(livvkit.output_dir, "validation"), case + ".json")
"resource": ""
} |
def _add_generate_sub_commands(self):
    """
    Sub commands for generating models for usage by clients.
    Currently supports Google Closure.
    """
    gen_parser = self._subparsers_handle.add_parser(
        name="gen",
        help="generate client side model stubs, filters"
    )
    # Which code template to render.
    # NOTE(review): required=True makes the default value unreachable —
    # argparse will always demand -t/--template; confirm intent.
    gen_parser.add_argument(
        "-t",
        "--template",
        choices=['closure.model', 'closure.filter'],
        default='closure.model',
        required=True,
        dest="template",
        help="template to use for client side code generation"
    )
    # Source of the model definitions (file or package path).
    gen_parser.add_argument(
        "-m",
        "--model",
        required=True,
        dest="models_definition",
        help="path to models definition file or package"
    )
    # Destination directory for the generated code.
    gen_parser.add_argument(
        "-o",
        "--output",
        default=".",
        dest="output",
        help="output path for generated code"
    )
    # Namespace used by the chosen template.
    gen_parser.add_argument(
        "-n",
        "--namespace",
        required=True,
        dest="namespace",
        help="namespace to use with template e.g prestans.data.model"
    )
    # Optional separate namespace for generated filters.
    gen_parser.add_argument(
        "-fn",
        "--filter-namespace",
        required=False,
        default=None,
        dest="filter_namespace",
        help="filter namespace to use with template e.g prestans.data.filter"
    )
"resource": ""
} |
def _dispatch_gen(self):
    """
    Handle the "gen" sub-command: validate the arguments and run the
    client-side code generator (Preplate).
    """
    output_dir = self._args.output
    definition = self._args.models_definition
    if not os.path.isdir(output_dir):
        raise exception.Base("%s is not a writeable directory" % output_dir)
    # The model definitions may be a single file or an importable package.
    if not os.path.isfile(definition) and not self.check_package_exists(definition):
        raise exception.Base("failed to locate package or models definitions file at: %s" % definition)
    from prestans.devel.gen import Preplate
    Preplate(
        template_type=self._args.template,
        models_definition=definition,
        namespace=self._args.namespace,
        filter_namespace=self._args.filter_namespace,
        output_directory=output_dir).run()
"resource": ""
} |
def search(self):
    """Return a search using the combined query of all associated special coverage objects."""
    included = self.query.get("included_ids", [])
    pinned = self.query.get("pinned_ids", [])
    excluded = self.query.get("excluded_ids", [])
    # Match any of: explicitly included ids, pinned ids, or group conditions.
    any_of = [es_filter.Terms(pk=included),
              es_filter.Terms(pk=pinned)] + self.get_group_filters()
    # Must satisfy one of the above and must not be explicitly excluded.
    required = [es_filter.Bool(should=any_of),
                ~es_filter.Terms(pk=excluded)]
    return Content.search_objects.search().filter(es_filter.Bool(must=required))
"resource": ""
} |
def get_group_filters(self):
    """Return es OR filters to include all special coverage group conditions."""
    # Maps condition field names onto their elasticsearch document fields.
    field_map = {
        "feature-type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type",
    }
    filters = []
    for group_set in self.query.get("groups", []):
        for group in group_set:
            # AND together every condition within the group.
            combined = es_filter.MatchAll()
            for condition in group.get("conditions", []):
                combined &= get_condition_filter(condition, field_map=field_map)
            filters.append(combined)
    return filters
"resource": ""
} |
def query(self):
    """Combine the queries of all associated special coverages and memoize the result."""
    if self._query:
        # Already combined on an earlier access.
        return self._query
    combined = self._query
    combined.update({
        "excluded_ids": [],
        "included_ids": [],
        "pinned_ids": [],
        "groups": [],
    })
    for coverage in self._special_coverages:
        # Access query at dict level; some coverages nest it under "query".
        coverage_query = getattr(coverage, "query", {})
        if "query" in coverage_query:
            coverage_query = coverage_query.get("query")
        combined["excluded_ids"].extend(coverage_query.get("excluded_ids", []))
        combined["included_ids"].extend(coverage_query.get("included_ids", []))
        combined["pinned_ids"].extend(coverage_query.get("pinned_ids", []))
        combined["groups"].append(coverage_query.get("groups", []))
    return combined
"resource": ""
} |
def sam2rnf(args):
    """Convert SAM to RNF-based FASTQ with respect to argparse parameters.

    Args:
        args (...): Arguments parsed by argparse
    """
    recode_kwargs = {
        "sam_fn": args.sam_fn,
        "fastq_rnf_fo": args.fq_fo,
        "fai_fo": args.fai_fo,
        "genome_id": args.genome_id,
        "number_of_read_tuples": 10**9,
        "simulator_name": args.simulator_name,
        "allow_unmapped": args.allow_unmapped,
    }
    rnftools.mishmash.Source.recode_sam_reads(**recode_kwargs)
"resource": ""
} |
def add_sam2rnf_parser(subparsers, subcommand, help, description, simulator_name=None):
    """Add another parser for a SAM2RNF-like command.

    Args:
        subparsers (subparsers): argparse subparsers collection to extend.
        simulator_name (str): Name of the simulator used in comments.
    """
    parser = subparsers.add_parser(subcommand, help=help, description=description)
    parser.set_defaults(func=sam2rnf)
    parser.add_argument(
        '-s', '--sam',
        type=str, metavar='file', dest='sam_fn', required=True,
        help='Input SAM/BAM with true (expected) alignments of the reads (- for standard input).',
    )
    _add_shared_params(parser, unmapped_switcher=True)
    # Hide the option from --help when no simulator name was given.
    simulator_help = 'Name of the simulator (for RNF).' if simulator_name is not None else argparse.SUPPRESS
    parser.add_argument(
        '-n', '--simulator-name',
        type=str, metavar='str', dest='simulator_name', default=simulator_name,
        help=simulator_help,
    )
"resource": ""
} |
def list_roles(self, account_id, show_inherited=None, state=None):
    """
    List roles.
    List the roles available to an account.
    """
    # REQUIRED path parameter: account to retrieve roles for.
    path = {"account_id": account_id}
    data = {}
    params = {}
    # Filter by role state; the API returns only 'active' roles when omitted.
    if state is not None:
        self._validate_enum(state, ["active", "inactive"])
        params["state"] = state
    # Optionally include roles inherited from parent accounts.
    if show_inherited is not None:
        params["show_inherited"] = show_inherited
    self.logger.debug("GET /api/v1/accounts/{account_id}/roles with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/roles".format(**path), data=data, params=params, all_pages=True)
"resource": ""
} |
def get_single_role(self, id, role_id, account_id, role=None):
    """
    Get a single role.
    Retrieve information about a single role
    """
    # REQUIRED path parameters: role id and the account containing it.
    path = {"id": id, "account_id": account_id}
    data = {}
    # REQUIRED query parameter: unique identifier for the role.
    params = {"role_id": role_id}
    # Optional role name filter.
    if role is not None:
        params["role"] = role
    self.logger.debug("GET /api/v1/accounts/{account_id}/roles/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/roles/{id}".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
q42269 | Operations.execute_script | train | def execute_script(self, sql_script=None, commands=None, split_algo='sql_split', prep_statements=False,
dump_fails=True, execute_fails=True, ignored_commands=('DROP', 'UNLOCK', 'LOCK')):
"""Wrapper method for SQLScript class."""
ss = Execute(sql_script, split_algo, prep_statements, dump_fails, self)
ss.execute(commands, ignored_commands=ignored_commands, execute_fails=execute_fails) | python | {
"resource": ""
} |
q42270 | Operations.script | train | def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True):
"""Wrapper method providing access to the SQLScript class's methods and properties."""
return Execute(sql_script, split_algo, prep_statements, dump_fails, self) | python | {
"resource": ""
} |
def obtain_token(self, redirect_url: str, state: str) -> str:
    """
    Exchange the code that was obtained using `authorize_url` for an authorization token. The code is extracted
    from the URL that redirected the user back to your site.

    Example:

    >>> auth = OAuthAuthentication('https://example.com/oauth/moneybird/', 'your_id', 'your_secret')
    >>> auth.obtain_token('https://example.com/oauth/moneybird/?code=any&state=random_string', 'random_string')
    'token_for_auth'
    >>> auth.is_ready()
    True

    :param redirect_url: The full URL the user was redirected to.
    :param state: The state used in the authorize url.
    :return: The authorization token.
    :raises ValueError: If the redirect URL is malformed or the state does not match (possible CSRF).
    :raises OAuthAuthentication.OAuthError: If the OAuth server reports an error.
    """
    url_data = parse_qs(redirect_url.split('?', 1)[1])

    if 'error' in url_data:
        logger.warning("Error received in OAuth authentication response: %s" % url_data.get('error'))
        raise OAuthAuthentication.OAuthError(url_data['error'], url_data.get('error_description', None))

    if 'code' not in url_data:
        logger.error("The provided URL is not a valid OAuth authentication response: no code")
        raise ValueError("The provided URL is not a valid OAuth authentication response: no code")

    # Use .get() so a response that carries no state parameter at all is treated as a
    # mismatch instead of raising a KeyError.
    if state and [state] != url_data.get('state'):
        logger.warning("OAuth CSRF attack detected: the state in the provided URL does not equal the given state")
        raise ValueError("CSRF attack detected: the state in the provided URL does not equal the given state")

    try:
        response = requests.post(
            url=urljoin(self.base_url, self.token_url),
            data={
                'grant_type': 'authorization_code',
                'code': url_data['code'][0],
                'redirect_uri': self.redirect_url,
                'client_id': self.client_id,
                'client_secret': self.client_secret,
            },
        ).json()
    except ValueError:
        logger.error("The OAuth server returned an invalid response when obtaining a token: JSON error")
        raise ValueError("The OAuth server returned an invalid response when obtaining a token: JSON error")

    if 'error' in response:
        logger.warning("Error while obtaining OAuth authorization token: %s" % response['error'])
        # Bug fix: pass the server-provided description (RFC 6749 'error_description') to
        # OAuthError -- previously the error code itself was passed twice.
        raise OAuthAuthentication.OAuthError(response['error'], response.get('error_description', None))

    if 'access_token' not in response:
        logger.error("The OAuth server returned an invalid response when obtaining a token: no access token")
        raise ValueError("The remote server returned an invalid response when obtaining a token: no access token")

    self.real_auth.set_token(response['access_token'])
    logger.debug("Obtained authentication token for state %s: %s" % (state, self.real_auth.auth_token))

    return response['access_token']
"resource": ""
} |
def sql_column_type(column_data, prefer_varchar=False, prefer_int=False):
    """
    Retrieve the best fit data type for a column of a MySQL table.

    Accepts an iterable of values ONLY for the column whose data type
    is in question.

    :param column_data: Iterable of values from a MySQL table column
    :param prefer_varchar: Use type VARCHAR if valid
    :param prefer_int: Use type INT if valid
    :return: data type string, e.g. 'VARCHAR (24)' or 'DECIMAL (8, 2)'
    """
    # Collect list of (type, length, decimal-length) tuples, one per record
    type_len_pairs = [ValueType(record).get_type_len for record in column_data]

    # Retrieve frequency counts of each type.
    # Bug fix: count occurrences of the type *name*, not of the name within the list of
    # (type, len, len_dec) tuples -- the old `type_len_pairs.count(t)` was always 0,
    # which made the "most frequent" choice arbitrary.
    type_names = [type_ for type_, len_, len_dec in type_len_pairs]
    types_count = {t: type_names.count(t) for t in set(type_names)}

    # Most frequently occurring datatype
    most_frequent = max(types_count.items(), key=itemgetter(1))[0]

    # Get max length of all rows to determine suitable limit
    len_lst, len_decimals_lst = [], []
    for type_, len_, len_dec in type_len_pairs:
        if type_ == most_frequent:
            if type(len_) is int:
                len_lst.append(len_)
            if type(len_dec) is int:
                len_decimals_lst.append(len_dec)

    # Catch errors if current type has no len
    try:
        max_len = max(len_lst)
    except ValueError:
        max_len = None
    try:
        max_len_decimal = max(len_decimals_lst)
    except ValueError:
        max_len_decimal = None

    # Return VARCHAR or INT type if flag is on
    if prefer_varchar and most_frequent != 'VARCHAR' and 'text' in most_frequent.lower():
        most_frequent = 'VARCHAR'
    elif prefer_int and most_frequent != 'INT' and 'int' in most_frequent.lower():
        most_frequent = 'INT'

    # Return MySQL datatype in proper format, only include length if it is set
    if max_len and max_len_decimal:
        return '{0} ({1}, {2})'.format(most_frequent, max_len, max_len_decimal)
    elif max_len:
        return '{0} ({1})'.format(most_frequent, max_len)
    else:
        return most_frequent
"resource": ""
} |
def get_sql(self):
    """Retrieve the SQL data type for a data record by probing each type test in priority order."""
    # Ordered from most to least specific; the first test that matches wins.
    checks = (
        self.is_time,
        self.is_date,
        self.is_datetime,
        self.is_decimal,
        self.is_year,
        self.is_tinyint,
        self.is_smallint,
        self.is_mediumint,
        self.is_int,
        self.is_bigint,
        self.is_tinytext,
        self.is_varchar,
        self.is_mediumtext,
        self.is_longtext,
    )
    # A passing check populates self.sql as a side effect; None if nothing matched.
    return next((self.sql for check in checks if check()), None)
"resource": ""
} |
def get_type_len(self):
    """Retrieve the (type, length, decimal length) triple for a data record."""
    # Run the type checks first so type/len/len_decimal are populated.
    self.get_sql()
    return (self.type, self.len, self.len_decimal)
"resource": ""
} |
def create_course_section(self, course_id, course_section_end_at=None, course_section_name=None, course_section_restrict_enrollments_to_section_dates=None, course_section_sis_section_id=None, course_section_start_at=None, enable_sis_reactivation=None):
    """
    Create course section.

    Creates a new section for this course.
    """
    path = {"course_id": course_id}
    params = {}
    # Only include form fields the caller actually supplied.
    optional_fields = (
        ("course_section[name]", course_section_name),
        ("course_section[sis_section_id]", course_section_sis_section_id),
        ("course_section[start_at]", course_section_start_at),
        ("course_section[end_at]", course_section_end_at),
        ("course_section[restrict_enrollments_to_section_dates]", course_section_restrict_enrollments_to_section_dates),
        ("enable_sis_reactivation", enable_sis_reactivation),
    )
    data = {key: value for key, value in optional_fields if value is not None}

    self.logger.debug("POST /api/v1/courses/{course_id}/sections with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/sections".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def edit_section(self, id, course_section_end_at=None, course_section_name=None, course_section_restrict_enrollments_to_section_dates=None, course_section_sis_section_id=None, course_section_start_at=None):
    """
    Edit a section.

    Modify an existing section.
    """
    path = {"id": id}
    params = {}
    # Only include form fields the caller actually supplied.
    optional_fields = (
        ("course_section[name]", course_section_name),
        ("course_section[sis_section_id]", course_section_sis_section_id),
        ("course_section[start_at]", course_section_start_at),
        ("course_section[end_at]", course_section_end_at),
        ("course_section[restrict_enrollments_to_section_dates]", course_section_restrict_enrollments_to_section_dates),
    )
    data = {key: value for key, value in optional_fields if value is not None}

    self.logger.debug("PUT /api/v1/sections/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/sections/{id}".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def change_db(self, db, user=None):
    """Switch the active connection to another database (optionally as another user)."""
    # NOTE: self._config is deliberately mutated in place so the new settings persist.
    cfg = self._config
    cfg['database'] = db
    if user:
        cfg['user'] = user
    self.database = db
    # Tear down the old connection, then open one against the new database.
    self._disconnect()
    self._connect(cfg)
"resource": ""
} |
def execute(self, command):
    """Run a single SQL statement, commit, and report success (no result rows returned)."""
    cursor = self._cursor
    cursor.execute(command)
    self._commit()
    return True
"resource": ""
} |
def executemany(self, command, params=None, max_attempts=5):
    """
    Execute a parameterized SQL statement against many parameter sets without returning a result.

    Retries with a fresh connection after each failure. Once max_attempts is
    exhausted the last exception is re-raised (previously the failure was
    swallowed and None was silently returned).

    :param command: SQL statement with placeholders
    :param params: Sequence of parameter tuples
    :param max_attempts: Number of attempts before giving up
    :return: True on success
    """
    attempts = 0
    while True:
        try:
            # Execute statement
            self._cursor.executemany(command, params)
            self._commit()
            return True
        except Exception as e:
            attempts += 1
            self.reconnect()
            if attempts >= max_attempts:
                # Bug fix: surface the failure instead of silently returning None.
                raise e
"resource": ""
} |
def _connect(self, config):
    """
    Establish a connection with a MySQL database and create a cursor.

    :param config: dict of mysql.connector.connect keyword arguments
    :raises Error: re-raises any connector error after printing a diagnostic
    """
    # Default the connection timeout on both the stored config and the config
    # actually used to connect. (Previously only self._config was defaulted, so a
    # caller-supplied config dict never received the timeout.)
    self._config.setdefault('connection_timeout', 480)
    config.setdefault('connection_timeout', 480)
    try:
        self._cnx = connect(**config)
        self._cursor = self._cnx.cursor()
        self._printer('\tMySQL DB connection established with db', config['database'])
    except Error as err:
        # Print a human-readable hint for the two most common failure modes,
        # then always re-raise so the caller can react.
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        raise err
"resource": ""
} |
q42281 | Connector._fetch | train | def _fetch(self, statement, commit, max_attempts=5):
"""
Execute a SQL query and return a result.
Recursively disconnect and reconnect to the database
if an error occurs.
"""
if self._auto_reconnect:
attempts = 0
while attempts < max_attempts:
try:
# Execute statement
self._cursor.execute(statement)
fetch = self._cursor.fetchall()
rows = self._fetch_rows(fetch)
if commit:
self._commit()
# Return a single item if the list only has one item
return rows[0] if len(rows) == 1 else rows
except Exception as e:
if attempts >= max_attempts:
raise e
else:
attempts += 1
self.reconnect()
continue
else:
# Execute statement
self._cursor.execute(statement)
fetch = self._cursor.fetchall()
rows = self._fetch_rows(fetch)
if commit:
self._commit()
# Return a single item if the list only has one item
return rows[0] if len(rows) == 1 else rows | python | {
"resource": ""
} |
def resize(widthWindow, heightWindow):
    """Initial settings for the OpenGL state machine, clear color, window size, etc

    NOTE(review): widthWindow/heightWindow are currently unused -- no glViewport or
    projection update happens here; confirm whether that was intended.
    """
    # Enable alpha blending and anti-aliased points with smooth shading.
    glEnable(GL_BLEND)
    glEnable(GL_POINT_SMOOTH)
    glShadeModel(GL_SMOOTH)# Enables Smooth Shading
    glBlendFunc(GL_SRC_ALPHA,GL_ONE)#Type Of Blending To Perform
    glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);#Really Nice Perspective Calculations
    glHint(GL_POINT_SMOOTH_HINT,GL_NICEST);#Really Nice Point Smoothing
    # Depth testing off: additively-blended drawing does not need z-ordering.
    glDisable(GL_DEPTH_TEST)
"resource": ""
} |
def set_bumper_color(self, particle, group, bumper, collision_point, collision_normal):
    """Adopt the RGB components of the colliding particle's color as this bumper's color."""
    rgb = tuple(particle.color)[:3]
    self.color = rgb
"resource": ""
} |
def reset(self, rabaClass, namespace = None) :
    """Re-initialise the query for ``rabaClass``.

    rabaClass can either be a Raba class or the string name of a Raba class. In the
    latter case you must provide the namespace argument; if it's a Raba class the
    namespace argument is ignored. If you fear circular imports, use strings.
    """
    # Python 2 idiom: a string class name must be resolved through the namespace's connection.
    if type(rabaClass) is types.StringType :
        self._raba_namespace = namespace
        self.con = stp.RabaConnection(self._raba_namespace)
        self.rabaClass = self.con.getClass(rabaClass)
    else :
        # A class object carries its own namespace; the 'namespace' argument is ignored.
        self.rabaClass = rabaClass
        self._raba_namespace = self.rabaClass._raba_namespace
        self.con = stp.RabaConnection(self._raba_namespace)
    self.filters = []
    self.tables = set()
    #self.fctPattern = re.compile("\s*([^\s]+)\s*\(\s*([^\s]+)\s*\)\s*([=><])\s*([^\s]+)\s*")
    # Parses "field operator value" filter strings: group(1)=field, group(2)=operator
    # (group(3) is the inner case-insensitive LIKE alternative), group(4)=value.
    self.fieldPattern = re.compile("\s*([^\s\(\)]+)\s*([=><]|([L|l][I|i][K|k][E|e]))\s*(.+)")
    # Operators accepted in "field operator" filter keys ('=' is listed twice; harmless in a set).
    self.operators = set(['LIKE', '=', '<', '>', '=', '>=', '<=', '<>', '!=', 'IS'])
"resource": ""
} |
def addFilter(self, *lstFilters, **dctFilters) :
    """Add a new filter to the query.

    Filters may be supplied as a dict passed as the first positional argument, as
    keyword arguments (keys are ``field`` or ``"field operator"``), or as iterables
    of condition strings such as ``"name LIKE 'bob%'"`` parsed via self.fieldPattern.
    """
    # Merge a leading dict argument (if any) with the keyword filters into dstF.
    dstF = {}
    if len(lstFilters) > 0 :
        if type(lstFilters[0]) is types.DictType :
            dstF = lstFilters[0]
            lstFilters = lstFilters[1:]
    if len(dctFilters) > 0 :
        dstF = dict(dstF, **dctFilters)

    filts = {}
    # Dict-style filters: keys are "field" (implicit '=') or "field operator".
    for k, v in dstF.iteritems() :
        sk = k.split(' ')
        if len(sk) == 2 :
            # Explicit operator supplied; it must be one of the known SQL operators.
            operator = sk[-1].strip().upper()
            if operator not in self.operators :
                raise ValueError('Unrecognized operator "%s"' % operator)
            kk = '%s.%s'% (self.rabaClass.__name__, k)
        elif len(sk) == 1 :
            # Bare field name defaults to equality.
            operator = "="
            kk = '%s.%s ='% (self.rabaClass.__name__, k)
        else :
            raise ValueError('Invalid field %s' % k)

        # Raba objects are compared through their JSON encoding.
        if isRabaObject(v) :
            vv = v.getJsonEncoding()
        else :
            vv = v

        # Dotted field names ("rel.field") denote a join and are handled separately.
        if sk[0].find('.') > -1 :
            kk = self._parseJoint(sk[0], operator)

        filts[kk] = vv

    # String-style filters: each element of each iterable is parsed as
    # "field operator value" by self.fieldPattern.
    for lt in lstFilters :
        for l in lt :
            match = self.fieldPattern.match(l)
            if match == None :
                raise ValueError("RabaQuery Error: Invalid filter '%s'" % l)
            field = match.group(1)
            operator = match.group(2)
            value = match.group(4)
            if field.find('.') > -1 :
                # Join condition on a related class.
                joink = self._parseJoint(field, operator, value)
                filts[joink] = value
            else :
                filts['%s.%s %s' %(self.rabaClass.__name__, field, operator)] = value

    self.filters.append(filts)
"resource": ""
} |
def count(self, sqlTail = ''):
    """Compile the filters and count the matching results. sqlTail may append clauses such as ORDER BY."""
    sql, sqlValues = self.getSQLQuery(count = True)
    full_sql = '%s %s' % (sql, sqlTail)
    return int(self.con.execute(full_sql, sqlValues).fetchone()[0])
"resource": ""
} |
def show_schema(self, tables=None):
    """Print schema information for the given tables (defaults to every table)."""
    for table in (tables if tables else self.tables):
        self._printer('\t{0}'.format(table))
        for column in self.get_schema(table, True):
            self._printer('\t\t{0:30} {1:15} {2:10} {3:10} {4:10} {5:10}'.format(*column))
"resource": ""
} |
def get_schema_dict(self, table):
    """
    Return the table schema as a mapping of column name -> {header: value},
    which is easier to reference and compare than the raw list form.
    """
    rows = self.get_schema(table, with_headers=True)
    # First row holds the header names; the rest are column definitions.
    headers = rows.pop(0)
    return {row[0]: dict(zip(headers, row)) for row in rows}
"resource": ""
} |
def get_schema(self, table, with_headers=False):
    """Retrieve the schema rows for a table via DESC, normalizing None values to ''."""
    fetched = self.fetch('desc ' + wrap(table))
    # A single-row result comes back as a flat list; normalize to a list of rows.
    rows = fetched if isinstance(fetched[0], list) else [fetched]
    schema = [['' if value is None else value for value in row] for row in rows]
    # Optionally prepend the DESC column headers.
    if with_headers:
        schema.insert(0, ['Column', 'Type', 'Null', 'Key', 'Default', 'Extra'])
    return schema
"resource": ""
} |
def add_column(self, table, name='ID', data_type='int(11)', after_col=None, null=False, primary_key=False):
    """Add a column to an existing table and return the new column's name."""
    location = 'AFTER {0}'.format(after_col) if after_col else 'FIRST'
    null_clause = 'NULL' if null else 'NOT NULL'
    comment = "COMMENT 'Column auto created by mysql-toolkit'"
    pk_clause = 'AUTO_INCREMENT PRIMARY KEY {0}'.format(comment) if primary_key else ''
    self.execute('ALTER TABLE {0} ADD COLUMN {1} {2} {3} {4} {5}'.format(wrap(table), name, data_type, null_clause,
                                                                         pk_clause, location))
    self._printer("\tAdded column '{0}' to '{1}' {2}".format(name, table, '(Primary Key)' if primary_key else ''))
    return name
"resource": ""
} |
def drop_column(self, table, name):
    """Remove a column from an existing table, reporting (not raising) when it is missing."""
    try:
        self.execute('ALTER TABLE {0} DROP COLUMN {1}'.format(wrap(table), name))
    except ProgrammingError:
        # Column (or key) does not exist; report instead of propagating.
        self._printer("\tCan't DROP '{0}'; check that column/key exists in '{1}'".format(name, table))
    else:
        self._printer('\tDropped column {0} from {1}'.format(name, table))
    return name
"resource": ""
} |
def drop_index(self, table, column):
    """Drop the index on a column of a table."""
    statement = 'ALTER TABLE {0} DROP INDEX {1}'.format(wrap(table), column)
    self.execute(statement)
    self._printer('\tDropped index from column {0}'.format(column))
"resource": ""
} |
def add_comment(self, table, column, comment):
    """Attach a comment to an existing column, preserving its current definition."""
    definition = self.get_column_definition(table, column)
    self.execute("ALTER TABLE {0} MODIFY COLUMN {1} {2} COMMENT '{3}'".format(table, column, definition, comment))
    self._printer('\tAdded comment to column {0}'.format(column))
    return True
"resource": ""
} |
def list_modules(self, course_id, include=None, search_term=None, student_id=None):
    """
    List modules.

    List the modules in a course.
    """
    path = {"course_id": course_id}
    data = {}
    params = {}
    if include is not None:
        # Only "items" and "content_details" are accepted by the API.
        self._validate_enum(include, ["items", "content_details"])
        params["include"] = include
    for key, value in (("search_term", search_term), ("student_id", student_id)):
        if value is not None:
            params[key] = value

    self.logger.debug("GET /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, all_pages=True)
"resource": ""
} |
def create_module(self, course_id, module_name, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_require_sequential_progress=None, module_unlock_at=None):
    """
    Create a module.

    Create and return a new module.
    """
    path = {"course_id": course_id}
    params = {}
    # The name is mandatory; every other module attribute is optional.
    data = {"module[name]": module_name}
    optional_fields = (
        ("module[unlock_at]", module_unlock_at),
        ("module[position]", module_position),
        ("module[require_sequential_progress]", module_require_sequential_progress),
        ("module[prerequisite_module_ids]", module_prerequisite_module_ids),
        ("module[publish_final_grade]", module_publish_final_grade),
    )
    for key, value in optional_fields:
        if value is not None:
            data[key] = value

    self.logger.debug("POST /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def create_module_item(self, course_id, module_id, module_item_type, module_item_content_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_new_tab=None, module_item_page_url=None, module_item_position=None, module_item_title=None):
    """
    Create a module item.

    Create and return a new module item.
    """
    path = {"course_id": course_id, "module_id": module_id}
    params = {}
    data = {}
    if module_item_title is not None:
        data["module_item[title]"] = module_item_title
    # The item type is mandatory and restricted to the known content types.
    self._validate_enum(module_item_type, ["File", "Page", "Discussion", "Assignment", "Quiz", "SubHeader", "ExternalUrl", "ExternalTool"])
    data["module_item[type]"] = module_item_type
    # content_id is required except for 'ExternalUrl', 'Page' and 'SubHeader' items.
    data["module_item[content_id]"] = module_item_content_id
    optional_fields = (
        ("module_item[position]", module_item_position),
        ("module_item[indent]", module_item_indent),
        ("module_item[page_url]", module_item_page_url),
        ("module_item[external_url]", module_item_external_url),
        ("module_item[new_tab]", module_item_new_tab),
    )
    for key, value in optional_fields:
        if value is not None:
            data[key] = value
    if module_item_completion_requirement_type is not None:
        # Inapplicable completion types are ignored server-side.
        self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
        data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
    if module_item_completion_requirement_min_score is not None:
        data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score

    self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
    """
    Update a module item.

    Update and return an existing module item.
    """
    path = {"course_id": course_id, "module_id": module_id, "id": id}
    params = {}
    data = {}
    # Optional fields preceding the completion requirement.
    for key, value in (
        ("module_item[title]", module_item_title),
        ("module_item[position]", module_item_position),
        ("module_item[indent]", module_item_indent),
        ("module_item[external_url]", module_item_external_url),
        ("module_item[new_tab]", module_item_new_tab),
    ):
        if value is not None:
            data[key] = value
    if module_item_completion_requirement_type is not None:
        # Inapplicable completion types are ignored server-side.
        self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
        data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
    # Remaining optional fields, including a move to another module in the same course.
    for key, value in (
        ("module_item[completion_requirement][min_score]", module_item_completion_requirement_min_score),
        ("module_item[published]", module_item_published),
        ("module_item[module_id]", module_item_module_id),
    ):
        if value is not None:
            data[key] = value

    self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
    """
    Select a mastery path.

    Select a mastery path when a module item offers several possible paths. Requires the
    Mastery Paths feature to be enabled. Returns a compound document with the assignments
    in the chosen path and any module items related to those assignments.
    """
    path = {"course_id": course_id, "module_id": module_id, "id": id}
    params = {}
    # student_id defaults to the current user when omitted.
    data = {key: value for key, value in (
        ("assignment_set_id", assignment_set_id),
        ("student_id", student_id),
    ) if value is not None}

    self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True)
"resource": ""
} |
def get_module_item_sequence(self, course_id, asset_id=None, asset_type=None):
    """
    Get module item sequence.

    Given an asset in a course, find the ModuleItem it belongs to together with the
    previous and next module items in the course sequence.
    """
    path = {"course_id": course_id}
    data = {}
    params = {}
    if asset_type is not None:
        # Prefer 'ModuleItem' when known to avoid ambiguity for assets appearing twice.
        self._validate_enum(asset_type, ["ModuleItem", "File", "Page", "Discussion", "Assignment", "Quiz", "ExternalTool"])
        params["asset_type"] = asset_type
    if asset_id is not None:
        # For a Page this is the page URL rather than a numeric id.
        params["asset_id"] = asset_id

    self.logger.debug("GET /api/v1/courses/{course_id}/module_item_sequence with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/module_item_sequence".format(**path), data=data, params=params, single_item=True)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.