Search is not available for this dataset
text
stringlengths 75
104k
|
|---|
def does_not_contain_value(self, *values):
    """Asserts that val is a dict and that none of the given values appear among its values."""
    self._check_dict_like(self.val, check_getitem=False)
    if not values:
        raise ValueError('one or more value args must be given')
    # collect every given value that is present, so the error can list them all
    present = [candidate for candidate in values if candidate in self.val.values()]
    if present:
        self._err('Expected <%s> to not contain values %s, but did contain %s.' % (
            self.val, self._fmt_items(values), self._fmt_items(present)))
    return self
|
def contains_entry(self, *args, **kwargs):
    """Asserts that val is a dict and contains every given single-pair entry (as dicts or kwargs)."""
    self._check_dict_like(self.val, check_values=False)
    entries = list(args) + [{k: v} for k, v in kwargs.items()]
    if not entries:
        raise ValueError('one or more entry args must be given')
    missing = []
    for entry in entries:
        if type(entry) is not dict:
            raise TypeError('given entry arg must be a dict')
        if len(entry) != 1:
            raise ValueError('given entry args must contain exactly one key-value pair')
        key = next(iter(entry))
        # missing when the key is absent, or present with a different value
        if key not in self.val or self.val[key] != entry[key]:
            missing.append(entry)
    if missing:
        self._err('Expected <%s> to contain entries %s, but did not contain %s.' % (
            self.val, self._fmt_items(entries), self._fmt_items(missing)))
    return self
|
def is_before(self, other):
    """Asserts that val is a datetime strictly earlier than the given other datetime."""
    if type(self.val) is not datetime.datetime:
        raise TypeError('val must be datetime, but was type <%s>' % type(self.val).__name__)
    if type(other) is not datetime.datetime:
        raise TypeError('given arg must be datetime, but was type <%s>' % type(other).__name__)
    if not self.val < other:
        fmt = '%Y-%m-%d %H:%M:%S'
        self._err('Expected <%s> to be before <%s>, but was not.' % (self.val.strftime(fmt), other.strftime(fmt)))
    return self
|
def exists(self):
    """Asserts that val is a path string pointing at something that exists on the filesystem."""
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a path')
    if os.path.exists(self.val):
        return self
    self._err('Expected <%s> to exist, but was not found.' % self.val)
    return self
|
def is_file(self):
    """Asserts that val is an existing path that refers to a regular file."""
    self.exists()
    if os.path.isfile(self.val):
        return self
    self._err('Expected <%s> to be a file, but was not.' % self.val)
    return self
|
def is_directory(self):
    """Asserts that val is an existing path that refers to a directory."""
    self.exists()
    if os.path.isdir(self.val):
        return self
    self._err('Expected <%s> to be a directory, but was not.' % self.val)
    return self
|
def is_named(self, filename):
    """Asserts that val is an existing file whose basename equals the given filename."""
    self.is_file()
    if not isinstance(filename, str_types):
        raise TypeError('given filename arg must be a path')
    actual_name = os.path.basename(os.path.abspath(self.val))
    if actual_name != filename:
        self._err('Expected filename <%s> to be equal to <%s>, but was not.' % (actual_name, filename))
    return self
|
def is_child_of(self, parent):
    """Asserts that val is an existing file located below the given parent directory.

    Args:
        parent: path (str) of the directory expected to contain the file.

    Raises:
        TypeError: if parent is not a path string.
    """
    self.is_file()
    if not isinstance(parent, str_types):
        raise TypeError('given parent directory arg must be a path')
    val_abspath = os.path.abspath(self.val)
    parent_abspath = os.path.abspath(parent)
    # Compare against the parent path with a trailing separator so a sibling
    # like '/tmp/foobar' is not treated as a child of '/tmp/foo' (a bare
    # startswith() prefix test would wrongly accept it). os.path.join with an
    # empty component appends exactly one os.sep.
    if not val_abspath.startswith(os.path.join(parent_abspath, '')):
        self._err('Expected file <%s> to be a child of <%s>, but was not.' % (val_abspath, parent_abspath))
    return self
|
def extracting(self, *names, **kwargs):
    """Asserts that val is collection, then extracts the named properties or named zero-arg methods into a list (or list of tuples if multiple names are given).

    Keyword Args:
        filter: keep only matching items -- a property name (item kept when the
            extracted value is truthy), a dict of name -> expected value pairs,
            or a predicate callable taking the item.
        sort: order items before extraction -- a property name, an iterable of
            property names, or a key callable taking the item.

    Returns:
        AssertionBuilder: a new builder wrapping the list of extracted values.
    """
    if not isinstance(self.val, Iterable):
        raise TypeError('val is not iterable')
    if isinstance(self.val, str_types):
        raise TypeError('val must not be string')
    if len(names) == 0:
        raise ValueError('one or more name args must be given')
    def _extract(x, name):
        # Pull `name` out of item `x`: dict key, then sequence index, then
        # attribute / zero-arg method -- in that order.
        if self._check_dict_like(x, check_values=False, return_as_bool=True):
            if name in x:
                return x[name]
            else:
                raise ValueError('item keys %s did not contain key <%s>' % (list(x.keys()), name))
        elif isinstance(x, Iterable):
            self._check_iterable(x, name='item')
            return x[name]
        elif hasattr(x, name):
            attr = getattr(x, name)
            if callable(attr):
                try:
                    return attr()
                except TypeError:
                    # method exists but requires arguments
                    raise ValueError('val method <%s()> exists, but is not zero-arg method' % name)
            else:
                return attr
        else:
            raise ValueError('val does not have property or zero-arg method <%s>' % name)
    def _filter(x):
        # Apply the optional 'filter' kwarg; items failing it are skipped.
        if 'filter' in kwargs:
            if isinstance(kwargs['filter'], str_types):
                # name string: keep when the extracted value is truthy
                return bool(_extract(x, kwargs['filter']))
            elif self._check_dict_like(kwargs['filter'], check_values=False, return_as_bool=True):
                # dict: every string key must extract to exactly the given value
                for k in kwargs['filter']:
                    if isinstance(k, str_types):
                        if _extract(x, k) != kwargs['filter'][k]:
                            return False
                return True
            elif callable(kwargs['filter']):
                return kwargs['filter'](x)
            # unsupported filter type: drop everything
            return False
        return True
    def _sort(x):
        # Build the optional sort key from the 'sort' kwarg; constant 0 keeps
        # the original order (sorted() is stable).
        if 'sort' in kwargs:
            if isinstance(kwargs['sort'], str_types):
                return _extract(x, kwargs['sort'])
            elif isinstance(kwargs['sort'], Iterable):
                items = []
                for k in kwargs['sort']:
                    if isinstance(k, str_types):
                        items.append(_extract(x, k))
                return tuple(items)
            elif callable(kwargs['sort']):
                return kwargs['sort'](x)
        return 0
    extracted = []
    for i in sorted(self.val, key=lambda x: _sort(x)):
        if _filter(i):
            items = [_extract(i, name) for name in names]
            # single name -> flat list of values; multiple names -> list of tuples
            extracted.append(tuple(items) if len(items) > 1 else items[0])
    return AssertionBuilder(extracted, self.description, self.kind)
|
def raises(self, ex):
    """Asserts that val is callable and records the exception type expected when it is invoked.

    Chain with when_called_with() to actually call val and verify the raise.
    """
    if not callable(self.val):
        raise TypeError('val must be callable')
    if not issubclass(ex, BaseException):
        raise TypeError('given arg must be exception')
    return AssertionBuilder(self.val, self.description, self.kind, ex)
|
def when_called_with(self, *some_args, **some_kwargs):
    """Asserts the val callable when invoked with the given args and kwargs raises the expected exception.

    Must be chained after raises(), which records the expected exception type.

    Returns:
        AssertionBuilder: a new builder wrapping the raised exception's message,
        so string assertions can be chained on it.
    """
    if not self.expected:
        raise TypeError('expected exception not set, raises() must be called first')
    try:
        self.val(*some_args, **some_kwargs)
    except BaseException as e:
        if issubclass(type(e), self.expected):
            # chain on with exception message as val
            return AssertionBuilder(str(e), self.description, self.kind)
        else:
            # got exception, but wrong type, so raise
            self._err('Expected <%s> to raise <%s> when called with (%s), but raised <%s>.' % (
                self.val.__name__,
                self.expected.__name__,
                self._fmt_args_kwargs(*some_args, **some_kwargs),
                type(e).__name__))
    # didn't fail as expected, so raise
    self._err('Expected <%s> to raise <%s> when called with (%s).' % (
        self.val.__name__,
        self.expected.__name__,
        self._fmt_args_kwargs(*some_args, **some_kwargs)))
|
def _err(self, msg):
"""Helper to raise an AssertionError, and optionally prepend custom description."""
out = '%s%s' % ('[%s] ' % self.description if len(self.description) > 0 else '', msg)
if self.kind == 'warn':
print(out)
return self
elif self.kind == 'soft':
global _soft_err
_soft_err.append(out)
return self
else:
raise AssertionError(out)
|
def _fmt_args_kwargs(self, *some_args, **some_kwargs):
"""Helper to convert the given args and kwargs into a string."""
if some_args:
out_args = str(some_args).lstrip('(').rstrip(',)')
if some_kwargs:
out_kwargs = ', '.join([str(i).lstrip('(').rstrip(')').replace(', ',': ') for i in [
(k,some_kwargs[k]) for k in sorted(some_kwargs.keys())]])
if some_args and some_kwargs:
return out_args + ', ' + out_kwargs
elif some_args:
return out_args
elif some_kwargs:
return out_kwargs
else:
return ''
|
def generate_words(files):
    """
    Transform a list of corpus files into a flat list of words.

    Strips the name-entity ('<NE>...</NE>') and abbreviation ('<AB>...</AB>')
    markers, splits each line on the '|' word delimiter, and drops the
    trailing newline tokens.

    Args:
        files: iterable of paths to pipe-delimited text files.

    Returns:
        list: all words from all files, in order.
    """
    repls = {'<NE>' : '','</NE>' : '','<AB>': '','</AB>': ''}
    words_all = []
    for file in files:
        # close each file deterministically instead of leaking the handle
        with open(file, 'r') as lines:
            for line in lines:
                line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)
                # compare with != -- the original used `is not`, which relies on
                # CPython small-string interning and is a SyntaxWarning on 3.8+
                words_all.extend(word for word in line.split("|") if word != '\n')
    return words_all
|
def create_char_dataframe(words):
    """
    Build a per-character dataframe from a list of tokenized words.

    Each character becomes one row carrying the character itself, its
    character type, and a 'target' flag that is True only for the first
    character of each word.

    Example
    =======
    ['กิน', 'หมด'] to dataframe of
    [{'char': 'ก', 'type': ..., 'target': 1}, ...,
     {'char': 'ด', 'type': ..., 'target': 0}]
    """
    rows = []
    for word in words:
        for position, char in enumerate(word):
            rows.append({'char': char,
                         'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
                         # word-initial characters are the positive class
                         'target': position == 0})
    return pd.DataFrame(rows)
|
def generate_best_dataset(best_path, output_path='cleaned_data', create_val=False):
    """
    Generate CSV file for training and testing data

    Input
    =====
    best_path: str, path to BEST folder which contains unzipped subfolder
        'article', 'encyclopedia', 'news', 'novel'
    output_path: str, path to output folder; the cleaned data will be saved
        in the given folder name where the training set will be stored in a
        `train` subfolder and the testing set in a `test` subfolder
    create_val: boolean, True or False, if True, divide training set into training set and
        validation set in `val` folder
    """
    # create the output directory layout (train/test, plus val when requested)
    if not os.path.isdir(output_path):
        os.mkdir(output_path)
    if not os.path.isdir(os.path.join(output_path, 'train')):
        os.makedirs(os.path.join(output_path, 'train'))
    if not os.path.isdir(os.path.join(output_path, 'test')):
        os.makedirs(os.path.join(output_path, 'test'))
    if not os.path.isdir(os.path.join(output_path, 'val')) and create_val:
        os.makedirs(os.path.join(output_path, 'val'))
    for article_type in article_types:
        files = glob(os.path.join(best_path, article_type, '*.txt'))
        # deterministic 90/10 train/test split of the corpus files
        files_train, files_test = train_test_split(files, random_state=0, test_size=0.1)
        if create_val:
            # carve a further 10 percent of the training files off as validation
            files_train, files_val = train_test_split(files_train, random_state=0, test_size=0.1)
            val_words = generate_words(files_val)
            val_df = create_char_dataframe(val_words)
            val_df.to_csv(os.path.join(output_path, 'val', 'df_best_{}_val.csv'.format(article_type)), index=False)
        train_words = generate_words(files_train)
        test_words = generate_words(files_test)
        train_df = create_char_dataframe(train_words)
        test_df = create_char_dataframe(test_words)
        train_df.to_csv(os.path.join(output_path, 'train', 'df_best_{}_train.csv'.format(article_type)), index=False)
        test_df.to_csv(os.path.join(output_path, 'test', 'df_best_{}_test.csv'.format(article_type)), index=False)
        print("Save {} to CSV file".format(article_type))
|
def prepare_feature(best_processed_path, option='train'):
    """
    Transform a processed BEST dataset split into feature matrices and labels.

    Input
    =====
    best_processed_path: str, path to processed BEST dataset
    option: str, which split to load ('train', 'test' or 'val')

    Returns
    =======
    tuple: (x_char, x_type, y) where x_char / x_type are the windowed
    character-id and character-type-id arrays and y is the 0/1 target array.
    """
    # padding for training and testing set
    n_pad = 21
    n_pad_2 = int((n_pad - 1)/2)
    pad = [{'char': ' ', 'type': 'p', 'target': True}]
    df_pad = pd.DataFrame(pad * n_pad_2)
    df = []
    for article_type in article_types:
        df.append(pd.read_csv(os.path.join(best_processed_path, option, 'df_best_{}_{}.csv'.format(article_type, option))))
    df = pd.concat(df)
    df = pd.concat((df_pad, df, df_pad))  # pad with empty string feature
    # map characters/types to integer ids (80 and 4 are the unknown-token ids)
    df['char'] = df['char'].map(lambda x: CHARS_MAP.get(x, 80))
    df['type'] = df['type'].map(lambda x: CHAR_TYPES_MAP.get(x, 4))
    df_pad = create_n_gram_df(df, n_pad=n_pad)
    char_row = ['char' + str(i + 1) for i in range(n_pad_2)] + \
               ['char-' + str(i + 1) for i in range(n_pad_2)] + ['char']
    type_row = ['type' + str(i + 1) for i in range(n_pad_2)] + \
               ['type-' + str(i + 1) for i in range(n_pad_2)] + ['type']
    # DataFrame.as_matrix() was removed in pandas 1.0; .values is the
    # long-supported equivalent and returns the same ndarray.
    x_char = df_pad[char_row].values
    x_type = df_pad[type_row].values
    y = df_pad['target'].astype(int).values
    return x_char, x_type, y
|
def train_model(best_processed_path, weight_path='../weight/model_weight.h5', verbose=2):
    """
    Given path to processed BEST dataset,
    train CNN model for words beginning alongside with
    character label encoder and character type label encoder

    Input
    =====
    best_processed_path: str, path to processed BEST dataset
    weight_path: str, path to weight path file
    verbose: int, verbosity option for training Keras model

    Output
    ======
    model: keras model, keras model for tokenize prediction
    """
    x_train_char, x_train_type, y_train = prepare_feature(best_processed_path, option='train')
    # NOTE(review): the test split is loaded here but never used; evaluate() consumes it.
    x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
    # use a validation split only when one was generated (generate_best_dataset(create_val=True))
    validation_set = False
    if os.path.isdir(os.path.join(best_processed_path, 'val')):
        validation_set = True
        x_val_char, x_val_type, y_val = prepare_feature(best_processed_path, option='val')
    if not os.path.isdir(os.path.dirname(weight_path)):
        os.makedirs(os.path.dirname(weight_path)) # make directory if weight does not exist
    # NOTE(review): without a validation set, ModelCheckpoint's monitor='val_loss'
    # is never produced by fit() -- confirm that path is intended.
    callbacks_list = [
        ReduceLROnPlateau(),
        ModelCheckpoint(
            weight_path,
            save_best_only=True,
            save_weights_only=True,
            monitor='val_loss',
            mode='min',
            verbose=1
        )
    ]
    # train model
    model = get_convo_nn2()
    # successive training rounds with progressively larger batch sizes
    train_params = [(10, 256), (3, 512), (3, 2048), (3, 4096), (3, 8192)]
    for (epochs, batch_size) in train_params:
        print("train with {} epochs and {} batch size".format(epochs, batch_size))
        if validation_set:
            model.fit([x_train_char, x_train_type], y_train,
                      epochs=epochs, batch_size=batch_size,
                      verbose=verbose,
                      callbacks=callbacks_list,
                      validation_data=([x_val_char, x_val_type], y_val))
        else:
            model.fit([x_train_char, x_train_type], y_train,
                      epochs=epochs, batch_size=batch_size,
                      verbose=verbose,
                      callbacks=callbacks_list)
    return model
|
def evaluate(best_processed_path, model):
    """
    Evaluate the trained model on the held-out 10 percent test split.

    Returns:
        tuple: (f1, precision, recall) computed at a 0.5 probability threshold.
    """
    x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
    probabilities = model.predict([x_test_char, x_test_type])
    # threshold the predicted probabilities into hard 0/1 labels
    y_predict = (probabilities.ravel() > 0.5).astype(int)
    return (f1_score(y_test, y_predict),
            precision_score(y_test, y_predict),
            recall_score(y_test, y_predict))
|
def tokenize(text, custom_dict=None):
    """
    Tokenize given Thai text string

    Input
    =====
    text: str, Thai text string
    custom_dict: str (or list), path to customized dictionary file
        It allows the function not to tokenize given dictionary wrongly.
        The file should contain custom words separated by line.
        Alternatively, you can provide list of custom words too.

    Output
    ======
    tokens: list, list of tokenized words

    Example
    =======
    >> deepcut.tokenize('ตัดคำได้ดีมาก')
    >> ['ตัดคำ','ได้','ดี','มาก']
    """
    global TOKENIZER
    # lazily create the module-level tokenizer on first use, then reuse it
    if not TOKENIZER:
        TOKENIZER = DeepcutTokenizer()
    return TOKENIZER.tokenize(text, custom_dict=custom_dict)
|
def _document_frequency(X):
"""
Count the number of non-zero values for each feature in sparse X.
"""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
return np.diff(sp.csc_matrix(X, copy=False).indptr)
|
def _check_stop_list(stop):
    """
    Resolve the stop-words argument: 'thai' -> built-in list, None -> None,
    any other string -> error, any other collection -> frozenset.
    ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95
    """
    if stop is None:
        return None
    elif stop == "thai":
        return THAI_STOP_WORDS
    elif isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    # assume it's a collection
    return frozenset(stop)
|
def _word_ngrams(self, tokens):
"""
Turn tokens into a tokens of n-grams
ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153
"""
# handle stop words
if self.stop_words is not None:
tokens = [w for w in tokens if w not in self.stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
|
def fit_tranform(self, raw_documents):
    """
    Fit on and transform the given raw documents into a document-term matrix
    in sparse CSR format (see scipy).

    NOTE(review): the name looks like a typo of 'fit_transform', but renaming
    would break existing callers, so it is kept as-is.
    """
    return self.transform(raw_documents, new_document=True)
|
def create_feature_array(text, n_pad=21):
    """
    Encode a text string into (char-id, char-type-id) window arrays,
    one window per character position.
    """
    n = len(text)
    n_pad_2 = int((n_pad - 1)/2)
    # surround the text with space padding on both sides
    text_pad = [' '] * n_pad_2 + list(text) + [' '] * n_pad_2
    x_char, x_type = [], []
    for i in range(n_pad_2, n_pad_2 + n):
        # window layout: following chars, preceding chars reversed, then the char itself
        window = text_pad[i + 1: i + n_pad_2 + 1] + \
                 list(reversed(text_pad[i - n_pad_2: i])) + \
                 [text_pad[i]]
        x_char.append([CHARS_MAP.get(c, 80) for c in window])
        x_type.append([CHAR_TYPES_MAP.get(CHAR_TYPE_FLATTEN.get(c, 'o'), 4) for c in window])
    return np.array(x_char).astype(float), np.array(x_type).astype(float)
|
def create_n_gram_df(df, n_pad):
    """
    Add shifted 'char±k'/'type±k' feature columns to the dataframe and trim
    the padded rows at both ends.
    """
    n_pad_2 = int((n_pad - 1)/2)
    for offset in range(1, n_pad_2 + 1):
        # negative-suffix columns look backwards, positive ones look forwards
        df['char-{}'.format(offset)] = df['char'].shift(offset)
        df['type-{}'.format(offset)] = df['type'].shift(offset)
        df['char{}'.format(offset)] = df['char'].shift(-offset)
        df['type{}'.format(offset)] = df['type'].shift(-offset)
    return df[n_pad_2: -n_pad_2]
|
def _fetch_course_enrollment_data(self, enterprise_customer_uuid):
    """
    Return enterprise customer UUID/user_id/course_run_id triples which represent CourseEnrollment records
    which do not have a matching EnterpriseCourseEnrollment record.

    The query used below looks for CourseEnrollment records that are associated with enterprise
    learners where the enrollment data is after the creation of the link between the learner
    and the enterprise. It also excludes learners with edx.org email addresses in order to
    filter out test users.

    Arguments:
        enterprise_customer_uuid: optional; when truthy, restricts results to that customer.

    Returns:
        list of dict: one dict per matching row, keyed by user_id,
        enterprise_customer_uuid and course_run_id.
    """
    query = '''
        SELECT
            au.id as user_id,
            ecu.enterprise_customer_id as enterprise_customer_uuid,
            sce.course_id as course_run_id
        FROM student_courseenrollment sce
        JOIN auth_user au
            ON au.id = sce.user_id
        JOIN enterprise_enterprisecustomeruser ecu
            ON ecu.user_id = au.id
        LEFT JOIN enterprise_enterprisecourseenrollment ece
            ON ece.enterprise_customer_user_id = ecu.id
            AND ece.course_id = sce.course_id
        WHERE
            ece.id IS NULL
            AND ecu.created <= sce.created
            AND au.email NOT LIKE '%@edx.org'
            {enterprise_customer_filter}
        ORDER BY sce.created;
    '''
    with connection.cursor() as cursor:
        if enterprise_customer_uuid:
            # the UUID is bound as a query parameter; only the static filter
            # clause is interpolated into the SQL text
            cursor.execute(
                query.format(enterprise_customer_filter='AND ecu.enterprise_customer_id = %s'),
                [enterprise_customer_uuid]
            )
        else:
            cursor.execute(
                query.format(enterprise_customer_filter='')
            )
        return self._dictfetchall(cursor)
|
def _dictfetchall(self, cursor):
""" Return all rows from a cursor as a dict. """
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
|
def parse_lms_api_datetime(datetime_string, datetime_format=LMS_API_DATETIME_FORMAT):
    """
    Parse a received datetime into a timezone-aware, Python datetime object.

    Arguments:
        datetime_string: A string to be parsed (an already-parsed datetime
            object is passed through unchanged).
        datetime_format: A datetime format string to be used for parsing
    """
    if isinstance(datetime_string, datetime.datetime):
        date_time = datetime_string
    else:
        try:
            date_time = datetime.datetime.strptime(datetime_string, datetime_format)
        except ValueError:
            # fall back to the variant format that omits the timezone suffix
            date_time = datetime.datetime.strptime(datetime_string, LMS_API_DATETIME_FORMAT_WITHOUT_TIMEZONE)
    # If the datetime format didn't include a timezone, then set to UTC.
    # Note that if we're using the default LMS_API_DATETIME_FORMAT, it ends in 'Z',
    # which denotes UTC for ISO-8601.
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=timezone.utc)
    return date_time
|
def connect(self):
    """
    Authenticate as the current user via a fresh JWT and build the REST API client.
    """
    if JwtBuilder is None:
        raise NotConnectedToOpenEdX("This package must be installed in an OpenEdX environment.")
    issued_at = int(time())
    token = JwtBuilder.create_jwt_for_user(self.user)
    self.client = EdxRestApiClient(
        self.API_BASE_URL, append_slash=self.APPEND_SLASH, jwt=token,
    )
    # record when the token will need refreshing
    self.expires_at = issued_at + self.expires_in
|
def refresh_token(func):
    """
    Method decorator that re-connects (refreshing the JWT) before the wrapped
    call whenever the current token has expired.
    """
    @wraps(func)
    def inner(self, *args, **kwargs):
        """Re-authenticate if the token expired, then delegate to the wrapped function."""
        if self.token_expired():
            self.connect()
        return func(self, *args, **kwargs)
    return inner
|
def redirect_if_blocked(course_run_ids, user=None, ip_address=None, url=None):
    """
    Return the embargo error-page redirect for the first blocked course run,
    or None when none of the runs are blocked for this user.
    """
    for run_id in course_run_ids:
        blocked_url = embargo_api.redirect_if_blocked(
            CourseKey.from_string(run_id),
            user=user,
            ip_address=ip_address,
            url=url,
        )
        if blocked_url:
            return blocked_url
|
def get_course_details(self, course_id):
    """
    Fetch course details (enrollment context: allowed modes, etc.) from the Enrollment API.

    Args:
        course_id (str): The string value of the course's unique identifier

    Returns:
        dict: the course's enrollment details, or an empty dict when the request fails.
    """
    try:
        return self.client.course(course_id).get()
    except (SlumberBaseException, ConnectionError, Timeout) as exc:
        LOGGER.exception(
            'Failed to retrieve course enrollment details for course [%s] due to: [%s]',
            course_id, str(exc)
        )
        # best-effort: callers get an empty payload rather than an exception
        return {}
|
def _sort_course_modes(self, modes):
    """
    Return the course mode dicts ordered by slug according to the
    COURSE_MODE_SORT_ORDER constant; unknown slugs sort last.

    Arguments:
        modes (list): A list of course mode dictionaries.

    Returns:
        list: the course mode dictionaries sorted by slug priority.
    """
    def slug_weight(mode):
        """Weight is higher for slugs appearing earlier in the sort-order list; 0 for unknown slugs."""
        sorting_slugs = COURSE_MODE_SORT_ORDER
        if mode['slug'] in sorting_slugs:
            return len(sorting_slugs) - sorting_slugs.index(mode['slug'])
        return 0
    # descending weight puts the highest-priority slug first
    return sorted(modes, key=slug_weight, reverse=True)
|
def get_course_modes(self, course_id):
    """
    Return the available (non-excluded) course mode dicts for course_id,
    sorted by slug priority.

    Arguments:
        course_id (str): The string value of the course's unique identifier

    Returns:
        list: A list of course mode dictionaries.
    """
    course_modes = self.get_course_details(course_id).get('course_modes', [])
    allowed = [mode for mode in course_modes if mode['slug'] not in EXCLUDED_COURSE_MODES]
    return self._sort_course_modes(allowed)
|
def has_course_mode(self, course_run_id, mode):
    """
    Return True if the course run offers the given enrollment mode.

    Arguments:
        course_run_id (str): The string value of the course run's unique identifier
        mode (str): slug of the enrollment mode to look for

    Returns:
        bool: whether the mode is available for enrollment in the course run.
    """
    available = self.get_course_modes(course_run_id)
    return any(entry['slug'] == mode for entry in available)
|
def enroll_user_in_course(self, username, course_id, mode, cohort=None):
    """
    POST an enrollment for the user in the given course and mode (optionally into a named cohort).

    Args:
        username (str): The username by which the user goes on the OpenEdX platform
        course_id (str): The string value of the course's unique identifier
        mode (str): The enrollment mode which should be used for the enrollment
        cohort (str): Add the user to this named cohort

    Returns:
        dict: details of the enrollment, including course details, mode, username, etc.
    """
    payload = {
        'user': username,
        'course_details': {'course_id': course_id},
        'mode': mode,
        'cohort': cohort,
    }
    return self.client.enrollment.post(payload)
|
def unenroll_user_from_course(self, username, course_id):
    """
    Deactivate the user's enrollment in the given course.

    Args:
        username (str): The username by which the user goes on the OpenEdx platform
        course_id (str): The string value of the course's unique identifier

    Returns:
        bool: True only if an active enrollment existed and is now inactive.
    """
    enrollment = self.get_course_enrollment(username, course_id)
    if not (enrollment and enrollment['is_active']):
        # nothing active to unenroll from
        return False
    response = self.client.enrollment.post({
        'user': username,
        'course_details': {'course_id': course_id},
        'is_active': False,
        'mode': enrollment['mode']
    })
    return not response['is_active']
|
def get_course_enrollment(self, username, course_id):
    """
    Look up a single course enrollment record for the user.

    Args:
        username (str): The username by which the user goes on the OpenEdX platform
        course_id (str): The string value of the course's unique identifier

    Returns:
        dict: details of the enrollment, or None when the lookup fails or no
        enrollment exists.
    """
    endpoint = getattr(
        self.client.enrollment,
        '{username},{course_id}'.format(username=username, course_id=course_id)
    )
    try:
        result = endpoint.get()
    except HttpNotFoundError:
        # The endpoint 404s when either the username or the course_id is invalid.
        LOGGER.error(
            'Course enrollment details not found for invalid username or course; username=[%s], course=[%s]',
            username,
            course_id
        )
        return None
    if result:
        return result
    # Valid username/course but no matching enrollment: the endpoint returns an empty string.
    LOGGER.info('Failed to find course enrollment details for user [%s] and course [%s]', username, course_id)
    return None
|
def is_enrolled(self, username, course_run_id):
    """
    Determine whether the learner has an active enrollment in the course run.

    Args:
        username (str): The username by which the user goes on the OpenEdX platform
        course_run_id (str): The string value of the course's unique identifier

    Returns:
        bool: True for an active enrollment; False otherwise (including lookup failures).
    """
    enrollment = self.get_course_enrollment(username, course_run_id)
    if enrollment is None:
        return False
    return enrollment.get('is_active', False)
|
def _get_results(self, identity_provider, param_name, param_value, result_field_name):
"""
Calls the third party auth api endpoint to get the mapping between usernames and remote ids.
"""
try:
kwargs = {param_name: param_value}
returned = self.client.providers(identity_provider).users.get(**kwargs)
results = returned.get('results', [])
except HttpNotFoundError:
LOGGER.error(
'username not found for third party provider={provider}, {querystring_param}={id}'.format(
provider=identity_provider,
querystring_param=param_name,
id=param_value
)
)
results = []
for row in results:
if row.get(param_name) == param_value:
return row.get(result_field_name)
return None
|
def get_course_grade(self, course_id, username):
    """
    Return the grade record for the given username in the given course.

    Args:
        * ``course_id`` (str): The string value of the course's unique identifier
        * ``username`` (str): The username ID identifying the user for which to retrieve the grade.

    Raises:
        HttpNotFoundError: when no grade record exists for the user+course.

    Returns:
        dict with ``username``, ``course_key``, ``passed``, ``percent`` and ``letter_grade``.
    """
    rows = self.client.courses(course_id).get(username=username)
    for record in rows:
        if record.get('username') == username:
            return record
    raise HttpNotFoundError('No grade record found for course={}, username={}'.format(course_id, username))
|
def get_course_certificate(self, course_id, username):
    """
    Return the certificate record for the given username in the given course.

    Args:
        * ``course_id`` (str): The string value of the course's unique identifier
        * ``username`` (str): The username ID identifying the user for which to retrieve the certificate

    Raises:
        HttpNotFoundError: when no certificate exists for the user+course.

    Returns:
        dict with ``username``, ``course_id``, ``certificate_type``, ``created_date``,
        ``status``, ``is_passing``, ``download_url`` and ``grade``.
    """
    user_endpoint = self.client.certificates(username)
    return user_endpoint.courses(course_id).get()
|
def course_discovery_api_client(user, catalog_url):
    """
    Build a Course Discovery API client authenticated (via JWT) as the given user.
    """
    if JwtBuilder is None:
        raise NotConnectedToOpenEdX(
            _("To get a Catalog API client, this package must be "
              "installed in an Open edX environment.")
        )
    token = JwtBuilder.create_jwt_for_user(user)
    return EdxRestApiClient(catalog_url, jwt=token)
|
def traverse_pagination(response, endpoint, content_filter_query, query_params):
    """
    Walk every page of a paginated API response, concatenating the "results" lists.

    Arguments:
        response (dict): first-page API response object.
        endpoint (Slumber.Resource): API endpoint object.
        content_filter_query (dict): query parameters used to filter catalog results.
        query_params (dict): query parameters used to paginate results.

    Returns:
        list: all the results returned by the API across every page.
    """
    collected = response.get('results', [])
    page_number = 1
    while response.get('next'):
        page_number += 1
        response = endpoint().post(content_filter_query, **dict(query_params, page=page_number))
        # += extends the first page's list in place, preserving its identity
        collected += response.get('results', [])
    return collected
|
def get_catalog_results(self, content_filter_query, query_params=None, traverse_pagination=False):
    """
    Hit the discovery service's search/all endpoint.

    Arguments:
        content_filter_query (dict): query parameters used to filter catalog results.
        query_params (dict): query parameters used to paginate results.
        traverse_pagination (bool): True to collect every page into one response,
            False to return the single paginated response. Defaults to False.

    Returns:
        dict: Paginated response or all the records.
    """
    query_params = query_params or {}
    try:
        endpoint = getattr(self.client, self.SEARCH_ALL_ENDPOINT)
        response = endpoint().post(data=content_filter_query, **query_params)
        if traverse_pagination:
            # flatten all pages into 'results' and drop the page links
            response['results'] = self.traverse_pagination(response, endpoint, content_filter_query, query_params)
            response['next'] = response['previous'] = None
    except Exception as ex:  # pylint: disable=broad-except
        LOGGER.exception(
            'Attempted to call course-discovery search/all/ endpoint with the following parameters: '
            'content_filter_query: %s, query_params: %s, traverse_pagination: %s. '
            'Failed to retrieve data from the catalog API. content -- [%s]',
            content_filter_query,
            query_params,
            traverse_pagination,
            getattr(ex, 'content', '')
        )
        # Surface the failure to the caller rather than masking it.
        raise ex
    return response
|
def get_catalog(self, catalog_id):
    """
    Fetch the details of one course catalog by its id.

    Returns:
        dict: catalog details if it is available for the user.
    """
    return self._load_data(
        self.CATALOGS_ENDPOINT,
        default=[],
        resource_id=catalog_id,
    )
|
def get_paginated_catalog_courses(self, catalog_id, querystring=None):
    """
    Fetch one page of the courses in the given catalog.

    Returns:
        dict: API response with links to next and previous pages.
    """
    endpoint = self.CATALOGS_COURSES_ENDPOINT.format(catalog_id)
    return self._load_data(
        endpoint,
        default=[],
        querystring=querystring,
        traverse_pagination=False,
        many=False,
    )
|
def get_paginated_catalogs(self, querystring=None):
    """
    Fetch one page of the course catalogs (name and ID) available to the user.

    Returns:
        dict: Paginated response containing catalogs available for the user.
    """
    return self._load_data(
        self.CATALOGS_ENDPOINT,
        default=[],
        querystring=querystring,
        traverse_pagination=False,
        many=False,
    )
|
def get_catalog_courses(self, catalog_id):
    """
    Fetch every course contained in a single course catalog.

    Args:
        catalog_id (int): The catalog ID we want to retrieve.

    Returns:
        list: Courses of the catalog in question
    """
    resource = self.CATALOGS_COURSES_ENDPOINT.format(catalog_id)
    return self._load_data(resource, default=[])
|
def get_course_and_course_run(self, course_run_id):
    """
    Look up both the course and the matching course run for a course run ID.

    Arguments:
        course_run_id (str): The course run ID.

    Returns:
        tuple: The course metadata and the course run metadata.
    """
    # The course ID is embedded in the course run ID, so extract it first.
    course = self.get_course_details(parse_course_key(course_run_id))
    course_run = None
    if course:
        # Pick out the run whose key matches the requested course run ID.
        course_run = next(
            (run for run in course['course_runs'] if run['key'] == course_run_id),
            None
        )
    return course, course_run
|
def get_course_details(self, course_id):
    """
    Fetch metadata for a single course, identified by course ID (not course run ID).

    Args:
        course_id (str): The unique id for the course in question.

    Returns:
        dict: Details of the course in question.
    """
    return self._load_data(self.COURSES_ENDPOINT, resource_id=course_id, many=False)
|
def get_program_by_title(self, program_title):
    """
    Fetch the single program whose title matches, or None when absent.

    Arguments:
        program_title(string): Program title as seen by students and in Course Catalog Admin

    Returns:
        dict: Program data provided by Course Catalog API

    Raises:
        MultipleProgramMatchError: If more than one program has the given title.
    """
    matches = [
        program
        for program in self._load_data(self.PROGRAMS_ENDPOINT, default=[])
        if program.get('title') == program_title
    ]
    if len(matches) > 1:
        raise MultipleProgramMatchError(len(matches))
    return matches[0] if matches else None
|
def get_program_by_uuid(self, program_uuid):
    """
    Fetch a single program by UUID; None when the API has no match.

    Arguments:
        program_uuid(string): Program UUID in string form

    Returns:
        dict: Program data provided by Course Catalog API
    """
    return self._load_data(self.PROGRAMS_ENDPOINT, resource_id=program_uuid, default=None)
|
def get_program_course_keys(self, program_uuid):
    """
    List the course keys (not course run keys) that make up a program.

    Arguments:
        program_uuid (str): Program UUID in string form

    Returns:
        list(str): List of course keys in string form that are included in the program
    """
    program = self.get_program_by_uuid(program_uuid)
    if program:
        return [course['key'] for course in program.get('courses', [])]
    return []
|
def get_program_type_by_slug(self, slug):
    """
    Fetch a program type by its slug.

    Arguments:
        slug (str): The slug to identify the program type.

    Returns:
        dict: A program type object.
    """
    return self._load_data(self.PROGRAM_TYPES_ENDPOINT, resource_id=slug, default=None)
|
def get_common_course_modes(self, course_run_ids):
    """
    Find common course modes for a set of course runs.

    This function essentially returns an intersection of types of seats available
    for each course run.

    Arguments:
        course_run_ids(Iterable[str]): Target Course run IDs.

    Returns:
        set: course modes found in all given course runs; an empty set when
            no modes are shared or when no course run IDs were given.

    Examples:
        # run1 has prof and audit, run 2 has the same
        get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
        {'prof', 'audit'}
        # run1 has prof and audit, run 2 has only prof
        get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
        {'prof'}
        # run1 has prof and audit, run 2 honor
        get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
        set()
    """
    available_course_modes = None
    for course_run_id in course_run_ids:
        course_run = self.get_course_run(course_run_id) or {}
        course_run_modes = {seat.get('type') for seat in course_run.get('seats', [])}
        if available_course_modes is None:
            available_course_modes = course_run_modes
        else:
            available_course_modes &= course_run_modes
        if not available_course_modes:
            # Intersection is already empty; no need to inspect further runs.
            break
    # Previously an empty iterable of course run IDs returned None, which
    # contradicted the documented `set` return type; normalize to a set.
    return available_course_modes or set()
|
def is_course_in_catalog(self, catalog_id, course_id):
    """
    Check whether a course or course run ID belongs to the catalog with the given ID.

    Args:
        catalog_id (int): The ID of the catalog
        course_id (str): The ID of the course or course run

    Returns:
        bool: Whether the course or course run is contained in the given catalog
    """
    try:
        # A parseable key means we were handed a course run ID, not a course ID.
        course_run_id = str(CourseKey.from_string(course_id))
    except InvalidKeyError:
        course_run_id = None
    endpoint = self.client.catalogs(catalog_id).contains
    if course_run_id is None:
        resp = endpoint.get(course_id=course_id)
    else:
        resp = endpoint.get(course_run_id=course_run_id)
    return resp.get('courses', {}).get(course_id, False)
|
def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):
    """
    Load data from the Course Catalog API client.

    Arguments:
        resource(string): type of resource to load
        default(any): value to return if API query returned empty result. Sensible values: [], {}, None etc.

    Returns:
        dict: Deserialized response from Course Catalog API
    """
    fallback = {} if default == self.DEFAULT_VALUE_SAFEGUARD else default
    try:
        result = get_edx_api_data(
            api_config=CatalogIntegration.current(),
            resource=resource,
            api=self.client,
            **kwargs
        )
        return result or fallback
    except (SlumberBaseException, ConnectionError, Timeout) as exc:
        LOGGER.exception(
            'Failed to load data from resource [%s] with kwargs [%s] due to: [%s]',
            resource, kwargs, str(exc)
        )
        return fallback
|
def get_content_metadata(self, enterprise_customer):
    """
    Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.

    Arguments:
        enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.

    Returns:
        list: List of dicts containing content metadata.
    """
    # Keyed by content ID so that later catalog results override earlier
    # ones for the same content, while preserving insertion order.
    content_metadata = OrderedDict()
    # TODO: This if block can be removed when we get rid of discovery service-based catalogs.
    if enterprise_customer.catalog:
        response = self._load_data(
            self.ENTERPRISE_CUSTOMER_ENDPOINT,
            detail_resource='courses',
            resource_id=str(enterprise_customer.uuid),
            traverse_pagination=True,
        )
        for course in response['results']:
            for course_run in course['course_runs']:
                course_run['content_type'] = 'courserun'  # Make this look like a search endpoint result.
                content_metadata[course_run['key']] = course_run
    for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():
        response = self._load_data(
            self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT,
            resource_id=str(enterprise_customer_catalog.uuid),
            traverse_pagination=True,
            querystring={'page_size': 1000},  # large page size to reduce the number of requests
        )
        for item in response['results']:
            content_id = utils.get_content_metadata_item_id(item)
            content_metadata[content_id] = item
    return content_metadata.values()
|
def _load_data(
        self,
        resource,
        detail_resource=None,
        resource_id=None,
        querystring=None,
        traverse_pagination=False,
        default=DEFAULT_VALUE_SAFEGUARD,
):
    """
    Loads a response from a call to one of the Enterprise endpoints.

    :param resource: The endpoint resource name.
    :param detail_resource: The sub-resource to append to the path.
    :param resource_id: The resource ID for the specific detail to get from the endpoint.
    :param querystring: Optional query string parameters.
    :param traverse_pagination: Whether to traverse pagination or return paginated response.
    :param default: The default value to return in case of no response content.
    :return: Data returned by the API.
    """
    default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}
    querystring = querystring if querystring else {}
    cache_key = utils.get_cache_key(
        resource=resource,
        querystring=querystring,
        traverse_pagination=traverse_pagination,
        resource_id=resource_id
    )
    response = cache.get(cache_key)
    if not response:
        # Response is not cached, so make a call.
        endpoint = getattr(self.client, resource)(resource_id)
        endpoint = getattr(endpoint, detail_resource) if detail_resource else endpoint
        response = endpoint.get(**querystring)
        if traverse_pagination:
            results = utils.traverse_pagination(response, endpoint)
            # All pages have been collected, so clear the pagination links.
            # Use None (not the string 'None', which is truthy) so that
            # consumers checking `if response['next']:` behave correctly.
            response = {
                'count': len(results),
                'next': None,
                'previous': None,
                'results': results,
            }
        if response:
            # Now that we've got a response, cache it.
            cache.set(cache_key, response, settings.ENTERPRISE_API_CACHE_TIMEOUT)
    return response or default_val
|
def transmit(self, payload, **kwargs):
    """
    Transmit content metadata items to the integrated channel.
    """
    create_items, update_items, delete_items, transmission_map = self._partition_items(payload)
    # Deletions are transmitted first, then creates, then updates.
    self._transmit_delete(delete_items)
    self._transmit_create(create_items)
    self._transmit_update(update_items, transmission_map)
|
def _partition_items(self, channel_metadata_item_map):
    """
    Return items that need to be created, updated, and deleted along with the
    current ContentMetadataItemTransmissions.
    """
    items_to_create = {}
    items_to_update = {}
    items_to_delete = {}
    transmission_map = {}
    export_content_ids = channel_metadata_item_map.keys()

    # Anything previously transmitted but absent from the current export
    # must be deleted from the integrated channel.
    for transmission in self._get_transmissions():
        content_id = transmission.content_id
        transmission_map[content_id] = transmission
        if content_id not in export_content_ids:
            items_to_delete[content_id] = transmission.channel_metadata

    # Diff the current export against prior transmissions to decide
    # whether each item is a create or an update.
    for item in channel_metadata_item_map.values():
        previous = transmission_map.get(item.content_id, None)
        if previous is None:
            items_to_create[item.content_id] = item.channel_metadata
        elif diff(item.channel_metadata, previous.channel_metadata):
            items_to_update[item.content_id] = item.channel_metadata

    LOGGER.info(
        'Preparing to transmit creation of [%s] content metadata items with plugin configuration [%s]: [%s]',
        len(items_to_create),
        self.enterprise_configuration,
        items_to_create.keys(),
    )
    LOGGER.info(
        'Preparing to transmit update of [%s] content metadata items with plugin configuration [%s]: [%s]',
        len(items_to_update),
        self.enterprise_configuration,
        items_to_update.keys(),
    )
    LOGGER.info(
        'Preparing to transmit deletion of [%s] content metadata items with plugin configuration [%s]: [%s]',
        len(items_to_delete),
        self.enterprise_configuration,
        items_to_delete.keys(),
    )
    return items_to_create, items_to_update, items_to_delete, transmission_map
|
def _serialize_items(self, channel_metadata_items):
"""
Serialize content metadata items for a create transmission to the integrated channel.
"""
return json.dumps(
self._prepare_items_for_transmission(channel_metadata_items),
sort_keys=True
).encode('utf-8')
|
def _transmit_create(self, channel_metadata_item_map):
    """
    Transmit content metadata creation to integrated channel.

    Failures are logged per chunk and do not stop the remaining chunks;
    only successfully transmitted chunks get transmission records.
    """
    for chunk in chunks(channel_metadata_item_map, self.enterprise_configuration.transmission_chunk_size):
        serialized_chunk = self._serialize_items(list(chunk.values()))
        try:
            self.client.create_content_metadata(serialized_chunk)
        except ClientError as exc:
            LOGGER.error(
                # Previously said "update" — copy-paste from _transmit_update.
                'Failed to create [%s] content metadata items for integrated channel [%s] [%s]',
                len(chunk),
                self.enterprise_configuration.enterprise_customer.name,
                # channel_code is a method (see its call sites elsewhere);
                # call it so the log shows the code, not a bound-method repr.
                self.enterprise_configuration.channel_code(),
            )
            LOGGER.error(exc)
        else:
            self._create_transmissions(chunk)
|
def _transmit_update(self, channel_metadata_item_map, transmission_map):
    """
    Transmit content metadata update to integrated channel.

    Failures are logged per chunk and do not stop the remaining chunks;
    only successfully transmitted chunks get their records updated.
    """
    for chunk in chunks(channel_metadata_item_map, self.enterprise_configuration.transmission_chunk_size):
        serialized_chunk = self._serialize_items(list(chunk.values()))
        try:
            self.client.update_content_metadata(serialized_chunk)
        except ClientError as exc:
            LOGGER.error(
                'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',
                len(chunk),
                self.enterprise_configuration.enterprise_customer.name,
                # channel_code is a method (see its call sites elsewhere);
                # call it so the log shows the code, not a bound-method repr.
                self.enterprise_configuration.channel_code(),
            )
            LOGGER.error(exc)
        else:
            self._update_transmissions(chunk, transmission_map)
|
def _transmit_delete(self, channel_metadata_item_map):
    """
    Transmit content metadata deletion to integrated channel.

    Failures are logged per chunk and do not stop the remaining chunks;
    only successfully transmitted chunks get their records deleted.
    """
    for chunk in chunks(channel_metadata_item_map, self.enterprise_configuration.transmission_chunk_size):
        serialized_chunk = self._serialize_items(list(chunk.values()))
        try:
            self.client.delete_content_metadata(serialized_chunk)
        except ClientError as exc:
            LOGGER.error(
                'Failed to delete [%s] content metadata items for integrated channel [%s] [%s]',
                len(chunk),
                self.enterprise_configuration.enterprise_customer.name,
                # channel_code is a method (see its call sites elsewhere);
                # call it so the log shows the code, not a bound-method repr.
                self.enterprise_configuration.channel_code(),
            )
            LOGGER.error(exc)
        else:
            self._delete_transmissions(chunk.keys())
|
def _get_transmissions(self):
    """
    Return the ContentMetadataItemTransmision models for previously
    transmitted content metadata items.
    """
    transmission_model = apps.get_model('integrated_channel', 'ContentMetadataItemTransmission')
    return transmission_model.objects.filter(
        enterprise_customer=self.enterprise_configuration.enterprise_customer,
        integrated_channel_code=self.enterprise_configuration.channel_code()
    )
|
def _create_transmissions(self, content_metadata_item_map):
    """
    Create ContentMetadataItemTransmision models for the given content metadata items.
    """
    transmission_model = apps.get_model('integrated_channel', 'ContentMetadataItemTransmission')
    transmission_model.objects.bulk_create([
        transmission_model(
            enterprise_customer=self.enterprise_configuration.enterprise_customer,
            integrated_channel_code=self.enterprise_configuration.channel_code(),
            content_id=content_id,
            channel_metadata=channel_metadata
        )
        for content_id, channel_metadata in content_metadata_item_map.items()
    ])
|
def _update_transmissions(self, content_metadata_item_map, transmission_map):
"""
Update ContentMetadataItemTransmision models for the given content metadata items.
"""
for content_id, channel_metadata in content_metadata_item_map.items():
transmission = transmission_map[content_id]
transmission.channel_metadata = channel_metadata
transmission.save()
|
def _delete_transmissions(self, content_metadata_item_ids):
    """
    Delete ContentMetadataItemTransmision models associated with the given content metadata items.
    """
    transmission_model = apps.get_model('integrated_channel', 'ContentMetadataItemTransmission')
    transmission_model.objects.filter(
        enterprise_customer=self.enterprise_configuration.enterprise_customer,
        integrated_channel_code=self.enterprise_configuration.channel_code(),
        content_id__in=content_metadata_item_ids
    ).delete()
|
def deprecated(extra):
    """
    Flag a method as deprecated.

    :param extra: Extra text you'd like to display after the default text.
    """
    def decorator(func):
        """
        Return a decorated function that emits a deprecation warning on use.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            """
            Wrap the function.
            """
            # Report the warning at the caller's location, not inside this wrapper.
            caller = inspect.currentframe().f_back
            warnings.warn_explicit(
                'You called the deprecated function `{function}`. {extra}'.format(
                    function=func.__name__,
                    extra=extra
                ),
                category=DeprecationWarning,
                filename=inspect.getfile(caller.f_code),
                lineno=caller.f_lineno
            )
            return func(*args, **kwargs)
        return wrapper
    return decorator
|
def ignore_warning(warning):
    """
    Ignore any emitted warnings from a function.

    :param warning: The category of warning to ignore.
    """
    def decorator(func):
        """
        Return a decorated function whose emitted warnings are ignored.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            """
            Wrap the function.
            """
            # Scope the filter to this call: a bare simplefilter() would
            # permanently mutate the process-wide warning configuration.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', warning)
                return func(*args, **kwargs)
        return wrapper
    return decorator
|
def enterprise_login_required(view):
    """
    View decorator for allowing authenticated user with valid enterprise UUID.

    This decorator requires enterprise identifier as a parameter
    `enterprise_uuid`.

    This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
    the decorated view.

    If there is no enterprise in database against the kwarg `enterprise_uuid`
    or if the user is not authenticated then it will redirect the user to the
    enterprise-linked SSO login page.

    Usage::
        @enterprise_login_required()
        def my_view(request, enterprise_uuid):
            # Some functionality ...
        OR
        class MyView(View):
            ...
            @method_decorator(enterprise_login_required)
            def get(self, request, enterprise_uuid):
                # Some functionality ...
    """
    @wraps(view)
    def wrapper(request, *args, **kwargs):
        """
        Wrap the decorator.
        """
        # The decorated view must be routed with an `enterprise_uuid` kwarg.
        if 'enterprise_uuid' not in kwargs:
            raise Http404
        enterprise_uuid = kwargs['enterprise_uuid']
        # 404s if no EnterpriseCustomer exists for the given UUID.
        enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
        # Now verify if the user is logged in. If user is not logged in then
        # send the user to the login screen to sign in with an
        # Enterprise-linked IdP and the pipeline will get them back here.
        if not request.user.is_authenticated:
            # Preserve the current URL (path + existing query string) as the
            # post-login destination, adding the IdP hint and a marker that
            # the login was initiated by this decorator.
            parsed_current_url = urlparse(request.get_full_path())
            parsed_query_string = parse_qs(parsed_current_url.query)
            parsed_query_string.update({
                'tpa_hint': enterprise_customer.identity_provider,
                FRESH_LOGIN_PARAMETER: 'yes'
            })
            next_url = '{current_path}?{query_string}'.format(
                current_path=quote(parsed_current_url.path),
                query_string=urlencode(parsed_query_string, doseq=True)
            )
            return redirect(
                '{login_url}?{params}'.format(
                    login_url='/login',
                    params=urlencode(
                        {'next': next_url}
                    )
                )
            )
        # Otherwise, they can proceed to the original view.
        return view(request, *args, **kwargs)
    return wrapper
|
def force_fresh_session(view):
    """
    View decorator which terminates stale TPA sessions.

    This decorator forces the user to obtain a new session
    the first time they access the decorated view. This prevents
    TPA-authenticated users from hijacking the session of another
    user who may have been previously logged in using the same
    browser window.

    This decorator should be used in conjunction with the
    enterprise_login_required decorator.

    Usage::
        @enterprise_login_required
        @force_fresh_session()
        def my_view(request, enterprise_uuid):
            # Some functionality ...
        OR
        class MyView(View):
            ...
            @method_decorator(enterprise_login_required)
            @method_decorator(force_fresh_session)
            def get(self, request, enterprise_uuid):
                # Some functionality ...
    """
    @wraps(view)
    def wrapper(request, *args, **kwargs):
        """
        Wrap the function.
        """
        if not request.GET.get(FRESH_LOGIN_PARAMETER):
            # The enterprise_login_required decorator promises to set the fresh login URL
            # parameter for this URL when it was the agent that initiated the login process;
            # if that parameter isn't set, we can safely assume that the session is "stale";
            # that isn't necessarily an issue, though. Redirect the user to
            # log out and then come back here - the enterprise_login_required decorator will
            # then take effect prior to us arriving back here again.
            enterprise_customer = get_enterprise_customer_or_404(kwargs.get('enterprise_uuid'))
            provider_id = enterprise_customer.identity_provider or ''
            sso_provider = get_identity_provider(provider_id)
            # Only force the logout round-trip when the enterprise actually
            # has an SSO provider configured.
            if sso_provider:
                # Parse the current request full path, quote just the path portion,
                # then reconstruct the full path string.
                # The path and query portions should be the only non-empty strings here.
                scheme, netloc, path, params, query, fragment = urlparse(request.get_full_path())
                redirect_url = urlunparse((scheme, netloc, quote(path), params, query, fragment))
                return redirect(
                    '{logout_url}?{params}'.format(
                        logout_url='/logout',
                        params=urlencode(
                            {'redirect_url': redirect_url}
                        )
                    )
                )
        return view(request, *args, **kwargs)
    return wrapper
|
def validate_username(self, value):
    """
    Verify that the username has a matching user, and that the user has an associated EnterpriseCustomerUser.
    """
    try:
        user = User.objects.get(username=value)
    except User.DoesNotExist:
        raise serializers.ValidationError("User does not exist")
    try:
        # Stash the linked EnterpriseCustomerUser for later use by save().
        self.enterprise_customer_user = models.EnterpriseCustomerUser.objects.get(user_id=user.pk)
    except models.EnterpriseCustomerUser.DoesNotExist:
        raise serializers.ValidationError("User has no EnterpriseCustomerUser")
    return value
|
def save(self):  # pylint: disable=arguments-differ
    """
    Save the model with the found EnterpriseCustomerUser.
    """
    course_id = self.validated_data['course_id']
    _, created = models.EnterpriseCourseEnrollment.objects.get_or_create(
        enterprise_customer_user=self.enterprise_customer_user,
        course_id=course_id,
    )
    # Only fire the tracking event for enrollments that did not already exist.
    if created:
        track_enrollment('rest-api-enrollment', self.enterprise_customer_user.user_id, course_id)
|
def to_representation(self, instance):
    """
    Serialize the EnterpriseCustomerCatalog object.

    Arguments:
        instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.

    Returns:
        dict: The EnterpriseCustomerCatalog converted to a dict.
    """
    request = self.context['request']
    enterprise_customer = instance.enterprise_customer
    representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance)
    # Retrieve the EnterpriseCustomerCatalog search results from the discovery service.
    paginated_content = instance.get_paginated_content(request.GET)
    count = paginated_content['count']
    search_results = paginated_content['results']
    for item in search_results:
        content_type = item['content_type']
        marketing_url = item.get('marketing_url')
        # Append enterprise UTM tracking parameters to marketing URLs.
        if marketing_url:
            item['marketing_url'] = utils.update_query_parameters(
                marketing_url, utils.get_enterprise_utm_context(enterprise_customer)
            )
        # Add the Enterprise enrollment URL to each content item returned from the discovery service.
        if content_type == 'course':
            item['enrollment_url'] = instance.get_course_enrollment_url(item['key'])
        if content_type == 'courserun':
            item['enrollment_url'] = instance.get_course_run_enrollment_url(item['key'])
    if content_type == 'program':
            item['enrollment_url'] = instance.get_program_enrollment_url(item['uuid'])
    # Build pagination URLs pointing back at this API endpoint rather than
    # at the upstream discovery service.
    previous_url = None
    next_url = None
    page = int(request.GET.get('page', '1'))
    request_uri = request.build_absolute_uri()
    if paginated_content['previous']:
        previous_url = utils.update_query_parameters(request_uri, {'page': page - 1})
    if paginated_content['next']:
        next_url = utils.update_query_parameters(request_uri, {'page': page + 1})
    representation['count'] = count
    representation['previous'] = previous_url
    representation['next'] = next_url
    representation['results'] = search_results
    return representation
|
def get_groups(self, obj):
    """
    Return the enterprise related django groups that this user is a part of.
    """
    if not obj.user:
        return []
    enterprise_groups = obj.user.groups.filter(name__in=ENTERPRISE_PERMISSION_GROUPS)
    return [group.name for group in enterprise_groups]
|
def validate_username(self, value):
    """
    Verify that the username has a matching user.
    """
    try:
        # Stash the user for later use by save().
        self.user = User.objects.get(username=value)
        return value
    except User.DoesNotExist:
        raise serializers.ValidationError("User does not exist")
|
def save(self):  # pylint: disable=arguments-differ
    """
    Save the EnterpriseCustomerUser.
    """
    customer_user = models.EnterpriseCustomerUser(
        user_id=self.user.pk,
        enterprise_customer=self.validated_data['enterprise_customer'],
    )
    customer_user.save()
|
def to_representation(self, instance):
    """
    Return a copy of the course data with enterprise enrollment URLs added.

    Arguments:
        instance (dict): The course data.

    Returns:
        dict: The updated course data.
    """
    # Deep-copy so the caller's dict is never mutated.
    course = copy.deepcopy(instance)
    catalog = self.context['enterprise_customer_catalog']
    course['enrollment_url'] = catalog.get_course_enrollment_url(course['key'])
    for run in course['course_runs']:
        run['enrollment_url'] = catalog.get_course_run_enrollment_url(run['key'])
    return course
|
def to_representation(self, instance):
    """
    Return a copy of the course run data with an enterprise enrollment URL added.

    Arguments:
        instance (dict): The course run data.

    Returns:
        dict: The updated course run data.
    """
    # Deep-copy so the caller's dict is never mutated.
    course_run = copy.deepcopy(instance)
    catalog = self.context['enterprise_customer_catalog']
    course_run['enrollment_url'] = catalog.get_course_run_enrollment_url(course_run['key'])
    return course_run
|
def to_representation(self, instance):
    """
    Return a copy of the program data with enterprise enrollment URLs added.

    Arguments:
        instance (dict): The program data.

    Returns:
        dict: The updated program data.
    """
    # Deep-copy so the caller's dict is never mutated.
    program = copy.deepcopy(instance)
    catalog = self.context['enterprise_customer_catalog']
    program['enrollment_url'] = catalog.get_program_enrollment_url(program['uuid'])
    for course in program['courses']:
        course['enrollment_url'] = catalog.get_course_enrollment_url(course['key'])
        for run in course['course_runs']:
            run['enrollment_url'] = catalog.get_course_run_enrollment_url(run['key'])
    return program
|
def to_internal_value(self, data):
    """
    This implements the same relevant logic as ListSerializer except that if one or more items fail validation,
    processing for other items that did not fail will continue.
    """
    if not isinstance(data, list):
        message = self.error_messages['not_a_list'].format(
            input_type=type(data).__name__
        )
        raise serializers.ValidationError({
            api_settings.NON_FIELD_ERRORS_KEY: [message]
        })
    processed = []
    for entry in data:
        try:
            processed.append(self.child.run_validation(entry))
        except serializers.ValidationError as exc:
            # Record the error detail in place of the failed item and keep going.
            processed.append(exc.detail)
    return processed
|
def create(self, validated_data):
    """
    This selectively calls the child create method based on whether or not validation failed for each payload.
    """
    results = []
    for attrs in validated_data:
        # Payloads that failed validation carry 'non_field_errors' or
        # list-valued fields (per-field error details); skip creating those.
        failed = 'non_field_errors' in attrs or any(
            isinstance(attrs[field], list) for field in attrs
        )
        results.append(attrs if failed else self.child.create(attrs))
    return results
|
def to_representation(self, data):
    """
    This selectively calls to_representation on each result that was processed by create.
    """
    rendered = []
    for item in data:
        # Items carrying a 'detail' key were processed by create; the rest
        # are passed through untouched.
        if 'detail' in item:
            rendered.append(self.child.to_representation(item))
        else:
            rendered.append(item)
    return rendered
|
def create(self, validated_data):
    """
    Perform the enrollment for existing enterprise customer users, or create the pending objects for new users.

    Returns:
        dict: The validated data, with a 'detail' key of 'success' or, on a
            handled enrollment failure, the error message.
    """
    enterprise_customer = self.context.get('enterprise_customer')
    lms_user = validated_data.get('lms_user_id')
    tpa_user = validated_data.get('tpa_user_id')
    user_email = validated_data.get('user_email')
    course_run_id = validated_data.get('course_run_id')
    course_mode = validated_data.get('course_mode')
    cohort = validated_data.get('cohort')
    email_students = validated_data.get('email_students')
    is_active = validated_data.get('is_active')
    # Field validators resolve each identifier to an EnterpriseCustomerUser
    # when one exists; user_email may remain a plain string for new users.
    enterprise_customer_user = lms_user or tpa_user or user_email
    if isinstance(enterprise_customer_user, models.EnterpriseCustomerUser):
        validated_data['enterprise_customer_user'] = enterprise_customer_user
        try:
            # is_active selects between enrolling and unenrolling.
            if is_active:
                enterprise_customer_user.enroll(course_run_id, course_mode, cohort=cohort)
            else:
                enterprise_customer_user.unenroll(course_run_id)
        except (CourseEnrollmentDowngradeError, CourseEnrollmentPermissionError, HttpClientError) as exc:
            # Report the failure in the payload instead of raising.
            validated_data['detail'] = str(exc)
            return validated_data
        if is_active:
            track_enrollment('enterprise-customer-enrollment-api', enterprise_customer_user.user_id, course_run_id)
    else:
        # No matching EnterpriseCustomerUser: operate on pending registrations keyed by email.
        if is_active:
            enterprise_customer_user = enterprise_customer.enroll_user_pending_registration(
                user_email,
                course_mode,
                course_run_id,
                cohort=cohort
            )
        else:
            enterprise_customer.clear_pending_registration(user_email, course_run_id)
    if email_students:
        enterprise_customer.notify_enrolled_learners(
            self.context.get('request_user'),
            course_run_id,
            [enterprise_customer_user]
        )
    validated_data['detail'] = 'success'
    return validated_data
|
def validate_lms_user_id(self, value):
    """
    Validates the lms_user_id, if is given, to see if there is an existing EnterpriseCustomerUser for it.
    """
    enterprise_customer = self.context.get('enterprise_customer')
    try:
        # Ensure the given user is associated with the enterprise.
        return models.EnterpriseCustomerUser.objects.get(
            user_id=value,
            enterprise_customer=enterprise_customer
        )
    except models.EnterpriseCustomerUser.DoesNotExist:
        # Unknown users are tolerated; the combined validate() decides.
        return None
|
def validate_tpa_user_id(self, value):
    """
    Validates the tpa_user_id, if is given, to see if there is an existing EnterpriseCustomerUser for it.

    It first uses the third party auth api to find the associated username to do the lookup.
    """
    enterprise_customer = self.context.get('enterprise_customer')
    try:
        username = ThirdPartyAuthApiClient().get_username_from_remote_id(
            enterprise_customer.identity_provider, value
        )
        user = User.objects.get(username=username)
        return models.EnterpriseCustomerUser.objects.get(
            user_id=user.id,
            enterprise_customer=enterprise_customer
        )
    except (models.EnterpriseCustomerUser.DoesNotExist, User.DoesNotExist):
        # Unknown users are tolerated; the combined validate() decides.
        return None
|
def validate_user_email(self, value):
    """
    Validates the user_email, if given, to see if an existing EnterpriseCustomerUser exists for it.

    If it does not, it does not fail validation, unlike for the other field validation methods above.
    """
    enterprise_customer = self.context.get('enterprise_customer')
    try:
        user = User.objects.get(email=value)
        return models.EnterpriseCustomerUser.objects.get(
            user_id=user.id,
            enterprise_customer=enterprise_customer
        )
    except (models.EnterpriseCustomerUser.DoesNotExist, User.DoesNotExist):
        # Unknown emails are returned unchanged rather than treated as an error.
        return value
|
def validate_course_run_id(self, value):
    """
    Validates that the course run id is part of the Enterprise Customer's catalog.
    """
    enterprise_customer = self.context.get('enterprise_customer')
    if enterprise_customer.catalog_contains_course(value):
        return value
    raise serializers.ValidationError(
        'The course run id {course_run_id} is not in the catalog '
        'for Enterprise Customer {enterprise_customer}'.format(
            course_run_id=value,
            enterprise_customer=enterprise_customer.name,
        )
    )
|
def validate(self, data):  # pylint: disable=arguments-differ
    """
    Validate that at least one of the user identifier fields has been passed in.
    """
    has_identifier = any(
        data.get(field) for field in ('lms_user_id', 'tpa_user_id', 'user_email')
    )
    if not has_identifier:
        raise serializers.ValidationError(
            'At least one of the following fields must be specified and map to an EnterpriseCustomerUser: '
            'lms_user_id, tpa_user_id, user_email'
        )
    return data
|
def get_paginated_response(data, request):
    """
    Update pagination links in course catalog data and return DRF Response.

    Arguments:
        data (dict): Dictionary containing catalog courses.
        request (HttpRequest): Current request object.

    Returns:
        (Response): DRF response object containing pagination links.
    """
    base_url = urlparse(request.build_absolute_uri())._replace(query=None).geturl()

    def _rebuild(link):
        """Re-anchor an upstream pagination link onto the current request URL."""
        if not link:
            return None
        rebuilt = "{base_url}?{query_parameters}".format(
            base_url=base_url,
            query_parameters=urlparse(link).query,
        )
        return rebuilt.rstrip('?')

    return Response(OrderedDict([
        ('count', data['count']),
        ('next', _rebuild(data['next'])),
        ('previous', _rebuild(data['previous'])),
        ('results', data['results'])
    ]))
|
def create_switch(apps, schema_editor):
    """Create the `role_based_access_control` switch if it does not already exist."""
    Switch = apps.get_model('waffle', 'Switch')
    # get_or_create (rather than update_or_create) leaves an existing switch's
    # `active` state untouched, matching the "if it does not already exist"
    # contract above and the sibling switch-creation migration helpers.
    Switch.objects.get_or_create(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH, defaults={'active': False})
|
def delete_switch(apps, schema_editor):
    """Remove the `role_based_access_control` waffle switch, if present."""
    switch_model = apps.get_model('waffle', 'Switch')
    switch_model.objects.filter(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH).delete()
|
def create_switch(apps, schema_editor):
    """
    Ensure the SAP_USE_ENTERPRISE_ENROLLMENT_PAGE switch exists.

    A missing switch is created inactive (active=False); an existing switch is
    left untouched, so any operator-set state survives re-running this.
    """
    switch_model = apps.get_model('waffle', 'Switch')
    switch_model.objects.get_or_create(
        name='SAP_USE_ENTERPRISE_ENROLLMENT_PAGE',
        defaults={'active': False},
    )
|
def transmit(self, payload, **kwargs):
"""
Send a completion status call to SAP SuccessFactors using the client.
Args:
payload: The learner completion data payload to send to SAP SuccessFactors
"""
kwargs['app_label'] = 'sap_success_factors'
kwargs['model_name'] = 'SapSuccessFactorsLearnerDataTransmissionAudit'
kwargs['remote_user_id'] = 'sapsf_user_id'
super(SapSuccessFactorsLearnerTransmitter, self).transmit(payload, **kwargs)
|
def handle_transmission_error(self, learner_data, request_exception):
"""Handle the case where the employee on SAPSF's side is marked as inactive."""
try:
sys_msg = request_exception.response.content
except AttributeError:
pass
else:
if 'user account is inactive' in sys_msg:
ecu = EnterpriseCustomerUser.objects.get(
enterprise_enrollments__id=learner_data.enterprise_course_enrollment_id)
ecu.active = False
ecu.save()
LOGGER.warning(
'User %s with ID %s and email %s is a former employee of %s '
'and has been marked inactive in SAPSF. Now marking inactive internally.',
ecu.username, ecu.user_id, ecu.user_email, ecu.enterprise_customer
)
return
super(SapSuccessFactorsLearnerTransmitter, self).handle_transmission_error(learner_data, request_exception)
|
def allow_request(self, request, view):
"""
Modify throttling for service users.
Updates throttling rate if the request is coming from the service user, and
defaults to UserRateThrottle's configured setting otherwise.
Updated throttling rate comes from `DEFAULT_THROTTLE_RATES` key in `REST_FRAMEWORK`
setting. service user throttling is specified in `DEFAULT_THROTTLE_RATES` by `service_user` key
Example Setting:
```
REST_FRAMEWORK = {
...
'DEFAULT_THROTTLE_RATES': {
...
'service_user': '50/day'
}
}
```
"""
service_users = get_service_usernames()
# User service user throttling rates for service user.
if request.user.username in service_users:
self.update_throttle_scope()
return super(ServiceUserThrottle, self).allow_request(request, view)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.