| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (stringclasses: 1, value: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
14,300
|
opendatateam/udata
|
udata/forms/fields.py
|
EmptyNone.process_formdata
|
def process_formdata(self, valuelist):
'''Replace empty values by None'''
super(EmptyNone, self).process_formdata(valuelist)
self.data = self.data or None
|
python
|
def process_formdata(self, valuelist):
'''Replace empty values by None'''
super(EmptyNone, self).process_formdata(valuelist)
self.data = self.data or None
|
[
"def",
"process_formdata",
"(",
"self",
",",
"valuelist",
")",
":",
"super",
"(",
"EmptyNone",
",",
"self",
")",
".",
"process_formdata",
"(",
"valuelist",
")",
"self",
".",
"data",
"=",
"self",
".",
"data",
"or",
"None"
] |
Replace empty values by None
|
[
"Replace",
"empty",
"values",
"by",
"None"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L67-L70
|
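As an illustration of the row above, a minimal self-contained sketch of the same empty-to-None pattern on top of wtforms (the EmptyNoneString and DemoForm names are hypothetical):

from werkzeug.datastructures import MultiDict
from wtforms import Form, StringField

class EmptyNoneString(StringField):
    '''StringField variant that stores None instead of an empty string'''
    def process_formdata(self, valuelist):
        super(EmptyNoneString, self).process_formdata(valuelist)
        self.data = self.data or None

class DemoForm(Form):
    name = EmptyNoneString()

form = DemoForm(MultiDict([('name', '')]))  # simulate an empty submitted value
assert form.name.data is None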
14,301
|
opendatateam/udata
|
udata/forms/fields.py
|
ModelList.fetch_objects
|
def fetch_objects(self, oids):
'''
This method is used to fetch models
from a list of identifiers.
Default implementation performs a bulk query on identifiers.
Override this method to customize object retrieval.
'''
objects = self.model.objects.in_bulk(oids)
if len(objects.keys()) != len(oids):
non_existants = set(oids) - set(objects.keys())
msg = _('Unknown identifiers: {identifiers}').format(
identifiers=', '.join(str(ne) for ne in non_existants))
raise validators.ValidationError(msg)
return [objects[id] for id in oids]
|
python
|
def fetch_objects(self, oids):
'''
This method is used to fetch models
from a list of identifiers.
Default implementation performs a bulk query on identifiers.
Override this method to customize object retrieval.
'''
objects = self.model.objects.in_bulk(oids)
if len(objects.keys()) != len(oids):
non_existants = set(oids) - set(objects.keys())
msg = _('Unknown identifiers: {identifiers}').format(
identifiers=', '.join(str(ne) for ne in non_existants))
raise validators.ValidationError(msg)
return [objects[id] for id in oids]
|
[
"def",
"fetch_objects",
"(",
"self",
",",
"oids",
")",
":",
"objects",
"=",
"self",
".",
"model",
".",
"objects",
".",
"in_bulk",
"(",
"oids",
")",
"if",
"len",
"(",
"objects",
".",
"keys",
"(",
")",
")",
"!=",
"len",
"(",
"oids",
")",
":",
"non_existants",
"=",
"set",
"(",
"oids",
")",
"-",
"set",
"(",
"objects",
".",
"keys",
"(",
")",
")",
"msg",
"=",
"_",
"(",
"'Unknown identifiers: {identifiers}'",
")",
".",
"format",
"(",
"identifiers",
"=",
"', '",
".",
"join",
"(",
"str",
"(",
"ne",
")",
"for",
"ne",
"in",
"non_existants",
")",
")",
"raise",
"validators",
".",
"ValidationError",
"(",
"msg",
")",
"return",
"[",
"objects",
"[",
"id",
"]",
"for",
"id",
"in",
"oids",
"]"
] |
This method is used to fetch models
from a list of identifiers.
Default implementation performs a bulk query on identifiers.
Override this method to customize object retrieval.
|
[
"This",
"methods",
"is",
"used",
"to",
"fetch",
"models",
"from",
"a",
"list",
"of",
"identifiers",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L487-L503
|
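The bulk-fetch contract in the row above does not depend on MongoDB; a toy stand-in with the same error behaviour, where a plain dict plays the role of Model.objects.in_bulk:

def fetch_objects(catalog, oids):
    '''Return objects for oids, failing loudly on unknown identifiers'''
    objects = {oid: catalog[oid] for oid in oids if oid in catalog}
    if len(objects) != len(oids):
        missing = set(oids) - set(objects)
        raise ValueError('Unknown identifiers: {0}'.format(
            ', '.join(str(oid) for oid in sorted(missing))))
    return [objects[oid] for oid in oids]  # preserve the requested order

catalog = {1: 'a', 2: 'b', 3: 'c'}
assert fetch_objects(catalog, [3, 1]) == ['c', 'a']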
14,302
|
opendatateam/udata
|
udata/forms/fields.py
|
NestedModelList.validate
|
def validate(self, form, extra_validators=tuple()):
'''Perform validation only if data has been submitted'''
if not self.has_data:
return True
if self.is_list_data:
if not isinstance(self._formdata[self.name], (list, tuple)):
return False
return super(NestedModelList, self).validate(form, extra_validators)
|
python
|
def validate(self, form, extra_validators=tuple()):
'''Perform validation only if data has been submitted'''
if not self.has_data:
return True
if self.is_list_data:
if not isinstance(self._formdata[self.name], (list, tuple)):
return False
return super(NestedModelList, self).validate(form, extra_validators)
|
[
"def",
"validate",
"(",
"self",
",",
"form",
",",
"extra_validators",
"=",
"tuple",
"(",
")",
")",
":",
"if",
"not",
"self",
".",
"has_data",
":",
"return",
"True",
"if",
"self",
".",
"is_list_data",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_formdata",
"[",
"self",
".",
"name",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"False",
"return",
"super",
"(",
"NestedModelList",
",",
"self",
")",
".",
"validate",
"(",
"form",
",",
"extra_validators",
")"
] |
Perform validation only if data has been submitted
|
[
"Perform",
"validation",
"only",
"if",
"data",
"has",
"been",
"submitted"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L531-L538
|
14,303
|
opendatateam/udata
|
udata/forms/fields.py
|
NestedModelList._add_entry
|
def _add_entry(self, formdata=None, data=unset_value, index=None):
'''
Fill the form with previous data if necessary to handle partial update
'''
if formdata:
prefix = '-'.join((self.name, str(index)))
basekey = '-'.join((prefix, '{0}'))
idkey = basekey.format('id')
if prefix in formdata:
formdata[idkey] = formdata.pop(prefix)
if hasattr(self.nested_model, 'id') and idkey in formdata:
id = self.nested_model.id.to_python(formdata[idkey])
data = get_by(self.initial_data, 'id', id)
initial = flatten_json(self.nested_form,
data.to_mongo(),
prefix)
for key, value in initial.items():
if key not in formdata:
formdata[key] = value
else:
data = None
return super(NestedModelList, self)._add_entry(formdata, data, index)
|
python
|
def _add_entry(self, formdata=None, data=unset_value, index=None):
'''
Fill the form with previous data if necessary to handle partial update
'''
if formdata:
prefix = '-'.join((self.name, str(index)))
basekey = '-'.join((prefix, '{0}'))
idkey = basekey.format('id')
if prefix in formdata:
formdata[idkey] = formdata.pop(prefix)
if hasattr(self.nested_model, 'id') and idkey in formdata:
id = self.nested_model.id.to_python(formdata[idkey])
data = get_by(self.initial_data, 'id', id)
initial = flatten_json(self.nested_form,
data.to_mongo(),
prefix)
for key, value in initial.items():
if key not in formdata:
formdata[key] = value
else:
data = None
return super(NestedModelList, self)._add_entry(formdata, data, index)
|
[
"def",
"_add_entry",
"(",
"self",
",",
"formdata",
"=",
"None",
",",
"data",
"=",
"unset_value",
",",
"index",
"=",
"None",
")",
":",
"if",
"formdata",
":",
"prefix",
"=",
"'-'",
".",
"join",
"(",
"(",
"self",
".",
"name",
",",
"str",
"(",
"index",
")",
")",
")",
"basekey",
"=",
"'-'",
".",
"join",
"(",
"(",
"prefix",
",",
"'{0}'",
")",
")",
"idkey",
"=",
"basekey",
".",
"format",
"(",
"'id'",
")",
"if",
"prefix",
"in",
"formdata",
":",
"formdata",
"[",
"idkey",
"]",
"=",
"formdata",
".",
"pop",
"(",
"prefix",
")",
"if",
"hasattr",
"(",
"self",
".",
"nested_model",
",",
"'id'",
")",
"and",
"idkey",
"in",
"formdata",
":",
"id",
"=",
"self",
".",
"nested_model",
".",
"id",
".",
"to_python",
"(",
"formdata",
"[",
"idkey",
"]",
")",
"data",
"=",
"get_by",
"(",
"self",
".",
"initial_data",
",",
"'id'",
",",
"id",
")",
"initial",
"=",
"flatten_json",
"(",
"self",
".",
"nested_form",
",",
"data",
".",
"to_mongo",
"(",
")",
",",
"prefix",
")",
"for",
"key",
",",
"value",
"in",
"initial",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"formdata",
":",
"formdata",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"data",
"=",
"None",
"return",
"super",
"(",
"NestedModelList",
",",
"self",
")",
".",
"_add_entry",
"(",
"formdata",
",",
"data",
",",
"index",
")"
] |
Fill the form with previous data if necessary to handle partial update
|
[
"Fill",
"the",
"form",
"with",
"previous",
"data",
"if",
"necessary",
"to",
"handle",
"partial",
"update"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L564-L587
|
14,304
|
opendatateam/udata
|
udata/forms/fields.py
|
ExtrasField.parse
|
def parse(self, data):
'''Parse fields and store individual errors'''
self.field_errors = {}
return dict(
(k, self._parse_value(k, v)) for k, v in data.items()
)
|
python
|
def parse(self, data):
'''Parse fields and store individual errors'''
self.field_errors = {}
return dict(
(k, self._parse_value(k, v)) for k, v in data.items()
)
|
[
"def",
"parse",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"field_errors",
"=",
"{",
"}",
"return",
"dict",
"(",
"(",
"k",
",",
"self",
".",
"_parse_value",
"(",
"k",
",",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
")"
] |
Parse fields and store individual errors
|
[
"Parse",
"fields",
"and",
"store",
"individual",
"errors"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L733-L738
|
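The parse-and-record-errors idea in the row above, sketched standalone; parse_all is a hypothetical helper that collects per-key failures instead of aborting on the first one:

def parse_all(data, parse_value):
    '''Parse every value, recording failures per key'''
    field_errors = {}
    parsed = {}
    for key, value in data.items():
        try:
            parsed[key] = parse_value(value)
        except ValueError as e:
            field_errors[key] = str(e)
    return parsed, field_errors

values, errors = parse_all({'a': '1', 'b': 'x'}, int)
assert values == {'a': 1} and 'b' in errors  # 'x' is not a valid int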
14,305
|
opendatateam/udata
|
udata/core/metrics/commands.py
|
update
|
def update(site=False, organizations=False, users=False, datasets=False,
reuses=False):
'''Update all metrics for the current date'''
do_all = not any((site, organizations, users, datasets, reuses))
if do_all or site:
log.info('Update site metrics')
update_site_metrics()
if do_all or datasets:
log.info('Update datasets metrics')
for dataset in Dataset.objects.timeout(False):
update_metrics_for(dataset)
if do_all or reuses:
log.info('Update reuses metrics')
for reuse in Reuse.objects.timeout(False):
update_metrics_for(reuse)
if do_all or organizations:
log.info('Update organizations metrics')
for organization in Organization.objects.timeout(False):
update_metrics_for(organization)
if do_all or users:
log.info('Update user metrics')
for user in User.objects.timeout(False):
update_metrics_for(user)
success('All metrics have been updated')
|
python
|
def update(site=False, organizations=False, users=False, datasets=False,
reuses=False):
'''Update all metrics for the current date'''
do_all = not any((site, organizations, users, datasets, reuses))
if do_all or site:
log.info('Update site metrics')
update_site_metrics()
if do_all or datasets:
log.info('Update datasets metrics')
for dataset in Dataset.objects.timeout(False):
update_metrics_for(dataset)
if do_all or reuses:
log.info('Update reuses metrics')
for reuse in Reuse.objects.timeout(False):
update_metrics_for(reuse)
if do_all or organizations:
log.info('Update organizations metrics')
for organization in Organization.objects.timeout(False):
update_metrics_for(organization)
if do_all or users:
log.info('Update user metrics')
for user in User.objects.timeout(False):
update_metrics_for(user)
success('All metrics have been updated')
|
[
"def",
"update",
"(",
"site",
"=",
"False",
",",
"organizations",
"=",
"False",
",",
"users",
"=",
"False",
",",
"datasets",
"=",
"False",
",",
"reuses",
"=",
"False",
")",
":",
"do_all",
"=",
"not",
"any",
"(",
"(",
"site",
",",
"organizations",
",",
"users",
",",
"datasets",
",",
"reuses",
")",
")",
"if",
"do_all",
"or",
"site",
":",
"log",
".",
"info",
"(",
"'Update site metrics'",
")",
"update_site_metrics",
"(",
")",
"if",
"do_all",
"or",
"datasets",
":",
"log",
".",
"info",
"(",
"'Update datasets metrics'",
")",
"for",
"dataset",
"in",
"Dataset",
".",
"objects",
".",
"timeout",
"(",
"False",
")",
":",
"update_metrics_for",
"(",
"dataset",
")",
"if",
"do_all",
"or",
"reuses",
":",
"log",
".",
"info",
"(",
"'Update reuses metrics'",
")",
"for",
"reuse",
"in",
"Reuse",
".",
"objects",
".",
"timeout",
"(",
"False",
")",
":",
"update_metrics_for",
"(",
"reuse",
")",
"if",
"do_all",
"or",
"organizations",
":",
"log",
".",
"info",
"(",
"'Update organizations metrics'",
")",
"for",
"organization",
"in",
"Organization",
".",
"objects",
".",
"timeout",
"(",
"False",
")",
":",
"update_metrics_for",
"(",
"organization",
")",
"if",
"do_all",
"or",
"users",
":",
"log",
".",
"info",
"(",
"'Update user metrics'",
")",
"for",
"user",
"in",
"User",
".",
"objects",
".",
"timeout",
"(",
"False",
")",
":",
"update_metrics_for",
"(",
"user",
")",
"success",
"(",
"'All metrics have been updated'",
")"
] |
Update all metrics for the current date
|
[
"Update",
"all",
"metrics",
"for",
"the",
"current",
"date"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L37-L66
|
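The do_all = not any(flags) idiom in the row above (selecting nothing means running everything), isolated in a hypothetical three-step command:

def run(build=False, test=False, deploy=False):
    do_all = not any((build, test, deploy))  # no flag selected: run every step
    if do_all or build:
        print('building')
    if do_all or test:
        print('testing')
    if do_all or deploy:
        print('deploying')

run()           # prints all three steps
run(test=True)  # prints only 'testing'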
14,306
|
opendatateam/udata
|
udata/core/metrics/commands.py
|
list
|
def list():
'''List all known metrics'''
for cls, metrics in metric_catalog.items():
echo(white(cls.__name__))
for metric in metrics.keys():
echo('> {0}'.format(metric))
|
python
|
def list():
'''List all known metrics'''
for cls, metrics in metric_catalog.items():
echo(white(cls.__name__))
for metric in metrics.keys():
echo('> {0}'.format(metric))
|
[
"def",
"list",
"(",
")",
":",
"for",
"cls",
",",
"metrics",
"in",
"metric_catalog",
".",
"items",
"(",
")",
":",
"echo",
"(",
"white",
"(",
"cls",
".",
"__name__",
")",
")",
"for",
"metric",
"in",
"metrics",
".",
"keys",
"(",
")",
":",
"echo",
"(",
"'> {0}'",
".",
"format",
"(",
"metric",
")",
")"
] |
List all known metrics
|
[
"List",
"all",
"known",
"metrics"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L70-L76
|
14,307
|
opendatateam/udata
|
udata/api/commands.py
|
json_to_file
|
def json_to_file(data, filename, pretty=False):
'''Dump JSON data to a file'''
kwargs = dict(indent=4) if pretty else {}
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
dump = json.dumps(data, **kwargs)
with open(filename, 'wb') as f:
f.write(dump.encode('utf-8'))
|
python
|
def json_to_file(data, filename, pretty=False):
'''Dump JSON data to a file'''
kwargs = dict(indent=4) if pretty else {}
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
dump = json.dumps(data, **kwargs)
with open(filename, 'wb') as f:
f.write(dump.encode('utf-8'))
|
[
"def",
"json_to_file",
"(",
"data",
",",
"filename",
",",
"pretty",
"=",
"False",
")",
":",
"kwargs",
"=",
"dict",
"(",
"indent",
"=",
"4",
")",
"if",
"pretty",
"else",
"{",
"}",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"dump",
"=",
"json",
".",
"dumps",
"(",
"api",
".",
"__schema__",
",",
"*",
"*",
"kwargs",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"dump",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] |
Dump JSON data to a file
|
[
"Dump",
"JSON",
"data",
"to",
"a",
"file"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L24-L32
|
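A usage sketch for the function in the row above, assuming json_to_file is importable (path and payload are hypothetical):

json_to_file({'swagger': '2.0', 'info': {'title': 'udata API'}},
             '/tmp/udata/api/schema.json', pretty=True)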
14,308
|
opendatateam/udata
|
udata/api/commands.py
|
postman
|
def postman(filename, pretty, urlvars, swagger):
'''Dump the API as a Postman collection'''
data = api.as_postman(urlvars=urlvars, swagger=swagger)
json_to_file(data, filename, pretty)
|
python
|
def postman(filename, pretty, urlvars, swagger):
'''Dump the API as a Postman collection'''
data = api.as_postman(urlvars=urlvars, swagger=swagger)
json_to_file(data, filename, pretty)
|
[
"def",
"postman",
"(",
"filename",
",",
"pretty",
",",
"urlvars",
",",
"swagger",
")",
":",
"data",
"=",
"api",
".",
"as_postman",
"(",
"urlvars",
"=",
"urlvars",
",",
"swagger",
"=",
"swagger",
")",
"json_to_file",
"(",
"data",
",",
"filename",
",",
"pretty",
")"
] |
Dump the API as a Postman collection
|
[
"Dump",
"the",
"API",
"as",
"a",
"Postman",
"collection"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L49-L52
|
14,309
|
opendatateam/udata
|
udata/core/badges/tasks.py
|
notify_badge_added_certified
|
def notify_badge_added_certified(sender, kind=''):
'''
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
'''
if kind == CERTIFIED and isinstance(sender, Organization):
recipients = [member.user for member in sender.members]
subject = _(
'Your organization "%(name)s" has been certified',
name=sender.name
)
mail.send(
subject,
recipients,
'badge_added_certified',
organization=sender,
badge=sender.get_badge(kind)
)
|
python
|
def notify_badge_added_certified(sender, kind=''):
'''
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
'''
if kind == CERTIFIED and isinstance(sender, Organization):
recipients = [member.user for member in sender.members]
subject = _(
'Your organization "%(name)s" has been certified',
name=sender.name
)
mail.send(
subject,
recipients,
'badge_added_certified',
organization=sender,
badge=sender.get_badge(kind)
)
|
[
"def",
"notify_badge_added_certified",
"(",
"sender",
",",
"kind",
"=",
"''",
")",
":",
"if",
"kind",
"==",
"CERTIFIED",
"and",
"isinstance",
"(",
"sender",
",",
"Organization",
")",
":",
"recipients",
"=",
"[",
"member",
".",
"user",
"for",
"member",
"in",
"sender",
".",
"members",
"]",
"subject",
"=",
"_",
"(",
"'Your organization \"%(name)s\" has been certified'",
",",
"name",
"=",
"sender",
".",
"name",
")",
"mail",
".",
"send",
"(",
"subject",
",",
"recipients",
",",
"'badge_added_certified'",
",",
"organization",
"=",
"sender",
",",
"badge",
"=",
"sender",
".",
"get_badge",
"(",
"kind",
")",
")"
] |
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
|
[
"Send",
"an",
"email",
"when",
"a",
"CERTIFIED",
"badge",
"is",
"added",
"to",
"an",
"Organization"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/tasks.py#L27-L50
|
14,310
|
opendatateam/udata
|
udata/core/discussions/notifications.py
|
discussions_notifications
|
def discussions_notifications(user):
'''Notify user about open discussions'''
notifications = []
# Only fetch required fields for notification serialization
# Greatly improves performance and memory usage
qs = discussions_for(user).only('id', 'created', 'title', 'subject')
# Do not dereference subject (so it's a DBRef)
# Also improves performance and memory usage
for discussion in qs.no_dereference():
notifications.append((discussion.created, {
'id': discussion.id,
'title': discussion.title,
'subject': {
'id': discussion.subject['_ref'].id,
'type': discussion.subject['_cls'].lower(),
}
}))
return notifications
|
python
|
def discussions_notifications(user):
'''Notify user about open discussions'''
notifications = []
# Only fetch required fields for notification serialization
# Greatly improves performance and memory usage
qs = discussions_for(user).only('id', 'created', 'title', 'subject')
# Do not dereference subject (so it's a DBRef)
# Also improves performance and memory usage
for discussion in qs.no_dereference():
notifications.append((discussion.created, {
'id': discussion.id,
'title': discussion.title,
'subject': {
'id': discussion.subject['_ref'].id,
'type': discussion.subject['_cls'].lower(),
}
}))
return notifications
|
[
"def",
"discussions_notifications",
"(",
"user",
")",
":",
"notifications",
"=",
"[",
"]",
"# Only fetch required fields for notification serialization",
"# Greatly improve performances and memory usage",
"qs",
"=",
"discussions_for",
"(",
"user",
")",
".",
"only",
"(",
"'id'",
",",
"'created'",
",",
"'title'",
",",
"'subject'",
")",
"# Do not dereference subject (so it's a DBRef)",
"# Also improve performances and memory usage",
"for",
"discussion",
"in",
"qs",
".",
"no_dereference",
"(",
")",
":",
"notifications",
".",
"append",
"(",
"(",
"discussion",
".",
"created",
",",
"{",
"'id'",
":",
"discussion",
".",
"id",
",",
"'title'",
":",
"discussion",
".",
"title",
",",
"'subject'",
":",
"{",
"'id'",
":",
"discussion",
".",
"subject",
"[",
"'_ref'",
"]",
".",
"id",
",",
"'type'",
":",
"discussion",
".",
"subject",
"[",
"'_cls'",
"]",
".",
"lower",
"(",
")",
",",
"}",
"}",
")",
")",
"return",
"notifications"
] |
Notify user about open discussions
|
[
"Notify",
"user",
"about",
"open",
"discussions"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/notifications.py#L15-L35
|
14,311
|
opendatateam/udata
|
udata/tracking.py
|
send_signal
|
def send_signal(signal, request, user, **kwargs):
'''Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
'''
params = {
'user_ip': request.remote_addr
}
params.update(kwargs)
if user.is_authenticated:
params['uid'] = user.id
signal.send(request.url, **params)
|
python
|
def send_signal(signal, request, user, **kwargs):
'''Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
'''
params = {
'user_ip': request.remote_addr
}
params.update(kwargs)
if user.is_authenticated:
params['uid'] = user.id
signal.send(request.url, **params)
|
[
"def",
"send_signal",
"(",
"signal",
",",
"request",
",",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'user_ip'",
":",
"request",
".",
"remote_addr",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"if",
"user",
".",
"is_authenticated",
":",
"params",
"[",
"'uid'",
"]",
"=",
"user",
".",
"id",
"signal",
".",
"send",
"(",
"request",
".",
"url",
",",
"*",
"*",
"params",
")"
] |
Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
|
[
"Generic",
"method",
"to",
"send",
"signals",
"to",
"Piwik"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/tracking.py#L5-L16
|
14,312
|
opendatateam/udata
|
udata/core/organization/notifications.py
|
membership_request_notifications
|
def membership_request_notifications(user):
'''Notify user about pending membership requests'''
orgs = [o for o in user.organizations if o.is_admin(user)]
notifications = []
for org in orgs:
for request in org.pending_requests:
notifications.append((request.created, {
'id': request.id,
'organization': org.id,
'user': {
'id': request.user.id,
'fullname': request.user.fullname,
'avatar': str(request.user.avatar)
}
}))
return notifications
|
python
|
def membership_request_notifications(user):
'''Notify user about pending membership requests'''
orgs = [o for o in user.organizations if o.is_admin(user)]
notifications = []
for org in orgs:
for request in org.pending_requests:
notifications.append((request.created, {
'id': request.id,
'organization': org.id,
'user': {
'id': request.user.id,
'fullname': request.user.fullname,
'avatar': str(request.user.avatar)
}
}))
return notifications
|
[
"def",
"membership_request_notifications",
"(",
"user",
")",
":",
"orgs",
"=",
"[",
"o",
"for",
"o",
"in",
"user",
".",
"organizations",
"if",
"o",
".",
"is_admin",
"(",
"user",
")",
"]",
"notifications",
"=",
"[",
"]",
"for",
"org",
"in",
"orgs",
":",
"for",
"request",
"in",
"org",
".",
"pending_requests",
":",
"notifications",
".",
"append",
"(",
"(",
"request",
".",
"created",
",",
"{",
"'id'",
":",
"request",
".",
"id",
",",
"'organization'",
":",
"org",
".",
"id",
",",
"'user'",
":",
"{",
"'id'",
":",
"request",
".",
"user",
".",
"id",
",",
"'fullname'",
":",
"request",
".",
"user",
".",
"fullname",
",",
"'avatar'",
":",
"str",
"(",
"request",
".",
"user",
".",
"avatar",
")",
"}",
"}",
")",
")",
"return",
"notifications"
] |
Notify user about pending membership requests
|
[
"Notify",
"user",
"about",
"pending",
"membership",
"requests"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/organization/notifications.py#L13-L30
|
14,313
|
opendatateam/udata
|
udata/harvest/commands.py
|
validate
|
def validate(identifier):
'''Validate a source given its identifier'''
source = actions.validate_source(identifier)
log.info('Source %s (%s) has been validated', source.slug, str(source.id))
|
python
|
def validate(identifier):
'''Validate a source given its identifier'''
source = actions.validate_source(identifier)
log.info('Source %s (%s) has been validated', source.slug, str(source.id))
|
[
"def",
"validate",
"(",
"identifier",
")",
":",
"source",
"=",
"actions",
".",
"validate_source",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Source %s (%s) has been validated'",
",",
"source",
".",
"slug",
",",
"str",
"(",
"source",
".",
"id",
")",
")"
] |
Validate a source given its identifier
|
[
"Validate",
"a",
"source",
"given",
"its",
"identifier"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L48-L51
|
14,314
|
opendatateam/udata
|
udata/harvest/commands.py
|
delete
|
def delete(identifier):
'''Delete a harvest source'''
log.info('Deleting source "%s"', identifier)
actions.delete_source(identifier)
log.info('Deleted source "%s"', identifier)
|
python
|
def delete(identifier):
'''Delete a harvest source'''
log.info('Deleting source "%s"', identifier)
actions.delete_source(identifier)
log.info('Deleted source "%s"', identifier)
|
[
"def",
"delete",
"(",
"identifier",
")",
":",
"log",
".",
"info",
"(",
"'Deleting source \"%s\"'",
",",
"identifier",
")",
"actions",
".",
"delete_source",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Deleted source \"%s\"'",
",",
"identifier",
")"
] |
Delete a harvest source
|
[
"Delete",
"a",
"harvest",
"source"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L55-L59
|
14,315
|
opendatateam/udata
|
udata/harvest/commands.py
|
sources
|
def sources(scheduled=False):
'''List all harvest sources'''
sources = actions.list_sources()
if scheduled:
sources = [s for s in sources if s.periodic_task]
if sources:
for source in sources:
msg = '{source.name} ({source.backend}): {cron}'
if source.periodic_task:
cron = source.periodic_task.schedule_display
else:
cron = 'not scheduled'
log.info(msg.format(source=source, cron=cron))
elif scheduled:
log.info('No sources scheduled yet')
else:
log.info('No sources defined yet')
|
python
|
def sources(scheduled=False):
'''List all harvest sources'''
sources = actions.list_sources()
if scheduled:
sources = [s for s in sources if s.periodic_task]
if sources:
for source in sources:
msg = '{source.name} ({source.backend}): {cron}'
if source.periodic_task:
cron = source.periodic_task.schedule_display
else:
cron = 'not scheduled'
log.info(msg.format(source=source, cron=cron))
elif scheduled:
log.info('No sources scheduled yet')
else:
log.info('No sources defined yet')
|
[
"def",
"sources",
"(",
"scheduled",
"=",
"False",
")",
":",
"sources",
"=",
"actions",
".",
"list_sources",
"(",
")",
"if",
"scheduled",
":",
"sources",
"=",
"[",
"s",
"for",
"s",
"in",
"sources",
"if",
"s",
".",
"periodic_task",
"]",
"if",
"sources",
":",
"for",
"source",
"in",
"sources",
":",
"msg",
"=",
"'{source.name} ({source.backend}): {cron}'",
"if",
"source",
".",
"periodic_task",
":",
"cron",
"=",
"source",
".",
"periodic_task",
".",
"schedule_display",
"else",
":",
"cron",
"=",
"'not scheduled'",
"log",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"source",
"=",
"source",
",",
"cron",
"=",
"cron",
")",
")",
"elif",
"scheduled",
":",
"log",
".",
"info",
"(",
"'No sources scheduled yet'",
")",
"else",
":",
"log",
".",
"info",
"(",
"'No sources defined yet'",
")"
] |
List all harvest sources
|
[
"List",
"all",
"harvest",
"sources"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L65-L81
|
14,316
|
opendatateam/udata
|
udata/harvest/commands.py
|
backends
|
def backends():
'''List available backends'''
log.info('Available backends:')
for backend in actions.list_backends():
log.info('%s (%s)', backend.name, backend.display_name or backend.name)
|
python
|
def backends():
'''List available backends'''
log.info('Available backends:')
for backend in actions.list_backends():
log.info('%s (%s)', backend.name, backend.display_name or backend.name)
|
[
"def",
"backends",
"(",
")",
":",
"log",
".",
"info",
"(",
"'Available backends:'",
")",
"for",
"backend",
"in",
"actions",
".",
"list_backends",
"(",
")",
":",
"log",
".",
"info",
"(",
"'%s (%s)'",
",",
"backend",
".",
"name",
",",
"backend",
".",
"display_name",
"or",
"backend",
".",
"name",
")"
] |
List available backends
|
[
"List",
"available",
"backends"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L85-L89
|
14,317
|
opendatateam/udata
|
udata/harvest/commands.py
|
schedule
|
def schedule(identifier, **kwargs):
'''Schedule a harvest job to run periodically'''
source = actions.schedule(identifier, **kwargs)
msg = 'Scheduled {source.name} with the following crontab: {cron}'
log.info(msg.format(source=source, cron=source.periodic_task.crontab))
|
python
|
def schedule(identifier, **kwargs):
'''Schedule a harvest job to run periodically'''
source = actions.schedule(identifier, **kwargs)
msg = 'Scheduled {source.name} with the following crontab: {cron}'
log.info(msg.format(source=source, cron=source.periodic_task.crontab))
|
[
"def",
"schedule",
"(",
"identifier",
",",
"*",
"*",
"kwargs",
")",
":",
"source",
"=",
"actions",
".",
"schedule",
"(",
"identifier",
",",
"*",
"*",
"kwargs",
")",
"msg",
"=",
"'Scheduled {source.name} with the following crontab: {cron}'",
"log",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"source",
"=",
"source",
",",
"cron",
"=",
"source",
".",
"periodic_task",
".",
"crontab",
")",
")"
] |
Schedule a harvest job to run periodically
|
[
"Schedule",
"a",
"harvest",
"job",
"to",
"run",
"periodically"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L120-L124
|
14,318
|
opendatateam/udata
|
udata/harvest/commands.py
|
unschedule
|
def unschedule(identifier):
'''Unschedule a periodic harvest job'''
source = actions.unschedule(identifier)
log.info('Unscheduled harvest source "%s"', source.name)
|
python
|
def unschedule(identifier):
'''Unschedule a periodic harvest job'''
source = actions.unschedule(identifier)
log.info('Unscheduled harvest source "%s"', source.name)
|
[
"def",
"unschedule",
"(",
"identifier",
")",
":",
"source",
"=",
"actions",
".",
"unschedule",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Unscheduled harvest source \"%s\"'",
",",
"source",
".",
"name",
")"
] |
Unschedule a periodic harvest job
|
[
"Unschedule",
"a",
"periodical",
"harvest",
"job"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L129-L132
|
14,319
|
opendatateam/udata
|
udata/harvest/commands.py
|
attach
|
def attach(domain, filename):
'''
Attach existing datasets to their harvest remote id
The mapping between identifiers should be in the FILENAME CSV file.
'''
log.info('Attaching datasets for domain %s', domain)
result = actions.attach(domain, filename)
log.info('Attached %s datasets to %s', result.success, domain)
|
python
|
def attach(domain, filename):
'''
Attach existing datasets to their harvest remote id
The mapping between identifiers should be in the FILENAME CSV file.
'''
log.info('Attaching datasets for domain %s', domain)
result = actions.attach(domain, filename)
log.info('Attached %s datasets to %s', result.success, domain)
|
[
"def",
"attach",
"(",
"domain",
",",
"filename",
")",
":",
"log",
".",
"info",
"(",
"'Attaching datasets for domain %s'",
",",
"domain",
")",
"result",
"=",
"actions",
".",
"attach",
"(",
"domain",
",",
"filename",
")",
"log",
".",
"info",
"(",
"'Attached %s datasets to %s'",
",",
"result",
".",
"success",
",",
"domain",
")"
] |
Attach existing datasets to their harvest remote id
The mapping between identifiers should be in the FILENAME CSV file.
|
[
"Attach",
"existing",
"datasets",
"to",
"their",
"harvest",
"remote",
"id"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L146-L154
|
14,320
|
opendatateam/udata
|
udata/features/transfer/actions.py
|
request_transfer
|
def request_transfer(subject, recipient, comment):
'''Initiate a transfer request'''
TransferPermission(subject).test()
if recipient == (subject.organization or subject.owner):
raise ValueError(
'Recipient should be different from the current owner')
transfer = Transfer.objects.create(
owner=subject.organization or subject.owner,
recipient=recipient,
subject=subject,
comment=comment
)
return transfer
|
python
|
def request_transfer(subject, recipient, comment):
'''Initiate a transfer request'''
TransferPermission(subject).test()
if recipient == (subject.organization or subject.owner):
raise ValueError(
'Recipient should be different from the current owner')
transfer = Transfer.objects.create(
owner=subject.organization or subject.owner,
recipient=recipient,
subject=subject,
comment=comment
)
return transfer
|
[
"def",
"request_transfer",
"(",
"subject",
",",
"recipient",
",",
"comment",
")",
":",
"TransferPermission",
"(",
"subject",
")",
".",
"test",
"(",
")",
"if",
"recipient",
"==",
"(",
"subject",
".",
"organization",
"or",
"subject",
".",
"owner",
")",
":",
"raise",
"ValueError",
"(",
"'Recipient should be different than the current owner'",
")",
"transfer",
"=",
"Transfer",
".",
"objects",
".",
"create",
"(",
"owner",
"=",
"subject",
".",
"organization",
"or",
"subject",
".",
"owner",
",",
"recipient",
"=",
"recipient",
",",
"subject",
"=",
"subject",
",",
"comment",
"=",
"comment",
")",
"return",
"transfer"
] |
Initiate a transfer request
|
[
"Initiate",
"a",
"transfer",
"request"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L19-L31
|
14,321
|
opendatateam/udata
|
udata/features/transfer/actions.py
|
accept_transfer
|
def accept_transfer(transfer, comment=None):
'''Accept an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'accepted'
transfer.response_comment = comment
transfer.save()
subject = transfer.subject
recipient = transfer.recipient
if isinstance(recipient, Organization):
subject.organization = recipient
elif isinstance(recipient, User):
subject.owner = recipient
subject.save()
return transfer
|
python
|
def accept_transfer(transfer, comment=None):
'''Accept an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'accepted'
transfer.response_comment = comment
transfer.save()
subject = transfer.subject
recipient = transfer.recipient
if isinstance(recipient, Organization):
subject.organization = recipient
elif isinstance(recipient, User):
subject.owner = recipient
subject.save()
return transfer
|
[
"def",
"accept_transfer",
"(",
"transfer",
",",
"comment",
"=",
"None",
")",
":",
"TransferResponsePermission",
"(",
"transfer",
")",
".",
"test",
"(",
")",
"transfer",
".",
"responded",
"=",
"datetime",
".",
"now",
"(",
")",
"transfer",
".",
"responder",
"=",
"current_user",
".",
"_get_current_object",
"(",
")",
"transfer",
".",
"status",
"=",
"'accepted'",
"transfer",
".",
"response_comment",
"=",
"comment",
"transfer",
".",
"save",
"(",
")",
"subject",
"=",
"transfer",
".",
"subject",
"recipient",
"=",
"transfer",
".",
"recipient",
"if",
"isinstance",
"(",
"recipient",
",",
"Organization",
")",
":",
"subject",
".",
"organization",
"=",
"recipient",
"elif",
"isinstance",
"(",
"recipient",
",",
"User",
")",
":",
"subject",
".",
"owner",
"=",
"recipient",
"subject",
".",
"save",
"(",
")",
"return",
"transfer"
] |
Accept an incoming transfer request
|
[
"Accept",
"an",
"incoming",
"a",
"transfer",
"request"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L35-L54
|
14,322
|
opendatateam/udata
|
udata/features/transfer/actions.py
|
refuse_transfer
|
def refuse_transfer(transfer, comment=None):
'''Refuse an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'refused'
transfer.response_comment = comment
transfer.save()
return transfer
|
python
|
def refuse_transfer(transfer, comment=None):
'''Refuse an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'refused'
transfer.response_comment = comment
transfer.save()
return transfer
|
[
"def",
"refuse_transfer",
"(",
"transfer",
",",
"comment",
"=",
"None",
")",
":",
"TransferResponsePermission",
"(",
"transfer",
")",
".",
"test",
"(",
")",
"transfer",
".",
"responded",
"=",
"datetime",
".",
"now",
"(",
")",
"transfer",
".",
"responder",
"=",
"current_user",
".",
"_get_current_object",
"(",
")",
"transfer",
".",
"status",
"=",
"'refused'",
"transfer",
".",
"response_comment",
"=",
"comment",
"transfer",
".",
"save",
"(",
")",
"return",
"transfer"
] |
Refuse an incoming transfer request
|
[
"Refuse",
"an",
"incoming",
"a",
"transfer",
"request"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L58-L68
|
14,323
|
opendatateam/udata
|
udata/core/metrics/models.py
|
WithMetrics.clean
|
def clean(self):
'''Fill metrics with defaults on create'''
if not self.metrics:
self.metrics = dict(
(name, spec.default)
for name, spec in (metric_catalog.get(self.__class__, {})
.items()))
return super(WithMetrics, self).clean()
|
python
|
def clean(self):
'''Fill metrics with defaults on create'''
if not self.metrics:
self.metrics = dict(
(name, spec.default)
for name, spec in (metric_catalog.get(self.__class__, {})
.items()))
return super(WithMetrics, self).clean()
|
[
"def",
"clean",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"metrics",
":",
"self",
".",
"metrics",
"=",
"dict",
"(",
"(",
"name",
",",
"spec",
".",
"default",
")",
"for",
"name",
",",
"spec",
"in",
"(",
"metric_catalog",
".",
"get",
"(",
"self",
".",
"__class__",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
")",
")",
"return",
"super",
"(",
"WithMetrics",
",",
"self",
")",
".",
"clean",
"(",
")"
] |
Fill metrics with defaults on create
|
[
"Fill",
"metrics",
"with",
"defaults",
"on",
"create"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/models.py#L44-L51
|
14,324
|
opendatateam/udata
|
udata/core/site/rdf.py
|
build_catalog
|
def build_catalog(site, datasets, format=None):
'''Build the DCAT catalog for this site'''
site_url = url_for('site.home_redirect', _external=True)
catalog_url = url_for('site.rdf_catalog', _external=True)
graph = Graph(namespace_manager=namespace_manager)
catalog = graph.resource(URIRef(catalog_url))
catalog.set(RDF.type, DCAT.Catalog)
catalog.set(DCT.title, Literal(site.title))
catalog.set(DCT.language,
Literal(current_app.config['DEFAULT_LANGUAGE']))
catalog.set(FOAF.homepage, URIRef(site_url))
publisher = graph.resource(BNode())
publisher.set(RDF.type, FOAF.Organization)
publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR']))
catalog.set(DCT.publisher, publisher)
for dataset in datasets:
catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph))
if isinstance(datasets, Paginable):
if not format:
raise ValueError('Pagination requires format')
catalog.add(RDF.type, HYDRA.Collection)
catalog.set(HYDRA.totalItems, Literal(datasets.total))
kwargs = {
'format': format,
'page_size': datasets.page_size,
'_external': True,
}
first_url = url_for('site.rdf_catalog_format', page=1, **kwargs)
page_url = url_for('site.rdf_catalog_format',
page=datasets.page, **kwargs)
last_url = url_for('site.rdf_catalog_format',
page=datasets.pages, **kwargs)
pagination = graph.resource(URIRef(page_url))
pagination.set(RDF.type, HYDRA.PartialCollectionView)
pagination.set(HYDRA.first, URIRef(first_url))
pagination.set(HYDRA.last, URIRef(last_url))
if datasets.has_next:
next_url = url_for('site.rdf_catalog_format',
page=datasets.page + 1, **kwargs)
pagination.set(HYDRA.next, URIRef(next_url))
if datasets.has_prev:
prev_url = url_for('site.rdf_catalog_format',
page=datasets.page - 1, **kwargs)
pagination.set(HYDRA.previous, URIRef(prev_url))
catalog.set(HYDRA.view, pagination)
return catalog
|
python
|
def build_catalog(site, datasets, format=None):
'''Build the DCAT catalog for this site'''
site_url = url_for('site.home_redirect', _external=True)
catalog_url = url_for('site.rdf_catalog', _external=True)
graph = Graph(namespace_manager=namespace_manager)
catalog = graph.resource(URIRef(catalog_url))
catalog.set(RDF.type, DCAT.Catalog)
catalog.set(DCT.title, Literal(site.title))
catalog.set(DCT.language,
Literal(current_app.config['DEFAULT_LANGUAGE']))
catalog.set(FOAF.homepage, URIRef(site_url))
publisher = graph.resource(BNode())
publisher.set(RDF.type, FOAF.Organization)
publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR']))
catalog.set(DCT.publisher, publisher)
for dataset in datasets:
catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph))
if isinstance(datasets, Paginable):
if not format:
raise ValueError('Pagination requires format')
catalog.add(RDF.type, HYDRA.Collection)
catalog.set(HYDRA.totalItems, Literal(datasets.total))
kwargs = {
'format': format,
'page_size': datasets.page_size,
'_external': True,
}
first_url = url_for('site.rdf_catalog_format', page=1, **kwargs)
page_url = url_for('site.rdf_catalog_format',
page=datasets.page, **kwargs)
last_url = url_for('site.rdf_catalog_format',
page=datasets.pages, **kwargs)
pagination = graph.resource(URIRef(page_url))
pagination.set(RDF.type, HYDRA.PartialCollectionView)
pagination.set(HYDRA.first, URIRef(first_url))
pagination.set(HYDRA.last, URIRef(last_url))
if datasets.has_next:
next_url = url_for('site.rdf_catalog_format',
page=datasets.page + 1, **kwargs)
pagination.set(HYDRA.next, URIRef(next_url))
if datasets.has_prev:
prev_url = url_for('site.rdf_catalog_format',
page=datasets.page - 1, **kwargs)
pagination.set(HYDRA.previous, URIRef(prev_url))
catalog.set(HYDRA.view, pagination)
return catalog
|
[
"def",
"build_catalog",
"(",
"site",
",",
"datasets",
",",
"format",
"=",
"None",
")",
":",
"site_url",
"=",
"url_for",
"(",
"'site.home_redirect'",
",",
"_external",
"=",
"True",
")",
"catalog_url",
"=",
"url_for",
"(",
"'site.rdf_catalog'",
",",
"_external",
"=",
"True",
")",
"graph",
"=",
"Graph",
"(",
"namespace_manager",
"=",
"namespace_manager",
")",
"catalog",
"=",
"graph",
".",
"resource",
"(",
"URIRef",
"(",
"catalog_url",
")",
")",
"catalog",
".",
"set",
"(",
"RDF",
".",
"type",
",",
"DCAT",
".",
"Catalog",
")",
"catalog",
".",
"set",
"(",
"DCT",
".",
"title",
",",
"Literal",
"(",
"site",
".",
"title",
")",
")",
"catalog",
".",
"set",
"(",
"DCT",
".",
"language",
",",
"Literal",
"(",
"current_app",
".",
"config",
"[",
"'DEFAULT_LANGUAGE'",
"]",
")",
")",
"catalog",
".",
"set",
"(",
"FOAF",
".",
"homepage",
",",
"URIRef",
"(",
"site_url",
")",
")",
"publisher",
"=",
"graph",
".",
"resource",
"(",
"BNode",
"(",
")",
")",
"publisher",
".",
"set",
"(",
"RDF",
".",
"type",
",",
"FOAF",
".",
"Organization",
")",
"publisher",
".",
"set",
"(",
"FOAF",
".",
"name",
",",
"Literal",
"(",
"current_app",
".",
"config",
"[",
"'SITE_AUTHOR'",
"]",
")",
")",
"catalog",
".",
"set",
"(",
"DCT",
".",
"publisher",
",",
"publisher",
")",
"for",
"dataset",
"in",
"datasets",
":",
"catalog",
".",
"add",
"(",
"DCAT",
".",
"dataset",
",",
"dataset_to_rdf",
"(",
"dataset",
",",
"graph",
")",
")",
"if",
"isinstance",
"(",
"datasets",
",",
"Paginable",
")",
":",
"if",
"not",
"format",
":",
"raise",
"ValueError",
"(",
"'Pagination requires format'",
")",
"catalog",
".",
"add",
"(",
"RDF",
".",
"type",
",",
"HYDRA",
".",
"Collection",
")",
"catalog",
".",
"set",
"(",
"HYDRA",
".",
"totalItems",
",",
"Literal",
"(",
"datasets",
".",
"total",
")",
")",
"kwargs",
"=",
"{",
"'format'",
":",
"format",
",",
"'page_size'",
":",
"datasets",
".",
"page_size",
",",
"'_external'",
":",
"True",
",",
"}",
"first_url",
"=",
"url_for",
"(",
"'site.rdf_catalog_format'",
",",
"page",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
"page_url",
"=",
"url_for",
"(",
"'site.rdf_catalog_format'",
",",
"page",
"=",
"datasets",
".",
"page",
",",
"*",
"*",
"kwargs",
")",
"last_url",
"=",
"url_for",
"(",
"'site.rdf_catalog_format'",
",",
"page",
"=",
"datasets",
".",
"pages",
",",
"*",
"*",
"kwargs",
")",
"pagination",
"=",
"graph",
".",
"resource",
"(",
"URIRef",
"(",
"page_url",
")",
")",
"pagination",
".",
"set",
"(",
"RDF",
".",
"type",
",",
"HYDRA",
".",
"PartialCollectionView",
")",
"pagination",
".",
"set",
"(",
"HYDRA",
".",
"first",
",",
"URIRef",
"(",
"first_url",
")",
")",
"pagination",
".",
"set",
"(",
"HYDRA",
".",
"last",
",",
"URIRef",
"(",
"last_url",
")",
")",
"if",
"datasets",
".",
"has_next",
":",
"next_url",
"=",
"url_for",
"(",
"'site.rdf_catalog_format'",
",",
"page",
"=",
"datasets",
".",
"page",
"+",
"1",
",",
"*",
"*",
"kwargs",
")",
"pagination",
".",
"set",
"(",
"HYDRA",
".",
"next",
",",
"URIRef",
"(",
"next_url",
")",
")",
"if",
"datasets",
".",
"has_prev",
":",
"prev_url",
"=",
"url_for",
"(",
"'site.rdf_catalog_format'",
",",
"page",
"=",
"datasets",
".",
"page",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
"pagination",
".",
"set",
"(",
"HYDRA",
".",
"previous",
",",
"URIRef",
"(",
"prev_url",
")",
")",
"catalog",
".",
"set",
"(",
"HYDRA",
".",
"view",
",",
"pagination",
")",
"return",
"catalog"
] |
Build the DCAT catalog for this site
|
[
"Build",
"the",
"DCAT",
"catalog",
"for",
"this",
"site"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/site/rdf.py#L15-L68
|
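A reduced rdflib sketch of the catalog construction in the row above, outside any Flask context (URLs and title are placeholders):

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import DCAT, DCTERMS, FOAF, RDF

graph = Graph()
catalog = graph.resource(URIRef('https://example.com/catalog'))
catalog.set(RDF.type, DCAT.Catalog)
catalog.set(DCTERMS.title, Literal('Example catalog'))
catalog.set(FOAF.homepage, URIRef('https://example.com/'))
print(graph.serialize(format='turtle'))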
14,325
|
opendatateam/udata
|
udata/patch_flask_security.py
|
sendmail_proxy
|
def sendmail_proxy(subject, email, template, **context):
"""Cast the lazy_gettext'ed subject to string before passing to Celery"""
sendmail.delay(subject.value, email, template, **context)
|
python
|
def sendmail_proxy(subject, email, template, **context):
"""Cast the lazy_gettext'ed subject to string before passing to Celery"""
sendmail.delay(subject.value, email, template, **context)
|
[
"def",
"sendmail_proxy",
"(",
"subject",
",",
"email",
",",
"template",
",",
"*",
"*",
"context",
")",
":",
"sendmail",
".",
"delay",
"(",
"subject",
".",
"value",
",",
"email",
",",
"template",
",",
"*",
"*",
"context",
")"
] |
Cast the lazy_gettext'ed subject to string before passing to Celery
|
[
"Cast",
"the",
"lazy_gettext",
"ed",
"subject",
"to",
"string",
"before",
"passing",
"to",
"Celery"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/patch_flask_security.py#L18-L20
|
14,326
|
opendatateam/udata
|
udata/commands/static.py
|
collect
|
def collect(path, no_input):
'''Collect static files'''
if exists(path):
msg = '"%s" directory already exists and will be erased'
log.warning(msg, path)
if not no_input:
click.confirm('Are you sure?', abort=True)
log.info('Deleting static directory "%s"', path)
shutil.rmtree(path)
prefix = current_app.static_url_path or current_app.static_folder
if prefix.startswith('/'):
prefix = prefix[1:]
destination = join(path, prefix)
log.info('Copying application assets into "%s"', destination)
shutil.copytree(current_app.static_folder, destination)
for blueprint in current_app.blueprints.values():
if blueprint.has_static_folder:
prefix = current_app.static_prefixes.get(blueprint.name)
prefix = prefix or blueprint.url_prefix or ''
prefix += blueprint.static_url_path or ''
if prefix.startswith('/'):
prefix = prefix[1:]
log.info('Copying %s assets to %s', blueprint.name, prefix)
destination = join(path, prefix)
copy_recursive(blueprint.static_folder, destination)
for prefix, source in current_app.config['STATIC_DIRS']:
log.info('Copying %s to %s', source, prefix)
destination = join(path, prefix)
copy_recursive(source, destination)
log.info('Done')
|
python
|
def collect(path, no_input):
'''Collect static files'''
if exists(path):
msg = '"%s" directory already exists and will be erased'
log.warning(msg, path)
if not no_input:
click.confirm('Are you sure?', abort=True)
log.info('Deleting static directory "%s"', path)
shutil.rmtree(path)
prefix = current_app.static_url_path or current_app.static_folder
if prefix.startswith('/'):
prefix = prefix[1:]
destination = join(path, prefix)
log.info('Copying application assets into "%s"', destination)
shutil.copytree(current_app.static_folder, destination)
for blueprint in current_app.blueprints.values():
if blueprint.has_static_folder:
prefix = current_app.static_prefixes.get(blueprint.name)
prefix = prefix or blueprint.url_prefix or ''
prefix += blueprint.static_url_path or ''
if prefix.startswith('/'):
prefix = prefix[1:]
log.info('Copying %s assets to %s', blueprint.name, prefix)
destination = join(path, prefix)
copy_recursive(blueprint.static_folder, destination)
for prefix, source in current_app.config['STATIC_DIRS']:
log.info('Copying %s to %s', source, prefix)
destination = join(path, prefix)
copy_recursive(source, destination)
log.info('Done')
|
[
"def",
"collect",
"(",
"path",
",",
"no_input",
")",
":",
"if",
"exists",
"(",
"path",
")",
":",
"msg",
"=",
"'\"%s\" directory already exists and will be erased'",
"log",
".",
"warning",
"(",
"msg",
",",
"path",
")",
"if",
"not",
"no_input",
":",
"click",
".",
"confirm",
"(",
"'Are you sure?'",
",",
"abort",
"=",
"True",
")",
"log",
".",
"info",
"(",
"'Deleting static directory \"%s\"'",
",",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"prefix",
"=",
"current_app",
".",
"static_url_path",
"or",
"current_app",
".",
"static_folder",
"if",
"prefix",
".",
"startswith",
"(",
"'/'",
")",
":",
"prefix",
"=",
"prefix",
"[",
"1",
":",
"]",
"destination",
"=",
"join",
"(",
"path",
",",
"prefix",
")",
"log",
".",
"info",
"(",
"'Copying application assets into \"%s\"'",
",",
"destination",
")",
"shutil",
".",
"copytree",
"(",
"current_app",
".",
"static_folder",
",",
"destination",
")",
"for",
"blueprint",
"in",
"current_app",
".",
"blueprints",
".",
"values",
"(",
")",
":",
"if",
"blueprint",
".",
"has_static_folder",
":",
"prefix",
"=",
"current_app",
".",
"static_prefixes",
".",
"get",
"(",
"blueprint",
".",
"name",
")",
"prefix",
"=",
"prefix",
"or",
"blueprint",
".",
"url_prefix",
"or",
"''",
"prefix",
"+=",
"blueprint",
".",
"static_url_path",
"or",
"''",
"if",
"prefix",
".",
"startswith",
"(",
"'/'",
")",
":",
"prefix",
"=",
"prefix",
"[",
"1",
":",
"]",
"log",
".",
"info",
"(",
"'Copying %s assets to %s'",
",",
"blueprint",
".",
"name",
",",
"prefix",
")",
"destination",
"=",
"join",
"(",
"path",
",",
"prefix",
")",
"copy_recursive",
"(",
"blueprint",
".",
"static_folder",
",",
"destination",
")",
"for",
"prefix",
",",
"source",
"in",
"current_app",
".",
"config",
"[",
"'STATIC_DIRS'",
"]",
":",
"log",
".",
"info",
"(",
"'Copying %s to %s'",
",",
"source",
",",
"prefix",
")",
"destination",
"=",
"join",
"(",
"path",
",",
"prefix",
")",
"copy_recursive",
"(",
"source",
",",
"destination",
")",
"log",
".",
"info",
"(",
"'Done'",
")"
] |
Collect static files
|
[
"Collect",
"static",
"files"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/static.py#L24-L59
|
14,327
|
opendatateam/udata
|
udata/harvest/notifications.py
|
validate_harvester_notifications
|
def validate_harvester_notifications(user):
'''Notify admins about pending harvester validation'''
if not user.sysadmin:
return []
notifications = []
# Only fetch required fields for notification serialization
# Greatly improves performance and memory usage
qs = HarvestSource.objects(validation__state=VALIDATION_PENDING)
qs = qs.only('id', 'created_at', 'name')
for source in qs:
notifications.append((source.created_at, {
'id': source.id,
'name': source.name,
}))
return notifications
|
python
|
def validate_harvester_notifications(user):
'''Notify admins about pending harvester validation'''
if not user.sysadmin:
return []
notifications = []
# Only fetch required fields for notification serialization
# Greatly improves performance and memory usage
qs = HarvestSource.objects(validation__state=VALIDATION_PENDING)
qs = qs.only('id', 'created_at', 'name')
for source in qs:
notifications.append((source.created_at, {
'id': source.id,
'name': source.name,
}))
return notifications
|
[
"def",
"validate_harvester_notifications",
"(",
"user",
")",
":",
"if",
"not",
"user",
".",
"sysadmin",
":",
"return",
"[",
"]",
"notifications",
"=",
"[",
"]",
"# Only fetch required fields for notification serialization",
"# Greatly improve performances and memory usage",
"qs",
"=",
"HarvestSource",
".",
"objects",
"(",
"validation__state",
"=",
"VALIDATION_PENDING",
")",
"qs",
"=",
"qs",
".",
"only",
"(",
"'id'",
",",
"'created_at'",
",",
"'name'",
")",
"for",
"source",
"in",
"qs",
":",
"notifications",
".",
"append",
"(",
"(",
"source",
".",
"created_at",
",",
"{",
"'id'",
":",
"source",
".",
"id",
",",
"'name'",
":",
"source",
".",
"name",
",",
"}",
")",
")",
"return",
"notifications"
] |
Notify admins about pending harvester validation
|
[
"Notify",
"admins",
"about",
"pending",
"harvester",
"validation"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/notifications.py#L14-L32
|
14,328
|
opendatateam/udata
|
udata/harvest/backends/__init__.py
|
get
|
def get(app, name):
'''Get a backend given its name'''
backend = get_all(app).get(name)
if not backend:
msg = 'Harvest backend "{0}" is not registered'.format(name)
raise EntrypointError(msg)
return backend
|
python
|
def get(app, name):
'''Get a backend given its name'''
backend = get_all(app).get(name)
if not backend:
msg = 'Harvest backend "{0}" is not registered'.format(name)
raise EntrypointError(msg)
return backend
|
[
"def",
"get",
"(",
"app",
",",
"name",
")",
":",
"backend",
"=",
"get_all",
"(",
"app",
")",
".",
"get",
"(",
"name",
")",
"if",
"not",
"backend",
":",
"msg",
"=",
"'Harvest backend \"{0}\" is not registered'",
".",
"format",
"(",
"name",
")",
"raise",
"EntrypointError",
"(",
"msg",
")",
"return",
"backend"
] |
Get a backend given its name
|
[
"Get",
"a",
"backend",
"given",
"its",
"name"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/__init__.py#L7-L13
|
14,329
|
opendatateam/udata
|
udata/core/topic/views.py
|
TopicSearchMixin.search
|
def search(self):
'''Override search to match on topic tags'''
s = super(TopicSearchMixin, self).search()
s = s.filter('bool', should=[
Q('term', tags=tag) for tag in self.topic.tags
])
return s
|
python
|
def search(self):
'''Override search to match on topic tags'''
s = super(TopicSearchMixin, self).search()
s = s.filter('bool', should=[
Q('term', tags=tag) for tag in self.topic.tags
])
return s
|
[
"def",
"search",
"(",
"self",
")",
":",
"s",
"=",
"super",
"(",
"TopicSearchMixin",
",",
"self",
")",
".",
"search",
"(",
")",
"s",
"=",
"s",
".",
"filter",
"(",
"'bool'",
",",
"should",
"=",
"[",
"Q",
"(",
"'term'",
",",
"tags",
"=",
"tag",
")",
"for",
"tag",
"in",
"self",
".",
"topic",
".",
"tags",
"]",
")",
"return",
"s"
] |
Override search to match on topic tags
|
[
"Override",
"search",
"to",
"match",
"on",
"topic",
"tags"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/topic/views.py#L25-L31
|
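The should-filter pattern from the row above as a minimal elasticsearch-dsl snippet; the index name and tags are made up, and no cluster is contacted since only the query dict is built:

from elasticsearch_dsl import Q, Search

tags = ['opendata', 'transport']
s = Search(index='datasets')
s = s.filter('bool', should=[Q('term', tags=tag) for tag in tags])
print(s.to_dict())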
14,330
|
opendatateam/udata
|
udata/core/reuse/models.py
|
Reuse.clean
|
def clean(self):
'''Auto populate urlhash from url'''
if not self.urlhash or 'url' in self._get_changed_fields():
self.urlhash = hash_url(self.url)
super(Reuse, self).clean()
|
python
|
def clean(self):
'''Auto populate urlhash from url'''
if not self.urlhash or 'url' in self._get_changed_fields():
self.urlhash = hash_url(self.url)
super(Reuse, self).clean()
|
[
"def",
"clean",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"urlhash",
"or",
"'url'",
"in",
"self",
".",
"_get_changed_fields",
"(",
")",
":",
"self",
".",
"urlhash",
"=",
"hash_url",
"(",
"self",
".",
"url",
")",
"super",
"(",
"Reuse",
",",
"self",
")",
".",
"clean",
"(",
")"
] |
Auto populate urlhash from url
|
[
"Auto",
"populate",
"urlhash",
"from",
"url"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/reuse/models.py#L126-L130
|
14,331
|
opendatateam/udata
|
udata/commands/serve.py
|
serve
|
def serve(info, host, port, reload, debugger, eager_loading, with_threads):
'''
Runs a local udata development server.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.
By default it will not support any sort of concurrency at all
to simplify debugging.
This can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
'''
# Werkzeug logger is special and is required
# with this configuration for development server
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.INFO)
logger.handlers = []
debug = current_app.config['DEBUG']
if reload is None:
reload = bool(debug)
if debugger is None:
debugger = bool(debug)
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
settings = os.environ.get('UDATA_SETTINGS',
os.path.join(os.getcwd(), 'udata.cfg'))
extra_files = [settings]
if reload:
extra_files.extend(assets.manifests_paths())
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads,
extra_files=extra_files)
|
python
|
def serve(info, host, port, reload, debugger, eager_loading, with_threads):
'''
Runs a local udata development server.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.
By default it will not support any sort of concurrency at all
to simplify debugging.
This can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
'''
# Werkzeug logger is special and is required
# with this configuration for development server
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.INFO)
logger.handlers = []
debug = current_app.config['DEBUG']
if reload is None:
reload = bool(debug)
if debugger is None:
debugger = bool(debug)
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
settings = os.environ.get('UDATA_SETTINGS',
os.path.join(os.getcwd(), 'udata.cfg'))
extra_files = [settings]
if reload:
extra_files.extend(assets.manifests_paths())
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads,
extra_files=extra_files)
|
[
"def",
"serve",
"(",
"info",
",",
"host",
",",
"port",
",",
"reload",
",",
"debugger",
",",
"eager_loading",
",",
"with_threads",
")",
":",
"# Werkzeug logger is special and is required",
"# with this configuration for development server",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'werkzeug'",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"logger",
".",
"handlers",
"=",
"[",
"]",
"debug",
"=",
"current_app",
".",
"config",
"[",
"'DEBUG'",
"]",
"if",
"reload",
"is",
"None",
":",
"reload",
"=",
"bool",
"(",
"debug",
")",
"if",
"debugger",
"is",
"None",
":",
"debugger",
"=",
"bool",
"(",
"debug",
")",
"if",
"eager_loading",
"is",
"None",
":",
"eager_loading",
"=",
"not",
"reload",
"app",
"=",
"DispatchingApp",
"(",
"info",
".",
"load_app",
",",
"use_eager_loading",
"=",
"eager_loading",
")",
"settings",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'UDATA_SETTINGS'",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'udata.cfg'",
")",
")",
"extra_files",
"=",
"[",
"settings",
"]",
"if",
"reload",
":",
"extra_files",
".",
"extend",
"(",
"assets",
".",
"manifests_paths",
"(",
")",
")",
"run_simple",
"(",
"host",
",",
"port",
",",
"app",
",",
"use_reloader",
"=",
"reload",
",",
"use_debugger",
"=",
"debugger",
",",
"threaded",
"=",
"with_threads",
",",
"extra_files",
"=",
"extra_files",
")"
] |
Runs a local udata development server.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.
By default it will not support any sort of concurrency at all
to simplify debugging.
This can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
|
[
"Runs",
"a",
"local",
"udata",
"development",
"server",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/serve.py#L38-L77
|
14,332
|
opendatateam/udata
|
udata/core/dataset/forms.py
|
enforce_filetype_file
|
def enforce_filetype_file(form, field):
'''Only allowed domains in resource.url when filetype is file'''
if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE:
return
domain = urlparse(field.data).netloc
allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS']
allowed_domains += [current_app.config.get('SERVER_NAME')]
if current_app.config.get('CDN_DOMAIN'):
allowed_domains.append(current_app.config['CDN_DOMAIN'])
if '*' in allowed_domains:
return
if domain and domain not in allowed_domains:
message = _('Domain "{domain}" not allowed for filetype "{filetype}"')
raise validators.ValidationError(message.format(
domain=domain, filetype=RESOURCE_FILETYPE_FILE
))
|
python
|
def enforce_filetype_file(form, field):
'''Only allowed domains in resource.url when filetype is file'''
if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE:
return
domain = urlparse(field.data).netloc
allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS']
allowed_domains += [current_app.config.get('SERVER_NAME')]
if current_app.config.get('CDN_DOMAIN'):
allowed_domains.append(current_app.config['CDN_DOMAIN'])
if '*' in allowed_domains:
return
if domain and domain not in allowed_domains:
message = _('Domain "{domain}" not allowed for filetype "{filetype}"')
raise validators.ValidationError(message.format(
domain=domain, filetype=RESOURCE_FILETYPE_FILE
))
|
[
"def",
"enforce_filetype_file",
"(",
"form",
",",
"field",
")",
":",
"if",
"form",
".",
"_fields",
".",
"get",
"(",
"'filetype'",
")",
".",
"data",
"!=",
"RESOURCE_FILETYPE_FILE",
":",
"return",
"domain",
"=",
"urlparse",
"(",
"field",
".",
"data",
")",
".",
"netloc",
"allowed_domains",
"=",
"current_app",
".",
"config",
"[",
"'RESOURCES_FILE_ALLOWED_DOMAINS'",
"]",
"allowed_domains",
"+=",
"[",
"current_app",
".",
"config",
".",
"get",
"(",
"'SERVER_NAME'",
")",
"]",
"if",
"current_app",
".",
"config",
".",
"get",
"(",
"'CDN_DOMAIN'",
")",
":",
"allowed_domains",
".",
"append",
"(",
"current_app",
".",
"config",
"[",
"'CDN_DOMAIN'",
"]",
")",
"if",
"'*'",
"in",
"allowed_domains",
":",
"return",
"if",
"domain",
"and",
"domain",
"not",
"in",
"allowed_domains",
":",
"message",
"=",
"_",
"(",
"'Domain \"{domain}\" not allowed for filetype \"{filetype}\"'",
")",
"raise",
"validators",
".",
"ValidationError",
"(",
"message",
".",
"format",
"(",
"domain",
"=",
"domain",
",",
"filetype",
"=",
"RESOURCE_FILETYPE_FILE",
")",
")"
] |
Only allowed domains in resource.url when filetype is file
|
[
"Only",
"allowed",
"domains",
"in",
"resource",
".",
"url",
"when",
"filetype",
"is",
"file"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/forms.py#L36-L51
|
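A minimal standalone sketch of the allowlist check above, run outside the Flask request context; the domain names are hypothetical stand-ins for the RESOURCES_FILE_ALLOWED_DOMAINS, SERVER_NAME and CDN_DOMAIN config values. The copy via list() is deliberate: extending the config-owned list in place with +=, as the validator above does, would grow the shared application config on every call.

from urllib.parse import urlparse

ALLOWED_CONFIG = ['static.example.com', 'www.example.com']  # hypothetical config list
CDN_DOMAIN = 'cdn.example.com'  # hypothetical

def domain_allowed(url):
    # Copy before extending so the shared config list is never mutated
    allowed = list(ALLOWED_CONFIG)
    allowed.append(CDN_DOMAIN)
    if '*' in allowed:
        return True
    domain = urlparse(url).netloc
    # An empty netloc (a relative URL) passes, as in the validator above
    return not domain or domain in allowed

assert domain_allowed('https://static.example.com/data.csv')
assert domain_allowed('/local/resource.csv')
assert not domain_allowed('https://elsewhere.org/data.csv')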
14,333
|
opendatateam/udata
|
udata/core/dataset/forms.py
|
map_legacy_frequencies
|
def map_legacy_frequencies(form, field):
''' Map legacy frequencies to new ones'''
if field.data in LEGACY_FREQUENCIES:
field.data = LEGACY_FREQUENCIES[field.data]
|
python
|
def map_legacy_frequencies(form, field):
''' Map legacy frequencies to new ones'''
if field.data in LEGACY_FREQUENCIES:
field.data = LEGACY_FREQUENCIES[field.data]
|
[
"def",
"map_legacy_frequencies",
"(",
"form",
",",
"field",
")",
":",
"if",
"field",
".",
"data",
"in",
"LEGACY_FREQUENCIES",
":",
"field",
".",
"data",
"=",
"LEGACY_FREQUENCIES",
"[",
"field",
".",
"data",
"]"
] |
Map legacy frequencies to new ones
|
[
"Map",
"legacy",
"frequencies",
"to",
"new",
"ones"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/forms.py#L101-L104
|
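A quick sketch of the same in-place remapping; LEGACY_FREQUENCIES in udata is a larger table, so the two entries below are purely illustrative, and FakeField is a hypothetical stand-in for a WTForms field.

LEGACY_FREQUENCIES = {'fortnighly': 'biweekly', 'realtime': 'continuous'}  # hypothetical subset

class FakeField:
    # Only the mutable ``data`` attribute matters for this filter
    def __init__(self, data):
        self.data = data

field = FakeField('fortnighly')
if field.data in LEGACY_FREQUENCIES:
    field.data = LEGACY_FREQUENCIES[field.data]
assert field.data == 'biweekly'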
14,334
|
opendatateam/udata
|
udata/core/user/models.py
|
User.resources_availability
|
def resources_availability(self):
"""Return the percentage of availability for resources."""
# Flatten the list.
availabilities = list(
chain(
*[org.check_availability() for org in self.organizations]
)
)
# Filter out the unknown
availabilities = [a for a in availabilities if type(a) is bool]
if availabilities:
# Trick will work because it's a sum() of booleans.
return round(100. * sum(availabilities) / len(availabilities), 2)
# if nothing is unavailable, everything is considered OK
return 100
|
python
|
def resources_availability(self):
"""Return the percentage of availability for resources."""
# Flatten the list.
availabilities = list(
chain(
*[org.check_availability() for org in self.organizations]
)
)
# Filter out the unknown
availabilities = [a for a in availabilities if type(a) is bool]
if availabilities:
# Trick will work because it's a sum() of booleans.
return round(100. * sum(availabilities) / len(availabilities), 2)
# if nothing is unavailable, everything is considered OK
return 100
|
[
"def",
"resources_availability",
"(",
"self",
")",
":",
"# Flatten the list.",
"availabilities",
"=",
"list",
"(",
"chain",
"(",
"*",
"[",
"org",
".",
"check_availability",
"(",
")",
"for",
"org",
"in",
"self",
".",
"organizations",
"]",
")",
")",
"# Filter out the unknown",
"availabilities",
"=",
"[",
"a",
"for",
"a",
"in",
"availabilities",
"if",
"type",
"(",
"a",
")",
"is",
"bool",
"]",
"if",
"availabilities",
":",
"# Trick will work because it's a sum() of booleans.",
"return",
"round",
"(",
"100.",
"*",
"sum",
"(",
"availabilities",
")",
"/",
"len",
"(",
"availabilities",
")",
",",
"2",
")",
"# if nothing is unavailable, everything is considered OK",
"return",
"100"
] |
Return the percentage of availability for resources.
|
[
"Return",
"the",
"percentage",
"of",
"availability",
"for",
"resources",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L128-L142
|
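The percentage computation stands on its own; a sketch with hard-coded per-organization results (booleans plus an unknown marker) shows why filtering on type(a) is bool matters before summing.

from itertools import chain

# Hypothetical check_availability() results for two organizations
per_org = [[True, True, 'unknown'], [False, True]]

availabilities = [a for a in chain(*per_org) if type(a) is bool]
rate = round(100. * sum(availabilities) / len(availabilities), 2) if availabilities else 100
assert rate == 75.0  # 3 available out of 4 known results; 'unknown' is ignored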
14,335
|
opendatateam/udata
|
udata/core/user/models.py
|
User.datasets_org_count
|
def datasets_org_count(self):
"""Return the number of datasets of user's organizations."""
from udata.models import Dataset # Circular imports.
return sum(Dataset.objects(organization=org).visible().count()
for org in self.organizations)
|
python
|
def datasets_org_count(self):
"""Return the number of datasets of user's organizations."""
from udata.models import Dataset # Circular imports.
return sum(Dataset.objects(organization=org).visible().count()
for org in self.organizations)
|
[
"def",
"datasets_org_count",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Dataset",
"# Circular imports.",
"return",
"sum",
"(",
"Dataset",
".",
"objects",
"(",
"organization",
"=",
"org",
")",
".",
"visible",
"(",
")",
".",
"count",
"(",
")",
"for",
"org",
"in",
"self",
".",
"organizations",
")"
] |
Return the number of datasets of user's organizations.
|
[
"Return",
"the",
"number",
"of",
"datasets",
"of",
"user",
"s",
"organizations",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L145-L149
|
14,336
|
opendatateam/udata
|
udata/core/user/models.py
|
User.followers_org_count
|
def followers_org_count(self):
"""Return the number of followers of user's organizations."""
from udata.models import Follow # Circular imports.
return sum(Follow.objects(following=org).count()
for org in self.organizations)
|
python
|
def followers_org_count(self):
"""Return the number of followers of user's organizations."""
from udata.models import Follow # Circular imports.
return sum(Follow.objects(following=org).count()
for org in self.organizations)
|
[
"def",
"followers_org_count",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Follow",
"# Circular imports.",
"return",
"sum",
"(",
"Follow",
".",
"objects",
"(",
"following",
"=",
"org",
")",
".",
"count",
"(",
")",
"for",
"org",
"in",
"self",
".",
"organizations",
")"
] |
Return the number of followers of user's organizations.
|
[
"Return",
"the",
"number",
"of",
"followers",
"of",
"user",
"s",
"organizations",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L152-L156
|
14,337
|
opendatateam/udata
|
udata/core/badges/models.py
|
BadgeMixin.get_badge
|
def get_badge(self, kind):
''' Get a badge given its kind if present'''
candidates = [b for b in self.badges if b.kind == kind]
return candidates[0] if candidates else None
|
python
|
def get_badge(self, kind):
''' Get a badge given its kind if present'''
candidates = [b for b in self.badges if b.kind == kind]
return candidates[0] if candidates else None
|
[
"def",
"get_badge",
"(",
"self",
",",
"kind",
")",
":",
"candidates",
"=",
"[",
"b",
"for",
"b",
"in",
"self",
".",
"badges",
"if",
"b",
".",
"kind",
"==",
"kind",
"]",
"return",
"candidates",
"[",
"0",
"]",
"if",
"candidates",
"else",
"None"
] |
Get a badge given its kind if present
|
[
"Get",
"a",
"badge",
"given",
"its",
"kind",
"if",
"present"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L54-L57
|
14,338
|
opendatateam/udata
|
udata/core/badges/models.py
|
BadgeMixin.add_badge
|
def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.format(model=self.__class__.__name__,
kind=kind))
badge = Badge(kind=kind)
if current_user.is_authenticated:
badge.created_by = current_user.id
self.update(__raw__={
'$push': {
'badges': {
'$each': [badge.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self)
on_badge_added.send(self, kind=kind)
return self.get_badge(kind)
|
python
|
def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.format(model=self.__class__.__name__,
kind=kind))
badge = Badge(kind=kind)
if current_user.is_authenticated:
badge.created_by = current_user.id
self.update(__raw__={
'$push': {
'badges': {
'$each': [badge.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self)
on_badge_added.send(self, kind=kind)
return self.get_badge(kind)
|
[
"def",
"add_badge",
"(",
"self",
",",
"kind",
")",
":",
"badge",
"=",
"self",
".",
"get_badge",
"(",
"kind",
")",
"if",
"badge",
":",
"return",
"badge",
"if",
"kind",
"not",
"in",
"getattr",
"(",
"self",
",",
"'__badges__'",
",",
"{",
"}",
")",
":",
"msg",
"=",
"'Unknown badge type for {model}: {kind}'",
"raise",
"db",
".",
"ValidationError",
"(",
"msg",
".",
"format",
"(",
"model",
"=",
"self",
".",
"__class__",
".",
"__name__",
",",
"kind",
"=",
"kind",
")",
")",
"badge",
"=",
"Badge",
"(",
"kind",
"=",
"kind",
")",
"if",
"current_user",
".",
"is_authenticated",
":",
"badge",
".",
"created_by",
"=",
"current_user",
".",
"id",
"self",
".",
"update",
"(",
"__raw__",
"=",
"{",
"'$push'",
":",
"{",
"'badges'",
":",
"{",
"'$each'",
":",
"[",
"badge",
".",
"to_mongo",
"(",
")",
"]",
",",
"'$position'",
":",
"0",
"}",
"}",
"}",
")",
"self",
".",
"reload",
"(",
")",
"post_save",
".",
"send",
"(",
"self",
".",
"__class__",
",",
"document",
"=",
"self",
")",
"on_badge_added",
".",
"send",
"(",
"self",
",",
"kind",
"=",
"kind",
")",
"return",
"self",
".",
"get_badge",
"(",
"kind",
")"
] |
Perform an atomic prepend for a new badge
|
[
"Perform",
"an",
"atomic",
"prepend",
"for",
"a",
"new",
"badge"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L59-L83
|
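The atomic prepend relies on MongoDB's $push combined with $each and $position. A raw pymongo sketch of the same update, assuming a locally running MongoDB and a hypothetical collection; the model layer above wraps exactly this operator combination.

from pymongo import MongoClient

# Hypothetical local database; requires a running MongoDB instance
col = MongoClient()['demo']['badged']
col.update_one(
    {'_id': 1},
    # $position: 0 inserts at the head of the array instead of appending
    {'$push': {'badges': {'$each': [{'kind': 'certified'}], '$position': 0}}},
    upsert=True,
)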
14,339
|
opendatateam/udata
|
udata/core/badges/models.py
|
BadgeMixin.remove_badge
|
def remove_badge(self, kind):
'''Perform an atomic removal for a given badge'''
self.update(__raw__={
'$pull': {
'badges': {'kind': kind}
}
})
self.reload()
on_badge_removed.send(self, kind=kind)
post_save.send(self.__class__, document=self)
|
python
|
def remove_badge(self, kind):
'''Perform an atomic removal for a given badge'''
self.update(__raw__={
'$pull': {
'badges': {'kind': kind}
}
})
self.reload()
on_badge_removed.send(self, kind=kind)
post_save.send(self.__class__, document=self)
|
[
"def",
"remove_badge",
"(",
"self",
",",
"kind",
")",
":",
"self",
".",
"update",
"(",
"__raw__",
"=",
"{",
"'$pull'",
":",
"{",
"'badges'",
":",
"{",
"'kind'",
":",
"kind",
"}",
"}",
"}",
")",
"self",
".",
"reload",
"(",
")",
"on_badge_removed",
".",
"send",
"(",
"self",
",",
"kind",
"=",
"kind",
")",
"post_save",
".",
"send",
"(",
"self",
".",
"__class__",
",",
"document",
"=",
"self",
")"
] |
Perform an atomic removal for a given badge
|
[
"Perform",
"an",
"atomic",
"removal",
"for",
"a",
"given",
"badge"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L85-L94
|
14,340
|
opendatateam/udata
|
udata/core/badges/models.py
|
BadgeMixin.toggle_badge
|
def toggle_badge(self, kind):
    '''Toggle a badge given its kind'''
badge = self.get_badge(kind)
if badge:
return self.remove_badge(kind)
else:
return self.add_badge(kind)
|
python
|
def toggle_badge(self, kind):
    '''Toggle a badge given its kind'''
badge = self.get_badge(kind)
if badge:
return self.remove_badge(kind)
else:
return self.add_badge(kind)
|
[
"def",
"toggle_badge",
"(",
"self",
",",
"kind",
")",
":",
"badge",
"=",
"self",
".",
"get_badge",
"(",
"kind",
")",
"if",
"badge",
":",
"return",
"self",
".",
"remove_badge",
"(",
"kind",
")",
"else",
":",
"return",
"self",
".",
"add_badge",
"(",
"kind",
")"
] |
Toggle a badge given its kind
|
[
"Toggle",
"a",
"bdage",
"given",
"its",
"kind"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L96-L102
|
14,341
|
opendatateam/udata
|
udata/core/badges/models.py
|
BadgeMixin.badge_label
|
def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind]
|
python
|
def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind]
|
[
"def",
"badge_label",
"(",
"self",
",",
"badge",
")",
":",
"kind",
"=",
"badge",
".",
"kind",
"if",
"isinstance",
"(",
"badge",
",",
"Badge",
")",
"else",
"badge",
"return",
"self",
".",
"__badges__",
"[",
"kind",
"]"
] |
Display the badge label for a given kind
|
[
"Display",
"the",
"badge",
"label",
"for",
"a",
"given",
"kind"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L104-L107
|
14,342
|
opendatateam/udata
|
udata/core/discussions/actions.py
|
discussions_for
|
def discussions_for(user, only_open=True):
'''
Build a queryset to query discussions related to a given user's assets.
    It includes discussions coming from the user's organizations.
:param bool only_open: whether to include closed discussions or not.
'''
# Only fetch required fields for discussion filtering (id and slug)
    # Greatly improves performance and memory usage
datasets = Dataset.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
reuses = Reuse.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
qs = Discussion.objects(subject__in=list(datasets) + list(reuses))
if only_open:
qs = qs(closed__exists=False)
return qs
|
python
|
def discussions_for(user, only_open=True):
'''
Build a queryset to query discussions related to a given user's assets.
    It includes discussions coming from the user's organizations.
:param bool only_open: whether to include closed discussions or not.
'''
# Only fetch required fields for discussion filtering (id and slug)
    # Greatly improves performance and memory usage
datasets = Dataset.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
reuses = Reuse.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
qs = Discussion.objects(subject__in=list(datasets) + list(reuses))
if only_open:
qs = qs(closed__exists=False)
return qs
|
[
"def",
"discussions_for",
"(",
"user",
",",
"only_open",
"=",
"True",
")",
":",
"# Only fetch required fields for discussion filtering (id and slug)",
"# Greatly improve performances and memory usage",
"datasets",
"=",
"Dataset",
".",
"objects",
".",
"owned_by",
"(",
"user",
".",
"id",
",",
"*",
"user",
".",
"organizations",
")",
".",
"only",
"(",
"'id'",
",",
"'slug'",
")",
"reuses",
"=",
"Reuse",
".",
"objects",
".",
"owned_by",
"(",
"user",
".",
"id",
",",
"*",
"user",
".",
"organizations",
")",
".",
"only",
"(",
"'id'",
",",
"'slug'",
")",
"qs",
"=",
"Discussion",
".",
"objects",
"(",
"subject__in",
"=",
"list",
"(",
"datasets",
")",
"+",
"list",
"(",
"reuses",
")",
")",
"if",
"only_open",
":",
"qs",
"=",
"qs",
"(",
"closed__exists",
"=",
"False",
")",
"return",
"qs"
] |
Build a queryset to query discussions related to a given user's assets.
It includes discussions coming from the user's organizations.
:param bool only_open: whether to include closed discussions or not.
|
[
"Build",
"a",
"queryset",
"to",
"query",
"discussions",
"related",
"to",
"a",
"given",
"user",
"s",
"assets",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/actions.py#L9-L25
|
14,343
|
opendatateam/udata
|
udata/frontend/markdown.py
|
nofollow_callback
|
def nofollow_callback(attrs, new=False):
"""
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
    That callback is not split in order to parse the URL only once.
"""
parsed_url = urlparse(attrs[(None, 'href')])
if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
scheme='https' if request.is_secure else 'http',
netloc=current_app.config['SERVER_NAME'],
path=parsed_url.path)
return attrs
else:
rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
if 'nofollow' not in [x.lower() for x in rel]:
rel.append('nofollow')
attrs[(None, 'rel')] = ' '.join(rel)
return attrs
|
python
|
def nofollow_callback(attrs, new=False):
"""
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
    That callback is not split in order to parse the URL only once.
"""
parsed_url = urlparse(attrs[(None, 'href')])
if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
scheme='https' if request.is_secure else 'http',
netloc=current_app.config['SERVER_NAME'],
path=parsed_url.path)
return attrs
else:
rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
if 'nofollow' not in [x.lower() for x in rel]:
rel.append('nofollow')
attrs[(None, 'rel')] = ' '.join(rel)
return attrs
|
[
"def",
"nofollow_callback",
"(",
"attrs",
",",
"new",
"=",
"False",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"attrs",
"[",
"(",
"None",
",",
"'href'",
")",
"]",
")",
"if",
"parsed_url",
".",
"netloc",
"in",
"(",
"''",
",",
"current_app",
".",
"config",
"[",
"'SERVER_NAME'",
"]",
")",
":",
"attrs",
"[",
"(",
"None",
",",
"'href'",
")",
"]",
"=",
"'{scheme}://{netloc}{path}'",
".",
"format",
"(",
"scheme",
"=",
"'https'",
"if",
"request",
".",
"is_secure",
"else",
"'http'",
",",
"netloc",
"=",
"current_app",
".",
"config",
"[",
"'SERVER_NAME'",
"]",
",",
"path",
"=",
"parsed_url",
".",
"path",
")",
"return",
"attrs",
"else",
":",
"rel",
"=",
"[",
"x",
"for",
"x",
"in",
"attrs",
".",
"get",
"(",
"(",
"None",
",",
"'rel'",
")",
",",
"''",
")",
".",
"split",
"(",
"' '",
")",
"if",
"x",
"]",
"if",
"'nofollow'",
"not",
"in",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"rel",
"]",
":",
"rel",
".",
"append",
"(",
"'nofollow'",
")",
"attrs",
"[",
"(",
"None",
",",
"'rel'",
")",
"]",
"=",
"' '",
".",
"join",
"(",
"rel",
")",
"return",
"attrs"
] |
Turn relative links into external ones and avoid `nofollow` for us,
otherwise add `nofollow`.
That callback is not split in order to parse the URL only once.
|
[
"Turn",
"relative",
"links",
"into",
"external",
"ones",
"and",
"avoid",
"nofollow",
"for",
"us"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/markdown.py#L40-L59
|
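A standalone sketch of the branch logic, using the same (namespace, name) attribute keys that bleach's linkifier passes to callbacks; SERVER_NAME is a hypothetical stand-in for the Flask config value, and the scheme is fixed to http instead of reading request.is_secure.

from urllib.parse import urlparse

SERVER_NAME = 'www.example.com'  # hypothetical stand-in for the Flask config value

def nofollow_sketch(attrs):
    parsed_url = urlparse(attrs[(None, 'href')])
    if parsed_url.netloc in ('', SERVER_NAME):
        # Internal link: rewrite as an absolute URL, no nofollow
        attrs[(None, 'href')] = 'http://{0}{1}'.format(SERVER_NAME, parsed_url.path)
        return attrs
    # External link: make sure rel contains nofollow exactly once
    rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
    if 'nofollow' not in [x.lower() for x in rel]:
        rel.append('nofollow')
    attrs[(None, 'rel')] = ' '.join(rel)
    return attrs

assert nofollow_sketch({(None, 'href'): '/datasets/'})[(None, 'href')] == 'http://www.example.com/datasets/'
assert nofollow_sketch({(None, 'href'): 'https://elsewhere.org/'})[(None, 'rel')] == 'nofollow'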
14,344
|
opendatateam/udata
|
udata/frontend/markdown.py
|
bleach_clean
|
def bleach_clean(stream):
"""
Sanitize malicious attempts but keep the `EXCERPT_TOKEN`.
By default, only keeps `bleach.ALLOWED_TAGS`.
"""
return bleach.clean(
stream,
tags=current_app.config['MD_ALLOWED_TAGS'],
attributes=current_app.config['MD_ALLOWED_ATTRIBUTES'],
styles=current_app.config['MD_ALLOWED_STYLES'],
strip_comments=False)
|
python
|
def bleach_clean(stream):
"""
Sanitize malicious attempts but keep the `EXCERPT_TOKEN`.
By default, only keeps `bleach.ALLOWED_TAGS`.
"""
return bleach.clean(
stream,
tags=current_app.config['MD_ALLOWED_TAGS'],
attributes=current_app.config['MD_ALLOWED_ATTRIBUTES'],
styles=current_app.config['MD_ALLOWED_STYLES'],
strip_comments=False)
|
[
"def",
"bleach_clean",
"(",
"stream",
")",
":",
"return",
"bleach",
".",
"clean",
"(",
"stream",
",",
"tags",
"=",
"current_app",
".",
"config",
"[",
"'MD_ALLOWED_TAGS'",
"]",
",",
"attributes",
"=",
"current_app",
".",
"config",
"[",
"'MD_ALLOWED_ATTRIBUTES'",
"]",
",",
"styles",
"=",
"current_app",
".",
"config",
"[",
"'MD_ALLOWED_STYLES'",
"]",
",",
"strip_comments",
"=",
"False",
")"
] |
Sanitize malicious attempts but keep the `EXCERPT_TOKEN`.
By default, only keeps `bleach.ALLOWED_TAGS`.
|
[
"Sanitize",
"malicious",
"attempts",
"but",
"keep",
"the",
"EXCERPT_TOKEN",
".",
"By",
"default",
"only",
"keeps",
"bleach",
".",
"ALLOWED_TAGS",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/markdown.py#L94-L104
|
14,345
|
opendatateam/udata
|
udata/core/badges/commands.py
|
toggle
|
def toggle(path_or_id, badge_kind):
'''Toggle a `badge_kind` for a given `path_or_id`
The `path_or_id` is either an id, a slug or a file containing a list
of ids or slugs.
'''
if exists(path_or_id):
with open(path_or_id) as open_file:
for id_or_slug in open_file.readlines():
toggle_badge(id_or_slug.strip(), badge_kind)
else:
toggle_badge(path_or_id, badge_kind)
|
python
|
def toggle(path_or_id, badge_kind):
'''Toggle a `badge_kind` for a given `path_or_id`
The `path_or_id` is either an id, a slug or a file containing a list
of ids or slugs.
'''
if exists(path_or_id):
with open(path_or_id) as open_file:
for id_or_slug in open_file.readlines():
toggle_badge(id_or_slug.strip(), badge_kind)
else:
toggle_badge(path_or_id, badge_kind)
|
[
"def",
"toggle",
"(",
"path_or_id",
",",
"badge_kind",
")",
":",
"if",
"exists",
"(",
"path_or_id",
")",
":",
"with",
"open",
"(",
"path_or_id",
")",
"as",
"open_file",
":",
"for",
"id_or_slug",
"in",
"open_file",
".",
"readlines",
"(",
")",
":",
"toggle_badge",
"(",
"id_or_slug",
".",
"strip",
"(",
")",
",",
"badge_kind",
")",
"else",
":",
"toggle_badge",
"(",
"path_or_id",
",",
"badge_kind",
")"
] |
Toggle a `badge_kind` for a given `path_or_id`
The `path_or_id` is either an id, a slug or a file containing a list
of ids or slugs.
|
[
"Toggle",
"a",
"badge_kind",
"for",
"a",
"given",
"path_or_id"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/commands.py#L35-L46
|
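The dispatch between "argument is a file" and "argument is an identifier" is worth a tiny sketch; toggle_badge below is a hypothetical stub standing in for the real action.

from os.path import exists

def toggle_badge(id_or_slug, badge_kind):
    # Hypothetical stub for the real action
    print('toggling', badge_kind, 'on', id_or_slug)

def toggle(path_or_id, badge_kind):
    if exists(path_or_id):
        # Argument is a file: one id or slug per line
        with open(path_or_id) as open_file:
            for line in open_file:
                if line.strip():
                    toggle_badge(line.strip(), badge_kind)
    else:
        # Argument is a single id or slug
        toggle_badge(path_or_id, badge_kind)

toggle('my-dataset', 'certified')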
14,346
|
opendatateam/udata
|
udata/core/storages/views.py
|
upload
|
def upload(name):
'''Handle upload on POST if authorized.'''
storage = fs.by_name(name)
return jsonify(success=True, **handle_upload(storage))
|
python
|
def upload(name):
'''Handle upload on POST if authorized.'''
storage = fs.by_name(name)
return jsonify(success=True, **handle_upload(storage))
|
[
"def",
"upload",
"(",
"name",
")",
":",
"storage",
"=",
"fs",
".",
"by_name",
"(",
"name",
")",
"return",
"jsonify",
"(",
"success",
"=",
"True",
",",
"*",
"*",
"handle_upload",
"(",
"storage",
")",
")"
] |
Handle upload on POST if authorized.
|
[
"Handle",
"upload",
"on",
"POST",
"if",
"authorized",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/storages/views.py#L22-L25
|
14,347
|
opendatateam/udata
|
udata/search/__init__.py
|
unindex_model_on_delete
|
def unindex_model_on_delete(sender, document, **kwargs):
'''Unindex Mongo document on post_delete'''
if current_app.config.get('AUTO_INDEX'):
unindex.delay(document)
|
python
|
def unindex_model_on_delete(sender, document, **kwargs):
'''Unindex Mongo document on post_delete'''
if current_app.config.get('AUTO_INDEX'):
unindex.delay(document)
|
[
"def",
"unindex_model_on_delete",
"(",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"current_app",
".",
"config",
".",
"get",
"(",
"'AUTO_INDEX'",
")",
":",
"unindex",
".",
"delay",
"(",
"document",
")"
] |
Unindex Mongo document on post_delete
|
[
"Unindex",
"Mongo",
"document",
"on",
"post_delete"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/__init__.py#L167-L170
|
14,348
|
opendatateam/udata
|
udata/search/__init__.py
|
register
|
def register(adapter):
'''Register a search adapter'''
# register the class in the catalog
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
# Automatically (re|un)index objects on save/delete
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter
|
python
|
def register(adapter):
'''Register a search adapter'''
# register the class in the catalog
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
# Automatically (re|un)index objects on save/delete
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter
|
[
"def",
"register",
"(",
"adapter",
")",
":",
"# register the class in the catalog",
"if",
"adapter",
".",
"model",
"and",
"adapter",
".",
"model",
"not",
"in",
"adapter_catalog",
":",
"adapter_catalog",
"[",
"adapter",
".",
"model",
"]",
"=",
"adapter",
"# Automatically (re|un)index objects on save/delete",
"post_save",
".",
"connect",
"(",
"reindex_model_on_save",
",",
"sender",
"=",
"adapter",
".",
"model",
")",
"post_delete",
".",
"connect",
"(",
"unindex_model_on_delete",
",",
"sender",
"=",
"adapter",
".",
"model",
")",
"return",
"adapter"
] |
Register a search adapter
|
[
"Register",
"a",
"search",
"adapter"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/__init__.py#L173-L181
|
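The save/delete hooks are plain signal connections. A self-contained sketch with blinker itself (mongoengine's signals are built on it); the Dataset class and the receiver are hypothetical.

from blinker import signal

post_save = signal('post_save')

class Dataset(object):
    pass

def reindex_model_on_save(sender, document=None, **kwargs):
    # In udata this would enqueue a reindex task for the document
    print('reindexing', document)

# Connecting with sender=Dataset restricts the receiver to this model only
post_save.connect(reindex_model_on_save, sender=Dataset)
post_save.send(Dataset, document='dataset-1')  # triggers reindex_model_on_save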
14,349
|
opendatateam/udata
|
udata/forms/__init__.py
|
CommonFormMixin.process
|
def process(self, formdata=None, obj=None, data=None, **kwargs):
'''Wrap the process method to store the current object instance'''
self._obj = obj
super(CommonFormMixin, self).process(formdata, obj, data, **kwargs)
|
python
|
def process(self, formdata=None, obj=None, data=None, **kwargs):
'''Wrap the process method to store the current object instance'''
self._obj = obj
super(CommonFormMixin, self).process(formdata, obj, data, **kwargs)
|
[
"def",
"process",
"(",
"self",
",",
"formdata",
"=",
"None",
",",
"obj",
"=",
"None",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_obj",
"=",
"obj",
"super",
"(",
"CommonFormMixin",
",",
"self",
")",
".",
"process",
"(",
"formdata",
",",
"obj",
",",
"data",
",",
"*",
"*",
"kwargs",
")"
] |
Wrap the process method to store the current object instance
|
[
"Wrap",
"the",
"process",
"method",
"to",
"store",
"the",
"current",
"object",
"instance"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/__init__.py#L19-L22
|
14,350
|
opendatateam/udata
|
udata/linkchecker/backends.py
|
get
|
def get(name):
'''Get a linkchecker given its name or fallback on default'''
linkcheckers = get_enabled(ENTRYPOINT, current_app)
linkcheckers.update(no_check=NoCheckLinkchecker) # no_check always enabled
selected_linkchecker = linkcheckers.get(name)
if not selected_linkchecker:
default_linkchecker = current_app.config.get(
'LINKCHECKING_DEFAULT_LINKCHECKER')
selected_linkchecker = linkcheckers.get(default_linkchecker)
if not selected_linkchecker:
log.error('No linkchecker found ({} requested and no fallback)'.format(
name))
return selected_linkchecker
|
python
|
def get(name):
'''Get a linkchecker given its name or fallback on default'''
linkcheckers = get_enabled(ENTRYPOINT, current_app)
linkcheckers.update(no_check=NoCheckLinkchecker) # no_check always enabled
selected_linkchecker = linkcheckers.get(name)
if not selected_linkchecker:
default_linkchecker = current_app.config.get(
'LINKCHECKING_DEFAULT_LINKCHECKER')
selected_linkchecker = linkcheckers.get(default_linkchecker)
if not selected_linkchecker:
log.error('No linkchecker found ({} requested and no fallback)'.format(
name))
return selected_linkchecker
|
[
"def",
"get",
"(",
"name",
")",
":",
"linkcheckers",
"=",
"get_enabled",
"(",
"ENTRYPOINT",
",",
"current_app",
")",
"linkcheckers",
".",
"update",
"(",
"no_check",
"=",
"NoCheckLinkchecker",
")",
"# no_check always enabled",
"selected_linkchecker",
"=",
"linkcheckers",
".",
"get",
"(",
"name",
")",
"if",
"not",
"selected_linkchecker",
":",
"default_linkchecker",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'LINKCHECKING_DEFAULT_LINKCHECKER'",
")",
"selected_linkchecker",
"=",
"linkcheckers",
".",
"get",
"(",
"default_linkchecker",
")",
"if",
"not",
"selected_linkchecker",
":",
"log",
".",
"error",
"(",
"'No linkchecker found ({} requested and no fallback)'",
".",
"format",
"(",
"name",
")",
")",
"return",
"selected_linkchecker"
] |
Get a linkchecker given its name or fallback on default
|
[
"Get",
"a",
"linkchecker",
"given",
"its",
"name",
"or",
"fallback",
"on",
"default"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/linkchecker/backends.py#L28-L40
|
14,351
|
opendatateam/udata
|
udata/features/notifications/actions.py
|
get_notifications
|
def get_notifications(user):
    '''List notifications for a given user'''
notifications = []
for name, func in _providers.items():
notifications.extend([{
'type': name,
'created_on': dt,
'details': details
} for dt, details in func(user)])
return notifications
|
python
|
def get_notifications(user):
    '''List notifications for a given user'''
notifications = []
for name, func in _providers.items():
notifications.extend([{
'type': name,
'created_on': dt,
'details': details
} for dt, details in func(user)])
return notifications
|
[
"def",
"get_notifications",
"(",
"user",
")",
":",
"notifications",
"=",
"[",
"]",
"for",
"name",
",",
"func",
"in",
"_providers",
".",
"items",
"(",
")",
":",
"notifications",
".",
"extend",
"(",
"[",
"{",
"'type'",
":",
"name",
",",
"'created_on'",
":",
"dt",
",",
"'details'",
":",
"details",
"}",
"for",
"dt",
",",
"details",
"in",
"func",
"(",
"user",
")",
"]",
")",
"return",
"notifications"
] |
List notifications for a given user
|
[
"List",
"notification",
"for",
"a",
"given",
"user"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/notifications/actions.py#L29-L40
|
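How a provider plugs in is easiest to see end to end; the registry, decorator and provider below are hypothetical stand-ins, but the (created_on, details) tuples and the merged dict shape match the function above.

from datetime import datetime

_providers = {}  # hypothetical registry; udata fills it via a decorator

def notification_provider(name):
    def wrapper(func):
        _providers[name] = func
        return func
    return wrapper

@notification_provider('validate_harvester')
def pending_sources(user):
    # Each provider yields (created_on, details) tuples
    return [(datetime(2019, 1, 1), {'id': 'abc', 'name': 'Pending source'})]

def get_notifications(user):
    notifications = []
    for name, func in _providers.items():
        notifications.extend([{'type': name, 'created_on': dt, 'details': details}
                              for dt, details in func(user)])
    return notifications

print(get_notifications(user=None))  # [{'type': 'validate_harvester', ...}]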
14,352
|
opendatateam/udata
|
udata/core/tags/tasks.py
|
count_tags
|
def count_tags(self):
    '''Count tag occurrences by type and update the tag collection'''
for key, model in TAGGED.items():
collection = '{0}_tags'.format(key)
results = (model.objects(tags__exists=True)
.map_reduce(map_tags, reduce_tags, collection))
for result in results:
tag, created = Tag.objects.get_or_create(name=result.key,
auto_save=False)
tag.counts[key] = int(result.value) if result.value else 0
tag.save()
|
python
|
def count_tags(self):
    '''Count tag occurrences by type and update the tag collection'''
for key, model in TAGGED.items():
collection = '{0}_tags'.format(key)
results = (model.objects(tags__exists=True)
.map_reduce(map_tags, reduce_tags, collection))
for result in results:
tag, created = Tag.objects.get_or_create(name=result.key,
auto_save=False)
tag.counts[key] = int(result.value) if result.value else 0
tag.save()
|
[
"def",
"count_tags",
"(",
"self",
")",
":",
"for",
"key",
",",
"model",
"in",
"TAGGED",
".",
"items",
"(",
")",
":",
"collection",
"=",
"'{0}_tags'",
".",
"format",
"(",
"key",
")",
"results",
"=",
"(",
"model",
".",
"objects",
"(",
"tags__exists",
"=",
"True",
")",
".",
"map_reduce",
"(",
"map_tags",
",",
"reduce_tags",
",",
"collection",
")",
")",
"for",
"result",
"in",
"results",
":",
"tag",
",",
"created",
"=",
"Tag",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"result",
".",
"key",
",",
"auto_save",
"=",
"False",
")",
"tag",
".",
"counts",
"[",
"key",
"]",
"=",
"int",
"(",
"result",
".",
"value",
")",
"if",
"result",
".",
"value",
"else",
"0",
"tag",
".",
"save",
"(",
")"
] |
Count tag occurrences by type and update the tag collection
|
[
"Count",
"tag",
"occurences",
"by",
"type",
"and",
"update",
"the",
"tag",
"collection"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/tags/tasks.py#L38-L48
|
14,353
|
opendatateam/udata
|
udata/search/adapter.py
|
ModelSearchAdapter.from_model
|
def from_model(cls, document):
"""By default use the ``to_dict`` method
and exclude ``_id``, ``_cls`` and ``owner`` fields
"""
return cls(meta={'id': document.id}, **cls.serialize(document))
|
python
|
def from_model(cls, document):
"""By default use the ``to_dict`` method
and exclude ``_id``, ``_cls`` and ``owner`` fields
"""
return cls(meta={'id': document.id}, **cls.serialize(document))
|
[
"def",
"from_model",
"(",
"cls",
",",
"document",
")",
":",
"return",
"cls",
"(",
"meta",
"=",
"{",
"'id'",
":",
"document",
".",
"id",
"}",
",",
"*",
"*",
"cls",
".",
"serialize",
"(",
"document",
")",
")"
] |
By default use the ``to_dict`` method
and exclude ``_id``, ``_cls`` and ``owner`` fields
|
[
"By",
"default",
"use",
"the",
"to_dict",
"method"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/adapter.py#L38-L43
|
14,354
|
opendatateam/udata
|
udata/search/adapter.py
|
ModelSearchAdapter.completer_tokenize
|
def completer_tokenize(cls, value, min_length=3):
'''Quick and dirty tokenizer for completion suggester'''
tokens = list(itertools.chain(*[
[m for m in n.split("'") if len(m) > min_length]
for n in value.split(' ')
]))
return list(set([value] + tokens + [' '.join(tokens)]))
|
python
|
def completer_tokenize(cls, value, min_length=3):
'''Quick and dirty tokenizer for completion suggester'''
tokens = list(itertools.chain(*[
[m for m in n.split("'") if len(m) > min_length]
for n in value.split(' ')
]))
return list(set([value] + tokens + [' '.join(tokens)]))
|
[
"def",
"completer_tokenize",
"(",
"cls",
",",
"value",
",",
"min_length",
"=",
"3",
")",
":",
"tokens",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"[",
"[",
"m",
"for",
"m",
"in",
"n",
".",
"split",
"(",
"\"'\"",
")",
"if",
"len",
"(",
"m",
")",
">",
"min_length",
"]",
"for",
"n",
"in",
"value",
".",
"split",
"(",
"' '",
")",
"]",
")",
")",
"return",
"list",
"(",
"set",
"(",
"[",
"value",
"]",
"+",
"tokens",
"+",
"[",
"' '",
".",
"join",
"(",
"tokens",
")",
"]",
")",
")"
] |
Quick and dirty tokenizer for completion suggester
|
[
"Quick",
"and",
"dirty",
"tokenizer",
"for",
"completion",
"suggester"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/adapter.py#L54-L60
|
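The tokenizer is pure string handling, so its behaviour is easy to pin down with a copy and a concrete input; note that min_length=3 actually keeps tokens strictly longer than 3 characters.

import itertools

def completer_tokenize(value, min_length=3):
    # Split on spaces, then on apostrophes, keeping pieces longer than min_length
    tokens = list(itertools.chain(*[
        [m for m in n.split("'") if len(m) > min_length]
        for n in value.split(' ')
    ]))
    return list(set([value] + tokens + [' '.join(tokens)]))

result = set(completer_tokenize("Ministère de l'Intérieur"))
# 'de' and the leading 'l' are dropped; the full phrase is always kept
assert result == {"Ministère de l'Intérieur", 'Ministère', 'Intérieur', 'Ministère Intérieur'}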
14,355
|
opendatateam/udata
|
udata/search/adapter.py
|
ModelSearchAdapter.facet_search
|
def facet_search(cls, *facets):
'''
Build a FacetSearch for a given list of facets
    Elasticsearch DSL doesn't allow listing facets
    once and for all and then selecting them later.
    They are always all requested.
As we don't use them every time and facet computation
can take some time, we build the FacetedSearch
dynamically with only those requested.
'''
f = dict((k, v) for k, v in cls.facets.items() if k in facets)
class TempSearch(SearchQuery):
adapter = cls
analyzer = cls.analyzer
boosters = cls.boosters
doc_types = cls
facets = f
fields = cls.fields
fuzzy = cls.fuzzy
match_type = cls.match_type
model = cls.model
return TempSearch
|
python
|
def facet_search(cls, *facets):
'''
Build a FacetSearch for a given list of facets
    Elasticsearch DSL doesn't allow listing facets
    once and for all and then selecting them later.
    They are always all requested.
As we don't use them every time and facet computation
can take some time, we build the FacetedSearch
dynamically with only those requested.
'''
f = dict((k, v) for k, v in cls.facets.items() if k in facets)
class TempSearch(SearchQuery):
adapter = cls
analyzer = cls.analyzer
boosters = cls.boosters
doc_types = cls
facets = f
fields = cls.fields
fuzzy = cls.fuzzy
match_type = cls.match_type
model = cls.model
return TempSearch
|
[
"def",
"facet_search",
"(",
"cls",
",",
"*",
"facets",
")",
":",
"f",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"cls",
".",
"facets",
".",
"items",
"(",
")",
"if",
"k",
"in",
"facets",
")",
"class",
"TempSearch",
"(",
"SearchQuery",
")",
":",
"adapter",
"=",
"cls",
"analyzer",
"=",
"cls",
".",
"analyzer",
"boosters",
"=",
"cls",
".",
"boosters",
"doc_types",
"=",
"cls",
"facets",
"=",
"f",
"fields",
"=",
"cls",
".",
"fields",
"fuzzy",
"=",
"cls",
".",
"fuzzy",
"match_type",
"=",
"cls",
".",
"match_type",
"model",
"=",
"cls",
".",
"model",
"return",
"TempSearch"
] |
Build a FacetSearch for a given list of facets
Elasticsearch DSL doesn't allow listing facets
once and for all and then selecting them later.
They are always all requested.
As we don't use them every time and facet computation
can take some time, we build the FacetedSearch
dynamically with only those requested.
|
[
"Build",
"a",
"FacetSearch",
"for",
"a",
"given",
"list",
"of",
"facets"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/adapter.py#L63-L88
|
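The pattern of building a throwaway class carrying only the requested facets can be shown without Elasticsearch; the type() call below is a hypothetical shorthand for the inline class statement used above, and the facet values are placeholders.

class SearchQuery(object):
    facets = {}

ALL_FACETS = {'tag': 'tag-facet', 'organization': 'org-facet', 'license': 'license-facet'}

def facet_search(*facets):
    # Keep only the requested facets so unused ones are never computed
    selected = dict((k, v) for k, v in ALL_FACETS.items() if k in facets)
    return type('TempSearch', (SearchQuery,), {'facets': selected})

TempSearch = facet_search('tag', 'license')
assert sorted(TempSearch.facets) == ['license', 'tag']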
14,356
|
opendatateam/udata
|
udata/models/slug_fields.py
|
populate_slug
|
def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def exists(s):
return qs(
class_check=False, **{field.db_field: s}
).limit(1).count(True) > 0
while exists(slug):
slug = '{0}-{1}'.format(base_slug, index)
index += 1
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug
|
python
|
def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def exists(s):
return qs(
class_check=False, **{field.db_field: s}
).limit(1).count(True) > 0
while exists(slug):
slug = '{0}-{1}'.format(base_slug, index)
index += 1
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug
|
[
"def",
"populate_slug",
"(",
"instance",
",",
"field",
")",
":",
"value",
"=",
"getattr",
"(",
"instance",
",",
"field",
".",
"db_field",
")",
"try",
":",
"previous",
"=",
"instance",
".",
"__class__",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"instance",
".",
"id",
")",
"except",
"Exception",
":",
"previous",
"=",
"None",
"# Field value has changed",
"changed",
"=",
"field",
".",
"db_field",
"in",
"instance",
".",
"_get_changed_fields",
"(",
")",
"# Field initial value has been manually set",
"manual",
"=",
"not",
"previous",
"and",
"value",
"or",
"changed",
"if",
"not",
"manual",
"and",
"field",
".",
"populate_from",
":",
"# value to slugify is extracted from populate_from parameter",
"value",
"=",
"getattr",
"(",
"instance",
",",
"field",
".",
"populate_from",
")",
"if",
"previous",
"and",
"value",
"==",
"getattr",
"(",
"previous",
",",
"field",
".",
"populate_from",
")",
":",
"return",
"value",
"if",
"previous",
"and",
"getattr",
"(",
"previous",
",",
"field",
".",
"db_field",
")",
"==",
"value",
":",
"# value is unchanged from DB",
"return",
"value",
"if",
"previous",
"and",
"not",
"changed",
"and",
"not",
"field",
".",
"update",
":",
"# Field is not manually set and slug should not update on change",
"return",
"value",
"slug",
"=",
"field",
".",
"slugify",
"(",
"value",
")",
"# This can happen when serializing an object which does not contain",
"# the properties used to generate the slug. Typically, when such",
"# an object is passed to one of the Celery workers (see issue #20).",
"if",
"slug",
"is",
"None",
":",
"return",
"old_slug",
"=",
"getattr",
"(",
"previous",
",",
"field",
".",
"db_field",
",",
"None",
")",
"if",
"slug",
"==",
"old_slug",
":",
"return",
"slug",
"# Ensure uniqueness",
"if",
"field",
".",
"unique",
":",
"base_slug",
"=",
"slug",
"index",
"=",
"1",
"qs",
"=",
"instance",
".",
"__class__",
".",
"objects",
"if",
"previous",
":",
"qs",
"=",
"qs",
"(",
"id__ne",
"=",
"previous",
".",
"id",
")",
"def",
"exists",
"(",
"s",
")",
":",
"return",
"qs",
"(",
"class_check",
"=",
"False",
",",
"*",
"*",
"{",
"field",
".",
"db_field",
":",
"s",
"}",
")",
".",
"limit",
"(",
"1",
")",
".",
"count",
"(",
"True",
")",
">",
"0",
"while",
"exists",
"(",
"slug",
")",
":",
"slug",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"base_slug",
",",
"index",
")",
"index",
"+=",
"1",
"# Track old slugs for this class",
"if",
"field",
".",
"follow",
"and",
"old_slug",
"!=",
"slug",
":",
"ns",
"=",
"instance",
".",
"__class__",
".",
"__name__",
"# Destroy redirections from this new slug",
"SlugFollow",
".",
"objects",
"(",
"namespace",
"=",
"ns",
",",
"old_slug",
"=",
"slug",
")",
".",
"delete",
"(",
")",
"if",
"old_slug",
":",
"# Create a redirect for previous slug",
"slug_follower",
",",
"created",
"=",
"SlugFollow",
".",
"objects",
".",
"get_or_create",
"(",
"namespace",
"=",
"ns",
",",
"old_slug",
"=",
"old_slug",
",",
"auto_save",
"=",
"False",
",",
")",
"slug_follower",
".",
"new_slug",
"=",
"slug",
"slug_follower",
".",
"save",
"(",
")",
"# Maintain previous redirects",
"SlugFollow",
".",
"objects",
"(",
"namespace",
"=",
"ns",
",",
"new_slug",
"=",
"old_slug",
")",
".",
"update",
"(",
"new_slug",
"=",
"slug",
")",
"setattr",
"(",
"instance",
",",
"field",
".",
"db_field",
",",
"slug",
")",
"return",
"slug"
] |
Populate a slug field if needed.
|
[
"Populate",
"a",
"slug",
"field",
"if",
"needed",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L113-L193
|
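The uniqueness loop is the heart of populate_slug; a sketch against an in-memory set of taken slugs, standing in for the exists() queryset probe, shows the '-1', '-2' suffixing.

taken = {'my-dataset', 'my-dataset-1'}  # slugs already in the collection (hypothetical)

def unique_slug(base_slug, exists):
    # Append an increasing numeric suffix until the slug is free
    slug, index = base_slug, 1
    while exists(slug):
        slug = '{0}-{1}'.format(base_slug, index)
        index += 1
    return slug

assert unique_slug('my-dataset', lambda s: s in taken) == 'my-dataset-2'
assert unique_slug('fresh-slug', lambda s: s in taken) == 'fresh-slug'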
14,357
|
opendatateam/udata
|
udata/models/slug_fields.py
|
SlugField.slugify
|
def slugify(self, value):
'''
Apply slugification according to specified field rules
'''
if value is None:
return
return slugify.slugify(value, max_length=self.max_length,
separator=self.separator,
to_lower=self.lower_case)
|
python
|
def slugify(self, value):
'''
Apply slugification according to specified field rules
'''
if value is None:
return
return slugify.slugify(value, max_length=self.max_length,
separator=self.separator,
to_lower=self.lower_case)
|
[
"def",
"slugify",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"return",
"slugify",
".",
"slugify",
"(",
"value",
",",
"max_length",
"=",
"self",
".",
"max_length",
",",
"separator",
"=",
"self",
".",
"separator",
",",
"to_lower",
"=",
"self",
".",
"lower_case",
")"
] |
Apply slugification according to specified field rules
|
[
"Apply",
"slugification",
"according",
"to",
"specified",
"field",
"rules"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L55-L64
|
14,358
|
opendatateam/udata
|
udata/models/slug_fields.py
|
SlugField.cleanup_on_delete
|
def cleanup_on_delete(self, sender, document, **kwargs):
'''
Clean up slug redirections on object deletion
'''
if not self.follow or sender is not self.owner_document:
return
slug = getattr(document, self.db_field)
namespace = self.owner_document.__name__
SlugFollow.objects(namespace=namespace, new_slug=slug).delete()
|
python
|
def cleanup_on_delete(self, sender, document, **kwargs):
'''
Clean up slug redirections on object deletion
'''
if not self.follow or sender is not self.owner_document:
return
slug = getattr(document, self.db_field)
namespace = self.owner_document.__name__
SlugFollow.objects(namespace=namespace, new_slug=slug).delete()
|
[
"def",
"cleanup_on_delete",
"(",
"self",
",",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"follow",
"or",
"sender",
"is",
"not",
"self",
".",
"owner_document",
":",
"return",
"slug",
"=",
"getattr",
"(",
"document",
",",
"self",
".",
"db_field",
")",
"namespace",
"=",
"self",
".",
"owner_document",
".",
"__name__",
"SlugFollow",
".",
"objects",
"(",
"namespace",
"=",
"namespace",
",",
"new_slug",
"=",
"slug",
")",
".",
"delete",
"(",
")"
] |
Clean up slug redirections on object deletion
|
[
"Clean",
"up",
"slug",
"redirections",
"on",
"object",
"deletion"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L76-L84
|
14,359
|
opendatateam/udata
|
udata/core/badges/forms.py
|
badge_form
|
def badge_form(model):
'''A form factory for a given model badges'''
class BadgeForm(ModelForm):
model_class = Badge
kind = fields.RadioField(
_('Kind'), [validators.DataRequired()],
choices=model.__badges__.items(),
description=_('Kind of badge (certified, etc)'))
return BadgeForm
|
python
|
def badge_form(model):
'''A form factory for a given model badges'''
class BadgeForm(ModelForm):
model_class = Badge
kind = fields.RadioField(
_('Kind'), [validators.DataRequired()],
choices=model.__badges__.items(),
description=_('Kind of badge (certified, etc)'))
return BadgeForm
|
[
"def",
"badge_form",
"(",
"model",
")",
":",
"class",
"BadgeForm",
"(",
"ModelForm",
")",
":",
"model_class",
"=",
"Badge",
"kind",
"=",
"fields",
".",
"RadioField",
"(",
"_",
"(",
"'Kind'",
")",
",",
"[",
"validators",
".",
"DataRequired",
"(",
")",
"]",
",",
"choices",
"=",
"model",
".",
"__badges__",
".",
"items",
"(",
")",
",",
"description",
"=",
"_",
"(",
"'Kind of badge (certified, etc)'",
")",
")",
"return",
"BadgeForm"
] |
A form factory for a given model badges
|
[
"A",
"form",
"factory",
"for",
"a",
"given",
"model",
"badges"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/forms.py#L11-L21
|
14,360
|
opendatateam/udata
|
udata/core/jobs/actions.py
|
delay
|
def delay(name, args, kwargs):
'''Run a job asynchronously'''
args = args or []
kwargs = dict(k.split() for k in kwargs) if kwargs else {}
if name not in celery.tasks:
        log.error('Job %s not found', name)
        return
job = celery.tasks[name]
log.info('Sending job %s', name)
async_result = job.delay(*args, **kwargs)
    log.info('Job %s sent to workers', async_result.id)
|
python
|
def delay(name, args, kwargs):
'''Run a job asynchronously'''
args = args or []
kwargs = dict(k.split() for k in kwargs) if kwargs else {}
if name not in celery.tasks:
        log.error('Job %s not found', name)
        return
job = celery.tasks[name]
log.info('Sending job %s', name)
async_result = job.delay(*args, **kwargs)
log.info('Job %s sended to workers', async_result.id)
|
[
"def",
"delay",
"(",
"name",
",",
"args",
",",
"kwargs",
")",
":",
"args",
"=",
"args",
"or",
"[",
"]",
"kwargs",
"=",
"dict",
"(",
"k",
".",
"split",
"(",
")",
"for",
"k",
"in",
"kwargs",
")",
"if",
"kwargs",
"else",
"{",
"}",
"if",
"name",
"not",
"in",
"celery",
".",
"tasks",
":",
"log",
".",
"error",
"(",
"'Job %s not found'",
",",
"name",
")",
"job",
"=",
"celery",
".",
"tasks",
"[",
"name",
"]",
"log",
".",
"info",
"(",
"'Sending job %s'",
",",
"name",
")",
"async_result",
"=",
"job",
".",
"delay",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"log",
".",
"info",
"(",
"'Job %s sended to workers'",
",",
"async_result",
".",
"id",
")"
] |
Run a job asynchronously
|
[
"Run",
"a",
"job",
"asynchronously"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/jobs/actions.py#L22-L31
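Two reading notes, then a hedged usage sketch (job name illustrative). Each kwarg is expected as a 'key value' string split on whitespace, so values stay strings; and when the name is unknown the error is logged but execution falls through to celery.tasks[name], which raises KeyError (an early return appears to be missing).

delay('purge-datasets', ['some-arg'], ['timeout 30'])
# -> celery.tasks['purge-datasets'].delay('some-arg', timeout='30')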
|
14,361
|
opendatateam/udata
|
udata/harvest/filters.py
|
is_url
|
def is_url(default_scheme='http', **kwargs):
    """Return a converter that converts a clean string to a URL."""
def converter(value):
if value is None:
return value
if '://' not in value and default_scheme:
value = '://'.join((default_scheme, value.strip()))
try:
return uris.validate(value)
except uris.ValidationError as e:
raise Invalid(e.message)
return converter
|
python
|
def is_url(default_scheme='http', **kwargs):
    """Return a converter that converts a clean string to a URL."""
def converter(value):
if value is None:
return value
if '://' not in value and default_scheme:
value = '://'.join((default_scheme, value.strip()))
try:
return uris.validate(value)
except uris.ValidationError as e:
raise Invalid(e.message)
return converter
|
[
"def",
"is_url",
"(",
"default_scheme",
"=",
"'http'",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"converter",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"if",
"'://'",
"not",
"in",
"value",
"and",
"default_scheme",
":",
"value",
"=",
"'://'",
".",
"join",
"(",
"(",
"default_scheme",
",",
"value",
".",
"strip",
"(",
")",
")",
")",
"try",
":",
"return",
"uris",
".",
"validate",
"(",
"value",
")",
"except",
"uris",
".",
"ValidationError",
"as",
"e",
":",
"raise",
"Invalid",
"(",
"e",
".",
"message",
")",
"return",
"converter"
] |
Return a converter that converts a clean string to a URL.
|
[
"Return",
"a",
"converter",
"that",
"converts",
"a",
"clean",
"string",
"to",
"a",
"URL",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/filters.py#L89-L100
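A hedged usage sketch, assuming uris.validate returns the URL unchanged when it is valid:

to_url = is_url()
to_url('www.data.gouv.fr/datasets')  # -> 'http://www.data.gouv.fr/datasets'
to_url('https://example.com')        # scheme already present, validated as-is
to_url(None)                         # -> None (passed through)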
|
14,362
|
opendatateam/udata
|
udata/harvest/filters.py
|
hash
|
def hash(value):
    '''Detect a hash type'''
if not value:
return
elif len(value) == 32:
type = 'md5'
elif len(value) == 40:
type = 'sha1'
elif len(value) == 64:
type = 'sha256'
else:
return None
return {'type': type, 'value': value}
|
python
|
def hash(value):
    '''Detect a hash type'''
if not value:
return
elif len(value) == 32:
type = 'md5'
elif len(value) == 40:
type = 'sha1'
elif len(value) == 64:
type = 'sha256'
else:
return None
return {'type': type, 'value': value}
|
[
"def",
"hash",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"elif",
"len",
"(",
"value",
")",
"==",
"32",
":",
"type",
"=",
"'md5'",
"elif",
"len",
"(",
"value",
")",
"==",
"40",
":",
"type",
"=",
"'sha1'",
"elif",
"len",
"(",
"value",
")",
"==",
"64",
":",
"type",
"=",
"'sha256'",
"else",
":",
"return",
"None",
"return",
"{",
"'type'",
":",
"type",
",",
"'value'",
":",
"value",
"}"
] |
Detect a hash type
|
[
"Detect",
"a",
"hash",
"type"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/filters.py#L103-L115
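The three lengths are the hexadecimal digest sizes of MD5 (32), SHA-1 (40) and SHA-256 (64); note the function shadows the built-in hash(). For example:

hash('d41d8cd98f00b204e9800998ecf8427e')
# -> {'type': 'md5', 'value': 'd41d8cd98f00b204e9800998ecf8427e'}
hash('da39a3ee5e6b4b0d3255bfef95601890afd80709')  # 40 chars -> sha1
hash('not-a-digest')  # unrecognised length -> None
hash('')              # falsy -> None (implicit bare return)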
|
14,363
|
opendatateam/udata
|
udata/search/commands.py
|
iter_adapters
|
def iter_adapters():
    '''Iterate over adapters in a predictable way'''
adapters = adapter_catalog.values()
return sorted(adapters, key=lambda a: a.model.__name__)
|
python
|
def iter_adapters():
    '''Iterate over adapters in a predictable way'''
adapters = adapter_catalog.values()
return sorted(adapters, key=lambda a: a.model.__name__)
|
[
"def",
"iter_adapters",
"(",
")",
":",
"adapters",
"=",
"adapter_catalog",
".",
"values",
"(",
")",
"return",
"sorted",
"(",
"adapters",
",",
"key",
"=",
"lambda",
"a",
":",
"a",
".",
"model",
".",
"__name__",
")"
] |
Iterate over adapters in a predictable way
|
[
"Iterate",
"over",
"adapters",
"in",
"a",
"predictable",
"way"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L38-L41
|
14,364
|
opendatateam/udata
|
udata/search/commands.py
|
iter_qs
|
def iter_qs(qs, adapter):
'''Safely iterate over a DB QuerySet yielding ES documents'''
for obj in qs.no_cache().no_dereference().timeout(False):
if adapter.is_indexable(obj):
try:
doc = adapter.from_model(obj).to_dict(include_meta=True)
yield doc
except Exception as e:
model = adapter.model.__name__
log.error('Unable to index %s "%s": %s', model, str(obj.id),
str(e), exc_info=True)
|
python
|
def iter_qs(qs, adapter):
'''Safely iterate over a DB QuerySet yielding ES documents'''
for obj in qs.no_cache().no_dereference().timeout(False):
if adapter.is_indexable(obj):
try:
doc = adapter.from_model(obj).to_dict(include_meta=True)
yield doc
except Exception as e:
model = adapter.model.__name__
log.error('Unable to index %s "%s": %s', model, str(obj.id),
str(e), exc_info=True)
|
[
"def",
"iter_qs",
"(",
"qs",
",",
"adapter",
")",
":",
"for",
"obj",
"in",
"qs",
".",
"no_cache",
"(",
")",
".",
"no_dereference",
"(",
")",
".",
"timeout",
"(",
"False",
")",
":",
"if",
"adapter",
".",
"is_indexable",
"(",
"obj",
")",
":",
"try",
":",
"doc",
"=",
"adapter",
".",
"from_model",
"(",
"obj",
")",
".",
"to_dict",
"(",
"include_meta",
"=",
"True",
")",
"yield",
"doc",
"except",
"Exception",
"as",
"e",
":",
"model",
"=",
"adapter",
".",
"model",
".",
"__name__",
"log",
".",
"error",
"(",
"'Unable to index %s \"%s\": %s'",
",",
"model",
",",
"str",
"(",
"obj",
".",
"id",
")",
",",
"str",
"(",
"e",
")",
",",
"exc_info",
"=",
"True",
")"
] |
Safely iterate over a DB QuerySet yielding ES documents
|
[
"Safely",
"iterate",
"over",
"a",
"DB",
"QuerySet",
"yielding",
"ES",
"documents"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L44-L54
|
14,365
|
opendatateam/udata
|
udata/search/commands.py
|
index_model
|
def index_model(index_name, adapter):
    '''Index all objects given a model'''
model = adapter.model
log.info('Indexing {0} objects'.format(model.__name__))
qs = model.objects
if hasattr(model.objects, 'visible'):
qs = qs.visible()
if adapter.exclude_fields:
qs = qs.exclude(*adapter.exclude_fields)
docs = iter_qs(qs, adapter)
docs = iter_for_index(docs, index_name)
for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
if not ok:
log.error('Unable to index %s "%s": %s', model.__name__,
info['index']['_id'], info['index']['error'])
|
python
|
def index_model(index_name, adapter):
    '''Index all objects given a model'''
model = adapter.model
log.info('Indexing {0} objects'.format(model.__name__))
qs = model.objects
if hasattr(model.objects, 'visible'):
qs = qs.visible()
if adapter.exclude_fields:
qs = qs.exclude(*adapter.exclude_fields)
docs = iter_qs(qs, adapter)
docs = iter_for_index(docs, index_name)
for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
if not ok:
log.error('Unable to index %s "%s": %s', model.__name__,
info['index']['_id'], info['index']['error'])
|
[
"def",
"index_model",
"(",
"index_name",
",",
"adapter",
")",
":",
"model",
"=",
"adapter",
".",
"model",
"log",
".",
"info",
"(",
"'Indexing {0} objects'",
".",
"format",
"(",
"model",
".",
"__name__",
")",
")",
"qs",
"=",
"model",
".",
"objects",
"if",
"hasattr",
"(",
"model",
".",
"objects",
",",
"'visible'",
")",
":",
"qs",
"=",
"qs",
".",
"visible",
"(",
")",
"if",
"adapter",
".",
"exclude_fields",
":",
"qs",
"=",
"qs",
".",
"exclude",
"(",
"*",
"adapter",
".",
"exclude_fields",
")",
"docs",
"=",
"iter_qs",
"(",
"qs",
",",
"adapter",
")",
"docs",
"=",
"iter_for_index",
"(",
"docs",
",",
"index_name",
")",
"for",
"ok",
",",
"info",
"in",
"streaming_bulk",
"(",
"es",
".",
"client",
",",
"docs",
",",
"raise_on_error",
"=",
"False",
")",
":",
"if",
"not",
"ok",
":",
"log",
".",
"error",
"(",
"'Unable to index %s \"%s\": %s'",
",",
"model",
".",
"__name__",
",",
"info",
"[",
"'index'",
"]",
"[",
"'_id'",
"]",
",",
"info",
"[",
"'index'",
"]",
"[",
"'error'",
"]",
")"
] |
Index all objects given a model
|
[
"Index",
"all",
"objects",
"given",
"a",
"model"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L64-L80
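iter_for_index is called above but not shown in this record; a plausible sketch, offered as an assumption rather than the actual implementation, is that it pins each bulk action to the target index:

def iter_for_index(docs, index_name):
    # Hypothetical: override the routing metadata produced by
    # to_dict(include_meta=True) so documents land in the new index.
    for doc in docs:
        doc['_index'] = index_name
        yield doc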
|
14,366
|
opendatateam/udata
|
udata/search/commands.py
|
enable_refresh
|
def enable_refresh(index_name):
'''
Enable refresh and force merge. To be used after indexing.
See: https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html#bulk
''' # noqa
refresh_interval = current_app.config['ELASTICSEARCH_REFRESH_INTERVAL']
es.indices.put_settings(index=index_name, body={
'index': {'refresh_interval': refresh_interval}
})
es.indices.forcemerge(index=index_name, request_timeout=30)
|
python
|
def enable_refresh(index_name):
'''
Enable refresh and force merge. To be used after indexing.
See: https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html#bulk
''' # noqa
refresh_interval = current_app.config['ELASTICSEARCH_REFRESH_INTERVAL']
es.indices.put_settings(index=index_name, body={
'index': {'refresh_interval': refresh_interval}
})
es.indices.forcemerge(index=index_name, request_timeout=30)
|
[
"def",
"enable_refresh",
"(",
"index_name",
")",
":",
"# noqa",
"refresh_interval",
"=",
"current_app",
".",
"config",
"[",
"'ELASTICSEARCH_REFRESH_INTERVAL'",
"]",
"es",
".",
"indices",
".",
"put_settings",
"(",
"index",
"=",
"index_name",
",",
"body",
"=",
"{",
"'index'",
":",
"{",
"'refresh_interval'",
":",
"refresh_interval",
"}",
"}",
")",
"es",
".",
"indices",
".",
"forcemerge",
"(",
"index",
"=",
"index_name",
",",
"request_timeout",
"=",
"30",
")"
] |
Enable refresh and force merge. To be used after indexing.
See: https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html#bulk
|
[
"Enable",
"refresh",
"and",
"force",
"merge",
".",
"To",
"be",
"used",
"after",
"indexing",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L96-L106
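Its counterpart disable_refresh (referenced by the index command below but not shown here) presumably mirrors this call; -1 is Elasticsearch's documented value for suspending refresh during bulk indexing. A hedged sketch:

def disable_refresh(index_name):
    # Assumption: same settings API as enable_refresh, with refresh off.
    es.indices.put_settings(index=index_name, body={
        'index': {'refresh_interval': '-1'}
    })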
|
14,367
|
opendatateam/udata
|
udata/search/commands.py
|
set_alias
|
def set_alias(index_name, delete=True):
'''
Properly end an indexation by creating an alias.
Previous alias is deleted if needed.
'''
log.info('Creating alias "{0}" on index "{1}"'.format(
es.index_name, index_name))
if es.indices.exists_alias(name=es.index_name):
alias = es.indices.get_alias(name=es.index_name)
previous_indices = alias.keys()
if index_name not in previous_indices:
es.indices.put_alias(index=index_name, name=es.index_name)
for index in previous_indices:
if index != index_name:
es.indices.delete_alias(index=index, name=es.index_name)
if delete:
es.indices.delete(index=index)
else:
es.indices.put_alias(index=index_name, name=es.index_name)
|
python
|
def set_alias(index_name, delete=True):
'''
Properly end an indexation by creating an alias.
Previous alias is deleted if needed.
'''
log.info('Creating alias "{0}" on index "{1}"'.format(
es.index_name, index_name))
if es.indices.exists_alias(name=es.index_name):
alias = es.indices.get_alias(name=es.index_name)
previous_indices = alias.keys()
if index_name not in previous_indices:
es.indices.put_alias(index=index_name, name=es.index_name)
for index in previous_indices:
if index != index_name:
es.indices.delete_alias(index=index, name=es.index_name)
if delete:
es.indices.delete(index=index)
else:
es.indices.put_alias(index=index_name, name=es.index_name)
|
[
"def",
"set_alias",
"(",
"index_name",
",",
"delete",
"=",
"True",
")",
":",
"log",
".",
"info",
"(",
"'Creating alias \"{0}\" on index \"{1}\"'",
".",
"format",
"(",
"es",
".",
"index_name",
",",
"index_name",
")",
")",
"if",
"es",
".",
"indices",
".",
"exists_alias",
"(",
"name",
"=",
"es",
".",
"index_name",
")",
":",
"alias",
"=",
"es",
".",
"indices",
".",
"get_alias",
"(",
"name",
"=",
"es",
".",
"index_name",
")",
"previous_indices",
"=",
"alias",
".",
"keys",
"(",
")",
"if",
"index_name",
"not",
"in",
"previous_indices",
":",
"es",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"index_name",
",",
"name",
"=",
"es",
".",
"index_name",
")",
"for",
"index",
"in",
"previous_indices",
":",
"if",
"index",
"!=",
"index_name",
":",
"es",
".",
"indices",
".",
"delete_alias",
"(",
"index",
"=",
"index",
",",
"name",
"=",
"es",
".",
"index_name",
")",
"if",
"delete",
":",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"index",
")",
"else",
":",
"es",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"index_name",
",",
"name",
"=",
"es",
".",
"index_name",
")"
] |
Properly end an indexation by creating an alias.
Previous alias is deleted if needed.
|
[
"Properly",
"end",
"an",
"indexation",
"by",
"creating",
"an",
"alias",
".",
"Previous",
"alias",
"is",
"deleted",
"if",
"needed",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L109-L127
|
14,368
|
opendatateam/udata
|
udata/search/commands.py
|
handle_error
|
def handle_error(index_name, keep=False):
'''
Handle errors while indexing.
In case of error, properly log it, remove the index and exit.
If `keep` is `True`, index is not deleted.
'''
# Handle keyboard interrupt
signal.signal(signal.SIGINT, signal.default_int_handler)
signal.signal(signal.SIGTERM, signal.default_int_handler)
has_error = False
try:
yield
except KeyboardInterrupt:
print('') # Proper warning message under the "^C" display
log.warning('Interrupted by signal')
has_error = True
except Exception as e:
log.error(e)
has_error = True
if has_error:
if not keep:
log.info('Removing index %s', index_name)
es.indices.delete(index=index_name)
sys.exit(-1)
|
python
|
def handle_error(index_name, keep=False):
'''
Handle errors while indexing.
In case of error, properly log it, remove the index and exit.
If `keep` is `True`, index is not deleted.
'''
# Handle keyboard interrupt
signal.signal(signal.SIGINT, signal.default_int_handler)
signal.signal(signal.SIGTERM, signal.default_int_handler)
has_error = False
try:
yield
except KeyboardInterrupt:
print('') # Proper warning message under the "^C" display
log.warning('Interrupted by signal')
has_error = True
except Exception as e:
log.error(e)
has_error = True
if has_error:
if not keep:
log.info('Removing index %s', index_name)
es.indices.delete(index=index_name)
sys.exit(-1)
|
[
"def",
"handle_error",
"(",
"index_name",
",",
"keep",
"=",
"False",
")",
":",
"# Handle keyboard interrupt",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"default_int_handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"default_int_handler",
")",
"has_error",
"=",
"False",
"try",
":",
"yield",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"''",
")",
"# Proper warning message under the \"^C\" display",
"log",
".",
"warning",
"(",
"'Interrupted by signal'",
")",
"has_error",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"e",
")",
"has_error",
"=",
"True",
"if",
"has_error",
":",
"if",
"not",
"keep",
":",
"log",
".",
"info",
"(",
"'Removing index %s'",
",",
"index_name",
")",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"index_name",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")"
] |
Handle errors while indexing.
In case of error, properly log it, remove the index and exit.
If `keep` is `True`, index is not deleted.
|
[
"Handle",
"errors",
"while",
"indexing",
".",
"In",
"case",
"of",
"error",
"properly",
"log",
"it",
"remove",
"the",
"index",
"and",
"exit",
".",
"If",
"keep",
"is",
"True",
"index",
"is",
"not",
"deleted",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L131-L154
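The bare yield shows this is used as a context manager; the @contextlib.contextmanager decorator is presumably applied at the definition site, outside the lines captured here. A usage sketch with an illustrative index name:

with handle_error('udata-2018-06-12', keep=False):
    # Any exception (or Ctrl-C) inside the block logs the error,
    # deletes the freshly created index and exits with status -1.
    do_indexing_work()  # hypothetical placeholder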
|
14,369
|
opendatateam/udata
|
udata/search/commands.py
|
index
|
def index(models=None, name=None, force=False, keep=False):
'''
Initialize or rebuild the search index
Models to reindex can optionally be specified as arguments.
If not, all models are reindexed.
'''
index_name = name or default_index_name()
doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
models = [model.lower().rstrip('s') for model in (models or [])]
for model in models:
if model not in doc_types_names:
log.error('Unknown model %s', model)
sys.exit(-1)
log.info('Initiliazing index "{0}"'.format(index_name))
if es.indices.exists(index_name):
if IS_TTY and not force:
msg = 'Index {0} will be deleted, are you sure?'
click.confirm(msg.format(index_name), abort=True)
es.indices.delete(index_name)
es.initialize(index_name)
with handle_error(index_name, keep):
disable_refresh(index_name)
for adapter in iter_adapters():
if not models or adapter.doc_type().lower() in models:
index_model(index_name, adapter)
else:
log.info('Copying {0} objects to the new index'.format(
adapter.model.__name__))
# Need upgrade to Elasticsearch-py 5.0.0 to write:
# es.reindex({
# 'source': {'index': es.index_name, 'type': adapter.doc_type()},
# 'dest': {'index': index_name}
# })
#
# http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
# This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
# triggers a server-side documents copy.
# Instead we use this helper for meant for backward compatibility
# but with poor performance as copy is client-side (scan+bulk)
es_reindex(es.client, es.index_name, index_name, scan_kwargs={
'doc_type': adapter.doc_type()
})
enable_refresh(index_name)
# At this step, we don't want error handler to delete the index
# in case of error
set_alias(index_name, delete=not keep)
|
python
|
def index(models=None, name=None, force=False, keep=False):
'''
Initialize or rebuild the search index
Models to reindex can optionally be specified as arguments.
If not, all models are reindexed.
'''
index_name = name or default_index_name()
doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
models = [model.lower().rstrip('s') for model in (models or [])]
for model in models:
if model not in doc_types_names:
log.error('Unknown model %s', model)
sys.exit(-1)
log.info('Initiliazing index "{0}"'.format(index_name))
if es.indices.exists(index_name):
if IS_TTY and not force:
msg = 'Index {0} will be deleted, are you sure?'
click.confirm(msg.format(index_name), abort=True)
es.indices.delete(index_name)
es.initialize(index_name)
with handle_error(index_name, keep):
disable_refresh(index_name)
for adapter in iter_adapters():
if not models or adapter.doc_type().lower() in models:
index_model(index_name, adapter)
else:
log.info('Copying {0} objects to the new index'.format(
adapter.model.__name__))
# Need upgrade to Elasticsearch-py 5.0.0 to write:
# es.reindex({
# 'source': {'index': es.index_name, 'type': adapter.doc_type()},
# 'dest': {'index': index_name}
# })
#
# http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
# This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
# triggers a server-side documents copy.
# Instead we use this helper for meant for backward compatibility
# but with poor performance as copy is client-side (scan+bulk)
es_reindex(es.client, es.index_name, index_name, scan_kwargs={
'doc_type': adapter.doc_type()
})
enable_refresh(index_name)
# At this step, we don't want error handler to delete the index
# in case of error
set_alias(index_name, delete=not keep)
|
[
"def",
"index",
"(",
"models",
"=",
"None",
",",
"name",
"=",
"None",
",",
"force",
"=",
"False",
",",
"keep",
"=",
"False",
")",
":",
"index_name",
"=",
"name",
"or",
"default_index_name",
"(",
")",
"doc_types_names",
"=",
"[",
"m",
".",
"__name__",
".",
"lower",
"(",
")",
"for",
"m",
"in",
"adapter_catalog",
".",
"keys",
"(",
")",
"]",
"models",
"=",
"[",
"model",
".",
"lower",
"(",
")",
".",
"rstrip",
"(",
"'s'",
")",
"for",
"model",
"in",
"(",
"models",
"or",
"[",
"]",
")",
"]",
"for",
"model",
"in",
"models",
":",
"if",
"model",
"not",
"in",
"doc_types_names",
":",
"log",
".",
"error",
"(",
"'Unknown model %s'",
",",
"model",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"log",
".",
"info",
"(",
"'Initiliazing index \"{0}\"'",
".",
"format",
"(",
"index_name",
")",
")",
"if",
"es",
".",
"indices",
".",
"exists",
"(",
"index_name",
")",
":",
"if",
"IS_TTY",
"and",
"not",
"force",
":",
"msg",
"=",
"'Index {0} will be deleted, are you sure?'",
"click",
".",
"confirm",
"(",
"msg",
".",
"format",
"(",
"index_name",
")",
",",
"abort",
"=",
"True",
")",
"es",
".",
"indices",
".",
"delete",
"(",
"index_name",
")",
"es",
".",
"initialize",
"(",
"index_name",
")",
"with",
"handle_error",
"(",
"index_name",
",",
"keep",
")",
":",
"disable_refresh",
"(",
"index_name",
")",
"for",
"adapter",
"in",
"iter_adapters",
"(",
")",
":",
"if",
"not",
"models",
"or",
"adapter",
".",
"doc_type",
"(",
")",
".",
"lower",
"(",
")",
"in",
"models",
":",
"index_model",
"(",
"index_name",
",",
"adapter",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Copying {0} objects to the new index'",
".",
"format",
"(",
"adapter",
".",
"model",
".",
"__name__",
")",
")",
"# Need upgrade to Elasticsearch-py 5.0.0 to write:",
"# es.reindex({",
"# 'source': {'index': es.index_name, 'type': adapter.doc_type()},",
"# 'dest': {'index': index_name}",
"# })",
"#",
"# http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex",
"# This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)",
"# triggers a server-side documents copy.",
"# Instead we use this helper for meant for backward compatibility",
"# but with poor performance as copy is client-side (scan+bulk)",
"es_reindex",
"(",
"es",
".",
"client",
",",
"es",
".",
"index_name",
",",
"index_name",
",",
"scan_kwargs",
"=",
"{",
"'doc_type'",
":",
"adapter",
".",
"doc_type",
"(",
")",
"}",
")",
"enable_refresh",
"(",
"index_name",
")",
"# At this step, we don't want error handler to delete the index",
"# in case of error",
"set_alias",
"(",
"index_name",
",",
"delete",
"=",
"not",
"keep",
")"
] |
Initialize or rebuild the search index
Models to reindex can optionally be specified as arguments.
If not, all models are reindexed.
|
[
"Initialize",
"or",
"rebuild",
"the",
"search",
"index"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/commands.py#L162-L214
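One subtlety in the model-name handling above: str.rstrip('s') removes every trailing 's', not just one, so it is only a safe singularizer for names without a repeated trailing 's'. For example:

'datasets'.rstrip('s')       # -> 'dataset'
'organizations'.rstrip('s')  # -> 'organization'
'glass'.rstrip('s')          # -> 'gla' (hypothetical name, shows the edge case)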
|
14,370
|
opendatateam/udata
|
udata/app.py
|
create_app
|
def create_app(config='udata.settings.Defaults', override=None,
init_logging=init_logging):
'''Factory for a minimal application'''
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
if exists(settings):
app.settings_file = settings # Keep track of loaded settings for diagnostic
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
# Loads defaults from plugins
for pkg in entrypoints.get_roots(app):
if pkg == 'udata':
continue # Defaults are already loaded
module = '{}.settings'.format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config['DEBUG'] and not app.config['TESTING']
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app
|
python
|
def create_app(config='udata.settings.Defaults', override=None,
init_logging=init_logging):
'''Factory for a minimal application'''
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get('UDATA_SETTINGS', join(os.getcwd(), 'udata.cfg'))
if exists(settings):
app.settings_file = settings # Keep track of loaded settings for diagnostic
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
# Loads defaults from plugins
for pkg in entrypoints.get_roots(app):
if pkg == 'udata':
continue # Defaults are already loaded
module = '{}.settings'.format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config['DEBUG'] and not app.config['TESTING']
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app
|
[
"def",
"create_app",
"(",
"config",
"=",
"'udata.settings.Defaults'",
",",
"override",
"=",
"None",
",",
"init_logging",
"=",
"init_logging",
")",
":",
"app",
"=",
"UDataApp",
"(",
"APP_NAME",
")",
"app",
".",
"config",
".",
"from_object",
"(",
"config",
")",
"settings",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'UDATA_SETTINGS'",
",",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'udata.cfg'",
")",
")",
"if",
"exists",
"(",
"settings",
")",
":",
"app",
".",
"settings_file",
"=",
"settings",
"# Keep track of loaded settings for diagnostic",
"app",
".",
"config",
".",
"from_pyfile",
"(",
"settings",
")",
"if",
"override",
":",
"app",
".",
"config",
".",
"from_object",
"(",
"override",
")",
"# Loads defaults from plugins",
"for",
"pkg",
"in",
"entrypoints",
".",
"get_roots",
"(",
"app",
")",
":",
"if",
"pkg",
"==",
"'udata'",
":",
"continue",
"# Defaults are already loaded",
"module",
"=",
"'{}.settings'",
".",
"format",
"(",
"pkg",
")",
"if",
"pkgutil",
".",
"find_loader",
"(",
"module",
")",
":",
"settings",
"=",
"pkgutil",
".",
"get_loader",
"(",
"module",
")",
"for",
"key",
",",
"default",
"in",
"settings",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"app",
".",
"config",
".",
"setdefault",
"(",
"key",
",",
"default",
")",
"app",
".",
"json_encoder",
"=",
"UDataJsonEncoder",
"app",
".",
"debug",
"=",
"app",
".",
"config",
"[",
"'DEBUG'",
"]",
"and",
"not",
"app",
".",
"config",
"[",
"'TESTING'",
"]",
"app",
".",
"wsgi_app",
"=",
"ProxyFix",
"(",
"app",
".",
"wsgi_app",
")",
"init_logging",
"(",
"app",
")",
"register_extensions",
"(",
"app",
")",
"return",
"app"
] |
Factory for a minimal application
|
[
"Factory",
"for",
"a",
"minimal",
"application"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/app.py#L155-L188
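A hedged usage sketch; the override class is illustrative. Precedence is defaults first, then udata.cfg (or $UDATA_SETTINGS), then the override object:

app = create_app()  # defaults + optional udata.cfg / $UDATA_SETTINGS

class Testing(object):  # illustrative override
    TESTING = True

test_app = create_app(override=Testing)  # override wins over file settings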
|
14,371
|
opendatateam/udata
|
udata/app.py
|
standalone
|
def standalone(app):
'''Factory for an all in one application'''
from udata import api, core, frontend
core.init_app(app)
frontend.init_app(app)
api.init_app(app)
register_features(app)
return app
|
python
|
def standalone(app):
'''Factory for an all in one application'''
from udata import api, core, frontend
core.init_app(app)
frontend.init_app(app)
api.init_app(app)
register_features(app)
return app
|
[
"def",
"standalone",
"(",
"app",
")",
":",
"from",
"udata",
"import",
"api",
",",
"core",
",",
"frontend",
"core",
".",
"init_app",
"(",
"app",
")",
"frontend",
".",
"init_app",
"(",
"app",
")",
"api",
".",
"init_app",
"(",
"app",
")",
"register_features",
"(",
"app",
")",
"return",
"app"
] |
Factory for an all in one application
|
[
"Factory",
"for",
"an",
"all",
"in",
"one",
"application"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/app.py#L191-L201
|
14,372
|
opendatateam/udata
|
udata/commands/db.py
|
get_migration
|
def get_migration(plugin, filename):
    '''Get an existing migration record if it exists'''
db = get_db()
return db.migrations.find_one({'plugin': plugin, 'filename': filename})
|
python
|
def get_migration(plugin, filename):
    '''Get an existing migration record if it exists'''
db = get_db()
return db.migrations.find_one({'plugin': plugin, 'filename': filename})
|
[
"def",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
":",
"db",
"=",
"get_db",
"(",
")",
"return",
"db",
".",
"migrations",
".",
"find_one",
"(",
"{",
"'plugin'",
":",
"plugin",
",",
"'filename'",
":",
"filename",
"}",
")"
] |
Get an existing migration record if it exists
|
[
"Get",
"an",
"existing",
"migration",
"record",
"if",
"it",
"exists"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L85-L88
|
14,373
|
opendatateam/udata
|
udata/commands/db.py
|
record_migration
|
def record_migration(plugin, filename, script, **kwargs):
'''Only record a migration without applying it'''
db = get_db()
db.eval(RECORD_WRAPPER, plugin, filename, script)
return True
|
python
|
def record_migration(plugin, filename, script, **kwargs):
'''Only record a migration without applying it'''
db = get_db()
db.eval(RECORD_WRAPPER, plugin, filename, script)
return True
|
[
"def",
"record_migration",
"(",
"plugin",
",",
"filename",
",",
"script",
",",
"*",
"*",
"kwargs",
")",
":",
"db",
"=",
"get_db",
"(",
")",
"db",
".",
"eval",
"(",
"RECORD_WRAPPER",
",",
"plugin",
",",
"filename",
",",
"script",
")",
"return",
"True"
] |
Only record a migration without applying it
|
[
"Only",
"record",
"a",
"migration",
"without",
"applying",
"it"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L115-L119
|
14,374
|
opendatateam/udata
|
udata/commands/db.py
|
available_migrations
|
def available_migrations():
'''
List available migrations for udata and enabled plugins
    Each row is a tuple with the following signature:
(plugin, package, filename)
'''
migrations = []
for filename in resource_listdir('udata', 'migrations'):
if filename.endswith('.js'):
migrations.append(('udata', 'udata', filename))
plugins = entrypoints.get_enabled('udata.models', current_app)
for plugin, module in plugins.items():
if resource_isdir(module.__name__, 'migrations'):
for filename in resource_listdir(module.__name__, 'migrations'):
if filename.endswith('.js'):
migrations.append((plugin, module.__name__, filename))
return sorted(migrations, key=lambda r: r[2])
|
python
|
def available_migrations():
'''
List available migrations for udata and enabled plugins
    Each row is a tuple with the following signature:
(plugin, package, filename)
'''
migrations = []
for filename in resource_listdir('udata', 'migrations'):
if filename.endswith('.js'):
migrations.append(('udata', 'udata', filename))
plugins = entrypoints.get_enabled('udata.models', current_app)
for plugin, module in plugins.items():
if resource_isdir(module.__name__, 'migrations'):
for filename in resource_listdir(module.__name__, 'migrations'):
if filename.endswith('.js'):
migrations.append((plugin, module.__name__, filename))
return sorted(migrations, key=lambda r: r[2])
|
[
"def",
"available_migrations",
"(",
")",
":",
"migrations",
"=",
"[",
"]",
"for",
"filename",
"in",
"resource_listdir",
"(",
"'udata'",
",",
"'migrations'",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"migrations",
".",
"append",
"(",
"(",
"'udata'",
",",
"'udata'",
",",
"filename",
")",
")",
"plugins",
"=",
"entrypoints",
".",
"get_enabled",
"(",
"'udata.models'",
",",
"current_app",
")",
"for",
"plugin",
",",
"module",
"in",
"plugins",
".",
"items",
"(",
")",
":",
"if",
"resource_isdir",
"(",
"module",
".",
"__name__",
",",
"'migrations'",
")",
":",
"for",
"filename",
"in",
"resource_listdir",
"(",
"module",
".",
"__name__",
",",
"'migrations'",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"migrations",
".",
"append",
"(",
"(",
"plugin",
",",
"module",
".",
"__name__",
",",
"filename",
")",
")",
"return",
"sorted",
"(",
"migrations",
",",
"key",
"=",
"lambda",
"r",
":",
"r",
"[",
"2",
"]",
")"
] |
List available migrations for udata and enabled plugins
Each row is a tuple with the following signature:
(plugin, package, filename)
|
[
"List",
"available",
"migrations",
"for",
"udata",
"and",
"enabled",
"plugins"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L122-L141
|
14,375
|
opendatateam/udata
|
udata/commands/db.py
|
log_status
|
def log_status(plugin, filename, status):
'''Properly display a migration status line'''
display = ':'.join((plugin, filename)) + ' '
log.info('%s [%s]', '{:.<70}'.format(display), status)
|
python
|
def log_status(plugin, filename, status):
'''Properly display a migration status line'''
display = ':'.join((plugin, filename)) + ' '
log.info('%s [%s]', '{:.<70}'.format(display), status)
|
[
"def",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"status",
")",
":",
"display",
"=",
"':'",
".",
"join",
"(",
"(",
"plugin",
",",
"filename",
")",
")",
"+",
"' '",
"log",
".",
"info",
"(",
"'%s [%s]'",
",",
"'{:.<70}'",
".",
"format",
"(",
"display",
")",
",",
"status",
")"
] |
Properly display a migration status line
|
[
"Properly",
"display",
"a",
"migration",
"status",
"line"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L144-L147
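The '{:.<70}' spec left-aligns the label and pads it with dots to 70 columns, which is what aligns the status column (filename and status below are illustrative):

log_status('udata', '2016-05-11-some-migration.js', 'Applied')
# logs roughly:
# udata:2016-05-11-some-migration.js ............................. [Applied]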
|
14,376
|
opendatateam/udata
|
udata/commands/db.py
|
status
|
def status():
'''Display the database migrations status'''
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
log_status(plugin, filename, status)
|
python
|
def status():
'''Display the database migrations status'''
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
log_status(plugin, filename, status)
|
[
"def",
"status",
"(",
")",
":",
"for",
"plugin",
",",
"package",
",",
"filename",
"in",
"available_migrations",
"(",
")",
":",
"migration",
"=",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
"if",
"migration",
":",
"status",
"=",
"green",
"(",
"migration",
"[",
"'date'",
"]",
".",
"strftime",
"(",
"DATE_FORMAT",
")",
")",
"else",
":",
"status",
"=",
"yellow",
"(",
"'Not applied'",
")",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"status",
")"
] |
Display the database migrations status
|
[
"Display",
"the",
"database",
"migrations",
"status"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L151-L159
|
14,377
|
opendatateam/udata
|
udata/commands/db.py
|
migrate
|
def migrate(record, dry_run=False):
'''Perform database migrations'''
handler = record_migration if record else execute_migration
success = True
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration or not success:
log_status(plugin, filename, cyan('Skipped'))
else:
status = magenta('Recorded') if record else yellow('Apply')
log_status(plugin, filename, status)
script = resource_string(package, join('migrations', filename))
success &= handler(plugin, filename, script, dryrun=dry_run)
|
python
|
def migrate(record, dry_run=False):
'''Perform database migrations'''
handler = record_migration if record else execute_migration
success = True
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration or not success:
log_status(plugin, filename, cyan('Skipped'))
else:
status = magenta('Recorded') if record else yellow('Apply')
log_status(plugin, filename, status)
script = resource_string(package, join('migrations', filename))
success &= handler(plugin, filename, script, dryrun=dry_run)
|
[
"def",
"migrate",
"(",
"record",
",",
"dry_run",
"=",
"False",
")",
":",
"handler",
"=",
"record_migration",
"if",
"record",
"else",
"execute_migration",
"success",
"=",
"True",
"for",
"plugin",
",",
"package",
",",
"filename",
"in",
"available_migrations",
"(",
")",
":",
"migration",
"=",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
"if",
"migration",
"or",
"not",
"success",
":",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"cyan",
"(",
"'Skipped'",
")",
")",
"else",
":",
"status",
"=",
"magenta",
"(",
"'Recorded'",
")",
"if",
"record",
"else",
"yellow",
"(",
"'Apply'",
")",
"log_status",
"(",
"plugin",
",",
"filename",
",",
"status",
")",
"script",
"=",
"resource_string",
"(",
"package",
",",
"join",
"(",
"'migrations'",
",",
"filename",
")",
")",
"success",
"&=",
"handler",
"(",
"plugin",
",",
"filename",
",",
"script",
",",
"dryrun",
"=",
"dry_run",
")"
] |
Perform database migrations
|
[
"Perform",
"database",
"migrations"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L167-L179
|
14,378
|
opendatateam/udata
|
udata/commands/db.py
|
unrecord
|
def unrecord(plugin_or_specs, filename):
'''
Remove a database migration record.
\b
A record can be expressed with the following syntaxes:
- plugin filename
    - plugin filename.js
    - plugin:filename
    - plugin:filename.js
'''
plugin, filename = normalize_migration(plugin_or_specs, filename)
migration = get_migration(plugin, filename)
if migration:
log.info('Removing migration %s:%s', plugin, filename)
db = get_db()
db.eval(UNRECORD_WRAPPER, migration['_id'])
else:
log.error('Migration not found %s:%s', plugin, filename)
|
python
|
def unrecord(plugin_or_specs, filename):
'''
Remove a database migration record.
\b
A record can be expressed with the following syntaxes:
- plugin filename
    - plugin filename.js
    - plugin:filename
    - plugin:filename.js
'''
plugin, filename = normalize_migration(plugin_or_specs, filename)
migration = get_migration(plugin, filename)
if migration:
log.info('Removing migration %s:%s', plugin, filename)
db = get_db()
db.eval(UNRECORD_WRAPPER, migration['_id'])
else:
log.error('Migration not found %s:%s', plugin, filename)
|
[
"def",
"unrecord",
"(",
"plugin_or_specs",
",",
"filename",
")",
":",
"plugin",
",",
"filename",
"=",
"normalize_migration",
"(",
"plugin_or_specs",
",",
"filename",
")",
"migration",
"=",
"get_migration",
"(",
"plugin",
",",
"filename",
")",
"if",
"migration",
":",
"log",
".",
"info",
"(",
"'Removing migration %s:%s'",
",",
"plugin",
",",
"filename",
")",
"db",
"=",
"get_db",
"(",
")",
"db",
".",
"eval",
"(",
"UNRECORD_WRAPPER",
",",
"migration",
"[",
"'_id'",
"]",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Migration not found %s:%s'",
",",
"plugin",
",",
"filename",
")"
] |
Remove a database migration record.
\b
A record can be expressed with the following syntaxes:
- plugin filename
- plugin filename.js
- plugin:filename
- plugin:filename.js
|
[
"Remove",
"a",
"database",
"migration",
"record",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/db.py#L185-L203
|
14,379
|
opendatateam/udata
|
udata/uris.py
|
validate
|
def validate(url, schemes=None, tlds=None, private=None, local=None,
credentials=None):
'''
    Validate and normalize a URL
:param str url: The URL to validate and normalize
:return str: The normalized URL
:raises ValidationError: when URL does not validate
'''
url = url.strip()
private = config_for(private, 'URLS_ALLOW_PRIVATE')
local = config_for(local, 'URLS_ALLOW_LOCAL')
credentials = config_for(credentials, 'URLS_ALLOW_CREDENTIALS')
schemes = config_for(schemes, 'URLS_ALLOWED_SCHEMES')
tlds = config_for(tlds, 'URLS_ALLOWED_TLDS')
match = URL_REGEX.match(url)
if not match:
error(url)
scheme = (match.group('scheme') or '').lower()
if scheme and scheme not in schemes:
error(url, 'Invalid scheme {0}'.format(scheme))
if not credentials and match.group('credentials'):
error(url, 'Credentials in URL are not allowed')
tld = match.group('tld')
if tld and tld not in tlds and tld.encode('idna') not in tlds:
error(url, 'Invalid TLD {0}'.format(tld))
ip = match.group('ipv6') or match.group('ipv4')
if ip:
try:
ip = IPAddress(ip)
except AddrFormatError:
error(url)
if ip.is_multicast():
error(url, '{0} is a multicast IP'.format(ip))
elif not ip.is_loopback() and ip.is_hostmask() or ip.is_netmask():
error(url, '{0} is a mask IP'.format(ip))
if not local:
if ip and ip.is_loopback() or match.group('localhost'):
error(url, 'is a local URL')
if not private and ip and ip.is_private():
error(url, 'is a private URL')
return url
|
python
|
def validate(url, schemes=None, tlds=None, private=None, local=None,
credentials=None):
'''
    Validate and normalize a URL
:param str url: The URL to validate and normalize
:return str: The normalized URL
:raises ValidationError: when URL does not validate
'''
url = url.strip()
private = config_for(private, 'URLS_ALLOW_PRIVATE')
local = config_for(local, 'URLS_ALLOW_LOCAL')
credentials = config_for(credentials, 'URLS_ALLOW_CREDENTIALS')
schemes = config_for(schemes, 'URLS_ALLOWED_SCHEMES')
tlds = config_for(tlds, 'URLS_ALLOWED_TLDS')
match = URL_REGEX.match(url)
if not match:
error(url)
scheme = (match.group('scheme') or '').lower()
if scheme and scheme not in schemes:
error(url, 'Invalid scheme {0}'.format(scheme))
if not credentials and match.group('credentials'):
error(url, 'Credentials in URL are not allowed')
tld = match.group('tld')
if tld and tld not in tlds and tld.encode('idna') not in tlds:
error(url, 'Invalid TLD {0}'.format(tld))
ip = match.group('ipv6') or match.group('ipv4')
if ip:
try:
ip = IPAddress(ip)
except AddrFormatError:
error(url)
if ip.is_multicast():
error(url, '{0} is a multicast IP'.format(ip))
elif not ip.is_loopback() and ip.is_hostmask() or ip.is_netmask():
error(url, '{0} is a mask IP'.format(ip))
if not local:
if ip and ip.is_loopback() or match.group('localhost'):
error(url, 'is a local URL')
if not private and ip and ip.is_private():
error(url, 'is a private URL')
return url
|
[
"def",
"validate",
"(",
"url",
",",
"schemes",
"=",
"None",
",",
"tlds",
"=",
"None",
",",
"private",
"=",
"None",
",",
"local",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"url",
"=",
"url",
".",
"strip",
"(",
")",
"private",
"=",
"config_for",
"(",
"private",
",",
"'URLS_ALLOW_PRIVATE'",
")",
"local",
"=",
"config_for",
"(",
"local",
",",
"'URLS_ALLOW_LOCAL'",
")",
"credentials",
"=",
"config_for",
"(",
"credentials",
",",
"'URLS_ALLOW_CREDENTIALS'",
")",
"schemes",
"=",
"config_for",
"(",
"schemes",
",",
"'URLS_ALLOWED_SCHEMES'",
")",
"tlds",
"=",
"config_for",
"(",
"tlds",
",",
"'URLS_ALLOWED_TLDS'",
")",
"match",
"=",
"URL_REGEX",
".",
"match",
"(",
"url",
")",
"if",
"not",
"match",
":",
"error",
"(",
"url",
")",
"scheme",
"=",
"(",
"match",
".",
"group",
"(",
"'scheme'",
")",
"or",
"''",
")",
".",
"lower",
"(",
")",
"if",
"scheme",
"and",
"scheme",
"not",
"in",
"schemes",
":",
"error",
"(",
"url",
",",
"'Invalid scheme {0}'",
".",
"format",
"(",
"scheme",
")",
")",
"if",
"not",
"credentials",
"and",
"match",
".",
"group",
"(",
"'credentials'",
")",
":",
"error",
"(",
"url",
",",
"'Credentials in URL are not allowed'",
")",
"tld",
"=",
"match",
".",
"group",
"(",
"'tld'",
")",
"if",
"tld",
"and",
"tld",
"not",
"in",
"tlds",
"and",
"tld",
".",
"encode",
"(",
"'idna'",
")",
"not",
"in",
"tlds",
":",
"error",
"(",
"url",
",",
"'Invalid TLD {0}'",
".",
"format",
"(",
"tld",
")",
")",
"ip",
"=",
"match",
".",
"group",
"(",
"'ipv6'",
")",
"or",
"match",
".",
"group",
"(",
"'ipv4'",
")",
"if",
"ip",
":",
"try",
":",
"ip",
"=",
"IPAddress",
"(",
"ip",
")",
"except",
"AddrFormatError",
":",
"error",
"(",
"url",
")",
"if",
"ip",
".",
"is_multicast",
"(",
")",
":",
"error",
"(",
"url",
",",
"'{0} is a multicast IP'",
".",
"format",
"(",
"ip",
")",
")",
"elif",
"not",
"ip",
".",
"is_loopback",
"(",
")",
"and",
"ip",
".",
"is_hostmask",
"(",
")",
"or",
"ip",
".",
"is_netmask",
"(",
")",
":",
"error",
"(",
"url",
",",
"'{0} is a mask IP'",
".",
"format",
"(",
"ip",
")",
")",
"if",
"not",
"local",
":",
"if",
"ip",
"and",
"ip",
".",
"is_loopback",
"(",
")",
"or",
"match",
".",
"group",
"(",
"'localhost'",
")",
":",
"error",
"(",
"url",
",",
"'is a local URL'",
")",
"if",
"not",
"private",
"and",
"ip",
"and",
"ip",
".",
"is_private",
"(",
")",
":",
"error",
"(",
"url",
",",
"'is a private URL'",
")",
"return",
"url"
] |
Validate and normalize a URL
:param str url: The URL to validate and normalize
:return str: The normalized URL
:raises ValidationError: when URL does not validate
|
[
"Validate",
"and",
"normalize",
"a",
"URL"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/uris.py#L69-L119
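Hedged examples; actual outcomes depend on the URLS_* settings resolved through config_for (the defaults are assumed to allow http/https and disallow the rest):

validate(' https://www.data.gouv.fr/datasets/ ')  # stripped and returned
validate('http://user:pass@example.com')  # ValidationError unless credentials allowed
validate('http://127.0.0.1:8080/')        # ValidationError unless local URLs allowed
validate('ftp://example.com')             # ValidationError unless 'ftp' is allowed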
|
14,380
|
opendatateam/udata
|
udata/core/dataset/models.py
|
get_json_ld_extra
|
def get_json_ld_extra(key, value):
'''Serialize an extras key, value pair into JSON-LD'''
value = value.serialize() if hasattr(value, 'serialize') else value
return {
'@type': 'http://schema.org/PropertyValue',
'name': key,
'value': value,
}
|
python
|
def get_json_ld_extra(key, value):
'''Serialize an extras key, value pair into JSON-LD'''
value = value.serialize() if hasattr(value, 'serialize') else value
return {
'@type': 'http://schema.org/PropertyValue',
'name': key,
'value': value,
}
|
[
"def",
"get_json_ld_extra",
"(",
"key",
",",
"value",
")",
":",
"value",
"=",
"value",
".",
"serialize",
"(",
")",
"if",
"hasattr",
"(",
"value",
",",
"'serialize'",
")",
"else",
"value",
"return",
"{",
"'@type'",
":",
"'http://schema.org/PropertyValue'",
",",
"'name'",
":",
"key",
",",
"'value'",
":",
"value",
",",
"}"
] |
Serialize an extras key, value pair into JSON-LD
|
[
"Serialize",
"an",
"extras",
"key",
"value",
"pair",
"into",
"JSON",
"-",
"LD"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L105-L112
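For example (key and value illustrative):

get_json_ld_extra('source', 'inspire')
# -> {'@type': 'http://schema.org/PropertyValue',
#     'name': 'source',
#     'value': 'inspire'}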
|
14,381
|
opendatateam/udata
|
udata/core/dataset/models.py
|
get_resource
|
def get_resource(id):
'''Fetch a resource given its UUID'''
dataset = Dataset.objects(resources__id=id).first()
if dataset:
return get_by(dataset.resources, 'id', id)
else:
return CommunityResource.objects(id=id).first()
|
python
|
def get_resource(id):
'''Fetch a resource given its UUID'''
dataset = Dataset.objects(resources__id=id).first()
if dataset:
return get_by(dataset.resources, 'id', id)
else:
return CommunityResource.objects(id=id).first()
|
[
"def",
"get_resource",
"(",
"id",
")",
":",
"dataset",
"=",
"Dataset",
".",
"objects",
"(",
"resources__id",
"=",
"id",
")",
".",
"first",
"(",
")",
"if",
"dataset",
":",
"return",
"get_by",
"(",
"dataset",
".",
"resources",
",",
"'id'",
",",
"id",
")",
"else",
":",
"return",
"CommunityResource",
".",
"objects",
"(",
"id",
"=",
"id",
")",
".",
"first",
"(",
")"
] |
Fetch a resource given its UUID
|
[
"Fetch",
"a",
"resource",
"given",
"its",
"UUID"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L692-L698
|
14,382
|
opendatateam/udata
|
udata/core/dataset/models.py
|
License.guess
|
def guess(cls, *strings, **kwargs):
'''
Try to guess a license from a list of strings.
Accept a `default` keyword argument which will be
the default fallback license.
'''
license = None
for string in strings:
license = cls.guess_one(string)
if license:
break
return license or kwargs.get('default')
|
python
|
def guess(cls, *strings, **kwargs):
'''
Try to guess a license from a list of strings.
Accept a `default` keyword argument which will be
the default fallback license.
'''
license = None
for string in strings:
license = cls.guess_one(string)
if license:
break
return license or kwargs.get('default')
|
[
"def",
"guess",
"(",
"cls",
",",
"*",
"strings",
",",
"*",
"*",
"kwargs",
")",
":",
"license",
"=",
"None",
"for",
"string",
"in",
"strings",
":",
"license",
"=",
"cls",
".",
"guess_one",
"(",
"string",
")",
"if",
"license",
":",
"break",
"return",
"license",
"or",
"kwargs",
".",
"get",
"(",
"'default'",
")"
] |
Try to guess a license from a list of strings.
Accept a `default` keyword argument which will be
the default fallback license.
|
[
"Try",
"to",
"guess",
"a",
"license",
"from",
"a",
"list",
"of",
"strings",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L134-L146
|
14,383
|
opendatateam/udata
|
udata/core/dataset/models.py
|
License.guess_one
|
def guess_one(cls, text):
'''
Try to guess license from a string.
        Try an exact match on identifier, then slugified title,
        and fall back on edit distance ranking (after slugification)
'''
if not text:
return
qs = cls.objects
text = text.strip().lower() # Stored identifiers are lower case
slug = cls.slug.slugify(text) # Use slug as it normalize string
license = qs(
db.Q(id=text) | db.Q(slug=slug) | db.Q(url=text)
| db.Q(alternate_urls=text)
).first()
if license is None:
# Try to single match with a low Damerau-Levenshtein distance
computed = ((l, rdlevenshtein(l.slug, slug)) for l in cls.objects)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more that one match, we cannot determinate
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
if license is None:
# Try to single match with a low Damerau-Levenshtein distance
computed = (
(l, rdlevenshtein(cls.slug.slugify(t), slug))
for l in cls.objects
for t in l.alternate_titles
)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more that one match, we cannot determinate
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
return license
|
python
|
def guess_one(cls, text):
'''
Try to guess license from a string.
        Try an exact match on identifier, then slugified title,
        and fall back on edit distance ranking (after slugification)
'''
if not text:
return
qs = cls.objects
text = text.strip().lower() # Stored identifiers are lower case
slug = cls.slug.slugify(text) # Use slug as it normalize string
license = qs(
db.Q(id=text) | db.Q(slug=slug) | db.Q(url=text)
| db.Q(alternate_urls=text)
).first()
if license is None:
# Try to single match with a low Damerau-Levenshtein distance
computed = ((l, rdlevenshtein(l.slug, slug)) for l in cls.objects)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more that one match, we cannot determinate
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
if license is None:
# Try to single match with a low Damerau-Levenshtein distance
computed = (
(l, rdlevenshtein(cls.slug.slugify(t), slug))
for l in cls.objects
for t in l.alternate_titles
)
candidates = [l for l, d in computed if d <= MAX_DISTANCE]
# If there is more that one match, we cannot determinate
# which one is closer to safely choose between candidates
if len(candidates) == 1:
license = candidates[0]
return license
|
[
"def",
"guess_one",
"(",
"cls",
",",
"text",
")",
":",
"if",
"not",
"text",
":",
"return",
"qs",
"=",
"cls",
".",
"objects",
"text",
"=",
"text",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"# Stored identifiers are lower case",
"slug",
"=",
"cls",
".",
"slug",
".",
"slugify",
"(",
"text",
")",
"# Use slug as it normalize string",
"license",
"=",
"qs",
"(",
"db",
".",
"Q",
"(",
"id",
"=",
"text",
")",
"|",
"db",
".",
"Q",
"(",
"slug",
"=",
"slug",
")",
"|",
"db",
".",
"Q",
"(",
"url",
"=",
"text",
")",
"|",
"db",
".",
"Q",
"(",
"alternate_urls",
"=",
"text",
")",
")",
".",
"first",
"(",
")",
"if",
"license",
"is",
"None",
":",
"# Try to single match with a low Damerau-Levenshtein distance",
"computed",
"=",
"(",
"(",
"l",
",",
"rdlevenshtein",
"(",
"l",
".",
"slug",
",",
"slug",
")",
")",
"for",
"l",
"in",
"cls",
".",
"objects",
")",
"candidates",
"=",
"[",
"l",
"for",
"l",
",",
"d",
"in",
"computed",
"if",
"d",
"<=",
"MAX_DISTANCE",
"]",
"# If there is more that one match, we cannot determinate",
"# which one is closer to safely choose between candidates",
"if",
"len",
"(",
"candidates",
")",
"==",
"1",
":",
"license",
"=",
"candidates",
"[",
"0",
"]",
"if",
"license",
"is",
"None",
":",
"# Try to single match with a low Damerau-Levenshtein distance",
"computed",
"=",
"(",
"(",
"l",
",",
"rdlevenshtein",
"(",
"cls",
".",
"slug",
".",
"slugify",
"(",
"t",
")",
",",
"slug",
")",
")",
"for",
"l",
"in",
"cls",
".",
"objects",
"for",
"t",
"in",
"l",
".",
"alternate_titles",
")",
"candidates",
"=",
"[",
"l",
"for",
"l",
",",
"d",
"in",
"computed",
"if",
"d",
"<=",
"MAX_DISTANCE",
"]",
"# If there is more that one match, we cannot determinate",
"# which one is closer to safely choose between candidates",
"if",
"len",
"(",
"candidates",
")",
"==",
"1",
":",
"license",
"=",
"candidates",
"[",
"0",
"]",
"return",
"license"
] |
Try to guess license from a string.
Try an exact match on identifier, then slugified title,
and fall back on edit distance ranking (after slugification)
|
[
"Try",
"to",
"guess",
"license",
"from",
"a",
"string",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L149-L185
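An illustration of the fuzzy fallback; rdlevenshtein is a restricted Damerau-Levenshtein distance and MAX_DISTANCE is defined elsewhere in the module (slugs below are illustrative):

rdlevenshtein('cc-by', 'cc-by')     # 0 -- exact slug matches are caught earlier
rdlevenshtein('cc-by2', 'cc-by-2')  # 1 -- one insertion, a likely candidate
rdlevenshtein('cc-by', 'odc-odbl')  # large -- filtered out by MAX_DISTANCE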
|
14,384
|
opendatateam/udata
|
udata/core/dataset/models.py
|
ResourceMixin.need_check
|
def need_check(self):
        '''Does the resource need to be checked against its linkchecker?
We check unavailable resources often, unless they go over the
threshold. Available resources are checked less and less frequently
based on their historical availability.
'''
min_cache_duration, max_cache_duration, ko_threshold = [
current_app.config.get(k) for k in (
'LINKCHECKING_MIN_CACHE_DURATION',
'LINKCHECKING_MAX_CACHE_DURATION',
'LINKCHECKING_UNAVAILABLE_THRESHOLD',
)
]
count_availability = self.extras.get('check:count-availability', 1)
is_available = self.check_availability()
if is_available == 'unknown':
return True
elif is_available or count_availability > ko_threshold:
delta = min(min_cache_duration * count_availability,
max_cache_duration)
else:
delta = min_cache_duration
if self.extras.get('check:date'):
limit_date = datetime.now() - timedelta(minutes=delta)
check_date = self.extras['check:date']
if not isinstance(check_date, datetime):
try:
check_date = parse_dt(check_date)
except (ValueError, TypeError):
return True
if check_date >= limit_date:
return False
return True
|
python
|
def need_check(self):
        '''Does the resource need to be checked against its linkchecker?
We check unavailable resources often, unless they go over the
threshold. Available resources are checked less and less frequently
based on their historical availability.
'''
min_cache_duration, max_cache_duration, ko_threshold = [
current_app.config.get(k) for k in (
'LINKCHECKING_MIN_CACHE_DURATION',
'LINKCHECKING_MAX_CACHE_DURATION',
'LINKCHECKING_UNAVAILABLE_THRESHOLD',
)
]
count_availability = self.extras.get('check:count-availability', 1)
is_available = self.check_availability()
if is_available == 'unknown':
return True
elif is_available or count_availability > ko_threshold:
delta = min(min_cache_duration * count_availability,
max_cache_duration)
else:
delta = min_cache_duration
if self.extras.get('check:date'):
limit_date = datetime.now() - timedelta(minutes=delta)
check_date = self.extras['check:date']
if not isinstance(check_date, datetime):
try:
check_date = parse_dt(check_date)
except (ValueError, TypeError):
return True
if check_date >= limit_date:
return False
return True
|
[
"def",
"need_check",
"(",
"self",
")",
":",
"min_cache_duration",
",",
"max_cache_duration",
",",
"ko_threshold",
"=",
"[",
"current_app",
".",
"config",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"(",
"'LINKCHECKING_MIN_CACHE_DURATION'",
",",
"'LINKCHECKING_MAX_CACHE_DURATION'",
",",
"'LINKCHECKING_UNAVAILABLE_THRESHOLD'",
",",
")",
"]",
"count_availability",
"=",
"self",
".",
"extras",
".",
"get",
"(",
"'check:count-availability'",
",",
"1",
")",
"is_available",
"=",
"self",
".",
"check_availability",
"(",
")",
"if",
"is_available",
"==",
"'unknown'",
":",
"return",
"True",
"elif",
"is_available",
"or",
"count_availability",
">",
"ko_threshold",
":",
"delta",
"=",
"min",
"(",
"min_cache_duration",
"*",
"count_availability",
",",
"max_cache_duration",
")",
"else",
":",
"delta",
"=",
"min_cache_duration",
"if",
"self",
".",
"extras",
".",
"get",
"(",
"'check:date'",
")",
":",
"limit_date",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"timedelta",
"(",
"minutes",
"=",
"delta",
")",
"check_date",
"=",
"self",
".",
"extras",
"[",
"'check:date'",
"]",
"if",
"not",
"isinstance",
"(",
"check_date",
",",
"datetime",
")",
":",
"try",
":",
"check_date",
"=",
"parse_dt",
"(",
"check_date",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"True",
"if",
"check_date",
">=",
"limit_date",
":",
"return",
"False",
"return",
"True"
] |
Does the resource need to be checked against its linkchecker?
We check unavailable resources often, unless they go over the
threshold. Available resources are checked less and less frequently
based on their historical availability.
|
[
"Does",
"the",
"resource",
"needs",
"to",
"be",
"checked",
"against",
"its",
"linkchecker?"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L258-L291
|
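The scheduling rule in need_check is easier to see in isolation. Below is a minimal sketch of the same backoff arithmetic, with the three Flask settings inlined as plain constants; the numeric values are assumptions for illustration, not udata's defaults.

from datetime import datetime, timedelta

# Illustrative values only, not udata's defaults.
MIN_CACHE, MAX_CACHE, KO_THRESHOLD = 60, 10080, 100  # minutes, minutes, count

def needs_check(is_available, count_availability, last_check):
    if is_available == 'unknown':
        return True
    if is_available or count_availability > KO_THRESHOLD:
        # Back off linearly with the availability streak, capped at MAX_CACHE.
        delta = min(MIN_CACHE * count_availability, MAX_CACHE)
    else:
        delta = MIN_CACHE
    return last_check < datetime.now() - timedelta(minutes=delta)

# Available 10 checks in a row: the next check waits 10 * MIN_CACHE minutes.
print(needs_check(True, 10, datetime.now() - timedelta(minutes=300)))  # False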
14,385
|
opendatateam/udata
|
udata/core/dataset/models.py
|
Dataset.check_availability
|
def check_availability(self):
"""Check if resources from that dataset are available.
Return a list of (boolean or 'unknown')
"""
# Only check remote resources.
remote_resources = [resource
for resource in self.resources
if resource.filetype == 'remote']
if not remote_resources:
return []
return [resource.check_availability() for resource in remote_resources]
|
python
|
def check_availability(self):
"""Check if resources from that dataset are available.
Return a list of (boolean or 'unknown')
"""
# Only check remote resources.
remote_resources = [resource
for resource in self.resources
if resource.filetype == 'remote']
if not remote_resources:
return []
return [resource.check_availability() for resource in remote_resources]
|
[
"def",
"check_availability",
"(",
"self",
")",
":",
"# Only check remote resources.",
"remote_resources",
"=",
"[",
"resource",
"for",
"resource",
"in",
"self",
".",
"resources",
"if",
"resource",
".",
"filetype",
"==",
"'remote'",
"]",
"if",
"not",
"remote_resources",
":",
"return",
"[",
"]",
"return",
"[",
"resource",
".",
"check_availability",
"(",
")",
"for",
"resource",
"in",
"remote_resources",
"]"
] |
Check if resources from that dataset are available.
Return a list of (boolean or 'unknown')
|
[
"Check",
"if",
"resources",
"from",
"that",
"dataset",
"are",
"available",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L471-L482
|
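A small standalone sketch of the same filter-then-map pattern with plain objects (no mongoengine involved):

from types import SimpleNamespace

resources = [
    SimpleNamespace(filetype='remote', check_availability=lambda: True),
    SimpleNamespace(filetype='file', check_availability=lambda: True),
]

# Only remote resources are worth link-checking; hosted files are skipped.
remote = [r for r in resources if r.filetype == 'remote']
print([r.check_availability() for r in remote])  # [True]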
14,386
|
opendatateam/udata
|
udata/core/dataset/models.py
|
Dataset.next_update
|
def next_update(self):
"""Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled.
"""
delta = None
if self.frequency == 'daily':
delta = timedelta(days=1)
elif self.frequency == 'weekly':
delta = timedelta(weeks=1)
elif self.frequency == 'fortnighly':
delta = timedelta(weeks=2)
elif self.frequency == 'monthly':
delta = timedelta(weeks=4)
elif self.frequency == 'bimonthly':
delta = timedelta(weeks=4 * 2)
elif self.frequency == 'quarterly':
delta = timedelta(weeks=52 / 4)
elif self.frequency == 'biannual':
delta = timedelta(weeks=52 / 2)
elif self.frequency == 'annual':
delta = timedelta(weeks=52)
elif self.frequency == 'biennial':
delta = timedelta(weeks=52 * 2)
elif self.frequency == 'triennial':
delta = timedelta(weeks=52 * 3)
elif self.frequency == 'quinquennial':
delta = timedelta(weeks=52 * 5)
if delta is None:
return
else:
return self.last_update + delta
|
python
|
def next_update(self):
"""Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled.
"""
delta = None
if self.frequency == 'daily':
delta = timedelta(days=1)
elif self.frequency == 'weekly':
delta = timedelta(weeks=1)
elif self.frequency == 'fortnighly':
delta = timedelta(weeks=2)
elif self.frequency == 'monthly':
delta = timedelta(weeks=4)
elif self.frequency == 'bimonthly':
delta = timedelta(weeks=4 * 2)
elif self.frequency == 'quarterly':
delta = timedelta(weeks=52 / 4)
elif self.frequency == 'biannual':
delta = timedelta(weeks=52 / 2)
elif self.frequency == 'annual':
delta = timedelta(weeks=52)
elif self.frequency == 'biennial':
delta = timedelta(weeks=52 * 2)
elif self.frequency == 'triennial':
delta = timedelta(weeks=52 * 3)
elif self.frequency == 'quinquennial':
delta = timedelta(weeks=52 * 5)
if delta is None:
return
else:
return self.last_update + delta
|
[
"def",
"next_update",
"(",
"self",
")",
":",
"delta",
"=",
"None",
"if",
"self",
".",
"frequency",
"==",
"'daily'",
":",
"delta",
"=",
"timedelta",
"(",
"days",
"=",
"1",
")",
"elif",
"self",
".",
"frequency",
"==",
"'weekly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
"elif",
"self",
".",
"frequency",
"==",
"'fortnighly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'monthly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"4",
")",
"elif",
"self",
".",
"frequency",
"==",
"'bimonthly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"4",
"*",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'quarterly'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"/",
"4",
")",
"elif",
"self",
".",
"frequency",
"==",
"'biannual'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"/",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'annual'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
")",
"elif",
"self",
".",
"frequency",
"==",
"'biennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"2",
")",
"elif",
"self",
".",
"frequency",
"==",
"'triennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"3",
")",
"elif",
"self",
".",
"frequency",
"==",
"'quinquennial'",
":",
"delta",
"=",
"timedelta",
"(",
"weeks",
"=",
"52",
"*",
"5",
")",
"if",
"delta",
"is",
"None",
":",
"return",
"else",
":",
"return",
"self",
".",
"last_update",
"+",
"delta"
] |
Compute the next expected update date,
given the frequency and last_update.
Return None if the frequency is not handled.
|
[
"Compute",
"the",
"next",
"expected",
"update",
"date"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L492-L524
|
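The elif chain above maps a frequency label to a timedelta; a table-driven restatement makes the mapping easier to scan. The keys copy the source verbatim, including the misspelled 'fortnighly', since that is the exact string the chain compares against.

from datetime import datetime, timedelta

# Same labels as the source, including the 'fortnighly' misspelling.
FREQUENCY_DELTAS = {
    'daily': timedelta(days=1),
    'weekly': timedelta(weeks=1),
    'fortnighly': timedelta(weeks=2),
    'monthly': timedelta(weeks=4),
    'bimonthly': timedelta(weeks=4 * 2),
    'quarterly': timedelta(weeks=52 / 4),
    'biannual': timedelta(weeks=52 / 2),
    'annual': timedelta(weeks=52),
    'biennial': timedelta(weeks=52 * 2),
    'triennial': timedelta(weeks=52 * 3),
    'quinquennial': timedelta(weeks=52 * 5),
}

def next_update(frequency, last_update):
    delta = FREQUENCY_DELTAS.get(frequency)
    return last_update + delta if delta is not None else None

print(next_update('weekly', datetime(2019, 1, 1)))  # 2019-01-08 00:00:00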
14,387
|
opendatateam/udata
|
udata/core/dataset/models.py
|
Dataset.quality
|
def quality(self):
"""Return a dict filled with metrics related to the inner
quality of the dataset:
* number of tags
* description length
* and so on
"""
from udata.models import Discussion # noqa: Prevent circular imports
result = {}
if not self.id:
# Quality is only relevant on saved Datasets
return result
if self.next_update:
result['frequency'] = self.frequency
result['update_in'] = -(self.next_update - datetime.now()).days
if self.tags:
result['tags_count'] = len(self.tags)
if self.description:
result['description_length'] = len(self.description)
if self.resources:
result['has_resources'] = True
result['has_only_closed_or_no_formats'] = all(
resource.closed_or_no_format for resource in self.resources)
result['has_unavailable_resources'] = not all(
self.check_availability())
discussions = Discussion.objects(subject=self)
if discussions:
result['discussions'] = len(discussions)
result['has_untreated_discussions'] = not all(
discussion.person_involved(self.owner)
for discussion in discussions)
result['score'] = self.compute_quality_score(result)
return result
|
python
|
def quality(self):
"""Return a dict filled with metrics related to the inner
quality of the dataset:
* number of tags
* description length
* and so on
"""
from udata.models import Discussion # noqa: Prevent circular imports
result = {}
if not self.id:
# Quality is only relevant on saved Datasets
return result
if self.next_update:
result['frequency'] = self.frequency
result['update_in'] = -(self.next_update - datetime.now()).days
if self.tags:
result['tags_count'] = len(self.tags)
if self.description:
result['description_length'] = len(self.description)
if self.resources:
result['has_resources'] = True
result['has_only_closed_or_no_formats'] = all(
resource.closed_or_no_format for resource in self.resources)
result['has_unavailable_resources'] = not all(
self.check_availability())
discussions = Discussion.objects(subject=self)
if discussions:
result['discussions'] = len(discussions)
result['has_untreated_discussions'] = not all(
discussion.person_involved(self.owner)
for discussion in discussions)
result['score'] = self.compute_quality_score(result)
return result
|
[
"def",
"quality",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Discussion",
"# noqa: Prevent circular imports",
"result",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"id",
":",
"# Quality is only relevant on saved Datasets",
"return",
"result",
"if",
"self",
".",
"next_update",
":",
"result",
"[",
"'frequency'",
"]",
"=",
"self",
".",
"frequency",
"result",
"[",
"'update_in'",
"]",
"=",
"-",
"(",
"self",
".",
"next_update",
"-",
"datetime",
".",
"now",
"(",
")",
")",
".",
"days",
"if",
"self",
".",
"tags",
":",
"result",
"[",
"'tags_count'",
"]",
"=",
"len",
"(",
"self",
".",
"tags",
")",
"if",
"self",
".",
"description",
":",
"result",
"[",
"'description_length'",
"]",
"=",
"len",
"(",
"self",
".",
"description",
")",
"if",
"self",
".",
"resources",
":",
"result",
"[",
"'has_resources'",
"]",
"=",
"True",
"result",
"[",
"'has_only_closed_or_no_formats'",
"]",
"=",
"all",
"(",
"resource",
".",
"closed_or_no_format",
"for",
"resource",
"in",
"self",
".",
"resources",
")",
"result",
"[",
"'has_unavailable_resources'",
"]",
"=",
"not",
"all",
"(",
"self",
".",
"check_availability",
"(",
")",
")",
"discussions",
"=",
"Discussion",
".",
"objects",
"(",
"subject",
"=",
"self",
")",
"if",
"discussions",
":",
"result",
"[",
"'discussions'",
"]",
"=",
"len",
"(",
"discussions",
")",
"result",
"[",
"'has_untreated_discussions'",
"]",
"=",
"not",
"all",
"(",
"discussion",
".",
"person_involved",
"(",
"self",
".",
"owner",
")",
"for",
"discussion",
"in",
"discussions",
")",
"result",
"[",
"'score'",
"]",
"=",
"self",
".",
"compute_quality_score",
"(",
"result",
")",
"return",
"result"
] |
Return a dict filled with metrics related to the inner
quality of the dataset:
* number of tags
* description length
* and so on
|
[
"Return",
"a",
"dict",
"filled",
"with",
"metrics",
"related",
"to",
"the",
"inner"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L527-L561
|
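One detail worth pinning down is the sign convention of 'update_in': it is the negated day count until next_update, so it stays negative while the dataset is still on schedule. A tiny runnable check:

from datetime import datetime, timedelta

next_update = datetime.now() + timedelta(days=3)
update_in = -(next_update - datetime.now()).days  # -2: timedelta.days truncates
# Negative while the next expected update is still ahead, which is the
# condition rewarded by compute_quality_score in the next record.
print(update_in < 0)  # True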
14,388
|
opendatateam/udata
|
udata/core/dataset/models.py
|
Dataset.compute_quality_score
|
def compute_quality_score(self, quality):
"""Compute the score related to the quality of that dataset."""
score = 0
UNIT = 2
if 'frequency' in quality:
# TODO: should be related to frequency.
if quality['update_in'] < 0:
score += UNIT
else:
score -= UNIT
if 'tags_count' in quality:
if quality['tags_count'] > 3:
score += UNIT
if 'description_length' in quality:
if quality['description_length'] > 100:
score += UNIT
if 'has_resources' in quality:
if quality['has_only_closed_or_no_formats']:
score -= UNIT
else:
score += UNIT
if quality['has_unavailable_resources']:
score -= UNIT
else:
score += UNIT
if 'discussions' in quality:
if quality['has_untreated_discussions']:
score -= UNIT
else:
score += UNIT
if score < 0:
return 0
return score
|
python
|
def compute_quality_score(self, quality):
"""Compute the score related to the quality of that dataset."""
score = 0
UNIT = 2
if 'frequency' in quality:
# TODO: should be related to frequency.
if quality['update_in'] < 0:
score += UNIT
else:
score -= UNIT
if 'tags_count' in quality:
if quality['tags_count'] > 3:
score += UNIT
if 'description_length' in quality:
if quality['description_length'] > 100:
score += UNIT
if 'has_resources' in quality:
if quality['has_only_closed_or_no_formats']:
score -= UNIT
else:
score += UNIT
if quality['has_unavailable_resources']:
score -= UNIT
else:
score += UNIT
if 'discussions' in quality:
if quality['has_untreated_discussions']:
score -= UNIT
else:
score += UNIT
if score < 0:
return 0
return score
|
[
"def",
"compute_quality_score",
"(",
"self",
",",
"quality",
")",
":",
"score",
"=",
"0",
"UNIT",
"=",
"2",
"if",
"'frequency'",
"in",
"quality",
":",
"# TODO: should be related to frequency.",
"if",
"quality",
"[",
"'update_in'",
"]",
"<",
"0",
":",
"score",
"+=",
"UNIT",
"else",
":",
"score",
"-=",
"UNIT",
"if",
"'tags_count'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'tags_count'",
"]",
">",
"3",
":",
"score",
"+=",
"UNIT",
"if",
"'description_length'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'description_length'",
"]",
">",
"100",
":",
"score",
"+=",
"UNIT",
"if",
"'has_resources'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'has_only_closed_or_no_formats'",
"]",
":",
"score",
"-=",
"UNIT",
"else",
":",
"score",
"+=",
"UNIT",
"if",
"quality",
"[",
"'has_unavailable_resources'",
"]",
":",
"score",
"-=",
"UNIT",
"else",
":",
"score",
"+=",
"UNIT",
"if",
"'discussions'",
"in",
"quality",
":",
"if",
"quality",
"[",
"'has_untreated_discussions'",
"]",
":",
"score",
"-=",
"UNIT",
"else",
":",
"score",
"+=",
"UNIT",
"if",
"score",
"<",
"0",
":",
"return",
"0",
"return",
"score"
] |
Compute the score related to the quality of that dataset.
|
[
"Compute",
"the",
"score",
"related",
"to",
"the",
"quality",
"of",
"that",
"dataset",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L563-L595
|
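The scoring rules are pure dict arithmetic, so they can be exercised standalone. The following condensed restatement is intended to be behavior-equivalent to the method above (UNIT = 2 as in the source):

def compute_quality_score(quality, UNIT=2):
    score = 0
    if 'frequency' in quality:
        score += UNIT if quality['update_in'] < 0 else -UNIT
    if quality.get('tags_count', 0) > 3:
        score += UNIT
    if quality.get('description_length', 0) > 100:
        score += UNIT
    if 'has_resources' in quality:
        score += -UNIT if quality['has_only_closed_or_no_formats'] else UNIT
        score += -UNIT if quality['has_unavailable_resources'] else UNIT
    if 'discussions' in quality:
        score += -UNIT if quality['has_untreated_discussions'] else UNIT
    return max(score, 0)  # negative totals are clamped to zero, as above

sample = {'frequency': 'weekly', 'update_in': -2, 'tags_count': 5,
          'description_length': 240, 'has_resources': True,
          'has_only_closed_or_no_formats': False,
          'has_unavailable_resources': False}
print(compute_quality_score(sample))  # 10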
14,389
|
opendatateam/udata
|
udata/core/dataset/models.py
|
Dataset.add_resource
|
def add_resource(self, resource):
'''Perform an atomic prepend for a new resource'''
resource.validate()
self.update(__raw__={
'$push': {
'resources': {
'$each': [resource.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self,
resource_added=resource.id)
|
python
|
def add_resource(self, resource):
'''Perform an atomic prepend for a new resource'''
resource.validate()
self.update(__raw__={
'$push': {
'resources': {
'$each': [resource.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self,
resource_added=resource.id)
|
[
"def",
"add_resource",
"(",
"self",
",",
"resource",
")",
":",
"resource",
".",
"validate",
"(",
")",
"self",
".",
"update",
"(",
"__raw__",
"=",
"{",
"'$push'",
":",
"{",
"'resources'",
":",
"{",
"'$each'",
":",
"[",
"resource",
".",
"to_mongo",
"(",
")",
"]",
",",
"'$position'",
":",
"0",
"}",
"}",
"}",
")",
"self",
".",
"reload",
"(",
")",
"post_save",
".",
"send",
"(",
"self",
".",
"__class__",
",",
"document",
"=",
"self",
",",
"resource_added",
"=",
"resource",
".",
"id",
")"
] |
Perform an atomic prepend for a new resource
|
[
"Perform",
"an",
"atomic",
"prepend",
"for",
"a",
"new",
"resource"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L602-L615
|
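The $push / $each / $position combination is plain MongoDB update syntax, so the same atomic prepend can be sketched directly with pymongo; the database and collection names below are hypothetical, and a local MongoDB is assumed.

from pymongo import MongoClient

col = MongoClient().udata_demo.dataset  # hypothetical database/collection
col.insert_one({'_id': 1, 'resources': [{'title': 'old'}]})
# '$position': 0 makes '$push' prepend instead of append, in one atomic update.
col.update_one({'_id': 1}, {'$push': {'resources': {
    '$each': [{'title': 'new'}], '$position': 0}}})
print(col.find_one({'_id': 1})['resources'][0]['title'])  # new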
14,390
|
opendatateam/udata
|
udata/core/dataset/models.py
|
Dataset.update_resource
|
def update_resource(self, resource):
'''Perform an atomic update for an existing resource'''
index = self.resources.index(resource)
data = {
'resources__{index}'.format(index=index): resource
}
self.update(**data)
self.reload()
post_save.send(self.__class__, document=self)
|
python
|
def update_resource(self, resource):
'''Perform an atomic update for an existing resource'''
index = self.resources.index(resource)
data = {
'resources__{index}'.format(index=index): resource
}
self.update(**data)
self.reload()
post_save.send(self.__class__, document=self)
|
[
"def",
"update_resource",
"(",
"self",
",",
"resource",
")",
":",
"index",
"=",
"self",
".",
"resources",
".",
"index",
"(",
"resource",
")",
"data",
"=",
"{",
"'resources__{index}'",
".",
"format",
"(",
"index",
"=",
"index",
")",
":",
"resource",
"}",
"self",
".",
"update",
"(",
"*",
"*",
"data",
")",
"self",
".",
"reload",
"(",
")",
"post_save",
".",
"send",
"(",
"self",
".",
"__class__",
",",
"document",
"=",
"self",
")"
] |
Perform an atomic update for an existing resource
|
[
"Perform",
"an",
"atomic",
"update",
"for",
"an",
"existing",
"resource"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/models.py#L617-L625
|
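The keyword built above ('resources__{index}') is mongoengine's spelling of a positional update on 'resources.<index>'. A raw pymongo equivalent of the same in-place replacement (hypothetical collection, local MongoDB assumed):

from pymongo import MongoClient

col = MongoClient().udata_demo.dataset  # hypothetical database/collection
col.insert_one({'_id': 2, 'resources': [{'title': 'old'}]})
# mongoengine's resources__0 kwarg compiles to a $set on the 'resources.0' path.
col.update_one({'_id': 2}, {'$set': {'resources.0': {'title': 'edited'}}})
print(col.find_one({'_id': 2})['resources'][0]['title'])  # edited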
14,391
|
opendatateam/udata
|
udata/search/result.py
|
SearchResult.get_aggregation
|
def get_aggregation(self, name):
'''
Fetch an aggregation result given its name
As there is no way at this point to know the aggregation type
(i.e. bucket, pipeline or metric)
we guess it from the response attributes.
Only bucket and metric types are handled
'''
agg = self.aggregations[name]
if 'buckets' in agg:
return agg['buckets']
else:
return agg
|
python
|
def get_aggregation(self, name):
'''
Fetch an aggregation result given its name
As there is no way at this point to know the aggregation type
(i.e. bucket, pipeline or metric)
we guess it from the response attributes.
Only bucket and metric types are handled
'''
agg = self.aggregations[name]
if 'buckets' in agg:
return agg['buckets']
else:
return agg
|
[
"def",
"get_aggregation",
"(",
"self",
",",
"name",
")",
":",
"agg",
"=",
"self",
".",
"aggregations",
"[",
"name",
"]",
"if",
"'buckets'",
"in",
"agg",
":",
"return",
"agg",
"[",
"'buckets'",
"]",
"else",
":",
"return",
"agg"
] |
Fetch an aggregation result given its name
As there is no way at this point to know the aggregation type
(i.e. bucket, pipeline or metric)
we guess it from the response attributes.
Only bucket and metric types are handled
|
[
"Fetch",
"an",
"aggregation",
"result",
"given",
"its",
"name"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/result.py#L98-L111
|
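The bucket-versus-metric guess relies only on the response shape, which is easy to demonstrate with hand-built Elasticsearch-style payloads:

def get_aggregation(aggregations, name):
    agg = aggregations[name]
    # Bucket aggregations carry a 'buckets' list; metrics are flat dicts.
    return agg['buckets'] if 'buckets' in agg else agg

aggs = {
    'by_tag': {'buckets': [{'key': 'transport', 'doc_count': 12}]},
    'avg_reuses': {'value': 3.4},
}
print(get_aggregation(aggs, 'by_tag'))      # the buckets list
print(get_aggregation(aggs, 'avg_reuses'))  # the metric dict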
14,392
|
opendatateam/udata
|
udata/i18n.py
|
language
|
def language(lang_code):
'''Force a given language'''
ctx = None
if not request:
ctx = current_app.test_request_context()
ctx.push()
backup = g.get('lang_code')
g.lang_code = lang_code
refresh()
yield
g.lang_code = backup
if ctx:
ctx.pop()
refresh()
|
python
|
def language(lang_code):
'''Force a given language'''
ctx = None
if not request:
ctx = current_app.test_request_context()
ctx.push()
backup = g.get('lang_code')
g.lang_code = lang_code
refresh()
yield
g.lang_code = backup
if ctx:
ctx.pop()
refresh()
|
[
"def",
"language",
"(",
"lang_code",
")",
":",
"ctx",
"=",
"None",
"if",
"not",
"request",
":",
"ctx",
"=",
"current_app",
".",
"test_request_context",
"(",
")",
"ctx",
".",
"push",
"(",
")",
"backup",
"=",
"g",
".",
"get",
"(",
"'lang_code'",
")",
"g",
".",
"lang_code",
"=",
"lang_code",
"refresh",
"(",
")",
"yield",
"g",
".",
"lang_code",
"=",
"backup",
"if",
"ctx",
":",
"ctx",
".",
"pop",
"(",
")",
"refresh",
"(",
")"
] |
Force a given language
|
[
"Force",
"a",
"given",
"language"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L145-L158
|
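Stripped of Flask specifics, language() is the classic save/override/restore context-manager pattern. A minimal generic sketch, with a plain dict standing in for Flask's g:

from contextlib import contextmanager

state = {'lang_code': 'en'}  # stands in for Flask's g

@contextmanager
def language(lang_code):
    backup = state.get('lang_code')
    state['lang_code'] = lang_code   # force the language
    try:
        yield
    finally:
        state['lang_code'] = backup  # restore on exit

with language('fr'):
    print(state['lang_code'])  # fr
print(state['lang_code'])      # en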
14,393
|
opendatateam/udata
|
udata/i18n.py
|
redirect_to_lang
|
def redirect_to_lang(*args, **kwargs):
'''Redirect non lang-prefixed urls to default language.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs['lang_code'] = default_lang
return redirect(url_for(endpoint, **kwargs))
|
python
|
def redirect_to_lang(*args, **kwargs):
'''Redirect non lang-prefixed urls to default language.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs['lang_code'] = default_lang
return redirect(url_for(endpoint, **kwargs))
|
[
"def",
"redirect_to_lang",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"request",
".",
"endpoint",
".",
"replace",
"(",
"'_redirect'",
",",
"''",
")",
"kwargs",
"=",
"multi_to_dict",
"(",
"request",
".",
"args",
")",
"kwargs",
".",
"update",
"(",
"request",
".",
"view_args",
")",
"kwargs",
"[",
"'lang_code'",
"]",
"=",
"default_lang",
"return",
"redirect",
"(",
"url_for",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Redirect non lang-prefixed urls to default language.
|
[
"Redirect",
"non",
"lang",
"-",
"prefixed",
"urls",
"to",
"default",
"language",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L203-L209
|
14,394
|
opendatateam/udata
|
udata/i18n.py
|
redirect_to_unlocalized
|
def redirect_to_unlocalized(*args, **kwargs):
'''Redirect lang-prefixed urls to no prefixed URL.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs.pop('lang_code', None)
return redirect(url_for(endpoint, **kwargs))
|
python
|
def redirect_to_unlocalized(*args, **kwargs):
'''Redirect lang-prefixed urls to no prefixed URL.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs.pop('lang_code', None)
return redirect(url_for(endpoint, **kwargs))
|
[
"def",
"redirect_to_unlocalized",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"request",
".",
"endpoint",
".",
"replace",
"(",
"'_redirect'",
",",
"''",
")",
"kwargs",
"=",
"multi_to_dict",
"(",
"request",
".",
"args",
")",
"kwargs",
".",
"update",
"(",
"request",
".",
"view_args",
")",
"kwargs",
".",
"pop",
"(",
"'lang_code'",
",",
"None",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Redirect lang-prefixed urls to no prefixed URL.
|
[
"Redirect",
"lang",
"-",
"prefixed",
"urls",
"to",
"no",
"prefixed",
"URL",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L212-L218
|
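Both redirect helpers above share one move: strip the '_redirect' suffix from the endpoint name, merge the query args into the view args, then add or drop lang_code. The string and dict work can be exercised in isolation (endpoint and argument values are invented):

def unlocalized(endpoint, view_args, query_args):
    # Shared move: strip the '_redirect' suffix, merge args.
    kwargs = dict(query_args)
    kwargs.update(view_args)
    kwargs.pop('lang_code', None)  # redirect_to_lang would set it instead
    return endpoint.replace('_redirect', ''), kwargs

print(unlocalized('site.dashboard_redirect', {'slug': 'demo'}, {'page': '2'}))
# ('site.dashboard', {'page': '2', 'slug': 'demo'})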
14,395
|
opendatateam/udata
|
udata/i18n.py
|
PluggableDomain.get_translations
|
def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
ctx = stack.top
if ctx is None:
return NullTranslations()
locale = get_locale()
cache = self.get_translations_cache(ctx)
translations = cache.get(str(locale))
if translations is None:
translations_dir = self.get_translations_path(ctx)
translations = Translations.load(translations_dir, locale,
domain=self.domain)
# Load plugins translations
if isinstance(translations, Translations):
# Load core extensions translations
from wtforms.i18n import messages_path
wtforms_translations = Translations.load(messages_path(),
locale,
domain='wtforms')
translations.merge(wtforms_translations)
import flask_security
flask_security_translations = Translations.load(
join(flask_security.__path__[0], 'translations'),
locale,
domain='flask_security'
)
translations.merge(flask_security_translations)
for pkg in entrypoints.get_roots(current_app):
package = pkgutil.get_loader(pkg)
path = join(package.filename, 'translations')
domains = [f.replace(path, '').replace('.pot', '')[1:]
for f in iglob(join(path, '*.pot'))]
for domain in domains:
translations.merge(Translations.load(path, locale,
domain=domain))
# Allows the theme to provide or override translations
from . import theme
theme_translations_dir = join(theme.current.path, 'translations')
if exists(theme_translations_dir):
domain = theme.current.identifier
theme_translations = Translations.load(theme_translations_dir,
locale,
domain=domain)
translations.merge(theme_translations)
cache[str(locale)] = translations
return translations
|
python
|
def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
ctx = stack.top
if ctx is None:
return NullTranslations()
locale = get_locale()
cache = self.get_translations_cache(ctx)
translations = cache.get(str(locale))
if translations is None:
translations_dir = self.get_translations_path(ctx)
translations = Translations.load(translations_dir, locale,
domain=self.domain)
# Load plugins translations
if isinstance(translations, Translations):
# Load core extensions translations
from wtforms.i18n import messages_path
wtforms_translations = Translations.load(messages_path(),
locale,
domain='wtforms')
translations.merge(wtforms_translations)
import flask_security
flask_security_translations = Translations.load(
join(flask_security.__path__[0], 'translations'),
locale,
domain='flask_security'
)
translations.merge(flask_security_translations)
for pkg in entrypoints.get_roots(current_app):
package = pkgutil.get_loader(pkg)
path = join(package.filename, 'translations')
domains = [f.replace(path, '').replace('.pot', '')[1:]
for f in iglob(join(path, '*.pot'))]
for domain in domains:
translations.merge(Translations.load(path, locale,
domain=domain))
# Allows the theme to provide or override translations
from . import theme
theme_translations_dir = join(theme.current.path, 'translations')
if exists(theme_translations_dir):
domain = theme.current.identifier
theme_translations = Translations.load(theme_translations_dir,
locale,
domain=domain)
translations.merge(theme_translations)
cache[str(locale)] = translations
return translations
|
[
"def",
"get_translations",
"(",
"self",
")",
":",
"ctx",
"=",
"stack",
".",
"top",
"if",
"ctx",
"is",
"None",
":",
"return",
"NullTranslations",
"(",
")",
"locale",
"=",
"get_locale",
"(",
")",
"cache",
"=",
"self",
".",
"get_translations_cache",
"(",
"ctx",
")",
"translations",
"=",
"cache",
".",
"get",
"(",
"str",
"(",
"locale",
")",
")",
"if",
"translations",
"is",
"None",
":",
"translations_dir",
"=",
"self",
".",
"get_translations_path",
"(",
"ctx",
")",
"translations",
"=",
"Translations",
".",
"load",
"(",
"translations_dir",
",",
"locale",
",",
"domain",
"=",
"self",
".",
"domain",
")",
"# Load plugins translations",
"if",
"isinstance",
"(",
"translations",
",",
"Translations",
")",
":",
"# Load core extensions translations",
"from",
"wtforms",
".",
"i18n",
"import",
"messages_path",
"wtforms_translations",
"=",
"Translations",
".",
"load",
"(",
"messages_path",
"(",
")",
",",
"locale",
",",
"domain",
"=",
"'wtforms'",
")",
"translations",
".",
"merge",
"(",
"wtforms_translations",
")",
"import",
"flask_security",
"flask_security_translations",
"=",
"Translations",
".",
"load",
"(",
"join",
"(",
"flask_security",
".",
"__path__",
"[",
"0",
"]",
",",
"'translations'",
")",
",",
"locale",
",",
"domain",
"=",
"'flask_security'",
")",
"translations",
".",
"merge",
"(",
"flask_security_translations",
")",
"for",
"pkg",
"in",
"entrypoints",
".",
"get_roots",
"(",
"current_app",
")",
":",
"package",
"=",
"pkgutil",
".",
"get_loader",
"(",
"pkg",
")",
"path",
"=",
"join",
"(",
"package",
".",
"filename",
",",
"'translations'",
")",
"domains",
"=",
"[",
"f",
".",
"replace",
"(",
"path",
",",
"''",
")",
".",
"replace",
"(",
"'.pot'",
",",
"''",
")",
"[",
"1",
":",
"]",
"for",
"f",
"in",
"iglob",
"(",
"join",
"(",
"path",
",",
"'*.pot'",
")",
")",
"]",
"for",
"domain",
"in",
"domains",
":",
"translations",
".",
"merge",
"(",
"Translations",
".",
"load",
"(",
"path",
",",
"locale",
",",
"domain",
"=",
"domain",
")",
")",
"# Allows the theme to provide or override translations",
"from",
".",
"import",
"theme",
"theme_translations_dir",
"=",
"join",
"(",
"theme",
".",
"current",
".",
"path",
",",
"'translations'",
")",
"if",
"exists",
"(",
"theme_translations_dir",
")",
":",
"domain",
"=",
"theme",
".",
"current",
".",
"identifier",
"theme_translations",
"=",
"Translations",
".",
"load",
"(",
"theme_translations_dir",
",",
"locale",
",",
"domain",
"=",
"domain",
")",
"translations",
".",
"merge",
"(",
"theme_translations",
")",
"cache",
"[",
"str",
"(",
"locale",
")",
"]",
"=",
"translations",
"return",
"translations"
] |
Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
|
[
"Returns",
"the",
"correct",
"gettext",
"translations",
"that",
"should",
"be",
"used",
"for",
"this",
"request",
".",
"This",
"will",
"never",
"fail",
"and",
"return",
"a",
"dummy",
"translation",
"object",
"if",
"used",
"outside",
"of",
"the",
"request",
"or",
"if",
"a",
"translation",
"cannot",
"be",
"found",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L37-L96
|
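The interesting part is the merge cascade: a base catalog absorbs catalogs from core extensions, plugins and the theme, with later merges overriding earlier messages. A reduced sketch against Babel alone (directory layout hypothetical):

from babel.support import NullTranslations, Translations

def load_merged(base_dir, extra_dirs, locale='fr', domain='udata'):
    translations = Translations.load(base_dir, [locale], domain=domain)
    if not isinstance(translations, Translations):
        return NullTranslations()  # no catalog found for this locale
    for path in extra_dirs:
        # merge() folds another catalog in; later entries win on conflicts.
        translations.merge(Translations.load(path, [locale], domain=domain))
    return translations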
14,396
|
opendatateam/udata
|
udata/core/discussions/models.py
|
Discussion.person_involved
|
def person_involved(self, person):
"""Return True if the given person has been involved in the
discussion, False otherwise.
"""
return any(message.posted_by == person for message in self.discussion)
|
python
|
def person_involved(self, person):
"""Return True if the given person has been involved in the
discussion, False otherwise.
"""
return any(message.posted_by == person for message in self.discussion)
|
[
"def",
"person_involved",
"(",
"self",
",",
"person",
")",
":",
"return",
"any",
"(",
"message",
".",
"posted_by",
"==",
"person",
"for",
"message",
"in",
"self",
".",
"discussion",
")"
] |
Return True if the given person has been involved in the
discussion, False otherwise.
|
[
"Return",
"True",
"if",
"the",
"given",
"person",
"has",
"been",
"involved",
"in",
"the"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/models.py#L37-L42
|
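The membership test is a single any() over the message list; with plain stand-ins:

from types import SimpleNamespace

messages = [SimpleNamespace(posted_by='alice'), SimpleNamespace(posted_by='bob')]
print(any(m.posted_by == 'alice' for m in messages))  # True
print(any(m.posted_by == 'carol' for m in messages))  # False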
14,397
|
opendatateam/udata
|
udata/linkchecker/checker.py
|
is_ignored
|
def is_ignored(resource):
'''Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS'''
ignored_domains = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
url = resource.url
if url:
parsed_url = urlparse(url)
return parsed_url.netloc in ignored_domains
return True
|
python
|
def is_ignored(resource):
'''Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS'''
ignored_domains = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
url = resource.url
if url:
parsed_url = urlparse(url)
return parsed_url.netloc in ignored_domains
return True
|
[
"def",
"is_ignored",
"(",
"resource",
")",
":",
"ignored_domains",
"=",
"current_app",
".",
"config",
"[",
"'LINKCHECKING_IGNORE_DOMAINS'",
"]",
"url",
"=",
"resource",
".",
"url",
"if",
"url",
":",
"parsed_url",
"=",
"urlparse",
"(",
"url",
")",
"return",
"parsed_url",
".",
"netloc",
"in",
"ignored_domains",
"return",
"True"
] |
Check if the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS
|
[
"Check",
"of",
"the",
"resource",
"s",
"URL",
"is",
"part",
"of",
"LINKCHECKING_IGNORE_DOMAINS"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/linkchecker/checker.py#L25-L32
|
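The domain test compares the URL's network location against a configured blocklist, and a resource with no URL at all is treated as ignored. Standalone, with an invented blocklist:

from urllib.parse import urlparse

IGNORED = {'example.com'}  # stand-in for LINKCHECKING_IGNORE_DOMAINS

def is_ignored(url):
    if url:
        return urlparse(url).netloc in IGNORED
    return True  # no URL: nothing to check, so skip it

print(is_ignored('https://example.com/data.csv'))  # True
print(is_ignored('https://data.gouv.fr/x.csv'))    # False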
14,398
|
opendatateam/udata
|
udata/linkchecker/checker.py
|
check_resource
|
def check_resource(resource):
'''
Check a resource availability against a linkchecker backend
The linkchecker used can be configured on a resource basis by setting
the `resource.extras['check:checker']` attribute with a key that points
to a valid `udata.linkcheckers` entrypoint. If not set, it will
fallback on the default linkchecker defined by the configuration variable
`LINKCHECKING_DEFAULT_LINKCHECKER`.
Returns
-------
dict or (dict, int)
Check results dict and status code (if error).
'''
linkchecker_type = resource.extras.get('check:checker')
LinkChecker = get_linkchecker(linkchecker_type)
if not LinkChecker:
return {'error': 'No linkchecker configured.'}, 503
if is_ignored(resource):
return dummy_check_response()
result = LinkChecker().check(resource)
if not result:
return {'error': 'No response from linkchecker'}, 503
elif result.get('check:error'):
return {'error': result['check:error']}, 500
elif not result.get('check:status'):
return {'error': 'No status in response from linkchecker'}, 503
# store the check result in the resource's extras
# XXX maybe this logic should be in the `Resource` model?
previous_status = resource.extras.get('check:available')
check_keys = _get_check_keys(result, resource, previous_status)
resource.extras.update(check_keys)
resource.save(signal_kwargs={'ignores': ['post_save']}) # Prevent signal triggering on dataset
return result
|
python
|
def check_resource(resource):
'''
Check a resource availability against a linkchecker backend
The linkchecker used can be configured on a resource basis by setting
the `resource.extras['check:checker']` attribute with a key that points
to a valid `udata.linkcheckers` entrypoint. If not set, it will
fall back on the default linkchecker defined by the configuration variable
`LINKCHECKING_DEFAULT_LINKCHECKER`.
Returns
-------
dict or (dict, int)
Check results dict and status code (if error).
'''
linkchecker_type = resource.extras.get('check:checker')
LinkChecker = get_linkchecker(linkchecker_type)
if not LinkChecker:
return {'error': 'No linkchecker configured.'}, 503
if is_ignored(resource):
return dummy_check_response()
result = LinkChecker().check(resource)
if not result:
return {'error': 'No response from linkchecker'}, 503
elif result.get('check:error'):
return {'error': result['check:error']}, 500
elif not result.get('check:status'):
return {'error': 'No status in response from linkchecker'}, 503
# store the check result in the resource's extras
# XXX maybe this logic should be in the `Resource` model?
previous_status = resource.extras.get('check:available')
check_keys = _get_check_keys(result, resource, previous_status)
resource.extras.update(check_keys)
resource.save(signal_kwargs={'ignores': ['post_save']}) # Prevent signal triggering on dataset
return result
|
[
"def",
"check_resource",
"(",
"resource",
")",
":",
"linkchecker_type",
"=",
"resource",
".",
"extras",
".",
"get",
"(",
"'check:checker'",
")",
"LinkChecker",
"=",
"get_linkchecker",
"(",
"linkchecker_type",
")",
"if",
"not",
"LinkChecker",
":",
"return",
"{",
"'error'",
":",
"'No linkchecker configured.'",
"}",
",",
"503",
"if",
"is_ignored",
"(",
"resource",
")",
":",
"return",
"dummy_check_response",
"(",
")",
"result",
"=",
"LinkChecker",
"(",
")",
".",
"check",
"(",
"resource",
")",
"if",
"not",
"result",
":",
"return",
"{",
"'error'",
":",
"'No response from linkchecker'",
"}",
",",
"503",
"elif",
"result",
".",
"get",
"(",
"'check:error'",
")",
":",
"return",
"{",
"'error'",
":",
"result",
"[",
"'check:error'",
"]",
"}",
",",
"500",
"elif",
"not",
"result",
".",
"get",
"(",
"'check:status'",
")",
":",
"return",
"{",
"'error'",
":",
"'No status in response from linkchecker'",
"}",
",",
"503",
"# store the check result in the resource's extras",
"# XXX maybe this logic should be in the `Resource` model?",
"previous_status",
"=",
"resource",
".",
"extras",
".",
"get",
"(",
"'check:available'",
")",
"check_keys",
"=",
"_get_check_keys",
"(",
"result",
",",
"resource",
",",
"previous_status",
")",
"resource",
".",
"extras",
".",
"update",
"(",
"check_keys",
")",
"resource",
".",
"save",
"(",
"signal_kwargs",
"=",
"{",
"'ignores'",
":",
"[",
"'post_save'",
"]",
"}",
")",
"# Prevent signal triggering on dataset",
"return",
"result"
] |
Check a resource availability against a linkchecker backend
The linkchecker used can be configured on a resource basis by setting
the `resource.extras['check:checker']` attribute with a key that points
to a valid `udata.linkcheckers` entrypoint. If not set, it will
fall back on the default linkchecker defined by the configuration variable
`LINKCHECKING_DEFAULT_LINKCHECKER`.
Returns
-------
dict or (dict, int)
Check results dict and status code (if error).
|
[
"Check",
"a",
"resource",
"availability",
"against",
"a",
"linkchecker",
"backend"
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/linkchecker/checker.py#L40-L74
|
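The error cascade (no checker configured → 503, checker error → 500, missing status → 503) is independent of any particular backend. A sketch with stub checkers, stripped of the extras bookkeeping and the save call:

def check_resource(resource, checker):
    # Stub version of the cascade; udata resolves `checker` from an entrypoint.
    if checker is None:
        return {'error': 'No linkchecker configured.'}, 503
    result = checker(resource)
    if not result:
        return {'error': 'No response from linkchecker'}, 503
    if result.get('check:error'):
        return {'error': result['check:error']}, 500
    if not result.get('check:status'):
        return {'error': 'No status in response from linkchecker'}, 503
    return result

ok = lambda r: {'check:status': 200, 'check:available': True}
silent = lambda r: {}
print(check_resource('https://x.test/f.csv', ok))      # the result dict
print(check_resource('https://x.test/f.csv', silent))  # ({'error': ...}, 503)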
14,399
|
opendatateam/udata
|
udata/models/owned.py
|
owned_pre_save
|
def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erase it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner
|
python
|
def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erases it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner
|
[
"def",
"owned_pre_save",
"(",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"document",
",",
"Owned",
")",
":",
"return",
"changed_fields",
"=",
"getattr",
"(",
"document",
",",
"'_changed_fields'",
",",
"[",
"]",
")",
"if",
"'organization'",
"in",
"changed_fields",
":",
"if",
"document",
".",
"owner",
":",
"# Change from owner to organization",
"document",
".",
"_previous_owner",
"=",
"document",
".",
"owner",
"document",
".",
"owner",
"=",
"None",
"else",
":",
"# Change from org to another",
"# Need to fetch previous value in base",
"original",
"=",
"sender",
".",
"objects",
".",
"only",
"(",
"'organization'",
")",
".",
"get",
"(",
"pk",
"=",
"document",
".",
"pk",
")",
"document",
".",
"_previous_owner",
"=",
"original",
".",
"organization",
"elif",
"'owner'",
"in",
"changed_fields",
":",
"if",
"document",
".",
"organization",
":",
"# Change from organization to owner",
"document",
".",
"_previous_owner",
"=",
"document",
".",
"organization",
"document",
".",
"organization",
"=",
"None",
"else",
":",
"# Change from owner to another",
"# Need to fetch previous value in base",
"original",
"=",
"sender",
".",
"objects",
".",
"only",
"(",
"'owner'",
")",
".",
"get",
"(",
"pk",
"=",
"document",
".",
"pk",
")",
"document",
".",
"_previous_owner",
"=",
"original",
".",
"owner"
] |
Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erases it.
|
[
"Owned",
"mongoengine",
".",
"pre_save",
"signal",
"handler",
"Need",
"to",
"fetch",
"original",
"owner",
"before",
"the",
"new",
"one",
"erase",
"it",
"."
] |
f016585af94b0ff6bd73738c700324adc8ba7f8f
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/owned.py#L41-L68
|
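The handler's branching covers four ownership transitions: owner to organization, organization to organization, organization to owner, and owner to owner. A sketch of the same case analysis with plain values, where a dict stands in for the database fetch on the "same kind of owner changed" branches:

DB = {'doc1': {'owner': None, 'organization': 'org-a'}}

def previous_owner(pk, changed_fields, owner, organization):
    # Mirrors the four branches of owned_pre_save above.
    if 'organization' in changed_fields:
        # owner -> organization, or organization -> organization
        return owner if owner else DB[pk]['organization']
    if 'owner' in changed_fields:
        # organization -> owner, or owner -> owner
        return organization if organization else DB[pk]['owner']
    return None

# org-a -> org-b: the previous owner must be fetched from the stored document.
print(previous_owner('doc1', ['organization'], None, 'org-b'))  # org-a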