id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23,600 | ext.py | zenodo_zenodo/zenodo/modules/spam/ext.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2020 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Support and contact module for Zenodo."""
from __future__ import absolute_import, print_function
import joblib
from celery.signals import celeryd_init
from flask import Blueprint, current_app
from . import config, current_spam
from .utils import DomainList
class ZenodoSpam(object):
    """Zenodo spam detection extension."""

    @property
    def model(self):
        """Lazily load and cache the spam detection model.

        Returns ``None`` when ``ZENODO_SPAM_MODEL_LOCATION`` is unset.
        """
        if not getattr(self, '_model', None):
            location = current_app.config.get('ZENODO_SPAM_MODEL_LOCATION')
            self._model = joblib.load(location) if location else None
        return self._model

    def __init__(self, app=None):
        """Extension initialization."""
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Flask application initialization."""
        self.app = app
        self.init_config(app)
        # Expose the bundled email templates to the Jinja template loader.
        email_templates = Blueprint(
            "zenodo_spam_email_templates",
            __name__, template_folder="templates",
        )
        app.register_blueprint(email_templates)
        # Domain lists used to pre-screen email domains at registration.
        self.domain_forbiddenlist = DomainList(
            app.config['ZENODO_SPAM_DOMAINS_FORBIDDEN_PATH']
        )
        self.domain_safelist = DomainList(
            app.config['ZENODO_SPAM_DOMAINS_SAFELIST_PATH']
        )
        app.extensions['zenodo-spam'] = self

    @staticmethod
    def init_config(app):
        """Copy this module's ``ZENODO_SPAM_*`` defaults into the app config."""
        for key in dir(config):
            if key.startswith('ZENODO_SPAM_'):
                app.config.setdefault(key, getattr(config, key))
@celeryd_init.connect
def warm_up_cache(instance, **kwargs):
    """Preload the spam model when a Celery worker starts."""
    flask_app = instance.app.flask_app
    if not flask_app.config.get('ZENODO_SPAM_MODEL_PRELOAD'):
        return
    with flask_app.app_context():
        # Touching the property triggers the lazy joblib load.
        current_spam.model
| 2,918 | Python | .py | 76 | 32.210526 | 76 | 0.671146 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,601 | config.py | zenodo_zenodo/zenodo/modules/spam/config.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2020 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo Spam."""
from flask import abort, current_app, flash
from flask_login import logout_user
from invenio_accounts.models import User
from invenio_accounts.sessions import delete_user_sessions
from invenio_db import db
from zenodo.modules.spam.utils import send_spam_admin_email, \
send_spam_user_email
def default_spam_handling(deposit=None, community=None):
    """Default actions to counter spam detected record or community.

    Deactivates the offending user's account, logs them out, marks the
    community (if any) as spam and deletes it, sends notification emails,
    and aborts the request with a 400 error.

    :param deposit: Deposit detected as spam (its first owner is blocked).
    :param community: Community detected as spam (its owner is blocked).
    """
    if deposit:
        user = User.query.get(deposit['_deposit']['owners'][0])
    if community:
        user = community.owner
        # Tag the description so the spam content is identifiable later.
        community.description = '--SPAM--' + community.description
        if community.oaiset:
            db.session.delete(community.oaiset)
        community.delete()
    user.active = False
    delete_user_sessions(user)
    logout_user()
    db.session.add(user)
    db.session.commit()
    send_spam_user_email(user.email, deposit=deposit, community=community)
    if current_app.config['ZENODO_SPAM_EMAIL_ADMINS']:
        # Forward the community as well, so that admins get full context
        # for community spam (previously only the deposit was passed).
        send_spam_admin_email(user, deposit=deposit, community=community)
    if deposit:
        error_message = \
            ('Our spam protection system has classified your upload as a '
             'potential spam attempt. As a preventive measure and due to '
             'significant increase in spam, we have therefore deactivated '
             'your user account and logged you out of Zenodo. Your upload has '
             'not been published. If you think this is a mistake, please '
             'contact our support.')
    if community:
        error_message = \
            ('Our spam protection system has classified your community as a '
             'potential spam attempt. As a preventive measure and due to '
             'significant increase in spam, we have therefore deactivated '
             'your user account and logged you out of Zenodo. Your community '
             'has not been created. If you think this is a mistake, please '
             'contact our support.')
    flash(error_message, category='warning')
    abort(400, error_message)
# Callable invoked when deposit/community metadata is detected as spam
ZENODO_SPAM_HANDLING_ACTIONS = default_spam_handling
# Filesystem path to the serialized (joblib) spam model used for predictions
ZENODO_SPAM_MODEL_LOCATION = None
# Probability threshold over which a record/community is considered spam
ZENODO_SPAM_THRESHOLD = 0.5
# Whether to email admins about automatically blocked users
ZENODO_SPAM_EMAIL_ADMINS = True
# Timeout (seconds) for the async spam check task before the check is bypassed
ZENODO_SPAM_CHECK_TIMEOUT = 8
# Users with more than this many existing records/communities skip handling
ZENODO_SPAM_SKIP_CHECK_NUM = 5
# Preload the spam model on Celery worker initialization (see ext.py)
ZENODO_SPAM_MODEL_PRELOAD = False
ZENODO_SPAM_DOMAINS_FORBIDDEN_PATH = None
"""Path to a file with the list of forbidden email domains."""
ZENODO_SPAM_DOMAINS_SAFELIST_PATH = None
"""Path to a file with the list of safelisted domains."""
| 3,913 | Python | .py | 85 | 41.447059 | 79 | 0.732494 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,602 | tasks.py | zenodo_zenodo/zenodo/modules/spam/tasks.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2020 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Forms for spam deletion module."""
from __future__ import absolute_import, print_function
from celery import shared_task
from flask import current_app
from invenio_accounts.models import User
from invenio_accounts.proxies import current_accounts
from invenio_communities.models import Community
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_records.models import RecordMetadata
from invenio_search.api import RecordsSearch
from zenodo.modules.spam import current_spam
@shared_task(ignore_result=False)
def check_metadata_for_spam(community_id=None, dep_id=None):
    """Checks metadata of the provided community/deposit for spam content.

    :param community_id: Id of the community to check.
    :param dep_id: UUID of the deposit's ``RecordMetadata`` row to check.
    :returns: Spam probability in ``[0, 1]``; ``0`` when no model is
        configured or when no object id was provided.
    """
    if not current_app.config.get('ZENODO_SPAM_MODEL_LOCATION'):
        return 0
    # Default result so calling without any id does not raise NameError.
    spam_proba = 0
    if community_id:
        community = Community.query.get(community_id)
        spam_proba = current_spam.model.predict_proba(
            [community.title + ' ' + community.description])[0][1]
    if dep_id:
        deposit = RecordMetadata.query.get(dep_id)
        spam_proba = current_spam.model.predict_proba(
            [deposit.json['title'] + ' ' + deposit.json['description']])[0][1]
    return spam_proba
@shared_task(ignore_result=False)
def delete_record(record_uuid, reason, user):
    """Run delete_record as a task."""
    # Imported lazily to avoid a circular import at module load time.
    from zenodo.modules.deposit.utils import delete_record as _delete_record
    _delete_record(record_uuid, reason, user)
@shared_task(ignore_result=False)
def delete_spam_user(user_id, deleted_by):
    """Deletes a user and marks their records and communities as spam.

    :param user_id: Id of the user to treat as a spammer.
    :param deleted_by: Id of the (admin) user performing the deletion,
        recorded on each deleted record.
    """
    user = User.query.get(user_id)
    communities = Community.query.filter_by(id_user=user.id)
    rs = RecordsSearch(index='records').filter('term', owners=user.id)
    for c in communities:
        if not c.deleted_at:
            # Tag the description so the spam content is identifiable later.
            if not c.description.startswith('--SPAM--'):
                c.description = '--SPAM--' + c.description
            if c.oaiset:
                db.session.delete(c.oaiset)
            c.delete()
    current_accounts.datastore.deactivate_user(user)
    db.session.commit()
    # Materialize the ids before dispatching: the delete tasks modify the
    # index that ``rs.scan()`` is iterating over.
    record_ids = [record.meta.id for record in rs.scan()]
    for record_id in record_ids:
        delete_record.delay(record_id, 'spam', deleted_by)
@shared_task(ignore_result=False)
def reindex_user_records(user_id):
    """Reindex a user's records."""
    search = RecordsSearch(index='records').filter(
        'term', owners=user_id).source(False)
    indexer = RecordIndexer()
    threshold = current_app.config.get(
        'ZENODO_RECORDS_SAFELIST_INDEX_THRESHOLD', 1000)
    hit_ids = (hit.meta.id for hit in search.scan())
    if search.count() < threshold:
        # Few records: index each one synchronously.
        for hit_id in list(hit_ids):
            indexer.index_by_id(hit_id)
    else:
        # Many records: hand them to the bulk indexing queue instead.
        indexer.bulk_index(hit_ids)
| 3,816 | Python | .py | 88 | 38.647727 | 78 | 0.716133 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,603 | utils.py | zenodo_zenodo/zenodo/modules/spam/utils.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2020 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Forms for spam deletion module."""
from __future__ import absolute_import, print_function
from celery.exceptions import TimeoutError
from elasticsearch_dsl import Q
from flask import current_app, render_template
from flask_babelex import gettext as _
from flask_mail import Message
from flask_principal import ActionNeed
from flask_security import current_user
from invenio_access import Permission
from invenio_communities.models import Community
from invenio_mail.tasks import send_email
from invenio_search.api import RecordsSearch
from werkzeug.exceptions import HTTPException
from zenodo.modules.spam.models import SafelistEntry
from zenodo.modules.spam.tasks import check_metadata_for_spam
def is_user_safelisted(user):
    """Check if user is safelisted."""
    return SafelistEntry.query.get(user.id) is not None
def send_spam_user_email(recipient, deposit=None, community=None):
    """Notify a blocked user that their content was flagged as spam."""
    body = render_template(
        "zenodo_spam/email/spam_user_email.tpl",
        community=community,
        deposit=deposit
    )
    msg = Message(
        _("Your Zenodo activity has been automatically marked as Spam."),
        sender=current_app.config.get('SUPPORT_EMAIL'),
        recipients=[recipient],
        body=body,
    )
    send_email.delay(msg.__dict__)
def send_spam_admin_email(user, deposit=None, community=None):
    """Notify the admins about a spam detection."""
    body = render_template(
        "zenodo_spam/email/spam_admin_email.tpl",
        user=user,
        deposit=deposit,
        community=community
    )
    msg = Message(
        _("Zenodo activity marked as spam."),
        sender=current_app.config.get('SUPPORT_EMAIL'),
        recipients=[current_app.config.get('ZENODO_ADMIN_EMAIL')],
        body=body,
    )
    send_email.delay(msg.__dict__)
def check_and_handle_spam(community=None, deposit=None, retry=True):
    """Checks community/deposit metadata for spam.

    Skips the check for safelisted users and admins. The classification
    runs as a Celery task with a timeout; above the configured threshold
    (and unless the user already has enough existing content) the
    configured handling action is invoked, which may abort the request.
    Any non-HTTP error is swallowed so the check never blocks publishing.

    :param community: Newly created community to check (optional).
    :param deposit: Deposit being published to check (optional).
    :param retry: Whether to retry once if the check task times out.
    """
    try:
        is_safelisted = is_user_safelisted(current_user)
        is_admin = Permission(ActionNeed('admin-access')).can()
        if not (is_safelisted or is_admin):
            if current_app.config.get('ZENODO_SPAM_MODEL_LOCATION'):
                # NOTE(review): exactly one of community/deposit is expected
                # from callers; if neither is given, ``task`` and ``user_id``
                # below would be unbound — confirm against call sites.
                if community:
                    task = check_metadata_for_spam.delay(
                        community_id=community.id)
                    user_id = community.id_user
                if deposit:
                    task = check_metadata_for_spam.delay(dep_id=str(deposit.id))
                    user_id = deposit['owners'][0]
                spam_proba = task.get(timeout=current_app.config[
                    'ZENODO_SPAM_CHECK_TIMEOUT'])
            else:
                spam_proba = 0
            if spam_proba > current_app.config['ZENODO_SPAM_THRESHOLD']:
                # Count the user's pre-existing content; established users
                # are exempted from automatic handling below.
                user_records = RecordsSearch(index='records').query(
                    Q('query_string', query="owners:{}".format(
                        user_id))).count()
                user_communities = Community.query.filter_by(
                    id_user=user_id).count()
                if community:
                    # Ignore the newly created community
                    user_communities = user_communities - 1
                current_app.logger.warning(
                    u'Found spam upload',
                    extra={
                        'depid': deposit.id if deposit else None,
                        'comid': community.id if community else None
                    }
                )
                if not (user_records + user_communities >
                        current_app.config['ZENODO_SPAM_SKIP_CHECK_NUM']):
                    current_app.config['ZENODO_SPAM_HANDLING_ACTIONS'](
                        community=community, deposit=deposit)
    except HTTPException:
        # Raised by the handling action (``abort``) — must propagate.
        raise
    except TimeoutError:
        if retry:
            # Retry exactly once: the recursive call passes retry=False.
            check_and_handle_spam(
                community=community, deposit=deposit, retry=False)
        else:
            current_app.logger.exception(
                u'Could not check for spam',
                extra={
                    'depid': deposit.id if deposit else None,
                    'comid': community.id if community else None
                }
            )
    except Exception:
        # Best-effort: never let a failing spam check break publishing.
        current_app.logger.exception(
            u'Could not check for spam',
            extra={
                'depid': deposit.id if deposit else None,
                'comid': community.id if community else None
            }
        )
class DomainList:
    """Domain status list backed by a newline-delimited file."""

    def __init__(self, domains_filepath):
        """Load the domain index.

        :param domains_filepath: Path to a file with one domain per line,
            or a falsy value for an empty list.
        """
        self._index = set()
        if domains_filepath:
            # Stream the file line-by-line instead of reading it fully and
            # building a throwaway list of ``set.add`` return values; also
            # skip blank lines so '' never matches.
            with open(domains_filepath) as domains_file:
                for line in domains_file:
                    domain = line.rstrip()
                    if domain:
                        self._index.add(domain)

    def matches(self, domain):
        """Return ``True`` if ``domain`` is present in the list."""
        return domain in self._index
| 6,010 | Python | .py | 143 | 32.776224 | 80 | 0.631174 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,604 | __init__.py | zenodo_zenodo/zenodo/modules/spam/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2017-2020 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Spam module."""
from flask import current_app
from werkzeug.local import LocalProxy
# Proxy to the ZenodoSpam extension registered on the current Flask app
# (set up in ext.py under the 'zenodo-spam' key).
current_spam = LocalProxy(
    lambda: current_app.extensions['zenodo-spam']
)
| 1,141 | Python | .py | 29 | 38.103448 | 76 | 0.774572 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,605 | proxies.py | zenodo_zenodo/zenodo/modules/spam/proxies.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2022 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Proxies for Zenodo spam module."""
from __future__ import absolute_import, print_function
from flask import current_app
from werkzeug.local import LocalProxy
# These proxies resolve lazily at call time, once the ZenodoSpam extension
# has been registered on the running Flask application.
current_domain_forbiddenlist = LocalProxy(
    lambda: current_app.extensions['zenodo-spam'].domain_forbiddenlist)
"""Proxy to the domain forbiddenlist for user registration."""
current_domain_safelist = LocalProxy(
    lambda: current_app.extensions['zenodo-spam'].domain_safelist)
"""Proxy to the domain safelist for user registration."""
| 1,474 | Python | .py | 33 | 43.272727 | 76 | 0.779944 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,606 | forms.py | zenodo_zenodo/zenodo/modules/spam/forms.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Forms for spam deletion module."""
from __future__ import absolute_import, print_function
from flask_babelex import lazy_gettext as _
from flask_wtf import Form
from wtforms import BooleanField, SubmitField
class DeleteSpamForm(Form):
    """Form for deleting spam.

    All checkboxes default to True, i.e. a full cleanup of the user's
    content and account.
    """
    # Checkbox: delete every community owned by the user.
    remove_all_communities = BooleanField(
        _('Remove the user communities?'),
        default=True,
        description=_('Will remove all communities owned by the user.'),
    )
    # Checkbox: delete every record owned by the user.
    remove_all_records = BooleanField(
        _('Remove all user records?'),
        default=True,
        description=_('Will remove all records owned by the user.'),
    )
    # Checkbox: deactivate (block) the user account.
    deactivate_user = BooleanField(
        _('Deactivate the user account?'),
        default=True,
        description=_('Will deactivate the user account.'),
    )
    delete = SubmitField(_("Permanently delete"))
| 1,827 | Python | .py | 46 | 36.26087 | 76 | 0.72912 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,607 | views.py | zenodo_zenodo/zenodo/modules/spam/views.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""View for deletion of spam content."""
from __future__ import absolute_import, print_function, unicode_literals
from collections import Counter
from datetime import datetime, timedelta
from itertools import islice
import sqlalchemy as sa
from elasticsearch_dsl import Q
from flask import Blueprint, abort, current_app, flash, jsonify, redirect, \
render_template, request, url_for
from flask_login import login_required
from flask_menu import current_menu
from flask_principal import ActionNeed
from flask_security import current_user
from invenio_access.permissions import Permission
from invenio_accounts.models import User
from invenio_accounts.proxies import current_accounts
from invenio_admin.views import _has_admin_access
from invenio_communities.models import Community
from invenio_db import db
from invenio_search.api import RecordsSearch
from zenodo.modules.deposit.utils import delete_record
from zenodo.modules.spam.forms import DeleteSpamForm
from zenodo.modules.spam.models import SafelistEntry
from zenodo.modules.spam.proxies import current_domain_forbiddenlist, \
current_domain_safelist
from zenodo.modules.spam.tasks import delete_spam_user, reindex_user_records
from zenodo.modules.spam.utils import is_user_safelisted
# Blueprint exposing the spam admin views under the /spam URL prefix.
blueprint = Blueprint(
    'zenodo_spam',
    __name__,
    url_prefix='/spam',
    template_folder='templates',
)
@blueprint.before_app_first_request
def init_menu():
    """Register the safelisting entry in the admin settings menu."""
    submenu = current_menu.submenu("settings.safelisting")
    submenu.register(
        "zenodo_spam.safelist_admin",
        '<i class="fa fa-check fa-fw"></i> Safelisting',
        visible_when=_has_admin_access,
        order=110,
    )
@blueprint.route('/<int:user_id>/delete/', methods=['GET', 'POST'])
@login_required
def delete(user_id):
    """Delete spam.

    Admin-only view. On GET, renders an overview of the user's content;
    on valid POST, removes communities/records and deactivates the
    account according to the selected checkboxes.
    """
    # Only admin can access this view
    if not Permission(ActionNeed('admin-access')).can():
        abort(403)
    user = User.query.get(user_id)
    deleteform = DeleteSpamForm()
    communities = Community.query.filter_by(id_user=user.id)
    rs = RecordsSearch(index='records').query(
        Q('query_string', query="owners: {0}".format(user.id)))
    rec_count = rs.count()
    ctx = {
        'user': user,
        'form': deleteform,
        'is_new': False,
        'communities': communities,
        'rec_count': rec_count,
    }
    if deleteform.validate_on_submit():
        if deleteform.remove_all_communities.data:
            for c in communities:
                if not c.deleted_at:
                    # Tag the description so spam content stays identifiable.
                    if not c.description.startswith('--SPAM--'):
                        c.description = '--SPAM--' + c.description
                    if c.oaiset:
                        db.session.delete(c.oaiset)
                    c.delete()
            db.session.commit()
        if deleteform.deactivate_user.data:
            current_accounts.datastore.deactivate_user(user)
            SafelistEntry.remove_by_user_id(user.id)
            db.session.commit()
        # delete_record function commits the session internally
        # for each deleted record
        if deleteform.remove_all_records.data:
            record_ids = [record.meta.id for record in rs.scan()]
            for record_id in record_ids:
                delete_record(record_id, 'spam', int(current_user.get_id()))
        flash("Spam removed", category='success')
        return redirect(url_for('.delete', user_id=user.id))
    else:
        # GET (or invalid form): preview at most 10 of the user's records.
        records = islice(rs.scan(), 10)
        ctx.update(records=records)
        return render_template('zenodo_spam/delete.html', **ctx)
def _normalize_email(email):
email = email.lower()
username, domain = email.rsplit("@", 1)
no_dots_username = username.replace(".", "")
if domain == "googlemail.com":
domain = "gmail.com"
return no_dots_username + "@" + domain
def _get_domain(email):
return email[email.index("@") :].lower()
def _evaluate_user_domain(email, normalized_emails, email_domain_count):
    """Classify the email's domain and attach account counts.

    Returns a dict with the bare domain, active/total account counts and
    a ``status`` of ``forbidden``, ``safe`` or ``unclear``.
    """
    domain_with_at = _get_domain(email)
    bare_domain = domain_with_at[1:]
    counts = email_domain_count[bare_domain]
    domain_info = {
        'domain': bare_domain,
        'active_count': counts['active'],
        'total_count': counts['total'],
    }
    # Heuristic: several accounts behind the same normalized gmail address.
    flagged_domain = domain_with_at in ["@gmail.com"]
    above_threshold = normalized_emails.get(_normalize_email(email), 0) > 3
    if current_domain_forbiddenlist.matches(domain_with_at):
        domain_info['status'] = "forbidden"
    elif flagged_domain and above_threshold:
        domain_info['status'] = "forbidden"
    elif current_domain_safelist.matches(domain_with_at):
        domain_info['status'] = "safe"
    else:
        domain_info['status'] = "unclear"
    return domain_info
def _expand_users_info(results, include_pending=False):
    """Return user information.

    Mutates ``results`` (mapping of user id -> content stats) in place,
    attaching account details for each user. Unless ``include_pending``
    is set, users that are already safelisted or deactivated are removed
    from ``results``.
    """
    user_data = (
        User.query.options(
            db.joinedload(User.profile),
            db.joinedload(User.external_identifiers),
            db.joinedload(User.safelist),
        ).filter(User.id.in_(results.keys()))
    )
    # Frequencies of canonicalized emails, used to spot mass registration.
    normalized_emails = Counter([
        _normalize_email(user.email)
        for user in user_data
    ])
    # Per-domain totals of all/active accounts across the whole user table.
    email_domain_expr = sa.func.split_part(sa.func.lower(User.email), '@', 2)
    email_domains = db.session.query(
        email_domain_expr,
        sa.func.count(User.id),
        sa.func.count(User.id).filter(User.active == True)
    ).group_by(email_domain_expr).all()
    email_domain_count = {
        domain: {'active': active, 'total': total}
        for domain, total, active in email_domains
    }
    for user in user_data:
        is_safelisted = user.safelist
        is_inactivated = not user.active
        if (is_safelisted or is_inactivated) and not include_pending:
            results.pop(user.id)
            continue
        r = results[user.id]
        r.update({
            "id": user.id,
            "email": user.email,
            "external": [i.method for i in (user.external_identifiers or [])],
            "domain_info": _evaluate_user_domain(user.email, normalized_emails,
                                                 email_domain_count),
            "active": user.active,
            "safelist": user.safelist,
        })
        if user.profile:
            r.update({
                "full_name": user.profile.full_name,
                "username": user.profile.username
            })
@blueprint.route('/safelist/admin', methods=['GET'])
@login_required
def safelist_admin():
    """Safelist admin.

    Builds an overview of users with recent non-safelisted records and/or
    communities so an admin can bulk-safelist or bulk-delete them.

    Query params: ``data`` (all|records|communities), ``from_weeks`` /
    ``to_weeks`` time window, ``max_users`` cap, ``include_pending``.
    """
    # Only admin can access this view
    if not Permission(ActionNeed('admin-access')).can():
        abort(403)
    data = request.args.get('data', 'all', type=str)
    data_categories = {
        'records': data in ('all', 'records'),
        'communities': data in ('all', 'communities'),
    }
    from_weeks = request.args.get('from_weeks', 4, type=int)
    to_weeks = request.args.get('to_weeks', 0, type=int)
    max_users = request.args.get('max_users', 1000, type=int)
    include_pending = \
        request.args.get('include_pending', 'include', type=str) == 'include'
    result = {}
    if data_categories['records']:
        # Aggregate recent non-safelisted records per owner, keeping the
        # three latest hits for previewing titles/descriptions.
        search = RecordsSearch(index='records').filter(
            'range', **{'created': {'gte': 'now-{}w'.format(from_weeks),
                                    'lt': 'now-{}w'.format(to_weeks)}}
        ).filter(
            'term', _safelisted=False,
        )
        user_agg = search.aggs.bucket('user', 'terms', field='owners',
                                      size=max_users)
        user_agg.metric('records', 'top_hits', size=3, _source=['title',
                                                                'description',
                                                                'recid'])
        # Size-0 search: only the aggregations are needed.
        res = search[0:0].execute()
        for user in res.aggregations.user.buckets:
            result[user.key] = {
                'last_content_titles': ", ".join(r.title for r in user.records),
                'last_content_descriptions': ", ".join(r.description for r in
                                                       user.records),
                'first_content_url': url_for('invenio_records_ui.recid',
                                             pid_value=user.records[0].recid),
                'total_content': user.doc_count
            }
    if data_categories['communities']:
        from_date = datetime.utcnow() - timedelta(weeks=from_weeks)
        to_date = datetime.utcnow() - timedelta(weeks=to_weeks)
        # One row per user owning communities in the window, with counts
        # and one representative community for the preview link.
        community_users = db.session.query(
            User.id.label('user_id'),
            sa.func.count(Community.id).label('count'),
            sa.func.max(Community.id).label('c_id'),
            sa.func.max(Community.title).label('title'),
            sa.func.max(Community.description).label('description')
        ).join(Community).group_by(User.id).filter(
            Community.created.between(from_date, to_date),
            Community.deleted_at.is_(None),
            ~User.safelist.any()
        ).limit(max_users)
        for row in community_users:
            # Merge with any record-based stats collected above.
            user_data = result.get(row.user_id, {
                'last_content_titles': '',
                'last_content_descriptions': '',
                'first_content_url': '',
                'total_content': 0
            })
            if user_data['last_content_titles']:
                user_data['last_content_titles'] += ', '
            user_data['last_content_titles'] += row.title
            if user_data['last_content_descriptions']:
                user_data['last_content_descriptions'] += ', '
            user_data['last_content_descriptions'] += row.description
            user_data['first_content_url'] = url_for('invenio_communities.detail',
                                                     community_id=row.c_id)
            user_data['total_content'] += row.count
            result[row.user_id] = user_data
    _expand_users_info(result, include_pending)
    return render_template('zenodo_spam/safelist/admin.html', users=result)
@blueprint.route('/<int:user_id>/safelist', methods=['POST'])
@login_required
def safelist_add_remove(user_id):
    """Add or remove user from the safelist."""
    # Only admin can access this view
    if not Permission(ActionNeed('admin-access')).can():
        abort(403)
    user = User.query.get(user_id)
    adding = request.form['action'] == 'post'
    if adding:
        # Create safelist entry, recording who performed the action.
        SafelistEntry.create(user_id=user.id, notes=u'Added by {} ({})'.format(
            current_user.email, current_user.id))
        flash("Added to safelist", category='success')
    else:
        # Remove safelist entry
        SafelistEntry.remove_by_user_id(user.id)
        flash("Removed from safelist", category='warning')
    db.session.commit()
    # Records embed the safelist flag, so they must be reindexed.
    reindex_user_records.delay(user_id)
    return redirect(request.form['next'])
@blueprint.route('/safelist/add/bulk', methods=['POST'])
@login_required
def safelist_bulk_add():
    """Add users to the safelist in bulk.

    Reactivates each posted user, creates a safelist entry for them, and
    queues a reindex of their records.
    """
    # Only admin can access this view
    if not Permission(ActionNeed('admin-access')).can():
        abort(403)
    user_ids = request.form.getlist('user_ids[]')
    for user_id in user_ids:
        user = User.query.get(user_id)
        user.active = True
        try:
            if not user.safelist:
                SafelistEntry.create(user_id=user.id,
                                     notes=u'Added by {} ({})'.format(
                                         current_user.email, current_user.id))
        # NOTE(review): broad best-effort catch — presumably guards against
        # duplicate-entry races; consider narrowing and logging the failure.
        except Exception:
            pass
    db.session.commit()
    # Records embed the safelist flag, so they must be reindexed.
    for user_id in user_ids:
        reindex_user_records.delay(user_id)
    return jsonify({'message': 'Bulk safelisted'})
@blueprint.route('/delete/bulk', methods=['POST'])
@login_required
def spam_delete_bulk():
    """Delete spam users in bulk.

    Deactivates each posted user in one transaction, then queues the
    heavyweight content deletion per user as background tasks.
    """
    # Only admin can access this view
    if not Permission(ActionNeed('admin-access')).can():
        abort(403)
    user_ids = request.form.getlist('user_ids[]')
    for user_id in user_ids:
        user = User.query.get(user_id)
        current_accounts.datastore.deactivate_user(user)
    db.session.commit()
    for user_id in user_ids:
        delete_spam_user.delay(user_id, int(current_user.id))
    # Fixed copy-paste: this endpoint previously reported 'Bulk safelisted'.
    return jsonify({'message': 'Bulk deleted'})
| 13,378 | Python | .py | 316 | 33.68038 | 82 | 0.623673 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,608 | 6ac7565ebb86_create_whitelist_entries_table.py | zenodo_zenodo/zenodo/modules/spam/alembic/6ac7565ebb86_create_whitelist_entries_table.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2022 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Create files REST tables."""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6ac7565ebb86'
down_revision = None
branch_labels = (u'zenodo_spam',)
depends_on = '9848d0149abd' # invenio_accounts: create users table
def upgrade():
    """Upgrade database.

    Creates the ``safelist_entries`` table: one row per safelisted user,
    keyed by the user's account id.
    """
    op.create_table(
        'safelist_entries',
        # One entry per user; the user id is also the primary key.
        sa.Column('user_id', sa.Integer(), nullable=False),
        # RESTRICT: a user with a safelist entry cannot be deleted until
        # the entry is removed first.
        sa.ForeignKeyConstraint(
            ['user_id'],
            [u'accounts_user.id'],
            ondelete='RESTRICT'),
        sa.PrimaryKeyConstraint('user_id'),
        # Free-text audit note, e.g. which admin added the entry.
        sa.Column('notes', sa.Text(), nullable=True),
        sa.Column(
            'created',
            # MySQL needs fractional-second precision enabled explicitly.
            sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'),
            nullable=False
        )
    )
def downgrade():
    """Downgrade database.

    Drops the ``safelist_entries`` table created by :func:`upgrade`.
    """
    op.drop_table('safelist_entries')
| 1,901 | Python | .py | 52 | 32.576923 | 76 | 0.705212 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,609 | event_builders.py | zenodo_zenodo/zenodo/modules/stats/event_builders.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Statistics events builders."""
from zenodo.modules.records.utils import is_deposit
from .utils import extract_event_record_metadata, get_record_from_context
def skip_deposit(event, sender_app, **kwargs):
    """Check if event is coming from deposit record and skip."""
    record = get_record_from_context(**kwargs)
    if not record:
        return event
    # Deposit drafts must not produce usage statistics.
    # TODO: Check that invenio-stats bails when `None` is returned
    return None if is_deposit(record) else event
def add_record_metadata(event, sender_app, **kwargs):
    """Add Zenodo-specific record fields to the event."""
    record = get_record_from_context(**kwargs)
    if not record:
        # No record available; leave the event untouched.
        return event
    event.update(extract_event_record_metadata(record))
    return event
| 1,690 | Python | .py | 39 | 40.717949 | 76 | 0.752433 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,610 | ext.py | zenodo_zenodo/zenodo/modules/stats/ext.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Exporter extension."""
from __future__ import absolute_import, print_function
from elasticsearch import Elasticsearch
from elasticsearch.connection import RequestsHttpConnection
from flask import current_app
from werkzeug.utils import cached_property
from . import config
class ZenodoStats(object):
    """Zenodo stats extension."""

    def __init__(self, app=None):
        """Extension initialization."""
        if app:
            self.init_app(app)

    @cached_property
    def search_client(self):
        """Elasticsearch client for stats queries."""
        # Dedicated client config (e.g. longer timeouts) for stats queries;
        # falls back to the regular search hosts.
        client_config = current_app.config.get(
            'ZENODO_STATS_ELASTICSEARCH_CLIENT_CONFIG') or {}
        if 'hosts' not in client_config:
            client_config['hosts'] = \
                current_app.config.get('SEARCH_ELASTIC_HOSTS')
        if 'connection_class' not in client_config:
            client_config['connection_class'] = RequestsHttpConnection
        return Elasticsearch(**client_config)

    @staticmethod
    def init_config(app):
        """Initialize configuration."""
        # Copy all ZENODO_STATS_* defaults from the config module, without
        # overriding values the application already set.
        defaults = (
            (name, getattr(config, name))
            for name in dir(config) if name.startswith('ZENODO_STATS_'))
        for name, value in defaults:
            app.config.setdefault(name, value)

    def init_app(self, app):
        """Flask application initialization."""
        self.init_config(app)
        app.extensions['zenodo-stats'] = self
| 2,248 | Python | .py | 55 | 36.272727 | 76 | 0.71782 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,611 | errors.py | zenodo_zenodo/zenodo/modules/stats/errors.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Deposit errors."""
from __future__ import absolute_import, print_function
class PiwikExportRequestError(Exception):
    """Error for failed requests on Piwik export."""

    def __init__(self, *args, **kwargs):
        """Initialize the error with first and last events' timestamps."""
        # Keep the export window metadata available on the exception so the
        # caller (or logger) can report which events were affected.
        export_info = kwargs['export_info']
        super(PiwikExportRequestError, self).__init__(*args)
        self.extra = export_info
| 1,364 | Python | .py | 31 | 41.806452 | 76 | 0.748494 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,612 | config.py | zenodo_zenodo/zenodo/modules/stats/config.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo stats."""
# Settings for the Piwik (OpenAIRE analytics) event exporter.
ZENODO_STATS_PIWIK_EXPORTER = {
    # Piwik site identifier the events are attributed to.
    'id_site': 1,
    # Piwik tracking HTTP API endpoint.
    'url': 'https://analytics.openaire.eu/piwik.php',
    # Placeholder; the real API token is set via instance configuration.
    'token_auth': 'api-token',
    # Number of events sent per bulk request.
    'chunk_size': 50  # [max piwik payload size = 64k] / [max querystring size = 750]
}

# Master switch for the periodic Piwik export task.
ZENODO_STATS_PIWIK_EXPORT_ENABLED = True

# Queries performed when processing aggregations might take more time than
# usual. This is fine though, since this is happening during Celery tasks.
ZENODO_STATS_ELASTICSEARCH_CLIENT_CONFIG = {'timeout': 60}
| 1,482 | Python | .py | 34 | 42 | 85 | 0.756925 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,613 | exporters.py | zenodo_zenodo/zenodo/modules/stats/exporters.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo stats exporters."""
import json
import requests
from dateutil.parser import parse as dateutil_parse
from elasticsearch_dsl import Search
from flask import current_app
from invenio_cache import current_cache
from invenio_pidstore.errors import PIDDeletedError
from invenio_search import current_search_client
from invenio_search.utils import build_alias_name
from six.moves.urllib.parse import urlencode, urlsplit, urlunsplit
from zenodo.modules.records.serializers.schemas.common import ui_link_for
from zenodo.modules.stats.errors import PiwikExportRequestError
from zenodo.modules.stats.utils import chunkify, fetch_record
class PiwikExporter:
    """Events exporter.

    Streams stats events from Elasticsearch and forwards them in chunks to
    the Piwik tracking HTTP API configured in
    ``ZENODO_STATS_PIWIK_EXPORTER``.
    """

    def run(self, start_date=None, end_date=None, update_bookmark=True):
        """Run export job.

        :param start_date: lower bound for exported events; when ``None``
            the cached ``piwik_export:bookmark`` is used, and the job exits
            if no bookmark exists either.
        :param end_date: upper bound for exported events (optional).
        :param update_bookmark: when ``True``, advance the bookmark after
            each successfully exported chunk.
        :raises PiwikExportRequestError: if the Piwik request fails.
        """
        if start_date is None:
            bookmark = current_cache.get('piwik_export:bookmark')
            if bookmark is None:
                msg = 'Bookmark not found, and no start date specified.'
                current_app.logger.warning(msg)
                return
            start_date = dateutil_parse(bookmark) if bookmark else None

        time_range = {}
        if start_date is not None:
            time_range['gte'] = start_date.replace(microsecond=0).isoformat()
        if end_date is not None:
            time_range['lte'] = end_date.replace(microsecond=0).isoformat()

        # Scan all matching events in ascending timestamp order so that the
        # bookmark can be advanced monotonically per chunk.
        events = Search(
            using=current_search_client,
            index=build_alias_name('events-stats-*')
        ).filter(
            'range', timestamp=time_range
        ).sort(
            {'timestamp': {'order': 'asc'}}
        ).params(preserve_order=True).scan()

        url = current_app.config['ZENODO_STATS_PIWIK_EXPORTER'] \
            .get('url', None)
        token_auth = current_app.config['ZENODO_STATS_PIWIK_EXPORTER'] \
            .get('token_auth', None)
        chunk_size = current_app.config['ZENODO_STATS_PIWIK_EXPORTER']\
            .get('chunk_size', 0)

        for event_chunk in chunkify(events, chunk_size):
            query_strings = []
            for event in event_chunk:
                if 'recid' not in event:
                    continue
                try:
                    query_string = self._build_query_string(event)
                    query_strings.append(query_string)
                except PIDDeletedError:
                    # Events of since-deleted records are skipped.
                    pass

            # Check and bail if the bookmark has progressed, e.g. from another
            # duplicate task or manual run of the exporter.
            bookmark = current_cache.get('piwik_export:bookmark')
            # BUG FIX: guard against a missing bookmark — comparing a
            # timestamp string against ``None`` raises TypeError on Py3
            # (e.g. on the very first run with an explicit start date).
            if bookmark and event_chunk[-1].timestamp < bookmark:
                return

            payload = {
                'requests': query_strings,
                'token_auth': token_auth
            }

            res = requests.post(url, json=payload, timeout=60)

            # Failure: not 200 or not "success"
            content = res.json() if res.ok else None
            if res.status_code == 200 and content.get('status') == 'success':
                if content.get('invalid') != 0:
                    msg = 'Invalid events in Piwik export request.'
                    info = {
                        'begin_event_timestamp': event_chunk[0].timestamp,
                        'end_event_timestamp': event_chunk[-1].timestamp,
                        'invalid_events': content.get('invalid')
                    }
                    current_app.logger.warning(msg, extra=info)
                elif update_bookmark is True:
                    current_cache.set('piwik_export:bookmark',
                                      event_chunk[-1].timestamp,
                                      timeout=-1)
            else:
                msg = 'Invalid events in Piwik export request.'
                info = {
                    'begin_event_timestamp': event_chunk[0].timestamp,
                    'end_event_timestamp': event_chunk[-1].timestamp,
                }
                raise PiwikExportRequestError(msg, export_info=info)

    def _build_query_string(self, event):
        """Build the Piwik tracking-API query string for one event.

        :param event: an ES stats event hit carrying at least ``recid``,
            ``visitor_id`` and ``timestamp``.
        :raises PIDDeletedError: if the event's record has been deleted.
        """
        id_site = current_app.config['ZENODO_STATS_PIWIK_EXPORTER']\
            .get('id_site', None)
        url = ui_link_for('record_html', id=event.recid)
        # Piwik visitor ids are 16 hex characters.
        visitor_id = event.visitor_id[0:16]

        _, record = fetch_record(event.recid)
        oai = record.get('_oai', {}).get('id')
        # Custom variable carrying the record's OAI-PMH identifier.
        cvar = json.dumps({'1': ['oaipmhID', oai]})
        # Guard against a missing title; Piwik caps names at 150 characters.
        action_name = (record.get('title') or '')[:150]

        urlref = None
        if event.referrer:
            try:
                # Strip query string and fragment from the referrer URL.
                scheme, netloc, path, _, _ = urlsplit(event.referrer)
                urlref = urlunsplit((scheme, netloc, path, None, None))
            except Exception:
                pass

        params = dict(
            idsite=id_site,
            rec=1,
            url=url,
            _id=visitor_id,
            cid=visitor_id,
            cvar=cvar,
            cdt=event.timestamp,
            urlref=urlref,
            action_name=action_name
        )

        if event.to_dict().get('country'):
            params['country'] = event.country.lower()
        if event.to_dict().get('file_key'):
            # File downloads are tracked against the file URL.
            params['url'] = ui_link_for('record_file', id=event.recid,
                                        filename=event.file_key)
            params['download'] = params['url']

        return '?{}'.format(urlencode(params, 'utf-8'))
| 6,373 | Python | .py | 142 | 33.950704 | 80 | 0.595686 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,614 | cli.py | zenodo_zenodo/zenodo/modules/stats/cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo statistics CLI commands."""
import csv
import glob
import re
import sys
from datetime import datetime as dt
import click
from dateutil.parser import parse as dateutil_parse
from flask.cli import with_appcontext
from invenio_stats.cli import stats
from invenio_stats.proxies import current_stats
from six.moves import filter, map
from six.moves.urllib.parse import urlparse
from zenodo.modules.stats.tasks import update_record_statistics
from zenodo.modules.stats.utils import chunkify, \
extract_event_record_metadata, fetch_record, fetch_record_file
PY3 = sys.version_info[0] == 3
def _verify_date(ctx, param, value):
    """Click callback: validate an option value as a parseable date."""
    if value:
        # Raises on an unparsable date string, aborting the command.
        dateutil_parse(value)
    return value
def parse_record_url(url):
    """Parse a recid and filename from a record-like URL.

    :param url: absolute URL, e.g.
        ``https://zenodo.org/record/123/files/article.pdf``.
    :returns: ``(recid, filename)``; ``filename`` is ``None`` for plain
        record URLs, and both are ``None`` only if the path has no recid.
    :raises AssertionError: if the URL is not a Zenodo record URL.
    """
    record_url = urlparse(url)
    # A URL without a hostname (e.g. a relative path) previously crashed
    # with AttributeError on ``None.lower()``; fail with the intended
    # assertion message instead.
    assert record_url.hostname, 'non-Zenodo url'
    assert record_url.hostname.lower().endswith('zenodo.org'), 'non-Zenodo url'
    match = re.match(
        # matches "/record/(123)", "/record/(123)/export", etc
        r'^\/record\/(?P<recid>\d+)'
        # matches "/record/(123)/files/(some.pdf)"
        r'(?:\/files\/(?P<filename>.+)$)?',
        record_url.path)
    # Non-record paths previously raised AttributeError on ``None``;
    # raise a descriptive assertion instead (callers catch broadly).
    assert match, 'not a record url'
    groups = match.groupdict()
    return groups.get('recid'), groups.get('filename')
def build_common_event(record, data):
    """Build common fields of a stats event from a record and request data."""
    # Incoming timestamps are UNIX epoch seconds (possibly as strings).
    timestamp = dt.utcfromtimestamp(float(data['timestamp'])).isoformat()
    event = {
        'timestamp': timestamp,
        'pid_type': 'recid',
        'pid_value': str(record.get('recid')),
        'referrer': data['referrer'],
        'ip_address': data['ipAddress'],
        'user_agent': data['userAgent'],
        'user_id': None,
    }
    # Enrich with the Zenodo-specific record metadata payload.
    event.update(extract_event_record_metadata(record))
    return event
def build_record_view_event(data):
    """Build a 'record-view' event from request data."""
    try:
        recid = parse_record_url(data['url'])[0]
        assert recid, 'no recid in url'
        record = fetch_record(recid)[1]
    except Exception:
        # Any unparsable/unknown URL yields no event.
        return None
    return build_common_event(record, data)
def build_file_download_event(data):
    """Build a 'file-download' event from request data."""
    try:
        recid, filename = parse_record_url(data['url'])
        assert recid and filename, 'no recid and filename in url'
        record = fetch_record(recid)[1]
        obj = fetch_record_file(recid, filename)
    except Exception:
        # URLs that don't resolve to a record file yield no event.
        return None
    event = build_common_event(record, data)
    # Attach the file-specific payload on top of the common fields.
    event.update(
        bucket_id=str(obj.bucket_id),
        file_id=str(obj.file_id),
        file_key=obj.key,
        size=obj.file.size,
    )
    return event
# Maps CLI event-type names to the functions that build their events.
EVENT_TYPE_BUILDERS = {
    'record-view': build_record_view_event,
    'file-download': build_file_download_event,
}
@stats.command('import')
@click.argument('event-type', type=click.Choice(EVENT_TYPE_BUILDERS.keys()))
@click.argument('csv-dir', type=click.Path(file_okay=False, resolve_path=True))
@click.option('--chunk-size', '-s', type=int, default=100)
@with_appcontext
def import_events(event_type, csv_dir, chunk_size):
    r"""Import stats events from a directory of CSV files.

    Available event types: "file-download", "record-view"

    The following columns should always be present:

    \b
    - ipAddress
    - userAgent
    - url ("https://zenodo.org/record/1234/files/article.pdf")
    - timestamp (1388506249)
    - referrer ("Google", "example.com", etc)
    """
    build_event = EVENT_TYPE_BUILDERS[event_type]
    csv_files = glob.glob(csv_dir + '/*.csv')
    with click.progressbar(csv_files, len(csv_files)) as csv_files_bar:
        for csv_path in csv_files_bar:
            # Py2 csv needs binary mode; Py3 needs text mode.
            with open(csv_path, 'r' if PY3 else 'rb') as fp:
                rows = csv.DictReader(fp, delimiter=',')
                # Builders return None for rows that can't be resolved.
                events = filter(None, map(build_event, rows))
                for event_chunk in chunkify(events, chunk_size):
                    current_stats.publish(event_type, event_chunk)
    click.secho(
        'Run the "invenio_stats.tasks.process_events" to index the events...',
        fg='yellow')
@stats.command('update-records')
@click.option('--start-date', callback=_verify_date)
@click.option('--end-date', callback=_verify_date)
@click.option('--eager', '-e', is_flag=True)
@with_appcontext
def update_records(start_date=None, end_date=None, eager=False):
    """Update records' statistics on ES."""
    task_kwargs = dict(start_date=start_date, end_date=end_date)
    if eager:
        # Run synchronously in this process and surface any exception.
        update_record_statistics.apply(kwargs=task_kwargs, throw=True)
        message = ('Records sent for bulk indexing. Wait for the scheduled '
                   'indexer or run `zenodo index run ...`')
    else:
        update_record_statistics.delay(**task_kwargs)
        message = 'Update records statistics task sent...'
    click.secho(message, fg='yellow')
| 5,707 | Python | .py | 140 | 35.435714 | 79 | 0.682007 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,615 | tasks.py | zenodo_zenodo/zenodo/modules/stats/tasks.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Tasks for statistics."""
from datetime import datetime
from celery import shared_task
from dateutil.parser import parse as dateutil_parse
from elasticsearch_dsl import Index, Search
from flask import current_app
from invenio_indexer.api import RecordIndexer
from invenio_pidrelations.contrib.versioning import PIDVersioning
from invenio_pidstore.models import PersistentIdentifier
from invenio_stats import current_stats
from zenodo.modules.stats.exporters import PiwikExporter
@shared_task(ignore_result=True)
def update_record_statistics(start_date=None, end_date=None):
    """Update "_stats" field of affected records.

    Finds, from the stats aggregation indices, the concept records with
    view/download activity in the given window and schedules all of their
    versions for re-indexing (which refreshes each record's ``_stats``).

    :param start_date: optional ISO date string (lower bound).
    :param end_date: optional ISO date string (upper bound). Either both or
        neither date must be given; with neither, the window is derived
        from the aggregation bookmarks.
    """
    start_date = dateutil_parse(start_date) if start_date else None
    # BUG FIX: the condition previously tested ``start_date``, silently
    # discarding an explicitly passed ``end_date`` when no start date was
    # given.
    end_date = dateutil_parse(end_date) if end_date else None
    aggr_configs = {}

    if not start_date and not end_date:
        start_date = datetime.utcnow()
        end_date = datetime.utcnow()
        for aggr_name, aggr_cfg in current_stats.aggregations.items():
            aggr = aggr_cfg.cls(name=aggr_cfg.name, **aggr_cfg.params)
            if not Index(aggr.index, using=aggr.client).exists():
                if not Index(aggr.event_index, using=aggr.client).exists():
                    start_date = min(start_date, datetime.utcnow())
                else:
                    # Cover everything since the oldest raw event.
                    start_date = min(
                        start_date, aggr._get_oldest_event_timestamp())

            # Retrieve the last two bookmarks
            bookmarks = aggr.list_bookmarks(limit=2)
            if len(bookmarks) >= 1:
                end_date = max(
                    end_date,
                    datetime.strptime(bookmarks[0].date, aggr.doc_id_suffix))
            if len(bookmarks) == 2:
                start_date = min(
                    start_date,
                    datetime.strptime(bookmarks[1].date, aggr.doc_id_suffix))
            aggr_configs[aggr.index] = aggr
    elif start_date and end_date:
        for aggr_name, aggr_cfg in current_stats.aggregations.items():
            aggr = aggr_cfg.cls(name=aggr_cfg.name, **aggr_cfg.params)
            aggr_configs[aggr.index] = aggr
    else:
        # Exactly one bound given: invalid input, nothing to do.
        return

    # Get conceptrecids for all the affected records between the two dates
    conceptrecids = set()
    for aggr_index, aggr in aggr_configs.items():
        query = Search(
            using=aggr.client,
            index=aggr.index,
        ).filter(
            'range', timestamp={
                # '||/d' rounds the bounds to whole days on the ES side.
                'gte': start_date.replace(microsecond=0).isoformat() + '||/d',
                'lte': end_date.replace(microsecond=0).isoformat() + '||/d'}
        ).source(include='conceptrecid')
        conceptrecids |= {b.conceptrecid for b in query.scan()}

    indexer = RecordIndexer()
    for conceptrecid_val in conceptrecids:
        conceptrecid = PersistentIdentifier.get('recid', conceptrecid_val)
        pv = PIDVersioning(parent=conceptrecid)
        children_recids = pv.children.all()
        # Re-index every version of the concept record.
        indexer.bulk_index([str(p.object_uuid) for p in children_recids])
@shared_task(ignore_result=True, max_retries=3, default_retry_delay=60 * 60)
def export_stats(start_date=None, end_date=None, update_bookmark=True, retry=False):
    """Export stats events."""
    # The exporter only runs when explicitly enabled in configuration.
    if current_app.config['ZENODO_STATS_PIWIK_EXPORT_ENABLED'] is not True:
        return
    parsed_start = dateutil_parse(start_date) if start_date else None
    parsed_end = dateutil_parse(end_date) if end_date else None
    try:
        PiwikExporter().run(start_date=parsed_start, end_date=parsed_end,
                            update_bookmark=update_bookmark)
    except Exception as exc:
        # Only reschedule when explicitly requested; otherwise best-effort.
        if retry:
            export_stats.retry(exc=exc)
| 4,625 | Python | .py | 98 | 39.428571 | 84 | 0.670656 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,616 | registrations.py | zenodo_zenodo/zenodo/modules/stats/registrations.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Registration of aggregations."""
from flask_principal import ActionNeed
from invenio_access.permissions import Permission
from invenio_stats.aggregations import StatAggregator
from invenio_stats.queries import ESTermsQuery
from .proxies import current_stats_search_client
def register_aggregations():
    """Register Zenodo aggregations.

    Returns the invenio-stats aggregation configurations for daily
    download/view statistics, both per record version and across all
    versions of a concept record (``is_parent=True``).
    """
    return [dict(
        # Per-version file downloads, aggregated daily by recid.
        aggregation_name='record-download-agg',
        templates='zenodo.modules.stats.templates.aggregations',
        aggregator_class=StatAggregator,
        aggregator_config=dict(
            client=current_stats_search_client,
            event='file-download',
            aggregation_field='recid',
            aggregation_interval='day',
            batch_size=1,
            copy_fields=dict(
                bucket_id='bucket_id',
                record_id='record_id',
                recid='recid',
                conceptrecid='conceptrecid',
                doi='doi',
                conceptdoi='conceptdoi',
                # ES stores these as AttrList; convert back to plain lists.
                communities=lambda d, _: (list(d.communities)
                                          if d.communities else None),
                owners=lambda d, _: (list(d.owners) if d.owners else None),
                is_parent=lambda *_: False
            ),
            metric_aggregation_fields=dict(
                unique_count=('cardinality', 'unique_session_id',
                              {'precision_threshold': 1000}),
                volume=('sum', 'size', {}),
            )
        )),
        dict(
            # All-versions downloads, aggregated daily by conceptrecid.
            aggregation_name='record-download-all-versions-agg',
            templates='zenodo.modules.stats.templates.aggregations',
            aggregator_class=StatAggregator,
            aggregator_config=dict(
                client=current_stats_search_client,
                event='file-download',
                aggregation_field='conceptrecid',
                aggregation_interval='day',
                batch_size=1,
                copy_fields=dict(
                    conceptrecid='conceptrecid',
                    conceptdoi='conceptdoi',
                    communities=lambda d, _: (list(d.communities)
                                              if d.communities else None),
                    owners=lambda d, _: (list(d.owners) if d.owners else None),
                    is_parent=lambda *_: True
                ),
                metric_aggregation_fields=dict(
                    unique_count=('cardinality', 'unique_session_id',
                                  {'precision_threshold': 1000}),
                    volume=('sum', 'size', {}),
                )
            )),
        # NOTE: Since the "record-view-agg" aggregations is already registered
        # in "invenio_stasts.contrib.registrations", we have to overwrite the
        # configuration in "zenodo.config.STATS_AGGREGATIONS".
        dict(
            # All-versions record views, aggregated daily by conceptrecid.
            aggregation_name='record-view-all-versions-agg',
            templates='zenodo.modules.stats.templates.aggregations',
            aggregator_class=StatAggregator,
            aggregator_config=dict(
                client=current_stats_search_client,
                event='record-view',
                aggregation_field='conceptrecid',
                aggregation_interval='day',
                batch_size=1,
                copy_fields=dict(
                    conceptrecid='conceptrecid',
                    conceptdoi='conceptdoi',
                    communities=lambda d, _: (list(d.communities)
                                              if d.communities else None),
                    owners=lambda d, _: (list(d.owners) if d.owners else None),
                    is_parent=lambda *_: True
                ),
                metric_aggregation_fields=dict(
                    unique_count=('cardinality', 'unique_session_id',
                                  {'precision_threshold': 1000}),
                )
            )),
    ]
def queries_permission_factory(query_name, params):
    """Queries permission factory."""
    # All stats queries are restricted to administrators, regardless of
    # the specific query or its parameters.
    return Permission(ActionNeed('admin-access'))
def register_queries():
    """Register Zenodo queries.

    Returns ES terms-query configurations for fetching per-version and
    all-versions view/download statistics from the aggregation indices.
    """
    return [
        dict(
            # Per-version download stats, filtered by recid.
            query_name='record-download',
            query_class=ESTermsQuery,
            permission_factory=queries_permission_factory,
            query_config=dict(
                index='stats-file-download',
                copy_fields=dict(
                    bucket_id='bucket_id',
                    record_id='record_id',
                    recid='recid',
                    conceptrecid='conceptrecid',
                    doi='doi',
                    conceptdoi='conceptdoi',
                    communities='communities',
                    owners='owners',
                    is_parent='is_parent'
                ),
                required_filters=dict(
                    recid='recid',
                ),
                metric_fields=dict(
                    count=('sum', 'count', {}),
                    unique_count=('sum', 'unique_count', {}),
                    volume=('sum', 'volume', {}),
                )
            ),
        ),
        dict(
            # All-versions download stats, filtered by conceptrecid.
            query_name='record-download-all-versions',
            query_class=ESTermsQuery,
            permission_factory=queries_permission_factory,
            query_config=dict(
                index='stats-file-download',
                copy_fields=dict(
                    conceptrecid='conceptrecid',
                    conceptdoi='conceptdoi',
                    communities='communities',
                    owners='owners',
                    is_parent='is_parent'
                ),
                # Only the parent (all-versions) aggregation documents.
                query_modifiers=[
                    lambda query, **_: query.filter('term', is_parent=True)
                ],
                required_filters=dict(
                    conceptrecid='conceptrecid',
                ),
                metric_fields=dict(
                    count=('sum', 'count', {}),
                    unique_count=('sum', 'unique_count', {}),
                    volume=('sum', 'volume', {}),
                )
            )
        ),
        dict(
            # Per-version record-view stats, filtered by recid.
            query_name='record-view',
            query_class=ESTermsQuery,
            permission_factory=queries_permission_factory,
            query_config=dict(
                index='stats-record-view',
                copy_fields=dict(
                    record_id='record_id',
                    recid='recid',
                    conceptrecid='conceptrecid',
                    doi='doi',
                    conceptdoi='conceptdoi',
                    communities='communities',
                    owners='owners',
                    is_parent='is_parent'
                ),
                required_filters=dict(
                    recid='recid',
                ),
                metric_fields=dict(
                    count=('sum', 'count', {}),
                    unique_count=('sum', 'unique_count', {}),
                )
            )
        ),
        dict(
            # All-versions record-view stats, filtered by conceptrecid.
            query_name='record-view-all-versions',
            query_class=ESTermsQuery,
            permission_factory=queries_permission_factory,
            query_config=dict(
                index='stats-record-view',
                copy_fields=dict(
                    conceptrecid='conceptrecid',
                    conceptdoi='conceptdoi',
                    communities='communities',
                    owners='owners',
                    is_parent='is_parent'
                ),
                # Only the parent (all-versions) aggregation documents.
                query_modifiers=[
                    lambda query, **_: query.filter('term', is_parent=True)
                ],
                required_filters=dict(
                    conceptrecid='conceptrecid',
                ),
                metric_fields=dict(
                    count=('sum', 'count', {}),
                    unique_count=('sum', 'unique_count', {}),
                )
            )
        ),
    ]
| 8,910 | Python | .py | 220 | 25.945455 | 79 | 0.510023 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,617 | utils.py | zenodo_zenodo/zenodo/modules/stats/utils.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Statistics utilities."""
import itertools
from elasticsearch.exceptions import NotFoundError
from flask import request
from invenio_search.api import RecordsSearch
from invenio_search.proxies import current_search_client
from invenio_search.utils import build_alias_name
from invenio_stats import current_stats
from zenodo.modules.records.resolvers import record_resolver
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
def get_record_from_context(**kwargs):
    """Return the cached record object from kwargs or the request context.

    Returns ``None`` when no record is available from either source.
    """
    if 'record' in kwargs:
        return kwargs['record']
    # Fall back to the record cached on the current request, if any.
    if not request:
        return None
    real_request = request._get_current_object()
    if hasattr(real_request, 'current_file_record'):
        return request.current_file_record
def extract_event_record_metadata(record):
    """Extract from a record the payload needed for a statistics event.

    :param record: record object (dict-like with an ``id`` attribute).
    :returns: flat dict of record metadata used in statistics events.
    """
    recid_value = record.get('recid')
    return {
        'record_id': str(record.id),
        # 'recid' is stringified only when present and truthy.
        'recid': str(recid_value) if recid_value else None,
        'conceptrecid': record.get('conceptrecid'),
        'doi': record.get('doi'),
        'conceptdoi': record.get('conceptdoi'),
        'access_right': record.get('access_right'),
        'resource_type': record.get('resource_type'),
        'communities': record.get('communities'),
        'owners': record.get('owners'),
    }
def build_record_stats(recid, conceptrecid):
    """Build the record's stats.

    Runs the registered statistics queries for a single record (and for
    all versions via its concept recid) and maps the query results to the
    flat stats field names stored on the record.

    :param recid: recid of the record version.
    :param conceptrecid: concept recid covering all versions.
    :returns: dict of stats values (fields of failed queries are omitted).
    """
    stats = {}
    # (query name, query parameters, {stats field: result field}) triples.
    query_specs = [
        ('record-view', {'recid': recid},
         {'views': 'count',
          'unique_views': 'unique_count'}),
        ('record-download', {'recid': recid},
         {'downloads': 'count',
          'unique_downloads': 'unique_count',
          'volume': 'volume'}),
        ('record-view-all-versions', {'conceptrecid': conceptrecid},
         {'version_views': 'count',
          'version_unique_views': 'unique_count'}),
        ('record-download-all-versions', {'conceptrecid': conceptrecid},
         {'version_downloads': 'count',
          'version_unique_downloads': 'unique_count',
          'version_volume': 'volume'}),
    ]
    for query_name, params, fields in query_specs:
        try:
            query_cfg = current_stats.queries[query_name]
            query = query_cfg.cls(name=query_name, **query_cfg.params)
            result = query.run(**params)
            for target, source in fields.items():
                stats[target] = result.get(source)
        except Exception:
            # Best-effort: a failing or unregistered query simply leaves
            # its stats fields unset.
            pass
    return stats
def get_record_stats(recordid, throws=True):
    """Fetch record statistics from Elasticsearch.

    :param recordid: internal record identifier (ES document id).
    :param throws: when False, swallow unexpected errors and return None.
    :returns: the record's ``_stats`` dict, or None if the record is
        missing (or on error when ``throws`` is False).
    """
    try:
        response = current_search_client.get(
            index=build_alias_name('records'),
            id=recordid,
            params={'_source_includes': '_stats'},
        )
        return response['_source']['_stats']
    except NotFoundError:
        # The record document does not exist in the index.
        return None
    except Exception:
        if not throws:
            return None
        raise
def chunkify(iterable, n):
    """Create equally sized tuple-chunks from an iterable.

    The final chunk may be shorter than ``n`` when the iterable's length
    is not a multiple of ``n``.
    """
    iterator = iter(iterable)
    chunk = tuple(itertools.islice(iterator, n))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(iterator, n))
@lru_cache(maxsize=1024)
def fetch_record(recid):
    """Cached record fetch.

    Resolves a recid to a ``(pid, record)`` pair via the record resolver.
    NOTE(review): results are cached per-process by ``lru_cache``, so a
    stale record may be returned until the entry is evicted — confirm
    callers tolerate this.
    """
    return record_resolver.resolve(recid)
@lru_cache(maxsize=1024)
def fetch_record_file(recid, filename):
    """Cached record file fetch.

    :param recid: recid of the record owning the file.
    :param filename: key of the file inside the record's files bucket.
    :returns: the file's object version (``.obj``), cached per-process.
    """
    _, record = fetch_record(recid)
    return record.files[filename].obj
| 4,819 | Python | .py | 134 | 28.761194 | 78 | 0.63309 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,618 | __init__.py | zenodo_zenodo/zenodo/modules/stats/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo statistics module."""
from __future__ import absolute_import, print_function
from .proxies import current_stats_search_client
# Explicit public API of this package.
__all__ = (
    'current_stats_search_client',
)
| 1,156 | Python | .py | 29 | 38.586207 | 76 | 0.768477 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,619 | proxies.py | zenodo_zenodo/zenodo/modules/stats/proxies.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Proxies for Zenodo stats module."""
from __future__ import absolute_import, print_function
from flask import current_app
from werkzeug.local import LocalProxy
# LocalProxy defers the extension lookup until the proxy is actually used,
# so this can be imported before the 'zenodo-stats' extension is initialized.
current_stats_search_client = LocalProxy(
    lambda: current_app.extensions['zenodo-stats'].search_client)
"""Proxy to Elasticsearch client used for statistics queries."""
| 1,306 | Python | .py | 30 | 42.266667 | 76 | 0.779088 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,620 | __init__.py | zenodo_zenodo/zenodo/modules/stats/templates/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Elasticsearch templates."""
| 999 | Python | .py | 24 | 40.583333 | 76 | 0.772074 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,621 | __init__.py | zenodo_zenodo/zenodo/modules/stats/templates/aggregations/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Elasticsearch index templates for aggregations."""
| 1,022 | Python | .py | 24 | 41.541667 | 76 | 0.774323 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,622 | __init__.py | zenodo_zenodo/zenodo/modules/stats/templates/aggregations/v7/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016-2019 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Elasticsearch v7 index templates for stats aggregations."""
| 1,036 | Python | .py | 24 | 42.125 | 76 | 0.774481 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,623 | __init__.py | zenodo_zenodo/zenodo/modules/stats/templates/events/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Elasticsearch index templates for events."""
| 1,016 | Python | .py | 24 | 41.291667 | 76 | 0.772957 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,624 | __init__.py | zenodo_zenodo/zenodo/modules/stats/templates/events/v7/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016-2019 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Elasticsearch v7 index templates for stats events."""
| 1,030 | Python | .py | 24 | 41.875 | 76 | 0.773134 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,625 | __init__.py | zenodo_zenodo/zenodo/modules/rest/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Root REST API endpoint for Zenodo."""
from __future__ import absolute_import, print_function
| 1,065 | Python | .py | 25 | 41.52 | 76 | 0.771676 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,626 | views.py | zenodo_zenodo/zenodo/modules/rest/views.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Root REST API endpoint for Zenodo."""
from __future__ import absolute_import, print_function
from flask import Blueprint, Response, current_app, json, request, url_for
# Blueprint exposing the REST API root ('/') endpoint.
blueprint = Blueprint(
    'zenodo_rest',
    __name__,
    url_prefix='',
)
def _format_args():
    """Get JSON dump indentation and separators.

    :returns: kwargs dict for :func:`json.dumps` (``indent`` and
        ``separators``), pretty-printed only for non-XHR requests when
        ``JSONIFY_PRETTYPRINT_REGULAR`` is enabled.
    """
    # Outside an application/request context fall back to compact output.
    try:
        pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and
                  not request.is_xhr)
    except RuntimeError:
        pretty = False
    if pretty:
        return {'indent': 2, 'separators': (', ', ': ')}
    return {'indent': None, 'separators': (',', ':')}
@blueprint.route('/')
def index():
    """REST API root endpoint.

    Returns a JSON document linking to the top-level list endpoints.
    """
    links = {
        'communities': url_for(
            'invenio_communities_rest.communities_list', _external=True),
        'deposits': url_for(
            'invenio_deposit_rest.depid_list', _external=True),
        'funders': url_for(
            'invenio_records_rest.frdoi_list', _external=True),
        'grants': url_for(
            'invenio_records_rest.grant_list', _external=True),
        'files': url_for(
            'invenio_files_rest.location_api', _external=True),
        'licenses': url_for(
            'invenio_records_rest.od_lic_list', _external=True),
        'records': url_for(
            'invenio_records_rest.recid_list', _external=True),
    }
    body = json.dumps({'links': links}, **_format_args())
    return Response(body, mimetype='application/json')
| 2,784 | Python | .py | 76 | 28.934211 | 76 | 0.614302 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,627 | ext.py | zenodo_zenodo/zenodo/modules/jsonschemas/ext.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""JSON schemas compiler for Zenodo."""
from __future__ import absolute_import, print_function
from . import config
from .cli import jsonschemas
class ZenodoJSONSchemas(object):
    """Zenodo JSON schemas extension.

    Registers configuration defaults and the ``jsonschemas`` CLI group.
    """

    def __init__(self, app=None):
        """Extension initialization."""
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Flask application initialization."""
        self.init_config(app)
        app.extensions['zenodo-jsonschemas'] = self
        # Expose the schema-compilation commands on the app's CLI.
        app.cli.add_command(jsonschemas)

    @staticmethod
    def init_config(app):
        """Copy this module's config defaults onto the application."""
        prefix = 'ZENODO_JSONSCHEMAS_'
        for key in dir(config):
            if key.startswith(prefix):
                app.config.setdefault(key, getattr(config, key))
| 1,737 | Python | .py | 44 | 35.477273 | 76 | 0.715727 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,628 | config.py | zenodo_zenodo/zenodo/modules/jsonschemas/config.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for ZenodoJSONSchemas."""
from __future__ import absolute_import, print_function
# (source, destination) schema-path pair for records: the '_src' schema is
# compiled by the 'jsonschemas compilerecord' CLI and written to the second
# path. Paths are resolvable by invenio-jsonschemas.
ZENODO_JSONSCHEMAS_RECORD_SCHEMA = (
    'records/record_src-v1.0.0.json',
    'records/record-v1.0.0.json')
# Same (source, destination) pair for the deposit schema
# ('jsonschemas compiledeposit').
ZENODO_JSONSCHEMAS_DEPOSIT_SCHEMA = (
    'deposits/records/record_src-v1.0.0.json',
    'deposits/records/record-v1.0.0.json')
| 1,306 | Python | .py | 31 | 40.483871 | 76 | 0.763965 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,629 | cli.py | zenodo_zenodo/zenodo/modules/jsonschemas/cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for ZenodoJSONSchemas."""
from __future__ import absolute_import, print_function
import json
import click
from flask import current_app
from flask.cli import with_appcontext
from .compilers import compile_deposit_jsonschema, compile_record_jsonschema
from .utils import get_abs_schema_path, save_jsonschema
# Click group under which the 'compilerecord'/'compiledeposit' commands below
# are registered.
@click.group()
def jsonschemas():
    """Command for resolving jsonschemas."""
def compile_common_cli(output_file, default_file, compile_fun, schema_paths):
    """Common CLI parts of jsonschema compilation.

    If ``default_file`` is set to True, write to a pre-configured
    destination in the current repository (after confirmation);
    alternatively, if ``output_file`` is set, write to a custom
    destination. Otherwise the compiled schema is printed to stdout.

    :param output_file: Filename to which the jsonschema should be written.
    :type output_file: str
    :param default_file: Flag if the output file should be the default
        repository jsonschema, as defined in config.
    :type default_file: bool
    :param compile_fun: Function turning old JSONSchema into a resolved one.
    :param schema_paths: A pair of jsonschema names, resolvable by
        invenio-jsonschemas, pointing to a source and destination schema
        (2-tuple of strings).
    :type schema_paths: tuple
    """
    src_path, dst_path = schema_paths
    compiled = compile_fun(src_path)
    if default_file:
        dst_abs = get_abs_schema_path(dst_path)
        prompt = ('This will overwrite the jsonschema in this'
                  ' repository ({}). Continue?'.format(dst_abs))
        if click.confirm(prompt, default=True):
            save_jsonschema(compiled, dst_abs)
    elif output_file:
        save_jsonschema(compiled, output_file)
    else:
        click.echo(json.dumps(compiled, indent=2))
@jsonschemas.command('compilerecord')
@click.option('--output_file', '-f', type=click.Path(exists=False,
                                                     dir_okay=False))
@click.option('--default_file', '-d', is_flag=True, default=False)
@with_appcontext
def compile_record_cli(output_file, default_file):
    """Compile Zenodo record jsonschema.

    Uses the (source, destination) pair from
    ``ZENODO_JSONSCHEMAS_RECORD_SCHEMA``; see ``compile_common_cli`` for
    the output-selection semantics of the two options.
    """
    compile_common_cli(output_file, default_file, compile_record_jsonschema,
                       current_app.config['ZENODO_JSONSCHEMAS_RECORD_SCHEMA'])
@jsonschemas.command('compiledeposit')
@click.option('--output_file', '-f', type=click.Path(exists=False,
                                                     dir_okay=False))
@click.option('--default_file', '-d', is_flag=True, default=False)
@with_appcontext
def compile_deposit_cli(output_file, default_file):
    """Compile Zenodo deposit jsonschema.

    Uses the (source, destination) pair from
    ``ZENODO_JSONSCHEMAS_DEPOSIT_SCHEMA``; see ``compile_common_cli`` for
    the output-selection semantics of the two options.
    """
    compile_common_cli(output_file, default_file, compile_deposit_jsonschema,
                       current_app.config['ZENODO_JSONSCHEMAS_DEPOSIT_SCHEMA'])
| 3,804 | Python | .py | 80 | 42.2125 | 79 | 0.718177 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,630 | utils.py | zenodo_zenodo/zenodo/modules/jsonschemas/utils.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""ZenodoJSONSchemas utilities functions."""
from __future__ import absolute_import, print_function
import json
from copy import deepcopy
from flask import current_app
from werkzeug.local import LocalProxy
# Lazy proxy to the invenio-jsonschemas extension (schema registry).
current_jsonschemas = LocalProxy(
    lambda: current_app.extensions['invenio-jsonschemas']
)
# Lazy proxy to the invenio-records state (provides ``replace_refs``).
_records_state = LocalProxy(lambda: current_app.extensions['invenio-records'])
def resolve_schema_path(schema_path):
    """Resolve a schema by name.

    Resolve a schema by its registered name, e.g.
    'records/record-v1.0.0.json'.

    WARNING: This method returns a deepcopy of the original schema.
    Always use this method, as any modifications to a resolved schema
    would otherwise be retained at the application level!

    :param schema_path: schema path, e.g.: 'records/record-v1.0.0.json'.
    :type schema_path: str
    :returns: JSON schema
    :rtype: dict
    """
    # Copy so callers can mutate the result without corrupting the registry.
    return deepcopy(current_jsonschemas.get_schema(schema_path))
def resolve_schema_url(schema_url):
    """Resolve a schema url to a dict.

    WARNING: This method returns a deepcopy of the original schema.
    Always use this method, as any modifications to a resolved schema
    would otherwise be retained at the application level!

    :param schema_url: absolute url of schema, e.g.:
        'https://zenodo.org/schemas/records/record-v1.0.0.json'.
    :type schema_url: str
    :returns: JSON schema
    :rtype: dict
    """
    # Translate the URL to a registered path, then reuse the path resolver
    # (which already deep-copies).
    return resolve_schema_path(current_jsonschemas.url_to_path(schema_url))
def replace_schema_refs(schema):
    """Replace all the refs in jsonschema.

    Delegates ``$ref`` resolution to the invenio-records state and returns
    a deepcopy so callers cannot mutate any shared schema objects.

    :param schema: JSON schema for which the refs should be resolved.
    :type schema: dict
    :returns: JSON schema with resolved refs.
    :rtype: dict
    """
    return deepcopy(_records_state.replace_refs(schema))
def get_abs_schema_path(schema_path):
    """Resolve absolute schema path on disk from schema name.

    Resolve schema name to an absolute schema path on disk, e.g.:
    'records/record-v1.0.0.json' could resolve to
    '/absolute/path/schemas/records/record-v1.0.0.json'
    """
    return current_jsonschemas.get_schema_path(schema_path)
def save_jsonschema(schema, path):
    """Save jsonschema to disk path.

    Serializes with 2-space indent, sorted keys and a trailing newline so
    the on-disk file is stable across runs.
    """
    contents = json.dumps(schema, indent=2, sort_keys=True,
                          separators=(',', ': '))
    with open(path, 'w') as fp:
        fp.write(contents + '\n')
def merge_dicts(first, second):
    """Recursively merge the 'second' dictionary into the 'first' one.

    Neither input is modified; a deep copy of ``first`` with ``second``
    merged in is returned. Non-empty dict values are merged recursively;
    any other value (including an empty dict) replaces the existing one.
    """
    merged = deepcopy(first)
    for key, value in second.items():
        if isinstance(value, dict) and value:
            merged[key] = merge_dicts(merged.get(key, {}), value)
        else:
            merged[key] = value
    return merged
def remove_keys(d, keys):
    """Remove keys from a dictionary (nested).

    Recurses through nested dicts and lists; scalars are returned as-is.

    :param d: dictionary from which the keys are to be removed.
    :type d: dict
    :param keys: keys to be removed (list of str)
    :type keys: list
    """
    if isinstance(d, dict):
        return {k: remove_keys(v, keys)
                for k, v in d.items() if k not in keys}
    if isinstance(d, list):
        return [remove_keys(item, keys) for item in d]
    return d
| 4,216 | Python | .py | 103 | 35.980583 | 79 | 0.697919 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,631 | __init__.py | zenodo_zenodo/zenodo/modules/jsonschemas/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""ZenodoJSONSchemas package."""
from __future__ import absolute_import, print_function
| 1,057 | Python | .py | 25 | 41.2 | 76 | 0.773786 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,632 | compilers.py | zenodo_zenodo/zenodo/modules/jsonschemas/compilers.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""JSON schema compiler methods."""
from __future__ import absolute_import, print_function
from .utils import merge_dicts, remove_keys, replace_schema_refs, \
resolve_schema_path, resolve_schema_url
def _iter_all_of(schema):
    """Iterate over the items within schema 'allOf' definition at the root.

    Yields each sub-schema resolved from its '$ref' URL, preserving the
    original 'allOf' ordering.
    """
    for sub_schema_ref in schema.get('allOf', []):
        yield resolve_schema_url(sub_schema_ref['$ref'])
def _compile_common(schema):
    """Compile common parts for record and deposit.

    Merges all 'allOf' sub-schemas into the root schema, resolves the
    remaining refs, and restores the root 'id' and 'title'.
    """
    # Preserve root identity fields, which the merge below may clobber.
    preserved = {'id': schema['id'], 'title': schema['title']}
    # We need to iter 'allOf' manually because jsonresolver
    # will not preserve ordering of subschema keys.
    for sub_schema in _iter_all_of(schema):
        schema = merge_dicts(schema, sub_schema)
    schema = replace_schema_refs(schema)
    schema.pop('allOf', None)
    schema.update(preserved)
    return schema
def _compile_deposit_base(schema):
    """Compile the base deposition jsonschema.

    Merges the first 'allOf' entry (expected to be the base deposit
    schema, minus its '_files' property) into the schema and drops that
    entry from 'allOf'.
    """
    base_ref = schema['allOf'][0]
    # The first entry must be the base deposit schema.
    assert 'deposits/deposit' in base_ref['$ref']
    base_schema = resolve_schema_url(base_ref['$ref'])
    del base_schema['properties']['_files']
    merged = merge_dicts(base_schema, schema)
    merged['allOf'] = merged['allOf'][1:]
    return merged
def compile_deposit_jsonschema(schema_path):
    """Compile the deposit jsonschema.

    :param schema_path: registered name of the source schema.
    :returns: fully compiled deposit schema (dict).
    """
    compiled = resolve_schema_path(schema_path)
    compiled = _compile_deposit_base(compiled)
    compiled = _compile_common(compiled)
    # Deposits do not enforce nested 'required' constraints.
    compiled['properties'] = remove_keys(compiled['properties'],
                                         ['required', ])
    return compiled
def compile_record_jsonschema(schema_path):
    """Compile the record jsonschema.

    :param schema_path: registered name of the source schema.
    :returns: fully compiled record schema (dict).
    """
    schema = resolve_schema_path(schema_path)
    schema = _compile_deposit_base(schema)
    schema = _compile_common(schema)
    # Description inherited from deposit
    del schema['description']
    return schema
| 3,043 | Python | .py | 71 | 38.915493 | 78 | 0.71781 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,633 | users.py | zenodo_zenodo/zenodo/modules/fixtures/users.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo communities fixture loading."""
from __future__ import absolute_import, print_function
from datetime import date, datetime, time, timedelta
from flask import current_app
from flask_security.utils import hash_password
from invenio_access.models import ActionUsers
from invenio_accounts.models import User
from invenio_db import db
def loaduser(user_data):
    """Load a single user to Zenodo from JSON fixture.

    Creates the account through the Flask-Security datastore (with a
    confirmation date in the past so it is immediately usable) and then
    grants/denies the access actions listed under the ``access`` key.
    """
    datastore = current_app.extensions['security'].datastore
    datastore.create_user(
        email=user_data['email'],
        password=hash_password(user_data['password']),
        active=user_data.get('active', True),
        confirmed_at=datetime.utcnow() - timedelta(days=30),
    )
    db.session.commit()

    user = User.query.filter_by(email=user_data['email']).one()
    actions = current_app.extensions['invenio-access'].actions
    # Fixture entries look like ('allow', 'admin-access'); dispatch to the
    # matching ActionUsers factory.
    factories = {
        'allow': ActionUsers.allow,
        'deny': ActionUsers.deny,
    }
    for action_type, action_name in user_data.get('access', []):
        db.session.add(
            factories[action_type](actions[action_name], user_id=user.id)
        )
    db.session.commit()
| 2,238 | Python | .py | 55 | 37.090909 | 76 | 0.721048 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,634 | records.py | zenodo_zenodo/zenodo/modules/fixtures/records.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016, 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
from uuid import uuid4
from flask import current_app
from flask_security import login_user
from invenio_db import db
from invenio_sipstore.models import SIPMetadataType
from six import BytesIO
from zenodo.modules.deposit.api import ZenodoDeposit
from zenodo.modules.deposit.loaders import legacyjson_v1
from zenodo.modules.deposit.minters import zenodo_deposit_minter
def loaddemorecords(records, owner):
    """Load demo records.

    :param records: Legacy-JSON record dicts; each must have a ``files``
        list whose first entry is used as the (dummy) uploaded file name.
    :param owner: User that will own and publish the created deposits.
    """
    with current_app.test_request_context():
        login_user(owner)
        for record in records:
            deposit_data = legacyjson_v1(record)
            deposit_id = uuid4()
            zenodo_deposit_minter(deposit_id, deposit_data)
            deposit = ZenodoDeposit.create(deposit_data, id_=deposit_id)
            db.session.commit()
            filename = record['files'][0]
            # BytesIO requires bytes: encode the filename (which doubles as
            # the dummy file content).  The previous code passed a text
            # ``str``, which raises TypeError on Python 3.
            deposit.files[filename] = BytesIO(filename.encode('utf-8'))
            db.session.commit()
            deposit.publish()
            db.session.commit()
def loadsipmetadatatypes(types):
    """Load SIP metadata types.

    :param types: Iterable of keyword-argument dicts, one per
        :class:`SIPMetadataType` row to create.
    """
    with db.session.begin_nested():
        # ``type_data`` avoids shadowing the ``type`` builtin.
        for type_data in types:
            db.session.add(SIPMetadataType(**type_data))
    db.session.commit()
| 2,256 | Python | .py | 55 | 36.654545 | 76 | 0.729927 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,635 | ext.py | zenodo_zenodo/zenodo/modules/fixtures/ext.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Fixtures for Zenodo."""
from __future__ import absolute_import, print_function
import sys
from os.path import join
from .cli import fixtures
class ZenodoFixtures(object):
    """Zenodo fixtures extension.

    Registers the ``fixtures`` CLI group and provides default locations
    for fixture data files and the archive.
    """

    #: Configuration defaults filled in by :meth:`init_config`.
    _CONFIG_DEFAULTS = (
        ('FIXTURES_FILES_LOCATION', 'var/instance/data'),
        ('FIXTURES_ARCHIVE_LOCATION', 'var/instance/archive'),
    )

    def __init__(self, app=None):
        """Extension initialization (optionally against *app*)."""
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Flask application initialization.

        Sets configuration defaults and registers the fixtures CLI group.
        """
        self.init_config(app.config)
        app.cli.add_command(fixtures)

    def init_config(self, config):
        """Fill in default fixture/archive locations when unset."""
        for key, relpath in self._CONFIG_DEFAULTS:
            config.setdefault(key, join(sys.prefix, relpath))
| 1,805 | Python | .py | 48 | 33.166667 | 76 | 0.705378 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,636 | licenses.py | zenodo_zenodo/zenodo/modules/fixtures/licenses.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo license fixture loading."""
from __future__ import absolute_import, print_function, unicode_literals
import json
from collections import OrderedDict
from flask import current_app
from invenio_db import db
from invenio_opendefinition.minters import license_minter
from invenio_opendefinition.resolvers import license_resolver
from invenio_opendefinition.validators import license_validator
from invenio_pidstore.errors import PIDAlreadyExists, PIDDoesNotExistError
from invenio_records.api import Record
from .utils import read_json
def find_matching_licenses(legacy_licenses, od_licenses):
    """Match the licenses from the legacy set with open definition licenses.

    :param legacy_licenses: Zenodo legacy licenses.
    :type legacy_licenses: list of dict
    :param od_licenses: OpenDefinition.org licenses.
    :type od_licenses: list of dict
    :returns: Tuple ``(matched, missing)``; ``matched`` holds
        ``(legacy, opendefinition, matcher_name)`` triples, ``missing``
        the legacy licenses with no counterpart.
    """
    # Hand-curated legacy-id -> OpenDefinition-id overrides; tried first so
    # they win over the heuristic matchers below.
    fixed = {
        "cc-zero": "CC0-1.0",
        "cc-by-sa": "CC-BY-SA-4.0",
        "cc-by-nc-4.0": "CC-BY-NC-4.0",
        "cc-by-nd-4.0": "CC-BY-ND-4.0",
        "cc-by": "CC-BY-4.0",
        "agpl-v3": "AGPL-3.0",
        "apache2.0": "Apache-2.0",
        "apache": "Apache-2.0",
        "bsl1.0": "BSL-1.0",
        "cuaoffice": "CUA-OPL-1.0",
        "ecl2": "ECL-2.0",
        "ver2_eiffel": "EFL-2.0",
        "lucent1.02": "LPL-1.02",
        "pythonsoftfoundation": "Python-2.0",
        "qtpl": "QPL-1.0",
        "real": "RPSL-1.0",
        "vovidapl": "VSL-1.0",
        "ukcrown-withrights": "ukcrown-withrights",
        "sun-issl": "SISSL",
        "pythonpl": "CNRI-Python",
    }
    # Heuristics ordered from strongest (curated mapping, exact id) to
    # weakest (title-prefix comparisons).
    matchers = (
        ("Fixed", lambda leg, od: leg['id'] in fixed and
         fixed[leg['id']] == od['id']),
        ("ID", lambda leg, od: leg['id'] == od['id']),
        ("ID_Upper", lambda leg, od: leg['id'].upper() == od['id'].upper()),
        ("URL", lambda leg, od: leg['url'] and leg['url'] == od['url']),
        ("URL_Upper", lambda leg, od: leg['url'] and
         leg['url'].upper() == od['url'].upper()),
        ("Z title in O", lambda leg, od: leg['title'] and
         od['title'].upper().startswith(leg['title'].upper())),
        ("O title in Z", lambda leg, od: leg['title'] and
         leg['title'].upper().startswith(od['title'].upper())),
    )
    matched = []
    missing = []
    for legacy in legacy_licenses:
        hit = None
        for matcher_name, matches in matchers:
            # First OD license satisfying the current heuristic, if any.
            candidate = next(
                (od for od in od_licenses if matches(legacy, od)), None)
            if candidate is not None:
                hit = (legacy, candidate, matcher_name)
                break
        if hit is None:
            missing.append(legacy)
        else:
            matched.append(hit)
    return matched, missing
def matchlicenses(legacy_lic_filename, od_filename, destination):
    """Generate the JSON with the licenses mapping.

    Reads the legacy and OpenDefinition license files, matches them and
    writes an ordered ``legacy-id -> opendefinition-id`` mapping to
    ``destination`` as indented JSON.
    """
    with open(legacy_lic_filename, 'r') as legacy_fp:
        legacy_licenses = json.load(legacy_fp)
    with open(od_filename, 'r') as od_fp:
        od_licenses = json.load(od_fp)
    # The OpenDefinition dump may be keyed by license id; flatten to a list.
    if isinstance(od_licenses, dict):
        od_licenses = list(od_licenses.values())
    matched, _missing = find_matching_licenses(legacy_licenses, od_licenses)
    mapping = OrderedDict(
        (legacy['id'], od['id']) for legacy, od, _ in matched)
    with open(destination, 'w') as out_fp:
        json.dump(mapping, out_fp, indent=2)
def update_legacy_meta(license):
    """Update the Zenodo legacy terms for license metadata.

    Updates the metadata in order to conform with opendefinition schema:
    the boolean ``is_*_compliant`` flags become ``*_conformance`` strings
    and a ``$schema`` URL is attached.  Returns a new dict; the input is
    not modified.
    """
    meta = dict(license)
    conformance_map = (
        ('is_okd_compliant', 'od_conformance'),
        ('is_osi_compliant', 'osd_conformance'),
    )
    for legacy_key, conformance_key in conformance_map:
        if conformance_key not in meta:
            meta[conformance_key] = ('approved' if meta[legacy_key]
                                     else 'rejected')
        # Legacy boolean flags are dropped either way.
        meta.pop(legacy_key, None)
    meta['$schema'] = 'http://{0}{1}/{2}'.format(
        current_app.config['JSONSCHEMAS_HOST'],
        current_app.config['JSONSCHEMAS_ENDPOINT'],
        current_app.config['OPENDEFINITION_SCHEMAS_DEFAULT_LICENSE']
    )
    return meta
def create_new_license(license):
    """Create a new license record.

    Normalizes the legacy metadata, validates it against the
    opendefinition schema, stores it as a record and mints its PID.

    :param license: License dictionary to be loaded.
    :type license: dict
    """
    license = update_legacy_meta(license)
    license_validator.validate(license)
    record = Record.create(license)
    # NOTE(review): presumably raises PIDAlreadyExists for duplicates --
    # loadlicenses() catches exactly that; confirm against the minter.
    license_minter(record.id, license)
def loadlicenses():
    """Load Zenodo licenses.

    Create extra PID if license is to be mapped and already exists, otherwise
    create a new license record and a PID.  Idempotent: already-minted
    licenses and mappings are skipped.
    """
    data = read_json('data/licenses.json')
    map_ = read_json('data/licenses_map.json')
    try:
        for lic in data:
            try:
                create_new_license(lic)
            except PIDAlreadyExists:
                # Loaded on a previous run -- safe to skip.
                pass
        # Mint alternate PIDs, e.g. a legacy id pointing at the same record
        # as its OpenDefinition id.
        for pid, alt_pid in map_.items():
            try:
                pid, record = license_resolver.resolve(pid)
                license_minter(record.id, {'id': alt_pid})
            except (PIDDoesNotExistError, PIDAlreadyExists):
                pass
        db.session.commit()
    except Exception:
        db.session.rollback()
        raise
| 6,025 | Python | .py | 154 | 32.655844 | 79 | 0.633784 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,637 | communities.py | zenodo_zenodo/zenodo/modules/fixtures/communities.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo communities fixture loading."""
from __future__ import absolute_import, print_function
from invenio_accounts.models import User
from invenio_communities.models import Community
from invenio_communities.utils import save_and_validate_logo
from invenio_db import db
from .utils import file_stream
def loadcommunity(comm_data):
    """Load the Zenodo communities fixture.

    ``comm_data`` is consumed: ``id``, ``owner_email`` and the optional
    ``logo`` path are popped and the remaining keys are forwarded to
    ``Community.create``.
    """
    logo_path = comm_data.pop('logo', None)
    community_id = comm_data.pop('id')
    owner = User.query.filter_by(email=comm_data.pop('owner_email')).one()
    community = Community.create(community_id, owner.id, **comm_data)
    if logo_path:
        logo_stream = file_stream(logo_path)
        community.logo_ext = save_and_validate_logo(
            logo_stream, logo_stream.name, community_id)
    db.session.commit()
| 1,784 | Python | .py | 42 | 40 | 76 | 0.752304 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,638 | cli.py | zenodo_zenodo/zenodo/modules/fixtures/cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016, 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
import json
from os.path import dirname, join
import click
from flask.cli import with_appcontext
from invenio_accounts.models import User
from invenio_communities.models import Community
from invenio_communities.utils import initialize_communities_bucket
from invenio_db import db
from invenio_openaire.minters import funder_minter, grant_minter
from invenio_pidstore.models import PersistentIdentifier
from invenio_records.api import Record
from .communities import loadcommunity
from .files import loadbuckets, loaddemofiles, loadlocations
from .licenses import loadlicenses, matchlicenses
from .oai import loadoaisets
from .records import loaddemorecords, loadsipmetadatatypes
from .users import loaduser
from .utils import read_json
# ``chain=True`` lets several subcommands run in one invocation,
# e.g. ``zenodo fixtures init loadlicenses loadcommunities``.
@click.group(chain=True)
def fixtures():
    """Command for loading fixture data."""
@fixtures.command()
@with_appcontext
def init():
    """Load basic data.

    Sets up storage locations, special buckets, OAI-PMH sets and the
    communities bucket -- everything a fresh instance needs before records
    or files can be loaded.
    """
    loadlocations()
    loadbuckets()
    loadoaisets()
    initialize_communities_bucket()
@fixtures.command('loadlocations')
@with_appcontext
def loadlocations_cli():
    """Load data store location."""
    # loadlocations() returns only the *newly created* Location objects.
    locs = loadlocations()
    click.secho('Created location(s): {0}'.format(
        [loc.uri for loc in locs]), fg='green')
@fixtures.command('loadoaisets')
@with_appcontext
def loadoaisets_cli():
    """Load OAI-PMH sets."""
    # loadoaisets() already returns the number of sets created (an int),
    # so it must not be passed through len() -- that raised TypeError.
    sets_count = loadoaisets()
    click.secho('Created {0} OAI-PMH sets'.format(sets_count), fg='green')
@fixtures.command('loadfp6grants')
@with_appcontext
def loadfp6grants_cli():
    """Load one-off grants."""
    data = read_json('data/grants.json')
    loaded = 0
    for g in data:
        # Skip grants whose PID is already minted (idempotent reload).
        if not PersistentIdentifier.query.filter_by(
                pid_type='grant', pid_value=g['internal_id']).count():
            r = Record.create(g)
            grant_minter(r.id, r)
            db.session.commit()
            loaded += 1
    click.echo("Loaded {0} new grants out of {1}.".format(loaded, len(data)))
@fixtures.command('loadfunders')
@with_appcontext
def loadfunders_cli():
    """Load the supported funders."""
    data = read_json('data/funders.json')
    loaded = 0
    for f in data:
        # Skip funders whose DOI-based PID is already minted (idempotent).
        if not PersistentIdentifier.query.filter_by(
                pid_type='frdoi', pid_value=f['doi']).count():
            r = Record.create(f)
            funder_minter(r.id, r)
            db.session.commit()
            loaded += 1
    click.echo("Loaded {0} new funders out of {1}.".format(loaded, len(data)))
@fixtures.command('loaddemorecords')
@click.option('--records-file', type=click.File(),
              default=join(dirname(__file__), 'data/records.json'))
@click.option('--owner', default='info@zenodo.org')
@with_appcontext
def loaddemorecords_cli(records_file=None, owner=None):
    """Load demo records.

    ``--owner`` accepts either a numeric user id or an email address.
    """
    # Resolve the user
    if owner.isdigit():  # user ID passed
        owner = User.query.get(int(owner))
    else:
        owner = User.query.filter_by(email=owner).one()
    with click.progressbar(json.load(records_file)) as records:
        loaddemorecords(records, owner)
@fixtures.command('loadsipmetadatatypes')
@with_appcontext
def loadsipmetadatatypes_cli():
    """Load SIP metadata types."""
    click.secho('Loading SIP metadata types...', fg='blue')
    # Types are bundled with the package as a JSON fixture.
    src = join(dirname(__file__), 'data/sipmetadatatypes.json')
    with open(src, 'r') as fp:
        data = json.load(fp)
    with click.progressbar(data) as types:
        loadsipmetadatatypes(types)
    click.secho('SIP metadata types loaded!', fg='green')
@fixtures.command('loaddemofiles')
@click.argument('source', type=click.Path(exists=True, dir_okay=False,
                                          resolve_path=True))
@with_appcontext
def loaddemofiles_cli(source):
    """Load demo files.

    SOURCE is the path of a single file that will back all object versions.
    """
    loaddemofiles(source)
@fixtures.command('loadlicenses')
@with_appcontext
def loadlicenses_cli():
    """Load Zenodo licenses (idempotent; skips existing PIDs)."""
    loadlicenses()
@fixtures.command('matchlicenses')
@click.argument('legacy_source', type=click.Path(exists=True, dir_okay=False,
                                                 resolve_path=True))
@click.argument('od_source', type=click.Path(exists=True, dir_okay=False,
                                             resolve_path=True))
@click.argument('destination', type=click.Path(exists=False, dir_okay=False))
def matchlicenses_cli(legacy_source, od_source, destination):
    """Match legacy Zenodo licenses with OpenDefinition.org licenses."""
    # Pure file-to-file operation; no app context needed.
    matchlicenses(legacy_source, od_source, destination)
@fixtures.command('loadcommunities')
@click.option('-i', '--input-file')
@with_appcontext
def loadcommunities_cli(input_file=None):
    """Load Zenodo communities."""
    # Fall back to the bundled fixture when no input file is given.
    data = read_json(input_file or 'data/communities.json')
    skipped = 0
    for comm_data in data:
        # Existing communities are left untouched (idempotent reload).
        if not Community.query.filter_by(id=comm_data['id']).count():
            loadcommunity(comm_data)
        else:
            skipped += 1
    click.secho('Loaded {0} communities (skipped {1} existing).'.format(
        len(data) - skipped, skipped), fg='green')
@fixtures.command('loadusers')
@click.option('-i', '--input-file')
@with_appcontext
def loadusers_cli(input_file=None):
    """Load Zenodo users."""
    # Fall back to the bundled fixture when no input file is given.
    users = read_json(input_file or 'data/users.json')
    for user_data in users:
        loaduser(user_data)
| 6,375 | Python | .py | 163 | 34.312883 | 79 | 0.70157 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,639 | utils.py | zenodo_zenodo/zenodo/modules/fixtures/utils.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Utils for fixtures."""
from __future__ import absolute_import, print_function
import json
from pkg_resources import resource_stream, resource_string
def read_json(path):
    """Read and parse a JSON file bundled as a package resource.

    :param path: Path relative to the ``zenodo.modules.fixtures`` package.
    :returns: The decoded JSON value.
    """
    return json.loads(
        resource_string('zenodo.modules.fixtures', path).decode('utf8'))
def file_stream(path):
    """Open a package resource as a binary file-like stream.

    :param path: Path relative to the ``zenodo.modules.fixtures`` package.
    """
    return resource_stream('zenodo.modules.fixtures', path)
| 1,427 | Python | .py | 34 | 40.029412 | 76 | 0.762455 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,640 | __init__.py | zenodo_zenodo/zenodo/modules/fixtures/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Fixtures module."""
from __future__ import absolute_import, print_function
from .ext import ZenodoFixtures
__all__ = ('ZenodoFixtures', )
| 1,112 | Python | .py | 27 | 40.037037 | 76 | 0.768733 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,641 | oai.py | zenodo_zenodo/zenodo/modules/fixtures/oai.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
from invenio_db import db
from invenio_oaiserver.models import OAISet
def loadoaisets(force=False):
    """Load the default OAI-PMH sets.

    :param force: Currently unused; kept for API symmetry with the other
        fixture loaders.
    :returns: Number of sets created.
    """
    sets = [
        ('openaire', 'OpenAIRE', None),
        ('openaire_data', 'OpenAIRE data sets', None),
    ]
    try:
        for setid, name, pattern in sets:
            oset = OAISet(spec=setid, name=name, search_pattern=pattern)
            db.session.add(oset)
        db.session.commit()
        return len(sets)
    except Exception:
        db.session.rollback()
        raise
| 1,598 | Python | .py | 42 | 34.547619 | 76 | 0.722115 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,642 | files.py | zenodo_zenodo/zenodo/modules/fixtures/files.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
from os import makedirs, stat
from os.path import exists
from flask import current_app
from invenio_db import db
from invenio_files_rest.models import FileInstance, Location, ObjectVersion
from zenodo.modules.exporter.utils import initialize_exporter_bucket
def loadbuckets():
    """Initialize any special buckets.

    Currently only the exporter bucket requires initialization.
    """
    initialize_exporter_bucket()
def loadlocations(force=False):
    """Load default file store and archive location.

    :param force: Currently unused.
    :returns: List of the Location objects that were newly created
        (existing locations are skipped).
    """
    try:
        created = []
        candidates = [
            ('default', True,
             current_app.config['FIXTURES_FILES_LOCATION']),
            ('archive', False,
             current_app.config['FIXTURES_ARCHIVE_LOCATION']),
        ]
        for name, is_default, uri in candidates:
            # Make sure local filesystem paths exist before registering.
            if uri.startswith('/') and not exists(uri):
                makedirs(uri)
            if not Location.query.filter_by(name=name).count():
                location = Location(name=name, uri=uri, default=is_default)
                db.session.add(location)
                created.append(location)
        db.session.commit()
        return created
    except Exception:
        db.session.rollback()
        raise
def loaddemofiles(source, force=False):
    """Load demo files.

    Creates a FileInstance for ``source`` and re-points *all* existing
    ObjectVersions at it (this is a fixture loader for demo instances).

    :param source: Path to the file whose content will back all objects.
    :param force: Currently unused; kept for backward compatibility.
    """
    s = stat(source)
    m = hashlib.md5()
    # Hash in fixed-size chunks so arbitrarily large demo files do not
    # need to fit in memory (previously the whole file was read at once).
    with open(source, 'rb') as fp:
        for chunk in iter(lambda: fp.read(1024 * 1024), b''):
            m.update(chunk)
    checksum = "md5:{0}".format(m.hexdigest())
    # Create a file instance
    with db.session.begin_nested():
        f = FileInstance.create()
        f.set_uri(source, s.st_size, checksum)
    # Replace all objects associated files.
    ObjectVersion.query.update({ObjectVersion.file_id: str(f.id)})
    db.session.commit()
| 2,726 | Python | .py | 70 | 33.714286 | 79 | 0.691783 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,643 | upload.py | zenodo_zenodo/scripts/upload.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Simple script to make an upload via the REST API."""
from __future__ import absolute_import, print_function, unicode_literals
import json
from time import sleep
import requests
from six import BytesIO
def upload(token, metadata, files, publish=True):
    """Make an upload via the local deposit REST API.

    :param token: Personal access token (sent as a Bearer header).
    :param metadata: Deposit metadata dict.
    :param files: Iterable of ``(filename, stream)`` pairs to upload.
    :param publish: Publish the deposit after uploading (default True).
    :returns: The ``id`` from the last API response -- the published
        record's id when ``publish`` is True, otherwise the deposit id.
    """
    base_url = 'http://localhost:5000/api/deposit/depositions'
    auth = {
        'Authorization': 'Bearer {0}'.format(token)
    }
    auth_json = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    auth_json.update(auth)
    # Create an empty deposit first; its response links drive the rest.
    r = requests.post(base_url, data='{}', headers=auth_json)
    assert r.status_code == 201
    links = r.json()['links']
    print('Create deposit:')
    print(r.json())
    # Wait for ES to index.
    sleep(1)
    for filename, stream in files:
        # File uploads use multipart form data, not JSON.
        r = requests.post(
            links['files'],
            data=dict(filename=filename),
            files=dict(file=stream),
            headers=auth)
        assert r.status_code == 201
        print('Upload file:')
        print(r.json())
    r = requests.put(
        links['self'],
        data=json.dumps(dict(metadata=metadata)),
        headers=auth_json
    )
    assert r.status_code == 200
    print('Update metadata:')
    print(r.json())
    if publish:
        r = requests.post(links['publish'], headers=auth)
        assert r.status_code == 202
        print('Publish:')
        print(r.json())
    return r.json()['id']
def upload_test(token, publish=True):
    """Make a sample upload with fixed metadata and a tiny text file.

    :param token: Personal access token forwarded to :func:`upload`.
    :param publish: Publish the deposit after uploading (default True).
    :returns: The record/deposit id returned by :func:`upload`.
    """
    metadata = {
        'title': 'My first upload',
        'upload_type': 'publication',
        'publication_type': 'book',
        'description': 'This is my first upload',
        'access_right': 'open',
        'license': 'cc-by',
        'creators': [{'name': 'Doe, John', 'affiliation': 'Zenodo'}]
    }
    files = [('test.txt', BytesIO(b'My first test upload.'))]
    return upload(token, metadata, files, publish=publish)
| 2,921 | Python | .py | 83 | 30.13253 | 76 | 0.659469 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,644 | test_schemas_datacite.py | zenodo_zenodo/tests/unit/records/test_schemas_datacite.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016-2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Dublin Core mapping test."""
from __future__ import absolute_import, print_function
import json
from datetime import datetime, timedelta
import pytest
from zenodo.modules.records.serializers import datacite_v31, datacite_v41
def today():
    """Return the current UTC date (not the local date)."""
    utc_now = datetime.utcnow()
    return utc_now.date()
def test_minimal(db, minimal_record_model, recid_pid):
    """Test DataCite v3.1 serialization of a minimal record."""
    # 10.5072 is the prefix treated as local/test here, so the DOI itself
    # becomes the DataCite identifier (contrast test_non_local_doi).
    minimal_record_model['doi'] = '10.5072/foo'
    obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
    assert obj == {
        'identifier': {'identifier': '10.5072/foo', 'identifierType': 'DOI'},
        'creators': [{'creatorName': 'Test', 'nameIdentifier': {}}],
        'titles': [{'title': 'Test'}],
        'publisher': 'Zenodo',
        'publicationYear': str(today().year),
        'dates': [{'dateType': 'Issued', 'date': today().isoformat()}],
        'subjects': [],
        'contributors': [],
        'resourceType': {
            'resourceType': None, 'resourceTypeGeneral': 'Software'},
        'alternateIdentifiers': [{
            'alternateIdentifier': 'http://localhost/record/123',
            'alternateIdentifierType': 'url',
        }],
        'relatedIdentifiers': [],
        'rightsList': [
            {'rights': 'Open Access',
             'rightsURI': 'info:eu-repo/semantics/openAccess'}],
        'descriptions': [
            {'description': 'My description', 'descriptionType': 'Abstract'}]
    }
def test_non_local_doi(db, minimal_record_model, recid_pid):
    """Test non-local DOI.

    A DOI not minted by this instance cannot serve as the DataCite
    identifier: the record URL is used instead, and the external DOI is
    demoted to an IsIdenticalTo related identifier.
    """
    minimal_record_model['doi'] = '10.1234/foo'
    obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
    assert obj['identifier'] == {'identifier': 'http://localhost/record/123',
                                 'identifierType': 'URL'}
    assert obj['relatedIdentifiers'] == [{
        'relatedIdentifier': '10.1234/foo',
        'relatedIdentifierType': 'DOI',
        'relationType': 'IsIdenticalTo',
    }]
def test_full(db, record_with_bucket, recid_pid):
"""Test full record metadata."""
_, full_record_model = record_with_bucket
full_record_model['doi'] = '10.5072/foo'
obj = datacite_v31.transform_record(recid_pid, full_record_model)
expected = {
"alternateIdentifiers": [
{
"alternateIdentifier": "urn:lsid:ubio.org:namebank:11815",
"alternateIdentifierType": "lsid"
},
{
"alternateIdentifier": "2011ApJS..192...18K",
"alternateIdentifierType": "ads"
},
{
'alternateIdentifier': '0317-8471',
'alternateIdentifierType': 'issn',
},
{
"alternateIdentifier": "10.1234/alternate.doi",
"alternateIdentifierType": "doi"
},
{
"alternateIdentifier": "http://localhost/record/12345",
"alternateIdentifierType": "url"
},
],
"contributors": [
{
"affiliation": "CERN",
"contributorName": "Smith, Other",
"contributorType": "Other",
"nameIdentifier": {
"nameIdentifier": "0000-0002-1825-0097",
"nameIdentifierScheme": "ORCID",
"schemeURI": "http://orcid.org/"
}
},
{
"affiliation": "",
"contributorName": "Hansen, Viggo",
"contributorType": "Other",
"nameIdentifier": {}
},
{
"affiliation": "CERN",
"contributorName": "Kowalski, Manager",
"contributorType": "DataManager",
"nameIdentifier": {}
}
],
"creators": [
{
"affiliation": "CERN",
"creatorName": "Doe, John",
"nameIdentifier": {
"nameIdentifier": "0000-0002-1694-233X",
"nameIdentifierScheme": "ORCID",
"schemeURI": "http://orcid.org/"
}
},
{
"affiliation": "CERN",
"creatorName": "Doe, Jane",
"nameIdentifier": {
"nameIdentifier": "0000-0002-1825-0097",
"nameIdentifierScheme": "ORCID",
"schemeURI": "http://orcid.org/"
}
},
{
"affiliation": "CERN",
"creatorName": "Smith, John",
"nameIdentifier": {}
},
{
"affiliation": "CERN",
"creatorName": "Nowak, Jack",
"nameIdentifier": {
"nameIdentifier": "170118215",
"nameIdentifierScheme": "GND"
}
}
],
"dates": [
{"date": "2014-02-27", "dateType": "Issued"},
{"date": "2019-01-01/", "dateType": "Valid"},
# NOTE: "Withdrawn" is not in the DataCite v3.1 dateType vocabulary
# {"date": "2019-01-01", "dateType": "Withdrawn"},
{"date": "/2019-01-01", "dateType": "Collected"},
{"date": "2019-01-01/2019-02-01", "dateType": "Collected"},
],
"descriptions": [
{
"description": "Test Description",
"descriptionType": "Abstract"
},
{
"description": "notes",
"descriptionType": "Other"
},
{
"description": (
"{\"references\": [\"Doe, John et al (2012). "
"Some title. Zenodo. 10.5281/zenodo.12\", \"Smith, "
"Jane et al (2012). Some title. Zenodo. "
"10.5281/zenodo.34\"]}"
),
"descriptionType": "Other"
},
{'description': 'microscopic supersampling',
'descriptionType': 'Methods'}
],
"identifier": {"identifier": "10.5072/foo", "identifierType": "DOI"},
"language": "en",
"geoLocations": [{
"geoLocationPlace": "my place",
"geoLocationPoint": "2.35 1.534"
}, {
'geoLocationPlace': 'New York'
}],
"publicationYear": "2014",
"publisher": "Zenodo",
"relatedIdentifiers": [
{
"relationType": "Cites",
"resourceTypeGeneral": "Dataset",
"relatedIdentifier": "10.1234/foo.bar",
"relatedIdentifierType": "DOI"
},
{
"relationType": "IsIdenticalTo",
"relatedIdentifier": "1234.4325",
"relatedIdentifierType": "arXiv"
},
{
"relationType": "Cites",
"resourceTypeGeneral": "Dataset",
"relatedIdentifier": "1234.4321",
"relatedIdentifierType": "arXiv"
},
{
"relationType": "References",
"resourceTypeGeneral": "Dataset",
"relatedIdentifier": "1234.4328",
"relatedIdentifierType": "arXiv"
},
{
"relationType": "IsPartOf",
"relatedIdentifier": "10.1234/zenodo.4321",
"relatedIdentifierType": "DOI",
"resourceTypeGeneral": "Software"
},
{
"relationType": "HasPart",
"relatedIdentifier": "10.1234/zenodo.1234",
"relatedIdentifierType": "DOI",
"resourceTypeGeneral": "BookChapter"
},
{
"relationType": "IsPartOf",
"relatedIdentifier": "http://localhost/communities/zenodo",
"relatedIdentifierType": "URL"
}
],
"resourceType": {
"resourceType": None,
"resourceTypeGeneral": "Book"
},
"rightsList": [
{
"rights": "Creative Commons Attribution 4.0",
"rightsURI": "https://creativecommons.org/licenses/by/4.0/"
},
{
"rights": "Open Access",
"rightsURI": "info:eu-repo/semantics/openAccess"
}
],
"subjects": [
{"subject": "kw1"},
{"subject": "kw2"},
{"subject": "kw3"},
{
"subject": "http://id.loc.gov/authorities/subjects/sh85009003",
"subjectScheme": "url"
}
],
"titles": [{"title": "Test title"}],
"version": "1.2.5"
}
assert obj == expected
obj = datacite_v41.transform_record(recid_pid, full_record_model)
expected['creators'] = [
{
'affiliations': ['CERN'],
'creatorName': 'Doe, John',
'familyName': 'Doe',
'givenName': 'John',
'nameIdentifiers': [
{
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/',
'nameIdentifier': '0000-0002-1694-233X'
},
{
'nameIdentifierScheme': 'GND',
'nameIdentifier': '170118215'
}
],
},
{
'affiliations': ['CERN'],
'creatorName': 'Doe, Jane',
'familyName': 'Doe',
'givenName': 'Jane',
'nameIdentifiers': [
{
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/',
'nameIdentifier': '0000-0002-1825-0097'
}
],
},
{
'affiliations': ['CERN'],
'creatorName': 'Smith, John',
'familyName': 'Smith',
'givenName': 'John',
'nameIdentifiers': [],
},
{
'affiliations': ['CERN'],
'creatorName': 'Nowak, Jack',
'familyName': 'Nowak',
'givenName': 'Jack',
'nameIdentifiers': [
{
'nameIdentifierScheme': 'GND',
'nameIdentifier': '170118215'
}
],
}
]
expected['contributors'] = [
{
'affiliations': ['CERN'],
'nameIdentifiers': [
{
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/',
'nameIdentifier': '0000-0002-1825-0097'
}
],
'contributorName': 'Smith, Other',
'familyName': 'Smith',
'givenName': 'Other',
'contributorType': 'Other',
},
{
'affiliations': [''],
'nameIdentifiers': [],
'contributorName': 'Hansen, Viggo',
'familyName': 'Hansen',
'givenName': 'Viggo',
'contributorType': 'Other',
},
{
'affiliations': ['CERN'],
'nameIdentifiers': [],
'contributorName': 'Kowalski, Manager',
'familyName': 'Kowalski',
'givenName': 'Manager',
'contributorType': 'DataManager',
},
{
'contributorName': 'Smith, Professor',
'familyName': 'Smith',
'givenName': 'Professor',
'nameIdentifiers': [],
'contributorType': 'Supervisor',
}
]
expected['fundingReferences'] = []
expected["dates"] = [
{"date": "2014-02-27", "dateType": "Issued"},
{"date": "2019-01-01/", "dateType": "Valid",
"dateInformation": "Bongo"},
{"date": "/2019-01-01", "dateType": "Collected"},
{"date": "2019-01-01", "dateType": "Withdrawn"},
{"date": "2019-01-01/2019-02-01", "dateType": "Collected"},
]
expected['geoLocations'] = [{
"geoLocationPlace": "my place",
"geoLocationPoint": {
"pointLatitude": 2.35,
"pointLongitude": 1.534
}
}, {
'geoLocationPlace': 'New York'
}]
assert obj == expected
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_identifier(db, minimal_record_model, recid_pid, serializer):
"""Test identifier."""
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['identifier'] == {
'identifier': '10.5072/zenodo.123',
'identifierType': 'DOI',
}
def test_creators(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'creators': [
{'name': 'A', 'affiliation': 'AA', 'gnd': '1234'},
{'name': 'B', 'affiliation': 'BA', 'orcid': '0000-0000-0000-0000',
'gnd': '4321'},
]})
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['creators'] == [
{'affiliation': 'AA', 'creatorName': 'A', 'nameIdentifier': {
'nameIdentifier': '1234', 'nameIdentifierScheme': 'GND'}},
{'affiliation': 'BA', 'creatorName': 'B', 'nameIdentifier': {
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'}}
]
def test_creators_v4(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'creators': [
{'name': 'A, B', 'affiliation': 'AA', 'gnd': '1234'},
{
'name': 'B',
'affiliation': 'BA',
'orcid': '0000-0000-0000-0000',
'gnd': '4321'
},
]})
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['creators'] == [{
'affiliations': ['AA'],
'creatorName': 'A, B',
'givenName': 'B',
'familyName': 'A',
'nameIdentifiers': [{
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'
}]},
{
'affiliations': ['BA'],
'creatorName': 'B',
'givenName': '',
'familyName': '',
'nameIdentifiers': [{
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
}, {
'nameIdentifier': '4321',
'nameIdentifierScheme': 'GND'
}]
}
]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_embargo_date(db, minimal_record_model, recid_pid, serializer):
"""Test embargo date."""
dt = (today() + timedelta(days=1)).isoformat()
minimal_record_model.update({
'embargo_date': dt,
'access_right': 'embargoed',
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['dates'] == [
{'dateType': 'Available', 'date': dt},
{'dateType': 'Accepted', 'date': today().isoformat()},
]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_subjects(db, minimal_record_model, recid_pid, serializer):
"""Test subjects date."""
minimal_record_model.update({
'keywords': ['kw1'],
'subjects': [{'term': 'test', 'identifier': 'id', 'scheme': 'loc'}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['subjects'] == [
{'subject': 'kw1'},
{'subject': 'id', 'subjectScheme': 'loc'},
]
def test_contributors(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'contributors': [{
'name': 'A',
'affiliation': 'AA',
'gnd': '1234',
'type': 'Researcher'
}, ],
'thesis_supervisors': [{
'name': 'B',
'affiliation': 'BA',
'type': 'Supervisor'
}, ],
'grants': [{
'funder': {
'name': 'European Commission',
},
'identifiers': {
'eurepo': 'info:eu-repo/grantAgreement/EC/FP7/244909'
},
}],
})
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['contributors'] == [
{
'affiliation': 'AA',
'contributorName': 'A',
'contributorType': 'Researcher',
'nameIdentifier': {
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'}
},
{
'affiliation': 'BA',
'contributorName': 'B',
'contributorType': 'Supervisor',
'nameIdentifier': {},
},
{
'contributorName': 'European Commission',
'contributorType': 'Funder',
'nameIdentifier': {
'nameIdentifier': 'info:eu-repo/grantAgreement/EC/FP7/244909',
'nameIdentifierScheme': 'info'}
},
]
def test_contributors_v4(db, minimal_record_model, recid_pid):
"""Test contributors."""
minimal_record_model.update({
'contributors': [{
'name': 'A, B',
'affiliation': 'AA',
'gnd': '1234',
'orcid': '0000-0000-0000-0000',
'type': 'Researcher'
}, ],
'thesis': {
'supervisors': [{
'name': 'B',
'affiliation': 'BA',
'type': 'Supervisor'
}]
}
})
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['contributors'] == [
{
'affiliations': ['AA'],
'contributorName': 'A, B',
'givenName': 'B',
'familyName': 'A',
'contributorType': 'Researcher',
'nameIdentifiers': [
{
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
},
{
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'
},
]
},
{
'affiliations': ['BA'],
'contributorName': 'B',
'givenName': '',
'familyName': '',
'contributorType': 'Supervisor',
'nameIdentifiers': [],
},
]
# Test without `thesis` field
minimal_record_model.pop('thesis', None)
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['contributors'] == [
{
'affiliations': ['AA'],
'contributorName': 'A, B',
'givenName': 'B',
'familyName': 'A',
'contributorType': 'Researcher',
'nameIdentifiers': [
{
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
},
{
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'
},
]
},
]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_language(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
assert 'language' not in minimal_record_model
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert 'language' not in obj
minimal_record_model['language'] = 'eng'
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['language'] == 'en' # DataCite supports ISO 639-1 (2-letter)
minimal_record_model['language'] = 'twa' # No ISO 639-1 code
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert 'language' not in obj
# This should never happen, but in case of dirty data
minimal_record_model['language'] = 'Esperanto'
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert 'language' not in obj
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_resource_type(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
minimal_record_model['resource_type'] = {'type': 'poster'}
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['resourceType'] == {
'resourceTypeGeneral': 'Text',
'resourceType': 'Poster',
}
# If the record is not in 'c1', OpenAIRE subtype should not be serialized
minimal_record_model['resource_type'] = {'type': 'software',
'openaire_subtype': 'foo:t1'}
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['resourceType'] == {
'resourceTypeGeneral': 'Software',
'resourceType': None
}
# Add 'c1' to communities. 'foo:t1' should be serialized as a type
minimal_record_model['communities'] = ['c1', ]
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['resourceType'] == {
'resourceTypeGeneral': 'Software',
'resourceType': 'openaire:foo:t1',
}
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_alt_ids(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
minimal_record_model.update({
'alternate_identifiers': [{
'identifier': '10.1234/foo.bar',
'scheme': 'doi'
}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['alternateIdentifiers'] == [{
'alternateIdentifier': '10.1234/foo.bar',
'alternateIdentifierType': 'doi',
}, {
'alternateIdentifier': 'http://localhost/record/123',
'alternateIdentifierType': 'url',
}]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_related_identifiers(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
tests = [
('handle', 'Handle'),
('arxiv', 'arXiv'),
('ads', 'bibcode'),
('doi', 'DOI'),
]
for t, dc_t in tests:
minimal_record_model.update({
'related_identifiers': [{
'identifier': '1234',
'scheme': t,
'relation': 'isCitedBy',
'resource_type': {
'type': 'publication',
'subtype': 'section'
}
}, {
'identifier': '1234',
'scheme': 'invalid',
'relation': 'isCitedBy',
}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
expected_result = [{
'relatedIdentifier': '1234',
'relatedIdentifierType': dc_t,
'relationType': 'IsCitedBy',
'resourceTypeGeneral': 'BookChapter'
}]
assert obj['relatedIdentifiers'] == expected_result
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_communities_rel_ids(db, minimal_record_model, recid_pid, serializer):
"""Test communities in related identifiers."""
for communities in (['zenodo'], ['c1', 'c2', 'c3']):
minimal_record_model['communities'] = communities
obj = serializer.transform_record(recid_pid, minimal_record_model)
for comm in communities:
assert {
'relatedIdentifier':
'http://localhost/communities/{}'.format(comm),
'relatedIdentifierType': 'URL',
'relationType': 'IsPartOf',
} in obj['relatedIdentifiers']
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_rights(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
minimal_record_model.update({
'license': {
'identifier': 'cc-by-sa',
'title': 'Creative Commons Attribution Share-Alike',
'source': 'opendefinition.org',
'url': 'http://www.opendefinition.org/licenses/cc-by-sa'
}
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['rightsList'] == [{
'rights': 'Creative Commons Attribution Share-Alike',
'rightsURI': 'http://www.opendefinition.org/licenses/cc-by-sa',
}, {
'rights': 'Open Access',
'rightsURI': 'info:eu-repo/semantics/openAccess',
}]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_descriptions(db, minimal_record_model, recid_pid, serializer):
"""Test descriptions."""
minimal_record_model.update({
'description': 'test',
'notes': 'again',
'references': [{'raw_reference': 'A'}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['descriptions'] == [{
'description': 'test',
'descriptionType': 'Abstract',
}, {
'description': 'again',
'descriptionType': 'Other',
}, {
'description': json.dumps({'references': ['A']}),
'descriptionType': 'Other',
}]
minimal_record_model.update({
'description': (20000 * 'A') + 'BBB',
'notes': (20000 * 'A') + 'BBB',
'references': [{'raw_reference': (20000 * 'A') + 'BBB'}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert all(len(d['description']) == 20000 and 'B' not in d['description']
for d in obj['descriptions'])
def test_funding_ref_v4(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'grants': [
{'title': 'Grant Title',
'code': '1234',
'identifiers': {'eurepo': 'eurepo 1'},
'internal_id': '10.1234/foo::1234',
'funder': {'name': 'EC', 'doi': '10.1234/foo'}},
{'title': 'Title Grant',
'code': '4321',
'identifiers': {'eurepo': 'eurepo 2'},
'internal_id': '10.1234/foo::4321',
'funder': {'name': 'EC', 'doi': '10.1234/foo'}},
]})
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['fundingReferences'] == [
{
'funderName': 'EC',
'funderIdentifier': {
'funderIdentifier': '10.1234/foo',
'funderIdentifierType': 'Crossref Funder ID',
},
'awardNumber': {
'awardNumber': '1234',
'awardURI': 'eurepo 1'
},
'awardTitle': 'Grant Title'
},
{
'funderName': 'EC',
'funderIdentifier': {
'funderIdentifier': '10.1234/foo',
'funderIdentifierType': 'Crossref Funder ID',
},
'awardNumber': {
'awardNumber': '4321',
'awardURI': 'eurepo 2'
},
'awardTitle': 'Title Grant'
}
]
def test_titles(db, minimal_record_model, recid_pid):
"""Test title."""
# NOTE: There used to be a bug which was modifying the case of the title
minimal_record_model['title'] = 'a lower-case title'
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['titles'] == [{'title': 'a lower-case title'}]
minimal_record_model['title'] = 'Mixed-caSe titLE'
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['titles'] == [{'title': 'Mixed-caSe titLE'}]
| 29,008 | Python | .tac | 806 | 25.310174 | 79 | 0.516895 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,645 | contact_form.html | zenodo_zenodo/zenodo/modules/support/templates/zenodo_support/contact_form.html | {#-
# This file is part of Zenodo.
# Copyright (C) 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
-#}
{% extends "zenodo_theme/page.html" %}
{%- from "zenodo_support/macros.html" import render_form_field, render_form_input, render_form_label %}
{%- block page_body %}
{%- set user = current_user.is_authenticated and current_user %}
<div id="contact-page" class="container" >
<div class="row">
<div class="col-md-10">
<div class="panel-body">
<h2 class="header-form">Contact us</h2>
<p>Before making a request, you can have a look at our <a href="http://help.zenodo.org">FAQ</a> and <a href="http://about.zenodo.org">other resources</a> for more detailed information about the operation, features and goals of Zenodo.</p>
{%- if not user %}
<p>It is recommended to <a href="{{url_for_security('login')}}">login</a> before sending a request, so we can automatically fill-in your contact information. It is especially important for quicker resolution of all technical issues and requests concerning your Zenodo account.</p>
{%- endif %}
<form id="contact-form" method="POST" enctype="multipart/form-data" role="form">
{{ form.csrf_token }}
<div class="contact">
<div class="row">
{{ render_form_field(form.name, input_extras={'disabled': True} if current_user.profile and current_user.profile.full_name else {}) }}
</div>
<div class="row">
{{ render_form_field(form.email, input_extras={'disabled': True} if user else {}) }}
</div>
<div class="row">
{{ render_form_field(form.issue_category) }}
</div>
<div class="row">
<div class="col-md-2"></div>
<div class="col-md-10">
<ul id="categories-tab" class="nav nav-tabs">
{%- for category in categories.values() %}
<li class="{%- if loop.index == 1 %} active in{%- endif %}">
<a name="{{ category.key }}" href="#{{ category.key }}"></a>
</li>
{%- endfor %}
</ul>
<div class="tab-content content-tab">
{%- for category in categories.values() %}
<div id="{{ category.key }}" class="tab-pane fade {%- if loop.index == 1 %} active in {%- endif %}">
{{ category.description|safe }}
</div>
{%- endfor %}
</div>
</div>
</div>
<div class="row">
{{ render_form_field(form.subject, input_size=10) }}
</div>
<div class="row">
{{ render_form_field(form.description, input_size=10) }}
</div>
<div class="row">
<div class="col-md-2"></div>
<div class="col-md-10">
<div class="panel panel-default upload-panel" align="center">
<h3 class="upload-header">Drag files anywhere or click <a class="upload-button">here</a> to upload</h3>
<p class="upload-info"></p>
</div>
{{ render_form_input(form.attachments, size=None, extras={'style': 'display:none'}) }}
</div>
</div>
<div class="row">
{{ render_form_label(form.include_os_browser) }}
<div class="col-md-10">
<p>{%- if uap.os %}<strong>Operating System:</strong> {{ uap.os }} - {% endif %}{%- if uap.browser %}<strong>Browser:</strong> {{ uap.browser }} {% endif %}{%- if uap.device and uap.device != 'Other' %}- <strong>Device:</strong> {{ uap.device }} {%- endif %}</p>
<label class="checkbox-inline" for="{{form.include_os_browser.label.field_id}}">
{{ render_form_input(form.include_os_browser, size=None, extras={'class_':''}) }}
<p>Include this information to assist us with narrowing down the cause of your problem.</p>
</label>
</div>
</div>
<div class="row">
<div class="col-md-4"></div>
<div class="col-md-4">
{% if not user and form.recaptcha %}{{ render_form_input(form.recaptcha, size=None) }}{% endif %}
<br>
<button class="btn btn-block btn-success" type="submit">Send request</button>
</div>
<div class="col-md-4"></div>
</div>
</form>
</div>
</div>
<!-- Modal -->
<div class="modal fade" id="confirmationModal" tabindex="-1" role="dialog" aria-labelledby="confirmationModalLabel">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title" id="confirmationModalLabel">Warning!</h4>
</div>
<div class="modal-body">
<form>
<div class="form-group">
<p>
You haven't attached any files. If you want to add/update files, make sure to attach them or
provide us with the link to them.
</p>
<p>Do you want to proceed anyway?</p>
</div>
</form>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
<button type="button" class="btn btn-primary" id="confirm">Send request</button>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
{%- endblock page_body %}
{%- block javascript %}
{{super()}}
<script type="text/javascript">
var MAX_FILE_SIZE = {{ config.SUPPORT_ATTACHMENT_MAX_SIZE|tojson }};
var DESCRIPTION_MIN_LENGTH = {{ config.SUPPORT_DESCRIPTION_MIN_LENGTH|tojson }};
var DESCRIPTION_MAX_LENGTH = {{ config.SUPPORT_DESCRIPTION_MAX_LENGTH|tojson }};
var confirmed = false;
function formatFileSize(bytes) {
if (bytes == 0) {return '0.00 B';}
var e = Math.floor(Math.log(bytes) / Math.log(1000));
return (bytes / Math.pow(1000, e)).toFixed(2) + ' ' + ' KMGTP'.charAt(e) + 'B';
};
function getFiles(e) {
if (e.target && e.target.files) {
return e.target.files;
} else if (e.originalEvent && e.originalEvent.dataTransfer) {
return e.originalEvent.dataTransfer.files;
}
}
// Shows an error if size is greater than max attachment size.
function handleFileSelection(event) {
var files = getFiles(event);
if(files.length == 0) return;
var filesInfo = '<strong>' + files.length + ' file(s) selected.</strong>';
var totalSize = 0;
for(var i = 0; i < files.length; i++){
totalSize += files[i].size;
filesInfo += '<br/><small>' + files[i].name + '    ' +
formatFileSize(files[i].size) + '</small>';
}
$('.upload-info').get(0).innerHTML = filesInfo;
if(totalSize > MAX_FILE_SIZE) {
$('.field-attachments').addClass('has-error');
$('#error-attachments').get(0).innerHTML =
'File size exceeded. Please add URLs to the files or make a smaller selection.';
$('button#form_button').prop('disabled', true);
} else {
$('.field-attachments').removeClass('has-error');
$('#error-attachments').get(0).innerHTML = '';
$('button#form_button').prop('disabled', false);
}
}
// Setup file upload droparea
function initFileDroparea(droparea, button) {
function noop(e) {
e.preventDefault();
e.stopPropagation();
}
droparea.on({
dragenter: noop,
dragover: noop,
dragleave: noop,
drop: function(e) {
noop(e);
handleFileSelection(e);
},
});
var attachments = $('#attachments');
attachments.change(handleFileSelection);
button.click(function() {
attachments.value = null;
attachments.trigger('click');
});
}
function validateForm() {
var isValid = true;
var fields = ['name', 'email', 'issue_category', 'subject', 'description'];
var errorMessages = fields.reduce(function(acc, key) { acc[key] = []; return acc }, {} );
var required = ['name', 'email', 'issue_category', 'subject'];
required.forEach(function(fieldName) {
var field = $('#contact-form :input#' + fieldName);
if (!$.trim(field.val()).length) {
errorMessages[fieldName].push('This field is required.');
isValid = false;
}
});
var description = $('#contact-form :input#description').val()
if (description.length < DESCRIPTION_MIN_LENGTH ||
description.length > DESCRIPTION_MAX_LENGTH) {
errorMessages.description.push(
'Field must be between ' + DESCRIPTION_MIN_LENGTH + ' and ' + DESCRIPTION_MAX_LENGTH + ' characters long.');
isValid = false;
}
$.each(errorMessages, function(fieldName, errors) {
var field = $('#contact-form :input#' + fieldName);
if (errors.length) {
$('span#error-' + fieldName).text(errors.join('<br>'));
field.closest('div.form-group').addClass('has-error');
} else {
$('span#error-' + fieldName).text('');
field.closest('div.form-group').removeClass('has-error');
}
});
var category = $('#contact-form :input#issue_category').val();
var files = $('#attachments')[0].files;
// if there's no file to be uploaded, display a warning
// and ask for a confirmation before sending the request
if (isValid && category=='file-modification' && files.length == 0 && !confirmed) {
isValid = false;
$('#confirmationModal').modal({backdrop: 'static'}),
$('#confirm').click(function(e){
confirmed = true;
$('form#contact-form').submit();
})
}
return isValid;
}
// Page bootstrap: category help tabs, drag-and-drop uploads, validation.
$(document).ready(function() {
  // Hide the per-category help tabs; show the tab matching the
  // currently selected issue category whenever it changes.
  $('#categories-tab').hide();
  $('#issue_category').on('change', function(e) {
    var selected = $(this).val();
    $('#categories-tab li a[name="' + selected + '"]').tab('show');
  });
  // Accept file drops anywhere on the page.
  initFileDroparea($('html'), $('a.upload-button'));
  $('form#contact-form').submit(validateForm);
});
</script>
{% endblock javascript %}
| 11,052 | Python | .tac | 253 | 35.893281 | 288 | 0.59406 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,646 | datacite-to-dcat-ap.xsl | zenodo_zenodo/zenodo/modules/records/data/datacite-to-dcat-ap.xsl | <?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2015-2019 EUROPEAN UNION
Licensed under the EUPL, Version 1.1 or - as soon they will be approved by
the European Commission - subsequent versions of the EUPL (the "Licence");
You may not use this work except in compliance with the Licence.
You may obtain a copy of the Licence at:
http://ec.europa.eu/idabc/eupl
Unless required by applicable law or agreed to in writing, software
distributed under the Licence is distributed on an "AS IS" basis,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the Licence for the specific language governing permissions and
limitations under the Licence.
Authors: European Commission, Joint Research Centre (JRC)
Andrea Perego <andrea.perego@ec.europa.eu>
-->
<!--
PURPOSE AND USAGE
This XSLT is a proof of concept for the implementation of the specification
concerning the DataCite profile of DCAT-AP (CiteDCAT-AP)
As such, this XSLT must be considered as unstable, and can be updated any
time based on the revisions to the CiteDCAT-AP specifications.
-->
<xsl:transform
xmlns:adms = "http://www.w3.org/ns/adms#"
xmlns:cnt = "http://www.w3.org/2011/content#"
xmlns:dc = "http://purl.org/dc/elements/1.1/"
xmlns:dct = "http://purl.org/dc/terms/"
xmlns:dctype = "http://purl.org/dc/dcmitype/"
xmlns:dcat = "http://www.w3.org/ns/dcat#"
xmlns:dtct2.2 = "http://datacite.org/schema/kernel-2.2"
xmlns:dtct3 = "http://datacite.org/schema/kernel-3"
xmlns:dtct4 = "http://datacite.org/schema/kernel-4"
xmlns:duv = "http://www.w3.org/ns/duv#"
xmlns:earl = "http://www.w3.org/ns/earl#"
xmlns:foaf = "http://xmlns.com/foaf/0.1/"
xmlns:frapo = "http://purl.org/cerif/frapo/"
xmlns:geo = "http://www.w3.org/2003/01/geo/wgs84_pos#"
xmlns:gsp = "http://www.opengis.net/ont/geosparql#"
xmlns:locn = "http://www.w3.org/ns/locn#"
xmlns:oa = "http://www.w3.org/ns/oa#"
xmlns:org = "http://www.w3.org/ns/org#"
xmlns:owl = "http://www.w3.org/2002/07/owl#"
xmlns:prov = "http://www.w3.org/ns/prov#"
xmlns:rdf = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:rdfs = "http://www.w3.org/2000/01/rdf-schema#"
xmlns:schema = "http://schema.org/"
xmlns:skos = "http://www.w3.org/2004/02/skos/core#"
xmlns:vcard = "http://www.w3.org/2006/vcard/ns#"
xmlns:xlink = "http://www.w3.org/1999/xlink"
xmlns:xsi = "http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsl = "http://www.w3.org/1999/XSL/Transform"
xmlns:wdrs = "http://www.w3.org/2007/05/powder-s#"
exclude-result-prefixes = "cnt dtct2.2 dtct3 dtct4 earl oa xlink xsi xsl"
version="1.0">
<xsl:output method="xml"
indent="yes"
encoding="utf-8"
cdata-section-elements="locn:geometry" />
<!-- Vars used when transforming strings into upper/lowercase. -->
<xsl:variable name="lowercase" select="'abcdefghijklmnopqrstuvwxyz'"/>
<xsl:variable name="uppercase" select="'ABCDEFGHIJKLMNOPQRSTUVWXYZ'"/>
<!-- The namespace of the DataCite metadata schema changes depending on the schema version -->
<!--
<xsl:param name="dtctNsUriPrefix">http://datacite.org/schema/kernel-</xsl:param>
-->
<!--
Mapping parameters
==================
This section includes mapping parameters to be modified manually.
-->
<!-- Parameter $profile -->
<!--
This parameter specifies the CiteDCAT-AP profile to be used:
- value "core": the CiteDCAT-AP Core profile, which includes only the DataCite metadata elements supported in DCAT-AP
- value "extended": the CiteDCAT-AP Extended profile, which defines mappings for all the DataCite metadata elements
The current specifications for the core and extended CiteDCAT-AP profiles are available on the JRC GitHub repository:
https://github.com/ec-jrc/datacite-to-dcat-ap/
-->
<!-- Uncomment to use CiteDCAT-AP Core -->
<!--
<xsl:param name="profile">core</xsl:param>
-->
<!-- Uncomment to use CiteDCAT-AP Extended -->
<xsl:param name="profile">extended</xsl:param>
<!--
Other global parameters
=======================
-->
<!-- URI and URN of the spatial reference system (SRS) used in the bounding box.
The default SRS is CRS84. If a different SRS is used, also parameter
$SrsAxisOrder must be specified. -->
<!-- Old param
<xsl:param name="srid">4326</xsl:param>
-->
<!-- The SRS URI is used in the WKT and GML encodings of the bounding box. -->
<xsl:param name="SrsUri">http://www.opengis.net/def/crs/OGC/1.3/CRS84</xsl:param>
<!-- The SRS URN is used in the GeoJSON encoding of the bounding box. -->
<xsl:param name="SrsUrn">urn:ogc:def:crs:OGC:1.3:CRS84</xsl:param>
<!-- Axis order for the reference SRS:
- "LonLat": longitude / latitude
- "LatLon": latitude / longitude.
The axis order must be specified only if the reference SRS is different from CRS84.
If the reference SRS is CRS84, this parameter is ignored. -->
<xsl:param name="SrsAxisOrder">LonLat</xsl:param>
<!-- Namespaces -->
<xsl:param name="xsd">http://www.w3.org/2001/XMLSchema#</xsl:param>
<xsl:param name="dct">http://purl.org/dc/terms/</xsl:param>
<xsl:param name="dctype">http://purl.org/dc/dcmitype/</xsl:param>
<xsl:param name="foaf">http://xmlns.com/foaf/0.1/</xsl:param>
<xsl:param name="vcard">http://www.w3.org/2006/vcard/ns#</xsl:param>
<!-- Currently not used.
<xsl:param name="timeUri">http://placetime.com/</xsl:param>
<xsl:param name="timeInstantUri" select="concat($timeUri,'instant/gregorian/')"/>
<xsl:param name="timeIntervalUri" select="concat($timeUri,'interval/gregorian/')"/>
-->
<xsl:param name="dcat">http://www.w3.org/ns/dcat#</xsl:param>
<xsl:param name="gsp">http://www.opengis.net/ont/geosparql#</xsl:param>
<!-- MDR NALs and other code lists -->
<xsl:param name="op">http://publications.europa.eu/resource/authority/</xsl:param>
<xsl:param name="oplang" select="concat($op,'language/')"/>
<xsl:param name="opcb" select="concat($op,'corporate-body/')"/>
<xsl:param name="oplic" select="concat($op,'licence/')"/>
<xsl:param name="opar" select="concat($op,'access-right/')"/>
<xsl:param name="opds" select="concat($op,'dataset-status/')"/>
<!--
<xsl:param name="opcountry" select="concat($op,'country/')"/>
<xsl:param name="opfq" select="concat($op,'frequency/')"/>
<xsl:param name="cldFrequency">http://purl.org/cld/freq/</xsl:param>
-->
<xsl:param name="ianaMT">https://www.iana.org/assignments/media-types/</xsl:param>
<!-- This is used as the datatype for the GeoJSON-based encoding of the bounding box. -->
<xsl:param name="geojsonMediaTypeUri">https://www.iana.org/assignments/media-types/application/vnd.geo+json</xsl:param>
<!-- INSPIRE code list URIs -->
<!--
<xsl:param name="INSPIRECodelistUri">http://inspire.ec.europa.eu/metadata-codelist/</xsl:param>
<xsl:param name="SpatialDataServiceCategoryCodelistUri" select="concat($INSPIRECodelistUri,'SpatialDataServiceCategory')"/>
<xsl:param name="DegreeOfConformityCodelistUri" select="concat($INSPIRECodelistUri,'DegreeOfConformity')"/>
<xsl:param name="ResourceTypeCodelistUri" select="concat($INSPIRECodelistUri,'ResourceType')"/>
<xsl:param name="ResponsiblePartyRoleCodelistUri" select="concat($INSPIRECodelistUri,'ResponsiblePartyRole')"/>
<xsl:param name="SpatialDataServiceTypeCodelistUri" select="concat($INSPIRECodelistUri,'SpatialDataServiceType')"/>
<xsl:param name="TopicCategoryCodelistUri" select="concat($INSPIRECodelistUri,'TopicCategory')"/>
-->
<!-- INSPIRE code list URIs (not yet supported; the URI pattern is tentative) -->
<!--
<xsl:param name="SpatialRepresentationTypeCodelistUri" select="concat($INSPIRECodelistUri,'SpatialRepresentationTypeCode')"/>
<xsl:param name="MaintenanceFrequencyCodelistUri" select="concat($INSPIRECodelistUri,'MaintenanceFrequencyCode')"/>
-->
<!--
Master template
===============
-->
<!-- Root template: wraps the output in an rdf:RDF envelope and processes
     every DataCite <resource> element whose namespace starts with
     'http://datacite.org/schema/kernel-' (so kernel-2.2/3/4 all match).
     NOTE(review): the first alternative of the union select appears
     subsumed by the '//' one — kept as-is to preserve behavior. -->
<xsl:template match="/">
<rdf:RDF>
<xsl:apply-templates select="*[local-name() = 'resource' and starts-with(namespace-uri(), 'http://datacite.org/schema/kernel-')]|//*[local-name() = 'resource' and starts-with(namespace-uri(), 'http://datacite.org/schema/kernel-')]"/>
</rdf:RDF>
</xsl:template>
<!--
Metadata template
=================
-->
<!-- Resource template: converts one DataCite <resource> element (any
     kernel-* namespace version) into a CiteDCAT-AP description.
     It (1) derives $ResourceUri from the primary identifier, (2) maps
     resourceTypeGeneral to a coarse $ResourceType, (3) assembles all
     mapped properties into the $ResourceDescription result-tree
     fragment, and (4) emits the record — optionally with a
     dcat:CatalogRecord and, in the extended profile, separate
     funding-award and funder nodes. -->
<xsl:template match="*[local-name() = 'resource' and starts-with(namespace-uri(), 'http://datacite.org/schema/kernel-')]|//*[local-name() = 'resource' and starts-with(namespace-uri(), 'http://datacite.org/schema/kernel-')]">
<!--
Parameters to create HTTP URIs for the resource and the corresponding metadata record
_____________________________________________________________________________________
These parameters must be customised depending on the strategy used to assign HTTP URIs.
The default rule implies that HTTP URIs are specified for the metadata file identifier
(metadata URI) and the resource identifier (resource URI).
-->
<!-- $ResourceUri is non-empty only when the identifier resolves (via the
     IdentifierURI named template) to an http(s) URI. -->
<xsl:param name="ResourceUri">
<xsl:variable name="identifier" select="normalize-space(*[local-name() = 'identifier'])"/>
<xsl:variable name="type" select="normalize-space(translate(*[local-name() = 'identifier']/@identifierType,$uppercase,$lowercase))"/>
<xsl:variable name="schemeURI" select="*[local-name() = 'identifier']/@schemeURI"/>
<xsl:variable name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$identifier"/>
<xsl:with-param name="type" select="$type"/>
<xsl:with-param name="schemeURI" select="$schemeURI"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="urilc" select="translate($uri,$uppercase,$lowercase)"/>
<xsl:if test="$uri != '' and ( starts-with($urilc, 'http://') or starts-with($urilc, 'https://') )">
<xsl:value-of select="$uri"/>
</xsl:if>
</xsl:param>
<xsl:param name="MetadataUri"/>
<!--
Other parameters
________________
-->
<!-- Resource type -->
<!-- Maps resourceTypeGeneral (lower-cased) to a coarse type; most
     DataCite values collapse to 'dataset', with event/physicalobject/
     service/other passed through, and unknown values becoming 'other'. -->
<xsl:param name="ResourceType">
<xsl:variable name="type" select="normalize-space(translate(*[local-name() = 'resourceType']/@resourceTypeGeneral,$uppercase,$lowercase))"/>
<xsl:choose>
<xsl:when test="$type = 'audiovisual'">dataset</xsl:when>
<xsl:when test="$type = 'collection'">dataset</xsl:when>
<!-- Added in DataCite v4.1 -->
<xsl:when test="$type = 'datapaper'">dataset</xsl:when>
<xsl:when test="$type = 'dataset'">dataset</xsl:when>
<xsl:when test="$type = 'event'">event</xsl:when>
<xsl:when test="$type = 'image'">dataset</xsl:when>
<xsl:when test="$type = 'interactiveresource'">dataset</xsl:when>
<xsl:when test="$type = 'model'">dataset</xsl:when>
<xsl:when test="$type = 'physicalobject'">physicalobject</xsl:when>
<xsl:when test="$type = 'service'">service</xsl:when>
<xsl:when test="$type = 'software'">dataset</xsl:when>
<xsl:when test="$type = 'sound'">dataset</xsl:when>
<xsl:when test="$type = 'text'">dataset</xsl:when>
<xsl:when test="$type = 'workflow'">dataset</xsl:when>
<xsl:when test="$type = 'other'">other</xsl:when>
<xsl:otherwise>other</xsl:otherwise>
</xsl:choose>
</xsl:param>
<!-- Metadata description (metadata on metadata) -->
<xsl:param name="MetadataDescription"/>
<!-- Resource description (resource metadata) -->
<!-- Result-tree fragment collecting every mapped property of the
     resource; copied into the output record below. -->
<xsl:param name="ResourceDescription">
<!-- Resource type -->
<xsl:apply-templates select="*[local-name() = 'resourceType']"/>
<!-- Identifier -->
<xsl:apply-templates select="*[local-name() = 'identifier']">
<xsl:with-param name="ResourceType" select="$ResourceType"/>
</xsl:apply-templates>
<!-- Creators -->
<xsl:apply-templates select="*[local-name() = 'creators']/*[local-name() = 'creator']"/>
<!-- Titles -->
<xsl:apply-templates select="*[local-name() = 'titles']/*[local-name() = 'title']"/>
<!-- Publisher -->
<xsl:apply-templates select="*[local-name() = 'publisher']"/>
<!-- Publication year-->
<xsl:apply-templates select="*[local-name() = 'publicationYear']"/>
<!-- Subjects -->
<xsl:apply-templates select="*[local-name() = 'subjects']/*[local-name() = 'subject']"/>
<!-- Funding references -->
<xsl:if test="$profile = 'extended'">
<xsl:for-each select="*[local-name() = 'fundingReferences']/*[local-name() = 'fundingReference']">
<xsl:call-template name="FundingReferences"/>
</xsl:for-each>
</xsl:if>
<!-- Contributors-->
<xsl:apply-templates select="*[local-name() = 'contributors']/*[local-name() = 'contributor']"/>
<!-- Dates -->
<xsl:apply-templates select="*[local-name() = 'dates']/*[local-name() = 'date']"/>
<!-- Language -->
<xsl:apply-templates select="*[local-name() = 'language']"/>
<!-- Alternate identifiers-->
<xsl:apply-templates select="*[local-name() = 'alternateIdentifiers']/*[local-name() = 'alternateIdentifier']"/>
<!-- Related identifiers -->
<xsl:apply-templates select="*[local-name() = 'relatedIdentifiers']/*[local-name() = 'relatedIdentifier']"/>
<!-- Version -->
<xsl:apply-templates select="*[local-name() = 'version']"/>
<!-- Descriptions -->
<xsl:apply-templates select="*[local-name() = 'descriptions']/*[local-name() = 'description']"/>
<!-- Geo locations -->
<xsl:apply-templates select="*[local-name() = 'geoLocations']/*[local-name() = 'geoLocation']"/>
<!-- Access rights -->
<!-- For DataCite schema version < 3 -->
<xsl:apply-templates select="*[local-name() = 'rights']">
<xsl:with-param name="show-access-rights">yes</xsl:with-param>
</xsl:apply-templates>
<!-- For DataCite schema version >= 3 -->
<xsl:apply-templates select="*[local-name() = 'rightsList']">
<xsl:with-param name="show-access-rights">yes</xsl:with-param>
</xsl:apply-templates>
<!-- Distribution -->
<!-- Sizes, formats, licence/rights and the access URL are grouped into
     $distribution; for datasets they are wrapped in a
     dcat:Distribution, otherwise inlined on the resource itself. -->
<xsl:variable name="distribution">
<!-- Sizes -->
<xsl:apply-templates select="*[local-name() = 'sizes']/*[local-name() = 'size']"/>
<!-- Formats-->
<xsl:apply-templates select="*[local-name() = 'formats']/*[local-name() = 'format']"/>
<!-- Rights -->
<!-- For DataCite schema version < 3 -->
<xsl:apply-templates select="*[local-name() = 'rights']">
<xsl:with-param name="show-licence">yes</xsl:with-param>
<xsl:with-param name="show-rights">yes</xsl:with-param>
</xsl:apply-templates>
<!-- For DataCite schema version >= 3 -->
<xsl:apply-templates select="*[local-name() = 'rightsList']">
<xsl:with-param name="show-licence">yes</xsl:with-param>
<xsl:with-param name="show-rights">yes</xsl:with-param>
</xsl:apply-templates>
<xsl:if test="$ResourceUri != ''">
<xsl:choose>
<xsl:when test="$ResourceType = 'dataset'">
<dcat:accessURL rdf:resource="{$ResourceUri}"/>
</xsl:when>
<xsl:otherwise>
<!--
<foaf:page rdf:resource="{$ResourceUri}"/>
-->
</xsl:otherwise>
</xsl:choose>
</xsl:if>
</xsl:variable>
<xsl:choose>
<xsl:when test="$ResourceType = 'dataset'">
<dcat:distribution>
<dcat:Distribution>
<xsl:copy-of select="$distribution"/>
</dcat:Distribution>
</dcat:distribution>
</xsl:when>
<xsl:otherwise>
<xsl:copy-of select="$distribution"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<!-- Generating the output record. -->
<!-- In the 'core' profile only datasets are emitted; 'extended' emits
     every resource type. -->
<xsl:if test="$profile = 'extended' or ($profile = 'core' and $ResourceType = 'dataset')">
<xsl:choose>
<xsl:when test="$ResourceUri != ''">
<xsl:choose>
<xsl:when test="$MetadataUri != ''">
<rdf:Description rdf:about="{$MetadataUri}">
<rdf:type rdf:resource="{$dcat}CatalogRecord"/>
<foaf:primaryTopic rdf:resource="{$ResourceUri}"/>
<xsl:copy-of select="$MetadataDescription"/>
</rdf:Description>
</xsl:when>
<xsl:otherwise>
<xsl:if test="normalize-space($MetadataDescription)">
<rdf:Description>
<rdf:type rdf:resource="{$dcat}CatalogRecord"/>
<foaf:primaryTopic rdf:resource="{$ResourceUri}"/>
<xsl:copy-of select="$MetadataDescription"/>
</rdf:Description>
</xsl:if>
</xsl:otherwise>
</xsl:choose>
<rdf:Description rdf:about="{$ResourceUri}">
<xsl:copy-of select="$ResourceDescription"/>
</rdf:Description>
</xsl:when>
<xsl:otherwise>
<!-- No resolvable URI: emit a blank node, nesting the catalog record
     (if any) via foaf:isPrimaryTopicOf. -->
<rdf:Description>
<xsl:if test="normalize-space($MetadataDescription)">
<foaf:isPrimaryTopicOf>
<rdf:Description>
<rdf:type rdf:resource="{$dcat}CatalogRecord"/>
<xsl:copy-of select="$MetadataDescription"/>
</rdf:Description>
</foaf:isPrimaryTopicOf>
</xsl:if>
<xsl:copy-of select="$ResourceDescription"/>
</rdf:Description>
</xsl:otherwise>
</xsl:choose>
<!-- Extended profile: emit standalone nodes for funding awards (those
     with an awardURI) and for funders with http(s) identifiers,
     de-duplicated on funderIdentifier. -->
<xsl:if test="$profile = 'extended'">
<xsl:for-each select="//*[local-name() = 'fundingReferences']/*[local-name() = 'fundingReference' and normalize-space(*[local-name() = 'awardNumber']/@awardURI) != '']">
<xsl:call-template name="FundingAwards"/>
</xsl:for-each>
<xsl:for-each select="//*[local-name() = 'fundingReferences']/*[local-name() = 'fundingReference' and ( starts-with(translate(normalize-space(*[local-name() = 'funderIdentifier']),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space(*[local-name() = 'funderIdentifier']),$uppercase,$lowercase),'https://') ) and not(*[local-name() = 'funderIdentifier']=preceding::*)]">
<xsl:call-template name="Funders"/>
</xsl:for-each>
</xsl:if>
</xsl:if>
</xsl:template>
<!--
DataCite elements templates
===========================
-->
<!-- Titles template -->
<!-- Titles template: untyped titles map to dct:title, 'alternativetitle'
     to dct:alternative, 'translated' also to dct:title (marked unstable
     below); 'subtitle' is not mapped. The xml:lang attribute, when
     present, is carried over onto the output literal. -->
<xsl:template name="Titles" match="*[local-name() = 'titles']/*[local-name() = 'title']">
<xsl:variable name="title" select="normalize-space(.)"/>
<xsl:variable name="type" select="normalize-space(translate(@titleType,$uppercase,$lowercase))"/>
<xsl:choose>
<xsl:when test="$type = ''">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:title xml:lang="{@xml:lang}"><xsl:value-of select="$title"/></dct:title>
</xsl:when>
<xsl:otherwise>
<dct:title><xsl:value-of select="$title"/></dct:title>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$type = 'alternativetitle'">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:alternative xml:lang="{@xml:lang}"><xsl:value-of select="$title"/></dct:alternative>
</xsl:when>
<xsl:otherwise>
<dct:alternative><xsl:value-of select="$title"/></dct:alternative>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<!-- TBD
<xsl:when test="$type = 'subtitle' and $profile = 'extended'">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:?? xml:lang="{@xml:lang}"><xsl:value-of select="$title"/></dct:??>
</xsl:when>
<xsl:otherwise>
<dct:??><xsl:value-of select="$title"/></dct:??>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
-->
<!-- Unstable -->
<xsl:when test="$type = 'translated'">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:title xml:lang="{@xml:lang}"><xsl:value-of select="$title"/></dct:title>
</xsl:when>
<xsl:otherwise>
<dct:title><xsl:value-of select="$title"/></dct:title>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Descriptions template -->
<!-- Descriptions template: 'abstract' maps to dct:description, 'methods'
     to a dct:provenance statement, 'tableofcontents' (extended profile
     only) to dct:tableOfContents. Any other type becomes dct:description
     when no abstract exists among the siblings, otherwise rdfs:comment,
     so that every record gets at least one dct:description. -->
<xsl:template name="Descriptions" match="*[local-name() = 'descriptions']/*[local-name() = 'description']">
<xsl:variable name="description" select="normalize-space(.)"/>
<xsl:variable name="type" select="normalize-space(translate(@descriptionType,$uppercase,$lowercase))"/>
<xsl:choose>
<xsl:when test="$type = 'abstract'">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:description xml:lang="{@xml:lang}"><xsl:value-of select="$description"/></dct:description>
</xsl:when>
<xsl:otherwise>
<dct:description><xsl:value-of select="$description"/></dct:description>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$type = 'methods'">
<dct:provenance>
<dct:ProvenanceStatement>
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<rdfs:label xml:lang="{@xml:lang}"><xsl:value-of select="$description"/></rdfs:label>
</xsl:when>
<xsl:otherwise>
<rdfs:label><xsl:value-of select="$description"/></rdfs:label>
</xsl:otherwise>
</xsl:choose>
</dct:ProvenanceStatement>
</dct:provenance>
</xsl:when>
<!-- TBD
<xsl:when test="$type = 'seriesinformation' and $profile = 'extended'">
<dct:?? xml:lang="{@xml:lang}"><xsl:value-of select="$description"/></dct:??>
</xsl:when>
-->
<xsl:when test="$type = 'tableofcontents' and $profile = 'extended'">
<dct:tableOfContents><xsl:value-of select="$description"/></dct:tableOfContents>
</xsl:when>
<!--
<xsl:when test="$type = 'other' and $profile = 'extended'">
<rdfs:comment xml:lang="{@xml:lang}"><xsl:value-of select="$description"/></rdfs:comment>
</xsl:when>
-->
<!-- The following is meant to deal also with $type = 'other', and ensures that a dct:description is provided in
the resulting record. -->
<xsl:otherwise>
<xsl:choose>
<xsl:when test="not(../*[local-name() = 'description' and $type = 'abstract'])">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:description xml:lang="{@xml:lang}"><xsl:value-of select="$description"/></dct:description>
</xsl:when>
<xsl:otherwise>
<dct:description><xsl:value-of select="$description"/></dct:description>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<rdfs:comment xml:lang="{@xml:lang}"><xsl:value-of select="$description"/></rdfs:comment>
</xsl:when>
<xsl:otherwise>
<rdfs:comment><xsl:value-of select="$description"/></rdfs:comment>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Subjects template -->
<!-- Subjects template. Mapping rules, in order:
     1. http(s) subject values become dct:subject URI references —
        except EU publications-office themes, which become dcat:theme;
     2. subjects carrying a subjectScheme and/or schemeURI become
        skos:Concept nodes with a skos:inScheme concept scheme;
     3. everything else becomes a plain dcat:keyword literal.
     xml:lang, when present, is carried onto the output literals. -->
<xsl:template name="Subjects" match="*[local-name() = 'subjects']/*[local-name() = 'subject']">
<xsl:variable name="subject" select="normalize-space(.)"/>
<xsl:variable name="subjectScheme" select="normalize-space(@subjectScheme)"/>
<xsl:variable name="subjectSchemeLC" select="translate($subjectScheme,$uppercase,$lowercase)"/>
<xsl:variable name="schemeURI" select="@schemeURI"/>
<xsl:choose>
<xsl:when test="starts-with($subject, 'http://') or starts-with($subject, 'https://')">
<xsl:choose>
<xsl:when test="starts-with($subject, 'http://publications.europa.eu/resource/authority/theme/')">
<dcat:theme rdf:resource="{$subject}"/>
</xsl:when>
<xsl:otherwise>
<dct:subject rdf:resource="{$subject}"/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$subjectScheme != '' or $schemeURI != ''">
<dct:subject>
<skos:Concept>
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<skos:prefLabel xml:lang="{@xml:lang}"><xsl:value-of select="$subject"/></skos:prefLabel>
</xsl:when>
<xsl:otherwise>
<skos:prefLabel><xsl:value-of select="$subject"/></skos:prefLabel>
</xsl:otherwise>
</xsl:choose>
<skos:inScheme>
<!-- Emit the concept scheme with whichever of URI/title is available. -->
<xsl:choose>
<xsl:when test="$subjectScheme != '' and $schemeURI != ''">
<skos:ConceptScheme rdf:about="{$schemeURI}">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:title xml:lang="{@xml:lang}"><xsl:value-of select="$subjectScheme"/></dct:title>
</xsl:when>
<xsl:otherwise>
<dct:title><xsl:value-of select="$subjectScheme"/></dct:title>
</xsl:otherwise>
</xsl:choose>
</skos:ConceptScheme>
</xsl:when>
<xsl:when test="not($subjectScheme != '') and $schemeURI != ''">
<skos:ConceptScheme rdf:about="{$schemeURI}"/>
</xsl:when>
<xsl:when test="$subjectScheme != '' and not($schemeURI != '')">
<skos:ConceptScheme>
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dct:title xml:lang="{@xml:lang}"><xsl:value-of select="$subjectScheme"/></dct:title>
</xsl:when>
<xsl:otherwise>
<dct:title><xsl:value-of select="$subjectScheme"/></dct:title>
</xsl:otherwise>
</xsl:choose>
</skos:ConceptScheme>
</xsl:when>
</xsl:choose>
</skos:inScheme>
</skos:Concept>
</dct:subject>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<dcat:keyword xml:lang="{@xml:lang}"><xsl:value-of select="$subject"/></dcat:keyword>
</xsl:when>
<xsl:otherwise>
<dcat:keyword><xsl:value-of select="$subject"/></dcat:keyword>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Funding references template -->
<!-- Funding references template (called per fundingReference, extended
     profile). Emits frapo:isFundedBy — as a URI reference when awardURI
     is present, otherwise as a nested node built by FundingAwards — and
     schema:funder, as a URI reference when the funder identifier
     resolves to an http(s)/urn URI, otherwise nested via Funders. -->
<xsl:template name="FundingReferences">
<xsl:param name="funderIdentifier" select="normalize-space(*[local-name() = 'funderIdentifier'])"/>
<xsl:param name="funderIdentifierType" select="translate(normalize-space(*[local-name() = 'funderIdentifier']/@funderIdentifierType),$uppercase,$lowercase)"/>
<!-- Non-empty only when IdentifierURI yields a dereferenceable URI. -->
<xsl:param name="funderURI">
<xsl:variable name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$funderIdentifier"/>
<xsl:with-param name="type" select="$funderIdentifierType"/>
<!--
<xsl:with-param name="schemeURI" select="$schemeURI"/>
-->
</xsl:call-template>
</xsl:variable>
<xsl:if test="starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'https://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'urn://')">
<xsl:value-of select="$uri"/>
</xsl:if>
<!--
<xsl:if test="starts-with(translate(normalize-space(*[local-name() = 'funderIdentifier']),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space(*[local-name() = 'funderIdentifier']),$uppercase,$lowercase),'https://')">
<xsl:value-of select="normalize-space(*[local-name() = 'funderIdentifier'])"/>
</xsl:if>
-->
</xsl:param>
<xsl:param name="funderInfo">
<dct:identifier><xsl:value-of select="normalize-space(*[local-name() = 'funderIdentifier'])"/></dct:identifier>
<foaf:name><xsl:value-of select="normalize-space(*[local-name() = 'funderName'])"/></foaf:name>
</xsl:param>
<xsl:param name="fundingReferenceURI">
<xsl:value-of select="normalize-space(*[local-name() = 'awardNumber']/@awardURI)"/>
</xsl:param>
<xsl:choose>
<xsl:when test="$fundingReferenceURI != ''">
<frapo:isFundedBy rdf:resource="{$fundingReferenceURI}"/>
</xsl:when>
<xsl:when test="normalize-space(*[local-name() = 'awardNumber']) != '' or normalize-space(*[local-name() = 'awardTitle']) != ''">
<frapo:isFundedBy>
<xsl:call-template name="FundingAwards"/>
</frapo:isFundedBy>
</xsl:when>
</xsl:choose>
<xsl:choose>
<xsl:when test="$funderURI != ''">
<schema:funder rdf:resource="{$funderURI}"/>
</xsl:when>
<xsl:when test="normalize-space($funderInfo) != ''">
<schema:funder>
<xsl:call-template name="Funders"/>
</schema:funder>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Version template -->
<xsl:template name="Version" match="*[local-name() = 'version']">
<owl:versionInfo><xsl:value-of select="normalize-space(.)"/></owl:versionInfo>
</xsl:template>
<!-- Rights template -->
<xsl:template name="Rights" match="*[local-name() = 'rights']">
<xsl:param name="show-licence"/>
<xsl:param name="show-access-rights"/>
<xsl:param name="show-rights"/>
<xsl:param name="rightsText" select="normalize-space(.)"/>
<xsl:param name="rightsURI" select="normalize-space(@rightsURI)"/>
<xsl:param name="rightsLabel">
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<rdfs:label xml:lang="{@xml:lang}"><xsl:value-of select="$rightsText"/></rdfs:label>
</xsl:when>
<xsl:otherwise>
<rdfs:label><xsl:value-of select="$rightsText"/></rdfs:label>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<!-- Added in DataCite v4.2 -->
<xsl:param name="rightsIdentifier" select="normalize-space(@rightsIdentifier)"/>
<!-- Added in DataCite v4.2 -->
<xsl:param name="rightsIdentifierScheme" select="normalize-space(@rightsIdentifierScheme)"/>
<!-- Added in DataCite v4.2 -->
<xsl:param name="rightsIdentifierSchemeURI" select="normalize-space(@schemeURI)"/>
<xsl:param name="rightsID">
<xsl:if test="$rightsIdentifier != ''">
<adms:identifier>
<adms:Identifier>
<skos:notation><xsl:value-of select="$rightsIdentifier"/></skos:notation>
<xsl:choose>
<xsl:when test="$rightsIdentifierScheme != ''">
<adms:schemeAgency><xsl:value-of select="$rightsIdentifierScheme"/></adms:schemeAgency>
</xsl:when>
<xsl:when test="$rightsIdentifierSchemeURI != ''">
<dct:creator rdf:resource="{$rightsIdentifierSchemeURI}"/>
</xsl:when>
</xsl:choose>
</adms:Identifier>
</adms:identifier>
</xsl:if>
</xsl:param>
<xsl:param name="licence">
<xsl:if test="$rightsURI != ''">
<!--
<xsl:if test="$show-use-conditions = 'yes'">
<rdfs:label><xsl:value-of select="$rightsURI"/></rdfs:label>
-->
<xsl:choose>
<xsl:when test="starts-with($rightsURI, 'http://creativecommons.org/')">
<dct:license rdf:resource="{$rightsURI}"/>
</xsl:when>
<xsl:when test="starts-with($rightsURI, 'https://creativecommons.org/')">
<dct:license rdf:resource="{$rightsURI}"/>
</xsl:when>
<xsl:when test="starts-with($rightsURI, $oplic)">
<dct:license rdf:resource="{$rightsURI}"/>
</xsl:when>
<xsl:otherwise>not-detected</xsl:otherwise>
</xsl:choose>
<!--
</xsl:if>
<xsl:if test="$show-access-conditions = 'yes'">
<xsl:choose>
-->
</xsl:if>
</xsl:param>
<xsl:param name="access-rights">
<xsl:if test="$rightsURI != ''">
<!--
See:
- https://wiki.surfnet.nl/display/standards/info-eu-repo#info-eu-repo-AccessRights
- http://guidelines.readthedocs.io/en/latest/data/field_rights.html
-->
<xsl:choose>
<xsl:when test="starts-with($rightsURI,'info:eu-repo/semantics/closedAccess')">
<dct:accessRights rdf:resource="{$opar}NON_PUBLIC"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<xsl:when test="starts-with($rightsURI,'info:eu-repo/semantics/embargoedAccess')">
<dct:accessRights rdf:resource="{$opar}NON_PUBLIC"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<xsl:when test="starts-with($rightsURI,'info:eu-repo/semantics/restrictedAccess')">
<dct:accessRights rdf:resource="{$opar}RESTRICTED"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<xsl:when test="starts-with($rightsURI,'info:eu-repo/semantics/openAccess')">
<dct:accessRights rdf:resource="{$opar}PUBLIC"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<!-- See: http://www.ukoln.ac.uk/repositories/digirep/index/Eprints_AccessRights_Vocabulary_Encoding_Scheme -->
<xsl:when test="starts-with($rightsURI,'http://purl.org/eprint/accessRights/ClosedAccess')">
<dct:accessRights rdf:resource="{$opar}NON_PUBLIC"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<xsl:when test="starts-with($rightsURI,'http://purl.org/eprint/accessRights/RestrictedAccess')">
<dct:accessRights rdf:resource="{$opar}RESTRICTED"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<xsl:when test="starts-with($rightsURI,'http://purl.org/eprint/accessRights/OpenAccess')">
<dct:accessRights rdf:resource="{$opar}PUBLIC"/>
<dct:accessRights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</dct:accessRights>
</xsl:when>
<xsl:when test="starts-with($rightsURI,$opar)">
<dct:accessRights rdf:resource="{$rightsURI}"/>
</xsl:when>
<xsl:otherwise>not-detected</xsl:otherwise>
</xsl:choose>
</xsl:if>
</xsl:param>
<xsl:param name="rights">
<xsl:choose>
<xsl:when test="$rightsURI != ''">
<dct:rights>
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
<xsl:copy-of select="$rightsID"/>
</dct:RightsStatement>
</dct:rights>
</xsl:when>
<xsl:otherwise>
<dct:rights>
<dct:RightsStatement>
<xsl:copy-of select="$rightsLabel"/>
<xsl:copy-of select="$rightsID"/>
</dct:RightsStatement>
</dct:rights>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:choose>
<xsl:when test="$licence != 'not-detected' or $access-rights != 'not-detected'">
<xsl:if test="$licence != 'not-detected' and $show-licence = 'yes'">
<xsl:copy-of select="$licence"/>
</xsl:if>
<xsl:if test="$access-rights != 'not-detected' and $show-access-rights = 'yes'">
<xsl:copy-of select="$access-rights"/>
</xsl:if>
</xsl:when>
<xsl:when test="$show-rights = 'yes'">
<xsl:copy-of select="$rights"/>
</xsl:when>
</xsl:choose>
<!--
<dct:rights>
<xsl:choose>
<xsl:when test="$rightsURI != ''">
<dct:RightsStatement rdf:about="{$rightsURI}">
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</xsl:when>
<xsl:otherwise>
<dct:RightsStatement>
<xsl:copy-of select="$rightsLabel"/>
</dct:RightsStatement>
</xsl:otherwise>
</xsl:choose>
</dct:rights>
-->
</xsl:template>
<!-- Rights list template -->
<xsl:template name="RightsList" match="*[local-name() = 'rightsList']">
  <!-- Dispatches each child rights element, forwarding the three display switches unchanged. -->
  <xsl:param name="show-rights"/>
  <xsl:param name="show-licence"/>
  <xsl:param name="show-access-rights"/>
  <xsl:apply-templates select="*[local-name() = 'rights']">
    <xsl:with-param name="show-rights" select="$show-rights"/>
    <xsl:with-param name="show-licence" select="$show-licence"/>
    <xsl:with-param name="show-access-rights" select="$show-access-rights"/>
  </xsl:apply-templates>
</xsl:template>
<!-- Geolocations template -->
<!-- Maps one DataCite geoLocation (place / point / box / polygon) to dct:spatial,
     serialising each geometry as GML, WKT and GeoJSON typed literals. -->
<xsl:template name="Geolocations" match="*[local-name() = 'geoLocations']/*[local-name() = 'geoLocation']">
<xsl:param name="place" select="normalize-space(*[local-name() = 'geoLocationPlace'])"/>
<!-- $point: "lat lon" built from the structured point elements when present;
     otherwise the free-text point content with commas translated to spaces. -->
<xsl:param name="point">
<xsl:if test="normalize-space(*[local-name() = 'geoLocationPoint']) != ''">
<xsl:choose>
<xsl:when test="*[local-name() = 'geoLocationPoint']/*[local-name() = 'pointLatitude'] and *[local-name() = 'geoLocationPoint']/*[local-name() = 'pointLongitude']">
<xsl:value-of select="normalize-space(concat(*[local-name() = 'geoLocationPoint']/*[local-name() = 'pointLatitude'],' ',*[local-name() = 'geoLocationPoint']/*[local-name() = 'pointLongitude']))"/>
</xsl:when>
<xsl:when test="self::text()">
<xsl:value-of select="normalize-space(translate(*[local-name() = 'geoLocationPoint'],',',' '))"/>
</xsl:when>
</xsl:choose>
</xsl:if>
</xsl:param>
<xsl:param name="pointLongitude" select="normalize-space(*[local-name() = 'geoLocationPoint']/*[local-name() = 'pointLongitude'])"/>
<xsl:param name="pointLatitude" select="normalize-space(*[local-name() = 'geoLocationPoint']/*[local-name() = 'pointLatitude'])"/>
<!-- $box: from the structured bound elements, "south west north east";
     otherwise the free-text box content with commas translated to spaces. -->
<!-- NOTE(review): the structured branch builds "S W N E", but the token-splitting
     fallbacks below ($north/$east/$south/$west) read the free-text string as
     "N E S W" — confirm the expected free-text token order against the DataCite
     schema version this targets. The fallbacks only fire when the structured
     bound elements are absent, so the two branches never mix. -->
<xsl:param name="box">
<xsl:if test="normalize-space(*[local-name() = 'geoLocationBox']) != ''">
<xsl:choose>
<xsl:when test="*[local-name() = 'geoLocationBox']/*[local-name() = 'northBoundLatitude'] and *[local-name() = 'geoLocationBox']/*[local-name() = 'southBoundLatitude'] and *[local-name() = 'geoLocationBox']/*[local-name() = 'eastBoundLongitude'] and *[local-name() = 'geoLocationBox']/*[local-name() = 'westBoundLongitude']">
<xsl:value-of select="normalize-space(concat(*[local-name() = 'geoLocationBox']/*[local-name() = 'southBoundLatitude'],' ',*[local-name() = 'geoLocationBox']/*[local-name() = 'westBoundLongitude'],' ',*[local-name() = 'geoLocationBox']/*[local-name() = 'northBoundLatitude'],' ',*[local-name() = 'geoLocationBox']/*[local-name() = 'eastBoundLongitude']))"/>
</xsl:when>
<xsl:when test="self::text()">
<xsl:value-of select="normalize-space(translate(*[local-name() = 'geoLocationBox'],',',' '))"/>
</xsl:when>
</xsl:choose>
</xsl:if>
</xsl:param>
<!-- Bound extraction: prefer the structured elements; fall back to splitting $box
     on spaces (first token, then successive substring-after/-before slices). -->
<xsl:param name="north">
<xsl:choose>
<xsl:when test="*[local-name() = 'geoLocationBox']/*[local-name() = 'northBoundLatitude']">
<xsl:value-of select="normalize-space(*[local-name() = 'geoLocationBox']/*[local-name() = 'northBoundLatitude'])"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring-before($box, ' ')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<!-- $esw = $box minus its first token; $sw = $esw minus its first token. -->
<xsl:param name="esw" select="substring-after($box, ' ')"/>
<xsl:param name="sw" select="substring-after($esw, ' ')"/>
<xsl:param name="east">
<xsl:choose>
<xsl:when test="*[local-name() = 'geoLocationBox']/*[local-name() = 'eastBoundLongitude']">
<xsl:value-of select="normalize-space(*[local-name() = 'geoLocationBox']/*[local-name() = 'eastBoundLongitude'])"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring-before($esw, ' ')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="south">
<xsl:choose>
<xsl:when test="*[local-name() = 'geoLocationBox']/*[local-name() = 'southBoundLatitude']">
<xsl:value-of select="normalize-space(*[local-name() = 'geoLocationBox']/*[local-name() = 'southBoundLatitude'])"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring-before($sw, ' ')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="west">
<xsl:choose>
<xsl:when test="*[local-name() = 'geoLocationBox']/*[local-name() = 'westBoundLongitude']">
<xsl:value-of select="normalize-space(*[local-name() = 'geoLocationBox']/*[local-name() = 'westBoundLongitude'])"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring-after($sw, ' ')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<!-- Polygon vertex lists, comma-separated between vertices and space-separated
     within a vertex, in both axis orders. -->
<xsl:param name="polygonAsLonLat">
<xsl:for-each select="*[local-name() = 'geoLocationPolygon']">
<xsl:variable name="cnr" select="count(*[local-name() = 'polygonPoint'])"/>
<xsl:for-each select="*[local-name() = 'polygonPoint']">
<xsl:variable name="delimiter">
<xsl:if test="position() < $cnr">
<xsl:text>,</xsl:text>
</xsl:if>
</xsl:variable>
<xsl:value-of select="normalize-space(*[local-name() = 'pointLongitude'])"/><xsl:text> </xsl:text><xsl:value-of select="normalize-space(*[local-name() = 'pointLatitude'])"/><xsl:value-of select="$delimiter"/>
</xsl:for-each>
</xsl:for-each>
</xsl:param>
<xsl:param name="polygonAsLatLon">
<xsl:for-each select="*[local-name() = 'geoLocationPolygon']">
<xsl:variable name="cnr" select="count(*[local-name() = 'polygonPoint'])"/>
<xsl:for-each select="*[local-name() = 'polygonPoint']">
<xsl:variable name="delimiter">
<xsl:if test="position() < $cnr">
<xsl:text>,</xsl:text>
</xsl:if>
</xsl:variable>
<xsl:value-of select="normalize-space(*[local-name() = 'pointLatitude'])"/><xsl:text> </xsl:text><xsl:value-of select="normalize-space(*[local-name() = 'pointLongitude'])"/><xsl:value-of select="$delimiter"/>
</xsl:for-each>
</xsl:for-each>
</xsl:param>
<!-- $polygon: the vertex list in the axis order selected by $SrsAxisOrder. -->
<xsl:param name="polygon">
<xsl:choose>
<xsl:when test="$SrsAxisOrder = 'LatLon'">
<xsl:value-of select="$polygonAsLatLon"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$polygonAsLonLat"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<!-- Geometry as GML (GeoSPARQL) -->
<xsl:param name="pointAsGMLLiteral">
<xsl:if test="$point != ''">
<xsl:choose>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><gml:Point srsName="<xsl:value-of select="$SrsUri"/>"><gml:pos srsDimension="2"><xsl:value-of select="$pointLatitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLongitude"/></gml:pos></gml:Point></xsl:when>
<xsl:otherwise><gml:Point srsName="<xsl:value-of select="$SrsUri"/>"><gml:pos srsDimension="2"><xsl:value-of select="$pointLongitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLatitude"/></gml:pos></gml:Point></xsl:otherwise>
</xsl:choose>
</xsl:if>
</xsl:param>
<xsl:param name="boxAsGMLLiteral">
<xsl:if test="$box != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'"><gml:Envelope srsName="<xsl:value-of select="$SrsUri"/>"><gml:lowerCorner><xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/></gml:lowerCorner><gml:upperCorner><xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/></gml:upperCorner></gml:Envelope></xsl:when>
<xsl:when test="$SrsAxisOrder = 'LonLat'"><gml:Envelope srsName="<xsl:value-of select="$SrsUri"/>"><gml:lowerCorner><xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/></gml:lowerCorner><gml:upperCorner><xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/></gml:upperCorner></gml:Envelope></xsl:when>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><gml:Envelope srsName="<xsl:value-of select="$SrsUri"/>"><gml:lowerCorner><xsl:value-of select="$south"/><xsl:text> </xsl:text><xsl:value-of select="$west"/></gml:lowerCorner><gml:upperCorner><xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$east"/></gml:upperCorner></gml:Envelope></xsl:when>
</xsl:choose>
</xsl:if>
</xsl:param>
<xsl:param name="polygonAsGMLLiteral">
<xsl:if test="$polygon != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'"><gml:Polygon srsName="<xsl:value-of select="$SrsUri"/>"><gml:exterior><gml:LinearRing><gml:posList srsDimension="2"><xsl:value-of select="translate($polygon,',',' ')"/></gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></xsl:when>
<xsl:otherwise><gml:Polygon srsName="<xsl:value-of select="$SrsUri"/>"><gml:exterior><gml:LinearRing><gml:posList srsDimension="2"><xsl:value-of select="translate($polygon,',',' ')"/></gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></xsl:otherwise>
</xsl:choose>
</xsl:if>
</xsl:param>
<!--
<xsl:param name="GMLLiteral">
<xsl:choose>
<xsl:when test="$point != ''">
<xsl:choose>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><gml:Point srsName="<xsl:value-of select="$SrsUri"/>"><gml:pos srsDimension="2"><xsl:value-of select="$pointLatitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLongitude"/></gml:pos></gml:Point></xsl:when>
<xsl:otherwise><gml:Point srsName="<xsl:value-of select="$SrsUri"/>"><gml:pos srsDimension="2"><xsl:value-of select="$pointLongitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLatitude"/></gml:pos></gml:Point></xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$box != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'"><gml:Envelope srsName="<xsl:value-of select="$SrsUri"/>"><gml:lowerCorner><xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/></gml:lowerCorner><gml:upperCorner><xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/></gml:upperCorner></gml:Envelope></xsl:when>
<xsl:when test="$SrsAxisOrder = 'LonLat'"><gml:Envelope srsName="<xsl:value-of select="$SrsUri"/>"><gml:lowerCorner><xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/></gml:lowerCorner><gml:upperCorner><xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/></gml:upperCorner></gml:Envelope></xsl:when>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><gml:Envelope srsName="<xsl:value-of select="$SrsUri"/>"><gml:lowerCorner><xsl:value-of select="$south"/><xsl:text> </xsl:text><xsl:value-of select="$west"/></gml:lowerCorner><gml:upperCorner><xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$east"/></gml:upperCorner></gml:Envelope></xsl:when>
</xsl:choose>
</xsl:when>
<xsl:when test="$polygon != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'"><gml:Polygon srsName="<xsl:value-of select="$SrsUri"/>"><gml:exterior><gml:LinearRing><gml:posList srsDimension="2"><xsl:value-of select="translate($polygon,',',' ')"/></gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></xsl:when>
<xsl:otherwise><gml:Polygon srsName="<xsl:value-of select="$SrsUri"/>"><gml:exterior><gml:LinearRing><gml:posList srsDimension="2"><xsl:value-of select="translate($polygon,',',' ')"/></gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></xsl:otherwise>
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:param>
-->
<!-- Geometry as WKT (GeoSPARQL) -->
<xsl:param name="pointAsWKTLiteral">
<xsl:if test="$point != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'">POINT(<xsl:value-of select="$pointLongitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLatitude"/>)</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LonLat'"><<xsl:value-of select="$SrsUri"/>> POINT(<xsl:value-of select="$pointLongitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLatitude"/>)</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><<xsl:value-of select="$SrsUri"/>> POINT(<xsl:value-of select="$pointLatitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLongitude"/>)</xsl:when>
</xsl:choose>
</xsl:if>
</xsl:param>
<xsl:param name="boxAsWKTLiteral">
<xsl:if test="$box != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'">POLYGON((<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>))</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LonLat'"><<xsl:value-of select="$SrsUri"/>> POLYGON((<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>))</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><<xsl:value-of select="$SrsUri"/>> POLYGON((<xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$west"/>,<xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$east"/>,<xsl:value-of select="$south"/><xsl:text> </xsl:text><xsl:value-of select="$east"/>,<xsl:value-of select="$south"/><xsl:text> </xsl:text><xsl:value-of select="$west"/>,<xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$west"/>))</xsl:when>
</xsl:choose>
</xsl:if>
</xsl:param>
<xsl:param name="polygonAsWKTLiteral">
<xsl:if test="$polygon != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'">POLYGON((<xsl:value-of select="$polygon"/>))</xsl:when>
<xsl:otherwise><<xsl:value-of select="$SrsUri"/>> POLYGON((<xsl:value-of select="$polygon"/>))</xsl:otherwise>
</xsl:choose>
</xsl:if>
</xsl:param>
<!--
<xsl:param name="WKTLiteral">
<xsl:choose>
<xsl:when test="$point != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'">POINT(<xsl:value-of select="$pointLongitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLatitude"/>)</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LonLat'"><<xsl:value-of select="$SrsUri"/>> POINT(<xsl:value-of select="$pointLongitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLatitude"/>)</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><<xsl:value-of select="$SrsUri"/>> POINT(<xsl:value-of select="$pointLatitude"/><xsl:text> </xsl:text><xsl:value-of select="$pointLongitude"/>)</xsl:when>
</xsl:choose>
</xsl:when>
<xsl:when test="$box != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'">POLYGON((<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>))</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LonLat'"><<xsl:value-of select="$SrsUri"/>> POLYGON((<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>,<xsl:value-of select="$east"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$south"/>,<xsl:value-of select="$west"/><xsl:text> </xsl:text><xsl:value-of select="$north"/>))</xsl:when>
<xsl:when test="$SrsAxisOrder = 'LatLon'"><<xsl:value-of select="$SrsUri"/>> POLYGON((<xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$west"/>,<xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$east"/>,<xsl:value-of select="$south"/><xsl:text> </xsl:text><xsl:value-of select="$east"/>,<xsl:value-of select="$south"/><xsl:text> </xsl:text><xsl:value-of select="$west"/>,<xsl:value-of select="$north"/><xsl:text> </xsl:text><xsl:value-of select="$west"/>))</xsl:when>
</xsl:choose>
</xsl:when>
<xsl:when test="$polygon != ''">
<xsl:choose>
<xsl:when test="$SrsUri = 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'">POLYGON((<xsl:value-of select="translate($polygon)"/>))</xsl:when>
<xsl:otherwise><<xsl:value-of select="$SrsUri"/>> POLYGON((<xsl:value-of select="translate($polygon)"/>))</xsl:otherwise>
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:param>
-->
<!-- Geometry as GeoJSON -->
<xsl:param name="pointAsGeoJSONLiteral">
<xsl:if test="$point != ''">{"type":"Point","crs":{"type":"name","properties":{"name":"<xsl:value-of select="$SrsUrn"/>"}},"coordinates":[<xsl:value-of select="$pointLongitude"/><xsl:text>,</xsl:text><xsl:value-of select="$pointLatitude"/>]}</xsl:if>
</xsl:param>
<xsl:param name="boxAsGeoJSONLiteral">
<xsl:if test="$box != ''">{"type":"Polygon","crs":{"type":"name","properties":{"name":"<xsl:value-of select="$SrsUrn"/>"}},"coordinates":[[[<xsl:value-of select="$west"/><xsl:text>,</xsl:text><xsl:value-of select="$north"/>],[<xsl:value-of select="$east"/><xsl:text>,</xsl:text><xsl:value-of select="$north"/>],[<xsl:value-of select="$east"/><xsl:text>,</xsl:text><xsl:value-of select="$south"/>],[<xsl:value-of select="$west"/><xsl:text>,</xsl:text><xsl:value-of select="$south"/>],[<xsl:value-of select="$west"/><xsl:text>,</xsl:text><xsl:value-of select="$north"/>]]]}</xsl:if>
</xsl:param>
<!-- NOTE(review): $polygonAsArray emits "[lat lon]" pairs (space-separated,
     lat-first) and already wraps the list in brackets, while
     $polygonAsGeoJSONLiteral wraps it again in "[[[...]]]" — the resulting
     literal does not look like valid GeoJSON (which expects comma-separated
     [lon,lat] positions); confirm against the GeoJSON spec before relying on it. -->
<xsl:param name="polygonAsArray">
<xsl:if test="$polygon != ''">
<xsl:text>[</xsl:text>
<xsl:for-each select="*[local-name() = 'geoLocationPolygon']">
<xsl:variable name="cnr" select="count(*[local-name() = 'polygonPoint'])"/>
<xsl:for-each select="*[local-name() = 'polygonPoint']">
<xsl:variable name="delimiter">
<xsl:if test="position() < $cnr">
<xsl:text>,</xsl:text>
</xsl:if>
</xsl:variable>
<xsl:text>[</xsl:text><xsl:value-of select="normalize-space(*[local-name() = 'pointLatitude'])"/><xsl:text> </xsl:text><xsl:value-of select="normalize-space(*[local-name() = 'pointLongitude'])"/><xsl:value-of select="$delimiter"/><xsl:text>]</xsl:text>
</xsl:for-each>
</xsl:for-each>
<xsl:text>]</xsl:text>
</xsl:if>
</xsl:param>
<xsl:param name="polygonAsGeoJSONLiteral">
<xsl:if test="$polygon != ''">{"type":"Polygon","crs":{"type":"name","properties":{"name":"<xsl:value-of select="$SrsUrn"/>"}},"coordinates":[[[<xsl:value-of select="$polygonAsArray"/>]]]}</xsl:if>
</xsl:param>
<!--
<xsl:param name="GeoJSONLiteral">
<xsl:choose>
<xsl:when test="$point != ''">{"type":"Point","crs":{"type":"name","properties":{"name":"<xsl:value-of select="$SrsUrn"/>"}},"coordinates":[<xsl:value-of select="$pointLongitude"/><xsl:text>,</xsl:text><xsl:value-of select="$pointLatitude"/>]}</xsl:when>
<xsl:when test="$box != ''">{"type":"Polygon","crs":{"type":"name","properties":{"name":"<xsl:value-of select="$SrsUrn"/>"}},"coordinates":[[[<xsl:value-of select="$west"/><xsl:text>,</xsl:text><xsl:value-of select="$north"/>],[<xsl:value-of select="$east"/><xsl:text>,</xsl:text><xsl:value-of select="$north"/>],[<xsl:value-of select="$east"/><xsl:text>,</xsl:text><xsl:value-of select="$south"/>],[<xsl:value-of select="$west"/><xsl:text>,</xsl:text><xsl:value-of select="$south"/>],[<xsl:value-of select="$west"/><xsl:text>,</xsl:text><xsl:value-of select="$north"/>]]]}</xsl:when>
<xsl:when test="$polygon != ''">{"type":"Polygon","crs":{"type":"name","properties":{"name":"<xsl:value-of select="$SrsUrn"/>"}},"coordinates":[[[<xsl:value-of select="$polygonAsGeoJSON"/>]]]}</xsl:when>
</xsl:choose>
</xsl:param>
-->
<!-- Geolocation -->
<!-- Emit dct:spatial only when at least one of place/point/box/polygon is non-empty;
     each geometry is serialised three times (WKT, GML, GeoJSON) as typed locn:geometry literals. -->
<xsl:if test="$place != '' or $point != '' or $box != '' or $polygon != ''">
<dct:spatial>
<dct:Location>
<xsl:if test="$place != ''">
<xsl:choose>
<xsl:when test="normalize-space(*[local-name() = 'geoLocationPlace']/@xml:lang) != ''">
<locn:geographicName xml:lang="{*[local-name() = 'geoLocationPlace']/@xml:lang}"><xsl:value-of select="$place"/></locn:geographicName>
</xsl:when>
<xsl:otherwise>
<locn:geographicName><xsl:value-of select="$place"/></locn:geographicName>
</xsl:otherwise>
</xsl:choose>
</xsl:if>
<xsl:if test="$point != ''">
<geo:lat_long rdf:datatype="{$xsd}decimal"><xsl:value-of select="$point"/></geo:lat_long>
<locn:geometry rdf:datatype="{$gsp}wktLiteral"><xsl:value-of select="$pointAsWKTLiteral"/></locn:geometry>
<locn:geometry rdf:datatype="{$gsp}gmlLiteral"><xsl:value-of select="$pointAsGMLLiteral"/></locn:geometry>
<locn:geometry rdf:datatype="{$geojsonMediaTypeUri}"><xsl:value-of select="$pointAsGeoJSONLiteral"/></locn:geometry>
</xsl:if>
<xsl:if test="$box != ''">
<schema:box rdf:datatype="{$xsd}string"><xsl:value-of select="$box"/></schema:box>
<locn:geometry rdf:datatype="{$gsp}wktLiteral"><xsl:value-of select="$boxAsWKTLiteral"/></locn:geometry>
<locn:geometry rdf:datatype="{$gsp}gmlLiteral"><xsl:value-of select="$boxAsGMLLiteral"/></locn:geometry>
<locn:geometry rdf:datatype="{$geojsonMediaTypeUri}"><xsl:value-of select="$boxAsGeoJSONLiteral"/></locn:geometry>
</xsl:if>
<xsl:if test="$polygon != ''">
<schema:polygon><xsl:value-of select="normalize-space(translate($polygonAsLatLon,',',' '))"/></schema:polygon>
<locn:geometry rdf:datatype="{$gsp}wktLiteral"><xsl:value-of select="$polygonAsWKTLiteral"/></locn:geometry>
<locn:geometry rdf:datatype="{$gsp}gmlLiteral"><xsl:value-of select="$polygonAsGMLLiteral"/></locn:geometry>
<locn:geometry rdf:datatype="{$geojsonMediaTypeUri}"><xsl:value-of select="$polygonAsGeoJSONLiteral"/></locn:geometry>
</xsl:if>
</dct:Location>
</dct:spatial>
</xsl:if>
</xsl:template>
<!-- Sizes template -->
<xsl:template name="Size" match="*[local-name() = 'sizes']/*[local-name() = 'size']">
  <!-- Sizes are mapped only in the extended profile, as a dct:extent / dct:SizeOrDuration
       whose rdfs:label carries the size text (with xml:lang when present). -->
  <!--
  <dcat:byteSize rdf:datatype="{$xsd}decimal"><xsl:value-of select="normalize-space(.)"/></dcat:byteSize>
  -->
  <xsl:if test="$profile = 'extended'">
    <dct:extent>
      <dct:SizeOrDuration>
        <xsl:variable name="sizeText" select="normalize-space(.)"/>
        <xsl:choose>
          <xsl:when test="normalize-space(@xml:lang) = ''">
            <rdfs:label><xsl:value-of select="$sizeText"/></rdfs:label>
          </xsl:when>
          <xsl:otherwise>
            <rdfs:label xml:lang="{@xml:lang}"><xsl:value-of select="$sizeText"/></rdfs:label>
          </xsl:otherwise>
        </xsl:choose>
      </dct:SizeOrDuration>
    </dct:extent>
  </xsl:if>
</xsl:template>
<!-- Dates template -->
<xsl:template name="PublicationYear" match="*[local-name() = 'publicationYear']">
  <!-- Map the DataCite publication year to dct:issued, typed as xsd:gYear. -->
  <dct:issued rdf:datatype="{$xsd}gYear"><xsl:value-of select="normalize-space(.)"/></dct:issued>
</xsl:template>
<xsl:template name="Dates" match="*[local-name() = 'dates']/*[local-name() = 'date']">
<!-- Maps each DataCite date to the corresponding DCT date property, keyed on the
     (lower-cased) dateType attribute; unknown types fall through to generic dct:date. -->
<xsl:variable name="date" select="normalize-space(.)"/>
<xsl:variable name="type" select="normalize-space(translate(@dateType,$uppercase,$lowercase))"/>
<!-- Pick the xsd datatype from the string length: 4 chars -> gYear,
     10 chars -> date, longer -> dateTime, anything else -> date.
     NOTE(review): assumes ISO 8601 values; a DataCite date *range*
     (e.g. "2004/2006", length > 10) would be mistyped as dateTime — confirm. -->
<xsl:variable name="dateDataType">
<xsl:choose>
<xsl:when test="string-length($date) = 4">
<xsl:text>gYear</xsl:text>
</xsl:when>
<xsl:when test="string-length($date) = 10">
<xsl:text>date</xsl:text>
</xsl:when>
<xsl:when test="string-length($date) > 10">
<xsl:text>dateTime</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>date</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Added in DataCite v4.1 -->
<!-- NB: Currently not mapped. Options could be to use reification or PROV-O -->
<xsl:variable name="dateInformation" select="normalize-space(@dateInformation)"/>
<!-- The when tests are mutually exclusive string equalities; several mappings are
     emitted only in the extended profile and otherwise fall through to dct:date. -->
<xsl:choose>
<xsl:when test="$type = 'issued'">
<dct:issued rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:issued>
</xsl:when>
<xsl:when test="$type = 'updated'">
<dct:modified rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:modified>
</xsl:when>
<xsl:when test="$type = 'created' and $profile = 'extended'">
<dct:created rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:created>
</xsl:when>
<xsl:when test="$type = 'accepted' and $profile = 'extended'">
<dct:dateAccepted rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:dateAccepted>
</xsl:when>
<xsl:when test="$type = 'available' and $profile = 'extended'">
<dct:available rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:available>
</xsl:when>
<xsl:when test="$type = 'copyrighted' and $profile = 'extended'">
<dct:dateCopyrighted rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:dateCopyrighted>
</xsl:when>
<!-- 'collected' maps to dct:created, the same property as 'created'. -->
<xsl:when test="$type = 'collected' and $profile = 'extended'">
<dct:created rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:created>
</xsl:when>
<xsl:when test="$type = 'submitted' and $profile = 'extended'">
<dct:dateSubmitted rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:dateSubmitted>
</xsl:when>
<xsl:when test="$type = 'valid' and $profile = 'extended'">
<dct:valid rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:valid>
</xsl:when>
<!-- Added in DataCite v4.1 -->
<xsl:when test="$type = 'other'">
<dct:date rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:date>
</xsl:when>
<!-- Added in DataCite v4.2 -->
<!-- 'withdrawn' maps to dct:modified and additionally marks the resource WITHDRAWN. -->
<xsl:when test="$type = 'withdrawn' and $profile = 'extended'">
<dct:modified rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:modified>
<adms:status rdf:resource="{$opds}WITHDRAWN"/>
</xsl:when>
<xsl:otherwise>
<dct:date rdf:datatype="{$xsd}{$dateDataType}">
<xsl:value-of select="$date"/>
</dct:date>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Publisher template -->
<xsl:template name="Publisher" match="*[local-name() = 'publisher']">
  <!-- Emit the publisher as a dct:publisher / foaf:Agent carrying its (whitespace-normalised) name. -->
  <dct:publisher>
    <foaf:Agent>
      <foaf:name><xsl:value-of select="normalize-space(.)"/></foaf:name>
    </foaf:Agent>
  </dct:publisher>
</xsl:template>
<!-- Creators and contributors template -->
<xsl:template name="Agents" match="*[local-name() = 'creators']/*[local-name() = 'creator']|*[local-name() = 'contributors']/*[local-name() = 'contributor']">
<!-- Added in DataCite v4.1 -->
<xsl:param name="nameType">
<xsl:choose>
<xsl:when test="local-name(.) = 'creator' and *[local-name() = 'creatorName']/@nameType">
<xsl:value-of select="translate(normalize-space(*[local-name() = 'creatorName']/@nameType),$uppercase,$lowercase)"/>
</xsl:when>
<xsl:when test="local-name(.) = 'contributor' and *[local-name() = 'contributorName']/@nameType">
<xsl:value-of select="translate(normalize-space(*[local-name() = 'contributorName']/@nameType),$uppercase,$lowercase)"/>
</xsl:when>
</xsl:choose>
</xsl:param>
<xsl:param name="agentName">
<xsl:choose>
<xsl:when test="local-name(.) = 'creator'">
<xsl:value-of select="normalize-space(*[local-name() = 'creatorName'])"/>
</xsl:when>
<xsl:when test="local-name(.) = 'contributor'">
<xsl:value-of select="normalize-space(*[local-name() = 'contributorName'])"/>
</xsl:when>
</xsl:choose>
</xsl:param>
<xsl:param name="agentFamilyName">
<xsl:choose>
<!-- Added in DataCite v4.0 -->
<xsl:when test="*[local-name() = 'familyName']">
<xsl:value-of select="normalize-space(*[local-name() = 'familyName'])"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="normalize-space(substring-before($agentName, ','))"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="agentGivenName">
<xsl:choose>
<!-- Added in DataCite v4.0 -->
<xsl:when test="*[local-name() = 'givenName']">
<xsl:value-of select="normalize-space(*[local-name() = 'givenName'])"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="normalize-space(substring-after($agentName, ','))"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="type" select="normalize-space(translate(@contributorType,$uppercase,$lowercase))"/>
<xsl:param name="nameIdentifier" select="normalize-space(*[local-name() = 'nameIdentifier'])"/>
<xsl:param name="nameIdentifierScheme" select="normalize-space(translate(*[local-name() = 'nameIdentifier']/@nameIdentifierScheme,$uppercase,$lowercase))"/>
<xsl:param name="schemeURI" select="normalize-space(translate(*[local-name() = 'nameIdentifier']/@schemeURI,$uppercase,$lowercase))"/>
<xsl:param name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$nameIdentifier"/>
<xsl:with-param name="type" select="$nameIdentifierScheme"/>
<xsl:with-param name="schemeURI" select="$schemeURI"/>
</xsl:call-template>
</xsl:param>
<xsl:param name="nameIdentifierDatatype">
<xsl:choose>
<xsl:when test="starts-with(translate(normalize-space($nameIdentifier),$uppercase,$lowercase),'http://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:when test="starts-with(translate(normalize-space($nameIdentifier),$uppercase,$lowercase),'https://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:when test="starts-with(translate(normalize-space($nameIdentifier),$uppercase,$lowercase),'urn://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="concat($xsd,'string')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="agentType">
<xsl:choose>
<xsl:when test="$type = 'contactperson'">
<xsl:choose>
<xsl:when test="$nameType = 'personal'">
<rdf:type rdf:resource="{$vcard}Individual"/>
</xsl:when>
<xsl:when test="$nameType = 'organizational'">
<rdf:type rdf:resource="{$vcard}Organization"/>
</xsl:when>
<xsl:otherwise>
<rdf:type rdf:resource="{$vcard}Individual"/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="$nameType = 'personal'">
<rdf:type rdf:resource="{$foaf}Person"/>
</xsl:when>
<xsl:when test="$nameType = 'organizational'">
<rdf:type rdf:resource="{$foaf}Organization"/>
</xsl:when>
<xsl:otherwise>
<rdf:type rdf:resource="{$foaf}Agent"/>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="agentDescription">
<xsl:copy-of select="$agentType"/>
<xsl:if test="$nameIdentifier != ''">
<dct:identifier rdf:datatype="{$nameIdentifierDatatype}"><xsl:value-of select="$nameIdentifier"/></dct:identifier>
</xsl:if>
<xsl:choose>
<xsl:when test="$type = 'contactperson'">
<!--
<rdf:type rdf:resource="{$vcard}Individual"/>
-->
<xsl:if test="$agentName != ''">
<vcard:fn><xsl:value-of select="$agentName"/></vcard:fn>
<vcard:given-name><xsl:value-of select="$agentGivenName"/></vcard:given-name>
<vcard:family-name><xsl:value-of select="$agentFamilyName"/></vcard:family-name>
</xsl:if>
<xsl:for-each select="*[local-name() = 'affiliation']">
<vcard:organization-name><xsl:value-of select="."/></vcard:organization-name>
</xsl:for-each>
</xsl:when>
<xsl:otherwise>
<!--
<rdf:type rdf:resource="{$foaf}Agent"/>
-->
<xsl:if test="$agentName != ''">
<foaf:name><xsl:value-of select="$agentName"/></foaf:name>
</xsl:if>
<xsl:if test="$agentGivenName != ''">
<foaf:givenName><xsl:value-of select="$agentGivenName"/></foaf:givenName>
</xsl:if>
<xsl:if test="$agentFamilyName != ''">
<foaf:familyName><xsl:value-of select="$agentFamilyName"/></foaf:familyName>
</xsl:if>
<xsl:for-each select="*[local-name() = 'affiliation']">
<org:memberOf>
<xsl:call-template name="Affiliations"/>
</org:memberOf>
</xsl:for-each>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="agent">
<xsl:variable name="urilc" select="translate($uri,$uppercase,$lowercase)"/>
<xsl:choose>
<xsl:when test="$uri != '' and ( starts-with($urilc, 'http://') or starts-with($urilc, 'https://') or starts-with($urilc, 'urn://') )">
<rdf:Description rdf:about="{$uri}">
<xsl:copy-of select="$agentDescription"/>
</rdf:Description>
</xsl:when>
<xsl:otherwise>
<rdf:Description>
<xsl:if test="$uri != ''">
<dct:identifier rdf:datatype="{$xsd}string"><xsl:value-of select="$uri"/></dct:identifier>
</xsl:if>
<xsl:copy-of select="$agentDescription"/>
</rdf:Description>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:choose>
<xsl:when test="local-name(.) = 'creator' and $profile = 'extended'">
<dct:creator><xsl:copy-of select="$agent"/></dct:creator>
</xsl:when>
<xsl:when test="local-name(.) = 'contributor'">
<xsl:choose>
<xsl:when test="$type = 'contactperson'">
<dcat:contactPoint><xsl:copy-of select="$agent"/></dcat:contactPoint>
</xsl:when>
<xsl:when test="$profile = 'extended'">
<xsl:choose>
<!-- TBD -->
<!--
<xsl:when test="$type = 'datacollector'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'datacurator'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'datamanager'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- Mapping to be confirmed when the final version of DUV will be released -->
<xsl:when test="$type = 'distributor'">
<duv:hasDistributor><xsl:copy-of select="$agent"/></duv:hasDistributor>
</xsl:when>
<xsl:when test="$type = 'editor'">
<schema:editor><xsl:copy-of select="$agent"/></schema:editor>
</xsl:when>
<!-- Unstable -->
<xsl:when test="$type = 'funder'">
<schema:funder><xsl:copy-of select="$agent"/></schema:funder>
</xsl:when>
<!-- TBD -->
<!--
<xsl:when test="$type = 'hostinginstitution'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
<xsl:when test="$type = 'producer'">
<schema:producer><xsl:copy-of select="$agent"/></schema:producer>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'projectleader'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
<prov:wasGeneratedBy>
<foaf:Project>
<??:??><xsl:copy-of select="$agent"/></??:??>
</foaf:Project>
</prov:wasGeneratedBy>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'projectmanager'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
<prov:wasGeneratedBy>
<foaf:Project>
<??:??><xsl:copy-of select="$agent"/></??:??>
</foaf:Project>
</prov:wasGeneratedBy>
</xsl:when>
-->
<!-- TBD -->
<xsl:when test="$type = 'projectmember'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
<prov:wasGeneratedBy>
<foaf:Project>
<foaf:member><xsl:copy-of select="$agent"/></foaf:member>
</foaf:Project>
</prov:wasGeneratedBy>
</xsl:when>
<!-- TBD -->
<!--
<xsl:when test="$type = 'registrationagency'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'registrationauthority'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'relatedperson'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'researcher'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'researchgroup'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<xsl:when test="$type = 'rightsholder'">
<dct:rightsHolder><xsl:copy-of select="$agent"/></dct:rightsHolder>
</xsl:when>
<!-- Unstable -->
<xsl:when test="$type = 'sponsor'">
<schema:sponsor><xsl:copy-of select="$agent"/></schema:sponsor>
</xsl:when>
<!-- TBD -->
<!--
<xsl:when test="$type = 'supervisor'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$type = 'workpackageleader'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
-->
<!-- TBD -->
<xsl:when test="$type = 'other'">
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:when>
<xsl:otherwise>
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<!-- Default mapping of contributor types not support in the core profile -->
<!--
<xsl:otherwise>
<dct:contributor><xsl:copy-of select="$agent"/></dct:contributor>
</xsl:otherwise>
-->
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Main and alternate identifiers template -->
<xsl:template name="Identifiers" match="*[local-name() = 'identifier']|*[local-name() = 'alternateIdentifiers']/*[local-name() = 'alternateIdentifier']">
<!-- Maps the main identifier to dct:identifier (+ landing/foaf page when it is a URI) and
     alternate identifiers to owl:sameAs / adms:identifier. -->
<xsl:param name="ResourceType"/>
<xsl:param name="identifier" select="normalize-space(.)"/>
<xsl:param name="type-original">
<xsl:choose>
<xsl:when test="local-name() = 'identifier'">
<xsl:value-of select="normalize-space(@identifierType)"/>
</xsl:when>
<xsl:when test="local-name() = 'alternateIdentifier'">
<xsl:value-of select="normalize-space(@alternateIdentifierType)"/>
</xsl:when>
</xsl:choose>
</xsl:param>
<xsl:param name="type" select="translate($type-original,$uppercase,$lowercase)"/>
<xsl:param name="schemeURI" select="@schemeURI"/>
<xsl:param name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$identifier"/>
<xsl:with-param name="type" select="$type"/>
<xsl:with-param name="schemeURI" select="$schemeURI"/>
</xsl:call-template>
</xsl:param>
<xsl:variable name="urilc" select="translate($uri,$uppercase,$lowercase)"/>
<xsl:choose>
<xsl:when test="$uri != '' and ( starts-with($urilc, 'http://') or starts-with($urilc, 'https://') or starts-with($urilc, 'urn:') )">
<xsl:choose>
<xsl:when test="local-name() = 'identifier'">
<dct:identifier rdf:datatype="{$xsd}anyURI"><xsl:value-of select="$uri"/></dct:identifier>
<xsl:choose>
<!-- Fixed: 'dataset' must be a string literal; the previous unquoted form selected a
     (non-existent) child element named "dataset", so this branch never matched. -->
<xsl:when test="$ResourceType = 'dataset'">
<dcat:landingPage rdf:resource="{$uri}"/>
</xsl:when>
<xsl:otherwise>
<foaf:page rdf:resource="{$uri}"/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="local-name() = 'alternateIdentifier'">
<owl:sameAs rdf:resource="{$uri}"/>
<!--
<adms:identifier rdf:resource="{$uri}"/>
-->
<adms:identifier>
<adms:Identifier>
<skos:notation rdf:datatype="{$xsd}anyURI"><xsl:value-of select="$uri"/></skos:notation>
<xsl:choose>
<xsl:when test="$type != ''">
<adms:schemeAgency><xsl:value-of select="$type-original"/></adms:schemeAgency>
</xsl:when>
<xsl:when test="$schemeURI != ''">
<dct:creator rdf:resource="{$schemeURI}"/>
</xsl:when>
</xsl:choose>
</adms:Identifier>
</adms:identifier>
</xsl:when>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<!-- Non-URI identifiers are kept as plain string literals. -->
<xsl:choose>
<xsl:when test="local-name() = 'identifier'">
<dc:identifier rdf:datatype="{$xsd}string"><xsl:value-of select="$uri"/></dc:identifier>
</xsl:when>
<xsl:when test="local-name() = 'alternateIdentifier'">
<adms:identifier>
<adms:Identifier>
<skos:notation><xsl:value-of select="$uri"/></skos:notation>
<xsl:choose>
<xsl:when test="$type != ''">
<adms:schemeAgency><xsl:value-of select="$type-original"/></adms:schemeAgency>
</xsl:when>
<xsl:when test="$schemeURI != ''">
<dct:creator rdf:resource="{$schemeURI}"/>
</xsl:when>
</xsl:choose>
</adms:Identifier>
</adms:identifier>
</xsl:when>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Related identifiers template -->
<xsl:template name="RelatedIdentifiers" match="*[local-name() = 'relatedIdentifiers']/*[local-name() = 'relatedIdentifier']">
<!-- Maps each relatedIdentifier to the corresponding DCAT / DCT / schema.org relation
     property, keyed on the lower-cased relationType. -->
<xsl:param name="relation" select="normalize-space(translate(@relationType,$uppercase,$lowercase))"/>
<xsl:param name="identifier" select="normalize-space(.)"/>
<xsl:param name="type" select="normalize-space(translate(@relatedIdentifierType,$uppercase,$lowercase))"/>
<xsl:param name="schemeURI" select="@schemeURI"/>
<xsl:param name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$identifier"/>
<xsl:with-param name="type" select="$type"/>
<xsl:with-param name="schemeURI" select="$schemeURI"/>
</xsl:call-template>
</xsl:param>
<!-- Added in DataCite v4.1 -->
<!-- NB: Currently not mapped. -->
<xsl:param name="resourceType" select="normalize-space(translate(@resourceTypeGeneral,$uppercase,$lowercase))"/>
<xsl:choose>
<xsl:when test="$relation = 'hasmetadata'">
<foaf:isPrimaryTopicOf>
<dcat:CatalogRecord rdf:about="{$uri}">
<xsl:if test="@relatedMetadataScheme != '' or @schemeURI != ''">
<xsl:choose>
<xsl:when test="@relatedMetadataScheme != '' and @schemeURI != ''">
<dct:conformsTo>
<dct:Standard rdf:about="{@schemeURI}">
<dct:title><xsl:value-of select="normalize-space(@relatedMetadataScheme)"/></dct:title>
</dct:Standard>
</dct:conformsTo>
</xsl:when>
<xsl:when test="@relatedMetadataScheme != '' and not(@schemeURI != '')">
<dct:conformsTo>
<dct:Standard>
<dct:title><xsl:value-of select="normalize-space(@relatedMetadataScheme)"/></dct:title>
</dct:Standard>
</dct:conformsTo>
</xsl:when>
<xsl:when test="not(@relatedMetadataScheme != '') and @schemeURI != ''">
<dct:conformsTo>
<dct:Standard rdf:about="{@schemeURI}"/>
</dct:conformsTo>
</xsl:when>
</xsl:choose>
</xsl:if>
</dcat:CatalogRecord>
</foaf:isPrimaryTopicOf>
</xsl:when>
<xsl:when test="$relation = 'isnewversionof'">
<dct:isVersionOf rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'ispreviousversionof'">
<dct:hasVersion rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'isdocumentedby'">
<foaf:page rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'isderivedfrom'">
<dct:source rdf:resource="{$uri}"/>
</xsl:when>
<!-- Added in DataCite v4.1 -->
<xsl:when test="$relation = 'hasversion'">
<dct:hasVersion rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'isversionof'">
<dct:isVersionOf rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$profile = 'extended'">
<xsl:choose>
<!-- TBD -->
<!--
<xsl:when test="$relation = 'iscitedby'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'cites'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'issupplementto'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'issupplementedby'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'iscontinuedby'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'continues'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<xsl:when test="$relation = 'ismetadatafor'">
<foaf:primaryTopic rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'ispartof'">
<dct:isPartOf rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'haspart'">
<dct:hasPart rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'isreferencedby'">
<dct:isReferencedBy rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'references'">
<dct:references rdf:resource="{$uri}"/>
</xsl:when>
<!-- TBD -->
<!--
<xsl:when test="$relation = 'documents'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'iscompiledby'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'compiles'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<xsl:when test="$relation = 'isvariantformof'">
<schema:isVariantOf rdf:resource="{$uri}"/>
</xsl:when>
<!-- TBD -->
<!--
<xsl:when test="$relation = 'isoriginalformof'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<xsl:when test="$relation = 'isidenticalto'">
<owl:sameAs rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'isreviewedby'">
<schema:review rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'reviews'">
<schema:itemReviewed rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'issourceof'">
<prov:hadDerivation rdf:resource="{$uri}"/>
</xsl:when>
<!-- Added in DataCite v4.1 -->
<!-- TBD -->
<!--
<xsl:when test="$relation = 'describes'">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
-->
<!-- Added in DataCite v4.1 -->
<xsl:when test="$relation = 'isdescribedby'">
<wdrs:describedby rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'requires'">
<dct:requires rdf:resource="{$uri}"/>
</xsl:when>
<xsl:when test="$relation = 'isrequiredby'">
<dct:isRequiredBy rdf:resource="{$uri}"/>
</xsl:when>
<!-- Added in DataCite v4.2 -->
<xsl:when test="$relation = 'obsoletes'">
<dct:replaces rdf:resource="{$uri}"/>
</xsl:when>
<!-- Added in DataCite v4.2 -->
<xsl:when test="$relation = 'isobsoletedby'">
<dct:isReplacedBy rdf:resource="{$uri}"/>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="urilc" select="translate($uri,$uppercase,$lowercase)"/>
<xsl:choose>
<!-- Fixed: URNs use the 'urn:' scheme (urn:NID:NSS, no '//'), matching the check in
     the Identifiers template; 'urn://' never matched a real URN. -->
<xsl:when test="$uri != '' and ( starts-with($urilc, 'http://') or starts-with($urilc, 'https://') or starts-with($urilc, 'urn:') )">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
<xsl:otherwise>
<dct:relation rdf:parseType="Resource">
<dc:identifier><xsl:value-of select="$uri"/></dc:identifier>
</dct:relation>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="urilc" select="translate($uri,$uppercase,$lowercase)"/>
<xsl:choose>
<!-- Fixed: 'urn:' instead of 'urn://' (see note above). -->
<xsl:when test="$uri != '' and ( starts-with($urilc, 'http://') or starts-with($urilc, 'https://') or starts-with($urilc, 'urn:') )">
<dct:relation rdf:resource="{$uri}"/>
</xsl:when>
<xsl:otherwise>
<dct:relation rdf:parseType="Resource">
<dc:identifier><xsl:value-of select="$uri"/></dc:identifier>
</dct:relation>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Formats template -->
<xsl:template name="Formats" match="*[local-name() = 'formats']/*[local-name() = 'format']">
<!-- Maps a format string to dcat:mediaType when its main type is one of the IANA
     top-level media types; anything else becomes a literal dct:format label. -->
<xsl:variable name="mediaType" select="normalize-space(.)"/>
<xsl:variable name="mainType" select="substring-before($mediaType,'/')"/>
<xsl:choose>
<xsl:when test="$mainType = 'application' or $mainType = 'audio' or $mainType = 'image' or $mainType = 'message' or $mainType = 'multipart' or $mainType = 'text' or $mainType = 'video'">
<dcat:mediaType rdf:resource="{$ianaMT}{$mediaType}"/>
</xsl:when>
<xsl:otherwise>
<dct:format>
<dct:MediaTypeOrExtent>
<rdfs:label><xsl:value-of select="$mediaType"/></rdfs:label>
</dct:MediaTypeOrExtent>
</dct:format>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Language template -->
<xsl:template name="Language" match="*[local-name() = 'language']">
<!-- Maps the language tag to the EU Publications Office language authority URI,
     via its (upper-cased) ISO 639-2 alpha-3 code. -->
<xsl:variable name="alpha3">
<xsl:call-template name="LanguageAlpha3">
<xsl:with-param name="langCode" select="normalize-space(.)"/>
</xsl:call-template>
</xsl:variable>
<dct:language rdf:resource="{$oplang}{translate($alpha3,$lowercase,$uppercase)}"/>
</xsl:template>
<!-- Resource type template -->
<!-- Maps DataCite resourceTypeGeneral to an rdf:type (dcat:Dataset for dataset-like
     resources) plus, in the extended profile, a dct:type from the DCMI Type vocabulary.
     Types with no dcat:Dataset line (event, physicalobject, service) are emitted only
     in the extended profile. -->
<xsl:template name="ResourceType" match="*[local-name() = 'resourceType']">
<xsl:param name="type" select="normalize-space(translate(@resourceTypeGeneral,$uppercase,$lowercase))"/>
<xsl:choose>
<xsl:when test="$type = 'audiovisual'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}MovingImage"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'collection'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}Collection"/>
</xsl:if>
</xsl:when>
<!-- Added in DataCite v4.1 -->
<xsl:when test="$type = 'datapaper'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<!-- TBD -->
<!--
<dct:type rdf:resource="{$dctype}??"/>
-->
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'dataset'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}Dataset"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'event'">
<xsl:if test="$profile = 'extended'">
<rdf:type rdf:resource="{$dctype}Event"/>
<dct:type rdf:resource="{$dctype}Event"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'image'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}Image"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'interactiveresource'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}InteractiveResource"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'model'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<!-- TBD -->
<!--
<dct:type rdf:resource="{$dctype}??"/>
-->
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'physicalobject'">
<xsl:if test="$profile = 'extended'">
<rdf:type rdf:resource="{$dctype}PhysicalObject"/>
<dct:type rdf:resource="{$dctype}PhysicalObject"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'service'">
<xsl:if test="$profile = 'extended'">
<rdf:type rdf:resource="{$dctype}Service"/>
<dct:type rdf:resource="{$dctype}Service"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'software'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}Software"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'sound'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}Sound"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'text'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<dct:type rdf:resource="{$dctype}Text"/>
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'workflow'">
<rdf:type rdf:resource="{$dcat}Dataset"/>
<xsl:if test="$profile = 'extended'">
<!-- TBD -->
<!--
<dct:type rdf:resource="{$dctype}??"/>
-->
</xsl:if>
</xsl:when>
<xsl:when test="$type = 'other'">
<!-- TBD -->
<!--
<rdf:type rdf:resource="{$dctype}??"/>
-->
</xsl:when>
<xsl:otherwise>
<!-- Unknown / missing resourceTypeGeneral: no typing emitted. -->
<!-- TBD -->
<!--
<rdf:type rdf:resource="{$dctype}??"/>
-->
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!--
Mapping templates
=================
-->
<!-- Template for creating the absolute URI of an identifier -->
<xsl:template name="IdentifierURI">
<!-- Builds the absolute URI for an identifier, prefixing it with the relevant
     resolver according to its (lower-cased) type. Unknown types pass through as-is. -->
<xsl:param name="identifier"/>
<xsl:param name="type"/>
<xsl:param name="schemeURI"/>
<!-- Resolvers -->
<xsl:variable name="orcid">http://orcid.org/</xsl:variable>
<xsl:variable name="isni">http://www.isni.org/</xsl:variable>
<xsl:variable name="grid">https://www.grid.ac/institutes/</xsl:variable>
<!--
<xsl:variable name="fundref">http://www.crossref.org/fundref/</xsl:variable>
<xsl:variable name="fundref">https://doi.org/</xsl:variable>
-->
<!-- Empty on purpose: Crossref Funder IDs and RORs are already full URLs. -->
<xsl:variable name="fundref"></xsl:variable>
<!-- Added in DataCite v4.3 -->
<xsl:variable name="ror"></xsl:variable>
<xsl:variable name="n2t">http://n2t.net/</xsl:variable>
<xsl:variable name="arxiv">http://arxiv.org/abs/</xsl:variable>
<xsl:variable name="doi">https://doi.org/</xsl:variable>
<xsl:variable name="bibcode">http://adsabs.harvard.edu/abs/</xsl:variable>
<xsl:variable name="pmid">http://www.ncbi.nlm.nih.gov/pubmed/</xsl:variable>
<xsl:variable name="handle">https://hdl.handle.net/</xsl:variable>
<!-- Added in DataCite v4.0 -->
<xsl:variable name="igsn">https://hdl.handle.net/10273/</xsl:variable>
<!-- Fixed: raw '&' is not well-formed XML; escaped as '&amp;'. -->
<xsl:variable name="istc">http://istc-search-beta.peppertag.com/ptproc/IstcSearch?tFrame=IstcListing&amp;tForceNewQuery=Yes&amp;esfIstc=</xsl:variable>
<!--
<xsl:variable name="issn">urn:issn:</xsl:variable>
-->
<xsl:variable name="issn">http://issn.org/resource/ISSN/</xsl:variable>
<xsl:variable name="lissn">http://issn.org/resource/ISSN-L/</xsl:variable>
<xsl:variable name="isbn">urn:isbn:</xsl:variable>
<xsl:choose>
<xsl:when test="$type = 'orcid'">
<xsl:value-of select="concat($orcid,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'isni'">
<xsl:value-of select="concat($isni,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'grid'">
<!-- Fixed: previously concatenated the ISNI resolver (copy-paste error). -->
<xsl:value-of select="concat($grid,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'crossref funder id'">
<xsl:value-of select="concat($fundref,$identifier)"/>
</xsl:when>
<!-- Added in DataCite v4.3 -->
<xsl:when test="$type = 'ror'">
<xsl:value-of select="concat($ror,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'ark'">
<xsl:value-of select="concat($n2t,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'arxiv'">
<!-- Strips the 'arXiv:' prefix before appending to the resolver. -->
<xsl:value-of select="concat($arxiv,substring-after($identifier,':'))"/>
</xsl:when>
<xsl:when test="$type = 'bibcode'">
<xsl:value-of select="concat($bibcode,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'doi'">
<xsl:value-of select="concat($doi,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'ean13'">
<!-- To be fixed - fictional URN namespace -->
<xsl:value-of select="concat('urn:ean-13:',$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'eissn'">
<xsl:value-of select="concat($issn,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'handle'">
<xsl:value-of select="concat($handle,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'isbn'">
<xsl:value-of select="concat($isbn,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'igsn'">
<xsl:value-of select="concat($igsn,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'issn'">
<xsl:value-of select="concat($issn,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'istc'">
<xsl:value-of select="concat($istc,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'lissn'">
<xsl:value-of select="concat($lissn,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'lsid'">
<xsl:value-of select="$identifier"/>
</xsl:when>
<xsl:when test="$type = 'pmid'">
<xsl:value-of select="concat($pmid,$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'purl'">
<xsl:value-of select="$identifier"/>
</xsl:when>
<xsl:when test="$type = 'upc'">
<!-- To be fixed - fictional URN namespace -->
<xsl:value-of select="concat('urn:upc:',$identifier)"/>
</xsl:when>
<xsl:when test="$type = 'url'">
<xsl:value-of select="$identifier"/>
</xsl:when>
<xsl:when test="$type = 'urn'">
<xsl:value-of select="$identifier"/>
</xsl:when>
<!-- Added in DataCite v4.2 -->
<xsl:when test="$type = 'w3id'">
<xsl:value-of select="$identifier"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$identifier"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- Affiliation template -->
<xsl:template name="Affiliations">
<!-- Builds a foaf:Organization for an affiliation, using the affiliation identifier
     as the resource URI when it resolves to an absolute URI. -->
<xsl:param name="affiliationIdentifier" select="normalize-space(@affiliationIdentifier)"/>
<xsl:param name="affiliationIdentifierScheme" select="normalize-space(@affiliationIdentifierScheme)"/>
<xsl:param name="affiliationSchemeURI" select="translate(normalize-space(@schemeURI),$uppercase,$lowercase)"/>
<xsl:param name="affiliationURI">
<xsl:variable name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$affiliationIdentifier"/>
<xsl:with-param name="type" select="$affiliationIdentifierScheme"/>
<xsl:with-param name="schemeURI" select="$affiliationSchemeURI"/>
</xsl:call-template>
</xsl:variable>
<!-- Fixed: URNs use the 'urn:' scheme (no '//'); 'urn://' never matched a real URN. -->
<xsl:if test="starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'https://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'urn:')">
<xsl:value-of select="$uri"/>
</xsl:if>
</xsl:param>
<xsl:param name="affiliationIdentifierDatatype">
<!-- xsd:anyURI for absolute URIs, xsd:string otherwise. -->
<xsl:choose>
<xsl:when test="starts-with(translate(normalize-space($affiliationIdentifier),$uppercase,$lowercase),'http://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:when test="starts-with(translate(normalize-space($affiliationIdentifier),$uppercase,$lowercase),'https://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<!-- Fixed: 'urn:' instead of 'urn://' (see note above). -->
<xsl:when test="starts-with(translate(normalize-space($affiliationIdentifier),$uppercase,$lowercase),'urn:')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="concat($xsd,'string')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="affiliationInfo">
<xsl:if test="$affiliationIdentifier != ''">
<dct:identifier rdf:datatype="{$affiliationIdentifierDatatype}"><xsl:value-of select="$affiliationIdentifier"/></dct:identifier>
</xsl:if>
<xsl:choose>
<xsl:when test="normalize-space(@xml:lang) != ''">
<foaf:name xml:lang="{normalize-space(@xml:lang)}"><xsl:value-of select="normalize-space(.)"/></foaf:name>
</xsl:when>
<xsl:otherwise>
<foaf:name><xsl:value-of select="normalize-space(.)"/></foaf:name>
</xsl:otherwise>
</xsl:choose>
<!--
<adms:identifier>
<adms:Identifier>
<skos:notation><xsl:value-of select="$affiliationIdentifier"/></skos:notation>
<xsl:choose>
<xsl:when test="$affiliationIdentifierScheme != ''">
<adms:schemeAgency><xsl:value-of select="$affiliationIdentifierScheme"/></adms:schemeAgency>
</xsl:when>
<xsl:when test="$affiliationSchemeURI != ''">
<dct:creator rdf:resource="{$affiliationSchemeURI}"/>
</xsl:when>
</xsl:choose>
</adms:Identifier>
</adms:identifier>
-->
</xsl:param>
<xsl:choose>
<xsl:when test="$affiliationURI != ''">
<foaf:Organization rdf:about="{$affiliationURI}">
<xsl:copy-of select="$affiliationInfo"/>
</foaf:Organization>
</xsl:when>
<xsl:when test="normalize-space($affiliationInfo) != ''">
<foaf:Organization>
<xsl:copy-of select="$affiliationInfo"/>
</foaf:Organization>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Funders template -->
<xsl:template name="Funders">
<!-- Builds a foaf:Organization for a fundingReference's funder, using the funder
     identifier as the resource URI when it resolves to an absolute URI. -->
<xsl:param name="funderIdentifier" select="normalize-space(*[local-name() = 'funderIdentifier'])"/>
<!--
<xsl:param name="funderIdentifierType" select="translate(normalize-space(*[local-name() = 'funderIdentifier']/@funderIdentifierType),$uppercase,$lowercase)"/>
-->
<xsl:param name="funderIdentifierType" select="normalize-space(*[local-name() = 'funderIdentifier']/@funderIdentifierType)"/>
<xsl:param name="funderSchemeURI" select="translate(normalize-space(*[local-name() = 'funderIdentifier']/@schemeURI),$uppercase,$lowercase)"/>
<xsl:param name="funderURI">
<xsl:variable name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$funderIdentifier"/>
<xsl:with-param name="type" select="$funderIdentifierType"/>
<xsl:with-param name="schemeURI" select="$funderSchemeURI"/>
</xsl:call-template>
</xsl:variable>
<!-- Fixed: URNs use the 'urn:' scheme (no '//'); 'urn://' never matched a real URN. -->
<xsl:if test="starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'https://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'urn:')">
<xsl:value-of select="$uri"/>
</xsl:if>
</xsl:param>
<xsl:param name="funderIdentifierDatatype">
<!-- xsd:anyURI for absolute URIs, xsd:string otherwise. -->
<xsl:choose>
<xsl:when test="starts-with(translate(normalize-space($funderIdentifier),$uppercase,$lowercase),'http://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:when test="starts-with(translate(normalize-space($funderIdentifier),$uppercase,$lowercase),'https://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<!-- Fixed: 'urn:' instead of 'urn://', consistent with the FundingAwards datatype check. -->
<xsl:when test="starts-with(translate(normalize-space($funderIdentifier),$uppercase,$lowercase),'urn:')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="concat($xsd,'string')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="funderInfo">
<dct:identifier rdf:datatype="{$funderIdentifierDatatype}"><xsl:value-of select="$funderIdentifier"/></dct:identifier>
<xsl:choose>
<xsl:when test="normalize-space(*[local-name() = 'funderName']/@xml:lang) != ''">
<foaf:name xml:lang="{normalize-space(*[local-name() = 'funderName']/@xml:lang)}"><xsl:value-of select="normalize-space(*[local-name() = 'funderName'])"/></foaf:name>
</xsl:when>
<xsl:otherwise>
<foaf:name><xsl:value-of select="normalize-space(*[local-name() = 'funderName'])"/></foaf:name>
</xsl:otherwise>
</xsl:choose>
<!--
<adms:identifier>
<adms:Identifier>
<skos:notation><xsl:value-of select="$funderIdentifier"/></skos:notation>
<xsl:choose>
<xsl:when test="$funderIdentifierType != ''">
<adms:schemeAgency><xsl:value-of select="$funderIdentifierType"/></adms:schemeAgency>
</xsl:when>
<xsl:when test="$funderSchemeURI != ''">
<dct:creator rdf:resource="{$funderSchemeURI}"/>
</xsl:when>
</xsl:choose>
</adms:Identifier>
</adms:identifier>
-->
</xsl:param>
<xsl:choose>
<xsl:when test="$funderURI != ''">
<foaf:Organization rdf:about="{$funderURI}">
<xsl:copy-of select="$funderInfo"/>
</foaf:Organization>
</xsl:when>
<xsl:when test="normalize-space($funderInfo) != ''">
<foaf:Organization>
<xsl:copy-of select="$funderInfo"/>
</foaf:Organization>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Funding Awards references template -->
<!--
<xsl:template match="//*[local-name() = 'fundingReferences']/*[local-name() = 'fundingReference' and normalize-space(*[local-name() = 'awardNumber']/@awardURI) != '']">
-->
<xsl:template name="FundingAwards">
<!-- Builds a foaf:Project for a fundingReference (award number / title) and links it
     to its funder via frapo:isAwardedBy. -->
<xsl:param name="funderIdentifier" select="normalize-space(*[local-name() = 'funderIdentifier'])"/>
<xsl:param name="funderIdentifierType" select="translate(normalize-space(*[local-name() = 'funderIdentifier']/@funderIdentifierType),$uppercase,$lowercase)"/>
<xsl:param name="funderURI">
<xsl:variable name="uri">
<xsl:call-template name="IdentifierURI">
<xsl:with-param name="identifier" select="$funderIdentifier"/>
<xsl:with-param name="type" select="$funderIdentifierType"/>
<!--
<xsl:with-param name="schemeURI" select="$schemeURI"/>
-->
</xsl:call-template>
</xsl:variable>
<!-- Fixed: URNs use the 'urn:' scheme (no '//'), consistent with the datatype check below. -->
<xsl:if test="starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'https://') or starts-with(translate(normalize-space($uri),$uppercase,$lowercase),'urn:')">
<xsl:value-of select="$uri"/>
</xsl:if>
<!--
<xsl:if test="starts-with(translate(normalize-space(*[local-name() = 'funderIdentifier']),$uppercase,$lowercase),'http://') or starts-with(translate(normalize-space(*[local-name() = 'funderIdentifier']),$uppercase,$lowercase),'https://')">
<xsl:value-of select="normalize-space(*[local-name() = 'funderIdentifier'])"/>
</xsl:if>
-->
</xsl:param>
<!--
<xsl:param name="funderInfo">
<dct:identifier><xsl:value-of select="normalize-space(*[local-name() = 'funderIdentifier'])"/></dct:identifier>
<foaf:name><xsl:value-of select="normalize-space(*[local-name() = 'funderName'])"/></foaf:name>
</xsl:param>
-->
<xsl:param name="fundingReferenceURI">
<xsl:value-of select="normalize-space(*[local-name() = 'awardNumber']/@awardURI)"/>
</xsl:param>
<xsl:param name="fundingReferenceIdentifierDatatype">
<!-- xsd:anyURI for absolute URIs, xsd:string otherwise. -->
<xsl:choose>
<xsl:when test="starts-with(translate(normalize-space(*[local-name() = 'awardNumber']), $uppercase, $lowercase), 'http://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:when test="starts-with(translate(normalize-space(*[local-name() = 'awardNumber']), $uppercase, $lowercase), 'https://')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:when test="starts-with(translate(normalize-space(*[local-name() = 'awardNumber']), $uppercase, $lowercase), 'urn:')">
<xsl:value-of select="concat($xsd,'anyURI')"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="concat($xsd,'string')"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:param name="fundingReferenceInfo">
<dct:identifier rdf:datatype="{$fundingReferenceIdentifierDatatype}"><xsl:value-of select="normalize-space(*[local-name() = 'awardNumber'])"/></dct:identifier>
<xsl:choose>
<xsl:when test="normalize-space(*[local-name() = 'awardTitle']/@xml:lang) != ''">
<dct:title xml:lang="{normalize-space(*[local-name() = 'awardTitle']/@xml:lang)}"><xsl:value-of select="normalize-space(*[local-name() = 'awardTitle'])"/></dct:title>
</xsl:when>
<xsl:otherwise>
<dct:title><xsl:value-of select="normalize-space(*[local-name() = 'awardTitle'])"/></dct:title>
</xsl:otherwise>
</xsl:choose>
<xsl:choose>
<xsl:when test="$funderURI != ''">
<frapo:isAwardedBy rdf:resource="{$funderURI}"/>
</xsl:when>
<xsl:when test="normalize-space(*[local-name() = 'funderName']) != '' or normalize-space(*[local-name() = 'funderIdentifier']) != ''">
<frapo:isAwardedBy>
<xsl:call-template name="Funders"/>
</frapo:isAwardedBy>
</xsl:when>
</xsl:choose>
</xsl:param>
<xsl:choose>
<xsl:when test="$fundingReferenceURI != ''">
<foaf:Project rdf:about="{$fundingReferenceURI}">
<xsl:copy-of select="$fundingReferenceInfo"/>
</foaf:Project>
</xsl:when>
<xsl:when test="normalize-space($fundingReferenceInfo) != ''">
<foaf:Project>
<xsl:copy-of select="$fundingReferenceInfo"/>
</foaf:Project>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Template returning the Alpha-2 version of a language code / tag -->
<!-- CAVEAT: The mapping concerns just the 24 official EU languages -->
<xsl:template name="LanguageAlpha2">
<!-- Returns the ISO 639-1 alpha-2 code for a language code / tag.
     Accepts both ISO 639-2/B and 639-2/T alpha-3 codes for the 24 official EU languages. -->
<xsl:param name="langCode"/>
<xsl:choose>
<xsl:when test="string-length($langCode) = 2">
<xsl:value-of select="translate($langCode,$uppercase,$lowercase)"/>
</xsl:when>
<xsl:when test="string-length($langCode) > 3 and string-length(substring-before($langCode, '-')) = 2">
<xsl:value-of select="translate(substring-before($langCode, '-'),$uppercase,$lowercase)"/>
</xsl:when>
<xsl:when test="string-length($langCode) = 3">
<xsl:variable name="alpha3" select="translate($langCode,$uppercase,$lowercase)"/>
<xsl:choose>
<xsl:when test="$alpha3 = 'bul'">
<xsl:text>bg</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'cze' or $alpha3 = 'ces'">
<xsl:text>cs</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'dan'">
<xsl:text>da</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'ger' or $alpha3 = 'deu'">
<xsl:text>de</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'gre' or $alpha3 = 'ell'">
<xsl:text>el</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'eng'">
<xsl:text>en</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'spa'">
<xsl:text>es</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'est'">
<xsl:text>et</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'fin'">
<xsl:text>fi</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'fre' or $alpha3 = 'fra'">
<xsl:text>fr</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'gle'">
<xsl:text>ga</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'hrv'">
<xsl:text>hr</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'ita'">
<xsl:text>it</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'lav'">
<xsl:text>lv</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'lit'">
<xsl:text>lt</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'hun'">
<xsl:text>hu</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'mlt'">
<xsl:text>mt</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'dut' or $alpha3 = 'nld'">
<xsl:text>nl</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'pol'">
<xsl:text>pl</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'por'">
<xsl:text>pt</xsl:text>
</xsl:when>
<!-- Fixed: 'rum' is Romanian, whose alpha-2 code is 'ro' ('ru' is Russian). -->
<xsl:when test="$alpha3 = 'rum' or $alpha3 = 'ron'">
<xsl:text>ro</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'slo' or $alpha3 = 'slk'">
<xsl:text>sk</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'slv'">
<xsl:text>sl</xsl:text>
</xsl:when>
<xsl:when test="$alpha3 = 'swe'">
<xsl:text>sv</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="translate($langCode,$uppercase,$lowercase)"/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:template>
<!-- Template returning the Alpha-3 version of a language code / tag -->
<!-- CAVEAT: The mapping concerns just the 24 official EU languages -->
<xsl:template name="LanguageAlpha3">
<xsl:param name="langCode"/>
<xsl:variable name="alpha2">
<xsl:choose>
<xsl:when test="string-length($langCode) = 2">
<xsl:value-of select="translate($langCode,$uppercase,$lowercase)"/>
</xsl:when>
<xsl:when test="string-length($langCode) > 3 and string-length(substring-before($langCode, '-')) = 2">
<xsl:value-of select="translate(substring-before($langCode, '-'),$uppercase,$lowercase)"/>
</xsl:when>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="string-length($langCode) = 3">
<xsl:value-of select="translate($langCode,$uppercase,$lowercase)"/>
</xsl:when>
<xsl:when test="$alpha2 != ''">
<xsl:choose>
<xsl:when test="$alpha2 = 'bg'">
<xsl:text>bul</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'cs'">
<xsl:text>cze</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'da'">
<xsl:text>dan</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'de'">
<xsl:text>ger</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'el'">
<xsl:text>gre</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'en'">
<xsl:text>eng</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'es'">
<xsl:text>spa</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'et'">
<xsl:text>est</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'fi'">
<xsl:text>fin</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'fr'">
<xsl:text>fre</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'ga'">
<xsl:text>gle</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'hr'">
<xsl:text>hrv</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'it'">
<xsl:text>ita</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'lv'">
<xsl:text>lav</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'lt'">
<xsl:text>lit</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'hu'">
<xsl:text>hun</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'mt'">
<xsl:text>mlt</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'nl'">
<xsl:text>dut</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'pl'">
<xsl:text>pol</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'pt'">
<xsl:text>por</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'ru'">
<xsl:text>rum</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'sk'">
<xsl:text>slo</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'sl'">
<xsl:text>slv</xsl:text>
</xsl:when>
<xsl:when test="$alpha2 = 'sv'">
<xsl:text>swe</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="translate($langCode,$uppercase,$lowercase)"/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
</xsl:choose>
</xsl:template>
</xsl:transform>
| 119,576 | Python | .tac | 2,499 | 39.260504 | 592 | 0.585867 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,647 | datacite-resourceType-v4.1.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-resourceType-v4.1.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2011-01-13 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element; added values "Audiovisual", "Workflow" & "Other"; deleted value "Film"
2017-10-23 v4.1: Addition of value "DataPaper" -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary backing DataCite 4.1 resourceTypeGeneral. -->
  <xs:simpleType name="resourceType" id="resourceType">
    <xs:annotation>
      <xs:documentation>The general type of a resource.</xs:documentation>
    </xs:annotation>
    <xs:restriction base="xs:string">
      <xs:enumeration value="Audiovisual"/>
      <xs:enumeration value="Collection"/>
      <xs:enumeration value="DataPaper"/>
      <xs:enumeration value="Dataset"/>
      <xs:enumeration value="Event"/>
      <xs:enumeration value="Image"/>
      <xs:enumeration value="InteractiveResource"/>
      <xs:enumeration value="Model"/>
      <xs:enumeration value="PhysicalObject"/>
      <xs:enumeration value="Service"/>
      <xs:enumeration value="Software"/>
      <xs:enumeration value="Sound"/>
      <xs:enumeration value="Text"/>
      <xs:enumeration value="Workflow"/>
      <xs:enumeration value="Other"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 1,346 | Python | .tac | 28 | 42.571429 | 184 | 0.682853 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,648 | datacite-funderIdentifierType-v4.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-funderIdentifierType-v4.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2016-05-14 -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary for the scheme of a funder identifier. -->
  <xs:simpleType name="funderIdentifierType" id="funderIdentifierType">
    <xs:annotation>
      <xs:documentation>The type of the funderIdentifier.</xs:documentation>
    </xs:annotation>
    <xs:restriction base="xs:string">
      <xs:enumeration value="ISNI"/>
      <xs:enumeration value="GRID"/>
      <xs:enumeration value="Crossref Funder ID"/>
      <xs:enumeration value="Other"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 711 | Python | .tac | 15 | 43.066667 | 184 | 0.704023 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,649 | datacite-relatedIdentifierType-v4.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-relatedIdentifierType-v4.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2011-01-13 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element; addition of value "PMID"
2014-08-20 v3.1: Addition of values "arxiv" and "bibcode"
2015-02-12 v4.0 Addition of value "IGSN" -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary for the scheme of a related identifier. -->
  <xs:simpleType name="relatedIdentifierType" id="relatedIdentifierType">
    <xs:annotation>
      <xs:documentation>The type of the RelatedIdentifier.</xs:documentation>
    </xs:annotation>
    <xs:restriction base="xs:string">
      <xs:enumeration value="ARK"/>
      <xs:enumeration value="arXiv"/>
      <xs:enumeration value="bibcode"/>
      <xs:enumeration value="DOI"/>
      <xs:enumeration value="EAN13"/>
      <xs:enumeration value="EISSN"/>
      <xs:enumeration value="Handle"/>
      <xs:enumeration value="IGSN"/>
      <xs:enumeration value="ISBN"/>
      <xs:enumeration value="ISSN"/>
      <xs:enumeration value="ISTC"/>
      <xs:enumeration value="LISSN"/>
      <xs:enumeration value="LSID"/>
      <xs:enumeration value="PMID"/>
      <xs:enumeration value="PURL"/>
      <xs:enumeration value="UPC"/>
      <xs:enumeration value="URL"/>
      <xs:enumeration value="URN"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 1,422 | Python | .tac | 32 | 39.0625 | 184 | 0.672662 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,650 | datacite-titleType-v4.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-titleType-v4.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2011-01-13 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element
2015-02-12 v4.0 Added value "Other" -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary qualifying a non-primary title. -->
  <xs:simpleType name="titleType" id="titleType">
    <xs:restriction base="xs:string">
      <xs:enumeration value="AlternativeTitle"/>
      <xs:enumeration value="Subtitle"/>
      <xs:enumeration value="TranslatedTitle"/>
      <xs:enumeration value="Other"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 694 | Python | .tac | 14 | 45.714286 | 184 | 0.704412 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,651 | datacite-relationType-v4.1.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-relationType-v4.1.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!--
2011-01-13 v1.0 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element, addition of values "IsIdenticalTo", "HasMetadata" & "IsMetadataFor"
2014-08-20 v3.1: Addition of values "Reviews" & "IsReviewedBy" and "IsDerivedFrom" & "IsSourceOf"
2017-10-23 v.4.1: Addition of values "Describes", "IsDescribedBy", "HasVersion", "IsVersionOf", "Requires", "IsRequiredBy" -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary for the relation between resource (A) and related resource (B). -->
  <xs:simpleType name="relationType" id="relationType">
    <xs:annotation>
      <xs:documentation>Description of the relationship of the resource being registered (A) and the related resource (B).</xs:documentation>
    </xs:annotation>
    <xs:restriction base="xs:string">
      <xs:enumeration value="IsCitedBy"/>
      <xs:enumeration value="Cites"/>
      <xs:enumeration value="IsSupplementTo"/>
      <xs:enumeration value="IsSupplementedBy"/>
      <xs:enumeration value="IsContinuedBy"/>
      <xs:enumeration value="Continues"/>
      <xs:enumeration value="IsNewVersionOf"/>
      <xs:enumeration value="IsPreviousVersionOf"/>
      <xs:enumeration value="IsPartOf"/>
      <xs:enumeration value="HasPart"/>
      <xs:enumeration value="IsReferencedBy"/>
      <xs:enumeration value="References"/>
      <xs:enumeration value="IsDocumentedBy"/>
      <xs:enumeration value="Documents"/>
      <xs:enumeration value="IsCompiledBy"/>
      <xs:enumeration value="Compiles"/>
      <xs:enumeration value="IsVariantFormOf"/>
      <xs:enumeration value="IsOriginalFormOf"/>
      <xs:enumeration value="IsIdenticalTo"/>
      <xs:enumeration value="HasMetadata"/>
      <xs:enumeration value="IsMetadataFor"/>
      <xs:enumeration value="Reviews"/>
      <xs:enumeration value="IsReviewedBy"/>
      <xs:enumeration value="IsDerivedFrom"/>
      <xs:enumeration value="IsSourceOf"/>
      <xs:enumeration value="Describes"/>
      <xs:enumeration value="IsDescribedBy"/>
      <xs:enumeration value="HasVersion"/>
      <xs:enumeration value="IsVersionOf"/>
      <xs:enumeration value="Requires"/>
      <xs:enumeration value="IsRequiredBy"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 2,325 | Python | .tac | 46 | 44.76087 | 184 | 0.697674 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,652 | datacite-nameType-v4.1.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-nameType-v4.1.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 4.1 - Created 2017-10-23 -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Distinguishes organisational from personal creator/contributor names. -->
  <xs:simpleType name="nameType" id="nameType">
    <xs:restriction base="xs:string">
      <xs:enumeration value="Organizational"/>
      <xs:enumeration value="Personal"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 494 | Python | .tac | 10 | 46 | 184 | 0.704545 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,653 | datacite-dateType-v4.1.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-dateType-v4.1.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2011-01-13 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element; addition of value "Collected"; deleted "StartDate" & "EndDate"
2017-10-23 v4.1: Addition of value "Other"-->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
<xs:simpleType name="dateType" id="dateType">
<xs:annotation>
<xs:documentation>The type of date. Use RKMS�ISO8601 standard for depicting date ranges.To indicate the end of an embargo period, use Available. To indicate the start of an embargo period, use Submitted or Accepted, as appropriate.</xs:documentation>
</xs:annotation>
<xs:restriction base="xs:string">
<xs:enumeration value="Accepted"/>
<xs:enumeration value="Available"/>
<xs:enumeration value="Collected"/>
<xs:enumeration value="Copyrighted"/>
<xs:enumeration value="Created"/>
<xs:enumeration value="Issued"/>
<xs:enumeration value="Other"/>
<xs:enumeration value="Submitted"/>
<xs:enumeration value="Updated"/>
<xs:enumeration value="Valid"/>
</xs:restriction>
</xs:simpleType>
</xs:schema>
| 1,298 | Python | .tac | 23 | 51.26087 | 258 | 0.702745 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,654 | datacite-descriptionType-v4.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-descriptionType-v4.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2011-01-13 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element, addition of value "Methods"
2015-02-12 v4.0: Addition of value "TechnicalInfo"-->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary qualifying a free-text description. -->
  <xs:simpleType name="descriptionType" id="descriptionType">
    <xs:annotation>
      <xs:documentation>The type of the description.</xs:documentation>
    </xs:annotation>
    <xs:restriction base="xs:string">
      <xs:enumeration value="Abstract"/>
      <xs:enumeration value="Methods"/>
      <xs:enumeration value="SeriesInformation"/>
      <xs:enumeration value="TableOfContents"/>
      <xs:enumeration value="TechnicalInfo"/>
      <xs:enumeration value="Other"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 955 | Python | .tac | 19 | 45.473684 | 184 | 0.705128 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,655 | datacite-contributorType-v4.xsd | zenodo_zenodo/zenodo/modules/records/data/include/v4.1/datacite-contributorType-v4.xsd | <?xml version="1.0" encoding="UTF-8"?>
<!-- Version 1.0 - Created 2011-01-13 - FZ, TIB, Germany
2013-05 v3.0: Addition of ID to simpleType element, added values "ResearchGroup" & "Other"
2014-08-20 v3.1: Addition of value "DataCurator"
2015-05-14 v4.0 dropped value "Funder", use new "funderReference" -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://datacite.org/schema/kernel-4" targetNamespace="http://datacite.org/schema/kernel-4" elementFormDefault="qualified">
  <!-- Closed vocabulary for contributor roles (note: "Funder" was dropped in
       schema v4 in favour of fundingReference). -->
  <xs:simpleType name="contributorType" id="contributorType">
    <xs:annotation>
      <xs:documentation>The type of contributor of the resource.</xs:documentation>
    </xs:annotation>
    <xs:restriction base="xs:string">
      <xs:enumeration value="ContactPerson"/>
      <xs:enumeration value="DataCollector"/>
      <xs:enumeration value="DataCurator"/>
      <xs:enumeration value="DataManager"/>
      <xs:enumeration value="Distributor"/>
      <xs:enumeration value="Editor"/>
      <xs:enumeration value="HostingInstitution"/>
      <xs:enumeration value="Other"/>
      <xs:enumeration value="Producer"/>
      <xs:enumeration value="ProjectLeader"/>
      <xs:enumeration value="ProjectManager"/>
      <xs:enumeration value="ProjectMember"/>
      <xs:enumeration value="RegistrationAgency"/>
      <xs:enumeration value="RegistrationAuthority"/>
      <xs:enumeration value="RelatedPerson"/>
      <xs:enumeration value="ResearchGroup"/>
      <xs:enumeration value="RightsHolder"/>
      <xs:enumeration value="Researcher"/>
      <xs:enumeration value="Sponsor"/>
      <xs:enumeration value="Supervisor"/>
      <xs:enumeration value="WorkPackageLeader"/>
    </xs:restriction>
  </xs:simpleType>
</xs:schema>
| 1,729 | Python | .tac | 35 | 43.8 | 184 | 0.700708 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,656 | update_datacite.html | zenodo_zenodo/zenodo/modules/records/templates/zenodo_records/update_datacite.html | {#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
-#}
{% extends 'admin/master.html' %}
{%- from "zenodo_support/macros.html" import render_form_field, render_form_input %}
{% block body %}
{# Admin view for bulk-refreshing DOI metadata on DataCite: first a date-range
   form, then a confirmation step once the affected-PID count is known. #}
{# NOTE(review): the explicit `== False` means neither branch renders when
   `is_task_running` is None/undefined — presumably intentional; confirm. #}
{% if is_task_running == False %}
<form method="POST">
  {{ form.csrf_token }}
  <div class="form-group row">
    {{ render_form_field(form.from_date) }}
  </div>
  <div class="form-group row">
    {{ render_form_field(form.until_date) }}
  </div>
  <div class="form-group row">
    <div class="col-sm-offset-2 col-sm-10">
      <button type="submit" class="btn btn-primary" name="action" value="SubmitDates">Submit</button>
    </div>
  </div>
  {# Confirmation step: shown after the date range has been submitted and the
     number of affected DOIs computed by the view. #}
  {% if details and details.total_pids != None %}
  <div class="form-group">
    <div class="row">
      <div class="col-sm-12">
        <p>
          You are about to update {{details.total_pids}} DOIs.
          This will take {{time}} hours.
        </p>
      </div>
    </div>
    <div class="row">
      <div class="col-sm-12">
        <button type="submit" class="btn btn-primary" name="action" value="Confirm">Confirm</button>
        <button type="submit" class="btn btn-danger" name="action" value="Cancel">Cancel</button>
      </div>
    </div>
  </div>
  {% endif %}
</form>
{% endif %}
{# While a task is running (or finished), show its progress; the single button
   doubles as "New Task" when done and "Cancel Task" while pids remain. #}
{% if is_task_running %}
<form method="POST">
  {{ cancel_or_new_task_form.csrf_token }}
  <div class="row">
    <div class="col-sm-6">
      <h4>Task details</h4>
      <p>The job started on: {{details.start_date}}</p>
      {% if details.finish_date %}
      <p>The job finished on: {{details.finish_date}}</p>
      {% endif %}
      <p>From: {{details.from_date}}</p>
      <p>Until: {{details.until_date}}</p>
      <p>Progress: {{details.total_pids-details.left_pids}}/{{details.total_pids}} updated DOIs.</p>
      <p>Last update: {{details.last_update}}</p>
      <button type="submit"
              class="btn {{'btn-primary' if details.left_pids == 0 }} {{'btn-danger' if details.left_pids > 0 }}">
        {{'New Task' if details.left_pids == 0 }} {{'Cancel Task' if details.left_pids > 0 }}
      </button>
    </div>
  </div>
</form>
{% endif %}
{% endblock %}
| 3,102 | Python | .tac | 83 | 32.313253 | 116 | 0.631631 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,657 | datacite.py | zenodo_zenodo/zenodo/modules/records/serializers/datacite.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017, 2018 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Marshmallow based DataCite serializer for records."""
from __future__ import absolute_import, print_function
from flask import current_app
from invenio_records_rest.serializers.datacite import DataCite31Serializer, \
DataCite41Serializer
from .pidrelations import preprocess_related_identifiers
from .schemas.common import ui_link_for
class ZenodoDataCite31Serializer(DataCite31Serializer):
    """Marshmallow based DataCite serializer for records.

    Note: This serializer is not suitable for serializing large number of
    records.
    """

    def preprocess_record(self, pid, record, links_factory=None):
        """Add related identifiers from PID relations."""
        result = super(ZenodoDataCite31Serializer, self).preprocess_record(
            pid, record, links_factory=links_factory
        )
        # Inject versioning relations resolved from the PID relations.
        result = preprocess_related_identifiers(pid, record, result)
        # Always expose the record's landing page as a URL alternate
        # identifier.
        url_identifier = {
            'identifier': ui_link_for('record_html', id=str(record['recid'])),
            'scheme': 'url',
        }
        metadata = result['metadata']
        metadata.setdefault('alternate_identifiers', []).append(url_identifier)
        return result
class ZenodoDataCite41Serializer(DataCite41Serializer):
    """Marshmallow based DataCite serializer for records.

    Note: This serializer is not suitable for serializing large number of
    records.
    """

    def preprocess_record(self, pid, record, links_factory=None):
        """Add related identifiers from PID relations."""
        result = super(ZenodoDataCite41Serializer, self).preprocess_record(
            pid, record, links_factory=links_factory
        )
        # Fold in versioning relations derived from the PID relations.
        result = preprocess_related_identifiers(pid, record, result)
        # Record the landing-page URL among the alternate identifiers.
        landing_page = ui_link_for('record_html', id=str(record['recid']))
        identifiers = result['metadata'].get('alternate_identifiers', [])
        identifiers.append({'identifier': landing_page, 'scheme': 'url'})
        result['metadata']['alternate_identifiers'] = identifiers
        return result
| 3,174 | Python | .tac | 70 | 40 | 78 | 0.721539 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,658 | datacite.py | zenodo_zenodo/zenodo/modules/records/serializers/schemas/datacite.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016-2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record serialization."""
from __future__ import absolute_import, print_function, unicode_literals
import json
import re
import arrow
import pycountry
from flask import current_app
from marshmallow import Schema, fields, missing, post_dump
from ...models import ObjectType
from ...utils import is_doi_locally_managed
from .common import ui_link_for
class PersonSchema(Schema):
    """Common schema for creators and contributors."""

    affiliation = fields.Str()
    nameIdentifier = fields.Method('get_nameidentifier')

    def get_nameidentifier(self, obj):
        """Build the DataCite nameIdentifier, preferring ORCID over GND."""
        orcid = obj.get('orcid')
        if orcid:
            return {
                "nameIdentifier": orcid,
                "nameIdentifierScheme": "ORCID",
                "schemeURI": "http://orcid.org/"
            }
        gnd = obj.get('gnd')
        if gnd:
            # GND identifiers carry no schemeURI.
            return {
                "nameIdentifier": gnd,
                "nameIdentifierScheme": "GND",
            }
        return {}
class CreatorSchema(PersonSchema):
    """Creator schema."""

    # Maps the record's internal ``name`` field to DataCite's ``creatorName``.
    creatorName = fields.Str(attribute='name')
class ContributorSchema(PersonSchema):
    """Contributor schema."""

    # Internal ``name``/``type`` fields mapped to the DataCite element names.
    contributorName = fields.Str(attribute='name')
    contributorType = fields.Str(attribute='type')
class TitleSchema(Schema):
    """Title schema."""

    # Serialized verbatim; no titleType is emitted for the main title.
    title = fields.Str()
class DateSchema(Schema):
    """Date schema."""

    # Date types accepted from a record's custom date intervals; entries with
    # other types are dropped by ``DataCiteSchema.get_dates``.
    VALID_DATE_TYPES = {
        'Accepted',
        'Available',
        'Copyrighted',
        'Collected',
        'Created',
        'Issued',
        'Submitted',
        'Updated',
        'Valid',
    }

    date = fields.Str(attribute='date')
    dateType = fields.Str(attribute='type')
class AlternateIdentifierSchema(Schema):
    """Alternate identifiers schema."""

    # Internal ``identifier``/``scheme`` keys mapped to DataCite element names.
    alternateIdentifier = fields.Str(attribute='identifier')
    alternateIdentifierType = fields.Str(attribute='scheme')
class RelatedIdentifierSchema(Schema):
    """Related identifiers schema."""

    relatedIdentifier = fields.Str(attribute='identifier')
    relatedIdentifierType = fields.Method('get_type')
    relationType = fields.Function(
        lambda o: o['relation'][0].upper() + o['relation'][1:])
    resourceTypeGeneral = fields.Method(
        'get_resource_type', attribute='resource_type')

    def get_type(self, obj):
        """Map an internal scheme to its DataCite relatedIdentifierType."""
        scheme = obj['scheme']
        # A few schemes have a DataCite spelling that is not simply the
        # upper-cased scheme name.
        special_spellings = {
            'handle': 'Handle',
            'ads': 'bibcode',
            'arxiv': 'arXiv',
        }
        if scheme in special_spellings:
            return special_spellings[scheme]
        return scheme.upper()

    def get_resource_type(self, obj):
        """Return the DataCite general resource type, if one is set."""
        resource_type = obj.get('resource_type')
        if not resource_type:
            return missing
        mapped = ObjectType.get_by_dict(resource_type)
        return mapped['datacite']['general']
class DataCiteSchema(Schema):
    """Base class for schemas.

    Maps a Zenodo record (``obj['metadata']``) onto the DataCite metadata
    element names. Version-specific subclasses add creators/contributors
    and location handling.
    """

    # Subclasses may override this to use a version-specific date schema.
    DATE_SCHEMA = DateSchema

    identifier = fields.Method('get_identifier', attribute='metadata.doi')
    titles = fields.List(fields.Nested(TitleSchema), attribute='metadata')
    publisher = fields.Constant('Zenodo')
    publicationYear = fields.Function(
        lambda o: str(arrow.get(o['metadata']['publication_date']).year))
    subjects = fields.Method('get_subjects')
    dates = fields.Method('get_dates')
    language = fields.Method('get_language')
    geoLocations = fields.Method('get_locations')
    version = fields.Str(attribute='metadata.version')
    resourceType = fields.Method('get_type')
    alternateIdentifiers = fields.List(
        fields.Nested(AlternateIdentifierSchema),
        attribute='metadata.alternate_identifiers',
    )
    relatedIdentifiers = fields.Method('get_related_identifiers')
    rightsList = fields.Method('get_rights')
    descriptions = fields.Method('get_descriptions')

    @post_dump
    def cleanup(self, data):
        """Clean the data."""
        # Remove the language if Alpha-2 code was not found
        if 'language' in data and data['language'] is None:
            del data['language']
        return data

    def get_identifier(self, obj):
        """Get record main identifier.

        Locally managed DOIs are exported as-is; externally managed DOIs are
        replaced by the record's landing-page URL (the external DOI itself is
        emitted as a related identifier instead).
        """
        doi = obj['metadata'].get('doi', '')
        if is_doi_locally_managed(doi):
            return {
                'identifier': doi,
                'identifierType': 'DOI'
            }
        else:
            recid = obj.get('metadata', {}).get('recid', '')
            return {
                'identifier': ui_link_for('record_html', id=recid),
                'identifierType': 'URL',
            }

    def get_language(self, obj):
        """Export language to the Alpha-2 code (if available).

        Returns ``None`` when the record has no language or pycountry has no
        Alpha-2 code for it; ``cleanup`` then drops the field entirely.
        """
        lang = obj['metadata'].get('language', None)
        if lang:
            lang_res = pycountry.languages.get(alpha_3=lang)
            if not lang_res or not hasattr(lang_res, 'alpha_2'):
                return None
            return lang_res.alpha_2
        return None

    def get_descriptions(self, obj):
        """Get descriptions.

        Emits the abstract, notes, raw references (as a JSON blob) and
        methods, each truncated to ``DATACITE_MAX_DESCRIPTION_SIZE``.
        """
        items = []
        desc = obj['metadata']['description']
        max_descr_size = current_app.config.get(
            'DATACITE_MAX_DESCRIPTION_SIZE', 20000)
        if desc:
            items.append({
                'description': desc[:max_descr_size],
                'descriptionType': 'Abstract'
            })
        notes = obj['metadata'].get('notes')
        if notes:
            items.append({
                'description': notes[:max_descr_size],
                'descriptionType': 'Other'
            })
        refs = obj['metadata'].get('references')
        if refs:
            # References are packed into a single JSON object so they survive
            # as one "Other" description.
            items.append({
                'description': json.dumps({
                    'references': [
                        r['raw_reference'] for r in refs
                        if 'raw_reference' in r]
                })[:max_descr_size],
                'descriptionType': 'Other'
            })
        method = obj['metadata'].get('method')
        if method:
            items.append({
                'description': method[:max_descr_size],
                'descriptionType': 'Methods'
            })
        return items

    def get_rights(self, obj):
        """Get rights.

        Emits the record's license (when both URL and title are present) plus
        an info:eu-repo access-rights statement.
        """
        items = []
        # license
        license_url = obj['metadata'].get('license', {}).get('url')
        license_text = obj['metadata'].get('license', {}).get('title')
        if license_url and license_text:
            items.append({
                'rightsURI': license_url,
                'rights': license_text,
            })
        # info:eu-repo
        items.append({
            'rightsURI': 'info:eu-repo/semantics/{}Access'.format(
                obj['metadata']['access_right']),
            # .title() turns e.g. "open access" into "Open Access".
            'rights': '{0} access'.format(
                obj['metadata']['access_right']).title()
        })
        return items

    def get_type(self, obj):
        """Resource type."""
        t = ObjectType.get_by_dict(obj['metadata']['resource_type'])
        type_ = {
            'resourceTypeGeneral': t['datacite']['general'],
            'resourceType': t['datacite'].get('type'),
        }
        oa_type = ObjectType.get_openaire_subtype(obj['metadata'])
        # NOTE: This overwrites the resourceType if the configuration
        # of the OpenAIRE subtypes overlaps with regular subtypes.
        if oa_type:
            type_['resourceType'] = oa_type
        return type_

    def get_related_identifiers(self, obj):
        """Related identifiers.

        Exports related identifiers whose scheme DataCite accepts, an
        IsIdenticalTo link for externally managed DOIs, and an IsPartOf link
        per Zenodo community.
        """
        accepted_types = [
            'doi', 'ark', 'ean13', 'eissn', 'handle', 'isbn', 'issn', 'istc',
            'lissn', 'lsid', 'purl', 'upc', 'url', 'urn', 'ads', 'arxiv',
            'bibcode',
        ]
        s = RelatedIdentifierSchema()
        items = []
        for r in obj['metadata'].get('related_identifiers', []):
            if r['scheme'] in accepted_types:
                items.append(s.dump(r).data)
        doi = obj['metadata'].get('doi', '')
        if not is_doi_locally_managed(doi):
            # External DOI: the main identifier is the landing-page URL, so
            # link the DOI here instead (see get_identifier).
            items.append(s.dump({
                'identifier': doi,
                'scheme': 'doi',
                'relation': 'IsIdenticalTo',
            }).data)
        # Zenodo community identifiers
        for comm in obj['metadata'].get('communities', []):
            items.append(s.dump({
                'identifier': ui_link_for('community', id=comm),
                'scheme': 'url',
                'relation': 'IsPartOf',
            }).data)
        return items

    def get_subjects(self, obj):
        """Get subjects.

        Free-text keywords become plain subjects; controlled subjects carry
        their scheme.
        """
        items = []
        for s in obj['metadata'].get('keywords', []):
            items.append({'subject': s})
        for s in obj['metadata'].get('subjects', []):
            items.append({
                'subject': s['identifier'],
                'subjectScheme': s['scheme'],
            })
        return items

    def get_dates(self, obj):
        """Get dates from record.

        Embargoed records emit the embargo lift date as "Available" and the
        publication date as "Accepted"; all other records emit the
        publication date as "Issued". Custom date intervals with a type in
        ``DATE_SCHEMA.VALID_DATE_TYPES`` are appended, collapsed to a single
        date when start == end, otherwise as a "start/end" range.
        """
        schema_cls = self.DATE_SCHEMA
        schema = schema_cls()
        dates = []
        if obj['metadata']['access_right'] == 'embargoed' and \
                obj['metadata'].get('embargo_date'):
            dates.append(schema.dump(dict(
                date=obj['metadata']['embargo_date'],
                type='Available')).data)
            dates.append(schema.dump(dict(
                date=obj['metadata']['publication_date'],
                type='Accepted')).data)
        else:
            dates.append(schema.dump(dict(
                date=obj['metadata']['publication_date'],
                type='Issued')).data)
        for interval in obj['metadata'].get('dates', []):
            date_type = interval.get('type')
            if date_type in schema.VALID_DATE_TYPES:
                start = interval.get('start') or ''
                end = interval.get('end') or ''
                if start != '' and end != '' and start == end:
                    dates.append(schema.dump(dict(
                        date=start,
                        type=date_type,
                        info=interval.get('description', missing))).data)
                else:
                    dates.append(schema.dump(dict(
                        date=start + '/' + end,
                        type=date_type,
                        info=interval.get('description', missing))).data)
        return dates
class DataCiteSchemaV1(DataCiteSchema):
    """Schema for records v1 in JSON (DataCite v3)."""

    creators = fields.List(
        fields.Nested(CreatorSchema),
        attribute='metadata.creators')
    contributors = fields.Method('get_contributors')

    def get_contributors(self, obj):
        """Serialize contributors, thesis supervisors and grant funders."""
        def inject_type(c):
            # Return a typed copy instead of mutating the record metadata.
            return dict(c, type='Supervisor')

        # Contributors and supervisors.  Copy the list so the record's own
        # 'contributors' list is not extended in place.
        s = ContributorSchema()
        contributors = list(obj['metadata'].get('contributors', []))
        contributors.extend([
            inject_type(c) for c in
            obj['metadata'].get('thesis_supervisors', [])
        ])
        items = []
        for c in contributors:
            items.append(s.dump(c).data)
        # Grants: funders carrying an OpenAIRE/eurepo identifier are
        # exported as 'Funder' contributors.
        for g in obj['metadata'].get('grants', []):
            funder = g.get('funder', {}).get('name')
            eurepo = g.get('identifiers', {}).get('eurepo')
            if funder and eurepo:
                items.append({
                    'contributorName': funder,
                    'contributorType': 'Funder',
                    'nameIdentifier': {
                        'nameIdentifier': eurepo,
                        'nameIdentifierScheme': 'info',
                    },
                })
        return items

    def get_locations(self, obj):
        """Serialize locations; points use the DataCite v3 'lat lon' form."""
        locations = []
        for l in obj['metadata'].get('locations', []):
            location = {'geoLocationPlace': l['place']}
            # Compare against None so valid zero coordinates (equator /
            # prime meridian) are not dropped by truthiness.
            if l.get('lat') is not None and l.get('lon') is not None:
                location['geoLocationPoint'] = '{} {}'\
                    .format(l['lat'], l['lon'])
            locations.append(location)
        return locations or missing

    def get_related_identifiers(self, obj):
        """Related identifiers, downgraded to DataCite v3 relations.

        DataCite v3 has no version relations, so ``IsVersionOf`` and
        ``HasVersion`` are mapped to ``IsPartOf``/``HasPart``.
        """
        items = super(DataCiteSchemaV1, self).get_related_identifiers(obj)
        for item in items:
            # Use .get() so entries without a relationType key don't raise.
            relation = item.get('relationType')
            if relation == 'IsVersionOf':
                item['relationType'] = 'IsPartOf'
            elif relation == 'HasVersion':
                item['relationType'] = 'HasPart'
        return items
class PersonSchemav4(Schema):
    """Creator/contributor common schema for v4.

    Splits a 'Family, Given' name into DataCite name parts and collects
    ORCID/GND name identifiers.
    """

    affiliations = fields.List(
        fields.Str(),
        attribute='affiliation')
    nameIdentifiers = fields.Method('get_nameidentifiers')
    familyName = fields.Method('get_familyname')
    givenName = fields.Method('get_givennames')

    def get_familyname(self, obj):
        """Family name: the part before the comma in 'Family, Given'."""
        full_name = obj.get('name')
        if not full_name:
            return ''
        parts = full_name.split(',')
        return parts[0].strip() if len(parts) == 2 else ''

    def get_givennames(self, obj):
        """Given name: the part after the comma in 'Family, Given'."""
        full_name = obj.get('name')
        if not full_name:
            return ''
        parts = full_name.split(',')
        return parts[1].strip() if len(parts) == 2 else ''

    def get_nameidentifiers(self, obj):
        """Collect ORCID and GND name identifiers when present."""
        identifiers = []
        if obj.get('orcid'):
            identifiers.append({
                "nameIdentifier": obj.get('orcid'),
                "nameIdentifierScheme": "ORCID",
                "schemeURI": "http://orcid.org/"
            })
        if obj.get('gnd'):
            identifiers.append({
                "nameIdentifier": obj.get('gnd'),
                "nameIdentifierScheme": "GND",
            })
        return identifiers
class CreatorSchemav4(PersonSchemav4):
    """Creator schema for v4."""

    # DataCite v4 creatorName, taken verbatim from the record's 'name'.
    creatorName = fields.Str(attribute='name')
class ContributorSchemav4(PersonSchemav4):
    """Contributor schema for v4."""

    # Display name and DataCite contributorType (e.g. 'Supervisor'),
    # mapped from the input's 'name'/'type' keys.
    contributorName = fields.Str(attribute='name')
    contributorType = fields.Str(attribute='type')
class DateSchemaV4(DateSchema):
    """Date schema for v4."""

    # Date types accepted by DataCite v4; get_dates() skips metadata date
    # intervals whose type is not in this set.
    VALID_DATE_TYPES = {
        'Accepted',
        'Available',
        'Copyrighted',
        'Collected',
        'Created',
        'Issued',
        'Submitted',
        'Updated',
        'Valid',
        'Withdrawn',
        'Other',
    }

    # Free-text note serialized as DataCite 'dateInformation'.
    dateInformation = fields.Str(attribute='info')
class DataCiteSchemaV4(DataCiteSchema):
    """Schema for records v4 in JSON (DataCite v4)."""

    DATE_SCHEMA = DateSchemaV4

    creators = fields.List(
        fields.Nested(CreatorSchemav4),
        attribute='metadata.creators')
    contributors = fields.Method('get_contributors')
    fundingReferences = fields.Method('get_fundingreferences')

    def get_contributors(self, obj):
        """Serialize contributors plus thesis supervisors."""
        def inject_type(c):
            # Return a typed copy instead of mutating the record metadata.
            return dict(c, type='Supervisor')

        # Contributors and supervisors.  Copy the list so the record's own
        # 'contributors' list is not extended in place.
        s = ContributorSchemav4()
        contributors = list(obj['metadata'].get('contributors', []))
        contributors.extend([
            inject_type(c) for c in
            obj['metadata'].get('thesis', {}).get('supervisors', [])
        ])
        items = []
        for c in contributors:
            items.append(s.dump(c).data)
        return items

    def get_fundingreferences(self, obj):
        """Serialize grants with a eurepo identifier as fundingReferences."""
        items = []
        # Loop-invariant: fetch the funder/program regex table once instead
        # of once per grant.
        funder_program_matches = current_app.config.get(
            'ZENODO_RECORDS_FUNDER_PROGRAM_MATCHES', {})
        for g in obj['metadata'].get('grants', []):
            funder_name = g.get('funder', {}).get('name')
            funder_identifier = g.get('funder', {}).get('doi')
            award_program = g.get('program')
            if award_program:
                # Override the funder DOI when the award program matches a
                # configured funder-specific pattern.
                for funder_id, regexes in funder_program_matches.items():
                    if any(re.match(r, award_program) for r in regexes):
                        funder_identifier = funder_id
            award_number = g.get('code')
            award_title = g.get('title')
            eurepo = g.get('identifiers', {}).get('eurepo')
            if funder_name and eurepo:
                items.append({
                    'funderName': funder_name,
                    'funderIdentifier': {
                        'funderIdentifier': funder_identifier,
                        'funderIdentifierType': 'Crossref Funder ID',
                    },
                    'awardNumber': {
                        'awardNumber': award_number,
                        'awardURI': eurepo
                    },
                    'awardTitle': award_title
                })
        return items

    def get_locations(self, obj):
        """Serialize locations; points use v4 geoLocationPoint objects."""
        locations = []
        for l in obj['metadata'].get('locations', []):
            location = {'geoLocationPlace': l['place']}
            # Compare against None so valid zero coordinates (equator /
            # prime meridian) are not dropped by truthiness.
            if l.get('lat') is not None and l.get('lon') is not None:
                location['geoLocationPoint'] = {
                    'pointLongitude': l['lon'],
                    'pointLatitude': l['lat']
                }
            locations.append(location)
        return locations or missing
| 18,409 | Python | .tac | 478 | 28.069038 | 78 | 0.556883 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,659 | uwsgi.ini | zenodo_zenodo/docker/uwsgi/uwsgi.ini | [uwsgi]
plugin = python
master = true
die-on-term = true
# Stats server is local-only; the application socket listens on all interfaces.
stats = 127.0.0.1:9000
socket = 0.0.0.0:5000
safe-pidfile = /usr/local/var/run/uwsgi.pid
chmod-socket = 666
# Remove sockets/pidfile on exit.
vacuum = true
buffer-size = 65535
# Socket timeouts in seconds; several option aliases are set to the same
# value so the timeout applies regardless of uWSGI version naming.
socket-timeout = 60
socket-write-timeout = 60
so-write-timeout = 60
so-send-timeout = 60
socket-send-timeout = 60
# # Silence write errors for misbehaving clients
# # https://github.com/getsentry/raven-python/issues/732
ignore-sigpipe = true
ignore-write-errors = true
disable-write-exception = true
processes = 2
threads = 2
thunder-lock = true
# WSGI entry point: zenodo.wsgi:application
module = zenodo.wsgi
callable = application
virtualenv = /usr/local/
| 619 | Python | .wsgi | 26 | 22.615385 | 56 | 0.777211 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,660 | test_wsgi.py | zenodo_zenodo/tests/unit/default/test_wsgi.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo module test cases."""
from __future__ import absolute_import, print_function
def test_wsgi():
"""Test WSGI application."""
from zenodo.wsgi import application
assert application
| 1,171 | Python | .wsgi | 29 | 38.827586 | 76 | 0.766257 | zenodo/zenodo | 906 | 241 | 543 | GPL-2.0 | 9/5/2024, 5:13:26 PM (Europe/Amsterdam) |
23,661 | copyright | truenas_middleware/debian/debian/copyright | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: truenas
Source: http://www.truenas.com
Files: *
Copyright: 2019 iXsystems Inc
2019 William Grzybowski <william@grzy.org>
License: BSD-3-Clause
Files: debian/*
Copyright: 2019 William Grzybowski <william@grzy.org>
License: BSD-3-Clause
License: BSD-3-Clause
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE HOLDERS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1,813 | Python | .py | 34 | 50.794118 | 74 | 0.804617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,662 | ix-boot-core.py | truenas_middleware/src/freenas/usr/bin/ix-boot-core.py | #!/usr/bin/python3
import subprocess
import sys
if __name__ == "__main__":
    # Prefer the modern boot pool name; fall back to the legacy one when
    # `zfs list` cannot find it.
    probe = subprocess.run(["zfs", "list", "boot-pool"], capture_output=True)
    boot_pool = "boot-pool" if probe.returncode == 0 else "freenas-boot"
    # Find the first boot environment dataset tagged truenas:12=1.
    listing = subprocess.run(
        ["zfs", "list", "-H", "-o", "name,truenas:12", "-r", f"{boot_pool}/ROOT"],
        capture_output=True, text=True, check=True,
    )
    name = None
    for entry in listing.stdout.splitlines():
        dataset, tag = entry.split("\t")
        if tag == "1":
            name = dataset
            break
    if name is None:
        sys.stderr.write(f"No dataset with truenas:12=1 found on {boot_pool}\n")
        sys.exit(1)
    subprocess.run(["zpool", "set", f"bootfs={name}", boot_pool], check=True)
    # Mount is best-effort (no check=True): failures here are ignored.
    subprocess.run(["mount", "-t", "zfs", f"{boot_pool}/grub", "/boot/grub"])
    subprocess.run(["update-grub"], check=True)
    subprocess.run(["reboot"], check=True)
| 876 | Python | .py | 21 | 35.571429 | 89 | 0.59577 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,663 | smart_log.chart.py | truenas_middleware/src/freenas/usr/lib/netdata/python.d/smart_log.chart.py | # -*- coding: utf-8 -*-
# Description: smart netdata python.d module
# Author: ilyam8, vorph1
# SPDX-License-Identifier: GPL-3.0-or-later
import contextlib
import os
import re
from copy import deepcopy
from pathlib import Path
from time import time
from bases.FrameworkServices.SimpleService import SimpleService
from bases.collection import read_last_line
# Netdata dimension algorithm names.
ABSOLUTE = 'absolute'
# Substring identifying ATA smartd log file names.
ATA = 'ata'
# SMART attributes used for temperature: 194 (Temperature_Celsius),
# 190 (Airflow_Temperature_Cel), and the 'temperature' attribute that
# smartd emits for SCSI devices.
ATTR194 = '194'
ATTR190 = '190'
ATTR_TEMPERATURE = 'temperature'
CSV = '.csv'
# Consider a log file stale after this many minutes without updates.
DEF_AGE = 30
DEF_PATH = '/var/log/smartd'
# Rescan for added/removed disks every N collection runs.
DEF_RESCAN_INTERVAL = 60
INCREMENTAL = 'incremental'
# smartd ATA CSV entry: "<id>;<normalized>;<raw>".
RE_ATA = re.compile(
    r'(\d+);' # attribute
    r'(\d+);' # normalized value
    r'(\d+)', # raw value
    re.X
)
# smartd SCSI CSV entry: "<name>;<value>".
RE_SCSI = re.compile(
    r'([a-z-]+);' # attribute
    r'([0-9.]+)', # raw value
    re.X
)
# Substring identifying SCSI smartd log file names.
SCSI = 'scsi'
# Netdata specific
CHARTS = {}
ORDER = []
def get_nvme_disks():
    """Discover NVMe devices exposed through sysfs hwmon.

    Returns a list of ``(device_name, hwmon_sysfs_path)`` tuples; at most
    one device entry is taken per hwmon whose sensor name is 'nvme'.
    """
    found = []
    for hwmon in Path('/sys/class/hwmon/').glob('*'):
        try:
            sensor_name = (hwmon / 'name').read_text().strip()
        except FileNotFoundError:
            continue
        if sensor_name != 'nvme':
            continue
        try:
            for entry in (hwmon / 'device').iterdir():
                if entry.is_dir() and entry.name.startswith('nvme'):
                    found.append((entry.name, str(hwmon)))
                    break
        except FileNotFoundError:
            continue
    return found
class BaseAtaSmartAttribute:
    """One parsed ATA SMART attribute: id, normalized and raw value."""

    def __init__(self, name, normalized_value, raw_value):
        self.name, self.normalized_value, self.raw_value = (
            name, normalized_value, raw_value)

    def value(self):
        """Value to chart for this attribute; defined by subclasses."""
        raise NotImplementedError
class BaseNvmeSmartValue:
    """One NVMe reading; the attribute name is always 'temperature'."""

    def __init__(self, raw_value):
        self.name = 'temperature'
        self.raw_value = raw_value

    def value(self):
        """Value to chart for this reading; defined by subclasses."""
        raise NotImplementedError
class NvmeRaw(BaseNvmeSmartValue):
    """NVMe temperature reported as-is."""

    def value(self):
        """Return the raw reading unchanged."""
        return self.raw_value
class AtaRaw(BaseAtaSmartAttribute):
    """ATA attribute charted by its raw column."""

    def value(self):
        """Return the raw value unchanged."""
        return self.raw_value
class AtaNormalized(BaseAtaSmartAttribute):
    """ATA attribute charted by its normalized column."""

    def value(self):
        """Return the normalized value unchanged."""
        return self.normalized_value
class Ata190(BaseAtaSmartAttribute):
    """Attribute 190 (Airflow_Temperature_Cel)."""

    def value(self):
        # logic copied over from upstream
        # https://github.com/netdata/netdata/blob/c9f92a691c38b7bc4c55804738fb55023597a746/src/collectors
        # /python.d.plugin/smartd_log/smartd_log.chart.py#L494
        # The normalized column stores (100 - Celsius); invert it back.
        return 100 - int(self.normalized_value)
class Ata194(BaseAtaSmartAttribute):
    """Attribute 194 (Temperature_Celsius)."""

    # https://github.com/netdata/netdata/issues/3041
    # https://github.com/netdata/netdata/issues/5919
    #
    # The low byte is the current temperature, the third lowest is the maximum, and the fifth lowest is the minimum
    def value(self):
        """Return the current drive temperature in Celsius."""
        value = int(self.raw_value)
        if value > 1e6:
            # Packed raw value (min/max/current in one integer): the
            # current temperature is the low byte.
            return value & 0xFF
        # Some drives report the temperature in the normalized column
        # instead; take the smaller of the two readings.
        return min(int(self.normalized_value), int(self.raw_value))
class BaseSCSISmartAttribute:
    """One parsed SCSI SMART attribute: name and raw value."""

    def __init__(self, name, raw_value):
        self.name, self.raw_value = name, raw_value

    def value(self):
        """Value to chart for this attribute; defined by subclasses."""
        raise NotImplementedError
class SCSIRaw(BaseSCSISmartAttribute):
    """SCSI attribute reported as-is."""

    def value(self):
        """Return the raw value unchanged."""
        return self.raw_value
def ata_attribute_factory(value):
    """Build a temperature attribute for ATA attrs 194/190.

    ``value`` is a parsed (id, normalized, raw) triplet; any attribute id
    other than 194/190 yields None.
    """
    handlers = {ATTR194: Ata194, ATTR190: Ata190}
    handler = handlers.get(value[0])
    return handler(*value) if handler else None
def scsi_attribute_factory(value):
    """Wrap a parsed (name, raw_value) SCSI pair in a SCSIRaw attribute."""
    return SCSIRaw(*value)
def attribute_factory(value):
    """Dispatch a parsed attribute tuple to the ATA or SCSI factory.

    Numeric attribute names come from ATA log lines; anything else is
    treated as a SCSI attribute.
    """
    factory = (ata_attribute_factory if value[0].isdigit()
               else scsi_attribute_factory)
    return factory(value)
def get_temperature_attr(all_temp_attr):
    """Pick the preferred temperature attribute from the parsed set.

    Preference order: ATA attribute 194, then 190, then the SCSI
    'temperature' attribute.  Returns None when none is present.
    """
    for preferred in (ATTR194, ATTR190, ATTR_TEMPERATURE):
        if preferred in all_temp_attr:
            return all_temp_attr[preferred]
def handle_error(*errors):
    """Decorator factory: return None when the wrapped call raises *errors*."""
    def decorate(func):
        def wrapper(*args):
            try:
                return func(*args)
            except errors:
                return None
        return wrapper
    return decorate
class DiskLogFile:
    """Tracks one smartd CSV log file and reads its most recent line.

    All accessors are wrapped by @handle_error(OSError) and therefore
    return None when the file disappears or becomes unreadable.
    """

    def __init__(self, full_path):
        self.path = full_path
        self.size = os.path.getsize(full_path)

    @handle_error(OSError)
    def is_changed(self):
        """True when the on-disk size differs from the last seen size."""
        current_size = os.path.getsize(self.path)
        return current_size != self.size

    @handle_error(OSError)
    def is_active(self, current_time, limit):
        """True when the file was modified within *limit* minutes."""
        age_minutes = (current_time - os.path.getmtime(self.path)) / 60
        return age_minutes < limit

    @handle_error(OSError)
    def read(self):
        """Record the current size and return the file's last line."""
        self.size = os.path.getsize(self.path)
        return read_last_line(self.path)
class BaseDisk:
    """Common state and behaviour for one monitored disk.

    Equality is defined on ``raw_name`` and also holds against plain
    strings, so ``name in disks`` and ``disks.remove(raw_name)`` work on
    lists of disks.
    """

    def __init__(self, name, log_file):
        self.raw_name = name
        # Chart dimensions use the short name (anything after the last '-').
        self.name = name.rsplit('-', 1)[-1]
        self.log_file = log_file
        self.attrs = list()
        self.alive = True
        self.charted = False

    def __eq__(self, other):
        if isinstance(other, BaseDisk):
            return self.raw_name == other.raw_name
        return self.raw_name == other

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Hash the identity key itself.  The previous hash(repr(self)) used
        # the default repr (which embeds the instance address), so two equal
        # disks hashed differently -- inconsistent with __eq__.
        return hash(self.raw_name)

    def parser(self, data):
        """Parse a raw log line into attribute tuples; subclass hook."""
        raise NotImplementedError

    @handle_error(TypeError)
    def populate_attrs(self):
        """Re-read the log and keep only the preferred temperature attr.

        Returns the number of collected attributes (0 or 1), or None when
        the log line could not be read (the resulting TypeError is
        swallowed by @handle_error).
        """
        self.attrs = list()
        line = self.log_file.read()
        all_attrs = {}
        for value in self.parser(line):
            if (attr := attribute_factory(value)) and attr.name in [ATTR194, ATTR190, ATTR_TEMPERATURE]:
                all_attrs[attr.name] = attr
        if attr := get_temperature_attr(all_attrs):
            self.attrs.append(attr)
        return len(self.attrs)

    def data(self):
        """Map the chart dimension name to the current temperature."""
        data = dict()
        for attr in self.attrs:
            data[self.name] = attr.value()
        return data
class ATADisk(BaseDisk):
    """Disk whose smartd log lines use the ATA attribute format."""

    def parser(self, data):
        """Extract (id, normalized, raw) triplets from a log line."""
        return RE_ATA.findall(data)
class SCSIDisk(BaseDisk):
    """Disk whose smartd log lines use the SCSI attribute format."""

    def parser(self, data):
        """Extract (name, value) pairs from a log line."""
        return RE_SCSI.findall(data)
class NVMEDisk(BaseDisk):
    """NVMe disk read directly from sysfs hwmon instead of smartd logs."""

    def __init__(self, name, hwmon_path):
        # NVMe temperatures come from sysfs, so there is no log file.
        super().__init__(name, None)
        self.hwmon_path = hwmon_path

    def parser(self, data):
        """No log parsing for NVMe; always returns None."""
        return

    def read_nvme_temp(self):
        """Read temp1_input (millidegrees C) and return whole degrees.

        Best effort: any failure yields 0 rather than raising.
        """
        sensor = Path(self.hwmon_path) / 'temp1_input'
        try:
            millidegrees = int(sensor.read_text())
        except Exception:
            return 0
        return round(millidegrees * 0.001)

    @handle_error(TypeError)
    def populate_attrs(self):
        """Replace attrs with a single NvmeRaw temperature reading."""
        reading = NvmeRaw(str(self.read_nvme_temp()))
        self.attrs = [reading]
        return len(self.attrs)

    def data(self):
        """Map the chart dimension name to the current temperature."""
        return {self.name: attr.value() for attr in self.attrs}
class Service(SimpleService):
    """Netdata service charting per-disk temperatures.

    ATA/SCSI temperatures are parsed from smartd's CSV logs under
    ``log_path``; NVMe temperatures are read from sysfs hwmon.  Charts and
    dimensions are created lazily as disks appear.
    """

    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = deepcopy(CHARTS)
        self.log_path = configuration.get('log_path', DEF_PATH)
        self.age = configuration.get('age', DEF_AGE)
        self.exclude = configuration.get('exclude_disks', str()).split()
        self.disks = list()
        self.runs = 0
        # Start at self.age so attributes are populated on the first run.
        self.update_data = self.age
        self.do_force_rescan = False
        # smartd daemon only queries drive temps every 30mins so the files won't be updated
        # but once every 30ish minutes - we should change this if at any point we change smartd interval
        # We now run this every minute but use cached data for 30 minutes
        self.update_every = 60

    def check(self):
        """Module is viable when at least one disk is discovered."""
        return self.scan() > 0

    def get_chart_name(self, disk):
        """Chart id for a disk, e.g. 'disktemp.sda'."""
        return f'disktemp.{disk.name}'

    def get_data(self):
        """Collect one sample for every live disk.

        Disks whose log file vanished or whose line no longer parses are
        marked not-alive and trigger a rescan on the next iteration.
        """
        self.runs += 1
        self.update_data += 1
        if self.do_force_rescan or self.runs % DEF_RESCAN_INTERVAL == 0:
            self.cleanup()
            self.scan()
            self.do_force_rescan = False
        data = dict()
        for disk in self.disks:
            if not disk.alive:
                continue
            self.add_disk_to_charts(disk)
            changed = disk.log_file.is_changed() if disk.log_file else False
            # is_changed() returns None when the log file disappeared.
            if changed is None and disk.log_file is not None:
                disk.alive = False
                self.do_force_rescan = True
                continue
            # Only re-read attributes once the cache interval has elapsed.
            if self.update_data >= self.age and not disk.populate_attrs():
                disk.alive = False
                self.do_force_rescan = True
                continue
            data.update(disk.data())
        if not self.do_force_rescan and self.update_data >= self.age:
            self.update_data = 0
        return data

    def cleanup(self):
        """Drop dead or stale log-backed disks (NVMe disks never expire)."""
        current_time = time()
        for disk in filter(lambda d: d.log_file, self.disks[:]):
            if any(
                    [
                        not disk.alive,
                        not disk.log_file.is_active(current_time, self.age),
                    ]
            ):
                self.disks.remove(disk.raw_name)
                self.remove_disk_from_charts(disk)

    def scan(self):
        """Discover smartd CSV logs and NVMe hwmon entries."""
        self.debug('scanning {0}'.format(self.log_path))
        current_time = time()
        with contextlib.suppress(FileNotFoundError):
            # log path will not exist in an all flash system
            for path in Path(self.log_path).iterdir():
                disk = self.create_disk_from_file(str(path), current_time)
                if not disk:
                    continue
                self.disks.append(disk)
        for nvme_name, nvme_path in get_nvme_disks():
            # Guard against re-adding the same NVMe disk on periodic rescans:
            # cleanup() never removes log-less disks, so an unconditional
            # append would accumulate duplicates.
            if nvme_name not in self.disks:
                self.disks.append(NVMEDisk(nvme_name, nvme_path))
        return len(self.disks)

    def create_disk_from_file(self, path, current_time):
        """Build an ATADisk/SCSIDisk from a smartd CSV log path.

        Returns None (and logs why) when the file is not a fresh, readable
        CSV for a new, non-excluded disk.
        """
        full_name = os.path.basename(path)
        if not full_name.endswith(CSV):
            self.debug('skipping {0}: not a csv file'.format(full_name))
            return None
        # The disk name is the third dot-separated component from the end.
        name = full_name.split('.')[-3]
        if name in self.disks:
            self.debug('skipping {0}: already in disks'.format(full_name))
            return None
        if any(p in name for p in self.exclude):
            self.debug('skipping {0}: filtered by `exclude` option'.format(full_name))
            return None
        if not os.access(path, os.R_OK):
            self.debug('skipping {0}: not readable'.format(full_name))
            return None
        if os.path.getsize(path) == 0:
            self.debug('skipping {0}: zero size'.format(full_name))
            return None
        if (current_time - os.path.getmtime(path)) / 60 > self.age:
            self.debug('skipping {0}: haven\'t been updated for last {1} minutes'.format(full_name, self.age))
            return None
        if ATA in full_name:
            disk = ATADisk(name, DiskLogFile(path))
        elif SCSI in full_name:
            disk = SCSIDisk(name, DiskLogFile(path))
        else:
            self.debug('skipping {0}: unknown type'.format(full_name))
            return None
        disk.populate_attrs()
        if not disk.attrs:
            self.error('skipping {0}: parsing failed'.format(full_name))
            return None
        self.debug('added {0}'.format(full_name))
        return disk

    def add_disk_to_charts(self, disk):
        """Create the disk's temperature chart if it does not exist yet."""
        chart_name = self.get_chart_name(disk)
        if chart_name in self.charts:
            return
        disk.charted = True
        self.charts.add_chart([
            chart_name, chart_name, 'Temperature', 'celsius', 'temperature', 'smartd_log.temperature_celsius', 'line',
        ])
        self.charts[chart_name].add_dimension([disk.name])

    def remove_disk_from_charts(self, disk):
        """Remove the disk's dimension when its chart is present.

        The original guard was inverted (`if not disk.name or chart_name
        not in self.charts:` followed by the deletion), which indexed
        self.charts with a missing key and only ran on the wrong branch.
        """
        if len(self.charts) == 0 or not disk.charted:
            return
        chart_name = self.get_chart_name(disk)
        if disk.name and chart_name in self.charts:
            self.charts[chart_name].del_dimension(disk.name)
| 11,999 | Python | .py | 324 | 28.638889 | 118 | 0.613823 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,664 | truenas_disk_stats.chart.py | truenas_middleware/src/freenas/usr/lib/netdata/python.d/truenas_disk_stats.chart.py | from bases.FrameworkServices.SimpleService import SimpleService
from middlewared.utils.disks import get_disks_with_identifiers
from middlewared.utils.disk_stats import get_disk_stats
class Service(SimpleService):
    """Chart per-disk I/O throughput, IOPS and busy time.

    Disks are keyed by their middleware identifier (via
    get_disks_with_identifiers) so chart dimensions stay stable across
    device renames.
    """

    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.disk_mapping = {}

    def check(self):
        """Build the initial device -> identifier map and its charts."""
        self.disk_mapping = get_disks_with_identifiers()
        self.add_disk_to_charts(self.disk_mapping.values())
        return True

    def get_data(self):
        """Return flattened '<identifier>.<op>' -> value statistics."""
        disk_data = get_disk_stats(self.disk_mapping)
        disk_data_len = len(disk_data)
        current_disk_mapping_len = len(self.disk_mapping)
        if disk_data_len != current_disk_mapping_len:
            # This means that some disk has been added/removed
            self.disk_mapping = get_disks_with_identifiers()
            # Now that we have updated our mapping, we would like to normalize the identifier
            # for the disk which was added, for removal case we don't care as it is fine
            new_disks = []
            if disk_data_len > current_disk_mapping_len:
                # Keys not starting with '{' are raw device names (i.e. the
                # newly attached disks that have no identifier yet).
                for new_disk in filter(lambda d: not d.startswith('{'), list(disk_data)):
                    # We still use .get() here for safety, ideally we should have the identifier
                    # but for whatever reason that didn't happen, we will then report it as such
                    new_identifier = self.disk_mapping.get(new_disk, new_disk)
                    disk_data[new_identifier] = disk_data.pop(new_disk)
                    new_disks.append(new_identifier)
            self.add_disk_to_charts(new_disks)
        disks_stats = {}
        for disk_id, disks_io in disk_data.items():
            for op, value in disks_io.items():
                disks_stats[f'{disk_id}.{op}'] = value
        return disks_stats

    def add_disk_to_charts(self, disk_ids):
        """Create the io/ops/busy charts for each not-yet-charted disk."""
        for disk_id in disk_ids:
            # Membership must be checked against an actual chart id: charts
            # are registered as 'io.<disk_id>' etc., so testing the bare
            # disk_id never matched and charts were re-added on every call.
            if f'io.{disk_id}' in self.charts:
                continue
            self.charts.add_chart([
                f'io.{disk_id}', disk_id, disk_id, 'KiB/s',
                'disk.io',
                f'Read/Write for disk {disk_id}',
                'line',
            ])
            self.charts.add_chart([
                f'ops.{disk_id}', disk_id, disk_id, 'Operation/s',
                'disk.ops',
                f'Complete read/write for disk {disk_id}',
                'line',
            ])
            self.charts.add_chart([
                f'busy.{disk_id}', disk_id, disk_id, 'Milliseconds',
                'disk.busy',
                'Disk Busy Time',
                'area',
            ])
            self.charts[f'io.{disk_id}'].add_dimension([f'{disk_id}.reads', 'reads', 'incremental'])
            self.charts[f'io.{disk_id}'].add_dimension([f'{disk_id}.writes', 'writes', 'incremental'])
            self.charts[f'ops.{disk_id}'].add_dimension([f'{disk_id}.read_ops', 'read_ops', 'incremental'])
            self.charts[f'ops.{disk_id}'].add_dimension([f'{disk_id}.write_ops', 'write_ops', 'incremental'])
            self.charts[f'busy.{disk_id}'].add_dimension([f'{disk_id}.busy', 'busy', 'incremental'])
| 3,220 | Python | .py | 61 | 40.098361 | 109 | 0.576508 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,665 | cputemp.chart.py | truenas_middleware/src/freenas/usr/lib/netdata/python.d/cputemp.chart.py | from bases.FrameworkServices.SimpleService import SimpleService
from collections import defaultdict
from copy import deepcopy
from third_party import lm_sensors as sensors
from middlewared.utils.cpu import amd_cpu_temperatures, generic_cpu_temperatures, cpu_info
# lm-sensors feature type for temperature features (matches libsensors'
# SENSORS_FEATURE_TEMP == 2).
CPU_TEMPERATURE_FEAT_TYPE = 2
ORDER = [
    'temperatures',
]
# This is a prototype of chart definition which is used to dynamically create self.definitions
CHARTS = {
    'temperatures': {
        'options': [None, 'Temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
        'lines': []
    }
}
def cpu_temperatures(cpu_metrics):
    """Map raw chip readings to per-core temperatures.

    Uses the AMD-specific mapping when the k10temp chip is present,
    otherwise the generic one.
    """
    amd = cpu_metrics.get('k10temp-pci-00c3')
    return amd_cpu_temperatures(amd) if amd else generic_cpu_temperatures(cpu_metrics)
class Service(SimpleService):
    """Chart per-core CPU temperatures via libsensors."""

    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = deepcopy(ORDER)
        self.definitions = deepcopy(CHARTS)

    def get_data(self):
        """Collect raw readings from CPU chips and map them to cores.

        Returns ``{core: temperature}``; falls back to an all-zero reading
        per core when no sensor data could be collected.  (The previous
        version also kept a write-only ``seen`` accumulator and initialized
        ``data`` twice; both removed as dead code.)
        """
        cpu_data = defaultdict(dict)
        try:
            for chip in sensors.ChipIterator():
                chip_name = sensors.chip_snprintf_name(chip)
                # Only AMD (k10temp) and Intel (coretemp) CPU chips matter.
                if not any(chip_name.startswith(cpu_chip) for cpu_chip in ('k10temp-pci-00c3', 'coretemp-isa')):
                    continue
                cpu_d = {}
                for feat in sensors.FeatureIterator(chip):
                    if feat.type != CPU_TEMPERATURE_FEAT_TYPE:
                        continue
                    feat_name = str(feat.name.decode())
                    feat_label = sensors.get_label(chip, feat)
                    sub_feat = next(sensors.SubFeatureIterator(chip, feat))  # current value
                    if not sub_feat:
                        continue
                    try:
                        v = sensors.get_value(chip, sub_feat.number)
                    except sensors.SensorsError:
                        continue
                    if v is None:
                        continue
                    cpu_d[f'{chip_name}_{feat_name}'] = {'name': feat_label, 'value': v}
                cpu_data[chip_name] = cpu_d
        except sensors.SensorsError as error:
            self.error(error)
        try:
            cpu_temps = cpu_temperatures(cpu_data)
        except Exception as error:
            self.error(error)
            cpu_temps = {}
        data = {}
        for core, temp in cpu_temps.items():
            data[str(core)] = temp
        return data or {str(i): 0 for i in range(cpu_info()['core_count'])}

    def check(self):
        """Initialize libsensors and register one dimension per core."""
        try:
            sensors.init()
        except sensors.SensorsError as error:
            self.error(error)
            return False
        data = self.get_data()
        for i in data:
            self.definitions['temperatures']['lines'].append([str(i)])
        return bool(data)
| 3,003 | Python | .py | 71 | 30.816901 | 112 | 0.581499 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,666 | truenas-set-authentication-method.py | truenas_middleware/src/freenas/usr/local/bin/truenas-set-authentication-method.py | #!/usr/bin/env python3
import json
import sys
import sqlite3
from middlewared.plugins.account import ADMIN_UID, ADMIN_GID, crypted_password
from middlewared.utils.db import FREENAS_DATABASE
if __name__ == "__main__":
    # Reads {"username": ..., "password": ...} from stdin and either resets
    # the root password or creates a local administrator account directly in
    # the TrueNAS configuration database.
    authentication_method = json.loads(sys.stdin.read())
    username = authentication_method["username"]
    password = crypted_password(authentication_method["password"])
    conn = sqlite3.connect(FREENAS_DATABASE)
    conn.row_factory = sqlite3.Row
    c = conn.cursor()
    if username == "root":
        c.execute("UPDATE account_bsdusers SET bsdusr_unixhash = ? WHERE bsdusr_username = 'root'", (password,))
    else:
        home = f"/home/{username}"
        # Create the admin's primary group first so the user row can
        # reference its id.
        c.execute("""
            INSERT INTO account_bsdgroups (bsdgrp_gid, bsdgrp_group, bsdgrp_builtin, bsdgrp_smb, bsdgrp_sudo_commands,
                                           bsdgrp_sudo_commands_nopasswd)
            VALUES (?, ?, 0, 0, '[]', '[]')
        """, (ADMIN_GID, username,))
        c.execute("SELECT last_insert_rowid()")
        group_id = dict(c.fetchone())["last_insert_rowid()"]
        # Clone the root row as a template for the new admin user, then
        # override the identity/authentication fields.
        c.execute("SELECT * FROM account_bsdusers WHERE bsdusr_username = 'root'")
        user = dict(c.fetchone())
        del user["id"]
        user["bsdusr_uid"] = ADMIN_UID
        user["bsdusr_username"] = username
        user["bsdusr_unixhash"] = password
        user["bsdusr_smbhash"] = "*"
        user["bsdusr_home"] = home
        user["bsdusr_full_name"] = "Local Administrator"
        user["bsdusr_builtin"] = 0
        user["bsdusr_smb"] = 0
        user["bsdusr_password_disabled"] = 0
        user["bsdusr_ssh_password_enabled"] = 0
        user["bsdusr_locked"] = 0
        user["bsdusr_sudo_commands"] = '["ALL"]'
        user["bsdusr_group_id"] = group_id
        c.execute(f"""
            INSERT INTO account_bsdusers ({', '.join([k for k in user.keys()])})
            VALUES ({', '.join(['?' for k in user.keys()])})
        """, tuple(user.values()))
        c.execute("SELECT last_insert_rowid()")
        user_id = dict(c.fetchone())["last_insert_rowid()"]
        # Placeholder two-factor row (no secret configured yet).
        c.execute("""
            INSERT INTO account_twofactor_user_auth (secret, user_id) VALUES (?, ?)
        """, (None, user_id))
        # Grant administrator rights via builtin_administrators membership.
        c.execute("SELECT id FROM account_bsdgroups WHERE bsdgrp_group = 'builtin_administrators'")
        builtin_administrators_group_id = dict(c.fetchone())["id"]
        c.execute("""
            INSERT INTO account_bsdgroupmembership (bsdgrpmember_group_id, bsdgrpmember_user_id) VALUES (?, ?)
        """, (builtin_administrators_group_id, user_id))
        # A dedicated admin exists now, so root password login is disabled.
        c.execute("UPDATE account_bsdusers SET bsdusr_password_disabled = 1 WHERE bsdusr_username = 'root'")
    conn.commit()
    conn.close()
| 2,717 | Python | .py | 57 | 39.122807 | 118 | 0.612854 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,667 | truenas-autotune.py | truenas_middleware/src/freenas/usr/local/bin/truenas-autotune.py | #!/usr/bin/env python3
import argparse
import sys
from types import SimpleNamespace
from ixhardware import TRUENAS_UNKNOWN, get_chassis_hardware, parse_dmi
from middlewared.plugins.tunables import zfs_parameter_value
from middlewared.utils.db import query_table, update_table
# Binary size units.
KiB = 1024 ** 1
MiB = 1024 ** 2
GiB = 1024 ** 3
# NOTE(review): not referenced elsewhere in this script; kept for
# backward compatibility with anything importing it.
MIN_ZFS_RESERVED_MEM = 1 * GiB

# Registry mapping a ZFS tunable name to the function that guesses its value.
zfs_parameters = {}


def zfs_parameter(tunable_name):
    """Register the decorated guesser function under *tunable_name*."""
    def register(guesser):
        zfs_parameters[tunable_name] = guesser
        return guesser
    return register


@zfs_parameter("zfs_dirty_data_max_max")
def guess_vfs_zfs_dirty_data_max_max(context):
    """Recommend 12 GiB when the platform name starts with 'M'."""
    return 12 * GiB if context.hardware.startswith("M") else None


@zfs_parameter("l2arc_noprefetch")
def guess_vfs_zfs_l2arc_noprefetch(context):
    """Fixed recommendation: 0."""
    return 0


@zfs_parameter("l2arc_write_max")
def guess_vfs_zfs_l2arc_write_max(context):
    """Fixed recommendation: 10000000."""
    return 10000000


@zfs_parameter("l2arc_write_boost")
def guess_vfs_zfs_l2arc_write_boost(context):
    """Fixed recommendation: 40000000."""
    return 40000000
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--skip-unknown", action="store_true")
    args = parser.parse_args()
    # Identify the chassis; optionally bail out on unrecognized hardware.
    chassis_hardware = get_chassis_hardware(parse_dmi())
    if args.skip_unknown and chassis_hardware == TRUENAS_UNKNOWN:
        sys.exit(0)
    context = SimpleNamespace(hardware=chassis_hardware)
    # Collect recommendations; guessers returning None make no recommendation.
    recommendations = {}
    for knob, func in zfs_parameters.items():
        retval = func(context)
        if retval is None:
            continue
        recommendations[knob] = str(retval)
    # `overwrite` is a fixed policy switch: existing user tunables are
    # never replaced while it is False.
    overwrite = False
    changed_values = False
    qs = {i["var"]: i for i in query_table("system_tunable", prefix="tun_")}
    for var, value in recommendations.items():
        if tunable := qs.get(var, {}):
            if not overwrite:
                # Already exists and we're honoring the user setting. Move along.
                continue
            elif tunable["value"] == value:
                # We bail out here because if we set a value to what the database
                # already has we'll set changed_values = True which will
                # cause the system to be rebooted.
                continue
        comment = "Generated by autotune"
        if id_ := tunable.pop("id", None):
            update_table("UPDATE system_tunable SET tun_value = ?, tun_comment = ? WHERE id = ?", (value, comment, id_))
        else:
            orig_value = zfs_parameter_value(var)
            update_table(
                "INSERT INTO system_tunable (tun_type, tun_var, tun_value, tun_orig_value, tun_comment, tun_enabled)"
                "VALUES (?, ?, ?, ?, ?, ?)",
                ("ZFS", var, value, orig_value, comment, 1)
            )
        # If we got this far, that means the database save went through just
        # fine at least once.
        changed_values = True
    # Retire autotune-generated tunables we no longer recommend.  The
    # original list contained "zfs_vdev_sync_write_max_active" twice; a set
    # makes membership intent (and lookups) explicit.
    obsolete_vars = {
        "zfs_arc_max", "zfs_vdev_async_read_max_active",
        "zfs_vdev_sync_read_max_active", "zfs_vdev_async_write_max_active",
        "zfs_vdev_sync_write_max_active",
    }
    for tunable in qs.values():
        if tunable["comment"] == "Generated by autotune":
            if tunable["var"] in obsolete_vars:
                update_table("DELETE FROM system_tunable WHERE id = ?", (tunable["id"],))
                changed_values = True
    # Exit code 2 tells the caller a reboot is needed to apply changes.
    if changed_values:
        sys.exit(2)
23,668 | truenas-nvdimm.py | truenas_middleware/src/freenas/usr/local/bin/truenas-nvdimm.py | #!/usr/bin/env python3
import re
from subprocess import run
from packaging import version
# Lookbehind regexes applied to `dmidecode -t1` output.
VERSION = re.compile(r'(?<=Version: ).*')
PRODUCT = re.compile(r'(?<=Product Name: ).*')
# Major version number marking gen3 M-series hardware (i.e. 3).
GEN3_MIN_VERS = version.Version('3.0').major
NVDIMM_CONF_FILE = '/etc/modprobe.d/truenas-nvdimm.conf'
def parse_dmi():
    """Return (product name, version) parsed from `dmidecode -t1` output.

    Either element is '' when the corresponding field is missing.
    """
    output = run(['dmidecode', '-t1'], capture_output=True, encoding='utf8').stdout
    prod_match = PRODUCT.search(output)
    vers_match = VERSION.search(output)
    prod = prod_match.group(0) if prod_match else ''
    vers = vers_match.group(0) if vers_match else ''
    return prod, vers
def write_config(config):
    """Write the modprobe option lines to the nvdimm conf file, one per line."""
    with open(NVDIMM_CONF_FILE, 'w') as fh:
        fh.writelines(line + '\n' for line in config)
def is_m_series(prod):
    """Return True for TrueNAS M-series platforms, excluding the MINI line."""
    name = prod.lower()
    return name.startswith('truenas-m') and 'mini' not in name
def main():
    """Generate NVDIMM/NTB modprobe options for TrueNAS M-series hardware.

    Parses DMI product/version info to decide between the single-nvdimm
    (gen1/2) and dual-nvdimm (gen3) NTB configuration, then writes the
    modprobe options file. Exits silently on non M-series hardware.
    """
    try:
        prod, vers = parse_dmi()
    except Exception as e:
        print(f'Unhandled exception parsing DMI: {e}')
        return
    if not is_m_series(prod):
        # nvdimm config module is only relevant on m series.
        return
    try:
        parsed_version = version.parse(vers)
    except Exception as e:
        print(f'Unhandled exception ({e}) parsing DMI version ({vers!r})')
        return
    gen1_2 = gen3 = False
    if parsed_version.major == GEN3_MIN_VERS:
        # for now we only check to make sure that the current version is 3 because
        # we quickly found out that the SMBIOS defaults for the system-version value
        # from supermicro aren't very predictable. Since setting these values on a
        # system that doesn't support the dual-nvdimm configs leads to "no carrier"
        # on the ntb0 interface, we play it safe. The `gen3_min_vers` will need to be
        # changed as time goes on if we start tagging hardware with 4.0,5.0 etc etc
        gen3 = True
    else:
        # Means this is an m-series system that isn't tagged with "3.0" in the
        # version field of SMBIOS. This means the field is populated with OEM
        # default information. We've seen 0123456789 and 12345679 as some of the
        # default values so this is a catch-all since it's impossible to account
        # for all the possible values.
        gen1_2 = True
    # Common NTB options shared by both hardware generations.
    options = [
        'options ntb driver_override="ntb_split"',
        'options ntb_transport use_dma=1',
    ]
    if gen1_2:
        # single nvdimm on gen1/2 hardware
        options.append('options ntb_split config="ntb_pmem:1:4:0,ntb_transport"')
    elif gen3:
        # dual nvdimm on gen3 hardware
        options.append('options ntb_hw_plx usplit=1')
        options.append('options ntb_split config="ntb_pmem:1:4:0,ntb_pmem:1:4:0,ntb_transport"')
    write_config(options)
# Script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| 2,842 | Python | .py | 71 | 33.647887 | 96 | 0.650563 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,669 | truenas-initrd.py | truenas_middleware/src/freenas/usr/local/bin/truenas-initrd.py | #!/usr/bin/env python3
import argparse
import contextlib
import json
import logging
import os
import subprocess
import sys
import textwrap
import libzfs
import pyudev
logger = logging.getLogger(__name__)
def update_zfs_default(root, readonly_rootfs):
    """Ensure /etc/default/zfs delays pool import when the boot pool is on USB.

    USB disks can be slow to appear at boot, so a post-modprobe sleep entry is
    added (or removed when no longer applicable). Returns True when the config
    file was rewritten, False when it already matched.

    Raises CallError when no known boot pool is present.
    """
    with libzfs.ZFS() as zfs:
        existing_pools = [p.name for p in zfs.pools]
    # The boot pool has carried different names across releases.
    for i in ['freenas-boot', 'boot-pool']:
        if i in existing_pools:
            boot_pool = i
            break
    else:
        raise CallError(f'Failed to locate valid boot pool. Pools located were: {", ".join(existing_pools)}')
    with libzfs.ZFS() as zfs:
        disks = [disk.replace("/dev/", "") for disk in zfs.get(boot_pool).disks]
    # Build a udev-based map of block device names (plus partuuid aliases) to
    # their bus type so we can tell whether any boot-pool member is on USB.
    mapping = {}
    for dev in filter(
        lambda d: not d.sys_name.startswith("sr") and d.get("DEVTYPE") in ("disk", "partition"),
        pyudev.Context().list_devices(subsystem="block")
    ):
        if dev.get("DEVTYPE") == "disk":
            mapping[dev.sys_name] = dev.get("ID_BUS")
        elif dev.get("ID_PART_ENTRY_UUID"):
            parent = dev.find_parent("block")
            mapping[dev.sys_name] = parent.get("ID_BUS")
            mapping[os.path.join("disk/by-partuuid", dev.get("ID_PART_ENTRY_UUID"))] = parent.get("ID_BUS")
    has_usb = False
    for dev in disks:
        if mapping.get(dev) == "usb":
            has_usb = True
            break
    zfs_config_path = os.path.join(root, "etc/default/zfs")
    with open(zfs_config_path) as f:
        original_config = f.read()
    lines = original_config.rstrip().split("\n")
    zfs_var_name = "ZFS_INITRD_POST_MODPROBE_SLEEP"
    # Drop any previous setting, then re-add it only when a USB boot disk exists.
    lines = [line for line in lines if not line.startswith(f"{zfs_var_name}=")]
    if has_usb:
        lines.append(f"{zfs_var_name}=15")
    new_config = "\n".join(lines) + "\n"
    if new_config != original_config:
        readonly_rootfs.make_writeable()
        with open(zfs_config_path, "w") as f:
            f.write(new_config)
        return True
    return False
def get_current_gpu_pci_ids(database):
    """Return the PCI slot addresses of every device belonging to GPUs the
    user marked as isolated (for VM passthrough) in the system_advanced table.

    NOTE(review): `query_config_table` and `get_gpus` are imported at runtime
    by the __main__ block below — this function must only be called after that.
    """
    adv_config = query_config_table("system_advanced", database, "adv_")
    to_isolate = [gpu for gpu in get_gpus() if gpu["addr"]["pci_slot"] in adv_config.get("isolated_gpu_pci_ids", [])]
    return [dev["pci_slot"] for gpu in to_isolate for dev in gpu["devices"]]
def update_pci_module_files(root, config):
    """Write (or clear) the vfio/initramfs module files for GPU PCI isolation.

    `config["pci_ids"]` lists PCI slot addresses that must be bound to
    vfio-pci at early boot so the devices can be passed through to VMs.
    """
    # This method is (and must be) called when root is writeable
    def get_path(p):
        # Resolve a path relative to the (possibly chrooted) root.
        return os.path.join(root, p)
    pci_slots = config["pci_ids"]
    # Start from a clean slate: remove every file we may have written before.
    for path in map(
        get_path, [
            'etc/initramfs-tools/scripts/init-top/truenas_bind_vfio.sh',
            "etc/initramfs-tools/modules",
            "etc/modules",
            "etc/modprobe.d/kvm.conf",
            "etc/modprobe.d/nvidia.conf",
        ]
    ):
        with contextlib.suppress(Exception):
            os.unlink(path)
    os.makedirs(get_path("etc/initramfs-tools"), exist_ok=True)
    os.makedirs(get_path("etc/modprobe.d"), exist_ok=True)
    if not pci_slots:
        # No isolation requested: leave empty module lists and stop.
        for path in map(
            get_path, [
                "etc/initramfs-tools/modules",
                "etc/modules",
            ]
        ):
            with open(path, "w"):
                pass
        return
    # Load the vfio stack both in the initramfs and in the booted system.
    for path in map(get_path, ["etc/initramfs-tools/modules", "etc/modules"]):
        with open(path, "w") as f:
            f.write(textwrap.dedent("""\
                vfio
                vfio_iommu_type1
                vfio_virqfd
                vfio_pci
            """))
    with open(get_path("etc/modprobe.d/kvm.conf"), "w") as f:
        f.write("options kvm ignore_msrs=1\n")
    # Make sure vfio-pci wins over the GPU drivers for isolated devices.
    with open(get_path("etc/modprobe.d/nvidia.conf"), "w") as f:
        f.write(textwrap.dedent("""\
            softdep nouveau pre: vfio-pci
            softdep nvidia pre: vfio-pci
            softdep nvidia* pre: vfio-pci
        """))
    # init-top script that binds each isolated slot to vfio-pci before any
    # other driver can claim it.
    with open(get_path("etc/initramfs-tools/scripts/init-top/truenas_bind_vfio.sh"), "w") as f:
        f.write(textwrap.dedent(f"""\
            #!/bin/sh
            PREREQS=""
            DEVS="{' '.join(pci_slots)}"
            for DEV in $DEVS;
            do echo "vfio-pci" > /sys/bus/pci/devices/$DEV/driver_override
            done
            modprobe -i vfio-pci
        """))
    os.chmod(get_path("etc/initramfs-tools/scripts/init-top/truenas_bind_vfio.sh"), 0o755)
def update_pci_initramfs_config(root, readonly_rootfs, database):
    """Regenerate boot/initramfs_config.json and the dependent vfio module
    files when the set of isolated GPU PCI ids changed.

    Returns True when files were rewritten, False when already up to date.
    """
    config_path = os.path.join(root, "boot/initramfs_config.json")
    desired = {
        "pci_ids": get_current_gpu_pci_ids(database),
    }
    existing = None
    if os.path.exists(config_path):
        with open(config_path, "r") as f:
            existing = json.loads(f.read())
    if desired == existing:
        return False
    # Config drifted: persist the new state and rebuild the module files.
    readonly_rootfs.make_writeable()
    with open(config_path, "w") as f:
        f.write(json.dumps(desired))
    update_pci_module_files(root, desired)
    return True
def update_zfs_module_config(root, readonly_rootfs, database):
    """Sync etc/modprobe.d/zfs.conf with the enabled ZFS tunables stored in
    the database.

    The file is rewritten when options changed and removed when no enabled
    ZFS tunables remain. Returns True when a change was made.
    """
    enabled_opts = [
        f"{tunable['var']}={tunable['value']}"
        for tunable in query_table("system_tunable", database, "tun_")
        if tunable["type"] == "ZFS" and tunable["enabled"]
    ]
    desired = f"options zfs {' '.join(enabled_opts)}\n" if enabled_opts else None
    config_path = os.path.join(root, "etc", "modprobe.d", "zfs.conf")
    try:
        with open(config_path) as f:
            current = f.read()
    except FileNotFoundError:
        current = None
    if current == desired:
        return False
    readonly_rootfs.make_writeable()
    if desired is None:
        # No enabled tunables left but a config file exists: drop it.
        os.unlink(config_path)
    else:
        with open(config_path, "w") as f:
            f.write(desired)
    return True
# Entry point: refresh initramfs-related config under the given (possibly
# chrooted) root and rebuild the initramfs when anything changed.
if __name__ == "__main__":
    p = argparse.ArgumentParser()
    p.add_argument("chroot", nargs=1)
    p.add_argument("--database", "-d", default="")
    p.add_argument("--force", "-f", action="store_true")
    args = p.parse_args()
    root = args.chroot[0]
    if root != "/":
        # Use the target root's python packages so middleware imports match
        # that installation rather than the running system's.
        sys.path.insert(0, os.path.join(root, "usr/lib/python3/dist-packages"))
    from middlewared.service_exception import CallError
    from middlewared.utils.db import FREENAS_DATABASE, query_config_table, query_table
    from middlewared.utils.gpu import get_gpus
    from middlewared.utils.rootfs import ReadonlyRootfsManager
    with ReadonlyRootfsManager(root) as readonly_rootfs:
        try:
            database = args.database or os.path.join(root, FREENAS_DATABASE[1:])
            # Note: the tuple is fully evaluated before any(), so every update
            # step runs even when an earlier one already reported a change.
            if update_required := any((
                args.force,
                update_zfs_default(root, readonly_rootfs),
                update_pci_initramfs_config(root, readonly_rootfs, database),
                update_zfs_module_config(root, readonly_rootfs, database),
            )):
                readonly_rootfs.make_writeable()
                subprocess.run(["chroot", root, "update-initramfs", "-k", "all", "-u"], check=True)
        except Exception:
            logger.error("Failed to update initramfs", exc_info=True)
            exit(2)
    # We give out an exit code of 1 when initramfs has been updated as we require a reboot of the system for the
    # changes to have an effect. This caters to the case of uploading a database. Otherwise, we give an exit code
    # of 0 and in case of erring out
    exit(int(update_required))
| 7,573 | Python | .py | 187 | 31.893048 | 117 | 0.60616 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,670 | truenas-grub.py | truenas_middleware/src/freenas/usr/local/bin/truenas-grub.py | #!/usr/bin/env python3
import math
import psutil
import os
import json
import logging
from middlewared.utils.serial import serial_port_choices
from middlewared.utils.db import query_config_table
from middlewared.utils.vendor import Vendors
logger = logging.getLogger(__name__)
def get_serial_ports():
    """Map each serial port's I/O start address to its Linux ttyS device name."""
    ports = {}
    for entry in serial_port_choices():
        ports[entry['start']] = entry['name'].replace('uart', 'ttyS')
    return ports
# Entry point: render /etc/default/grub.d/truenas.cfg from the advanced
# system settings (serial console, kdump, extra kernel options, vendor brand).
if __name__ == "__main__":
    advanced = query_config_table("system_advanced", prefix="adv_")
    kernel_extra_options = advanced.get("kernel_extra_options") or ""
    # Branding: allow a vendor override from /data/.vendor, else the default.
    vendor = Vendors.TRUENAS_SCALE
    try:
        with open("/data/.vendor", "r") as f:
            vendor = json.loads(f.read()).get("name", Vendors.TRUENAS_SCALE)
    except FileNotFoundError:
        pass
    except Exception:
        logger.error("Failed to parse /data/.vendor", exc_info=True)
    # We need to allow tpm in grub as sedutil-cli requires it
    # `zfsforce=1` is needed because FreeBSD bootloader imports boot pool with hostid=0 while SCALE releases up to
    # 22.02-RC.2 use real hostid. We need to be able to boot both of these configurations.
    # `nvme_core.multipath=N` is needed to disable NVMe multipath support. Otherwise, multipath-capable NVMe
    # devices will be exposed to udev as nvme0c0n1, meanwhile actual block devices `nvme0n1` will be created
    # as virtual devices with no straightforward way to map between those two. We don't use NVMe multipath
    # so we can sacrifice it to achieve consistent behavior between multipath-capable and non-multipath-capable
    # devices and avoid mapping actual hardware devices and virtual block devices.
    config = [
        f'GRUB_DISTRIBUTOR="{vendor}"',
        'GRUB_TIMEOUT=10',
        'GRUB_DISABLE_RECOVERY="true"',
        'GRUB_CMDLINE_LINUX_DEFAULT="libata.allow_tpm=1 amd_iommu=on iommu=pt '
        'kvm_amd.npt=1 kvm_amd.avic=1 intel_iommu=on zfsforce=1 nvme_core.multipath=N'
        f'{f" {kernel_extra_options}" if kernel_extra_options else ""}"',
    ]
    terminal_output = ["console"]
    terminal_input = ["console"]
    cmdline = []
    if advanced["serialconsole"]:
        # Resolve the configured port address to a ttyS name; fall back to the
        # stored value when it is not a known port.
        port = get_serial_ports().get(advanced['serialport'], advanced['serialport'])
        port_nr = port.replace('ttyS', '')
        config.append(f'GRUB_SERIAL_COMMAND="serial --unit={port_nr} --speed={advanced["serialspeed"]} --word=8 --parity=no --stop=1"')
        if os.path.exists("/sys/firmware/efi"):
            # EFI systems must render via gfxterm alongside serial.
            terminal_output = ["gfxterm"]
        terminal_output.append("serial")
        terminal_input.append("serial")
        cmdline.append(f"console=tty1 console={port},{advanced['serialspeed']}")
    if advanced.get("kdump_enabled"):
        # (memory in kb) / 16 / 1024 / 1024
        # For every 4KB of physical memory, we should allocate 2 bits to the crash kernel
        # In other words, for every 16KB of memory we allocate 1 byte.
        # https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/
        # kernel_administration_guide/kernel_crash_dump_guide#sect-kdump-memory-requirements
        #
        # We should test this on systems with higher memory as there are contradicting
        # docs - https://www.suse.com/support/kb/doc/?id=000016171
        # With our custom kernel, having 256MB RAM as base is not enough.
        # In my tests it worked with having 400MB as base RAM.
        # TODO: Let's please see what we can do to bring this down on the kernel side perhaps
        current_mem = psutil.virtual_memory().total / 1024
        cmdline.append(f"crashkernel={400 + math.ceil(current_mem / 16 / 1024 / 1024)}M")
    config.append(f'GRUB_TERMINAL_INPUT="{" ".join(terminal_input)}"')
    config.append(f'GRUB_TERMINAL_OUTPUT="{" ".join(terminal_output)}"')
    config.append(f'GRUB_CMDLINE_LINUX="{" ".join(cmdline)}"')
    config.append("")
    with open("/etc/default/grub.d/truenas.cfg", "w") as f:
        f.write("\n".join(config))
| 3,954 | Python | .py | 71 | 49.295775 | 135 | 0.68595 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,671 | snmp-agent.py | truenas_middleware/src/freenas/usr/local/bin/snmp-agent.py | #!/usr/bin/env python3
import threading
import time
import contextlib
import os
import libzfs
import netsnmpagent
import pysnmp.hlapi # noqa
import pysnmp.smi
from truenas_api_client import Client
def get_kstat():
    """Read ZFS ARC kstats from procfs into a flat dict.

    Returns the arcstats counters keyed as ``kstat.zfs.misc.arcstats.<name>``
    plus a synthetic ``vfs.zfs.version.spa`` marker on success; returns
    whatever was collected so far (possibly {}) when arcstats cannot be read.
    """
    stats = {}
    try:
        with open("/proc/spl/kstat/zfs/arcstats") as f:
            for lineno, line in enumerate(f, start=1):
                # The first two lines are kstat header rows; skip them and blanks.
                if lineno <= 2:
                    continue
                info = line.strip()
                if not info:
                    continue
                name, _, data = info.split()
                stats[f"kstat.zfs.misc.arcstats.{name}"] = int(data)
    except Exception:
        return stats
    stats["vfs.zfs.version.spa"] = 5000
    return stats
def get_arc_efficiency(kstat):
    """Summarize ARC hit/miss efficiency from arcstats (arc_summary style).

    Returns None when the spa version marker is missing (arcstats could not be
    read); otherwise a nested dict of formatted counts (fHits) and
    percentages (fPerc).
    """
    if not kstat.get("vfs.zfs.version.spa"):
        return
    output = {}
    prefix = 'kstat.zfs.misc.arcstats'
    arc_hits = kstat[f"{prefix}.hits"]
    arc_misses = kstat[f"{prefix}.misses"]
    demand_data_hits = kstat[f"{prefix}.demand_data_hits"]
    demand_data_misses = kstat[f"{prefix}.demand_data_misses"]
    demand_metadata_hits = kstat[f"{prefix}.demand_metadata_hits"]
    demand_metadata_misses = kstat[f"{prefix}.demand_metadata_misses"]
    mfu_ghost_hits = kstat[f"{prefix}.mfu_ghost_hits"]
    mfu_hits = kstat[f"{prefix}.mfu_hits"]
    mru_ghost_hits = kstat[f"{prefix}.mru_ghost_hits"]
    mru_hits = kstat[f"{prefix}.mru_hits"]
    prefetch_data_hits = kstat[f"{prefix}.prefetch_data_hits"]
    prefetch_data_misses = kstat[f"{prefix}.prefetch_data_misses"]
    prefetch_metadata_hits = kstat[f"{prefix}.prefetch_metadata_hits"]
    prefetch_metadata_misses = kstat[f"{prefix}.prefetch_metadata_misses"]
    # Derived totals; "anon" hits are ARC hits not attributable to the
    # MRU/MFU (or ghost) lists.
    anon_hits = arc_hits - (mfu_hits + mru_hits + mfu_ghost_hits + mru_ghost_hits)
    arc_accesses_total = (arc_hits + arc_misses)
    demand_data_total = (demand_data_hits + demand_data_misses)
    prefetch_data_total = (prefetch_data_hits + prefetch_data_misses)
    real_hits = (mfu_hits + mru_hits)
    output["total_accesses"] = fHits(arc_accesses_total)
    output["cache_hit_ratio"] = {
        'per': fPerc(arc_hits, arc_accesses_total),
        'num': fHits(arc_hits),
    }
    output["cache_miss_ratio"] = {
        'per': fPerc(arc_misses, arc_accesses_total),
        'num': fHits(arc_misses),
    }
    output["actual_hit_ratio"] = {
        'per': fPerc(real_hits, arc_accesses_total),
        'num': fHits(real_hits),
    }
    output["data_demand_efficiency"] = {
        'per': fPerc(demand_data_hits, demand_data_total),
        'num': fHits(demand_data_total),
    }
    # Prefetch/anon sections are emitted only when there is data to report.
    if prefetch_data_total > 0:
        output["data_prefetch_efficiency"] = {
            'per': fPerc(prefetch_data_hits, prefetch_data_total),
            'num': fHits(prefetch_data_total),
        }
    if anon_hits > 0:
        output["cache_hits_by_cache_list"] = {}
        output["cache_hits_by_cache_list"]["anonymously_used"] = {
            'per': fPerc(anon_hits, arc_hits),
            'num': fHits(anon_hits),
        }
    output["most_recently_used"] = {
        'per': fPerc(mru_hits, arc_hits),
        'num': fHits(mru_hits),
    }
    output["most_frequently_used"] = {
        'per': fPerc(mfu_hits, arc_hits),
        'num': fHits(mfu_hits),
    }
    output["most_recently_used_ghost"] = {
        'per': fPerc(mru_ghost_hits, arc_hits),
        'num': fHits(mru_ghost_hits),
    }
    output["most_frequently_used_ghost"] = {
        'per': fPerc(mfu_ghost_hits, arc_hits),
        'num': fHits(mfu_ghost_hits),
    }
    output["cache_hits_by_data_type"] = {}
    output["cache_hits_by_data_type"]["demand_data"] = {
        'per': fPerc(demand_data_hits, arc_hits),
        'num': fHits(demand_data_hits),
    }
    output["cache_hits_by_data_type"]["prefetch_data"] = {
        'per': fPerc(prefetch_data_hits, arc_hits),
        'num': fHits(prefetch_data_hits),
    }
    output["cache_hits_by_data_type"]["demand_metadata"] = {
        'per': fPerc(demand_metadata_hits, arc_hits),
        'num': fHits(demand_metadata_hits),
    }
    output["cache_hits_by_data_type"]["prefetch_metadata"] = {
        'per': fPerc(prefetch_metadata_hits, arc_hits),
        'num': fHits(prefetch_metadata_hits),
    }
    output["cache_misses_by_data_type"] = {}
    output["cache_misses_by_data_type"]["demand_data"] = {
        'per': fPerc(demand_data_misses, arc_misses),
        'num': fHits(demand_data_misses),
    }
    output["cache_misses_by_data_type"]["prefetch_data"] = {
        'per': fPerc(prefetch_data_misses, arc_misses),
        'num': fHits(prefetch_data_misses),
    }
    output["cache_misses_by_data_type"]["demand_metadata"] = {
        'per': fPerc(demand_metadata_misses, arc_misses),
        'num': fHits(demand_metadata_misses),
    }
    output["cache_misses_by_data_type"]["prefetch_metadata"] = {
        'per': fPerc(prefetch_metadata_misses, arc_misses),
        'num': fHits(prefetch_metadata_misses),
    }
    return output
def fHits(Hits=0, Decimal=2):
    """Format a count with arc_summary-style magnitude suffixes.

    Values >= 10**3 are scaled and suffixed (k, m, b, t, q, Q, s, S) with
    `Decimal` fraction digits; smaller (and zero/negative) values are
    returned as plain integers.
    """
    suffixes = (
        (10 ** 24, "S"),
        (10 ** 21, "s"),
        (10 ** 18, "Q"),
        (10 ** 15, "q"),
        (10 ** 12, "t"),
        (10 ** 9, "b"),
        (10 ** 6, "m"),
        (10 ** 3, "k"),
    )
    for factor, suffix in suffixes:
        if Hits >= factor:
            return "%0.*f%s" % (Decimal, Hits / factor, suffix)
    return "%d" % Hits
def fPerc(lVal=0, rVal=0, Decimal=2):
    """Format lVal/rVal as a percentage string with `Decimal` fraction digits.

    A non-positive denominator yields "100...%" (arc_summary convention).
    """
    if rVal <= 0:
        return "%0.*f%%" % (Decimal, 100)
    return "%0.*f%%" % (Decimal, 100 * (lVal / rVal))
def get_zfs_arc_miss_percent(kstat):
    """Return the ARC miss percentage as a float, or 0 when no reads occurred."""
    hits = kstat["kstat.zfs.misc.arcstats.hits"]
    misses = kstat["kstat.zfs.misc.arcstats.misses"]
    total_reads = hits + misses
    if total_reads <= 0:
        return 0
    return 100 - float(100 * hits / total_reads)
# Load the TrueNAS MIB so the OID names used below can resolve.
mib_builder = pysnmp.smi.builder.MibBuilder()
mib_sources = mib_builder.getMibSources() + (pysnmp.smi.builder.DirMibSource("/usr/local/share/pysnmp/mibs"),)
mib_builder.setMibSources(*mib_sources)
mib_builder.loadModules("TRUENAS-MIB")

# AgentX sub-agent registered against the host's snmpd.
agent = netsnmpagent.netsnmpAgent(
    AgentName="TrueNASAgent",
    MIBFiles=["/usr/local/share/snmp/mibs/TRUENAS-MIB.txt"],
)

# Per-pool table: index (1), name (2), health (3), cumulative read/write
# ops and bytes (4-7), then last-interval deltas of the same (8-11).
zpool_table = agent.Table(
    oidstr="TRUENAS-MIB::zpoolTable",
    indexes=[agent.Integer32()],
    columns=[
        (1, agent.Integer32()),
        (2, agent.DisplayString()),
        (3, agent.DisplayString()),
        (4, agent.Counter64()),
        (5, agent.Counter64()),
        (6, agent.Counter64()),
        (7, agent.Counter64()),
        (8, agent.Counter64()),
        (9, agent.Counter64()),
        (10, agent.Counter64()),
        (11, agent.Counter64()),
    ],
)
# Per-zvol table: index (1), name (2), used/available/referenced bytes (3-5).
zvol_table = agent.Table(
    oidstr="TRUENAS-MIB::zvolTable",
    indexes=[agent.Integer32()],
    columns=[
        (1, agent.Integer32()),
        (2, agent.DisplayString()),
        (3, agent.Counter64()),
        (4, agent.Counter64()),
        (5, agent.Counter64()),
    ],
)
# Disk temperature table: device name (2) and temperature (3).
hdd_temp_table = agent.Table(
    oidstr="TRUENAS-MIB::hddTempTable",
    indexes=[
        agent.Integer32(),
    ],
    columns=[
        (2, agent.DisplayString()),
        (3, agent.Unsigned32()),
    ]
)
# Scalar ARC/L2ARC/ZIL statistics refreshed from the main loop below.
zfs_arc_size = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsArcSize")
zfs_arc_meta = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsArcMeta")
zfs_arc_data = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsArcData")
zfs_arc_hits = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsArcHits")
zfs_arc_misses = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsArcMisses")
zfs_arc_c = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsArcC")
zfs_arc_miss_percent = agent.DisplayString(oidstr="TRUENAS-MIB::zfsArcMissPercent")
zfs_arc_cache_hit_ratio = agent.DisplayString(oidstr="TRUENAS-MIB::zfsArcCacheHitRatio")
zfs_arc_cache_miss_ratio = agent.DisplayString(oidstr="TRUENAS-MIB::zfsArcCacheMissRatio")
zfs_l2arc_hits = agent.Counter32(oidstr="TRUENAS-MIB::zfsL2ArcHits")
zfs_l2arc_misses = agent.Counter32(oidstr="TRUENAS-MIB::zfsL2ArcMisses")
zfs_l2arc_read = agent.Counter32(oidstr="TRUENAS-MIB::zfsL2ArcRead")
zfs_l2arc_write = agent.Counter32(oidstr="TRUENAS-MIB::zfsL2ArcWrite")
zfs_l2arc_size = agent.Unsigned32(oidstr="TRUENAS-MIB::zfsL2ArcSize")
zfs_zilstat_ops1 = agent.Counter64(oidstr="TRUENAS-MIB::zfsZilstatOps1sec")
zfs_zilstat_ops5 = agent.Counter64(oidstr="TRUENAS-MIB::zfsZilstatOps5sec")
zfs_zilstat_ops10 = agent.Counter64(oidstr="TRUENAS-MIB::zfsZilstatOps10sec")
def readZilOpsCount() -> int:
    """Return the total ZIL itx count (normal + slog metaslab) from procfs."""
    wanted = ("zil_itx_metaslab_normal_count", "zil_itx_metaslab_slog_count")
    total = 0
    with open("/proc/spl/kstat/zfs/zil") as f:
        for line in f:
            var, _size, val, *_rest = line.split()
            if var in wanted:
                total += int(val)
    return total
class ZilstatThread(threading.Thread):
    """Daemon thread that samples ZIL operation counts every `interval`
    seconds; `value` always holds the delta over the last interval."""

    def __init__(self, interval):
        super().__init__(daemon=True)
        self.interval = interval
        self.value = 0

    def run(self):
        last_count = readZilOpsCount()
        while True:
            time.sleep(self.interval)
            now_count = readZilOpsCount()
            self.value = now_count - last_count
            last_count = now_count
class DiskTempThread(threading.Thread):
    """Daemon thread polling disk temperatures through the middleware client
    every `interval` seconds; results (in millidegrees) land in
    `self.temperatures`."""

    def __init__(self, interval):
        super().__init__(daemon=True)
        self.interval = interval
        self.temperatures = {}
        self.initialized = False
        self.disks = []
        self.powermode = None

    def run(self):
        while True:
            if not self.initialized:
                # Lazily discover which disks to monitor and the SMART powermode.
                try:
                    with Client() as c:
                        self.disks = c.call("disk.disks_for_temperature_monitoring")
                        self.powermode = c.call("smart.config")["powermode"]
                except Exception as e:
                    print(f"Failed to query disks for temperature monitoring: {e!r}")
                else:
                    self.initialized = True
                if not self.initialized:
                    time.sleep(self.interval)
                    continue
            if not self.disks:
                # Nothing to monitor: end the thread permanently.
                return
            try:
                with Client() as c:
                    raw = c.call("disk.temperatures", self.disks, self.powermode)
                    self.temperatures = {
                        disk: value * 1000
                        for disk, value in raw.items()
                        if value is not None
                    }
            except Exception as e:
                print(f"Failed to collect disks temperatures: {e!r}")
                self.temperatures = {}
            time.sleep(self.interval)
def gather_zpool_iostat_info(prev_data, name, zpoolobj):
    """Return (overall, last-interval) read/write op and byte counters for one
    pool, each shaped {name: {...}}.

    `prev_data` holds the counters from the previous call; missing entries
    yield zero deltas.
    """
    stats = zpoolobj.root_vdev.stats
    current = {
        "read_ops": stats.ops[libzfs.ZIOType.READ],
        "write_ops": stats.ops[libzfs.ZIOType.WRITE],
        "read_bytes": stats.bytes[libzfs.ZIOType.READ],
        "write_bytes": stats.bytes[libzfs.ZIOType.WRITE],
    }
    delta = {"read_ops": 0, "write_ops": 0, "read_bytes": 0, "write_bytes": 0}
    for key in prev_data.get(name, ()):
        delta[key] = current[key] - prev_data[name][key]
    return {name: current}, {name: delta}
def fill_in_zpool_snmp_row_info(idx, name, health, io_overall, io_1s):
    """Populate one zpoolTable SNMP row: identity/health plus cumulative
    (cols 4-7) and last-interval (cols 8-11) I/O counters."""
    overall = io_overall[name]
    last_sec = io_1s[name]
    row = zpool_table.addRow([agent.Integer32(idx)])
    row.setRowCell(1, agent.Integer32(idx))
    row.setRowCell(2, agent.DisplayString(name))
    row.setRowCell(3, agent.DisplayString(health))
    for cell, key in ((4, "read_ops"), (5, "write_ops"), (6, "read_bytes"), (7, "write_bytes")):
        row.setRowCell(cell, agent.Counter64(overall[key]))
    for cell, key in ((8, "read_ops"), (9, "write_ops"), (10, "read_bytes"), (11, "write_bytes")):
        row.setRowCell(cell, agent.Counter64(last_sec[key]))
def fill_in_zvol_snmp_row_info(idx, info):
    """Populate one zvolTable SNMP row from a serialized dataset dict."""
    props = info["properties"]
    row = zvol_table.addRow([agent.Integer32(idx)])
    row.setRowCell(1, agent.Integer32(idx))
    row.setRowCell(2, agent.DisplayString(info["name"]))
    for cell, prop in ((3, "used"), (4, "available"), (5, "referenced")):
        row.setRowCell(cell, agent.Counter64(props[prop]["parsed"]))
def report_zfs_info(prev_zpool_info):
    """Refresh the zpool and zvol SNMP tables from current libzfs state.

    `prev_zpool_info` holds each pool's counters from the previous call and is
    updated in place so the next call can compute per-interval deltas.
    """
    zpool_table.clear()
    zvol_table.clear()
    # zpool related information
    with libzfs.ZFS() as z:
        for idx, zpool in enumerate(z.pools, start=1):
            name = zpool.name
            health = zpool.properties["health"].value
            io_overall, io_1s = gather_zpool_iostat_info(prev_zpool_info, name, zpool)
            fill_in_zpool_snmp_row_info(idx, name, health, io_overall, io_1s)
            # be sure and update our zpool io data so next time it's called
            # we calculate the 1sec values properly
            prev_zpool_info.update(io_overall)
        # zvol related information; only fetch the properties we report.
        zvols = get_list_of_zvols()
        kwargs = {
            'user_props': False,
            'props': ['used', 'available', 'referenced'],
            'retrieve_children': False,
            'datasets': zvols,
        }
        for idx, ds_info in enumerate(z.datasets_serialized(**kwargs), start=1):
            fill_in_zvol_snmp_row_info(idx, ds_info)
def get_list_of_zvols():
    """Return zvol dataset names derived from /dev/zvol device nodes.

    Snapshot nodes ('@' in the name) are skipped and '+' is mapped back to a
    space, matching how ZFS encodes dataset names in device paths.
    """
    root_dir = '/dev/zvol/'
    found = set()
    with contextlib.suppress(FileNotFoundError):  # no zvols on this system
        for dir_path, _dirs, files in os.walk(root_dir):
            for entry in files:
                if '@' in entry:
                    continue
                full_path = os.path.join(dir_path, entry)
                found.add(full_path.removeprefix(root_dir).replace('+', ' '))
    return list(found)
# Main loop: start optional sampler threads, register with snmpd, and refresh
# all SNMP objects roughly every couple of seconds between AgentX polls.
if __name__ == "__main__":
    # Zilstat collection is optional; only spin up the sampling threads when
    # enabled in the SNMP service configuration.
    zilstat_1_thread = zilstat_5_thread = zilstat_10_thread = None
    with Client() as c:
        if c.call("snmp.config")["zilstat"]:
            zilstat_1_thread = ZilstatThread(1)
            zilstat_5_thread = ZilstatThread(5)
            zilstat_10_thread = ZilstatThread(10)
            zilstat_1_thread.start()
            zilstat_5_thread.start()
            zilstat_10_thread.start()
    disk_temp_thread = DiskTempThread(300)
    disk_temp_thread.start()
    agent.start()
    prev_zpool_info = {}
    last_update_at = int(time.monotonic())
    while True:
        agent.check_and_process()
        if int(time.monotonic()) - last_update_at > 1:
            report_zfs_info(prev_zpool_info)
            if hdd_temp_table:
                hdd_temp_table.clear()
                if disk_temp_thread:
                    # Snapshot the dict; the sampler thread mutates it concurrently.
                    for i, (name, temp) in enumerate(list(disk_temp_thread.temperatures.items())):
                        row = hdd_temp_table.addRow([agent.Integer32(i + 1)])
                        row.setRowCell(2, agent.DisplayString(name))
                        row.setRowCell(3, agent.Unsigned32(temp))
            kstat = get_kstat()
            arc_efficiency = get_arc_efficiency(kstat)
            prefix = "kstat.zfs.misc.arcstats"
            # Sizes are reported in KiB; 32-bit counters are wrapped mod 2**32.
            zfs_arc_size.update(kstat[f"{prefix}.size"] // 1024)
            zfs_arc_meta.update(kstat[f"{prefix}.arc_meta_used"] // 1024)
            zfs_arc_data.update(kstat[f"{prefix}.data_size"] // 1024)
            zfs_arc_hits.update(int(kstat[f"{prefix}.hits"] % 2 ** 32))
            zfs_arc_misses.update(int(kstat[f"{prefix}.misses"] % 2 ** 32))
            zfs_arc_c.update(kstat[f"{prefix}.c"] // 1024)
            zfs_arc_miss_percent.update(str(get_zfs_arc_miss_percent(kstat)).encode("ascii"))
            zfs_arc_cache_hit_ratio.update(str(arc_efficiency["cache_hit_ratio"]["per"][:-1]).encode("ascii"))
            zfs_arc_cache_miss_ratio.update(str(arc_efficiency["cache_miss_ratio"]["per"][:-1]).encode("ascii"))
            zfs_l2arc_hits.update(int(kstat[f"{prefix}.l2_hits"] % 2 ** 32))
            zfs_l2arc_misses.update(int(kstat[f"{prefix}.l2_misses"] % 2 ** 32))
            zfs_l2arc_read.update(kstat[f"{prefix}.l2_read_bytes"] // 1024 % 2 ** 32)
            zfs_l2arc_write.update(kstat[f"{prefix}.l2_write_bytes"] // 1024 % 2 ** 32)
            zfs_l2arc_size.update(kstat[f"{prefix}.l2_asize"] // 1024)
            if zilstat_1_thread:
                zfs_zilstat_ops1.update(zilstat_1_thread.value)
            if zilstat_5_thread:
                zfs_zilstat_ops5.update(zilstat_5_thread.value)
            if zilstat_10_thread:
                zfs_zilstat_ops10.update(zilstat_10_thread.value)
            last_update_at = int(time.monotonic())
| 17,417 | Python | .py | 405 | 34.928395 | 112 | 0.608185 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,672 | firmware_update.py | truenas_middleware/src/freenas/usr/local/sbin/firmware_update.py | #!/usr/bin/env python
#
# Copyright (c) 2020 iXsystems, Inc.
# All rights reserved.
# This file is a part of TrueNAS
# and may not be copied and/or distributed
# without the express permission of iXsystems.
from collections import namedtuple
import logging
import logging.config
import logging.handlers
import os
from packaging import version
import re
import sys
import subprocess
# Paths to the HBA flashing utilities used below.
SAS2FLASH = '/usr/local/sbin/sas2flash'
SAS3FLASH = '/usr/local/sbin/sas3flash'
STORCLI = '/usr/local/sbin/storcli'
# Directory shipping the bundled firmware/BIOS images.
FWPATH = '/usr/local/share/firmware/'
# Sentinel files used to coordinate with boot-time update handling.
FAILED_UPDATE_SENTINEL = '/data/.hba_firmware_update_fail'
UPDATE_SENTINEL = '/data/.hba_firmware_update'
# Controllers updated / failed during this run (e.g. "SAS92xx#0").
UPDATE_SUCCESS = []
UPDATE_FAIL = []
# path: firmware image file; version: packaging Version parsed from its name.
Firmware = namedtuple("Firmware", ["path", "version"])
def get_firmware(prefix):
    """Locate a bundled firmware image for `prefix` under FWPATH.

    Returns a Firmware(path, version) for the first matching
    ``<prefix>*.<a.b.c.d>.bin`` file, or None (after logging an error)
    when no match exists.
    """
    version_pattern = re.compile(r".+\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\.bin$")
    for entry in os.listdir(FWPATH):
        if not entry.startswith(prefix):
            continue
        match = version_pattern.match(entry)
        if match:
            return Firmware(os.path.join(FWPATH, entry), version.parse(match.group(1)))
    logger.error("Unable to find firmware file with prefix %r", prefix)
    return None
# Log DEBUG and above to a persistent file for support; mirror INFO and above
# to stdout for the operator.
logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'datetime': {
            'format': '%(asctime)s %(message)s',
        },
    },
    'handlers': {
        'file': {
            'class': 'logging.FileHandler',
            'formatter': 'datetime',
            'level': 'DEBUG',
            'filename': '/data/hba_firmware_update.log',
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'stream': 'ext://sys.stdout',
        },
    },
    'loggers': {
        '': {
            'handlers': ['console', 'file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
})
logger = logging.getLogger(__name__)

if os.path.exists(FAILED_UPDATE_SENTINEL):
    # Prevent a firmware flash failure from causing a boot loop
    logger.info("Failure sentinel present, skipping HBA firmware checks")
    sys.exit(255)
# --- SAS92xx (mps) controllers: enumerate with sas2flash, compare the running
# firmware against the bundled image, and flash when outdated. ---
logger.info("Checking SAS92xx HBAs firmware")
proc = subprocess.Popen([
    SAS2FLASH, "-listall"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
listall = proc.communicate()[0].decode("utf8", "ignore").strip()
# logger.debug(listall)
for hba in re.finditer(r"^(([0-9]+) +[^ ]+ +([0-9]{2}\.[0-9]{2}\.[0-9]{2}\.[0-9]{2}) +.*)$", listall, re.MULTILINE):
    logger.debug(hba.group(1))
    n = hba.group(2)
    controller = "SAS92xx#%s" % n
    firmware_version = version.parse(hba.group(3))
    # A version below 1 means the listing could not be parsed meaningfully.
    if firmware_version < version.parse("1"):
        logger.error("Can't get firmware version")
        continue
    proc = subprocess.Popen([
        SAS2FLASH, "-list", "-c", n
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    info = proc.communicate()[0].decode("utf8", "ignore").strip()
    m = re.search(r"Board Name *: *([^ ]+)$", info, re.MULTILINE)
    if m is None:
        logger.error("Can't get board name")
        logger.debug(info)
        continue
    boardname = m.group(1)
    # In some cases we'll end up with a board name like:
    # SAS9206-16E (Notice the ending E is capitalized...)
    if boardname.endswith("E"):
        boardname = boardname[:-1] + 'e'
    if boardname.endswith("I"):
        boardname = boardname[:-1] + 'i'
    logger.debug("Board Name is %s" % boardname)
    new_firmware = get_firmware(f"mps_{boardname}")
    if not new_firmware:
        continue
    bios_file = os.path.join(FWPATH, "mps_bios.rom")
    if not os.path.exists(bios_file):
        logger.error("BIOS image %s not found" % bios_file)
        continue
    if firmware_version >= new_firmware.version:
        logger.debug("Up to date firmware version %r" % firmware_version)
        continue
    logger.info("Found old firmware %r, updating to %r" % (firmware_version, new_firmware.version))
    ret = subprocess.run([SAS2FLASH, "-c", n, "-b", bios_file, "-f", new_firmware.path])
    if not ret.returncode:
        logger.info("Update successful")
        UPDATE_SUCCESS.append(controller)
    else:
        logger.error("Update failed: %s -c %s -b %s -f %s returned %d" %
                     (SAS2FLASH, n, bios_file, new_firmware.path, ret.returncode))
        UPDATE_FAIL.append(controller)
logger.debug("")
# --- SAS93xx (mpr) controllers: enumerate with sas3flash, compare the running
# firmware against the bundled image, and flash when outdated. ---
logger.info("Checking SAS93xx HBAs firmware")
proc = subprocess.Popen([
    SAS3FLASH, "-listall"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
listall = proc.communicate()[0].decode("utf8", "ignore").strip()
# logger.debug(listall)
for hba in re.finditer(r"^(([0-9]+) +[^ ]+ +([0-9]{2}\.[0-9]{2}\.[0-9]{2}\.[0-9]{2}) +.*)$", listall, re.MULTILINE):
    logger.debug(hba.group(1))
    n = hba.group(2)
    controller = "SAS93xx#%s" % n
    firmware_version = version.parse(hba.group(3))
    # A version below 1 means the listing could not be parsed meaningfully.
    if firmware_version < version.parse("1"):
        logger.error("Can't get firmware version")
        continue
    proc = subprocess.Popen([
        SAS3FLASH, "-list", "-c", n
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    info = proc.communicate()[0].decode("utf8", "ignore").strip()
    m = re.search(r"Board Name *: *([^ ]+)$", info, re.MULTILINE)
    if m is None:
        logger.error("Can't get board name")
        logger.debug(info)
        continue
    boardname = m.group(1)
    # Echostreams HBAs have different PCBs, but use standard firmware.
    if boardname == "Echostreams HBA":
        boardname = "SAS9300-8i"
    logger.debug("Board Name is %s" % boardname)
    new_firmware = get_firmware(f"mpr_{boardname}")
    if not new_firmware:
        continue
    bios_file = os.path.join(FWPATH, "mpr_bios.rom")
    if not os.path.exists(bios_file):
        logger.error("BIOS image %s not found" % bios_file)
        continue
    if firmware_version >= new_firmware.version:
        # BUG FIX: was `"... %d" % firmware_version`, which raises TypeError
        # because packaging Version objects are not integers; use %r to match
        # the SAS92xx code path above.
        logger.debug("Up to date firmware version %r" % firmware_version)
        continue
    logger.info("Found old firmware %r, updating to %r" % (firmware_version, new_firmware.version))
    ret = subprocess.run([SAS3FLASH, "-c", n, "-b", bios_file, "-f", new_firmware.path])
    if not ret.returncode:
        logger.info("Update successful")
        UPDATE_SUCCESS.append(controller)
    else:
        logger.error("Update failed: %s -c %s -b %s -f %s returned %d" %
                     (SAS3FLASH, n, bios_file, new_firmware.path, ret.returncode))
        UPDATE_FAIL.append(controller)
logger.debug("")
logger.info("Checking HBA94xx HBAs firmware")
# Enumerate HBA 94xx controllers via storcli:
proc = subprocess.Popen([
    STORCLI, "show"
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
listall = proc.communicate()[0].decode("utf8", "ignore").strip()
# logger.debug(listall)
for hba in re.finditer(r"^( *([0-9]+) +(HBA 94[^ ]+) +SAS.*)$", listall, re.MULTILINE):
    logger.debug(hba.group(1))
    n = hba.group(2)
    controller = "HBA94xx#%s" % n
    # e.g. "HBA 9400-8i" -> "HBA9400-8i":
    boardname = hba.group(3).replace(" ", "")
    logger.debug("Board Name is %s" % boardname)
    proc = subprocess.Popen([
        STORCLI, "/c%s" % n, "show"
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    info = proc.communicate()[0].decode("utf8", "ignore").strip()
    m = re.search(r"^FW Version = ([0-9]{2}\.[0-9]{2}\.[0-9]{2}\.[0-9]{2})$", info, re.MULTILINE)
    if m is None:
        logger.error("Can't get firmware version")
        logger.debug(info)
        continue
    firmware_version = version.parse(m.group(1))
    if firmware_version < version.parse("1"):
        logger.error("Can't get firmware version")
        logger.debug(info)
        continue
    new_firmware = get_firmware(f"mpr_{boardname}")
    if not new_firmware:
        continue
    efibios_file = os.path.join(FWPATH, "mpr_HBA_efibios.rom")
    if not os.path.exists(efibios_file):
        logger.error("EFI BIOS image %s not found" % efibios_file)
        continue
    if firmware_version >= new_firmware.version:
        # BUGFIX: was "%d" -- firmware_version is a packaging Version object,
        # so "%d" raises TypeError; use %r like the other branches.
        logger.debug("Up to date firmware version %r" % firmware_version)
        continue
    logger.info("Found old firmware %r, updating to %r" % (firmware_version, new_firmware.version))
    ret = subprocess.run([STORCLI, "/c%s" % n, "download", "file=" + new_firmware.path])
    if not ret.returncode:
        logger.info("Update successful")
        UPDATE_SUCCESS.append(controller)
    else:
        logger.error("Update failed: %s /c%s download file=%s returned %d" %
                     (STORCLI, n, new_firmware.path, ret.returncode))
        UPDATE_FAIL.append(controller)
        # Firmware flash failed; skip the EFI BIOS flash for this controller.
        continue
    ret = subprocess.run([STORCLI, "/c%s" % n, "download", "efibios", "file=" + efibios_file])
    if not ret.returncode:
        logger.info("EFI BIOS update successful")
    else:
        logger.error("Update failed: %s /c%s download efibios file=%s returned %d" %
                     (STORCLI, n, efibios_file, ret.returncode))
logger.debug("")
logger.debug("")
logger.info("HBA firmware check complete")
logger.debug("")
if len(UPDATE_FAIL) > 0:
fh = open(FAILED_UPDATE_SENTINEL, "w")
fh.write(', '.join(UPDATE_FAIL))
fh.close()
if os.path.exists(UPDATE_SENTINEL):
os.unlink(UPDATE_SENTINEL)
if len(UPDATE_SUCCESS) > 0:
# signal our caller a reboot is needed with a return value of 0
sys.exit(0)
if len(UPDATE_SUCCESS) == 0 and len(UPDATE_FAIL) == 0:
# There were no controllers that needed updating
sys.exit(254)
if len(UPDATE_FAIL) > 0:
# The caller doesn't do anything with non-zero return codes as of right now
sys.exit(len(UPDATE_FAIL))
| 9,415 | Python | .py | 235 | 34.097872 | 116 | 0.636145 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,673 | libpython.py | truenas_middleware/src/freenas/usr/local/share/python-gdb/libpython.py | #!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax
# compatible (2.6+ and 3.0+). See #19308.
from __future__ import print_function
import gdb
import os
import locale
import sys
if sys.version_info[0] >= 3:
    # Running under a Python 3 gdb: alias the Python 2 names this
    # dual-syntax module relies on to their Python 3 equivalents.
    unichr, xrange, long = chr, range, int
# Look up the gdb.Type for some standard types:
# Those need to be refreshed as types (pointer sizes) may change when
# gdb loads different executables
def _type_char_ptr():
    """gdb.Type for (char*); looked up afresh on each call (types can change
    when gdb loads a different executable)."""
    return gdb.lookup_type('char').pointer()  # char*
def _type_unsigned_char_ptr():
    """gdb.Type for (unsigned char*), re-resolved on each call."""
    return gdb.lookup_type('unsigned char').pointer()  # unsigned char*
def _type_unsigned_short_ptr():
    """gdb.Type for (unsigned short*), re-resolved on each call."""
    return gdb.lookup_type('unsigned short').pointer()
def _type_unsigned_int_ptr():
    """gdb.Type for (unsigned int*), re-resolved on each call."""
    return gdb.lookup_type('unsigned int').pointer()
def _sizeof_void_p():
    """Size in bytes of a pointer in the inferior process."""
    return gdb.lookup_type('void').pointer().sizeof
# value computed later, see PyUnicodeObjectPtr.proxy()
_is_pep393 = None
# Selected Py_TPFLAGS_* bit masks (mirroring CPython's Include/object.h),
# used by PyObjectPtr.subclass_from_type() to classify types via tp_flags:
Py_TPFLAGS_HEAPTYPE = (1 << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
# Cap on emitted repr length -- presumably the default maxlen passed to
# get_truncated_repr() further down the file (usage not visible here):
MAX_OUTPUT_LEN=1024
# Lowercase hex digits, used when escaping bytes in PyBytesObjectPtr.write_repr:
hexdigits = "0123456789abcdef"
# Encoding used by the Python 2 write_unicode() when encoding unicode text:
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
    """Raised by PyObjectPtr.field() when the underlying PyObject* is NULL."""
    pass
def safety_limit(val, limit=1000):
    """Clamp *val*, an integer scraped from the process being debugged,
    to at most *limit*.

    Arbitrary breakage within said process (corrupt sizes of iterations,
    sizes of lists) shouldn't break the gdb process too much, so cap the
    value at a safety threshold.  *limit* generalizes the previously
    hard-coded threshold of 1000 and defaults to it, so existing callers
    are unaffected.
    """
    return min(val, limit)
def safe_range(val):
    # As per range, but don't blindly trust a length read out of the
    # inferior process: clamp it first, in case the data was corrupted.
    capped = safety_limit(int(val))
    return xrange(capped)
if sys.version_info[0] >= 3:
    def write_unicode(file, text):
        # Python 3: write the text as-is; the file object handles encoding.
        file.write(text)
else:
    def write_unicode(file, text):
        # Write a byte or unicode string to file. Unicode strings are encoded to
        # ENCODING encoding with 'backslashreplace' error handler to avoid
        # UnicodeEncodeError.
        if isinstance(text, unicode):
            text = text.encode(ENCODING, 'backslashreplace')
        file.write(text)
try:
    os_fsencode = os.fsencode
except AttributeError:
    # Python 2 has no os.fsencode: emulate it, including the
    # 'surrogateescape' error handler behavior of PEP 383.
    def os_fsencode(filename):
        if not isinstance(filename, unicode):
            return filename
        encoding = sys.getfilesystemencoding()
        if encoding == 'mbcs':
            # mbcs doesn't support surrogateescape
            return filename.encode(encoding)
        encoded = []
        for char in filename:
            # surrogateescape error handler
            if 0xDC80 <= ord(char) <= 0xDCFF:
                # Lone low surrogate U+DC80..U+DCFF maps back to byte 0x80..0xFF:
                byte = chr(ord(char) - 0xDC00)
            else:
                byte = char.encode(encoding)
            encoded.append(byte)
        return ''.join(encoded)
class StringTruncated(RuntimeError):
    """Raised by TruncatedStringIO.write() once its maxlen would be exceeded."""
    pass
class TruncatedStringIO(object):
    '''A minimal write-only text buffer, similar to io.StringIO, except that
    once more than maxlen characters have been written it keeps only the
    first maxlen and raises StringTruncated.'''
    def __init__(self, maxlen=None):
        self._val = ''
        self.maxlen = maxlen
    def write(self, data):
        if self.maxlen:
            remaining = self.maxlen - len(self._val)
            if len(data) > remaining:
                # Keep what fits, then signal truncation to the caller:
                self._val += data[0:remaining]
                raise StringTruncated()
        self._val += data
    def getvalue(self):
        return self._val
class PyObjectPtr(object):
    """
    Class wrapping a gdb.Value that's either a (PyObject*) within the
    inferior process, or some subclass pointer e.g. (PyBytesObject*)
    There will be a subclass for every refined PyObject type that we care
    about.
    Note that at every stage the underlying pointer could be NULL, point
    to corrupt data, etc; this is the debugger, after all.
    """
    _typename = 'PyObject'
    def __init__(self, gdbval, cast_to=None):
        # Optionally refine the raw gdb.Value to a more specific pointer type:
        if cast_to:
            self._gdbval = gdbval.cast(cast_to)
        else:
            self._gdbval = gdbval
    def field(self, name):
        '''
        Get the gdb.Value for the given field within the PyObject, coping with
        some python 2 versus python 3 differences.
        Various libpython types are defined using the "PyObject_HEAD" and
        "PyObject_VAR_HEAD" macros.
        In Python 2, these are defined so that "ob_type" and (for a var
        object) "ob_size" are fields of the type in question.
        In Python 3, this is defined as an embedded PyVarObject type thus:
           PyVarObject ob_base;
        so that the "ob_size" field is located inside the "ob_base" field, and
        the "ob_type" is most easily accessed by casting back to a (PyObject*).
        '''
        if self.is_null():
            raise NullPyObjectPtr(self)
        if name == 'ob_type':
            pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
            return pyo_ptr.dereference()[name]
        if name == 'ob_size':
            pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
            return pyo_ptr.dereference()[name]
        # General case: look it up inside the object:
        return self._gdbval.dereference()[name]
    def pyop_field(self, name):
        '''
        Get a PyObjectPtr for the given PyObject* field within this PyObject,
        coping with some python 2 versus python 3 differences.
        '''
        return PyObjectPtr.from_pyobject_ptr(self.field(name))
    def write_field_repr(self, name, out, visited):
        '''
        Extract the PyObject* field named "name", and write its representation
        to file-like object "out"
        '''
        field_obj = self.pyop_field(name)
        field_obj.write_repr(out, visited)
    def get_truncated_repr(self, maxlen):
        '''
        Get a repr-like string for the data, but truncate it at "maxlen" bytes
        (ending the object graph traversal as soon as you do)
        '''
        out = TruncatedStringIO(maxlen)
        try:
            self.write_repr(out, set())
        except StringTruncated:
            # Truncation occurred:
            return out.getvalue() + '...(truncated)'
        # No truncation occurred:
        return out.getvalue()
    def type(self):
        # The object's PyTypeObject*, wrapped:
        return PyTypeObjectPtr(self.field('ob_type'))
    def is_null(self):
        return 0 == long(self._gdbval)
    def is_optimized_out(self):
        '''
        Is the value of the underlying PyObject* visible to the debugger?
        This can vary with the precise version of the compiler used to build
        Python, and the precise version of gdb.
        See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
        PyEval_EvalFrameEx's "f"
        '''
        return self._gdbval.is_optimized_out
    def safe_tp_name(self):
        # Best-effort name of the object's type; returns 'unknown' rather
        # than raising on NULL or unreadable data.
        try:
            return self.type().field('tp_name').string()
        except NullPyObjectPtr:
            # NULL tp_name?
            return 'unknown'
        except RuntimeError:
            # Can't even read the object at all?
            return 'unknown'
    def proxyval(self, visited):
        '''
        Scrape a value from the inferior process, and try to represent it
        within the gdb process, whilst (hopefully) avoiding crashes when
        the remote data is corrupt.
        Derived classes will override this.
        For example, a PyIntObject* with ob_ival 42 in the inferior process
        should result in an int(42) in this process.
        visited: a set of all gdb.Value pyobject pointers already visited
        whilst generating this value (to guard against infinite recursion when
        visiting object graphs with loops). Analogous to Py_ReprEnter and
        Py_ReprLeave
        '''
        class FakeRepr(object):
            """
            Class representing a non-descript PyObject* value in the inferior
            process for when we don't have a custom scraper, intended to have
            a sane repr().
            """
            def __init__(self, tp_name, address):
                self.tp_name = tp_name
                self.address = address
            def __repr__(self):
                # For the NULL pointer, we have no way of knowing a type, so
                # special-case it as per
                # http://bugs.python.org/issue8032#msg100882
                if self.address == 0:
                    return '0x0'
                return '<%s at remote 0x%x>' % (self.tp_name, self.address)
        return FakeRepr(self.safe_tp_name(),
                        long(self._gdbval))
    def write_repr(self, out, visited):
        '''
        Write a string representation of the value scraped from the inferior
        process to "out", a file-like object.
        '''
        # Default implementation: generate a proxy value and write its repr
        # However, this could involve a lot of work for complicated objects,
        # so for derived classes we specialize this
        return out.write(repr(self.proxyval(visited)))
    @classmethod
    def subclass_from_type(cls, t):
        '''
        Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
        (PyTypeObject*), determine the corresponding subclass of PyObjectPtr
        to use
        Ideally, we would look up the symbols for the global types, but that
        isn't working yet:
          (gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
          Traceback (most recent call last):
            File "<string>", line 1, in <module>
          NotImplementedError: Symbol type not yet supported in Python scripts.
          Error while executing Python code.
        For now, we use tp_flags, after doing some string comparisons on the
        tp_name for some special-cases that don't seem to be visible through
        flags
        '''
        try:
            tp_name = t.field('tp_name').string()
            tp_flags = int(t.field('tp_flags'))
        except RuntimeError:
            # Handle any kind of error e.g. NULL ptrs by simply using the base
            # class
            return cls
        #print('tp_flags = 0x%08x' % tp_flags)
        #print('tp_name = %r' % tp_name)
        # Special-cased type names not distinguishable via tp_flags:
        name_map = {'bool': PyBoolObjectPtr,
                    'classobj': PyClassObjectPtr,
                    'NoneType': PyNoneStructPtr,
                    'frame': PyFrameObjectPtr,
                    'set' : PySetObjectPtr,
                    'frozenset' : PySetObjectPtr,
                    'builtin_function_or_method' : PyCFunctionObjectPtr,
                    'method-wrapper': wrapperobject,
                    }
        if tp_name in name_map:
            return name_map[tp_name]
        # Otherwise classify by the Py_TPFLAGS_* subclass bits:
        if tp_flags & Py_TPFLAGS_HEAPTYPE:
            return HeapTypeObjectPtr
        if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
            return PyLongObjectPtr
        if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
            return PyListObjectPtr
        if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
            return PyTupleObjectPtr
        if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
            return PyBytesObjectPtr
        if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
            return PyUnicodeObjectPtr
        if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
            return PyDictObjectPtr
        if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
            return PyBaseExceptionObjectPtr
        #if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
        #    return PyTypeObjectPtr
        # Use the base class:
        return cls
    @classmethod
    def from_pyobject_ptr(cls, gdbval):
        '''
        Try to locate the appropriate derived class dynamically, and cast
        the pointer accordingly.
        '''
        try:
            p = PyObjectPtr(gdbval)
            cls = cls.subclass_from_type(p.type())
            return cls(gdbval, cast_to=cls.get_gdb_type())
        except RuntimeError:
            # Handle any kind of error e.g. NULL ptrs by simply using the base
            # class
            pass
        return cls(gdbval)
    @classmethod
    def get_gdb_type(cls):
        # Re-resolved on each call: types may change when gdb loads a
        # different executable.
        return gdb.lookup_type(cls._typename).pointer()
    def as_address(self):
        return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
    # Wrapper for (PyVarObject*); PyObjectPtr.field() casts to this type
    # to locate the 'ob_size' field.
    _typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
    '''
    Stand-in proxy emitted when the traversal revisits an object, protecting
    against infinite recursion due to loops in the object graph.
    Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
    '''
    def __init__(self, rendering):
        self._rendering = rendering
    def __repr__(self):
        return self._rendering
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
    '''Shared code for use by all classes:
    write an instance representation -- "<Name(attr=..., ...) at remote 0x...>"
    -- to file-like object "out".  "visited" is the cycle-guard set of
    addresses already seen during this traversal.'''
    out.write('<')
    out.write(name)
    # Write dictionary of instance attributes:
    if isinstance(pyop_attrdict, PyDictObjectPtr):
        out.write('(')
        first = True
        for pyop_arg, pyop_val in pyop_attrdict.iteritems():
            if not first:
                out.write(', ')
            first = False
            out.write(pyop_arg.proxyval(visited))
            out.write('=')
            pyop_val.write_repr(out, visited)
        out.write(')')
    out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
    """Proxy for a class instance in the inferior process: holds the class
    name, the attribute dictionary (when readable) and the remote address."""
    def __init__(self, cl_name, attrdict, address):
        self.cl_name = cl_name
        self.attrdict = attrdict
        self.address = address
    def __repr__(self):
        # Without a readable attribute dict, show just class name and address:
        if not isinstance(self.attrdict, dict):
            return '<%s at remote 0x%x>' % (self.cl_name,
                                            self.address)
        kwargs = ', '.join("%s=%r" % pair
                           for pair in self.attrdict.iteritems())
        return '<%s(%s) at remote 0x%x>' % (self.cl_name,
                                            kwargs, self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
    # Transliteration of CPython's _PyObject_VAR_SIZE macro:
    # tp_basicsize + nitems * tp_itemsize, rounded up to a multiple of
    # the pointer size.
    if _PyObject_VAR_SIZE._type_size_t is None:
        # Cache the size_t type lookup on the function object itself:
        _PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')
    return ( ( typeobj.field('tp_basicsize') +
               nitems * typeobj.field('tp_itemsize') +
               (_sizeof_void_p() - 1)
             ) & ~(_sizeof_void_p() - 1)
           ).cast(_PyObject_VAR_SIZE._type_size_t)
# Lazily-initialized cache slot for the size_t gdb.Type:
_PyObject_VAR_SIZE._type_size_t = None
class HeapTypeObjectPtr(PyObjectPtr):
    # Wrapper for instances of heap-allocated (user-defined) types.
    _typename = 'PyObject'
    def get_attr_dict(self):
        '''
        Get the PyDictObject ptr representing the attribute dictionary
        (or None if there's a problem)
        '''
        try:
            typeobj = self.type()
            dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
            if dictoffset != 0:
                if dictoffset < 0:
                    # Negative tp_dictoffset: the dict pointer lives at the
                    # end of the variable-sized object, so compute the
                    # object's full size first:
                    type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
                    tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
                    if tsize < 0:
                        tsize = -tsize
                    size = _PyObject_VAR_SIZE(typeobj, tsize)
                    dictoffset += size
                    assert dictoffset > 0
                    assert dictoffset % _sizeof_void_p() == 0
                # The (PyObject*) dict pointer is dictoffset bytes into the object:
                dictptr = self._gdbval.cast(_type_char_ptr()) + dictoffset
                PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
                dictptr = dictptr.cast(PyObjectPtrPtr)
                return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
        except RuntimeError:
            # Corrupt data somewhere; fail safe
            pass
        # Not found, or some kind of error:
        return None
    def proxyval(self, visited):
        '''
        Support for classes.
        Currently we just locate the dictionary using a transliteration to
        python of _PyObject_GetDictPtr, ignoring descriptors
        '''
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('<...>')
        visited.add(self.as_address())
        pyop_attr_dict = self.get_attr_dict()
        if pyop_attr_dict:
            attr_dict = pyop_attr_dict.proxyval(visited)
        else:
            attr_dict = {}
        tp_name = self.safe_tp_name()
        # Class:
        return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('<...>')
            return
        visited.add(self.as_address())
        pyop_attrdict = self.get_attr_dict()
        _write_instance_repr(out, visited,
                             self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
    """Proxy for an exception instance scraped from the inferior process,
    rendered as "TypeName(args...)"."""
    def __init__(self, tp_name, args):
        self.args = args
        self.tp_name = tp_name
    def __repr__(self):
        return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
    within the process being debugged.
    """
    _typename = 'PyBaseExceptionObject'
    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('(...)')
        visited.add(self.as_address())
        arg_proxy = self.pyop_field('args').proxyval(visited)
        return ProxyException(self.safe_tp_name(),
                              arg_proxy)
    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())
        # Rendered as the type name followed by the repr of the args tuple:
        out.write(self.safe_tp_name())
        self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
    instance within the process being debugged.
    """
    # Selected via the 'classobj' entry in subclass_from_type's name_map.
    _typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
    """Proxy for a PyCFunction with no m_self (a plain built-in function)."""
    def __init__(self, ml_name):
        self.ml_name = ml_name
    def __repr__(self):
        return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
    """Proxy for a PyCFunction bound to an m_self object (a built-in method)."""
    def __init__(self, ml_name, pyop_m_self):
        self.ml_name = ml_name
        self.pyop_m_self = pyop_m_self
    def __repr__(self):
        owner = self.pyop_m_self
        return ('<built-in method %s of %s object at remote 0x%x>'
                % (self.ml_name, owner.safe_tp_name(), owner.as_address()))
class PyCFunctionObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyCFunctionObject*
    (see Include/methodobject.h and Objects/methodobject.c)
    """
    _typename = 'PyCFunctionObject'
    def proxyval(self, visited):
        m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
        ml_name = m_ml['ml_name'].string()
        pyop_m_self = self.pyop_field('m_self')
        # A NULL m_self means a free-standing built-in function; otherwise
        # this is a method bound to the m_self object:
        if pyop_m_self.is_null():
            return BuiltInFunctionProxy(ml_name)
        else:
            return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
    within the process being debugged.
    """
    _typename = 'PyCodeObject'
    def addr2line(self, addrq):
        '''
        Get the line number for a given bytecode offset
        Analogous to PyCode_Addr2Line; translated from pseudocode in
        Objects/lnotab_notes.txt
        '''
        co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
        # Initialize lineno to co_firstlineno as per PyCode_Addr2Line
        # not 0, as lnotab_notes.txt has it:
        lineno = int_from_int(self.field('co_firstlineno'))
        addr = 0
        # lnotab is a flat sequence of (bytecode delta, line delta) byte pairs:
        for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
            addr += ord(addr_incr)
            if addr > addrq:
                return lineno
            lineno += ord(line_incr)
        return lineno
class PyDictObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
    within the process being debugged.
    """
    _typename = 'PyDictObject'
    def iteritems(self):
        '''
        Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
        analogous to dict.iteritems()
        '''
        keys = self.field('ma_keys')
        values = self.field('ma_values')
        entries, nentries = self._get_entries(keys)
        for i in safe_range(nentries):
            ep = entries[i]
            if long(values):
                # Non-NULL ma_values: values are stored in that separate array:
                pyop_value = PyObjectPtr.from_pyobject_ptr(values[i])
            else:
                # Otherwise the value lives in the entry itself:
                pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
            # A NULL value marks an unused/deleted entry:
            if not pyop_value.is_null():
                pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
                yield (pyop_key, pyop_value)
    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('{...}')
        visited.add(self.as_address())
        result = {}
        for pyop_key, pyop_value in self.iteritems():
            proxy_key = pyop_key.proxyval(visited)
            proxy_value = pyop_value.proxyval(visited)
            result[proxy_key] = proxy_value
        return result
    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('{...}')
            return
        visited.add(self.as_address())
        out.write('{')
        first = True
        for pyop_key, pyop_value in self.iteritems():
            if not first:
                out.write(', ')
            first = False
            pyop_key.write_repr(out, visited)
            out.write(': ')
            pyop_value.write_repr(out, visited)
        out.write('}')
    def _get_entries(self, keys):
        # Locate the PyDictKeyEntry array and its length within ma_keys,
        # handling the dict-keys layout change introduced in Python 3.6.
        dk_nentries = int(keys['dk_nentries'])
        dk_size = int(keys['dk_size'])
        try:
            # <= Python 3.5
            return keys['dk_entries'], dk_size
        except gdb.error:
            # >= Python 3.6
            pass
        # 3.6+: entries follow the dk_indices table, whose element width
        # (1/2/4/8 bytes) depends on dk_size:
        if dk_size <= 0xFF:
            offset = dk_size
        elif dk_size <= 0xFFFF:
            offset = 2 * dk_size
        elif dk_size <= 0xFFFFFFFF:
            offset = 4 * dk_size
        else:
            offset = 8 * dk_size
        ent_addr = keys['dk_indices']['as_1'].address
        ent_addr = ent_addr.cast(_type_unsigned_char_ptr()) + offset
        ent_ptr_t = gdb.lookup_type('PyDictKeyEntry').pointer()
        ent_addr = ent_addr.cast(ent_ptr_t)
        return ent_addr, dk_nentries
class PyListObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyListObject* i.e. a list instance
    within the process being debugged.
    """
    _typename = 'PyListObject'
    def __getitem__(self, i):
        # Get the gdb.Value for the (PyObject*) with the given index:
        field_ob_item = self.field('ob_item')
        return field_ob_item[i]
    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('[...]')
        visited.add(self.as_address())
        result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
                  for i in safe_range(int_from_int(self.field('ob_size')))]
        return result
    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('[...]')
            return
        visited.add(self.as_address())
        out.write('[')
        for i in safe_range(int_from_int(self.field('ob_size'))):
            if i > 0:
                out.write(', ')
            element = PyObjectPtr.from_pyobject_ptr(self[i])
            element.write_repr(out, visited)
        out.write(']')
class PyLongObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyLongObject* i.e. an int
    within the process being debugged.
    """
    _typename = 'PyLongObject'
    def proxyval(self, visited):
        '''
        Python's Include/longobjrep.h has this declaration:
            struct _longobject {
                PyObject_VAR_HEAD
                digit ob_digit[1];
            };
        with this description:
            The absolute value of a number is equal to
                SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
            Negative numbers are represented with ob_size < 0;
            zero is represented by ob_size == 0.
        where SHIFT can be either:
            #define PyLong_SHIFT 30
            #define PyLong_SHIFT 15
        '''
        ob_size = long(self.field('ob_size'))
        if ob_size == 0:
            return 0
        ob_digit = self.field('ob_digit')
        # The digit's C size tells us which PyLong_SHIFT the build uses:
        if gdb.lookup_type('digit').sizeof == 2:
            SHIFT = 15
        else:
            SHIFT = 30
        digits = [long(ob_digit[i]) * 2**(SHIFT*i)
                  for i in safe_range(abs(ob_size))]
        result = sum(digits)
        if ob_size < 0:
            result = -result
        return result
    def write_repr(self, out, visited):
        # Write this out as a Python 3 int literal, i.e. without the "L" suffix
        proxy = self.proxyval(visited)
        out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
    <bool> instances (Py_True/Py_False) within the process being debugged.
    """
    def proxyval(self, visited):
        # The underlying payload is a PyLongObject; coerce its integer
        # value to the corresponding bool.
        return bool(PyLongObjectPtr.proxyval(self, visited))
class PyNoneStructPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyObject* pointing to the
    singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
    """
    _typename = 'PyObject'
    def proxyval(self, visited):
        # The only sensible proxy for Py_None is None itself:
        return None
class PyFrameObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyFrameObject* i.e. an execution
    frame within the process being debugged.
    """
    _typename = 'PyFrameObject'
    def __init__(self, gdbval, cast_to=None):
        PyObjectPtr.__init__(self, gdbval, cast_to)
        if not self.is_optimized_out():
            # Cache frequently-used fields of the frame and its code object:
            self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
            self.co_name = self.co.pyop_field('co_name')
            self.co_filename = self.co.pyop_field('co_filename')
            self.f_lineno = int_from_int(self.field('f_lineno'))
            self.f_lasti = int_from_int(self.field('f_lasti'))
            self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
            self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
    def iter_locals(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the local variables of this frame
        '''
        if self.is_optimized_out():
            return
        f_localsplus = self.field('f_localsplus')
        for i in safe_range(self.co_nlocals):
            pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
            # NULL slots are locals that have no value bound (yet):
            if not pyop_value.is_null():
                pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
                yield (pyop_name, pyop_value)
    def iter_globals(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the global variables of this frame
        '''
        if self.is_optimized_out():
            return ()
        pyop_globals = self.pyop_field('f_globals')
        return pyop_globals.iteritems()
    def iter_builtins(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the builtin variables
        '''
        if self.is_optimized_out():
            return ()
        pyop_builtins = self.pyop_field('f_builtins')
        return pyop_builtins.iteritems()
    def get_var_by_name(self, name):
        '''
        Look for the named local variable, returning a (PyObjectPtr, scope) pair
        where scope is a string 'local', 'global', 'builtin'
        If not found, return (None, None)
        '''
        # Search scopes in the same order name resolution would:
        for pyop_name, pyop_value in self.iter_locals():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'local'
        for pyop_name, pyop_value in self.iter_globals():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'global'
        for pyop_name, pyop_value in self.iter_builtins():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'builtin'
        return None, None
    def filename(self):
        '''Get the path of the current Python source file, as a string'''
        if self.is_optimized_out():
            return '(frame information optimized out)'
        return self.co_filename.proxyval(set())
    def current_line_num(self):
        '''Get current line number as an integer (1-based)
        Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
        See Objects/lnotab_notes.txt
        '''
        if self.is_optimized_out():
            return None
        f_trace = self.field('f_trace')
        if long(f_trace) != 0:
            # we have a non-NULL f_trace:
            return self.f_lineno
        else:
            #try:
            return self.co.addr2line(self.f_lasti)
            #except ValueError:
            #    return self.f_lineno
    def current_line(self):
        '''Get the text of the current source line as a string, with a trailing
        newline character'''
        if self.is_optimized_out():
            return '(frame information optimized out)'
        filename = self.filename()
        try:
            f = open(os_fsencode(filename), 'r')
        except IOError:
            # Source file unavailable on this machine:
            return None
        with f:
            all_lines = f.readlines()
            # Convert from 1-based current_line_num to 0-based list offset:
            return all_lines[self.current_line_num()-1]
    def write_repr(self, out, visited):
        if self.is_optimized_out():
            out.write('(frame information optimized out)')
            return
        out.write('Frame 0x%x, for file %s, line %i, in %s ('
                  % (self.as_address(),
                     self.co_filename.proxyval(visited),
                     self.current_line_num(),
                     self.co_name.proxyval(visited)))
        first = True
        for pyop_name, pyop_value in self.iter_locals():
            if not first:
                out.write(', ')
            first = False
            out.write(pyop_name.proxyval(visited))
            out.write('=')
            pyop_value.write_repr(out, visited)
        out.write(')')
    def print_traceback(self):
        # Emit one traceback line for this frame to stdout:
        if self.is_optimized_out():
            sys.stdout.write(' (frame information optimized out)\n')
            return
        visited = set()
        sys.stdout.write(' File "%s", line %i, in %s\n'
                         % (self.co_filename.proxyval(visited),
                            self.current_line_num(),
                            self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PySetObject* i.e. a set or frozenset
    instance within the process being debugged.
    """
    _typename = 'PySetObject'
    @classmethod
    def _dummy_key(self):
        # The sentinel that setobject.c stores in deleted entries:
        return gdb.lookup_global_symbol('_PySet_Dummy').value()
    def __iter__(self):
        dummy_ptr = self._dummy_key()
        table = self.field('table')
        for i in safe_range(self.field('mask') + 1):
            setentry = table[i]
            key = setentry['key']
            # Skip empty (NULL) and deleted (dummy) slots:
            if key != 0 and key != dummy_ptr:
                yield PyObjectPtr.from_pyobject_ptr(key)
    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
        visited.add(self.as_address())
        members = (key.proxyval(visited) for key in self)
        if self.safe_tp_name() == 'frozenset':
            return frozenset(members)
        else:
            return set(members)
    def write_repr(self, out, visited):
        # Emulate Python 3's set_repr
        tp_name = self.safe_tp_name()
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())
        # Python 3's set_repr special-cases the empty set:
        if not self.field('used'):
            out.write(tp_name)
            out.write('()')
            return
        # Python 3 uses {} for set literals:
        if tp_name != 'set':
            out.write(tp_name)
            out.write('(')
        out.write('{')
        first = True
        for key in self:
            if not first:
                out.write(', ')
            first = False
            key.write_repr(out, visited)
        out.write('}')
        if tp_name != 'set':
            out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBytesObject* i.e. a bytes instance
    within the process being debugged.
    """
    _typename = 'PyBytesObject'
    def __str__(self):
        field_ob_size = self.field('ob_size')
        field_ob_sval = self.field('ob_sval')
        char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr())
        # Read ob_size bytes out of the inferior, one char at a time:
        return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
    def proxyval(self, visited):
        return str(self)
    def write_repr(self, out, visited):
        # Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
        # Get a PyStringObject* within the Python 2 gdb process:
        proxy = self.proxyval(visited)
        # Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
        # to Python 2 code:
        quote = "'"
        if "'" in proxy and not '"' in proxy:
            quote = '"'
        out.write('b')
        out.write(quote)
        for byte in proxy:
            if byte == quote or byte == '\\':
                out.write('\\')
                out.write(byte)
            elif byte == '\t':
                out.write('\\t')
            elif byte == '\n':
                out.write('\\n')
            elif byte == '\r':
                out.write('\\r')
            elif byte < ' ' or ord(byte) >= 0x7f:
                # Non-printable byte: emit a \xNN hex escape:
                out.write('\\x')
                out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
                out.write(hexdigits[ord(byte) & 0xf])
            else:
                out.write(byte)
        out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
    """Wrapper around a gdb.Value that is a (PyTupleObject*)."""

    _typename = 'PyTupleObject'

    def __getitem__(self, i):
        # The gdb.Value for the (PyObject*) stored at index i:
        return self.field('ob_item')[i]

    def proxyval(self, visited):
        """Build a local tuple of proxy values for the inferior's tuple."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('(...)')
        visited.add(self.as_address())

        size = int_from_int(self.field('ob_size'))
        return tuple(PyObjectPtr.from_pyobject_ptr(self[idx]).proxyval(visited)
                     for idx in safe_range(size))

    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())

        out.write('(')
        for idx in safe_range(int_from_int(self.field('ob_size'))):
            if idx > 0:
                out.write(', ')
            PyObjectPtr.from_pyobject_ptr(self[idx]).write_repr(out, visited)
        # A one-element tuple needs its trailing comma:
        out.write(',)' if self.field('ob_size') == 1 else ')')
class PyTypeObjectPtr(PyObjectPtr):
    """Wrapper around a gdb.Value that is a (PyTypeObject*).

    No behavior beyond the PyObjectPtr base; exists so pointer values of
    this C type are classified correctly.
    """
    _typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
    # Wide unicode build: unichr() covers every code point directly.
    _unichr = unichr
else:
    # Narrow build (sizeof(Py_UNICODE) == 2 in gdb): unichr() only covers
    # the BMP, so synthesize a surrogate pair for astral code points.
    def _unichr(x):
        if x < 0x10000:
            return unichr(x)
        x -= 0x10000
        high = 0xD800 | (x >> 10)
        low = 0xDC00 | (x & 0x3FF)
        return unichr(high) + unichr(low)
class PyUnicodeObjectPtr(PyObjectPtr):
    """Wrapper around a gdb.Value that is a (PyUnicodeObject*).

    Handles both the pre-3.3 Py_UNICODE layout and the PEP 393
    compact/flexible layout (Python 3.3+).
    """
    _typename = 'PyUnicodeObject'

    def char_width(self):
        # sizeof(Py_UNICODE) in the inferior: 2 on narrow builds, 4 on wide.
        _type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
        return _type_Py_UNICODE.sizeof

    def proxyval(self, visited):
        """Build a local unicode instance mirroring the inferior's string."""
        global _is_pep393
        if _is_pep393 is None:
            # Detect the PEP 393 layout once, by checking whether the struct
            # has a 'data' field (Python 3.3+).
            fields = gdb.lookup_type('PyUnicodeObject').target().fields()
            _is_pep393 = 'data' in [f.name for f in fields]
        if _is_pep393:
            # Python 3.3 and newer
            may_have_surrogates = False
            compact = self.field('_base')
            ascii = compact['_base']
            state = ascii['state']
            is_compact_ascii = (int(state['ascii']) and int(state['compact']))
            if not int(state['ready']):
                # string is not ready: only the wchar_t representation
                # (wstr) exists, which may contain surrogate pairs.
                field_length = long(compact['wstr_length'])
                may_have_surrogates = True
                field_str = ascii['wstr']
            else:
                field_length = long(ascii['length'])
                if is_compact_ascii:
                    # Character data immediately follows the ASCII header.
                    field_str = ascii.address + 1
                elif int(state['compact']):
                    # Character data immediately follows the compact header.
                    field_str = compact.address + 1
                else:
                    # Non-compact: data lives behind a separate pointer.
                    field_str = self.field('data')['any']
                # kind is the per-character width: 1 (UCS-1), 2 (UCS-2)
                # or 4 (UCS-4).
                repr_kind = int(state['kind'])
                if repr_kind == 1:
                    field_str = field_str.cast(_type_unsigned_char_ptr())
                elif repr_kind == 2:
                    field_str = field_str.cast(_type_unsigned_short_ptr())
                elif repr_kind == 4:
                    field_str = field_str.cast(_type_unsigned_int_ptr())
        else:
            # Python 3.2 and earlier: a flat Py_UNICODE array.
            field_length = long(self.field('length'))
            field_str = self.field('str')
            # Narrow builds store astral characters as surrogate pairs.
            may_have_surrogates = self.char_width() == 2

        # Gather a list of ints from the Py_UNICODE array; these are either
        # UCS-1, UCS-2 or UCS-4 code points:
        if not may_have_surrogates:
            Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
        else:
            # A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
            # inferior process: we must join surrogate pairs.
            Py_UNICODEs = []
            i = 0
            limit = safety_limit(field_length)
            while i < limit:
                ucs = int(field_str[i])
                i += 1
                if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
                    # Not a high surrogate (or no room for a pair): take as-is.
                    Py_UNICODEs.append(ucs)
                    continue

                # This could be a surrogate pair.
                ucs2 = int(field_str[i])
                if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
                    # Lone high surrogate: dropped.
                    continue
                code = (ucs & 0x03FF) << 10
                code |= ucs2 & 0x03FF
                code += 0x00010000
                Py_UNICODEs.append(code)
                i += 1

        # Convert the int code points to unicode characters, and generate a
        # local unicode instance.
        # This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
        # NOTE(review): the replacement-character literal lacks a u prefix,
        # so under Python 2 it is the six-char str '\ufffd'; it only works
        # here by coercion during the u''.join — confirm intended.
        result = u''.join([
            (_unichr(ucs) if ucs <= 0x10ffff else '\ufffd')
            for ucs in Py_UNICODEs])
        return result

    def write_repr(self, out, visited):
        """Write this out as a Python 3 str literal, i.e. without a "u" prefix.

        Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
        to Python 2 code.
        """
        # Get a PyUnicodeObject* within the Python 2 gdb process:
        proxy = self.proxyval(visited)

        # Prefer single quotes; use double quotes only when the string
        # contains a single quote but no double quote.
        if "'" in proxy and '"' not in proxy:
            quote = '"'
        else:
            quote = "'"
        out.write(quote)
        i = 0
        while i < len(proxy):
            ch = proxy[i]
            i += 1

            # Escape quotes and backslashes
            if ch == quote or ch == '\\':
                out.write('\\')
                out.write(ch)

            # Map special whitespace to '\t', \n', '\r'
            elif ch == '\t':
                out.write('\\t')
            elif ch == '\n':
                out.write('\\n')
            elif ch == '\r':
                out.write('\\r')

            # Map non-printable US ASCII to '\xhh' */
            # NOTE(review): `ch == 0x7F` compares a unicode char to an int
            # and can never be true, so DEL falls through to the non-ASCII
            # branch below — looks like it should be ord(ch) == 0x7F;
            # confirm against upstream before changing.
            elif ch < ' ' or ch == 0x7F:
                out.write('\\x')
                out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
                out.write(hexdigits[ord(ch) & 0x000F])

            # Copy ASCII characters as-is
            elif ord(ch) < 0x7F:
                out.write(ch)

            # Non-ASCII characters
            else:
                ucs = ch
                ch2 = None

                if sys.maxunicode < 0x10000:
                    # If sizeof(Py_UNICODE) is 2 here (in gdb), join
                    # surrogate pairs before calling _unichr_is_printable.
                    if (i < len(proxy)
                        and 0xD800 <= ord(ch) < 0xDC00 \
                        and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
                        ch2 = proxy[i]
                        ucs = ch + ch2
                        i += 1

                # Unfortunately, Python 2's unicode type doesn't seem
                # to expose the "isprintable" method
                printable = _unichr_is_printable(ucs)
                if printable:
                    # Even a "printable" character must be escapable in the
                    # target encoding to be emitted literally.
                    try:
                        ucs.encode(ENCODING)
                    except UnicodeEncodeError:
                        printable = False

                # Map Unicode whitespace and control characters
                # (categories Z* and C* except ASCII space)
                if not printable:
                    if ch2 is not None:
                        # Match Python 3's representation of non-printable
                        # wide characters: recombine the surrogate pair into
                        # a single code point.
                        code = (ord(ch) & 0x03FF) << 10
                        code |= ord(ch2) & 0x03FF
                        code += 0x00010000
                    else:
                        code = ord(ucs)

                    # Map 8-bit characters to '\\xhh'
                    if code <= 0xff:
                        out.write('\\x')
                        out.write(hexdigits[(code >> 4) & 0x000F])
                        out.write(hexdigits[code & 0x000F])
                    # Map 21-bit characters to '\U00xxxxxx'
                    elif code >= 0x10000:
                        out.write('\\U')
                        out.write(hexdigits[(code >> 28) & 0x0000000F])
                        out.write(hexdigits[(code >> 24) & 0x0000000F])
                        out.write(hexdigits[(code >> 20) & 0x0000000F])
                        out.write(hexdigits[(code >> 16) & 0x0000000F])
                        out.write(hexdigits[(code >> 12) & 0x0000000F])
                        out.write(hexdigits[(code >> 8) & 0x0000000F])
                        out.write(hexdigits[(code >> 4) & 0x0000000F])
                        out.write(hexdigits[code & 0x0000000F])
                    # Map 16-bit characters to '\uxxxx'
                    else:
                        out.write('\\u')
                        out.write(hexdigits[(code >> 12) & 0x000F])
                        out.write(hexdigits[(code >> 8) & 0x000F])
                        out.write(hexdigits[(code >> 4) & 0x000F])
                        out.write(hexdigits[code & 0x000F])
                else:
                    # Copy characters as-is
                    out.write(ch)
                    if ch2 is not None:
                        out.write(ch2)
        out.write(quote)
class wrapperobject(PyObjectPtr):
    """Wrapper around a gdb.Value that is a (wrapperobject*), i.e. a bound
    slot wrapper such as str.__repr__ of some instance."""

    _typename = 'wrapperobject'

    def safe_name(self):
        """repr() of the wrapped slot's name, or a placeholder on failure."""
        try:
            return repr(self.field('descr')['d_base']['name'].string())
        except (NullPyObjectPtr, RuntimeError):
            return '<unknown name>'

    def safe_tp_name(self):
        """Type name of the bound instance, or a placeholder on failure."""
        try:
            return self.field('self')['ob_type']['tp_name'].string()
        except (NullPyObjectPtr, RuntimeError):
            return '<unknown tp_name>'

    # Note: the trailing-"sss" spelling is kept as-is; other code may call it.
    def safe_self_addresss(self):
        """Hex address of the bound instance, or a placeholder on failure."""
        try:
            return '%#x' % long(self.field('self'))
        except (NullPyObjectPtr, RuntimeError):
            return '<failed to get self address>'

    def proxyval(self, visited):
        return ("<method-wrapper %s of %s object at %s>"
                % (self.safe_name(),
                   self.safe_tp_name(),
                   self.safe_self_addresss()))

    def write_repr(self, out, visited):
        out.write(self.proxyval(visited))
def int_from_int(gdbval):
    """Convert a gdb.Value (or anything whose str() is an integer) to int."""
    as_text = str(gdbval)
    return int(as_text)
def stringify(val):
    """Return a one-line string form of *val*.

    TODO: repr() puts everything on one line; pprint.pformat can be nicer,
    but can lead to very long results; this function isolates the choice.
    (The original kept an unreachable pformat branch behind "if True:";
    switch to pprint.pformat here if multi-line output is ever preferred.)
    """
    return repr(val)
class PyObjectPtrPrinter:
    "Prints a (PyObject*)"

    def __init__(self, gdbval):
        # The raw gdb.Value being pretty-printed.
        self.gdbval = gdbval

    def to_string(self):
        pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
        # Use the truncated repr to keep output bounded; generating the full
        # proxy value (pyop.proxyval(set())) and stringifying it could be
        # expensive.
        return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
def pretty_printer_lookup(gdbval):
    """gdb pretty-printer hook: return a PyObjectPtrPrinter for pointers to
    known CPython object structs, or None for everything else."""
    ptr_type = gdbval.type.unqualified()
    if ptr_type.code != gdb.TYPE_CODE_PTR:
        return None

    pointee_name = str(ptr_type.target().unqualified())
    if pointee_name in ("PyObject", "PyFrameObject",
                        "PyUnicodeObject", "wrapperobject"):
        return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register(obj):
    """Append our pretty-printer lookup to *obj*'s printer list (or to the
    global gdb printer list when *obj* is None)."""
    target = gdb if obj is None else obj
    # Wire up the pretty-printer
    target.pretty_printers.append(pretty_printer_lookup)

register(gdb.current_objfile())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
    '''
    Wrapper for gdb.Frame, adding various methods
    '''
    def __init__(self, gdbframe):
        # The underlying gdb.Frame being wrapped.
        self._gdbframe = gdbframe

    def older(self):
        """Return the caller's Frame, or None if this is the outermost frame."""
        older = self._gdbframe.older()
        if older:
            return Frame(older)
        else:
            return None

    def newer(self):
        """Return the callee's Frame, or None if this is the newest frame."""
        newer = self._gdbframe.newer()
        if newer:
            return Frame(newer)
        else:
            return None

    def select(self):
        '''If supported, select this frame and return True; return False if unsupported

        Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
        onwards, but absent on Ubuntu buildbot'''
        if not hasattr(self._gdbframe, 'select'):
            print ('Unable to select frame: '
                   'this build of gdb does not expose a gdb.Frame.select method')
            return False
        self._gdbframe.select()
        return True

    def get_index(self):
        '''Calculate index of frame, starting at 0 for the newest frame within
        this thread'''
        index = 0
        # Go down until you reach the newest frame:
        iter_frame = self
        while iter_frame.newer():
            index += 1
            iter_frame = iter_frame.newer()
        return index

    # We divide frames into:
    #   - "python frames":
    #       - "bytecode frames" i.e. PyEval_EvalFrameEx
    #       - "other python frames": things that are of interest from a python
    #         POV, but aren't bytecode (e.g. GC, GIL)
    #   - everything else

    def is_python_frame(self):
        '''Is this a PyEval_EvalFrameEx frame, or some other important
        frame? (see is_other_python_frame for what "important" means in this
        context)'''
        if self.is_evalframeex():
            return True
        if self.is_other_python_frame():
            return True
        return False

    def is_evalframeex(self):
        '''Is this a PyEval_EvalFrameEx frame?'''
        if self._gdbframe.name() == 'PyEval_EvalFrameEx':
            '''
            I believe we also need to filter on the inline
            struct frame_id.inline_depth, only regarding frames with
            an inline depth of 0 as actually being this function

            So we reject those with type gdb.INLINE_FRAME
            '''
            if self._gdbframe.type() == gdb.NORMAL_FRAME:
                # We have a PyEval_EvalFrameEx frame:
                return True

        return False

    def is_other_python_frame(self):
        '''Is this frame worth displaying in python backtraces?
        Examples:
          - waiting on the GIL
          - garbage-collecting
          - within a CFunction

        If it is, return a descriptive string
        For other frames, return False
        '''
        if self.is_waiting_for_gil():
            return 'Waiting for the GIL'

        if self.is_gc_collect():
            return 'Garbage-collecting'

        # Detect invocations of PyCFunction instances:
        frame = self._gdbframe
        caller = frame.name()
        if not caller:
            return False

        if caller in ('_PyCFunction_FastCallDict',
                      '_PyCFunction_FastCallKeywords'):
            arg_name = 'func'
            # Within that frame:
            #   "func" is the local containing the PyObject* of the
            #   PyCFunctionObject instance
            #   "f" is the same value, but cast to (PyCFunctionObject*)
            #   "self" is the (PyObject*) of the 'self'
            try:
                # Use the prettyprinter for the func:
                func = frame.read_var(arg_name)
                return str(func)
            except RuntimeError:
                return 'PyCFunction invocation (unable to read %s)' % arg_name

        if caller == 'wrapper_call':
            try:
                func = frame.read_var('wp')
                return str(func)
            except RuntimeError:
                return '<wrapper_call invocation>'

        # This frame isn't worth reporting:
        return False

    def is_waiting_for_gil(self):
        '''Is this frame waiting on the GIL?'''
        # This assumes the _POSIX_THREADS version of Python/ceval_gil.h:
        name = self._gdbframe.name()
        if name:
            return 'pthread_cond_timedwait' in name
        # Implicitly returns None (falsy) when the frame has no name.

    def is_gc_collect(self):
        '''Is this frame "collect" within the garbage-collector?'''
        return self._gdbframe.name() == 'collect'

    def get_pyop(self):
        """Return a PyFrameObjectPtr for this frame's "f" argument, falling
        back to the caller's "f" when gdb optimized ours out; None on failure."""
        try:
            f = self._gdbframe.read_var('f')
            frame = PyFrameObjectPtr.from_pyobject_ptr(f)
            if not frame.is_optimized_out():
                return frame
            # gdb is unable to get the "f" argument of PyEval_EvalFrameEx()
            # because it was "optimized out". Try to get "f" from the frame
            # of the caller, PyEval_EvalCodeEx().
            orig_frame = frame
            caller = self._gdbframe.older()
            if caller:
                f = caller.read_var('f')
                frame = PyFrameObjectPtr.from_pyobject_ptr(f)
                if not frame.is_optimized_out():
                    return frame
            return orig_frame
        except ValueError:
            return None

    @classmethod
    def get_selected_frame(cls):
        # Wrap gdb's currently-selected frame, or return None.
        _gdbframe = gdb.selected_frame()
        if _gdbframe:
            return Frame(_gdbframe)
        return None

    @classmethod
    def get_selected_python_frame(cls):
        '''Try to obtain the Frame for the python-related code in the selected
        frame, or None'''
        try:
            frame = cls.get_selected_frame()
        except gdb.error:
            # No frame: Python didn't start yet
            return None

        # Walk outwards until a python-relevant frame is found:
        while frame:
            if frame.is_python_frame():
                return frame
            frame = frame.older()

        # Not found:
        return None

    @classmethod
    def get_selected_bytecode_frame(cls):
        '''Try to obtain the Frame for the python bytecode interpreter in the
        selected GDB frame, or None'''
        frame = cls.get_selected_frame()

        while frame:
            if frame.is_evalframeex():
                return frame
            frame = frame.older()

        # Not found:
        return None

    def print_summary(self):
        """Print a one-line "#N ..." summary of this frame (plus the current
        source line when readable), as used by py-bt-full."""
        if self.is_evalframeex():
            pyop = self.get_pyop()
            if pyop:
                line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
                write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write(' %s\n' % line.strip())
            else:
                sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write('#%i %s\n' % (self.get_index(), info))
            else:
                sys.stdout.write('#%i\n' % self.get_index())

    def print_traceback(self):
        """Print a CPython-traceback-style entry for this frame, as used by
        py-bt."""
        if self.is_evalframeex():
            pyop = self.get_pyop()
            if pyop:
                pyop.print_traceback()
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write(' %s\n' % line.strip())
            else:
                sys.stdout.write(' (unable to read python frame information)\n')
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write(' %s\n' % info)
            else:
                sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
    '''List the current Python source code, if any

    Use
       py-list START
    to list at a different line number within the python source.

    Use
       py-list START, END
    to list a specific range of lines within the python source.
    '''

    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-list",
                              gdb.COMMAND_FILES,
                              gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        import re

        start = None
        end = None

        # "py-list N": list ten lines starting at N.
        m = re.match(r'\s*(\d+)\s*', args)
        if m:
            start = int(m.group(0))
            end = start + 10

        # "py-list N, M": list the closed range [N, M] (overrides the above).
        m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
        if m:
            start, end = map(int, m.groups())

        # py-list requires an actual PyEval_EvalFrameEx frame:
        frame = Frame.get_selected_bytecode_frame()
        if not frame:
            print('Unable to locate gdb frame for python bytecode interpreter')
            return

        pyop = frame.get_pyop()
        if not pyop or pyop.is_optimized_out():
            print('Unable to read information on python frame')
            return

        filename = pyop.filename()
        lineno = pyop.current_line_num()

        if start is None:
            # No argument: center the listing on the current line.
            start = lineno - 5
            end = lineno + 5

        if start<1:
            start = 1

        try:
            f = open(os_fsencode(filename), 'r')
        except IOError as err:
            sys.stdout.write('Unable to open %s: %s\n'
                             % (filename, err))
            return
        with f:
            all_lines = f.readlines()
            # start and end are 1-based, all_lines is 0-based;
            # so [start-1:end] as a python slice gives us [start, end] as a
            # closed interval
            for i, line in enumerate(all_lines[start-1:end]):
                linestr = str(i+start)
                # Highlight current line:
                if i + start == lineno:
                    linestr = '>' + linestr
                sys.stdout.write('%4s %s' % (linestr, line))

# ...and register the command:
PyList()
def move_in_stack(move_up):
    '''Move up or down the stack (for the py-up/py-down command)'''
    frame = Frame.get_selected_python_frame()
    if not frame:
        print('Unable to locate python frame')
        return

    # Walk in the requested direction until the next python frame, then
    # select and summarize it.
    while frame:
        candidate = frame.older() if move_up else frame.newer()
        if not candidate:
            break
        if candidate.is_python_frame():
            # Result:
            if candidate.select():
                candidate.print_summary()
            return
        frame = candidate

    if move_up:
        print('Unable to find an older python frame')
    else:
        print('Unable to find a newer python frame')
class PyUp(gdb.Command):
    'Select and print the python stack frame that called this one (if any)'

    def __init__(self):
        gdb.Command.__init__(self, "py-up",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Delegate to the shared stack walker, heading toward callers.
        move_in_stack(move_up=True)
class PyDown(gdb.Command):
    'Select and print the python stack frame called by this one (if any)'

    def __init__(self):
        gdb.Command.__init__(self, "py-down",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Delegate to the shared stack walker, heading toward callees.
        move_in_stack(move_up=False)

# py-up/py-down rely on frame selection, which not every gdb build exposes;
# only register them when gdb.Frame.select exists.
if hasattr(gdb.Frame, 'select'):
    PyUp()
    PyDown()
class PyBacktraceFull(gdb.Command):
    'Display the current python frame and all the frames within its call stack (if any)'

    def __init__(self):
        gdb.Command.__init__(self, "py-bt-full",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        current = Frame.get_selected_python_frame()
        if not current:
            print('Unable to locate python frame')
            return
        # Walk outward from the selected frame, summarizing every
        # python-relevant frame along the way.
        while current:
            if current.is_python_frame():
                current.print_summary()
            current = current.older()

PyBacktraceFull()
class PyBacktrace(gdb.Command):
    'Display the current python frame and all the frames within its call stack (if any)'

    def __init__(self):
        gdb.Command.__init__(self, "py-bt",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        current = Frame.get_selected_python_frame()
        if not current:
            print('Unable to locate python frame')
            return
        # Emit a CPython-style traceback (newest frame first).
        sys.stdout.write('Traceback (most recent call first):\n')
        while current:
            if current.is_python_frame():
                current.print_traceback()
            current = current.older()

PyBacktrace()
class PyPrint(gdb.Command):
    'Look up the given python variable name, and print it'

    def __init__(self):
        gdb.Command.__init__(self, "py-print",
                             gdb.COMMAND_DATA, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        name = str(args)

        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return

        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print('Unable to read information on python frame')
            return

        pyop_var, scope = pyop_frame.get_var_by_name(name)
        if not pyop_var:
            print('%r not found' % name)
            return
        print('%s %r = %s'
              % (scope, name, pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))

PyPrint()
class PyLocals(gdb.Command):
    # Note: this docstring is the user-visible "help py-locals" text; the
    # previous one was copy-pasted from py-print and described the wrong
    # command.
    'Print the local variables of the selected python frame'

    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-locals",
                              gdb.COMMAND_DATA,
                              gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # (The unused "name = str(args)" from the original was removed;
        # py-locals takes no argument.)
        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return

        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print('Unable to read information on python frame')
            return

        # Print every local as "name = truncated repr".
        for pyop_name, pyop_value in pyop_frame.iter_locals():
            print('%s = %s'
                  % (pyop_name.proxyval(set()),
                     pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))

PyLocals()
| 63,797 | Python | .py | 1,580 | 29.753165 | 102 | 0.571886 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,674 | TRUENAS-MIB.py | truenas_middleware/src/freenas/usr/local/share/pysnmp/mibs/TRUENAS-MIB.py | # PySNMP SMI module. Autogenerated from smidump -f python TRUENAS-MIB
# by libsmi2pysnmp-0.1.3 at Wed Jul 24 12:51:26 2024,
# Python version sys.version_info(major=3, minor=11, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( Bits, Counter32, Counter64, Gauge32, Integer32, Integer32, ModuleIdentity, MibIdentifier, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, enterprises, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "Counter64", "Gauge32", "Integer32", "Integer32", "ModuleIdentity", "MibIdentifier", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "enterprises")
( DisplayString, TextualConvention, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Types
class AlertLevelType(Integer):
subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(1,2,3,4,5,6,7,)
namedValues = NamedValues(("info", 1), ("notice", 2), ("warning", 3), ("error", 4), ("critical", 5), ("alert", 6), ("emergency", 7), )
# Objects
trueNas = ModuleIdentity((1, 3, 6, 1, 4, 1, 50536)).setRevisions(("2022-12-21 18:00",))
if mibBuilder.loadTexts: trueNas.setOrganization("www.ixsystems.com")
if mibBuilder.loadTexts: trueNas.setContactInfo("postal: 2490 Kruse Dr\nSan Jose, CA 95131\n\nemail: support@iXsystems.com")
if mibBuilder.loadTexts: trueNas.setDescription("TrueNAS Specific MIBs")
zfs = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1))
zpool = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1, 1))
zpoolTable = MibTable((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1))
if mibBuilder.loadTexts: zpoolTable.setDescription("")
zpoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1)).setIndexNames((0, "TRUENAS-MIB", "zpoolIndex"))
if mibBuilder.loadTexts: zpoolEntry.setDescription("")
zpoolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: zpoolIndex.setDescription("")
zpoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolName.setDescription("The name of the zpool")
zpoolHealth = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolHealth.setDescription("The health of the zpool")
zpoolReadOps = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolReadOps.setDescription("The number of read I/O operations sent to the pool or device,\nincluding metadata requests (averaged since system booted).")
zpoolWriteOps = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolWriteOps.setDescription("The number of write I/O operations sent to the pool or device\n(averaged since system booted).")
zpoolReadBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolReadBytes.setDescription("The bandwidth of all read operations (including metadata),\nexpressed as units per second (averaged since system booted)")
zpoolWriteBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolWriteBytes.setDescription("The bandwidth of all write operations, expressed as units per\nsecond (averaged since system booted).")
zpoolReadOps1sec = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolReadOps1sec.setDescription("The number of read I/O operations sent to the pool or device,\nincluding metadata requests (over 1 second interval).")
zpoolWriteOps1sec = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolWriteOps1sec.setDescription("The number of write I/O operations sent to the pool or device\n(over 1 second interval).")
zpoolReadBytes1sec = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolReadBytes1sec.setDescription("The bandwidth of all read operations (including metadata),\nexpressed as units per second (over 1 second interval)")
zpoolWriteBytes1sec = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 1, 1, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zpoolWriteBytes1sec.setDescription("The bandwidth of all write operations, expressed as units per\nsecond (over 1 second interval).")
zvol = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1, 2))
zvolTable = MibTable((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1))
if mibBuilder.loadTexts: zvolTable.setDescription("")
zvolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1, 1)).setIndexNames((0, "TRUENAS-MIB", "zvolIndex"))
if mibBuilder.loadTexts: zvolEntry.setDescription("")
zvolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: zvolIndex.setDescription("")
zvolDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zvolDescr.setDescription("The name of the zvol")
zvolUsedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zvolUsedBytes.setDescription("The zfs used property value")
zvolAvailableBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zvolAvailableBytes.setDescription("The zfs available property value")
zvolReferencedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 1, 2, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zvolReferencedBytes.setDescription("The zfs referenced property value")
arc = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1, 3))
zfsArcSize = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcSize.setDescription("")
zfsArcMeta = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcMeta.setDescription("")
zfsArcData = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcData.setDescription("")
zfsArcHits = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcHits.setDescription("")
zfsArcMisses = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcMisses.setDescription("")
zfsArcC = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcC.setDescription("")
zfsArcMissPercent = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcMissPercent.setDescription("Arc Miss Percentage.\nNote: Floating precision sent across SNMP as a String")
zfsArcCacheHitRatio = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcCacheHitRatio.setDescription("Arc Cache Hit Ration Percentage.\nNote: Floating precision sent across SNMP as a String")
zfsArcCacheMissRatio = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcCacheMissRatio.setDescription("Arc Cache Miss Ration Percentage.\nNote: Floating precision sent across SNMP as a String")
l2arc = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1, 4))
zfsL2ArcHits = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 4, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsL2ArcHits.setDescription("")
zfsL2ArcMisses = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 4, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsL2ArcMisses.setDescription("")
zfsL2ArcRead = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 4, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsL2ArcRead.setDescription("")
zfsL2ArcWrite = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 4, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsL2ArcWrite.setDescription("")
zfsL2ArcSize = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 4, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsL2ArcSize.setDescription("")
zil = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1, 5))
zfsZilstatOps1sec = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 5, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsZilstatOps1sec.setDescription("")
zfsZilstatOps5sec = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 5, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsZilstatOps5sec.setDescription("")
zfsZilstatOps10sec = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 5, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsZilstatOps10sec.setDescription("")
notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 2))
notificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 2, 1))
notificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 2, 2))
alertId = MibScalar((1, 3, 6, 1, 4, 1, 50536, 2, 2, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alertId.setDescription("")
alertLevel = MibScalar((1, 3, 6, 1, 4, 1, 50536, 2, 2, 2), AlertLevelType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alertLevel.setDescription("")
alertMessage = MibScalar((1, 3, 6, 1, 4, 1, 50536, 2, 2, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alertMessage.setDescription("")
hddTempTable = MibTable((1, 3, 6, 1, 4, 1, 50536, 3))
if mibBuilder.loadTexts: hddTempTable.setDescription("Table of HDDs and their temperatures.")
hddTempEntry = MibTableRow((1, 3, 6, 1, 4, 1, 50536, 3, 1)).setIndexNames((0, "TRUENAS-MIB", "hddTempIndex"))
if mibBuilder.loadTexts: hddTempEntry.setDescription("An entry containing a HDD and its temperature.")
hddTempIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hddTempIndex.setDescription("Reference index for each observed HDD.")
hddTempDevice = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hddTempDevice.setDescription("The name of the HDD we are reading temperature from.")
hddTempValue = MibTableColumn((1, 3, 6, 1, 4, 1, 50536, 3, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hddTempValue.setDescription("The temperature of this HDD in mC.")
# Augmentions
# Notifications
alert = NotificationType((1, 3, 6, 1, 4, 1, 50536, 2, 1, 1)).setObjects(*(("TRUENAS-MIB", "alertId"), ("TRUENAS-MIB", "alertLevel"), ("TRUENAS-MIB", "alertMessage"), ) )
if mibBuilder.loadTexts: alert.setDescription("An alert raised")
alertCancellation = NotificationType((1, 3, 6, 1, 4, 1, 50536, 2, 1, 2)).setObjects(*(("TRUENAS-MIB", "alertId"), ) )
if mibBuilder.loadTexts: alertCancellation.setDescription("An alert cancelled")
# Exports
# Module identity
mibBuilder.exportSymbols("TRUENAS-MIB", PYSNMP_MODULE_ID=trueNas)
# Types
mibBuilder.exportSymbols("TRUENAS-MIB", AlertLevelType=AlertLevelType)
# Objects
mibBuilder.exportSymbols("TRUENAS-MIB", trueNas=trueNas, zfs=zfs, zpool=zpool, zpoolTable=zpoolTable, zpoolEntry=zpoolEntry, zpoolIndex=zpoolIndex, zpoolName=zpoolName, zpoolHealth=zpoolHealth, zpoolReadOps=zpoolReadOps, zpoolWriteOps=zpoolWriteOps, zpoolReadBytes=zpoolReadBytes, zpoolWriteBytes=zpoolWriteBytes, zpoolReadOps1sec=zpoolReadOps1sec, zpoolWriteOps1sec=zpoolWriteOps1sec, zpoolReadBytes1sec=zpoolReadBytes1sec, zpoolWriteBytes1sec=zpoolWriteBytes1sec, zvol=zvol, zvolTable=zvolTable, zvolEntry=zvolEntry, zvolIndex=zvolIndex, zvolDescr=zvolDescr, zvolUsedBytes=zvolUsedBytes, zvolAvailableBytes=zvolAvailableBytes, zvolReferencedBytes=zvolReferencedBytes, arc=arc, zfsArcSize=zfsArcSize, zfsArcMeta=zfsArcMeta, zfsArcData=zfsArcData, zfsArcHits=zfsArcHits, zfsArcMisses=zfsArcMisses, zfsArcC=zfsArcC, zfsArcMissPercent=zfsArcMissPercent, zfsArcCacheHitRatio=zfsArcCacheHitRatio, zfsArcCacheMissRatio=zfsArcCacheMissRatio, l2arc=l2arc, zfsL2ArcHits=zfsL2ArcHits, zfsL2ArcMisses=zfsL2ArcMisses, zfsL2ArcRead=zfsL2ArcRead, zfsL2ArcWrite=zfsL2ArcWrite, zfsL2ArcSize=zfsL2ArcSize, zil=zil, zfsZilstatOps1sec=zfsZilstatOps1sec, zfsZilstatOps5sec=zfsZilstatOps5sec, zfsZilstatOps10sec=zfsZilstatOps10sec, notifications=notifications, notificationPrefix=notificationPrefix, notificationObjects=notificationObjects, alertId=alertId, alertLevel=alertLevel, alertMessage=alertMessage, hddTempTable=hddTempTable, hddTempEntry=hddTempEntry, hddTempIndex=hddTempIndex, hddTempDevice=hddTempDevice, hddTempValue=hddTempValue)
# Notifications
mibBuilder.exportSymbols("TRUENAS-MIB", alert=alert, alertCancellation=alertCancellation)
| 13,569 | Python | .py | 132 | 101.583333 | 1,524 | 0.756503 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,675 | smart_alert.py | truenas_middleware/src/freenas/usr/local/libexec/smart_alert.py | #!/usr/bin/env python3
import os
from truenas_api_client import Client
def main():
    """Create a one-shot SMART alert for the device/message smartd handed us via env."""
    env = os.environ
    device = env.get("SMARTD_DEVICE")
    message = env.get("SMARTD_MESSAGE")
    # Nothing actionable was passed by smartd.
    if device is None or message is None:
        return

    # NVMe "Error Log entries increased" events are deliberately ignored noise.
    if "nvme" in device and "number of Error Log entries increased" in message:
        return

    with Client() as client:
        disk = client.call("device.get_disk", device.removeprefix("/dev/"), False, True)
        if disk is not None and (serial := disk['serial']):
            # Append the serial so the alert identifies the exact drive.
            device = f"{device} ({serial!r})"
        client.call("alert.oneshot_create", "SMART", {"device": device, "message": message})
if __name__ == "__main__":
main()
| 736 | Python | .py | 20 | 30.5 | 87 | 0.616147 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,676 | ipa_ctl.py | truenas_middleware/src/freenas/usr/local/libexec/ipa_ctl.py | #!/usr/bin/python3
# IPA control script for TrueNAS IPA Client. This provides support for some
# basic IPA-related operations that the TrueNAS middleware performs.
#
# Although it is written as a standalone script, it is not intended for use
# outside of the scope of TrueNAS developers. Ad-hoc usage of this script may
# result in undefined sever behavior and is not supported in any way.
import argparse
import base64
import json
import subprocess
import sys
from configparser import RawConfigParser
from contextlib import contextmanager
from cryptography.hazmat.primitives.serialization import Encoding
from ipaclient.install.client import get_ca_certs_from_ldap
from ipaclient.install.ipa_client_samba import (
generate_smb_machine_account,
retrieve_domain_information,
)
from ipalib import api, errors
from ipapython.ipautil import realm_to_suffix
from ipaplatform.paths import paths
from tempfile import NamedTemporaryFile
from typing import Optional
from middlewared.utils.directoryservices.ipactl_constants import (
ExitCode,
IpaOperation,
)
from middlewared.utils.directoryservices.krb5 import KRB5_KT_VNO
from middlewared.utils.directoryservices.krb5_constants import KRB_ETYPE
from middlewared.plugins.smb_.util_param import smbconf_getparm
DESCRIPTION = (
"This program is intended for exclusive use by TrueNAS developers. "
"Any use of it outside of the scope of TrueNAS backend operations is "
"unsupported and may result in a production outage. "
"This program provides support for some basic FreeIPA server related "
"operations. "
"NOTE: requires the following: "
"(1) valid kerberos ticket and configuration for FreeIPA domain, "
"(2) valid FreeIPA domain configuration in `/etc/ipa/default.conf`, "
"(3) valid FreeIPA domain CA certificate in `/etc/ipa/ca.crt`"
)
SUPPORTED_SERVICES = ('cifs', 'nfs')
IPA_JOIN = '/sbin/ipa-join'
IPA_JOIN_CMD_ERR_CODE = {
0: 'Success',
1: 'Kerberos context initialization failed',
2: 'Incorrect usage',
3: 'Out of memory',
4: 'Invalid service principal name',
5: 'No Kerberos credentials cache',
6: 'No Kerberos principal and no bind DN and password',
7: 'Failed to open keytab',
8: 'Failed to create key material',
9: 'Setting keytab failed',
10: 'Bind password required when using a bind DN',
11: 'Failed to add key to keytab',
12: 'Failed to close keytab',
13: 'Host is already enrolled',
14: 'LDAP failure',
15: 'Incorrect bulk password',
16: 'Host name must be fully-qualified',
17: 'RPC fault',
18: 'Principal not found in host entry',
19: 'Unable to generate Kerberos credentials cache',
20: 'Unenrollment result not in RPC response',
21: 'Failed to get default Kerberos realm',
22: 'Unable to auto-detect fully-qualified hostname'
}
IPA_GETKEYTAB = '/sbin/ipa-getkeytab'
IPA_GETKEYTAB_ERR_CODE = {
0: 'Success',
1: 'Kerberos context initialization failed',
2: 'Incorrect usage',
3: 'Out of memory',
4: 'Invalid service principal name',
5: 'No Kerberos credentials cache',
6: 'No Kerberos principal and no bind DN and password',
7: 'Failed to open keytab',
8: 'Failed to create key material',
9: 'Setting keytab failed',
10: 'Bind password required when using a bind DN',
11: 'Failed to add key to keytab',
12: 'Failed to close keytab',
}
DESIRED_ETYPES = (
KRB_ETYPE.AES256_CTS_HMAC_SHA1_96.value,
KRB_ETYPE.AES128_CTS_HMAC_SHA1_96.value
)
class IpaCtlError(Exception):
    """Failure raised by the ipa-* command wrappers in this script.

    Carries the failing operation name, the tool's exit code, and the best
    available message: a JSON-RPC response payload, raw stderr text, or a
    mapped exit-code description -- in that order of preference.
    """

    def __init__(self, op, error_code, rpc_response=None, text=None, error_code_map=None):
        self.error_code = error_code
        self.error_code_str = None
        self.op = op
        self.rpc_response = rpc_response
        self.text = text
        self.errmsg = self.__resolve_errmsg(error_code_map)

    def __resolve_errmsg(self, code_map):
        # Prefer structured RPC output, then raw stderr, then a mapped code.
        if self.rpc_response:
            return json.dumps(self.rpc_response)
        if self.text:
            return self.text.strip()
        if code_map and (mapped := code_map.get(self.error_code)) is not None:
            return mapped
        return f'Operation failed with unknown error: {self.error_code}'

    def __str__(self):
        return f'[{self.op}]: {self.errmsg}'
@contextmanager
def temporary_keytab():
    """Yield a NamedTemporaryFile pre-seeded with the keytab version header.

    The file exists only long enough for the ipa-* tools to write keytab
    entries into it; it is removed automatically when the context exits.
    """
    with NamedTemporaryFile() as tmp:
        # Seed the file with the keytab format version bytes so the tools
        # can append entries to it.
        tmp.write(KRB5_KT_VNO)
        tmp.flush()
        yield tmp
def extract_json_rpc_msg(data):
    """Return the parsed JSON-RPC response embedded in ipa-* debug output.

    ipa-join, ipa-getkeytab and ipa-rmkeytab print the JSON-RPC exchange to
    stderr when run with `-d`. The caller has already verified that the
    marker 'JSON-RPC response:' occurs in `data`.
    """
    segment = data.split('JSON-RPC response:')[1]
    first_line = segment.strip().splitlines()[0]
    return json.loads(first_line.strip())
def raise_ipa_cmd_failure(op, exit_code, errmsg, error_code_map):
    """Convert an ipa-* tool failure into an IpaCtlError and raise it.

    If the tool got far enough to exchange JSON-RPC with the FreeIPA server,
    surface the structured response; otherwise pass the stderr text through
    together with the exit-code map.
    """
    if 'JSON-RPC response' in errmsg:
        raise IpaCtlError(op, exit_code, rpc_response=extract_json_rpc_msg(errmsg))
    raise IpaCtlError(op, exit_code, text=errmsg, error_code_map=error_code_map)
def initialize_ipa_connection():
    """Bootstrap the ipalib API in client mode and connect to the server.

    NOTE: a valid kerberos ticket must already be in place.
    """
    api.bootstrap(context="custom", in_server=False)
    api.finalize()
    api.Backend.rpcclient.connect()
def collapse_key(entry, key):
    """Return entry[key], unwrapping a tuple to its first element; None if absent or None."""
    value = entry.get(key)
    if value is None:
        return None
    return value[0] if isinstance(value, tuple) else value
def parse_ldap_result(entry):
    """Flatten an LDAP result entry, collapsing each tuple value to its first element.

    Idiom cleanup: the manual key loop is replaced with a dict comprehension
    iterating the mapping directly (entry.keys() is redundant).
    """
    return {key: collapse_key(entry, key) for key in entry}
def add_service(hostname: str, service_name: str):
    """Register service_name/hostname in the IPA domain.

    Returns the created kerberos principal name, e.g.
    "nfs/truenas.walkerdom.test@WALKERDOM.TEST".
    """
    principal = f'{service_name}/{hostname}'
    return api.Command.service_add(principal)['value']
def del_service(hostname: str, service_name: str):
    """Delete service_name/hostname from the IPA domain.

    Returns a dictionary with the deleted kerberos principal name, e.g.
    {"service": "nfs/truenas.walkerdom.test@WALKERDOM.TEST"}.
    """
    principal = f'{service_name}/{hostname}'
    result = api.Command.service_del(principal)
    return {'service': result['value'][0]}
def del_service_smb(hostname: str, realm: str):
    """Delete the SMB (cifs) service principal for hostname in realm.

    Returns the kerberos principal name that was deleted.
    """
    return api.Command.service_del(f'cifs/{hostname}@{realm}')['value']
def get_keytab(
    principal_name: str,
    server_name: Optional[str] = None,
    get_password: Optional[bool] = False
) -> str:
    """Generate a keytab for `principal_name` via ipa-getkeytab.

    param: server_name - optionally specify name (FQDN) of FreeIPA server for operation.
    param: get_password - set the principal's password to a randomized string and
        include both keytab and password in the output.

    returns: dictionary containing base64-encoded keytab and optionally password.

    WARNING: this invalidates the existing keytab for the service.
    """
    if get_password:
        # generate a randomized password with ascii characters
        # with minimum length of 128 and maximum of 256
        password = generate_smb_machine_account(None, None, None, None)

    with temporary_keytab() as fname:
        etypes = list(DESIRED_ETYPES)
        if principal_name.startswith('cifs'):
            # SMB service must have arcfour-hmac generated to allow domain
            # member to authenticate to domain controller.
            etypes.append(KRB_ETYPE.ARCFOUR_HMAC.value)

        cmd = [
            IPA_GETKEYTAB,
            '-p', principal_name,
            '-k', fname.name,
            '-e', ','.join(etypes)
        ]
        if server_name:
            cmd.extend(['-s', server_name])

        if get_password:
            cmd.append('-P')
            # BUG FIX: the original passed the bytes literal b'{password}\n{password}'
            # (missing f-string prefix), so ipa-getkeytab received the literal
            # text "{password}" at both password prompts instead of the
            # generated password. Interpolate and encode the real password.
            res = subprocess.run(
                cmd, check=False,
                input=f'{password}\n{password}'.encode(),
                capture_output=True
            )
        else:
            res = subprocess.run(cmd, check=False, capture_output=True)

        if res.returncode:
            raise_ipa_cmd_failure(
                'IPA-GETKEYTAB',
                res.returncode,
                res.stderr.decode(),
                IPA_GETKEYTAB_ERR_CODE
            )

        with open(fname.name, 'rb') as f:
            kt = base64.b64encode(f.read())

    if get_password:
        return {'keytab': kt.decode(), 'password': password}
    return {'keytab': kt.decode()}
def get_smb_service_keytab_and_password(hostname: str, realm: str):
    """Create (or recreate) the SMB service and return its keytab and password.

    The plain-text password is returned so the caller can insert it into
    samba's secrets.tdb.
    """
    principal = f'cifs/{hostname}@{realm}'
    try:
        api.Command.service_show(principal)
        # The SMB principal already exists. The plain-text password must be
        # written to secrets.tdb at the same time a new keytab is cut, so we
        # forcibly recreate the service to keep all credentials in sync.
        api.Command.service_del(principal)
    except errors.NotFound:
        pass

    netbiosname = smbconf_getparm('netbiosname')
    api.Command.service_add_smb(hostname, netbiosname)
    kt_resp = get_keytab(principal, get_password=True)
    api.Command.service_mod(principal, addattr='ipaNTHash=MagicRegen')
    return kt_resp | {'service': principal}
def get_service_keytab(hostname, service, force=False):
    """Return a base64-encoded keytab for service/hostname, creating the
    service entry on demand.

    Return value shape:
    ```
    {
        "keytab": <base64 string>,
        "service": <kerberos principal name>
    }
    ```
    NOTE: `force` is currently unused; kept for interface compatibility.
    """
    spn = f'{service}/{hostname}'
    try:
        entry = api.Command.service_show(spn)['result']
    except errors.NotFound:
        # Service does not exist yet -- register it, then re-fetch.
        add_service(hostname, service)
        entry = api.Command.service_show(spn)['result']

    principal = parse_ldap_result(entry)['krbprincipalname']
    return get_keytab(principal) | {'service': principal}
def get_ipa_cacerts(server, realm):
    """Fetch the realm's CA certificate chain from LDAP as concatenated PEM text."""
    base_dn = str(realm_to_suffix(realm))
    pem = b''.join(
        cert.public_bytes(Encoding.PEM)
        for cert in get_ca_certs_from_ldap(server, base_dn, realm)
    )
    return {'realm': realm, 'cacert': pem.decode()}
def has_ticket_assert():
    """Exit with ExitCode.KERBEROS unless a valid kerberos ticket is present."""
    # `klist -s` is silent and returns non-zero when no valid ticket exists.
    if subprocess.run(['klist', '-s'], check=False).returncode != 0:
        print('Kerberos ticket is required', file=sys.stderr)
        sys.exit(ExitCode.KERBEROS)
def parse_ipa_config():
    """Parse the FreeIPA default config file; exit with FREEIPA_CONFIG on failure."""
    config = RawConfigParser()
    try:
        config.read(paths.IPA_DEFAULT_CONF)
    except Exception as e:
        print(str(e), file=sys.stderr)
        sys.exit(ExitCode.FREEIPA_CONFIG)
    return config
def ipa_join(
    hostname: str,
    ipa_server: str,
    realm: str,
):
    """Join the server to the FreeIPA domain.

    param: hostname - fqdn of host joining freeipa 'truenas.testdom.test'
    param: ipa_server - name of target freeipa server 'ipa.testdom.test'
    param: realm - name of kerberos realm of freeipa domain 'TESTDOM.TEST'

    returns: dictionary containing JSON-RPC response and base64-encoded
    keytab for kerberos principal `host/<hostname>`
    """
    base_dn = str(realm_to_suffix(realm))

    with temporary_keytab() as ktfile:
        join = subprocess.run([
            IPA_JOIN,
            '-d',
            '-h', hostname,
            '-s', ipa_server,
            '-b', base_dn,
            '-k', ktfile.name,
        ], check=False, capture_output=True)

        stderr = join.stderr.decode()
        if join.returncode == 13:
            # Host is already enrolled: simply cut a fresh host keytab and
            # merge in the RPC response from the join attempt.
            resp = get_keytab(f'host/{hostname}', ipa_server)
            return resp | extract_json_rpc_msg(stderr)
        elif join.returncode != 0:
            raise_ipa_cmd_failure(
                'IPA-JOIN',
                join.returncode,
                stderr,
                IPA_JOIN_CMD_ERR_CODE
            )

        with open(ktfile.name, 'rb') as f:
            encoded = base64.b64encode(f.read())

    return {
        'keytab': encoded.decode(),
        'rpc_response': extract_json_rpc_msg(stderr)
    }
def ipa_leave(
    hostname: str,
    ipa_server: str,
    realm: str,
):
    """Deactivate the host account associated with `hostname` in FreeIPA.

    param: hostname - fqdn of host leaving freeipa 'truenas.testdom.test'
    param: ipa_server - name of target freeipa server 'ipa.testdom.test'
    param: realm - name of kerberos realm of freeipa domain 'TESTDOM.TEST'

    returns: dictionary containing JSON-RPC response

    NOTE: this may fail if additional SPNs are specified for the hostname.
    """
    cmd = [
        IPA_JOIN,
        '-d', '-f',
        '-h', hostname,
        '-s', ipa_server,
        '-b', str(realm_to_suffix(realm)),
        '--unenroll',
    ]
    leave = subprocess.run(cmd, check=False, capture_output=True)
    stderr = leave.stderr.decode()
    if leave.returncode:
        raise_ipa_cmd_failure(
            'IPA-LEAVE',
            leave.returncode,
            stderr,
            IPA_JOIN_CMD_ERR_CODE
        )

    return {'rpc_response': extract_json_rpc_msg(stderr)}
def main():
    """Parse the requested IPA operation, dispatch it, and print the JSON result.

    Exits with an ExitCode value. Errors print to stderr; JSON-RPC error
    payloads are printed to stderr with ExitCode.JSON_ERROR.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        '-a', '--action',
        help='Action to perform related to FreeIPA domain',
        required=True,
        choices=[op.name for op in IpaOperation]
    )
    args = parser.parse_args()
    # All operations require a valid kerberos ticket and a parseable IPA config.
    has_ticket_assert()
    ipa_config = parse_ipa_config()

    match args.action:
        case IpaOperation.GET_CACERT_FROM_LDAP.name | IpaOperation.JOIN.name:
            # These two operations do not use the ipalib RPC session
            # (LDAP / ipa-join is used directly), so no connection is made.
            pass
        case _:
            initialize_ipa_connection()

    resp = None

    try:
        match args.action:
            case IpaOperation.JOIN.name:
                resp = ipa_join(
                    ipa_config.get('global', 'host'),
                    ipa_config.get('global', 'server'),
                    ipa_config.get('global', 'realm')
                )
            case IpaOperation.LEAVE.name:
                resp = ipa_leave(
                    ipa_config.get('global', 'host'),
                    ipa_config.get('global', 'server'),
                    ipa_config.get('global', 'realm')
                )
            case IpaOperation.SET_NFS_PRINCIPAL.name:
                """
                resp is formatted as follows:
                ```
                {
                    "keytab": <base64 string>,
                    "service": "nfs/truenas.walkerdom.test@WALKERDOM.TEST"
                }
                ```
                """
                resp = get_service_keytab(
                    ipa_config.get('global', 'host'),
                    'nfs'
                )
            case IpaOperation.DEL_NFS_PRINCIPAL.name:
                """
                resp is formatted as follows:
                ```
                {
                    "service": "nfs/truenas.walkerdom.test@WALKERDOM.TEST"
                }
                ```
                """
                resp = del_service(
                    ipa_config.get('global', 'host'), 'nfs'
                )
            case IpaOperation.SET_SMB_PRINCIPAL.name:
                """
                resp is formatted as follows:
                ```
                {
                    "keytab": <base64 string>,
                    "password": <random string>,
                    "domain_info": [
                        {
                            "netbios_name": "WALKERDOM",
                            "domain_sid": "S-1-5-21-3696504179-2855309571-923743039",
                            "domain_name": "walkerdom.test",
                            "range_id_min": 565200000,
                            "range_id_max": 565399999
                        }
                    ],
                    "service": "cifs/truenas.walkerdom.test@WALKERDOM.TEST"
                }
                ```
                """
                # SMB support requires a trust controller on the IPA masters.
                if not (domain_info := retrieve_domain_information(api)):
                    print(
                        'No configured trust controller detected '
                        'on IPA masters.',
                        file=sys.stderr
                    )
                    sys.exit(ExitCode.NO_SMB_SUPPORT)

                resp = get_smb_service_keytab_and_password(
                    ipa_config.get('global', 'host'),
                    ipa_config.get('global', 'realm')
                )
                resp |= {'domain_info': domain_info}
            case IpaOperation.DEL_SMB_PRINCIPAL.name:
                resp = del_service_smb(
                    ipa_config.get('global', 'host'),
                    ipa_config.get('global', 'realm')
                )
            case IpaOperation.SMB_DOMAIN_INFO.name:
                resp = retrieve_domain_information(api)
            case IpaOperation.GET_CACERT_FROM_LDAP.name:
                resp = get_ipa_cacerts(
                    ipa_config.get('global', 'server'),
                    ipa_config.get('global', 'realm')
                )
            case _:
                raise ValueError(f'{args.action}: unhandled action')
    except IpaCtlError as e:
        if resp:
            # We may have partially completed request
            # print to stdout so that caller has some
            # chance of error handling
            print(json.dumps(resp))

        if e.rpc_response:
            print(json.dumps(e.rpc_response), file=sys.stderr)
            sys.exit(ExitCode.JSON_ERROR)

        print(f'{e.op} - {e.errmsg}', file=sys.stderr)
        sys.exit(ExitCode.GENERIC)
    except Exception as e:
        print(str(e), file=sys.stderr)
        sys.exit(ExitCode.GENERIC)

    print(json.dumps(resp))
    sys.exit(ExitCode.SUCCESS)
if __name__ == '__main__':
main()
| 19,058 | Python | .py | 502 | 29.081673 | 88 | 0.609982 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,677 | find_alias_for_smtplib.py | truenas_middleware/src/freenas/etc/find_alias_for_smtplib.py | import argparse
import email
import email.parser
import json
import os
import re
import requests
import sys
import syslog
from truenas_api_client import Client
ALIASES = re.compile(r'^(?P<from>[^#]\S*?):\s*(?P<to>\S+)$')
def do_sendmail(msg, to_addrs=None, parse_recipients=False):
    """Parse a sendmail-style message and deliver it via the middleware mail.send API.

    :param msg: raw message text (as read from stdin)
    :param to_addrs: explicit recipient list; local names are expanded via /etc/aliases
    :param parse_recipients: if True, take recipients from the message's To: header
    :raises ValueError: if no recipients are given and parsing them is disabled
    """
    # BUG FIX: email.iterators is a submodule that `import email` /
    # `import email.parser` do not load; without this explicit import, the
    # non-multipart path below raises AttributeError.
    import email.iterators

    if to_addrs is None:
        if not parse_recipients:
            syslog.syslog('Do not know who to send the message to.' + msg[0:140])
            raise ValueError('Do not know who to send the message to.')
        to_addrs = []

    # XXX: this should probably be a FeedParser because reading from sys.stdin
    # is blocking.
    em_parser = email.parser.Parser()
    em = em_parser.parsestr(msg)
    if parse_recipients:
        # Strip away the comma based delimiters and whitespace.
        to_addrs = list(map(str.strip, em.get('To').split(',')))

        if not to_addrs or not to_addrs[0]:
            to_addrs = ['root']

    # Expand aliases; only fully-qualified addresses or known aliases survive.
    to_addrs_repl = []
    if to_addrs:
        aliases = get_aliases()
        for addr_spec in to_addrs:
            # Avoid shadowing the outer loop variable (was `to_addr` twice).
            for to_addr in addr_spec.split(','):
                if to_addr.find('@') != -1:
                    to_addrs_repl.append(to_addr)
                elif to_addr.find('@') == -1 and to_addr in aliases:
                    to_addrs_repl.append(aliases[to_addr])

    if not to_addrs_repl:
        syslog.syslog(f'No aliases found to send email to {", ".join(to_addrs)}')
        sys.exit(1)

    with Client() as c:
        sw_name = 'TrueNAS'

        margs = {}
        margs['extra_headers'] = dict(em)
        margs['extra_headers'].update({
            'X-Mailer': sw_name,
            f'X-{sw_name}-Host': c.call('system.hostname'),
            'To': ', '.join(to_addrs_repl),
        })
        margs['subject'] = em.get('Subject')
        if em.is_multipart():
            # Everything that is not a multipart container is a body/attachment part.
            attachments = [part for part in em.walk() if part.get_content_maintype() != 'multipart']
            margs['attachments'] = True if attachments else False
            margs['text'] = (
                'This is a MIME formatted message. If you see '
                'this text it means that your email software '
                'does not support MIME formatted messages.')
            margs['html'] = None
        else:
            margs['text'] = ''.join(email.iterators.body_line_iterator(em))

        margs['to'] = to_addrs_repl
        if not margs.get('attachments'):
            c.call('mail.send', margs)
        else:
            # Attachments must be uploaded through the REST upload endpoint
            # with a one-time auth token.
            token = c.call('auth.generate_token')
            files = []
            for attachment in attachments:
                entry = {'headers': []}
                for k, v in attachment.items():
                    entry['headers'].append({'name': k, 'value': v})
                entry['content'] = attachment.get_payload()
                files.append(entry)
            requests.post(
                f'http://localhost:6000/_upload?auth_token={token}',
                files={
                    'data': json.dumps({'method': 'mail.send', 'params': [margs]}),
                    'file': json.dumps(files),
                },
            )
def get_aliases(path='/etc/aliases'):
    """Parse a sendmail aliases file into a fully-resolved {alias: target} dict.

    Chained aliases (a -> b -> c) are collapsed iteratively until the mapping
    reaches a fixed point; direct self-references are logged as errors.

    :param path: aliases file to read. Parameterized (default preserves the
        original hard-coded '/etc/aliases') so the parser is reusable/testable.
    """
    aliases = {}
    with open(path, 'r') as f:
        for line in f:
            search = ALIASES.search(line)
            if search:
                _from, _to = search.groups()
                aliases[_from] = _to

    # Collapse alias chains until no substitution changes the mapping.
    while True:
        oldaliases = set(aliases.items())
        for key, val in aliases.items():
            if key == val:
                syslog.syslog(syslog.LOG_ERR, f'Found a recursive dependency for {key}')
            elif val in aliases:
                aliases[key] = aliases[val]
        if set(aliases.items()) == oldaliases:
            break

    return aliases
def main():
    """Entry point: emulate a minimal sendmail(8) CLI and forward stdin as mail."""
    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)

    argp = argparse.ArgumentParser(description='Process email')
    argp.add_argument(
        '-i', dest='strip_leading_dot', action='store_false', default=True,
        help='see sendmail(8) -i',
    )
    argp.add_argument(
        '-t', dest='parse_recipients', action='store_true', default=False,
        help='parse recipients from message',
    )
    # Rewrite the auto-generated usage line to advertise positional recipients.
    argp.usage = ' '.join(argp.format_usage().split(' ')[1:-1]) + ' [email_addr|user] ..'

    args, recipients = argp.parse_known_args()
    if not recipients and not args.parse_recipients:
        argp.exit(message=argp.format_usage())

    body = sys.stdin.read()
    syslog.syslog("sending mail to " + ', '.join(recipients) + '\n' + body[0:140])
    do_sendmail(body, to_addrs=recipients, parse_recipients=args.parse_recipients)
| 4,710 | Python | .py | 113 | 31.238938 | 100 | 0.555313 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,678 | copyright | truenas_middleware/src/freenas/debian/copyright | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: truenas
Source: http://www.truenas.com
Files: *
Copyright: 2019 iXsystems Inc
2019 William Grzybowski <william@grzy.org>
License: BSD-3-Clause
Files: debian/*
Copyright: 2019 William Grzybowski <william@grzy.org>
License: BSD-3-Clause
License: BSD-3-Clause
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE HOLDERS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1,813 | Python | .py | 34 | 50.794118 | 74 | 0.804617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,679 | setup_test.py | truenas_middleware/src/middlewared/setup_test.py | from setuptools import setup
# Packaging metadata for the integration-test helper subset of middlewared.
# Only the test asset/util packages are shipped here; the daemon itself is
# packaged by the sibling setup.py.
install_requires = [
    'pytest-rerunfailures',
]

setup(
    name='middlewared',
    description='TrueNAS Middleware Daemon Integration Test Facilities',
    packages=[
        'middlewared',
        'middlewared.test.integration.assets',
        'middlewared.test.integration.utils',
    ],
    package_data={},
    include_package_data=True,
    license='BSD',
    platforms='any',
    # NOTE(review): namespace_packages is deprecated in modern setuptools;
    # presumably kept for compatibility with the build tooling -- confirm
    # before removing.
    namespace_packages=[str('middlewared')],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    install_requires=install_requires,
)
| 752 | Python | .py | 26 | 23.346154 | 72 | 0.655602 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,680 | setup.py | truenas_middleware/src/middlewared/setup.py | import os
from setuptools import find_packages, setup
def get_assets(name):
    """Recursively collect package-data entries from middlewared/{name}.

    For every directory under middlewared/<name> a '<dir>/*' glob is emitted,
    plus an explicit entry for each '.gitkeep' placeholder file found.
    Paths are relative to the 'middlewared' package directory.
    """
    base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'middlewared')
    entries = []
    for root, _dirs, files in os.walk(os.path.join(base_path, name)):
        rel = os.path.relpath(root, base_path)
        entries.append(f'{rel}/*')
        entries.extend(os.path.join(rel, f) for f in files if f == '.gitkeep')
    return entries
# Packaging metadata for the middlewared daemon. Non-Python assets (alembic
# migrations, etc-file templates, etc.) are shipped via the package_data
# globs collected by get_assets() above.
setup(
    name='middlewared',
    description='TrueNAS Middleware Daemon',
    packages=find_packages(),
    package_data={
        'middlewared.apidocs': [
            'templates/websocket/*',
            'templates/*.*',
        ],
        'middlewared': (
            get_assets('alembic') +
            ['alembic.ini'] +
            get_assets('assets') +
            get_assets('etc_files') +
            get_assets('migration')
        ),
    },
    include_package_data=True,
    license='BSD',
    platforms='any',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    # Console entry points: each maps a CLI command to a module-level main().
    entry_points={
        'console_scripts': [
            'configure_fips = middlewared.scripts.configure_fips:main',
            'setup_cgroups = middlewared.scripts.setup_cgroups:main',
            'middlewared = middlewared.main:main',
            'midgdb = middlewared.scripts.gdb:main',
            'sedhelper = middlewared.scripts.sedhelper:main',
            'wait_to_hang_and_dump_core = middlewared.scripts.wait_to_hang_and_dump_core:main',
            'wait_on_disks = middlewared.scripts.wait_on_disks:main',
            'start_vendor_service = middlewared.scripts.vendor_service:main',
        ],
    },
)
| 1,960 | Python | .py | 57 | 26.22807 | 95 | 0.585571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,681 | copyright | truenas_middleware/src/middlewared/debian/copyright | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: middlewared
Source: https://github.com/freenas/freenas
Files: *
Copyright: 2016 William Grzybowski <william@grzy.org>
License: BSD-2-Clause
Files: debian/*
Copyright: 2019 William Grzybowski <william@grzy.org>
License: BSD-2-Clause
| 324 | Python | .py | 9 | 34.777778 | 74 | 0.811502 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,682 | role.py | truenas_middleware/src/middlewared/middlewared/role.py | from collections import defaultdict
from dataclasses import dataclass, field
import typing
@dataclass()
class Role:
    """
    An authenticated user role.

    :ivar includes: a list of other roles that this role includes. When user is granted this role, they will also
        receive permissions granted by all the included roles.
    :ivar full_admin: if `True` then this role will allow calling all methods.
    :ivar builtin: `True` for the fixed, fine-grained roles defined in ROLES;
        the composite admin roles (FULL_ADMIN, READONLY_ADMIN, SHARING_ADMIN,
        REPLICATION_ADMIN) set it to `False`. NOTE(review): exact consumer
        semantics of this flag live elsewhere in the codebase -- confirm there.
    """
    includes: typing.List[str] = field(default_factory=list)
    full_admin: bool = False
    builtin: bool = True
ROLES = {
'ACCOUNT_READ': Role(),
'ACCOUNT_WRITE': Role(includes=['ACCOUNT_READ']),
'API_KEY_READ': Role(),
'API_KEY_WRITE': Role(includes=['API_KEY_READ']),
'FAILOVER_READ': Role(),
'FAILOVER_WRITE': Role(includes=['FAILOVER_READ']),
'AUTH_SESSIONS_READ': Role(),
'AUTH_SESSIONS_WRITE': Role(includes=['AUTH_SESSIONS_READ']),
'DIRECTORY_SERVICE_READ': Role(),
'DIRECTORY_SERVICE_WRITE': Role(includes=['DIRECTORY_SERVICE_READ']),
'KMIP_READ': Role(),
'KMIP_WRITE': Role(includes=['KMIP_READ']),
'IPMI_READ': Role(),
'IPMI_WRITE': Role(includes=['IPMI_READ']),
'FILESYSTEM_ATTRS_READ': Role(),
'FILESYSTEM_ATTRS_WRITE': Role(includes=['FILESYSTEM_ATTRS_READ']),
'FILESYSTEM_DATA_READ': Role(),
'FILESYSTEM_DATA_WRITE': Role(includes=['FILESYSTEM_DATA_READ']),
'FILESYSTEM_FULL_CONTROL': Role(includes=['FILESYSTEM_ATTRS_WRITE',
'FILESYSTEM_DATA_WRITE']),
'REPORTING_READ': Role(),
'REPORTING_WRITE': Role(includes=['REPORTING_READ']),
'SUPPORT_READ': Role(),
'SUPPORT_WRITE': Role(includes=['SUPPORT_READ']),
'SYSTEM_AUDIT_READ': Role(),
'SYSTEM_AUDIT_WRITE': Role(),
'FULL_ADMIN': Role(full_admin=True, builtin=False),
# Alert roles
'ALERT_LIST_READ': Role(),
'CLOUD_BACKUP_READ': Role(),
'CLOUD_BACKUP_WRITE': Role(includes=['CLOUD_BACKUP_READ']),
'CLOUD_SYNC_READ': Role(),
'CLOUD_SYNC_WRITE': Role(includes=['CLOUD_SYNC_READ']),
'SERVICE_READ': Role(),
'SERVICE_WRITE': Role(),
# for webui.enclosure.** namespace
'ENCLOSURE_READ': Role(),
'ENCLOSURE_WRITE': Role(includes=['ENCLOSURE_READ']),
# Network roles
'NETWORK_GENERAL_READ': Role(),
'NETWORK_INTERFACE_READ': Role(),
'NETWORK_INTERFACE_WRITE': Role(includes=['NETWORK_INTERFACE_READ']),
# VM roles
'VM_READ': Role(),
'VM_WRITE': Role(includes=['VM_READ']),
'VM_DEVICE_READ': Role(includes=['VM_READ']),
'VM_DEVICE_WRITE': Role(includes=['VM_WRITE', 'VM_DEVICE_READ']),
# JBOF roles
'JBOF_READ': Role(),
'JBOF_WRITE': Role(includes=['JBOF_READ']),
# Truecommand roles
'TRUECOMMAND_READ': Role(),
'TRUECOMMAND_WRITE': Role(includes=['TRUECOMMAND_READ']),
# Crypto roles
'CERTIFICATE_READ': Role(),
'CERTIFICATE_WRITE': Role(includes=['CERTIFICATE_READ']),
'CERTIFICATE_AUTHORITY_READ': Role(),
'CERTIFICATE_AUTHORITY_WRITE': Role(includes=['CERTIFICATE_AUTHORITY_READ']),
# Apps roles
'CATALOG_READ': Role(),
'CATALOG_WRITE': Role(includes=['CATALOG_READ']),
'DOCKER_READ': Role(includes=[]),
'DOCKER_WRITE': Role(includes=['DOCKER_READ']),
'APPS_READ': Role(includes=['CATALOG_READ']),
'APPS_WRITE': Role(includes=['CATALOG_WRITE', 'APPS_READ']),
# FTP roles
'SHARING_FTP_READ': Role(),
'SHARING_FTP_WRITE': Role(includes=['SHARING_FTP_READ']),
# iSCSI roles
'SHARING_ISCSI_AUTH_READ': Role(),
'SHARING_ISCSI_AUTH_WRITE': Role(includes=['SHARING_ISCSI_AUTH_READ']),
'SHARING_ISCSI_EXTENT_READ': Role(),
'SHARING_ISCSI_EXTENT_WRITE': Role(includes=['SHARING_ISCSI_EXTENT_READ']),
'SHARING_ISCSI_GLOBAL_READ': Role(),
'SHARING_ISCSI_GLOBAL_WRITE': Role(includes=['SHARING_ISCSI_GLOBAL_READ']),
'SHARING_ISCSI_HOST_READ': Role(),
'SHARING_ISCSI_HOST_WRITE': Role(includes=['SHARING_ISCSI_HOST_READ']),
'SHARING_ISCSI_INITIATOR_READ': Role(),
'SHARING_ISCSI_INITIATOR_WRITE': Role(includes=['SHARING_ISCSI_INITIATOR_READ']),
'SHARING_ISCSI_PORTAL_READ': Role(),
'SHARING_ISCSI_PORTAL_WRITE': Role(includes=['SHARING_ISCSI_PORTAL_READ']),
'SHARING_ISCSI_TARGET_READ': Role(),
'SHARING_ISCSI_TARGET_WRITE': Role(includes=['SHARING_ISCSI_TARGET_READ']),
'SHARING_ISCSI_TARGETEXTENT_READ': Role(),
'SHARING_ISCSI_TARGETEXTENT_WRITE': Role(includes=['SHARING_ISCSI_TARGETEXTENT_READ']),
'SHARING_ISCSI_READ': Role(includes=['SHARING_ISCSI_AUTH_READ',
'SHARING_ISCSI_EXTENT_READ',
'SHARING_ISCSI_GLOBAL_READ',
'SHARING_ISCSI_HOST_READ',
'SHARING_ISCSI_INITIATOR_READ',
'SHARING_ISCSI_PORTAL_READ',
'SHARING_ISCSI_TARGET_READ',
'SHARING_ISCSI_TARGETEXTENT_READ']),
'SHARING_ISCSI_WRITE': Role(includes=['SHARING_ISCSI_AUTH_WRITE',
'SHARING_ISCSI_EXTENT_WRITE',
'SHARING_ISCSI_GLOBAL_WRITE',
'SHARING_ISCSI_HOST_WRITE',
'SHARING_ISCSI_INITIATOR_WRITE',
'SHARING_ISCSI_PORTAL_WRITE',
'SHARING_ISCSI_TARGET_WRITE',
'SHARING_ISCSI_TARGETEXTENT_WRITE']),
'SHARING_NFS_READ': Role(),
'SHARING_NFS_WRITE': Role(includes=['SHARING_NFS_READ']),
'SHARING_SMB_READ': Role(),
'SHARING_SMB_WRITE': Role(includes=['SHARING_SMB_READ']),
'SHARING_READ': Role(includes=['SHARING_ISCSI_READ',
'SHARING_NFS_READ',
'SHARING_SMB_READ',
'SHARING_FTP_READ']),
'SHARING_WRITE': Role(includes=['SHARING_ISCSI_WRITE',
'SHARING_NFS_WRITE',
'SHARING_SMB_WRITE',
'SHARING_FTP_WRITE']),
'KEYCHAIN_CREDENTIAL_READ': Role(),
'KEYCHAIN_CREDENTIAL_WRITE': Role(includes=['KEYCHAIN_CREDENTIAL_READ']),
'REPLICATION_TASK_CONFIG_READ': Role(),
'REPLICATION_TASK_CONFIG_WRITE': Role(includes=['REPLICATION_TASK_CONFIG_READ']),
'REPLICATION_TASK_READ': Role(),
'REPLICATION_TASK_WRITE': Role(includes=['REPLICATION_TASK_READ']),
'REPLICATION_TASK_WRITE_PULL': Role(includes=['REPLICATION_TASK_WRITE']),
'SNAPSHOT_TASK_READ': Role(),
'SNAPSHOT_TASK_WRITE': Role(includes=['SNAPSHOT_TASK_READ']),
'POOL_SCRUB_READ': Role(),
'POOL_SCRUB_WRITE': Role(includes=['POOL_SCRUB_READ']),
'DATASET_READ': Role(),
'DATASET_WRITE': Role(includes=['DATASET_READ']),
'DATASET_DELETE': Role(),
'SNAPSHOT_READ': Role(),
'SNAPSHOT_WRITE': Role(includes=['SNAPSHOT_READ']),
'SNAPSHOT_DELETE': Role(),
'REPLICATION_ADMIN': Role(includes=['KEYCHAIN_CREDENTIAL_WRITE',
'REPLICATION_TASK_CONFIG_WRITE',
'REPLICATION_TASK_WRITE',
'SNAPSHOT_TASK_WRITE',
'SNAPSHOT_WRITE'],
builtin=False),
'SHARING_ADMIN': Role(includes=['READONLY_ADMIN',
'DATASET_WRITE',
'SHARING_WRITE',
'FILESYSTEM_ATTRS_WRITE',
'SERVICE_READ'],
builtin=False),
# System settings
'SYSTEM_GENERAL_READ': Role(),
'SYSTEM_GENERAL_WRITE': Role(includes=['SYSTEM_GENERAL_READ']),
'SYSTEM_ADVANCED_READ': Role(),
'SYSTEM_ADVANCED_WRITE': Role(includes=['SYSTEM_ADVANCED_READ']),
# Virtualization
'VIRT_GLOBAL_READ': Role(),
'VIRT_GLOBAL_WRITE': Role(includes=['VIRT_GLOBAL_READ']),
'VIRT_INSTANCE_READ': Role(),
'VIRT_INSTANCE_WRITE': Role(includes=['VIRT_INSTANCE_READ']),
'VIRT_INSTANCE_DELETE': Role(),
'VIRT_IMAGE_READ': Role(),
'VIRT_IMAGE_WRITE': Role(includes=['VIRT_IMAGE_READ']),
}
ROLES['READONLY_ADMIN'] = Role(includes=[role for role in ROLES if role.endswith('_READ')], builtin=False)
class ResourceManager:
    """Tracks which roles grant access to a set of named resources.

    Used for both methods (resource_method="CALL") and events
    (resource_method="SUBSCRIBE").
    """

    def __init__(self, resource_title: str, resource_method: str, roles: typing.Dict[str, Role]):
        self.resource_title: str = resource_title
        self.resource_method: str = resource_method
        self.roles: typing.Dict[str, Role] = roles
        # resource name -> roles explicitly granted on it
        self.resources: typing.Dict[str, list[str]] = {}
        # role name -> allowlist entries ({"method": ..., "resource": ...})
        self.allowlists_for_roles: typing.Dict[str, list[dict[str, str]]] = defaultdict(list)

    def register_resource(self, resource_name: str, roles: typing.Iterable[str], exist_ok: bool):
        """Register `resource_name` and grant `roles` on it.

        Raises ValueError on duplicate registration unless `exist_ok` is set.
        """
        if resource_name not in self.resources:
            self.resources[resource_name] = []
        elif not exist_ok:
            raise ValueError(f"{self.resource_title} {resource_name!r} is already registered in this role manager")

        self.add_roles_to_resource(resource_name, roles)

    def add_roles_to_resource(self, resource_name: str, roles: typing.Iterable[str]):
        """Grant additional `roles` on an already-registered resource."""
        if resource_name not in self.resources:
            raise ValueError(f"{self.resource_title} {resource_name!r} is not registered in this role manager")

        for role in roles:
            if role not in self.roles:
                raise ValueError(f"Invalid role {role!r}")

        self.resources[resource_name] += roles
        for role in roles:
            self.allowlists_for_roles[role].append({"method": self.resource_method, "resource": resource_name})

    def roles_for_resource(self, resource_name: str) -> typing.List[str]:
        """Return every role granting access to `resource_name`.

        Starts from the directly-granted roles and repeatedly adds any role
        whose `includes` list contains an already-granted role, until the set
        stops growing (transitive closure over role inclusion).
        """
        granted = set(self.resources.get(resource_name, []))
        expanded = True
        while expanded:
            expanded = False
            for name, role in self.roles.items():
                if name in granted:
                    continue
                if any(child in granted for child in role.includes):
                    granted.add(name)
                    expanded = True
        return sorted(granted)
class RoleManager:
    """Maps roles onto the methods they may CALL and the events they may SUBSCRIBE to."""

    def __init__(self, roles: typing.Dict[str, Role]):
        self.roles = roles
        self.methods = ResourceManager("Method", "CALL", self.roles)
        self.events = ResourceManager("Event", "SUBSCRIBE", self.roles)

    def register_method(self, method_name: str, roles: typing.Iterable[str], *, exist_ok: bool = False):
        """Register a method and the roles that may call it."""
        self.methods.register_resource(method_name, roles, exist_ok)

    def add_roles_to_method(self, method_name: str, roles: typing.Iterable[str]):
        """Grant additional roles on an already-registered method."""
        self.methods.add_roles_to_resource(method_name, roles)

    def register_event(self, event_name: str, roles: typing.Iterable[str], *, exist_ok: bool = False):
        """Register an event and the roles that may subscribe to it."""
        self.events.register_resource(event_name, roles, exist_ok)

    def roles_for_role(self, role: str) -> typing.Set[str]:
        """Return `role` plus everything it transitively includes (empty set if unknown)."""
        if role not in self.roles:
            return set()
        closure = {role}
        for included_role in self.roles[role].includes:
            closure |= self.roles_for_role(included_role)
        return closure

    def allowlist_for_role(self, role: str) -> typing.List[dict[str, str]]:
        """Return all allowlist entries earned by `role`, following role inclusion."""
        if role in self.roles and self.roles[role].full_admin:
            # Full administrators may call any method and subscribe to any event.
            return [{"method": "CALL", "resource": "*"}, {"method": "SUBSCRIBE", "resource": "*"}]

        entries = []
        for member in self.roles_for_role(role):
            entries.extend(self.methods.allowlists_for_roles[member])
            entries.extend(self.events.allowlists_for_roles[member])
        return entries

    def roles_for_method(self, method_name: str) -> typing.List[str]:
        """Return the roles that grant access to `method_name`."""
        return self.methods.roles_for_resource(method_name)

    def roles_for_event(self, event_name: str) -> typing.List[str]:
        """Return the roles that grant access to `event_name`."""
        return self.events.roles_for_resource(event_name)
import asyncio
import socket
from middlewared.validators import IpAddress, check_path_resides_within_volume_sync
async def check_path_resides_within_volume(verrors, middleware, schema_name, path):
    """
    async wrapper around synchronous general-purpose path validation function
    """
    volumes = await middleware.call("datastore.query", "storage.volume")
    vol_names = [volume["vol_name"] for volume in volumes]
    return await middleware.run_in_thread(
        check_path_resides_within_volume_sync,
        verrors, schema_name, path, vol_names,
    )
async def resolve_hostname(middleware, verrors, name, hostname):
    """Add an error to `verrors` unless `hostname` is an IP literal or DNS-resolvable.

    Resolution is performed in a thread and abandoned after a 5 second timeout.
    """
    def _resolve(host):
        try:
            try:
                # Accept IP address literals as-is.
                IpAddress()(host)
                return host
            except ValueError:
                # Not a literal address -- fall back to a DNS lookup.
                return socket.gethostbyname(host)
        except Exception:
            return False

    try:
        task = middleware.create_task(middleware.run_in_thread(_resolve, hostname))
        resolved = await asyncio.wait_for(task, timeout=5)
    except TimeoutError:
        resolved = False

    if not resolved:
        verrors.add(
            name,
            "Couldn't resolve hostname"
        )
async def validate_country(middleware, country_name, verrors, v_field_name):
    """Add a validation error unless `country_name` is one of the system's country choices."""
    choices = await middleware.call('system.general.country_choices')
    if country_name not in choices:
        verrors.add(
            v_field_name,
            f'{country_name} not in countries recognized by the system'
        )
async def validate_port(middleware, schema, port, whitelist_namespace=None, bind_ip='0.0.0.0'):
    # Thin wrapper delegating to the `port.validate_port` service method.
    # Note the argument order differs from this signature: `bind_ip` is passed
    # before `whitelist_namespace` in the remote call.
    return await middleware.call('port.validate_port', schema, port, bind_ip, whitelist_namespace)
import errno
import subprocess
import typing
from truenas_api_client import ErrnoMixin
def get_errname(code: int) -> str:
    """Return a symbolic name for errno `code` (e.g. 2 -> 'ENOENT').

    Tries the stdlib errno table first, then middleware-specific names from
    `ErrnoMixin`, and finally falls back to 'EUNKNOWN'. Lookups are attempted
    lazily so `ErrnoMixin` is only consulted when the stdlib table misses.
    """
    name = errno.errorcode.get(code)
    if name:
        return name
    name = ErrnoMixin._get_errname(code)
    if name:
        return name
    return 'EUNKNOWN'
class CallException(ErrnoMixin, Exception):
    """Common base class for middleware call exceptions."""
    pass
class CallError(CallException):
    """Generic middleware call failure.

    Carries a human-readable message, an errno-style code (default EFAULT)
    and optional structured `extra` data for the client.
    """

    def __init__(self, errmsg: str, errno: int = errno.EFAULT, extra=None):
        self.errmsg = errmsg
        self.errno = errno
        self.extra = extra

    def __str__(self):
        return f'[{get_errname(self.errno)}] {self.errmsg}'
class ValidationError(CallException):
    """
    ValidationError is an exception used to point when a provided
    attribute of a middleware method is invalid/not allowed.
    """

    def __init__(self, attribute, errmsg, errno: int = errno.EINVAL):
        self.attribute = attribute
        self.errmsg = errmsg
        self.errno = errno

    def __str__(self):
        return f'[{get_errname(self.errno)}] {self.attribute}: {self.errmsg}'

    def __eq__(self, other):
        if not isinstance(other, ValidationError):
            return False
        return (
            (self.attribute, self.errmsg, self.errno) ==
            (other.attribute, other.errmsg, other.errno)
        )
class ValidationErrors(CallException):
    """
    CallException aggregating a collection of ValidationError instances.
    """

    def __init__(self, errors: typing.List[ValidationError] = None):
        self.errors = errors or []
        super().__init__(self.errors)

    def add(self, attribute, errmsg: str, errno: int = errno.EINVAL):
        """Record a new validation error for `attribute`."""
        self.errors.append(ValidationError(attribute, errmsg, errno))

    def add_validation_error(self, validation_error: ValidationError):
        """Append an already-built ValidationError."""
        self.errors.append(validation_error)

    def add_child(self, attribute, child: 'ValidationErrors'):
        """Merge `child`'s errors, prefixing their attributes with `attribute.`."""
        for error in child.errors:
            self.add(f"{attribute}.{error.attribute}", error.errmsg, error.errno)

    def check(self):
        """Raise self if any errors have been collected."""
        if self:
            raise self

    def extend(self, errors: 'ValidationErrors'):
        """Merge another collection's errors without re-prefixing."""
        for error in errors.errors:
            self.add(error.attribute, error.errmsg, error.errno)

    def __iter__(self):
        return iter((e.attribute, e.errmsg, e.errno) for e in self.errors)

    def __bool__(self):
        return len(self.errors) > 0

    def __str__(self):
        return ''.join(f'{error}\n' for error in self.errors)

    def __contains__(self, item):
        # True when any collected error concerns attribute `item`.
        return any(error.attribute == item for error in self.errors)
def adapt_exception(e) -> typing.Union[CallError, None]:
    """Convert well-known third-party exceptions into `CallError`.

    Currently handles `subprocess.CalledProcessError`; returns None for
    anything else so callers can re-raise the original exception.
    """
    from .utils.shell import join_commandline

    if not isinstance(e, subprocess.CalledProcessError):
        return None

    cmd = join_commandline(e.cmd) if isinstance(e.cmd, (list, tuple)) else e.cmd

    def _as_text(stream):
        # stdout/stderr may be None, str or bytes depending on how the process ran.
        stream = stream or ""
        if isinstance(stream, bytes):
            return stream.decode("utf-8", "ignore")
        return stream

    output = (_as_text(e.stdout) + _as_text(e.stderr)).rstrip()
    return CallError(f'Command {cmd} failed (code {e.returncode}):\n{output}')
class InstanceNotFound(ValidationError):
    """Raised when `get_instance` failed to locate specific object"""
    def __init__(self, errmsg):
        # No single attribute is at fault, hence `None`; ENOENT conveys "not found".
        super().__init__(None, errmsg, errno.ENOENT)
class MatchNotFound(IndexError):
    """Raised when a query matched no record, e.g. in filter_utils / datastore.query"""
    pass
import asyncio
import inspect
import os
import setproctitle
from truenas_api_client import Client
from . import logger
from .common.environ import environ_update
from .utils import MIDDLEWARE_RUN_DIR
from .utils.plugins import LoadPluginsMixin
from .utils.prctl import die_with_parent
from .utils.service.call import MethodNotFoundError, ServiceCallMixin
# Populated with a FakeMiddleware instance by worker_init(); read by main_worker().
MIDDLEWARE = None
class FakeMiddleware(LoadPluginsMixin, ServiceCallMixin):
    """
    Implements same API from real middleware
    """

    def __init__(self):
        super().__init__()
        # Client connection to the main middleware process; only set for the
        # duration of a `_call` invocation.
        self.client = None
        _logger = logger.Logger('worker')
        self.logger = _logger.getLogger()
        _logger.configure_logging('console')
        self.loop = asyncio.get_event_loop()

    def _call(self, name, serviceobj, methodobj, params=None, app=None, pipes=None, job=None):
        """Invoke `methodobj` locally, with a live client connection available via `self.client`."""
        try:
            with Client(f'ws+unix://{MIDDLEWARE_RUN_DIR}/middlewared-internal.sock', py_exceptions=True) as c:
                self.client = c
                job_options = getattr(methodobj, '_job', None)
                if job and job_options:
                    # Job methods receive a FakeJob proxy as their first argument so
                    # progress updates are forwarded to the main process.
                    params = list(params) if params else []
                    params.insert(0, FakeJob(job['id'], self.client))
                return methodobj(*params)
        finally:
            # Connection is closed by the `with` block; drop the stale reference.
            self.client = None

    def _run(self, name, args, job):
        # Resolve the dotted method name and execute it in this worker process.
        serviceobj, methodobj = self.get_method(name)
        return self._call(name, serviceobj, methodobj, args, job=job)

    def call_sync(self, method, *params, timeout=None, **kwargs):
        """
        Calls a method using middleware client
        """
        serviceobj, methodobj = self.get_method(method)

        # NOTE(review): `method` is a string here, so `hasattr(method, '_job')` is
        # always False -- this was probably meant to be `methodobj`; confirm before changing.
        if serviceobj._config.process_pool and not hasattr(method, '_job'):
            if asyncio.iscoroutinefunction(methodobj):
                try:
                    # Search for a synchronous implementation of the asynchronous method (i.e. `get_instance`).
                    # Why is this needed? Imagine we have a `ZFSSnapshot` service that uses a process pool. Let's say
                    # its `create` method calls `zfs.snapshot.get_instance` to return the result. That call will have
                    # to be forwarded to the main middleware process, which will call `zfs.snapshot.query` in the
                    # process pool. If the process pool is already exhausted, it will lead to a deadlock.
                    # By executing a synchronous implementation of the same method in the same process pool we
                    # eliminate `Hold and wait` condition and prevent deadlock situation from arising.
                    _, sync_methodobj = self.get_method(f'{method}__sync')
                except MethodNotFoundError:
                    # FIXME: Make this an exception in 22.MM
                    self.logger.warning('Service uses a process pool but has an asynchronous method: %r', method)
                    sync_methodobj = None
            else:
                sync_methodobj = methodobj

            if sync_methodobj is not None:
                self.logger.trace('Calling %r in current process', method)
                return sync_methodobj(*params)

        # Otherwise forward the call to the main middleware process.
        return self.client.call(method, *params, timeout=timeout, **kwargs)

    def event_register(self, *args, **kwargs):
        # Event registration is a no-op inside worker processes.
        pass

    def get_events(self):
        return []

    def send_event(self, name, event_type, **kwargs):
        # Forward events through a short-lived client connection.
        with Client(py_exceptions=True) as c:
            return c.call('core.event_send', name, event_type, kwargs)
class FakeJob(object):
    """Minimal stand-in for a middleware Job inside a worker process.

    Progress updates are kept locally and forwarded to the main process
    through the worker's client connection (`core.job_update`).
    """

    def __init__(self, id_, client):
        self.id = id_
        self.client = client
        self.progress = {'percent': None, 'description': None, 'extra': None}

    def set_progress(self, percent, description=None, extra=None):
        """Update local progress state and push it to the main middleware process."""
        self.progress['percent'] = percent
        if description:
            self.progress['description'] = description
        if extra:
            self.progress['extra'] = extra
        self.client.call('core.job_update', self.id, {'progress': self.progress})
def main_worker(*call_args):
    """Process-pool entry point: execute the requested method via the fake middleware."""
    global MIDDLEWARE
    try:
        result = MIDDLEWARE._run(*call_args)
    except SystemExit:
        raise RuntimeError('Worker call raised SystemExit exception')

    # TODO: python cant pickle generator for obvious reasons, we should implement
    # it using Pipe.
    if inspect.isgenerator(result):
        return list(result)
    return result
def receive_events():
    # Keep a persistent client connection so this worker's environment variables
    # stay in sync with `core.environ` changes in the main middleware process.
    c = Client(f'ws+unix://{MIDDLEWARE_RUN_DIR}/middlewared-internal.sock', py_exceptions=True)
    c.subscribe('core.environ', lambda *args, **kwargs: environ_update(kwargs['fields']))
    # Apply the current environment snapshot immediately.
    environ_update(c.call('core.environ'))
def worker_init(debug_level, log_handler):
    # Process-pool initializer: build the in-process middleware emulation used by
    # main_worker() and wire up logging/environment propagation.
    global MIDDLEWARE
    MIDDLEWARE = FakeMiddleware()
    # Signal plugin code that it is being imported at load time.
    os.environ['MIDDLEWARED_LOADING'] = 'True'
    MIDDLEWARE._load_plugins()
    os.environ['MIDDLEWARED_LOADING'] = 'False'
    setproctitle.setproctitle('middlewared (worker)')
    # Terminate this worker if the parent middleware process exits.
    die_with_parent()
    logger.setup_logging('worker', debug_level, log_handler)
    receive_events()
import asyncio
import contextlib
import json
import threading
import typing
from middlewared.role import RoleManager
from middlewared.schema import Any, clean_and_validate_arg, ValidationErrors
class Events:
    """Registry of middleware event types and their access-control metadata."""

    def __init__(self, role_manager: RoleManager):
        self.role_manager = role_manager
        self._events: typing.Dict[str, dict[str, typing.Any]] = {}
        self.__events_private: typing.Set[str] = set()

    def register(self, name: str, description: str, private: bool, returns, new_style_returns, no_auth_required,
                 no_authz_required, roles: typing.Iterable[str]):
        """Register event `name`; raises ValueError on duplicate registration."""
        if name in self._events:
            raise ValueError(f'Event {name!r} already registered.')

        self.role_manager.register_event(name, roles)
        self._events[name] = {
            'description': description,
            'accepts': [],
            # Default to a nullable Any schema when no returns schema was given.
            'returns': [returns] if returns else [Any(name, null=True)],
            'new_style_returns': new_style_returns,
            'no_auth_required': no_auth_required,
            'no_authz_required': no_authz_required,
            'roles': self.role_manager.roles_for_event(name),
        }
        if private:
            self.__events_private.add(name)

    def get_event(self, name: str) -> typing.Optional[dict[str, typing.Any]]:
        """Return the registration data for `name`, or None if unregistered."""
        return self._events.get(name)

    def __contains__(self, name):
        return name in self._events

    def __iter__(self):
        private_names = self.__events_private
        for name, data in self._events.items():
            yield name, {
                'private': name in private_names,
                'wildcard_subscription': True,
                **data,
            }
class EventSourceMetabase(type):
    """Metaclass normalizing `ACCEPTS`/`RETURNS` on `EventSource` subclasses.

    Each attribute is wrapped into a single-element list of schema objects;
    a class that left it as `NotImplementedError` gets a nullable `Any`
    schema named after the class instead.
    """
    def __new__(cls, name, bases, attrs):
        klass = super().__new__(cls, name, bases, attrs)
        # Skip processing for the abstract `EventSource` base itself.
        if name == 'EventSource' and bases == ():
            return klass

        # i = (attribute name, default schema name derived from the class name)
        for i in (('ACCEPTS', name.lower()), ('RETURNS', f'{name.lower()}_returns')):
            doc_type = getattr(klass, i[0])
            if doc_type == NotImplementedError:
                doc_type = Any(null=True)
            if not doc_type.name:
                doc_type.name = i[1]
            setattr(klass, i[0], [doc_type])
        return klass
class EventSource(metaclass=EventSourceMetabase):
    """Base class for long-lived event sources that push events to subscribers."""

    # Replaced by the metaclass with single-element lists of schema objects.
    ACCEPTS = NotImplementedError
    RETURNS = NotImplementedError

    def __init__(self, middleware, name, arg, send_event, unsubscribe_all):
        self.middleware = middleware
        self.name = name
        self.arg = arg
        self.send_event_internal = send_event
        self.unsubscribe_all = unsubscribe_all
        # Cancellation flags for async and thread-based implementations respectively.
        self._cancel = asyncio.Event()
        self._cancel_sync = threading.Event()

    def send_event(self, event_type: str, **kwargs):
        """Emit an event to subscribers via the injected callback."""
        self.send_event_internal(event_type, **kwargs)

    async def validate_arg(self):
        """Parse and validate `self.arg` against the ACCEPTS schema."""
        verrors = ValidationErrors()
        try:
            # A non-JSON string is kept as-is (decode error suppressed); a non-string
            # (e.g. None) raises TypeError and falls back to the schema default.
            with contextlib.suppress(json.JSONDecodeError):
                self.arg = json.loads(self.arg)
        except TypeError:
            self.arg = self.ACCEPTS[0].default

        self.arg = clean_and_validate_arg(verrors, self.ACCEPTS[0], self.arg)
        verrors.check()

    async def process(self):
        """Run the source, then always run cleanup and unsubscribe all listeners."""
        error = None
        try:
            await self.run()
        except Exception as e:
            error = e
            self.middleware.logger.error('EventSource %r run() failed', self.name, exc_info=True)

        try:
            await self.on_finish()
        except Exception:
            self.middleware.logger.error('EventSource %r on_finish() failed', self.name, exc_info=True)

        # Notify subscribers that the source terminated (passing the error, if any).
        await self.unsubscribe_all(error)

    async def run(self):
        # Default implementation delegates to the synchronous variant in a thread.
        await self.middleware.run_in_thread(self.run_sync)

    def run_sync(self):
        raise NotImplementedError('run_sync() method not implemented')

    async def cancel(self):
        """Signal both async and sync cancellation flags."""
        self._cancel.set()
        await self.middleware.run_in_thread(self._cancel_sync.set)

    async def on_finish(self):
        await self.middleware.run_in_thread(self.on_finish_sync)

    def on_finish_sync(self):
        # Optional cleanup hook for subclasses.
        pass
import os
import tempfile
class Pipes:
    """
    `job.pipes` container for a job's open pipes (`middlewared.pipe.Pipe` objects).

    :ivar input: Input pipe
    :ivar output: Output pipe
    """

    def __init__(self, input_=None, output=None):
        self.input = input_
        self.output = output

    def __iter__(self):
        # Yield only the pipes that are actually open, input side first.
        return iter(pipe for pipe in (self.input, self.output) if pipe is not None)

    async def close(self):
        for pipe in self:
            await pipe.close()
class Pipe:
    """
    Job's opened pipe.

    :ivar buffered: Selects the underlying resource.

        Unbuffered (default): a plain `os.pipe`. The writer blocks when the reader
        is not consuming, and the reader blocks until the writer finishes or
        closes its end.

        Buffered: an unbuffered temporary file. Neither side ever blocks, but a
        reader hitting EOF cannot conclude the writer is done and must check
        completion by other means (e.g. the writer job's state).

    :ivar r: Reading side of the pipe
    :ivar w: Writing side of the pipe
    """

    def __init__(self, middleware, buffered=False):
        self.middleware = middleware
        if buffered:
            # Temporary file opened unbuffered for writing; a second handle reads it.
            self.w = tempfile.NamedTemporaryFile(buffering=0)
            self.r = open(self.w.name, "rb")
        else:
            read_fd, write_fd = os.pipe()
            self.r = os.fdopen(read_fd, "rb")
            self.w = os.fdopen(write_fd, "wb")

    async def close(self):
        await self.middleware.run_in_thread(self.r.close)
        await self.middleware.run_in_thread(self.w.close)
import asyncio
import base64
import binascii
from collections import defaultdict
import copy
import errno
import pam
import traceback
import types
import urllib.parse
from aiohttp import web
from truenas_api_client import json
from .api.base.server.app import App
from .auth import ApiKeySessionManagerCredentials, LoginPasswordSessionManagerCredentials
from .job import Job
from .pipe import Pipes
from .schema import Error as SchemaError
from .service_exception import adapt_exception, CallError, MatchNotFound, ValidationError, ValidationErrors
from .utils.auth import AA_LEVEL1, CURRENT_AAL
from .utils.origin import ConnectionOrigin
def parse_credentials(request):
    """Extract authentication credentials from an HTTP request.

    Supports an `auth_token` query parameter and `Token`, `Basic` and `Bearer`
    Authorization headers. Returns a dict with `credentials` / `credentials_data`
    keys, or None when the request carries no recognizable credentials.

    Raises `web.HTTPBadRequest` for a malformed Basic Authorization header.
    """
    auth = request.headers.get('Authorization')
    if auth is None:
        # No header at all: fall back to an auth token in the query string.
        qs = urllib.parse.parse_qs(request.query_string)
        if 'auth_token' not in qs:
            return None
        return {
            'credentials': 'TOKEN',
            'credentials_data': {
                'token': qs['auth_token'][0],
            },
        }

    if auth.startswith('Token '):
        return {
            'credentials': 'TOKEN',
            'credentials_data': {
                'token': auth.split(' ', 1)[1],
            },
        }

    if auth.startswith('Basic '):
        try:
            username, password = base64.b64decode(auth[6:]).decode('utf-8').split(':', 1)
        except (UnicodeDecodeError, binascii.Error):
            raise web.HTTPBadRequest()
        return {
            'credentials': 'LOGIN_PASSWORD',
            'credentials_data': {
                'username': username,
                'password': password,
            },
        }

    if auth.startswith('Bearer '):
        return {
            'credentials': 'API_KEY',
            'credentials_data': {
                'api_key': auth.split(' ', 1)[1],
            },
        }
    # Unrecognized Authorization scheme.
    return None
async def authenticate(middleware, request, credentials, method, resource):
    """Authenticate parsed REST `credentials`.

    Returns a session-manager credentials object (or token object) on success;
    raises an aiohttp 401/403 exception on failure.
    """
    # REST only supports assurance level 1; higher levels forbid these mechanisms.
    if CURRENT_AAL.level is not AA_LEVEL1:
        raise web.HTTPForbidden(text='REST authentication not permitted by server authentication security level')

    if credentials['credentials'] == 'TOKEN':
        origin = await middleware.run_in_thread(ConnectionOrigin.create, request)
        token = await middleware.call('auth.get_token_for_action', credentials['credentials_data']['token'],
                                      origin, method, resource)
        if token is None:
            raise web.HTTPForbidden(text='Invalid token')

        return token
    elif credentials['credentials'] == 'LOGIN_PASSWORD':
        twofactor_auth = await middleware.call('auth.twofactor.config')
        if twofactor_auth['enabled']:
            # Basic auth has no way to carry the OTP factor.
            raise web.HTTPUnauthorized(text='HTTP Basic Auth is unavailable when OTP is enabled')

        resp = await middleware.call('auth.authenticate_plain',
                                     credentials['credentials_data']['username'],
                                     credentials['credentials_data']['password'])
        if resp['pam_response']['code'] != pam.PAM_SUCCESS:
            raise web.HTTPUnauthorized(text='Bad username or password')

        return LoginPasswordSessionManagerCredentials(resp['user_data'], assurance=CURRENT_AAL.level)
    elif credentials['credentials'] == 'API_KEY':
        api_key = await middleware.call('api_key.authenticate', credentials['credentials_data']['api_key'])
        if api_key is None:
            raise web.HTTPUnauthorized(text='Invalid API key')

        return ApiKeySessionManagerCredentials(*api_key, assurance=CURRENT_AAL.level)
    else:
        raise web.HTTPUnauthorized()
def create_application_impl(request, credentials=None):
    # Synchronous Application factory; callers run it off the event loop because
    # `ConnectionOrigin.create` may block.
    return Application(ConnectionOrigin.create(request), credentials)
async def create_application(request, credentials=None):
    # Build the Application in a worker thread to keep the event loop responsive.
    return await asyncio.to_thread(create_application_impl, request, credentials)
def normalize_query_parameter(value):
    # Query-string values arrive as strings; decode JSON-looking values (numbers,
    # booleans, lists, ...) and fall back to the raw string otherwise.
    # NOTE(review): `json` is the truenas_api_client module; `json.json` appears to
    # expose the underlying stdlib json module -- confirm against that package.
    try:
        return json.loads(value)
    except json.json.JSONDecodeError:
        return value
class Application(App):
    """REST flavor of the API application object used for auditing/authorization."""

    def __init__(self, origin, authenticated_credentials):
        super().__init__(origin)
        # REST requests carry no session.
        self.session_id = None
        # Stateless: presence of credentials implies the request is authenticated.
        self.authenticated = authenticated_credentials is not None
        self.authenticated_credentials = authenticated_credentials
        self.rest = True
class RESTfulAPI(object):
    """Builds the aiohttp REST API by mapping middleware services/methods to routes."""

    def __init__(self, middleware, app):
        self.middleware = middleware
        self.app = app

        # Keep methods cached for future lookups
        self._methods = {}
        self._methods_by_service = defaultdict(dict)

        self._openapi = OpenAPIResource(self)

    def get_app(self):
        return self.app

    async def register_resources(self):
        """Create a Resource (and routes) for every REST-exposed service and method."""
        for methodname, method in list((await self.middleware.call('core.get_methods', None, 'REST')).items()):
            self._methods[methodname] = method
            self._methods_by_service[methodname.rsplit('.', 1)[0]][methodname] = method

        for name, service in list((await self.middleware.call('core.get_services', 'REST')).items()):
            # The test service is registered but excluded from the OpenAPI document.
            openapi = True
            if name == 'resttest':
                openapi = False

            kwargs = {}
            blacklist_methods = []
            """
            Hook up methods for the resource entrypoint.
            For CRUD:
              - GET -> $name.query
              - POST - $name.create
            For Config:
              - GET -> $name.config
              - PUT -> $name.update
            """
            if service['type'] == 'crud':
                kwargs['get'] = '{}.query'.format(name)
                post = f'{name}.create'
                if post in self._methods:
                    kwargs['post'] = '{}.create'.format(name)
                blacklist_methods.extend(list(kwargs.values()))
            elif service['type'] == 'config':
                kwargs['get'] = '{}.config'.format(name)
                put = '{}.update'.format(name)
                if put in self._methods:
                    kwargs['put'] = put
                blacklist_methods.extend(list(kwargs.values()))

            service_resource = Resource(self, self.middleware, name.replace('.', '/'), service['config'], openapi,
                                        **kwargs)

            """
            For CRUD services we also need a direct subresource so we can
            operate on items in the entity, e.g. update or delete "john" of user namespace.
            """
            subresource = None
            if service['type'] == 'crud':
                kwargs = {}
                get = f'{name}.query'
                if get in self._methods:
                    kwargs['get'] = get
                delete = f'{name}.delete'
                if delete in self._methods:
                    kwargs['delete'] = delete
                put = f'{name}.update'
                if put in self._methods:
                    kwargs['put'] = put
                blacklist_methods.extend(list(kwargs.values()))
                subresource = Resource(
                    self, self.middleware, 'id/{id_}', service['config'], openapi,
                    parent=service_resource, **kwargs,
                )

            for methodname, method in list(self._methods_by_service[name].items()):
                # Skip methods already wired up as entrypoint/subresource operations.
                if methodname in blacklist_methods:
                    continue

                if method['require_websocket']:
                    continue

                short_methodname = methodname.rsplit('.', 1)[-1]
                # Item methods hang off the `id/{id_}` subresource.
                if method.get('item_method') is True:
                    parent = subresource
                else:
                    parent = service_resource

                res_kwargs = {}
                """
                Methods with not empty accepts list and not filterable
                are treated as POST HTTP methods.
                """
                if (method['accepts'] and not method['filterable']) or method['uploadable']:
                    res_kwargs['post'] = methodname
                else:
                    res_kwargs['get'] = methodname

                for rest_method in map(str.lower, (method['extra_methods'] or [])):
                    assert rest_method in ('get',)
                    # Only allow get for now as that's the only use case we have for now NAS-110243
                    res_kwargs[rest_method] = methodname

                Resource(self, self.middleware, short_methodname, service['config'], openapi,
                         parent=parent, **res_kwargs)

            # Yield to the event loop between services; registration is lengthy.
            await asyncio.sleep(0)  # Force context switch
class OpenAPIResource(object):
    """Serves the OpenAPI 3.0 document describing the registered REST resources."""

    def __init__(self, rest):
        self.rest = rest
        self.rest.app.router.add_route('GET', '/api/v2.0', self.get)
        self.rest.app.router.add_route('GET', '/api/v2.0/', self.get)
        self.rest.app.router.add_route('GET', '/api/v2.0/openapi.json', self.get)
        # Accumulators for the OpenAPI document, filled by add_path().
        self._paths = defaultdict(dict)
        self._schemas = dict()
        self._components = defaultdict(dict)
        self._components['schemas'] = self._schemas
        self._components['responses'] = {
            'NotFound': {
                'description': 'Endpoint not found',
            },
            'Unauthorized': {
                'description': 'No authorization for this endpoint',
            },
            'Success': {
                'description': 'Operation succeeded',
            },
        }
        self._components['securitySchemes'] = {
            'basic': {
                'type': 'http',
                'scheme': 'basic'
            },
        }

    def add_path(self, path, operation, methodname, service_config):
        """Record the OpenAPI operation object for `operation` on `path`."""
        assert operation in ('get', 'post', 'put', 'delete')
        opobject = {
            'tags': [methodname.rsplit('.', 1)[0]],
            'responses': {
                '200': {'$ref': '#/components/responses/Success'},
                '401': {'$ref': '#/components/responses/Unauthorized'},
            },
            'parameters': [],
        }
        method = self.rest._methods.get(methodname)
        if method:
            desc = method['description'] or ''
            if method['downloadable']:
                if method['check_pipes']:
                    desc += '\n\nA file will be downloaded from this endpoint.'
                else:
                    desc += (
                        '\n\nA file might be downloaded from this endpoint. Please specify `?download=0` to fetch a '
                        'method call result instead.'
                    )
            if method['uploadable']:
                if method['check_pipes']:
                    desc += '\n\nA file must be uploaded to this endpoint. '
                else:
                    desc += (
                        '\n\nA file might be uploaded to this endpoint. '
                    )
                desc += (
                    'To upload a file, please send a multipart request with two parts. The first, named `data`, should '
                    'contain a JSON-encoded payload, and the second, named `file`, should contain an uploaded file.'
                )
            opobject['description'] = desc
            accepts = method.get('accepts')
            if method['filterable']:
                # Query endpoints get the standard pagination/sort parameters,
                # except on item paths where the id is fixed.
                opobject['parameters'] += [
                    {
                        'name': 'limit',
                        'in': 'query',
                        'required': False,
                        'schema': {'type': 'integer'},
                    },
                    {
                        'name': 'offset',
                        'in': 'query',
                        'required': False,
                        'schema': {'type': 'integer'},
                    },
                    {
                        'name': 'count',
                        'in': 'query',
                        'required': False,
                        'schema': {'type': 'boolean'},
                    },
                    {
                        'name': 'sort',
                        'in': 'query',
                        'required': False,
                        'schema': {'type': 'string'},
                    },
                ] if '{id_}' not in path else []
                desc = f'{desc}\n\n' if desc else ''
                opobject['description'] = desc + '`query-options.extra` can be specified as query parameters with ' \
                                                 'prefixing them with `extra.` prefix. For example, ' \
                                                 '`extra.retrieve_properties=false` will pass `retrieve_properties` ' \
                                                 'as an extra argument to pool/dataset endpoint.'
            elif accepts and not (operation == 'delete' and method['item_method'] and len(accepts) == 1) and (
                not method['filterable']
            ):
                opobject['requestBody'] = self._accepts_to_request(methodname, method, accepts)

            # For now we only accept `id_` as an url parameters
            if '{id_}' in path:
                opobject['parameters'].append({
                    'name': 'id_',
                    'in': 'path',
                    'required': True,
                    'schema': {'type': service_config['datastore_primary_key_type']},
                })

            method_returns = method.get('returns') or []
            if method_returns:
                opobject['responses']['200'] = self._returns_to_request(methodname, method_returns)

        self._paths[f'/{path}'][operation] = opobject

    def _convert_schema(self, schema):
        """
        Convert JSON Schema to OpenAPI Schema
        """
        schema = copy.deepcopy(schema)
        _type = schema.get('type')
        schema.pop('_required_', None)
        if isinstance(_type, list):
            # OpenAPI 3.0 has no type unions; fold 'null' into `nullable` and
            # keep the first remaining type.
            if 'null' in _type:
                _type.remove('null')
                schema['nullable'] = True
            schema['type'] = _type = _type[0]
        if _type == 'object':
            for key, val in schema.get('properties', {}).items():
                schema['properties'][key] = self._convert_schema(val)
        elif _type == 'array':
            items = schema.get('items')
            if isinstance(items, list):
                for i, item in enumerate(list(items)):
                    if item.get('type') == 'null':
                        items.remove(item)
                if len(items) > 1:
                    schema['items'] = {'oneOf': items}
                elif len(items) > 0:
                    schema['items'] = items[0]
                else:
                    schema['items'] = {}
        return schema

    def _returns_to_request(self, methodname, method_returns):
        """Build the 200 response object referencing the method's return schema."""
        method_name = f'return_schema_of_{methodname.replace(".", "_")}'
        for schema in method_returns:
            self._schemas[method_name] = self._convert_schema(schema)

        json_request = {'schema': {'$ref': f'#/components/schemas/{method_name}'}}
        return {
            'description': 'Response schema:',
            'content': {
                'application/json': json_request,
            }
        }

    def _accepts_to_request(self, methodname, method, schemas):
        """Build the requestBody object for a method's accepted arguments."""
        # Create an unique ID for every argument and register the schema
        methodname = methodname.replace(".", "_")
        if len(schemas) == 1 and not method["item_method"]:
            self._schemas[methodname] = self._convert_schema(schemas[0])
        elif len(schemas) == 2 and method["item_method"]:
            # In this case, we ignore the first schema
            self._schemas[methodname] = self._convert_schema(schemas[1])
        else:
            # If the method accepts multiple arguments lets emulate/create
            # a new schema, which is a object containing every argument as an
            # attribute.
            props = {}
            for i, schema in enumerate(schemas):
                if i == 0 and method['item_method']:
                    continue
                unique_id = f'{methodname}_{i}'
                self._schemas[unique_id] = self._convert_schema(schema)
                props[schema['title']] = {'$ref': f'#/components/schemas/{unique_id}'}
            new_schema = {
                'type': 'object',
                'properties': props
            }
            self._schemas[methodname] = new_schema

        json_request = {'schema': {'$ref': f'#/components/schemas/{methodname}'}}
        # Each REST example is expected to be "<title> { ...json... }".
        for i, example in enumerate(method['examples']['rest']):
            try:
                title, example = example.split('{', 1)
                example = json.loads('{' + example.strip())
            except ValueError:
                pass
            else:
                json_request.setdefault('examples', {})
                json_request['examples'][f'example_{i + 1}'] = {'summary': title.strip(), 'value': example}

        return {
            'content': {
                'application/json': json_request,
            }
        }

    def get(self, req, **kwargs):
        """Return the assembled OpenAPI document as a JSON response."""
        servers = []
        host = req.headers.get('Host')
        # Honor reverse-proxy headers when present.
        scheme = req.headers.get('X-Scheme') or req.scheme
        port = int(req.headers.get('X-Server-Port') or 80)
        if host:
            # This condition is only cosmetic to avoid specifying 80/443 in the uri
            if port not in [80, 443]:
                host = f'{host}:{port}'
            servers.append({
                'url': f'{scheme}://{host}/api/v2.0',
            })
        result = {
            'openapi': '3.0.0',
            'info': {
                'title': 'TrueNAS RESTful API',
                'version': 'v2.0',
            },
            'paths': self._paths,
            'servers': servers,
            'components': self._components,
            'security': [{'basic': []}],
        }

        resp = web.Response()
        resp.headers['Content-type'] = 'application/json'
        resp.text = json.dumps(result, indent=True)
        return resp
class Resource(object):
name = None
parent = None
delete = None
get = None
post = None
put = None
def __init__(
self, rest, middleware, name, service_config, openapi, parent=None,
delete=None, get=None, post=None, put=None,
):
self.rest = rest
self.middleware = middleware
self.name = name
self.parent = parent
self.service_config = service_config
self.__method_params = {}
path = self.get_path()
if delete:
self.delete = delete
if get:
self.get = get
if post:
self.post = post
if put:
self.put = put
for i in ('delete', 'get', 'post', 'put'):
operation = getattr(self, i)
if operation is None:
continue
self.rest.app.router.add_route(i.upper(), f'/api/v2.0/{path}', getattr(self, f'on_{i}'))
self.rest.app.router.add_route(i.upper(), f'/api/v2.0/{path}/', getattr(self, f'on_{i}'))
if openapi:
self.rest._openapi.add_path(path, i, operation, self.service_config)
self.__map_method_params(operation)
self.middleware.logger.trace(f"add route {self.get_path()}")
def __map_method_params(self, method_name):
    """
    Middleware methods which accepts more than one argument are mapped to a single
    schema of object type.
    For that reason we need to keep track of each parameter and its order
    """
    method = self.rest._methods.get(method_name)
    if not method:
        # Method unknown to the REST layer; nothing to map.
        return
    accepts = method.get('accepts')
    # Always create the entry so later lookups distinguish "no params"
    # from "never mapped".
    self.__method_params[method_name] = {}
    if accepts is None:
        return
    for i, accept in enumerate(accepts):
        # First param of an `item_method` is the item `id` and must be skipped
        # since thats gotten from the URL.
        if i == 0 and method['item_method']:
            continue
        self.__method_params[method_name][accept['title']] = {
            'order': i,
            'required': accept['_required_'],
        }
def __getattr__(self, attr):
    """
    Synthesize the aiohttp handlers ``on_get``/``on_post``/``on_delete``/
    ``on_put`` on demand. Each handler authenticates the request, writes an
    AUTHENTICATION audit record, and finally delegates to `do`.

    Returns None for a verb whose middleware method is not configured, so
    route setup can skip it.
    """
    if attr in ('on_get', 'on_post', 'on_delete', 'on_put'):
        # object.__getattribute__ avoids re-entering this __getattr__.
        do = object.__getattribute__(self, 'do')
        method = attr.split('_')[-1]
        if object.__getattribute__(self, method) is None:
            return None

        async def on_method(req, *args, **kwargs):
            resp = web.Response()
            # Derive the resource path (used for authorization checks) from
            # the matched route; static routes expose "path", dynamic ones
            # (with url params) expose "formatter".
            info = req.match_info.route.resource.get_info()
            if "path" in info:
                resource = info["path"][len("/api/v2.0"):]
            elif "formatter" in info:
                resource = info["formatter"][len("/api/v2.0"):]
            else:
                resource = None
            app = await create_application(req)
            auth_required = not self.rest._methods[getattr(self, method)]['no_auth_required']
            credentials = parse_credentials(req)
            if credentials is None:
                if auth_required:
                    raise web.HTTPUnauthorized()
                authenticated_credentials = None
            else:
                try:
                    authenticated_credentials = await authenticate(self.middleware, req, credentials,
                                                                   method.upper(), resource)
                except web.HTTPException as e:
                    # Never let plaintext passwords reach the audit log.
                    credentials['credentials_data'].pop('password', None)
                    await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
                        'credentials': credentials,
                        'error': e.text,
                    }, False)
                    raise
                # Rebuild the app object now that credentials are known.
                app = await create_application(req, authenticated_credentials)
                credentials['credentials_data'].pop('password', None)
                await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
                    'credentials': credentials,
                    'error': None,
                }, True)
            if auth_required:
                if authenticated_credentials is None:
                    raise web.HTTPUnauthorized()
            # URL params (e.g. id_) become keyword arguments for `do`.
            kwargs.update(dict(req.match_info))
            return await do(method, req, resp, app,
                            not auth_required or authenticated_credentials.authorize(method.upper(), resource),
                            *args, **kwargs)
        return on_method
    return object.__getattribute__(self, attr)
def get_path(self):
    """Return this resource's URL path, joining ancestor names with '/'."""
    segments = []
    node = self.parent
    while node is not None:
        # Walk up the chain, keeping ancestors in root-first order.
        segments.insert(0, node.name)
        node = node.parent
    segments.append(self.name)
    return '/'.join(segments)
def _filterable_args(self, req):
    """
    Translate query-string parameters into ``[filters, options]`` arguments
    for a filterable middleware method.

    * ``limit``/``offset``/``count`` and ``sort`` become query options.
    * ``extra.<key>`` parameters are collected into ``options['extra']``.
    * ``<field>__<op>`` pairs become filter tuples; bare keys use ``=``.
    Returns ``[]`` when no filters or options were supplied.
    """
    # FIX: the operator map and the `convert` helper were rebuilt on every
    # loop iteration in the original; both are loop-invariant, so they are
    # hoisted here. Behavior is unchanged.
    op_map = {
        'eq': '=',
        'neq': '!=',
        'gt': '>',
        'lt': '<',
        'gte': '>=',
        'lte': '<=',
        'regex': '~',
    }

    def convert(val):
        # Coerce query-string values for options: digits -> int,
        # true/false -> bool. ('0'/'1' are caught by isdigit first.)
        if val.isdigit():
            val = int(val)
        elif val.lower() in ('true', 'false', '0', '1'):
            if val.lower() in ('true', '1'):
                val = True
            elif val.lower() in ('false', '0'):
                val = False
        return val

    filters = []
    extra_args = {}
    options = {}
    for key, val in list(req.query.items()):
        if '__' in key:
            field, op = key.split('__', 1)
        else:
            field, op = key, '='

        if key in ('limit', 'offset', 'count'):
            options[key] = convert(val)
            continue
        elif key == 'sort':
            options[key] = [convert(v) for v in val.split(',')]
            continue
        elif key.startswith('extra.'):
            extra_args[key[len('extra.'):]] = normalize_query_parameter(val)
            continue

        op = op_map.get(op, op)
        # Filter values get a wider coercion than options: also 'null' -> None.
        if val.isdigit():
            val = int(val)
        elif val.lower() == 'true':
            val = True
        elif val.lower() == 'false':
            val = False
        elif val.lower() == 'null':
            val = None
        filters.append((field, op, val))

    if extra_args:
        options['extra'] = extra_args

    return [filters, options] if filters or options else []
async def parse_rest_json_request(self, req, resp):
    """
    Decode the request body as JSON.

    Returns ``(body, error)``: on success ``(parsed, False)``; on a parse
    failure the 400 response is prepared on `resp` and ``(None, True)`` is
    returned so the caller can bail out immediately.
    """
    try:
        parsed = await req.json()
    except json.decoder.JSONDecodeError as e:
        resp.set_status(400)
        resp.headers['Content-type'] = 'application/json'
        resp.text = json.dumps({
            'message': f'json parse error: {e}',
            'errno': errno.EINVAL,
        }, indent=True)
        return None, True
    return parsed, False
async def do(self, http_method, req, resp, app, authorized, **kwargs):
    """
    Execute the middleware method mapped to `http_method` for this resource
    and serialize the result as JSON (or stream it for downloadable methods).

    `authorized` is the caller's authorization verdict; False yields a 403
    after an audit record is written. Middleware errors are translated to
    422 (CallError / validation errors), 404 (MatchNotFound) or 500.
    """
    assert http_method in ('delete', 'get', 'post', 'put')

    methodname = getattr(self, http_method)
    method = self.rest._methods[methodname]

    method_kwargs = {}
    method_kwargs['app'] = app

    has_request_body = False
    request_body = None
    upload_pipe = None
    filepart = None
    if method['uploadable']:
        # Uploadable methods accept a two-part multipart body: `data`
        # (JSON call arguments) followed by `file` (the payload stream).
        if req.headers.get('Content-Type', '').startswith('multipart/'):
            reader = await req.multipart()
            part = await reader.next()
            if not part or part.name != "data":
                resp.set_status(400)
                resp.headers['Content-type'] = 'application/json'
                resp.text = json.dumps({
                    'message': 'The method accepts multipart requests with two parts (`data` and `file`).',
                    'errno': errno.EINVAL,
                }, indent=True)
                return resp

            has_request_body = True
            try:
                request_body = json.loads(await part.read())
            except ValueError as e:
                resp.set_status(400)
                resp.headers['Content-type'] = 'application/json'
                resp.text = json.dumps({
                    'message': f'`data` json parse error: {e}',
                    'errno': errno.EINVAL,
                }, indent=True)
                return resp

            filepart = await reader.next()
            if not filepart or filepart.name != "file":
                resp.set_status(400)
                resp.headers['Content-type'] = 'application/json'
                resp.text = json.dumps({
                    'message': ('The method accepts multipart requests with two parts (`data` and `file`). '
                                '`file` not found.'),
                    'errno': errno.EINVAL,
                }, indent=True)
                return resp

            upload_pipe = self.middleware.pipe()
        else:
            # Non-multipart request to an uploadable method: only allowed
            # when the method tolerates missing pipes.
            if method['check_pipes']:
                resp.set_status(400)
                resp.headers['Content-type'] = 'application/json'
                resp.text = json.dumps({
                    'message': 'This method accepts only multipart requests.',
                    'errno': errno.EINVAL,
                }, indent=True)
                return resp
            else:
                if await req.text():
                    has_request_body = True
                    request_body, error = await self.parse_rest_json_request(req, resp)
                    if error:
                        return resp
    else:
        # Plain (non-uploadable) method: optional JSON body.
        if await req.text():
            has_request_body = True
            request_body, error = await self.parse_rest_json_request(req, resp)
            if error:
                return resp

    download_pipe = None
    if method['downloadable']:
        # `?download=0` requests the JSON result instead of the stream.
        if req.query.get('download', '1') == '1':
            download_pipe = self.middleware.pipe()
        else:
            if method['check_pipes']:
                resp.set_status(400)
                resp.headers['Content-type'] = 'application/json'
                resp.text = json.dumps({
                    'message': 'JSON response is not supported for this method.',
                    'errno': errno.EINVAL,
                }, indent=True)
                return resp

    if upload_pipe and download_pipe:
        method_kwargs['pipes'] = Pipes(input_=upload_pipe, output=download_pipe)
    elif upload_pipe:
        method_kwargs['pipes'] = Pipes(input_=upload_pipe)
    elif download_pipe:
        method_kwargs['pipes'] = Pipes(output=download_pipe)

    method_args = []
    if http_method == 'get' and method['filterable']:
        if self.parent and 'id_' in kwargs:
            # Nested GET with an id: force a single-row lookup by the
            # service's primary key.
            primary_key = kwargs['id_']
            if primary_key.isdigit():
                primary_key = int(primary_key)
            extra = {}
            for key, val in list(req.query.items()):
                if key.startswith('extra.'):
                    extra[key[len('extra.'):]] = normalize_query_parameter(val)
            method_args = [
                [(self.service_config['datastore_primary_key'], '=', primary_key)],
                {'get': True, 'force_sql_filters': True, 'extra': extra}
            ]
        else:
            method_args = self._filterable_args(req)

    if not method_args:
        # RFC 7231 specifies that a GET request can accept a payload body
        # This means that all the http methods now ( delete, get, post, put ) accept a payload body
        try:
            if not has_request_body:
                method_args = []
            else:
                data = request_body
                params = self.__method_params.get(methodname)
                if not params and http_method in ('get', 'delete') and not data:
                    # This will happen when the request body contains empty dict "{}"
                    # Keeping compatibility with how we used to accept the above case, this
                    # makes sure that existing client implementations are not affected
                    method_args = []
                elif not params or len(params) == 1:
                    # Single-parameter methods take the body verbatim.
                    method_args = [data]
                else:
                    # Multi-parameter methods map a JSON object onto the
                    # positional parameter list, in declared order.
                    if not isinstance(data, dict):
                        resp.set_status(400)
                        resp.headers['Content-type'] = 'application/json'
                        resp.body = json.dumps({
                            'message': 'Endpoint accepts multiple params, object/dict expected.',
                        })
                        return resp

                    # These parameters were renamed as pydantic does not support `-` in parameter names
                    if 'query-filters' in data and 'query-filters' not in params and 'filters' in params:
                        data['filters'] = data.pop('query-filters')
                    if 'query-options' in data and 'query-options' not in params and 'options' in params:
                        data['options'] = data.pop('query-options')

                    method_args = []
                    for p, options in sorted(params.items(), key=lambda x: x[1]['order']):
                        if p not in data and options['required']:
                            resp.set_status(400)
                            resp.headers['Content-type'] = 'application/json'
                            resp.body = json.dumps({
                                'message': f'{p} attribute expected.',
                            })
                            return resp
                        elif p in data:
                            method_args.append(data.pop(p))
                    if data:
                        # Anything left over was not a declared parameter.
                        resp.set_status(400)
                        resp.headers['Content-type'] = 'application/json'
                        resp.body = json.dumps({
                            'message': f'The following attributes are not expected: {", ".join(data.keys())}',
                        })
                        return resp
        except Exception as e:
            resp.set_status(400)
            resp.headers['Content-type'] = 'application/json'
            resp.body = json.dumps({
                'message': str(e),
            })
            return resp

    """
    If the method is marked `item_method` then the first argument
    must be the item id (from url param)
    """
    if method.get('item_method') is True:
        id_ = kwargs['id_']
        try:
            id_ = int(id_)
        except ValueError:
            pass
        method_args.insert(0, id_)

    try:
        serviceobj, methodobj = self.middleware.get_method(methodname)
        if authorized:
            result = await self.middleware.call_with_audit(methodname, serviceobj, methodobj, method_args,
                                                           **method_kwargs)
        else:
            # Unauthorized calls are audited, then rejected.
            await self.middleware.log_audit_message_for_method(methodname, methodobj, method_args, app,
                                                               True, False, False)
            resp.set_status(403)
            return resp
        if upload_pipe:
            # Feed the uploaded file part into the method's input pipe from
            # a worker thread (blocking writes).
            await self.middleware.run_in_thread(copy_multipart_to_pipe, self.middleware.loop, filepart, upload_pipe)

        if method['downloadable'] and download_pipe is None:
            # download=0: wait for the job and return its JSON result.
            result = await result.wait()
    except CallError as e:
        resp = web.Response(status=422)
        result = {
            'message': e.errmsg,
            'errno': e.errno,
        }
    except (SchemaError, ValidationError, ValidationErrors) as e:
        # Normalize single validation errors to the list form, then group
        # messages per attribute.
        if isinstance(e, (SchemaError, ValidationError)):
            e = [(e.attribute, e.errmsg, e.errno)]
        result = defaultdict(list)
        for attr, errmsg, errno_ in e:
            result[attr].append({
                'message': errmsg,
                'errno': errno_,
            })
        resp = web.Response(status=422)
    except Exception as e:
        adapted = adapt_exception(e)
        if adapted:
            resp = web.Response(status=422)
            result = {
                'message': adapted.errmsg,
                'errno': adapted.errno,
            }
        else:
            if isinstance(e, (MatchNotFound,)):
                resp = web.Response(status=404)
                result = {
                    'message': str(e),
                }
            else:
                resp = web.Response(status=500)
                result = {
                    'message': str(e),
                    'traceback': ''.join(traceback.format_exc()),
                }

    if download_pipe is not None:
        # Stream the method's output pipe to the client in 1 MiB chunks;
        # the blocking reads happen in a worker thread.
        resp = web.StreamResponse(status=200, reason='OK', headers={
            'Content-Type': 'application/octet-stream',
            'Transfer-Encoding': 'chunked',
        })
        await resp.prepare(req)

        loop = asyncio.get_event_loop()

        def do_copy():
            while True:
                read = download_pipe.r.read(1048576)
                if read == b'':
                    break
                asyncio.run_coroutine_threadsafe(resp.write(read), loop=loop).result()

        await self.middleware.run_in_thread(do_copy)

        await resp.drain()
        return resp

    # Materialize lazy results and reduce Jobs to their id before
    # JSON-encoding the response.
    if isinstance(result, types.GeneratorType):
        result = list(result)
    elif isinstance(result, types.AsyncGeneratorType):
        result = [i async for i in result]
    elif isinstance(result, Job):
        result = result.id
    resp.headers['Content-type'] = 'application/json'
    resp.text = json.dumps(result, indent=True)
    return resp
def copy_multipart_to_pipe(loop, filepart, pipe):
    """
    Drain an aiohttp multipart file part into ``pipe.w``.

    Runs synchronously (intended for a worker thread): each chunk read is
    scheduled on the event loop `loop` and awaited via a concurrent future.
    The write end of the pipe is always closed; a consumer closing the read
    end early (BrokenPipeError) is silently tolerated.
    """
    try:
        try:
            while (chunk := asyncio.run_coroutine_threadsafe(
                filepart.read_chunk(filepart.chunk_size),
                loop=loop,
            ).result()) != b'':
                pipe.w.write(chunk)
        finally:
            pipe.w.close()
    except BrokenPipeError:
        pass
| 37,493 | Python | .py | 843 | 29.10083 | 120 | 0.495608 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,689 | i18n.py | truenas_middleware/src/middlewared/middlewared/i18n.py | from gettext import NullTranslations, GNUTranslations
import logging
import pkg_resources
logger = logging.getLogger(__name__)
translations = NullTranslations()
def set_language(language):
    """
    Install the gettext catalog for `language` as the process-wide translator.

    Returns True when the catalog loaded; on any failure the translator is
    reset to the identity NullTranslations and False is returned. English has
    no catalog, so a miss for "en" is expected and not logged.
    """
    global translations
    # FIX: pkg_resources is deprecated (setuptools); use the stdlib
    # importlib.resources API instead. Imported locally to keep this change
    # self-contained.
    from importlib import resources
    try:
        catalog = resources.files("middlewared").joinpath(
            f"locale/{language}/LC_MESSAGES/middlewared.mo"
        )
        with catalog.open("rb") as f:
            translations = GNUTranslations(f)
            return True
    except Exception as e:
        if language != "en":
            logger.warning("Failed to set language %r: %r", language, e)
        translations = NullTranslations()
        return False
def _(message):
    """Translate a single message through the active catalog (gettext)."""
    return translations.gettext(message)


def __(singular, plural, n):
    """Translate with plural selection (ngettext); `n` picks the form."""
    return translations.ngettext(singular, plural, n)
| 759 | Python | .py | 20 | 32.15 | 126 | 0.711142 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,690 | webui_auth.py | truenas_middleware/src/middlewared/middlewared/webui_auth.py | from ipaddress import ip_address, ip_network
from aiohttp import web
def addr_in_allowlist(remote_addr, allowlist):
    """
    Return True when `remote_addr` (an IP string) falls inside any of the
    addresses/networks listed in `allowlist`.

    Fails safe: an unparseable `remote_addr`, or an unparseable allowlist
    entry reached before a match, yields False.
    """
    try:
        addr = ip_address(remote_addr)
    except Exception:
        # invalid/malformed IP so play it safe and return False
        return False
    for entry in allowlist:
        try:
            network = ip_network(entry)
        except Exception:
            # invalid/malformed network so play it safe
            return False
        # FIX: dropped the original's extra `addr == network` test — an
        # IPv4Address/IPv6Address never compares equal to a network object,
        # and `in` already matches single-host (/32, /128) networks.
        if addr in network:
            return True
    return False
class WebUIAuth(object):
    """HTTP handler that logs a browser into the WebUI using a middleware auth token."""

    def __init__(self, middleware):
        self.middleware = middleware

    async def __call__(self, request):
        """
        TrueCommand authenticates client's browser in WebUI by sending POST request with `auth_token`.
        This is more secure than using query string.
        """
        # We are not able to use nginx to allow/deny client for this specific endpoint so we'll have to make that
        # check ourselves.
        if allowlist := await self.middleware.call('system.general.get_ui_allowlist'):
            if not addr_in_allowlist(request.headers['X-Real-Remote-Addr'], allowlist):
                return web.Response(status=403)
        post = await request.post()
        if 'auth_token' not in post:
            return web.Response(status=400, text='No token provided.')
        if not await self.middleware.call('auth.get_token', post['auth_token']):
            return web.Response(status=400, text='Invalid token.')
        with open('/usr/share/truenas/webui/index.html', 'r') as f:
            index = f.read()
        # Inject the token into the UI page so the frontend can open its
        # websocket session with it.
        index = index.replace(
            '</head>',
            f'<script>var MIDDLEWARE_TOKEN = "{post["auth_token"]}";</script></head>',
        )
        return web.Response(status=200, body=index, content_type='text/html')
import pam
import re
import threading
from dataclasses import dataclass, field
from datetime import datetime, UTC
from middlewared.utils.allowlist import Allowlist
from middlewared.utils.auth import AuthMech, AuthenticatorAssuranceLevel
from time import monotonic
class SessionManagerCredentials:
    """
    Base type for all session credentials.

    The defaults describe a completely unprivileged, non-user session:
    always "valid", authorizes nothing, holds no roles, and has no state
    to dump or tear down. Subclasses override the pieces they need.
    """
    is_user_session = False
    allowlist = None

    @classmethod
    def class_name(cls):
        # "FooBarSessionManagerCredentials" -> "FOO_BAR"
        trimmed = cls.__name__.replace("SessionManagerCredentials", "")
        return re.sub(r"([A-Z])", r"_\1", trimmed).lstrip("_").upper()

    def login(self):
        pass

    def is_valid(self):
        return True

    def authorize(self, method, resource):
        return False

    def has_role(self, role):
        return False

    def notify_used(self):
        pass

    def logout(self):
        pass

    def dump(self):
        return {}
class UserSessionManagerCredentials(SessionManagerCredentials):
    """ Credentials for authenticated user session """

    def __init__(self, user: dict, assurance: AuthenticatorAssuranceLevel | None):
        """
        user: dictionary generated by `auth.authenticate_user`
        assurance: authenticator assurance level for the session. This
        may be None if the session is authenticated by virtue of unix domain
        socket connection.

        Our default is AAL1 which allows authenticated sessions to use
        credentials for up to 30 days before requiring reauthentication, and
        does not set an inactivity timeout.

        See comments in utils/auth.py for more information.
        """
        now = monotonic()
        self.user = user
        self.assurance = assurance
        self.allowlist = Allowlist(user["privilege"]["allowlist"])
        self.is_user_session = True
        self.login_at = datetime.now(UTC)
        # expiry / inactivity_timeout stay None (no limits) unless an
        # assurance level is supplied.
        self.expiry = None
        self.inactivity_timeout = None
        self.last_used_at = now
        if assurance:
            self.expiry = now + self.assurance.max_session_age
            self.inactivity_timeout = self.assurance.max_inactivity

    def notify_used(self):
        # Refresh the activity timestamp, but never resurrect a session that
        # has already idled past its inactivity timeout.
        if self.inactivity_timeout:
            now = monotonic()
            if now < self.last_used_at + self.inactivity_timeout:
                self.last_used_at = now

    def is_valid(self):
        # FIX: bind `now` unconditionally. The original only assigned it via
        # a walrus inside `self.assurance and (now := monotonic())`, which
        # left `now` undefined for the inactivity check whenever `assurance`
        # was falsy — a latent NameError if the attributes ever get out of
        # sync. Behavior for all currently reachable states is unchanged.
        now = monotonic()
        if self.assurance and now > self.expiry:
            return False
        if self.inactivity_timeout:
            if now > self.last_used_at + self.inactivity_timeout:
                return False
        return True

    def authorize(self, method, resource):
        # An expired session authorizes nothing, regardless of allowlist.
        if not self.is_valid():
            return False
        return self.allowlist.authorize(method, resource)

    def has_role(self, role):
        return role in self.user["privilege"]["roles"]

    def dump(self):
        return {
            "username": self.user["username"],
            "login_at": self.login_at
        }
class ApiKeySessionManagerCredentials(UserSessionManagerCredentials):
    """ Credentials for a specific user account on TrueNAS
    Authenticated by user-linked API key
    """
    def __init__(self, user: dict, api_key: dict, assurance: AuthenticatorAssuranceLevel):
        super().__init__(user, assurance)
        self.api_key = api_key

    def dump(self):
        # Extend the user dump with the identifying key fields only
        # (never the key secret itself).
        return {
            **super().dump(),
            "api_key": {
                "id": self.api_key["id"],
                "name": self.api_key["name"],
            },
        }
class UnixSocketSessionManagerCredentials(UserSessionManagerCredentials):
    """ Credentials for a specific user account on TrueNAS
    Authenticated by unix domain socket connection
    """
    def __init__(self, user: dict):
        # No assurance level: local socket sessions get no expiry or
        # inactivity timeout (see UserSessionManagerCredentials.__init__).
        super().__init__(user, None)
class LoginPasswordSessionManagerCredentials(UserSessionManagerCredentials):
    """ Credentials for a specific user account on TrueNAS
    Authenticated by username + password combination.
    """
    # Marker subclass: behavior is entirely inherited; the distinct class
    # name feeds class_name() for auditing/session reporting.
    pass
class LoginTwofactorSessionManagerCredentials(LoginPasswordSessionManagerCredentials):
    """ Credentials for a specific user account on TrueNAS
    Authenticated by username + password combination and additional
    OTP token.
    """
    # Marker subclass: behavior is entirely inherited; the distinct class
    # name feeds class_name() for auditing/session reporting.
    pass
class TokenSessionManagerCredentials(SessionManagerCredentials):
    """
    Credentials backed by an auth token issued from another session.

    Note the split delegation: validity and usage tracking follow the
    *root* credentials (the original session at the top of any token
    chain), while authorization and roles follow the token's immediate
    *parent* credentials.
    """
    def __init__(self, token_manager, token):
        self.root_credentials = token.root_credentials()
        self.token_manager = token_manager
        self.token = token
        # Mirror the user-facing attributes of the root session so this
        # object can be used interchangeably with user credentials.
        self.is_user_session = self.root_credentials.is_user_session
        if self.is_user_session:
            self.user = self.root_credentials.user
            self.allowlist = self.root_credentials.allowlist

    def is_valid(self):
        # Both the originating session and the token itself must be live.
        if not self.root_credentials.is_valid():
            return False
        return self.token.is_valid()

    def authorize(self, method, resource):
        return self.token.parent_credentials.authorize(method, resource)

    def has_role(self, role):
        return self.token.parent_credentials.has_role(role)

    def notify_used(self):
        self.root_credentials.notify_used()
        self.token.notify_used()

    def logout(self):
        # Logging out a token session revokes the token.
        self.token_manager.destroy(self.token)

    def dump(self):
        data = {
            "parent": dump_credentials(self.token.parent_credentials),
        }
        if self.is_user_session:
            data["username"] = self.user["username"]
        return data
class TrueNasNodeSessionManagerCredentials(SessionManagerCredentials):
    # Unconditionally authorizes every call. NOTE(review): presumably used
    # for the trusted peer TrueNAS node connection — confirm against callers.
    def authorize(self, method, resource):
        return True
@dataclass()
class AuthenticationContext:
    """
    This stores PAM context for authentication mechanisms that implement
    challenge-response protocol. We need to keep reference for PAM handle
    to handle any required PAM conversations.
    """
    # FIX: use default_factory so each context gets its own lock. A plain
    # instance default (`threading.Lock()`) is evaluated once at class
    # definition and shared by every dataclass instance, which would
    # serialize unrelated sessions on a single global lock.
    # NOTE(review): if global serialization of PAM calls was actually
    # intended, this should be a module-level lock instead — confirm.
    pam_lock: threading.Lock = field(default_factory=threading.Lock)
    pam_hdl: pam.PamAuthenticator | None = None
    next_mech: AuthMech | None = None
    auth_data: dict | None = None
class FakeApplication:
    # Minimal stand-in for an application/session object: carries the base
    # (unprivileged) credentials, which authorize nothing.
    authenticated_credentials = SessionManagerCredentials()


def fake_app():
    """Return a fresh FakeApplication instance."""
    return FakeApplication()
def dump_credentials(credentials):
    """Serialize a credentials object to its type tag plus its dumped data."""
    kind = credentials.class_name()
    data = credentials.dump()
    return {"credentials": kind, "credentials_data": data}
| 6,233 | Python | .py | 164 | 30.432927 | 90 | 0.669107 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,692 | __init__.py | truenas_middleware/src/middlewared/middlewared/__init__.py | # Fool setuptools to prevent
# error: Namespace package problem: middlewared is a namespace package, but its
# __init__.py does not call declare_namespace()! Please fix it.
# (See the setuptools manual under "Namespace Packages" for details.)
# when running setup_test.py
# (it checks for presence of declare_namespace in __init__.py so the above paragraph
# alone does the job)
| 391 | Python | .py | 7 | 54.857143 | 84 | 0.744792 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,693 | sqlalchemy.py | truenas_middleware/src/middlewared/middlewared/sqlalchemy.py | import datetime
import isodate
from sqlalchemy import (
Table, Column as _Column, ForeignKey, Index,
Boolean, CHAR, DateTime, Integer, SmallInteger, String, Text, UniqueConstraint
) # noqa
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import relationship # noqa
from sqlalchemy.types import UserDefinedType
from truenas_api_client import json
from middlewared.plugins.pwenc import encrypt, decrypt
class Base(object):
    # Applied to every model table: keep SQLite rowids monotonically
    # increasing (never reuse deleted ids).
    __table_args__ = {"sqlite_autoincrement": True}


# Declarative base class for all middleware ORM models.
Model = declarative_base(cls=Base)
# Deterministic names for generated indexes/constraints, so DDL (and any
# migrations derived from it) is stable across runs.
Model.metadata.naming_convention = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
}
class Column(_Column):
    """SQLAlchemy Column that is NOT NULL by default (pass nullable=True to opt out)."""
    inherit_cache = True

    def __init__(self, *args, **kwargs):
        # Flip SQLAlchemy's default of nullable=True.
        kwargs.setdefault("nullable", False)
        super().__init__(*args, **kwargs)
class EncryptedText(UserDefinedType):
    """
    TEXT column stored encrypted at rest via the pwenc plugin.

    NULL and empty string pass through unencrypted ('' stays '', None stays
    None) in both directions.
    """
    cache_ok = True

    def get_col_spec(self, **kw):
        return "TEXT"

    def _bind_processor(self, value):
        if value is None:
            return None
        if not value:
            return ''
        return encrypt(value)

    def bind_processor(self, dialect):
        return self._bind_processor

    def _result_processor(self, value):
        if value is None:
            return None
        if not value:
            return ''
        return decrypt(value)

    def result_processor(self, dialect, coltype):
        return self._result_processor
class JSON(UserDefinedType):
    """TEXT column holding a JSON-serialized (optionally encrypted) value."""
    cache_ok = True

    def __init__(self, type_=dict, encrypted=False):
        # `type_` is the fallback factory (e.g. dict or list) used when the
        # value is NULL on write or undecodable on read; None disables it.
        self.type = type_
        self.encrypted = encrypted

    def get_col_spec(self, **kw):
        return "TEXT"

    def _bind_processor(self, value):
        if value is None:
            if self.type is not None:
                # Store an empty instance of the fallback type instead of NULL.
                value = self.type()
        result = json.dumps(value)
        if self.encrypted:
            result = encrypt(result)
        return result

    def bind_processor(self, dialect):
        return self._bind_processor

    def _result_processor(self, value):
        try:
            if self.encrypted:
                # _raise=True so a decryption failure falls into the
                # except branch rather than decoding garbage.
                value = decrypt(value, _raise=True)
            return json.loads(value)
        except Exception:
            # Any decrypt/parse failure degrades to the fallback value.
            if self.type is not None:
                return self.type()
            else:
                return None

    def result_processor(self, dialect, coltype):
        return self._result_processor
class MultiSelectField(UserDefinedType):
    """
    TEXT column holding a list of selections as a comma-separated string.

    Reading a NULL/empty or malformed value yields an empty list.
    """
    cache_ok = True

    def get_col_spec(self, **kw):
        return "TEXT"

    def _bind_processor(self, value):
        return None if value is None else ",".join(value)

    def bind_processor(self, dialect):
        return self._bind_processor

    def _result_processor(self, value):
        if not value:
            return []
        try:
            return value.split(",")
        except Exception:
            return []

    def result_processor(self, dialect, coltype):
        return self._result_processor
class Time(UserDefinedType):
    """TIME column storing a datetime.time as an ISO-formatted string."""
    cache_ok = True

    def get_col_spec(self, **kw):
        return "TIME"

    def _bind_processor(self, value):
        if value is None:
            return None
        if isinstance(value, str):
            # Accept ISO time strings as well as time objects.
            value = isodate.parse_time(value)
        return value.isoformat()

    def bind_processor(self, dialect):
        return self._bind_processor

    def _result_processor(self, value):
        try:
            return isodate.parse_time(value)
        except Exception:
            # Unparseable (or NULL) values degrade to midnight, time(0, 0).
            return datetime.time()

    def result_processor(self, dialect, coltype):
        return self._result_processor
23,694 | logger.py | truenas_middleware/src/middlewared/middlewared/logger.py | import logging
import logging.handlers
import os
import queue
import typing
import warnings
from .logging.console_formatter import ConsoleLogFormatter
# Module import side effects: raise the log level of chatty third-party
# loggers so middlewared's own DEBUG-level logging stays readable.

# markdown debug is also considered useless
logging.getLogger('MARKDOWN').setLevel(logging.INFO)
# asyncio runs in debug mode but we do not need INFO/DEBUG
logging.getLogger('asyncio').setLevel(logging.WARNING)
# We dont need internal aiohttp debug logging
logging.getLogger('aiohttp.internal').setLevel(logging.WARNING)
# We dont need internal botocore debug logging
logging.getLogger('botocore').setLevel(logging.WARNING)
# we dont need websocket debug messages
logging.getLogger('websocket').setLevel(logging.WARNING)
# issues garbage warnings
logging.getLogger('googleapiclient').setLevel(logging.ERROR)
# It's too verbose (when used to list remote datasets/snapshots)
logging.getLogger('paramiko').setLevel(logging.INFO)
# registered 'pbkdf2_sha256' handler: <class 'passlib.handlers.pbkdf2.pbkdf2_sha256'>
logging.getLogger('passlib.registry').setLevel(logging.INFO)
logging.getLogger('passlib.handlers').setLevel(logging.INFO)
logging.getLogger('passlib.utils.compat').setLevel(logging.INFO)
# pyroute2.ndb is chatty....only log errors
logging.getLogger('pyroute2.ndb').setLevel(logging.CRITICAL)
logging.getLogger('pyroute2.netlink').setLevel(logging.CRITICAL)
logging.getLogger('pyroute2.netlink.nlsocket').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# ACME is very verbose in logging the request it sends with headers etc, let's not pollute the logs
# with that much information and raise the log level in this case
logging.getLogger('acme.client').setLevel(logging.WARNING)
logging.getLogger('certbot_dns_cloudflare._internal.dns_cloudflare').setLevel(logging.WARNING)
# "Encoding detection: ascii is most likely the one."
logging.getLogger('charset_normalizer').setLevel(logging.INFO)
# Prevent debug docker logs
logging.getLogger('docker.utils.config').setLevel(logging.ERROR)
logging.getLogger('docker.auth').setLevel(logging.ERROR)

# /usr/lib/python3/dist-packages/pydantic/json_schema.py:2158: PydanticJsonSchemaWarning:
# Default value <object object at 0x7fa8ac040d30> is not JSON serializable; excluding default from JSON schema
# [non-serializable-default]
# This default value is `middlewared.utils.lang.undefined`. It must be there for our
# `middlewared.api.base.ForUpdateMetaclass` to work so this warning is false positive.
# Excluding this default from the generated JSON schema is the correct behavior, so there is no real issue here.
warnings.filterwarnings("ignore", module="pydantic.json_schema")
# Register a custom TRACE level (below DEBUG=10) and patch logging.Logger so
# every logger in the process gains a .trace() method.
logging.TRACE = 6

# Destination files for the dedicated per-subsystem loggers configured in
# Logger.configure_logging below.
APP_LIFECYCLE_LOGFILE = '/var/log/app_lifecycle.log'
APP_MIGRATION_LOGFILE = '/var/log/app_migrations.log'
DOCKER_IMAGE_LOGFILE = '/var/log/docker_image.log'
FAILOVER_LOGFILE = '/var/log/failover.log'
LOGFILE = '/var/log/middlewared.log'
NETDATA_API_LOGFILE = '/var/log/netdata_api.log'
ZETTAREPL_LOGFILE = '/var/log/zettarepl.log'


def trace(self, message, *args, **kws):
    """Log `message` at TRACE level (standard level-gated logging pattern)."""
    if self.isEnabledFor(logging.TRACE):
        self._log(logging.TRACE, message, args, **kws)


logging.addLevelName(logging.TRACE, "TRACE")
# Monkey-patch: make .trace() available on every Logger instance.
logging.Logger.trace = trace
class Logger:
    """Pseudo-Class for Logger - Wrapper for logging module"""
    def __init__(
        self, application_name: str, debug_level: typing.Optional[str]=None,
        log_format: str='[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s'
    ):
        self.application_name = application_name
        self.debug_level = debug_level or 'DEBUG'
        self.log_format = log_format

    def getLogger(self):
        """Return the logging.Logger named after this application."""
        return logging.getLogger(self.application_name)

    def configure_logging(self, output_option: str):
        """
        Configure the log output to file or console.
            `output_option` str: Default is `file`, can be set to `console`.
        """
        if output_option.lower() == 'console':
            console_handler = logging.StreamHandler()
            logging.root.setLevel(getattr(logging, self.debug_level))
            time_format = "%Y/%m/%d %H:%M:%S"
            console_handler.setFormatter(ConsoleLogFormatter(self.log_format, datefmt=time_format))
            logging.root.addHandler(console_handler)
        else:
            # Root logger plus one dedicated, non-propagating logger per
            # subsystem, each with its own rotating file.
            for name, filename, log_format in [
                (None, LOGFILE, self.log_format),
                ('app_lifecycle', APP_LIFECYCLE_LOGFILE, self.log_format),
                ('app_migrations', APP_MIGRATION_LOGFILE, self.log_format),
                ('docker_image', DOCKER_IMAGE_LOGFILE, self.log_format),
                ('failover', FAILOVER_LOGFILE, self.log_format),
                ('netdata_api', NETDATA_API_LOGFILE, self.log_format),
                ('zettarepl', ZETTAREPL_LOGFILE,
                 '[%(asctime)s] %(levelname)-8s [%(threadName)s] [%(name)s] %(message)s'),
            ]:
                self.setup_file_logger(name, filename, log_format)

            logging.root.setLevel(getattr(logging, self.debug_level))

    def setup_file_logger(self, name: typing.Optional[str], filename: str, log_format: typing.Optional[str]):
        """
        Attach a rotating file handler (10 MiB x 5 backups) for logger `name`
        (None = root), decoupled through a queue so log writes never block
        the caller.
        """
        # Use `QueueHandler` to avoid blocking IO in asyncio main loop
        log_queue = queue.Queue()
        queue_handler = logging.handlers.QueueHandler(log_queue)
        file_handler = logging.handlers.RotatingFileHandler(filename, 'a', 10485760, 5, 'utf-8')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(log_format, '%Y/%m/%d %H:%M:%S'))
        # The listener thread drains the queue into the file handler.
        queue_listener = logging.handlers.QueueListener(log_queue, file_handler)
        queue_listener.start()
        logging.getLogger(name).addHandler(queue_handler)
        if name is not None:
            # Dedicated loggers must not duplicate records into the root log.
            logging.getLogger(name).propagate = False

        # Make sure various log files are not readable by everybody.
        # umask could be another approach but chmod was chosen so
        # it affects existing installs.
        try:
            os.chmod(filename, 0o640)
        except OSError:
            pass
def setup_logging(name: str, debug_level: typing.Optional[str], log_handler: typing.Optional[str]):
    """
    Convenience entry point: build a Logger for `name` and route output to
    the console when `log_handler` is 'console', otherwise to the log files.
    """
    app_logger = Logger(name, debug_level)
    app_logger.getLogger()
    target = 'console' if log_handler == 'console' else 'file'
    app_logger.configure_logging(target)
| 6,354 | Python | .py | 120 | 47.125 | 112 | 0.72056 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,695 | validators.py | truenas_middleware/src/middlewared/middlewared/validators.py | from datetime import time
import ipaddress
import os
import re
from urllib.parse import urlparse
import uuid
from string import digits, ascii_uppercase, ascii_lowercase, punctuation
from pathlib import Path
from middlewared.utils import filters
from middlewared.utils.filesystem.constants import ZFSCTL
from middlewared.utils.path import path_location
from zettarepl.snapshot.name import validate_snapshot_naming_schema
# Lenient MAC address check: each octet pair may be followed by ':' or '-'
# or nothing, so separators can be absent or mixed.
RE_MAC_ADDRESS = re.compile(r"^([0-9A-Fa-f]{2}[:-]?){5}([0-9A-Fa-f]{2})$")

# Shared query-filter validator, with its bound methods re-exported at
# module level for convenience.
filters_obj = filters()
validate_filters = filters_obj.validate_filters
validate_options = filters_obj.validate_options
class ValidatorBase:
    """The base validator class to be inherited by all validators"""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError()


class Email(ValidatorBase):
    """
    Minimal email address validator: length limit plus presence of an '@'
    with non-empty local and domain parts. Deliberately permissive —
    see the comment below. `empty=True` additionally accepts '' (and None
    is always accepted).
    """

    def __init__(self, empty=False):
        assert isinstance(empty, bool)
        self.empty = empty
        # https://www.rfc-editor.org/rfc/rfc5321#section-4.5.3.1.3
        # (subtract 2 because path portion of email is separated
        # by enclosing "<" which we cannot control)
        self.max_path = 254

    def __call__(self, value):
        if value is None or (self.empty and not value):
            return
        elif len(value) > self.max_path:
            # FIX: this message was missing its f-string prefix and rendered
            # the literal text "{self.max_path}" to the user.
            raise ValueError(f"Maximum length is {self.max_path} characters.")
        else:
            right_most_atsign = value.rfind("@")
            if right_most_atsign == -1:
                raise ValueError("Missing '@' symbol.")

            # The email validation/RFC debacle is a vortex of endless
            # despair. There have been erratas for the erratas to "fix"
            # the email address issues but it's still very much a source of
            # debate. It's actually gotten to a point where most interwebz
            # people claim that validating email addresses more than the
            # bare minimum is only harmful. I tend to agree with them because
            # each email server implementation follows their own set of rules.
            # This means NO MATTER WHAT WE DO, we're bound to still allow an
            # "invalid" email address depending on the email server being
            # used. It's better to be false-positive than false-negative here.
            # The only guaranteed way to "validate" an email address is to send
            # a test email to the given address.
            local_part = value[:right_most_atsign]
            if not local_part:
                raise ValueError("Missing local part of email string (part before the '@').")

            domain_part = value[right_most_atsign:]
            if domain_part == '@':
                raise ValueError("Missing domain part of email string (part after the '@').")
class Exact(ValidatorBase):
    """Reject any value that is not equal to the expected one."""

    def __init__(self, value):
        # The single value considered valid.
        self.value = value

    def __call__(self, value):
        if value == self.value:
            return
        raise ValueError(f"Should be {self.value!r}")
class IpAddress(ValidatorBase):
    """Accept any syntactically valid IPv4 or IPv6 address."""

    def __call__(self, value):
        try:
            ipaddress.ip_address(value)
        except ValueError:
            # Replace the stdlib's message with a stable, user-facing one.
            raise ValueError('Not a valid IP address')
class Netmask(ValidatorBase):
    """Validate a netmask (expanded form, e.g. 255.255.255.0) or a CIDR prefix length."""

    def __init__(self, ipv4=True, ipv6=True, prefix_length=True):
        self.ipv4 = ipv4
        self.ipv6 = ipv6
        # When False, bare prefix lengths like "24" are rejected.
        self.prefix_length = prefix_length

    def __call__(self, value):
        is_prefix = value.isdigit()
        if is_prefix and not self.prefix_length:
            raise ValueError('Please specify expanded netmask i.e 255.255.255.128.')

        # Pick a throw-away host address so ip_network() parses the mask
        # under the correct address family.
        probe = '1.1.1.1'
        if self.ipv4 and self.ipv6 and is_prefix and int(value) > 32:
            # Prefix too long for IPv4 -- must be an IPv6 prefix.
            probe = '2001:db8::'
        elif self.ipv6 and not self.ipv4:
            # ipaddress module does not currently support ipv6 expanded netmasks
            # TODO: Convert expanded netmasks to prefix lengths for ipv6 till ipaddress adds support
            probe = '2001:db8::'

        try:
            ipaddress.ip_network(f'{probe}/{value}', strict=False)
        except ValueError:
            raise ValueError('Not a valid netmask')
class Time(ValidatorBase):
    """Validate a 24-hour "HH:MM" time-of-day string."""

    def __call__(self, value):
        friendly = ValueError('Time should be in 24 hour format like "18:00"')
        try:
            hours, minutes = value.split(':')
        except ValueError:
            # Wrong number of ':'-separated fields.
            raise friendly
        try:
            # BUG FIX: int("xx") and time(25, 0) raise ValueError, which the
            # original code did not catch (it only caught TypeError), letting
            # raw stdlib messages such as "hour must be in 0..23" escape.
            time(int(hours), int(minutes))
        except (TypeError, ValueError):
            raise friendly
class Match(ValidatorBase):
    """Require the value to match a regular expression (None is accepted)."""

    def __init__(self, pattern, flags=0, explanation=None):
        self.pattern = pattern
        self.flags = flags
        self.explanation = explanation
        # Compiled once up front; compiled patterns are not deep-copyable,
        # hence the custom __deepcopy__ below.
        self.regex = re.compile(pattern, flags)

    def __call__(self, value):
        if value is None:
            return
        if self.regex.match(value):
            return
        raise ValueError(self.explanation or f"Value does not match {self.pattern!r} pattern")

    def __deepcopy__(self, memo):
        # Rebuild from the original arguments instead of copying the
        # compiled regex object.
        return Match(self.pattern, self.flags, self.explanation)
class NotMatch(ValidatorBase):
    """Reject values matching a regular expression (None is accepted)."""

    def __init__(self, pattern, flags=0, explanation=None):
        self.pattern = pattern
        self.flags = flags
        self.explanation = explanation
        # Compiled once; see __deepcopy__ for why we keep the raw arguments.
        self.regex = re.compile(pattern, flags)

    def __call__(self, value):
        if value is None:
            return
        if self.regex.match(value):
            raise ValueError(self.explanation or f"Value matches {self.pattern!r} pattern")

    def __deepcopy__(self, memo):
        # Compiled regexes cannot be deep-copied; recreate from arguments.
        return NotMatch(self.pattern, self.flags, self.explanation)
class Hostname(Match):
    """Validate a hostname: letters, digits, dots and dashes, ending in an
    alphanumeric character (case-insensitive)."""

    def __init__(self, explanation=None):
        super().__init__(r'^[a-z\.\-0-9]*[a-z0-9]$', flags=re.IGNORECASE, explanation=explanation)
class Or(ValidatorBase):
    """Accept a value if at least one of the wrapped validators accepts it.

    If all validators reject the value, the individual error messages are
    joined with " or " into a single ValueError.
    """

    def __init__(self, *validators):
        self.validators = validators

    def __call__(self, value):
        failures = []
        for validator in self.validators:
            try:
                validator(value)
            except ValueError as e:
                failures.append(str(e))
            else:
                # First validator that accepts wins.
                return
        raise ValueError(" or ".join(failures))
class Range(ValidatorBase):
    """Validate that a number (or a string's length) lies within bounds.

    `exclude` lists values reserved for internal use that are rejected even
    when inside the range. None is always accepted.
    """

    def __init__(self, min_=None, max_=None, exclude=None):
        self.min = min_
        self.max = max_
        self.exclude = exclude or []

    def __call__(self, value):
        if value is None:
            return
        if isinstance(value, str):
            # Strings are validated by length, not lexical order.
            value = len(value)
        if value in self.exclude:
            raise ValueError(
                f'{value} is a reserved for internal use. Please select another value.'
            )

        has_min = self.min is not None
        has_max = self.max is not None
        if has_min and has_max:
            bounds = f"between {self.min} and {self.max}"
        elif has_max:
            bounds = f"less or equal than {self.max}"
        elif has_min:
            bounds = f"greater or equal than {self.min}"
        else:
            bounds = ""

        if (has_min and value < self.min) or (has_max and value > self.max):
            raise ValueError(f"Should be {bounds}")
class Port(Range):
    """A Range specialized to valid TCP/UDP port numbers (1-65535).

    Example usage with exclude:
        validators=[Port(exclude=[NFS_RDMA_DEFAULT_PORT])]
    """

    def __init__(self, exclude=None):
        super().__init__(min_=1, max_=65535, exclude=exclude)
class QueryFilters(ValidatorBase):
    """Validate a query-filters structure via the shared filters() helper."""

    def __call__(self, value):
        # Delegates entirely to the module-level filters() instance.
        validate_filters(value)
class QueryOptions(ValidatorBase):
    """Validate a query-options structure via the shared filters() helper."""

    def __call__(self, value):
        # Delegates entirely to the module-level filters() instance.
        validate_options(value)
class Unique(ValidatorBase):
    """Reject a sequence containing duplicate values.

    Raises:
        ValueError: naming the duplicated item.
    """

    def __call__(self, value):
        # O(n) duplicate detection via a seen-set (the original called
        # list.count() per element, which is O(n^2)).  Unhashable items
        # (e.g. dicts) fall back to the original counting approach.
        seen = set()
        for item in value:
            try:
                duplicate = item in seen
                seen.add(item)
            except TypeError:
                duplicate = value.count(item) > 1
            if duplicate:
                raise ValueError(f"Duplicate values are not allowed: {item!r}")
class IpInUse(ValidatorBase):
    """Reject an IP address already configured on one of the system's interfaces.

    `exclude` lists addresses that are acceptable even if in use (e.g. the
    address currently assigned to the object being edited).
    """

    def __init__(self, middleware, exclude=None):
        self.middleware = middleware
        self.exclude = exclude or []

    def __call__(self, ip):
        # Syntax check first; raises ValueError on malformed input.
        IpAddress()(ip)
        if ip in self.exclude:
            return
        in_use = [entry['address'] for entry in self.middleware.call_sync('interface.ip_in_use')]
        if ip in in_use:
            raise ValueError(
                f'{ip} is already being used by the system. Please select another IP'
            )
class MACAddr(ValidatorBase):
    """Validate a MAC address, optionally pinned to a specific separator style."""
    # The two separator characters a MAC address may use between octets.
    SEPARATORS = [':', '-']
    def __init__(self, separator=None):
        # When `separator` is given it must be one of SEPARATORS and the
        # validated value must use it exclusively; otherwise any form
        # accepted by RE_MAC_ADDRESS (including no separator) passes.
        if separator:
            assert separator in self.SEPARATORS
        self.separator = separator
    def __call__(self, value):
        # `{self.separator} ^ set(self.SEPARATORS)` is the symmetric
        # difference, i.e. the one separator that was NOT requested; if it
        # appears anywhere in the value the address is rejected, so mixed-
        # or wrong-separator addresses fail even though RE_MAC_ADDRESS
        # itself would accept them.
        if not RE_MAC_ADDRESS.match(value.lower()) or (
            self.separator and (
                self.separator not in value or ({self.separator} ^ set(self.SEPARATORS)).pop() in value.lower()
            )
        ):
            raise ValueError('Please provide a valid MAC address')
class ReplicationSnapshotNamingSchema(ValidatorBase):
    """Validate a zettarepl snapshot naming schema string."""

    def __call__(self, value):
        # zettarepl raises ValueError itself for an invalid schema.
        validate_snapshot_naming_schema(value)
class UUID(ValidatorBase):
    """Validate that the value parses as a UUID string (None is accepted)."""

    def __call__(self, value):
        if value is None:
            return
        try:
            # NOTE: passing version=4 does not reject other UUID versions;
            # it only normalizes the version bits.  This is a syntax check.
            uuid.UUID(value, version=4)
        except ValueError as e:
            raise ValueError(f'Invalid UUID: {e}')
class PasswordComplexity(ValidatorBase):
    """Validate that a password contains the required character categories.

    `required_types` is a subset of {'ASCII_LOWER', 'ASCII_UPPER', 'DIGIT',
    'SPECIAL'}.  When `required_cnt` is None each listed category is a hard
    requirement and all failures are concatenated into one message;
    otherwise a count-based check is performed (see NOTE below).
    """
    def __init__(self, required_types, required_cnt=None):
        self.required_types = required_types
        self.required_cnt = required_cnt
    def __call__(self, value):
        # NOTE(review): as written here, `cnt` is incremented in the `else`
        # of `if self.required_cnt is None`, i.e. when a category check
        # FAILS and required_cnt mode is active -- which makes the final
        # `self.required_cnt > cnt` comparison look inverted (a password
        # passing every category would be rejected).  Confirm the `else`
        # alignment against upstream before relying on required_cnt mode.
        cnt = 0
        reqs = []
        errstr = ''
        if value and self.required_types:
            if 'ASCII_LOWER' in self.required_types:
                reqs.append('lowercase character')
                if not any(c in ascii_lowercase for c in value):
                    if self.required_cnt is None:
                        errstr += 'Must contain at least one lowercase character. '
                    else:
                        cnt += 1
            if 'ASCII_UPPER' in self.required_types:
                reqs.append('uppercase character')
                if not any(c in ascii_uppercase for c in value):
                    if self.required_cnt is None:
                        errstr += 'Must contain at least one uppercase character. '
                    else:
                        cnt += 1
            if 'DIGIT' in self.required_types:
                reqs.append('digits 0-9')
                if not any(c in digits for c in value):
                    if self.required_cnt is None:
                        errstr += 'Must contain at least one numeric digit (0-9). '
                    else:
                        cnt += 1
            if 'SPECIAL' in self.required_types:
                reqs.append('special characters (!, $, #, %, etc.)')
                if not any(c in punctuation for c in value):
                    if self.required_cnt is None:
                        errstr += 'Must contain at least one special character (!, $, #, %, etc.). '
                    else:
                        cnt += 1
            if self.required_cnt and self.required_cnt > cnt:
                raise ValueError(
                    f'Must contain at least {self.required_cnt} of the following categories: {", ".join(reqs)}'
                )
            if errstr:
                raise ValueError(errstr)
def validate_schema(schema, data, additional_attrs=False, dict_kwargs=None):
    """Clean and validate `data` against a list of schema attributes.

    The attributes are wrapped in a Dict named "attributes"; clean() runs
    first and validate() only runs if cleaning succeeded.  The collected
    ValidationErrors are returned with the "attributes." prefix stripped
    from each error's attribute name.
    """
    from middlewared.schema import Dict, Error
    from middlewared.service import ValidationErrors

    verrors = ValidationErrors()
    wrapper = Dict("attributes", *schema, additional_attrs=additional_attrs, **(dict_kwargs or {}))

    try:
        wrapper.clean(data)
    except Error as e:
        verrors.add(e.attribute, e.errmsg, e.errno)
    except ValidationErrors as e:
        verrors.extend(e)
    else:
        # Only validate data that cleaned successfully.
        try:
            wrapper.validate(data)
        except ValidationErrors as e:
            verrors.extend(e)

    prefix = "attributes."
    for verror in verrors.errors:
        if not verror.attribute.startswith(prefix):
            raise ValueError(f"Got an invalid attribute name: {verror.attribute!r}")
        verror.attribute = verror.attribute[len(prefix):]

    return verrors
class URL(ValidatorBase):
def __init__(self, **kwargs):
kwargs.setdefault("empty", False)
kwargs.setdefault("scheme", ["http", "https"])
self.empty = kwargs["empty"]
self.scheme = kwargs["scheme"]
def __call__(self, value):
if self.empty and not value:
return
try:
result = urlparse(value)
except Exception as e:
raise ValueError(f'Invalid URL: {e}')
if not result.scheme:
raise ValueError('Invalid URL: no scheme specified')
if self.scheme and result.scheme not in self.scheme:
raise ValueError(f'Invalid URL: invalid scheme: {result.scheme}')
if not result.netloc:
raise ValueError('Invalid URL: no netloc specified')
def check_path_resides_within_volume_sync(verrors, schema_name, path, vol_names):
    """
    This provides basic validation of whether a given `path` is allowed to
    be exposed to end-users.
    `verrors` - ValidationErrors created by calling function
    `schema_name` - schema name to use in validation error message
    `path` - path to validate
    `vol_names` - list of expected pool names
    It checks the following:
    * path is within /mnt
    * path is located within one of the specified `vol_names`
    * path is not explicitly a `.zfs` or `.zfs/snapshot` directory
    """
    if path_location(path).name == 'EXTERNAL':
        # External (non-local) paths always fail this check.
        # NOTE(review): fields that do allow external paths presumably skip
        # this helper entirely -- confirm against callers.
        verrors.add(schema_name, "Path is external to TrueNAS.")
        return
    try:
        inode = os.stat(path).st_ino
    except FileNotFoundError:
        # A nonexistent path cannot be the .zfs control dir; skip that check.
        inode = None
    # Resolve symlinks so a link pointing outside the pools is caught.
    rp = Path(os.path.realpath(path))
    vol_paths = [os.path.join("/mnt", vol_name) for vol_name in vol_names]
    # commonpath([parent]) merely normalizes `parent`; equality with
    # commonpath([parent, rp]) holds iff `rp` is `parent` or lies below it.
    if not path.startswith("/mnt/") or not any(
        os.path.commonpath([parent]) == os.path.commonpath([parent, rp]) for parent in vol_paths
    ):
        verrors.add(schema_name, "The path must reside within a pool mount point")
    # ZFSCTL inode numbers identify the .zfs control/snapshot directories.
    if inode in (ZFSCTL.INO_ROOT.value, ZFSCTL.INO_SNAPDIR.value):
        verrors.add(schema_name,
                    "The ZFS control directory (.zfs) and snapshot directory (.zfs/snapshot) "
                    "are not permitted paths. If a snapshot within this directory must "
                    "be accessed through the path-based API, then it should be called "
                    "directly, e.g. '/mnt/dozer/.zfs/snapshot/mysnap'.")
| 14,601 | Python | .py | 335 | 33.820896 | 111 | 0.609488 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,696 | job.py | truenas_middleware/src/middlewared/middlewared/job.py | import asyncio
import contextlib
from collections import OrderedDict
import copy
import enum
import errno
from functools import partial
import logging
import os
import shutil
import sys
import time
import traceback
import threading
from middlewared.service_exception import CallError, ValidationError, ValidationErrors, adapt_exception
from middlewared.pipe import Pipes
from middlewared.utils.privilege import credential_is_limited_to_own_jobs
from middlewared.utils.time_utils import utc_now
logger = logging.getLogger(__name__)
# Directory holding per-job log files (written as "<job id>.log").
LOGS_DIR = '/var/log/jobs'
def send_job_event(middleware, event_type, job, fields):
    """Broadcast a `core.get_jobs` event for `job`, filtered per client by
    should_send_job_event()."""
    predicate = partial(should_send_job_event, job)
    middleware.send_event(
        'core.get_jobs', event_type, id=job.id, fields=fields,
        should_send_event=predicate,
    )
def should_send_job_event(job, wsclient):
    """Return True if `wsclient` is allowed to receive events for `job`.

    Clients whose credentials are limited to their own jobs only see jobs
    started by a user session with the same username; everyone else sees
    all job events.
    """
    creds = wsclient.authenticated_credentials
    if not (creds and credential_is_limited_to_own_jobs(creds)):
        return True
    # Limited client: only jobs from a matching user session are visible.
    if job.credentials is None or not job.credentials.is_user_session:
        return False
    return job.credentials.user['username'] == creds.user['username']
class State(enum.Enum):
    """Job lifecycle states: WAITING -> RUNNING -> one of the terminal
    states (SUCCESS, FAILED, ABORTED)."""
    WAITING = 1
    RUNNING = 2
    SUCCESS = 3
    FAILED = 4
    ABORTED = 5
class JobSharedLock:
    """
    Shared lock for jobs.

    Each job method can specify a lock name; all jobs that resolve to the
    same name share one of these objects, and only one of them may run at
    a time (serialized through the wrapped asyncio.Lock).
    """

    def __init__(self, queue, name):
        self.queue = queue  # owning JobsQueue
        self.name = name    # lock name derived from the job method/options
        self.jobs = set()   # jobs currently associated with this lock
        self.lock = asyncio.Lock()

    def add_job(self, job):
        self.jobs.add(job)

    def get_jobs(self):
        return self.jobs

    def remove_job(self, job):
        # discard(): removing a job that was never added is a no-op.
        self.jobs.discard(job)

    def locked(self):
        return self.lock.locked()

    async def acquire(self):
        return await self.lock.acquire()

    def release(self):
        return self.lock.release()
class JobsQueue:
    """Registry and FIFO scheduler for all jobs known to the middleware."""
    def __init__(self, middleware):
        self.middleware = middleware
        self.deque = JobsDeque()  # bounded id -> Job history
        self.queue = []  # jobs waiting to be scheduled
        # Event responsible for the job queue schedule loop.
        # This event is set and a new job is potentially ready to run
        self.queue_event = asyncio.Event()
        # Shared lock (JobSharedLock) dict
        self.job_locks = {}
        self.middleware.event_register('core.get_jobs', 'Updates on job changes.', no_authz_required=True)
    def __getitem__(self, item):
        # Look up a job by id; raises KeyError if unknown.
        return self.deque[item]
    def get(self, item):
        # Look up a job by id; returns None if unknown.
        return self.deque.get(item)
    def all(self) -> dict[int, "Job"]:
        # Snapshot copy of all known jobs keyed by id.
        return self.deque.all()
    def for_username(self, username):
        """Return the subset of jobs started by a user session for `username`."""
        out = {}
        for jid, job in self.all().items():
            if job.credentials is None or not job.credentials.is_user_session:
                continue
            if job.credentials.user['username'] != username:
                continue
            out[jid] = job
        return out
    def add(self, job):
        """Register `job` and enqueue it for scheduling.

        Honors the job's `lock_queue_size` option: 0 means reject if an
        identical job is already running; N > 0 means return an existing
        queued job (de-duplication) once N jobs share the lock, provided
        the caller is permitted to see it.
        """
        self.handle_lock(job)
        if job.options["lock_queue_size"] is not None:
            if job.options["lock_queue_size"] == 0:
                for another_job in self.all().values():
                    if another_job.state == State.RUNNING and another_job.lock is job.lock:
                        raise CallError("This job is already being performed", errno.EBUSY)
            else:
                queued_jobs = [another_job for another_job in self.queue if another_job.lock is job.lock]
                if len(queued_jobs) >= job.options["lock_queue_size"]:
                    # Prefer the most recently queued duplicate.
                    for queued_job in reversed(queued_jobs):
                        if not credential_is_limited_to_own_jobs(job.credentials):
                            return queued_job
                        if (
                            job.credentials.is_user_session and
                            queued_job.credentials.is_user_session and
                            job.credentials.user['username'] == queued_job.credentials.user['username']
                        ):
                            return queued_job
                    # Duplicates exist but belong to other users this
                    # caller may not observe.
                    raise CallError('This job is already being performed by another user', errno.EBUSY)
        self.deque.add(job)
        self.queue.append(job)
        send_job_event(self.middleware, 'ADDED', job, job.__encode__())
        # A job has been added to the queue, let the queue scheduler run
        self.queue_event.set()
        return job
    def remove(self, job_id):
        # Drop a job from the history (also cleans up its log file).
        self.deque.remove(job_id)
    def handle_lock(self, job):
        """Attach the shared lock for the job's lock name (if any) to `job`."""
        name = job.get_lock_name()
        if name is None:
            return
        lock = self.job_locks.get(name)
        if lock is None:
            lock = JobSharedLock(self, name)
            self.job_locks[lock.name] = lock
        lock.add_job(job)
        job.lock = lock
    def release_lock(self, job):
        """Release `job`'s shared lock and wake the scheduler."""
        lock = job.lock
        if job.lock is None:
            return
        # Remove job from lock list and release it so another job can use it
        lock.remove_job(job)
        lock.release()
        if len(lock.get_jobs()) == 0:
            # No more jobs reference this lock; forget it.
            self.job_locks.pop(lock.name)
        # Once a lock is released there could be another job in the queue
        # waiting for the same lock
        self.queue_event.set()
    async def next(self):
        """
        Returns when there is a new job ready to run.
        """
        while True:
            # Awaits a new event to look for a job
            await self.queue_event.wait()
            found = None
            for job in self.queue:
                # Get job in the queue if it has no lock or its not locked
                if job.lock is None or not job.lock.locked():
                    found = job
                    if job.lock:
                        # Acquire before running so peers stay queued.
                        await job.lock.acquire()
                    break
            if found:
                # Unlocked job found to run
                self.queue.remove(found)
                # If there are no more jobs in the queue, clear the event
                if len(self.queue) == 0:
                    self.queue_event.clear()
                return found
            else:
                # No jobs available to run, clear the event
                self.queue_event.clear()
    async def run(self):
        # Scheduler main loop: run each ready job as its own task.
        while True:
            job = await self.next()
            self.middleware.create_task(job.run(self))
    async def receive(self, job, logs):
        # Import a job (and optional log payload) from a remote controller.
        await self.deque.receive(self.middleware, job, logs)
class JobsDeque(object):
    """
    Bounded mapping of job id -> Job that keeps roughly `maxlen` entries in
    memory, with a monotonically increasing id assigner.
    """
    def __init__(self, maxlen=1000):
        self.maxlen = maxlen
        self.count = 0  # last assigned job id
        self.__dict = OrderedDict()
        # Start with a clean slate of on-disk job logs.
        with contextlib.suppress(FileNotFoundError):
            shutil.rmtree(LOGS_DIR)
    def __getitem__(self, item):
        return self.__dict[item]
    def get(self, item):
        return self.__dict.get(item)
    def all(self):
        # Shallow copy so callers can iterate safely while jobs are added.
        return self.__dict.copy()
    def _get_next_id(self):
        self.count += 1
        return self.count
    def add(self, job):
        job.set_id(self._get_next_id())
        if len(self.__dict) > self.maxlen:
            # Evict the oldest finished job, if any; the `break` makes the
            # mutation-during-iteration safe.
            for old_job_id, old_job in self.__dict.items():
                if old_job.state in (State.SUCCESS, State.FAILED, State.ABORTED):
                    self.remove(old_job_id)
                    break
            else:
                # Everything is still waiting/running; nothing can be evicted.
                logger.warning("There are %d jobs waiting or running", len(self.__dict))
        self.__dict[job.id] = job
    def remove(self, job_id):
        if job_id in self.__dict:
            # cleanup() deletes the job's log file before we drop it.
            self.__dict[job_id].cleanup()
            del self.__dict[job_id]
    async def receive(self, middleware, job_dict, logs):
        # Import a serialized job from a remote controller under a fresh
        # local id.
        job_dict['id'] = self._get_next_id()
        job = await Job.receive(middleware, job_dict, logs)
        self.__dict[job.id] = job
class Job:
    """
    Represents a long-running call, methods marked with @job decorator.
    :ivar pipes: :class:`middlewared.pipe.Pipes` object containing job's opened pipes.
    :ivar logs_fd: Unbuffered binary file descriptor for writing logs (if the job was defined with `@job(logs=True)`
    """
    pipes: Pipes
    # NOTE(review): annotated as None but actually holds an open binary file
    # object while the job runs with @job(logs=True).
    logs_fd: None
    def __init__(self, middleware, method_name, serviceobj, method, args, options, pipes, on_progress_cb, app,
                 audit_callback):
        self._finished = asyncio.Event()
        self.middleware = middleware
        self.method_name = method_name
        self.serviceobj = serviceobj
        self.method = method
        self.args = args
        self.options = options
        self.pipes = pipes or Pipes(input_=None, output=None)
        self.on_progress_cb = on_progress_cb
        self.app = app
        self.audit_callback = audit_callback
        # Assigned by JobsDeque.add() via set_id().
        self.id = None
        # JobSharedLock attached by JobsQueue.handle_lock(), if any.
        self.lock = None
        self.result = None
        self.error = None
        self.exception = None
        self.exc_info = None
        self.aborted = False
        self.state = State.WAITING
        self.description = None
        self.progress = {
            'percent': 0,
            'description': '',
            'extra': None,
        }
        # Scratch space for the job method's own bookkeeping.
        self.internal_data = {}
        self.time_started = utc_now()
        self.time_finished = None
        self.loop = self.middleware.loop
        self.future = None
        # Jobs wrapping this one (see wrap()/wrap_sync()); they mirror our
        # progress updates.
        self.wrapped = []
        self.on_finish_cb = None
        self.on_finish_cb_called = False
        self.logs_path = None
        self.logs_fd = None
        self.logs_excerpt = None
        if self.options["check_pipes"]:
            for pipe in self.options["pipes"]:
                self.check_pipe(pipe)
        if self.options["description"]:
            # The description option is a callable building a human-readable
            # description from the call arguments; never let it break the job.
            try:
                self.description = self.options["description"](*args)
            except Exception:
                logger.error("Error setting job description", exc_info=True)
    @property
    def credentials(self):
        # Credentials of the session that started the job, or None for
        # internal jobs.
        if self.app is None:
            return None
        return self.app.authenticated_credentials
    def check_pipe(self, pipe):
        """
        Check if pipe named `pipe` was opened by caller. Will raise a `ValueError` if it was not.
        :param pipe: Pipe name.
        """
        if getattr(self.pipes, pipe) is None:
            raise ValueError("Pipe %r is not open" % pipe)
    def get_lock_name(self):
        # The `lock` option may be a static name or a callable of the args.
        lock_name = self.options.get('lock')
        if callable(lock_name):
            lock_name = lock_name(self.args)
        return lock_name
    def set_id(self, id_):
        self.id = id_
    def set_result(self, result):
        self.result = result
    def set_exception(self, exc_info):
        # Store the failure in all three representations used by __encode__.
        self.error = str(exc_info[1])
        self.exception = ''.join(traceback.format_exception(*exc_info))
        self.exc_info = exc_info
    def set_state(self, state):
        """Transition to `state` (a State member *name*, e.g. 'RUNNING').

        The asserts enforce legal transitions and that terminal states are
        never left.
        """
        if self.state == State.WAITING:
            assert state not in ('WAITING', 'SUCCESS')
        if self.state == State.RUNNING:
            assert state not in ('WAITING', 'RUNNING')
        assert self.state not in (State.SUCCESS, State.FAILED, State.ABORTED)
        self.state = State.__members__[state]
        if self.state in (State.SUCCESS, State.FAILED, State.ABORTED):
            self.time_finished = utc_now()
    def set_description(self, description):
        """
        Sets a human-readable job description for the task manager UI. Use this if you need to build a job description
        with more advanced logic that a simple lambda function given to `@job` decorator can provide.
        :param description: Human-readable description.
        """
        if self.description != description:
            self.description = description
            send_job_event(self.middleware, 'CHANGED', self, self.__encode__())
    def set_progress(self, percent=None, description=None, extra=None):
        """
        Sets job completion progress. All arguments are optional and only passed arguments will be changed in the
        whole job progress state.
        Don't change this too often as every time an event is sent. Use :class:`middlewared.job.JobProgressBuffer` to
        throttle progress reporting if you are receiving it from an external source (e.g. network response reading
        progress).
        :param percent: Job progress [0-100]. It will be rounded down to an integer as precision is not required here,
        and also to avoid sending extra events when progress is changed from, e.g. 73.11 to 73.64
        :param description: Human-readable description of what the job is currently doing.
        :param extra: Extra data (any type) that can be used by specific job progress bar in the UI.
        """
        changed = False
        if percent is not None:
            assert isinstance(percent, (int, float))
            percent = int(percent)
            if self.progress['percent'] != percent:
                self.progress['percent'] = percent
                changed = True
        if description:
            if self.progress['description'] != description:
                self.progress['description'] = description
                changed = True
        if extra:
            if self.progress['extra'] != extra:
                self.progress['extra'] = extra
                changed = True
        encoded = self.__encode__()
        if self.on_progress_cb:
            try:
                self.on_progress_cb(encoded)
            except Exception:
                logger.warning('Failed to run on progress callback', exc_info=True)
        if changed:
            send_job_event(self.middleware, 'CHANGED', self, encoded)
        # Mirror progress into any jobs wrapping this one.
        for wrapped in self.wrapped:
            wrapped.set_progress(**self.progress)
    async def wait(self, timeout=None, raise_error=False, raise_error_forward_classes=(CallError,)):
        """Await job completion and return its result.

        With `raise_error`, re-raises the original exception if it is one of
        `raise_error_forward_classes`, otherwise wraps it in a CallError.
        """
        if timeout is None:
            await self._finished.wait()
        else:
            await asyncio.wait_for(self.middleware.create_task(self._finished.wait()), timeout)
        if raise_error:
            if self.error:
                if isinstance(self.exc_info[1], raise_error_forward_classes):
                    raise self.exc_info[1]
                raise CallError(self.error)
        return self.result
    def wait_sync(self, timeout=None, raise_error=False, raise_error_forward_classes=(CallError,)):
        """
        Synchronous method to wait for a job in another thread.
        """
        fut = asyncio.run_coroutine_threadsafe(self._finished.wait(), self.loop)
        event = threading.Event()
        def done(_):
            event.set()
        fut.add_done_callback(done)
        if not event.wait(timeout):
            fut.cancel()
            raise TimeoutError()
        if raise_error:
            if self.error:
                if isinstance(self.exc_info[1], raise_error_forward_classes):
                    raise self.exc_info[1]
                raise CallError(self.error)
        return self.result
    def abort(self):
        # A running job is cancelled through its future; a job still waiting
        # is just flagged and will be aborted when scheduled (see run()).
        if self.loop is not None and self.future is not None:
            self.loop.call_soon_threadsafe(self.future.cancel)
        elif self.state == State.WAITING:
            self.aborted = True
    async def run(self, queue):
        """
        Run a Job and set state/result accordingly.
        This method is supposed to run in a greenlet.
        """
        if self.options["logs"]:
            self.logs_path = self._logs_path()
            await self.middleware.run_in_thread(self.start_logging)
        try:
            if self.aborted:
                raise asyncio.CancelledError()
            else:
                self.set_state('RUNNING')
                send_job_event(self.middleware, 'CHANGED', self, self.__encode__())
            self.future = asyncio.ensure_future(self.__run_body())
            try:
                await self.future
            except Exception as e:
                # adapt_exception() may translate library errors into
                # middleware-native exceptions.
                handled = adapt_exception(e)
                if handled is not None:
                    raise handled
                else:
                    raise
        except asyncio.CancelledError:
            self.set_state('ABORTED')
        except Exception as e:
            self.set_state('FAILED')
            self.set_exception(sys.exc_info())
            if isinstance(e, CallError):
                logger.error("Job %r failed: %r", self.method, e)
            else:
                logger.error("Job %r failed", self.method, exc_info=True)
        finally:
            # Always close logs/pipes, free the shared lock and notify
            # waiters, even on failure or abort.
            await self.__close_logs()
            await self.__close_pipes()
            queue.release_lock(self)
            self._finished.set()
            await self.call_on_finish_cb()
            send_job_event(self.middleware, 'CHANGED', self, self.__encode__())
            if self.options['transient']:
                queue.remove(self.id)
    async def __run_body(self):
        """
        If job is flagged as process a new process is spawned
        with the job id which will in turn run the method
        and return the result as a json
        """
        if self.options.get('process'):
            rv = await self.middleware._call_worker(self.method_name, *self.args, job={'id': self.id})
        else:
            # Build the positional prefix expected by the method: optional
            # app, the job itself, and the optional audit callback.
            prepend = []
            if hasattr(self.method, '_pass_app'):
                prepend.append(self.app)
            prepend.append(self)
            if getattr(self.method, 'audit_callback', None):
                prepend.append(self.audit_callback)
            # Make sure args are not altered during job run
            args = prepend + copy.deepcopy(self.args)
            if asyncio.iscoroutinefunction(self.method):
                rv = await self.method(*args)
            else:
                rv = await self.middleware.run_in_thread(self.method, *args)
        self.set_result(rv)
        self.set_state('SUCCESS')
        if self.progress['percent'] != 100:
            self.set_progress(100, '')
    def _logs_path(self):
        return os.path.join(LOGS_DIR, f"{self.id}.log")
    async def __close_logs(self):
        if self.logs_fd:
            self.logs_fd.close()
            def get_logs_excerpt():
                # Keep the first and last 10 lines; summarize the middle.
                head = []
                tail = []
                lines = 0
                try:
                    with open(self.logs_path, "r", encoding="utf-8", errors="ignore") as f:
                        for line in f:
                            if len(head) < 10:
                                head.append(line)
                            else:
                                tail.append(line)
                                tail = tail[-10:]
                            lines += 1
                except FileNotFoundError:
                    return "Log file was removed"
                if lines > 20:
                    excerpt = "%s... %d more lines ...\n%s" % ("".join(head), lines - 20, "".join(tail))
                else:
                    excerpt = "".join(head + tail)
                return excerpt
            self.logs_excerpt = await self.middleware.run_in_thread(get_logs_excerpt)
    async def __close_pipes(self):
        def close_pipes():
            # Close our ends: the read end of input, write end of output.
            if self.pipes.input:
                self.pipes.input.r.close()
            if self.pipes.output:
                self.pipes.output.w.close()
        await self.middleware.run_in_thread(close_pipes)
    def __encode__(self, raw_result=True):
        """Serialize the job for events / API consumers.

        With `raw_result=False` the result is passed through the method's
        result serializer; serialization failures are reported via
        `result_encoding_error` instead of raising.
        """
        exc_info = None
        if self.exc_info:
            etype = self.exc_info[0]
            evalue = self.exc_info[1]
            # NOTE: the local name `errno` shadows the errno module below;
            # safe here because the module is not referenced afterwards.
            if isinstance(evalue, ValidationError):
                extra = [(evalue.attribute, evalue.errmsg, evalue.errno)]
                errno = evalue.errno
                etype = 'VALIDATION'
            elif isinstance(evalue, ValidationErrors):
                extra = list(evalue)
                errno = None
                etype = 'VALIDATION'
            elif isinstance(evalue, CallError):
                etype = etype.__name__
                errno = evalue.errno
                extra = evalue.extra
            else:
                etype = etype.__name__
                errno = None
                extra = None
            exc_info = {
                'repr': repr(evalue),
                'type': etype,
                'errno': errno,
                'extra': extra,
            }
        result_encoding_error = None
        if self.state == State.SUCCESS:
            if raw_result:
                result = self.result
            else:
                try:
                    result = self.middleware.dump_result(self.method, self.result, False)
                except Exception as e:
                    result = None
                    result_encoding_error = repr(e)
        else:
            result = None
        return {
            'id': self.id,
            'method': self.method_name,
            'arguments': self.middleware.dump_args(self.args, method=self.method),
            'transient': self.options['transient'],
            'description': self.description,
            'abortable': self.options['abortable'],
            'logs_path': self.logs_path,
            'logs_excerpt': self.logs_excerpt,
            'progress': self.progress,
            'result': result,
            'result_encoding_error': result_encoding_error,
            'error': self.error,
            'exception': self.exception,
            'exc_info': exc_info,
            'state': self.state.name,
            'time_started': self.time_started,
            'time_finished': self.time_finished,
            'credentials': (
                {
                    'type': self.credentials.class_name(),
                    'data': self.credentials.dump(),
                } if self.credentials is not None
                else None
            )
        }
    @staticmethod
    async def receive(middleware, job_dict, logs):
        """Reconstruct a Job from its serialized form (remote controller),
        optionally restoring its log file from the `logs` bytes payload."""
        service_name, method_name = job_dict['method'].rsplit(".", 1)
        serviceobj = middleware._services[service_name]
        methodobj = getattr(serviceobj, method_name)
        job = Job(middleware, job_dict['method'], serviceobj, methodobj, job_dict['arguments'], methodobj._job, None,
                  None, None, None)
        job.id = job_dict['id']
        job.description = job_dict['description']
        if logs is not None:
            job.logs_path = job._logs_path()
        job.logs_excerpt = job_dict['logs_excerpt']
        job.progress = job_dict['progress']
        job.result = job_dict['result']
        job.error = job_dict['error']
        job.exception = job_dict['exception']
        job.state = State.__members__[job_dict['state']]
        job.time_started = job_dict['time_started']
        job.time_finished = job_dict['time_finished']
        if logs is not None:
            def write_logs():
                os.makedirs(LOGS_DIR, exist_ok=True)
                os.chmod(LOGS_DIR, 0o700)
                with open(job.logs_path, "wb") as f:
                    f.write(logs)
            await middleware.run_in_thread(write_logs)
        return job
    async def wrap(self, subjob):
        """
        Wrap a job in another job, proxying progress and result/error.
        This is useful when we want to run a job inside a job.
        :param subjob: The job to wrap.
        """
        self.set_progress(**subjob.progress)
        subjob.wrapped.append(self)
        return await subjob.wait(raise_error=True)
    def wrap_sync(self, subjob):
        # Synchronous counterpart of wrap(), for use from worker threads.
        self.set_progress(**subjob.progress)
        subjob.wrapped.append(self)
        return subjob.wait_sync(raise_error=True)
    def cleanup(self):
        # Best-effort removal of the job's log file.
        if self.logs_path:
            try:
                os.unlink(self.logs_path)
            except Exception:
                pass
    def start_logging(self):
        # Open the per-job log file unbuffered so writes hit disk promptly.
        if self.logs_path is not None:
            os.makedirs(LOGS_DIR, mode=0o700, exist_ok=True)
            self.logs_fd = open(self.logs_path, 'ab', buffering=0)
    async def logs_fd_write(self, data):
        await self.middleware.run_in_thread(self.logs_fd.write, data)
    async def set_on_finish_cb(self, cb):
        # If the job already finished, invoke the callback immediately.
        self.on_finish_cb = cb
        if self.on_finish_cb_called:
            await self.call_on_finish_cb()
    async def call_on_finish_cb(self):
        if self.on_finish_cb:
            try:
                await self.on_finish_cb(self)
            except Exception:
                logger.warning('Failed to run on finish callback', exc_info=True)
        self.on_finish_cb_called = True
class JobProgressBuffer:
    """
    This wrapper for `job.set_progress` strips too frequent progress updated
    (more frequent than `interval` seconds) so they don't spam websocket
    connections.
    """

    def __init__(self, job, interval=1):
        self.job = job
        self.interval = interval
        self.last_update_at = 0
        # Latest throttled payload and the timer handle that will flush it.
        self.pending_update_body = None
        self.pending_update = None

    def set_progress(self, *args, **kwargs):
        now = time.monotonic()
        if now - self.last_update_at < self.interval:
            # Too soon: remember the newest payload and make sure a
            # deferred flush is scheduled exactly once.
            self.pending_update_body = args, kwargs
            if self.pending_update is None:
                self.pending_update = self.job.loop.call_later(self.interval, self._do_pending_update)
            return
        # Enough time has passed: drop any deferred update and emit now.
        if self.pending_update is not None:
            self.pending_update.cancel()
            self.pending_update_body = None
            self.pending_update = None
        self.last_update_at = now
        self.job.set_progress(*args, **kwargs)

    def cancel(self):
        # Discard any deferred update without emitting it.
        if self.pending_update is not None:
            self.pending_update.cancel()
            self.pending_update_body = None
            self.pending_update = None

    def flush(self):
        # Emit the deferred update immediately, if one is pending.
        if self.pending_update is not None:
            self.pending_update.cancel()
            self._do_pending_update()

    def _do_pending_update(self):
        self.last_update_at = time.monotonic()
        self.job.set_progress(*self.pending_update_body[0], **self.pending_update_body[1])
        self.pending_update_body = None
        self.pending_update = None
| 26,212 | Python | .py | 640 | 29.660938 | 118 | 0.573069 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,697 | main.py | truenas_middleware/src/middlewared/middlewared/main.py | from .api.base.handler.dump_params import dump_params
from .api.base.handler.result import serialize_result
from .api.base.handler.version import APIVersion, APIVersionsAdapter
from .api.base.server.legacy_api_method import LegacyAPIMethod
from .api.base.server.method import Method
from .api.base.server.ws_handler.base import BaseWebSocketHandler
from .api.base.server.ws_handler.rpc import RpcWebSocketApp, RpcWebSocketAppEvent
from .api.base.server.ws_handler.rpc_factory import create_rpc_ws_handler
from .apidocs import routes as apidocs_routes
from .common.event_source.manager import EventSourceManager
from .event import Events
from .job import Job, JobsQueue, State
from .pipe import Pipes, Pipe
from .restful import parse_credentials, authenticate, create_application, copy_multipart_to_pipe, RESTfulAPI
from .role import ROLES, RoleManager
from .schema import Error as SchemaError, OROperator
import middlewared.service
from .service_exception import (
adapt_exception, CallError, CallException, ErrnoMixin, InstanceNotFound, MatchNotFound, ValidationError, ValidationErrors,
get_errname,
)
from .utils import MIDDLEWARE_RUN_DIR, sw_version
from .utils.audit import audit_username_from_session
from .utils.debug import get_frame_details, get_threads_stacks
from .utils.limits import MsgSizeError, MsgSizeLimit, parse_message
from .utils.lock import SoftHardSemaphore, SoftHardSemaphoreLimit
from .utils.origin import ConnectionOrigin
from .utils.os import close_fds
from .utils.plugins import LoadPluginsMixin
from .utils.privilege import credential_has_full_admin
from .utils.profile import profile_wrap
from .utils.rate_limit.cache import RateLimitCache
from .utils.service.call import ServiceCallMixin
from .utils.service.crud import real_crud_method
from .utils.syslog import syslog_message
from .utils.threading import set_thread_name, IoThreadPoolExecutor, io_thread_pool_executor
from .utils.time_utils import utc_now
from .utils.type import copy_function_metadata
from .webui_auth import WebUIAuth
from .worker import main_worker, worker_init
from aiohttp import web
from aiohttp.http_websocket import WSCloseCode
from aiohttp.web_exceptions import HTTPPermanentRedirect
from aiohttp.web_middlewares import normalize_path_middleware
from collections import defaultdict
import argparse
import asyncio
import binascii
from collections import namedtuple
import concurrent.futures
import concurrent.futures.process
import concurrent.futures.thread
import contextlib
from dataclasses import dataclass
import errno
import fcntl
import functools
import importlib
import inspect
import itertools
import multiprocessing
import os
import pathlib
import pickle
import re
import queue
import setproctitle
import signal
import struct
import sys
import termios
import threading
import time
import traceback
import types
import typing
import urllib.parse
import uuid
import tracemalloc
from anyio import create_connected_unix_datagram_socket
import psutil
from systemd.daemon import notify as systemd_notify
from truenas_api_client import json
from . import logger
# Extra startup time (in microseconds) requested from systemd on every
# loading-progress notification (see Middleware.__notify_startup_progress).
SYSTEMD_EXTEND_USECS = 240000000 # 4mins in microseconds
# Type of the output of sys.exc_info()
ExcInfoType = typing.Union[
    tuple[typing.Type[BaseException], BaseException, types.TracebackType],
    tuple[None, None, None],
]
@dataclass
class LoopMonitorIgnoreFrame:
    """A frame-matching rule used to filter stack traces reported by the event loop monitor."""
    # Pattern matched against a stack frame (the consumer is outside this chunk).
    regex: typing.Pattern
    # Replacement text for the matched frame; `None` means no substitution.
    substitute: typing.Optional[str] = None
    # Presumably also discards the frames below the match — consumer not visible here; confirm.
    cut_below: bool = False
class Application(RpcWebSocketApp):
    """Serves one legacy JSON websocket client connection.

    Implements the `connect` handshake, method invocation (bounded by a
    soft/hard semaphore), event pub/sub and ping/pong, serializing results
    and errors back to the client.
    """

    def __init__(
        self,
        middleware,
        origin: ConnectionOrigin,
        loop: asyncio.AbstractEventLoop,
        request,
        response
    ):
        super().__init__(middleware, origin, response)
        self.websocket = True
        self.loop = loop
        self.request = request
        self.response = response
        # Becomes True only after a successful `connect` handshake message.
        self.handshake = False
        self.logger = logger.Logger('application').getLogger()
        # Allow at most 10 concurrent calls and only queue up until 20
        self._softhardsemaphore = SoftHardSemaphore(10, 20)
        # Set when the client negotiates the PY_EXCEPTIONS feature: exceptions
        # are then pickled and shipped to it verbatim.
        self._py_exceptions = False
        # ident -> event name, for plain (non-event-source) subscriptions.
        self.__subscribed = {}

    def _send(self, data: typing.Dict[str, typing.Any]):
        """Serialize `data` to JSON and send it on the websocket; safe to call from any thread."""
        serialized = json.dumps(data)
        asyncio.run_coroutine_threadsafe(self.response.send_str(serialized), loop=self.loop)

    def _tb_error(self, exc_info: ExcInfoType) -> typing.Dict[str, typing.Union[str, list[dict]]]:
        """Convert `exc_info` into a JSON-serializable traceback description."""
        klass, exc, trace = exc_info
        frames = []
        cur_tb = trace
        while cur_tb:
            tb_frame = cur_tb.tb_frame
            cur_tb = cur_tb.tb_next
            cur_frame = get_frame_details(tb_frame, self.logger)
            if cur_frame:
                frames.append(cur_frame)
        return {
            'class': klass.__name__,
            'frames': frames,
            'formatted': ''.join(traceback.format_exception(*exc_info)),
            'repr': repr(exc_info[1]),
        }

    def get_error_dict(
        self,
        errno: int,
        reason: str | None = None,
        exc_info: ExcInfoType | None = None,
        etype: str | None = None,
        extra: list | None = None
    ) -> dict[str, typing.Any]:
        """Build the error payload sent to the client for a failed call."""
        error_extra = {}
        if self._py_exceptions and exc_info:
            # Trusted clients get the original exception object, pickled and base64-encoded.
            error_extra['py_exception'] = binascii.b2a_base64(pickle.dumps(exc_info[1])).decode()
        return dict({
            'error': errno,
            'errname': get_errname(errno),
            'type': etype,
            'reason': reason,
            'trace': self._tb_error(exc_info) if exc_info else None,
            'extra': extra,
        }, **error_extra)

    def send_error(
        self,
        message: dict[str, typing.Any],
        errno: int,
        reason: str | None = None,
        exc_info: ExcInfoType | None = None,
        etype: str | None = None,
        extra: list | None = None
    ):
        """Send an error result for the request carried by `message`."""
        self._send({
            'msg': 'result',
            'id': message['id'],
            'error': self.get_error_dict(errno, reason, exc_info, etype, extra),
        })

    async def call_method(self, message, serviceobj, methodobj):
        """Execute a client-requested method call and send back its result or error."""
        params = message.get('params') or []
        if not isinstance(params, list):
            self.send_error(message, errno.EINVAL, '`params` must be a list.')
            return
        if mock := self.middleware._mock_method(message['method'], params):
            methodobj = mock
        try:
            async with self._softhardsemaphore:
                result = await self.middleware.call_with_audit(message['method'], serviceobj, methodobj, params, self)
            # Normalize non-serializable result types before shipping them out.
            if isinstance(result, Job):
                result = result.id
            elif isinstance(result, types.GeneratorType):
                result = list(result)
            elif isinstance(result, types.AsyncGeneratorType):
                result = [i async for i in result]
            self._send({
                'id': message['id'],
                'msg': 'result',
                'result': result,
            })
        except SoftHardSemaphoreLimit as e:
            self.send_error(
                message,
                errno.ETOOMANYREFS,
                f'Maximum number of concurrent calls ({e.args[0]}) has exceeded.',
            )
        except ValidationError as e:
            self.send_error(message, e.errno, str(e), sys.exc_info(), etype='VALIDATION', extra=[
                (e.attribute, e.errmsg, e.errno),
            ])
        except ValidationErrors as e:
            self.send_error(message, errno.EAGAIN, str(e), sys.exc_info(), etype='VALIDATION', extra=list(e))
        except (CallException, SchemaError) as e:
            # CallException and subclasses are the way to gracefully
            # send errors to the client
            self.send_error(message, e.errno, str(e), sys.exc_info(), extra=e.extra)
        except Exception as e:
            adapted = adapt_exception(e)
            if adapted:
                self.send_error(message, adapted.errno, str(adapted) or repr(adapted), sys.exc_info(),
                                extra=adapted.extra)
            else:
                self.send_error(message, errno.EINVAL, str(e) or repr(e), sys.exc_info())
                if not self._py_exceptions:
                    # Fixed: `Logger.warn` is a deprecated alias of `Logger.warning`.
                    self.logger.warning('Exception while calling {}(*{})'.format(
                        message['method'],
                        self.middleware.dump_args(message.get('params', []), method_name=message['method'])
                    ), exc_info=True)

    async def subscribe(self, ident, name):
        """Subscribe this connection to event `name` under the client-chosen `ident`."""
        shortname, arg = self.middleware.event_source_manager.short_name_arg(name)
        if shortname in self.middleware.event_source_manager.event_sources:
            await self.middleware.event_source_manager.subscribe_app(self, self.__esm_ident(ident), shortname, arg)
        else:
            self.__subscribed[ident] = name
        self._send({
            'msg': 'ready',
            'subs': [ident],
        })

    async def unsubscribe(self, ident):
        """Undo a previous `subscribe` for `ident` (plain event or event source)."""
        if ident in self.__subscribed:
            self.__subscribed.pop(ident)
        elif self.__esm_ident(ident) in self.middleware.event_source_manager.idents:
            await self.middleware.event_source_manager.unsubscribe(self.__esm_ident(ident))

    def __esm_ident(self, ident):
        # Event source idents are namespaced by session id to avoid cross-client clashes.
        return self.session_id + ident

    def send_event(self, name, event_type, **kwargs):
        """Forward event `name` to the client if subscribed (or subscribed to '*')."""
        if (
            not any(i == name or i == '*' for i in self.__subscribed.values()) and
            self.middleware.event_source_manager.short_name_arg(
                name
            )[0] not in self.middleware.event_source_manager.event_sources
        ):
            return
        event = {
            'msg': event_type.lower(),
            'collection': name,
        }
        kwargs = kwargs.copy()
        if 'id' in kwargs:
            event['id'] = kwargs.pop('id')
        if event_type in ('ADDED', 'CHANGED'):
            if 'fields' in kwargs:
                event['fields'] = kwargs.pop('fields')
        if kwargs:
            event['extra'] = kwargs
        self._send(event)

    def notify_unsubscribed(self, collection, error):
        """Tell the client it was unsubscribed from `collection`, optionally with an error."""
        error_dict = {}
        if error:
            if isinstance(error, ValidationErrors):
                error_dict['error'] = self.get_error_dict(
                    errno.EAGAIN, str(error), etype='VALIDATION', extra=list(error)
                )
            elif isinstance(error, CallError):
                error_dict['error'] = self.get_error_dict(
                    error.errno, str(error), extra=error.extra
                )
            else:
                error_dict['error'] = self.get_error_dict(errno.EINVAL, str(error))
        self._send({'msg': 'nosub', 'collection': collection, **error_dict})

    async def __log_audit_message_for_method(self, message, methodobj, authenticated, authorized, success):
        return await self.middleware.log_audit_message_for_method(
            message['method'], methodobj, message.get('params') or [], self, authenticated, authorized, success,
        )

    def on_open(self):
        self.middleware.register_wsclient(self)

    async def on_close(self):
        await self.run_callback(RpcWebSocketAppEvent.CLOSE)
        await self.middleware.event_source_manager.unsubscribe_app(self)
        self.middleware.unregister_wsclient(self)

    async def on_message(self, message: typing.Dict[str, typing.Any]):
        """Dispatch one incoming client message (handshake, method, ping, sub/unsub)."""
        await self.run_callback(RpcWebSocketAppEvent.MESSAGE, message)
        if message['msg'] == 'connect':
            if message.get('version') != '1':
                self._send({'msg': 'failed', 'version': '1'})
            else:
                features = message.get('features') or []
                if 'PY_EXCEPTIONS' in features:
                    self._py_exceptions = True
                # aiohttp can cancel tasks if a request take too long to finish
                # It is desired to prevent that in this stage in case we are debugging
                # middlewared via gdb (which makes the program execution a lot slower)
                await asyncio.shield(self.middleware.call_hook('core.on_connect', app=self))
                self._send({'msg': 'connected', 'session': self.session_id})
                self.handshake = True
        elif not self.handshake:
            self._send({'msg': 'failed', 'version': '1'})
        elif message['msg'] == 'method':
            if 'method' not in message:
                self.send_error(message, errno.EINVAL, "Message is malformed: 'method' is absent.")
            else:
                try:
                    serviceobj, methodobj = self.middleware.get_method(message['method'])
                    await self.middleware.authorize_method_call(
                        self, message['method'], methodobj, message.get('params') or [],
                    )
                except CallError as e:
                    self.send_error(message, e.errno, str(e), sys.exc_info(), extra=e.extra)
                else:
                    self.middleware.create_task(self.call_method(message, serviceobj, methodobj))
        elif message['msg'] == 'ping':
            pong = {'msg': 'pong'}
            if 'id' in message:
                pong['id'] = message['id']
            self._send(pong)
        elif message['msg'] == 'sub':
            if not self.middleware.can_subscribe(self, message['name'].split(':', 1)[0]):
                self.send_error(message, errno.EACCES, 'Not authorized')
            else:
                await self.subscribe(message['id'], message['name'])
        elif message['msg'] == 'unsub':
            await self.unsubscribe(message['id'])

    def __getstate__(self):
        # Applications are not meaningfully picklable; serialize to nothing.
        return {}

    def __setstate__(self, newstate):
        pass
class FileApplication(object):
    """HTTP endpoints implementing job-based file download and upload."""

    def __init__(self, middleware, loop):
        self.middleware = middleware
        self.loop = loop
        # job_id -> TimerHandle that closes the job's pipes if the transfer never starts.
        self.jobs = {}

    def register_job(self, job_id, buffered):
        """Arm the pipe-cleanup timer for `job_id`."""
        self.jobs[job_id] = self.middleware.loop.call_later(
            3600 if buffered else 60,  # FIXME: Allow the job to run for infinite time + give 300 seconds to begin
            # download instead of waiting 3600 seconds for the whole operation
            lambda: self.middleware.create_task(self._cleanup_job(job_id)),
        )

    async def _cleanup_cancel(self, job_id):
        """Disarm the cleanup timer (the transfer actually started)."""
        job_cleanup = self.jobs.pop(job_id, None)
        if job_cleanup:
            job_cleanup.cancel()

    async def _cleanup_job(self, job_id):
        """Close the pipes of a registered job whose transfer never happened."""
        if job_id not in self.jobs:
            return
        self.jobs[job_id].cancel()
        del self.jobs[job_id]
        job = self.middleware.jobs[job_id]
        await job.pipes.close()

    async def download(self, request):
        """Stream a job's output pipe to the client as an attachment (token-authenticated)."""
        path = request.path.split('/')
        # Bug fix: validate the last path *segment*, not the last *character* of
        # the URL (the old `request.path[-1].isdigit()` let e.g. ".../a1" through
        # and then crashed on int()).
        if not path[-1].isdigit():
            resp = web.Response()
            resp.set_status(404)
            return resp
        job_id = int(path[-1])
        qs = urllib.parse.parse_qs(request.query_string)
        denied = False
        filename = None
        if 'auth_token' not in qs:
            denied = True
        else:
            auth_token = qs.get('auth_token')[0]
            token = await self.middleware.call('auth.get_token', auth_token)
            if not token:
                denied = True
            else:
                # The token must have been issued for this exact job.
                if token['attributes'].get('job') != job_id:
                    denied = True
                else:
                    filename = token['attributes'].get('filename')
        if denied:
            resp = web.Response()
            resp.set_status(401)
            return resp
        job = self.middleware.jobs.get(job_id)
        if not job:
            resp = web.Response()
            resp.set_status(404)
            return resp
        if job_id not in self.jobs:
            # Cleanup already fired (or the job was never registered): the pipe is gone.
            resp = web.Response()
            resp.set_status(410)
            return resp
        resp = web.StreamResponse(status=200, reason='OK', headers={
            'Content-Type': 'application/octet-stream',
            # Bug fix: interpolate the filename carried by the token (the old
            # f-string contained no placeholder and `filename` was never used).
            'Content-Disposition': f'attachment; filename="{filename}"',
            'Transfer-Encoding': 'chunked',
        })
        await resp.prepare(request)

        def do_copy():
            # Pump the job's output pipe into the HTTP response in 1 MiB chunks.
            while True:
                read = job.pipes.output.r.read(1048576)
                if read == b'':
                    break
                asyncio.run_coroutine_threadsafe(resp.write(read), loop=self.loop).result()

        try:
            await self._cleanup_cancel(job_id)
            await self.middleware.run_in_thread(do_copy)
        finally:
            await job.pipes.close()
        await resp.drain()
        return resp

    async def upload(self, request):
        """Authenticate a multipart upload and feed its `file` part into a job's input pipe.

        Expected parts, in order: `data` (JSON with `method` and optional
        `params`), then `file` (the payload).
        """
        reader = await request.multipart()
        part = await reader.next()
        if not part:
            resp = web.Response(status=405, body='No part found on payload')
            resp.set_status(405)
            return resp
        if part.name != 'data':
            resp = web.Response(status=405, body='"data" part must be the first on payload')
            resp.set_status(405)
            return resp
        try:
            data = json.loads(await part.read())
        except Exception as e:
            return web.Response(status=400, body=str(e))
        if 'method' not in data:
            return web.Response(status=422)
        try:
            credentials = parse_credentials(request)
            if credentials is None:
                raise web.HTTPUnauthorized()
        except web.HTTPException as e:
            return web.Response(status=e.status_code, body=e.text)
        app = await create_application(request)
        try:
            authenticated_credentials = await authenticate(self.middleware, request, credentials, 'CALL',
                                                           data['method'])
            if authenticated_credentials is None:
                raise web.HTTPUnauthorized()
        except web.HTTPException as e:
            # Never log the plaintext password in the audit trail.
            credentials['credentials_data'].pop('password', None)
            await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
                'credentials': credentials,
                'error': e.text,
            }, False)
            return web.Response(status=e.status_code, body=e.text)
        app = await create_application(request, authenticated_credentials)
        credentials['credentials_data'].pop('password', None)
        await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
            'credentials': credentials,
            'error': None,
        }, True)
        filepart = await reader.next()
        if not filepart or filepart.name != 'file':
            resp = web.Response(status=405, body='"file" not found as second part on payload')
            resp.set_status(405)
            return resp
        try:
            serviceobj, methodobj = self.middleware.get_method(data['method'])
            if authenticated_credentials.authorize('CALL', data['method']):
                job = await self.middleware.call_with_audit(data['method'], serviceobj, methodobj,
                                                            data.get('params') or [], app,
                                                            pipes=Pipes(input_=self.middleware.pipe()))
            else:
                await self.middleware.log_audit_message_for_method(data['method'], methodobj, data.get('params') or [],
                                                                   app, True, False, False)
                raise web.HTTPForbidden()
            await self.middleware.run_in_thread(copy_multipart_to_pipe, self.loop, filepart, job.pipes.input)
        except CallError as e:
            if e.errno == CallError.ENOMETHOD:
                status_code = 422
            else:
                status_code = 412
            return web.Response(status=status_code, body=str(e))
        except web.HTTPException as e:
            return web.Response(status=e.status_code, body=e.text)
        except Exception as e:
            return web.Response(status=500, body=str(e))
        resp = web.Response(
            status=200,
            headers={
                'Content-Type': 'application/json',
            },
            body=json.dumps({'job_id': job.id}).encode(),
        )
        return resp
ShellResize = namedtuple("ShellResize", ["cols", "rows"])
class ShellWorkerThread(threading.Thread):
    """
    Worker thread responsible for forking and running the shell
    and spawning the reader and writer threads.
    """

    def __init__(self, middleware, ws, input_queue, loop, username, as_root, options):
        self.middleware = middleware
        self.ws = ws
        self.input_queue = input_queue
        self.loop = loop
        # Pid of the forked shell process; set in run().
        self.shell_pid = None
        self.command, self.sudo_warning = self.get_command(username, as_root, options)
        self._die = False
        super(ShellWorkerThread, self).__init__(daemon=True)

    def get_command(self, username, as_root, options):
        """Return ``(argv, sudo_warning)`` for the requested shell target.

        Targets (first matching option wins): a libvirt VM console, a docker
        container, an incus instance, or — by default — a login shell for
        `username`. Non-root users get the command wrapped in sudo and a
        warning flag set.
        """
        allowed_options = ('vm_id', 'app_name', 'virt_instance_id')
        # NOTE(review): `all` only fires when *every* option is supplied; two
        # simultaneous options slip through and the first branch below wins —
        # likely intended to reject more than one. Confirm before changing.
        if all(options.get(k) for k in allowed_options):
            raise CallError(f'Only one option is supported from {", ".join(allowed_options)}')
        if options.get('vm_id'):
            command = [
                '/usr/bin/virsh', '-c', 'qemu+unix:///system?socket=/run/truenas_libvirt/libvirt-sock',
                'console', f'{options["vm_data"]["id"]}_{options["vm_data"]["name"]}'
            ]
            if not as_root:
                command = ['/usr/bin/sudo', '-H', '-u', username] + command
            return command, not as_root
        elif options.get('app_name'):
            command = [
                '/usr/bin/docker', 'exec', '-it', options['container_id'], options.get('command', '/bin/bash'),
            ]
            if not as_root:
                command = ['/usr/bin/sudo', '-H', '-u', username] + command
            return command, not as_root
        elif options.get('virt_instance_id'):
            command = ['/usr/bin/incus', 'exec', options['virt_instance_id'], options.get('command', '/bin/bash')]
            if not as_root:
                command = ['/usr/bin/sudo', '-H', '-u', username] + command
            return command, not as_root
        else:
            return ['/usr/bin/login', '-p', '-f', username], False

    def resize(self, cols, rows):
        """Queue a pty window-size change; applied by the writer thread."""
        self.input_queue.put(ShellResize(cols, rows))

    def run(self):
        self.shell_pid, master_fd = os.forkpty()
        if self.shell_pid == 0:
            # Child process: drop inherited fds (keeping stdio) and exec the
            # shell command with a minimal root environment.
            close_fds(3)
            os.chdir('/root')
            env = {
                'TERM': 'xterm',
                'HOME': '/root',
                'LANG': 'en_US.UTF-8',
                'PATH': '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin',
                'LC_ALL': 'C.UTF-8',
            }
            os.execve(self.command[0], self.command, env)
        # Parent process from here on.
        # Terminal baudrate affects input queue size
        attr = termios.tcgetattr(master_fd)
        attr[4] = attr[5] = termios.B921600
        termios.tcsetattr(master_fd, termios.TCSANOW, attr)
        if self.sudo_warning:
            asyncio.run_coroutine_threadsafe(
                self.ws.send_bytes(
                    (
                        f"WARNING: Your user does not have sudo privileges so {self.command[4]} command will run\r\n"
                        f"on your behalf. This might cause permission issues.\r\n\r\n"
                    ).encode("utf-8")
                ), loop=self.loop
            ).result()

        def reader():
            """
            Reader thread for reading from pty file descriptor
            and forwarding it to the websocket.
            """
            try:
                while True:
                    try:
                        read = os.read(master_fd, 1024)
                    except OSError:
                        break
                    if read == b'':
                        break
                    asyncio.run_coroutine_threadsafe(
                        self.ws.send_bytes(read), loop=self.loop
                    ).result()
            except Exception:
                self.middleware.logger.error("Error in ShellWorkerThread.reader", exc_info=True)
                self.abort()

        def writer():
            """
            Writer thread for reading from input_queue and write to
            the shell pty file descriptor.
            """
            try:
                while True:
                    try:
                        get = self.input_queue.get(timeout=1)
                        if isinstance(get, ShellResize):
                            fcntl.ioctl(master_fd, termios.TIOCSWINSZ, struct.pack("HHHH", get.rows, get.cols, 0, 0))
                        else:
                            os.write(master_fd, get)
                    except queue.Empty:
                        # If we timeout waiting in input query lets make sure
                        # the shell process is still alive
                        try:
                            os.kill(self.shell_pid, 0)
                        except ProcessLookupError:
                            break
            except Exception:
                self.middleware.logger.error("Error in ShellWorkerThread.writer", exc_info=True)
                self.abort()

        t_reader = threading.Thread(target=reader, daemon=True)
        t_reader.start()
        t_writer = threading.Thread(target=writer, daemon=True)
        t_writer.start()
        # Wait for shell to exit
        while True:
            try:
                pid, rv = os.waitpid(self.shell_pid, os.WNOHANG)
            except ChildProcessError:
                # Shell process is gone; fall through to join the helpers.
                break
            if self._die:
                return
            if pid <= 0:
                # Shell still running; poll again in a second.
                time.sleep(1)
        t_reader.join()
        t_writer.join()
        asyncio.run_coroutine_threadsafe(self.ws.close(), self.loop)

    def die(self):
        # Ask run() to bail out without joining the reader/writer threads.
        self._die = True

    def abort(self):
        """Close the websocket, terminate the shell process and stop this thread."""
        asyncio.run_coroutine_threadsafe(self.ws.close(), self.loop)
        with contextlib.suppress(ProcessLookupError):
            os.kill(self.shell_pid, signal.SIGTERM)
        self.die()
class ShellConnectionData:
    """Mutable holder tying a shell websocket connection to its worker thread."""
    # Unique connection identifier (uuid4 string), assigned when the websocket opens.
    id = None
    # ShellWorkerThread servicing this connection once the client authenticates.
    t_worker = None
class ShellApplication:
    """Websocket endpoint attaching clients to interactive shells."""
    # Active shells, keyed by connection id (class-level registry).
    shells = {}

    def __init__(self, middleware):
        self.middleware = middleware

    async def ws_handler(self, request):
        """Accept a shell websocket, check origin access, and drive the session."""
        ws, prepared = await self.middleware.create_and_prepare_ws(request)
        if not prepared:
            return ws
        handler = BaseWebSocketHandler(self.middleware)
        origin = await handler.get_origin(request)
        if not await self.middleware.ws_can_access(ws, origin):
            return ws
        conndata = ShellConnectionData()
        conndata.id = str(uuid.uuid4())
        try:
            await self.run(ws, origin, conndata)
        except Exception:
            # Make sure the spawned shell does not outlive a failed session.
            if conndata.t_worker:
                await self.worker_kill(conndata.t_worker)
        finally:
            self.shells.pop(conndata.id, None)
        return ws

    async def run(self, ws, origin, conndata):
        """Message loop: authenticate via token first, then pump raw input to the shell."""
        # Each connection will have its own input queue
        input_queue = queue.Queue()
        authenticated = False
        async for msg in ws:
            if authenticated:
                # Add content of every message received in input queue
                input_queue.put(msg.data)
            else:
                # Pre-auth: only JSON messages carrying a valid token are accepted.
                try:
                    data = json.loads(msg.data)
                except json.decoder.JSONDecodeError:
                    continue
                token = data.get('token')
                if not token:
                    continue
                token = await self.middleware.call('auth.get_token_for_shell_application', token, origin)
                if not token:
                    await ws.send_json({
                        'msg': 'failed',
                        'error': {
                            'error': ErrnoMixin.ENOTAUTHENTICATED,
                            'reason': 'Invalid token',
                        }
                    })
                    continue
                authenticated = True
                options = data.get('options', {})
                # Validate/augment the requested target before spawning the worker.
                if options.get('vm_id'):
                    options['vm_data'] = await self.middleware.call('vm.get_instance', options['vm_id'])
                if options.get('virt_instance_id'):
                    try:
                        await self.middleware.call('virt.instance.get_instance', options['virt_instance_id'])
                    except InstanceNotFound:
                        raise CallError('Provided instance id is not valid')
                if options.get('app_name'):
                    if not options.get('container_id'):
                        raise CallError('Container id must be specified')
                    if options['container_id'] not in await self.middleware.call(
                        'app.container_console_choices', options['app_name']
                    ):
                        raise CallError('Provided container id is not valid')
                # By default we want to run virsh with user's privileges and assume all "permission denied"
                # errors this can cause, unless the user has a sudo permission for all commands; in that case, let's
                # run them straight with root privileges.
                as_root = False
                try:
                    user = await self.middleware.call(
                        'user.query',
                        [['username', '=', token['username']], ['local', '=', True]],
                        {'get': True},
                    )
                except MatchNotFound:
                    # Currently only local users can be sudoers
                    pass
                else:
                    if 'ALL' in user['sudo_commands'] or 'ALL' in user['sudo_commands_nopasswd']:
                        as_root = True
                    else:
                        # Sudo-for-ALL may also be granted through any of the user's local groups.
                        for group in await self.middleware.call('group.query', [
                            ['id', 'in', user['groups']], ['local', '=', True]
                        ]):
                            if 'ALL' in group['sudo_commands'] or 'ALL' in group['sudo_commands_nopasswd']:
                                as_root = True
                                break
                conndata.t_worker = ShellWorkerThread(
                    middleware=self.middleware, ws=ws, input_queue=input_queue, loop=asyncio.get_event_loop(),
                    username=token['username'], as_root=as_root, options=options,
                )
                conndata.t_worker.start()
                self.shells[conndata.id] = conndata.t_worker
                await ws.send_json({
                    'msg': 'connected',
                    'id': conndata.id,
                })
        # If connection was not authenticated, return earlier
        if not authenticated:
            return ws
        if conndata.t_worker:
            self.middleware.create_task(self.worker_kill(conndata.t_worker))
        return ws

    async def worker_kill(self, t_worker):
        """Terminate (then kill) the shell process tree and join the worker thread."""
        def worker_kill_impl():
            # If connection has been closed lets make sure shell is killed
            if t_worker.shell_pid:
                with contextlib.suppress(psutil.NoSuchProcess):
                    shell = psutil.Process(t_worker.shell_pid)
                    to_terminate = [shell] + shell.children(recursive=True)
                    for p in to_terminate:
                        with contextlib.suppress(psutil.NoSuchProcess):
                            p.terminate()
                    gone, alive = psutil.wait_procs(to_terminate, timeout=2)
                    for p in alive:
                        # Escalate to SIGKILL for anything that survived SIGTERM.
                        with contextlib.suppress(psutil.NoSuchProcess):
                            p.kill()
            t_worker.join()
        await self.middleware.run_in_thread(worker_kill_impl)
class PreparedCall(typing.NamedTuple):
    """Immutable bundle describing how a call should be executed.

    Presumably produced by call preparation and consumed by the call machinery
    outside this chunk — confirm against `Middleware._call`.
    """
    # Final positional arguments for the method, if any.
    args: list[typing.Any] | None = None
    # Executor the call should be run on (thread/process pool), if any.
    executor: typing.Any | None = None
    # Job created for job-style methods, if any.
    job: Job | None = None
class Middleware(LoadPluginsMixin, ServiceCallMixin):
    # Sentinel file: its existence means loading progress has already been
    # written to /dev/console once (checked in __init__, created by _console_write).
    CONSOLE_ONCE_PATH = f'{MIDDLEWARE_RUN_DIR}/.middlewared-console-once'
    def __init__(
        self, loop_debug=False, loop_monitor=True, debug_level=None,
        log_handler=None, trace_malloc=False,
        log_format='[%(asctime)s] (%(levelname)s) %(name)s.%(funcName)s():%(lineno)d - %(message)s',
    ):
        """Initialize middleware state; the event loop itself is started elsewhere."""
        super().__init__()
        self.logger = logger.Logger(
            'middlewared', debug_level, log_format
        ).getLogger()
        self.logger.info('Starting %s middleware', sw_version())
        self.crash_reporting_semaphore = asyncio.Semaphore(value=2)
        self.loop_debug = loop_debug
        self.loop_monitor = loop_monitor
        self.trace_malloc = trace_malloc
        self.debug_level = debug_level
        self.log_handler = log_handler
        self.log_format = log_format
        self.app = None
        self.loop = None
        self.runner = None
        # Identifies the thread that constructed us (the main/loop thread).
        self.__thread_id = threading.get_ident()
        multiprocessing.set_start_method('spawn') # Spawn new processes for ProcessPool instead of forking
        self.__init_procpool()
        # session_id -> Application, for connected websocket clients.
        self.__wsclients = {}
        self.role_manager = RoleManager(ROLES)
        self.events = Events(self.role_manager)
        self.event_source_manager = EventSourceManager(self)
        self.__event_subs = defaultdict(list)
        self.__hooks = defaultdict(list)
        # Per-hook-name count of active `block_hooks` contexts.
        self.__blocked_hooks = defaultdict(lambda: 0)
        self.__blocked_hooks_lock = threading.Lock()
        self.__init_services()
        # Tri-state console handle (see _console_write): None = undecided,
        # False = no console output, file object = open /dev/console.
        self.__console_io = False if os.path.exists(self.CONSOLE_ONCE_PATH) else None
        self.__terminate_task = None
        self.jobs = JobsQueue(self)
        self.mocks: typing.Dict[str, list[tuple[list, typing.Callable]]] = defaultdict(list)
        # Strong references to tasks created via create_task(), so they are
        # not garbage-collected before completing.
        self.tasks = set()
def create_task(self, coro, *, name=None):
task = self.loop.create_task(coro, name=name)
self.tasks.add(task)
task.add_done_callback(self.tasks.discard)
return task
def _load_api_versions(self):
versions = []
api_dir = os.path.join(os.path.dirname(__file__), 'api')
for version_dir in sorted(pathlib.Path(api_dir).iterdir()):
if version_dir.name.startswith('v') and version_dir.is_dir():
version = version_dir.name.replace('_', '.')
self._console_write(f'loading API version {version}')
versions.append(
APIVersion.from_module(
version,
importlib.import_module(f'middlewared.api.{version_dir.name}'),
),
)
return versions
    def __init_services(self):
        """Register the built-in CoreService and its core events."""
        # NOTE: imported locally even though `middlewared.service` is imported
        # at module top — presumably to defer attribute resolution; confirm.
        from middlewared.service import CoreService
        self.add_service(CoreService(self))
        self.event_register('core.environ', 'Send on middleware process environment changes.', private=True)
def __plugins_load(self):
setup_funcs = []
def on_module_begin(mod):
self._console_write(f'loaded plugin {mod.__name__}')
self.__notify_startup_progress()
def on_module_end(mod):
if not hasattr(mod, 'setup'):
return
mod_name = mod.__name__.split('.')
setup_plugin = '.'.join(mod_name[mod_name.index('plugins') + 1:])
setup_funcs.append((setup_plugin, mod.setup))
def on_modules_loaded():
self._console_write('resolving plugins schemas')
self._load_plugins(
on_module_begin=on_module_begin,
on_module_end=on_module_end,
on_modules_loaded=on_modules_loaded,
)
for namespace, service in self.get_services().items():
self.role_manager.register_method(f'{service._config.namespace}.config', ['READONLY_ADMIN'])
self.role_manager.register_method(f'{service._config.namespace}.get_instance', ['READONLY_ADMIN'])
self.role_manager.register_method(f'{service._config.namespace}.query', ['READONLY_ADMIN'])
if service._config.role_prefix:
self.role_manager.add_roles_to_method(
f'{service._config.namespace}.config', [f'{service._config.role_prefix}_READ']
)
self.role_manager.add_roles_to_method(
f'{service._config.namespace}.get_instance', [f'{service._config.role_prefix}_READ']
)
self.role_manager.add_roles_to_method(
f'{service._config.namespace}.query', [f'{service._config.role_prefix}_READ']
)
self.role_manager.register_method(
f'{service._config.namespace}.create', [f'{service._config.role_prefix}_WRITE']
)
self.role_manager.register_method(
f'{service._config.namespace}.update', [f'{service._config.role_prefix}_WRITE']
)
if service._config.role_separate_delete:
self.role_manager.register_method(
f'{service._config.namespace}.delete', [f'{service._config.role_prefix}_DELETE']
)
else:
self.role_manager.register_method(
f'{service._config.namespace}.delete', [f'{service._config.role_prefix}_WRITE']
)
for method_name in dir(service):
roles = getattr(getattr(service, method_name), 'roles', None) or []
if method_name in ['do_create', 'do_update', 'do_delete']:
method_name = method_name.removeprefix('do_')
if method_name.endswith('_choices'):
roles.append('READONLY_ADMIN')
if service._config.role_prefix:
roles.append(f'{service._config.role_prefix}_READ')
if roles:
self.role_manager.register_method(f'{service._config.namespace}.{method_name}', roles,
exist_ok=True)
return setup_funcs
async def __plugins_setup(self, setup_funcs):
# TODO: Rework it when we have order defined for setup functions
def sort_key(plugin__function):
plugin, function = plugin__function
beginning = [
# Move uploaded config files to their appropriate locations
'config',
# Connect to the database
'datastore',
# Allow internal UNIX socket authentication for plugins that run in separate pools
'auth',
# We need to register all services because pseudo-services can still be used by plugins setup functions
'service',
# We need to run pwenc first to ensure we have secret setup to work for encrypted fields which
# might be used in the setup functions.
'pwenc',
# We run boot plugin first to ensure we are able to retrieve
# BOOT POOL during system plugin initialization
'boot',
# We need to run system plugin setup's function first because when system boots, the right
# timezone is not configured. See #72131
'system',
# Initialize mail before other plugins try to send e-mail messages
'mail',
# We also need to load alerts first because other plugins can issue one-shot alerts during their
# initialization
'alert',
# Migrate users and groups ASAP
'account',
# Replication plugin needs to be initialized before zettarepl in order to register network activity
'replication',
# Migrate network interfaces ASAP
'network',
# catalog needs to be initialized before docker setup funcs are executed
# TODO: Remove this when we have upgrade alerts in place
'catalog',
]
try:
return beginning.index(plugin)
except ValueError:
return len(beginning)
setup_funcs = sorted(setup_funcs, key=sort_key)
# Only call setup after all schemas have been resolved because
# they can call methods with schemas defined.
setup_total = len(setup_funcs)
for i, setup_func in enumerate(setup_funcs):
name, f = setup_func
self._console_write(f'setting up plugins ({name}) [{i + 1}/{setup_total}]')
self.__notify_startup_progress()
call = f(self)
# Allow setup to be a coroutine
if asyncio.iscoroutinefunction(f):
await call
self.logger.debug('All plugins loaded')
def _setup_periodic_tasks(self):
for service_name, service_obj in self.get_services().items():
for task_name in dir(service_obj):
method = getattr(service_obj, task_name)
if callable(method) and hasattr(method, "_periodic"):
if method._periodic.run_on_start:
delay = 0
else:
delay = method._periodic.interval
method_name = f'{service_name}.{task_name}'
self.logger.debug(
f"Setting up periodic task {method_name} to run every {method._periodic.interval} seconds"
)
self.loop.call_soon_threadsafe(
self.loop.call_later,
delay,
functools.partial(
self.__call_periodic_task,
method, service_name, service_obj, method_name, method._periodic.interval
)
)
def __call_periodic_task(self, method, service_name, service_obj, method_name, interval):
self.create_task(self.__periodic_task_wrapper(method, service_name, service_obj, method_name, interval))
async def __periodic_task_wrapper(self, method, service_name, service_obj, method_name, interval):
self.logger.trace("Calling periodic task %s", method_name)
try:
await self._call(method_name, service_obj, method, [])
except Exception:
self.logger.warning("Exception while calling periodic task", exc_info=True)
self.loop.call_later(
interval,
functools.partial(
self.__call_periodic_task,
method, service_name, service_obj, method_name, interval
)
)
    # Consecutive failures writing to /dev/console; capped to avoid log spam.
    console_error_counter = 0

    def _console_write(self, text, fill_blank=True, append=False):
        """
        Helper method to write the progress of middlewared loading to the
        system console.

        There are some cases where loading will take a considerable amount of time,
        giving user at least some basic feedback is fundamental.
        """
        console_error_log_max = 3
        if self.console_error_counter == console_error_log_max:
            # sigh, truenas is installed on "gamer" hardware which
            # is miserable. The amount of quirks seen on this style
            # of hardware is astounding really. If we continually
            # fail to log to console, there is no reason to spam
            # our log file with it.
            return
        # False means we are running in a terminal, no console needed
        self.logger.trace('_console_write %r', text)
        if self.__console_io is False:
            return
        elif self.__console_io is None:
            # First call: decide whether a console is available and open it.
            if sys.stdin and sys.stdin.isatty():
                self.__console_io = False
                return
            try:
                self.__console_io = open('/dev/console', 'w')
            except Exception as e:
                self.logger.debug('Failed to open console: %r', e)
                self.console_error_counter += 1
                return
            try:
                # We need to make sure we only try to write to console one time
                # in case middlewared crashes and keep writing to console in a loop.
                with open(self.CONSOLE_ONCE_PATH, 'w'):
                    pass
            except Exception:
                pass
        try:
            if append:
                self.__console_io.write(text)
            else:
                # Overwrite the current console line with a fixed-width message.
                prefix = 'middlewared: '
                maxlen = 60
                text = text[:maxlen - len(prefix)]
                # new line needs to go after all the blanks
                if text.endswith('\n'):
                    newline = '\n'
                    text = text[:-1]
                else:
                    newline = ''
                if fill_blank:
                    blank = ' ' * (maxlen - (len(prefix) + len(text)))
                else:
                    blank = ''
                self.__console_io.write(f'\r{prefix}{text}{blank}{newline}')
            self.__console_io.flush()
            # be sure and reset error counter after we successfully log
            # to the console
            self.console_error_counter = 0
        except Exception as e:
            self.logger.debug('Failed to write to console: %r', e)
            self.console_error_counter += 1
    def __notify_startup_progress(self):
        """Ask systemd for more startup time; called on every loading-progress tick."""
        systemd_notify(f'EXTEND_TIMEOUT_USEC={SYSTEMD_EXTEND_USECS}')

    def __notify_startup_complete(self):
        """Create the middleware-started sentinel file and tell systemd we are ready."""
        with open(middlewared.service.MIDDLEWARE_STARTED_SENTINEL_PATH, 'w'):
            pass
        systemd_notify('READY=1')
    def plugin_route_add(self, plugin_name, route, method):
        """Expose an aiohttp route under the plugin-specific URL namespace."""
        self.app.router.add_route('*', f'/_plugins/{plugin_name}/{route}', method)

    def register_wsclient(self, client):
        # Track connected websocket clients by session id.
        self.__wsclients[client.session_id] = client

    def unregister_wsclient(self, client):
        # NOTE(review): raises KeyError if the client was never registered — confirm intended.
        self.__wsclients.pop(client.session_id)
def register_hook(self, name, method, *, blockable=False, inline=False, order=0, raise_error=False, sync=True):
    """
    Register a hook under `name`.

    The given `method` will be called whenever using call_hook.

    Args:
        name(str): name of the hook, e.g. service.hook_name
        method(callable): method to be called
        blockable(bool): whether the hook can be blocked (using `block_hooks` context manager)
        inline(bool): whether the method should be called in executor's context synchronously
        order(int): hook execution order
        raise_error(bool): whether an exception should be raised if a sync hook call fails
        sync(bool): whether the method should be called in a sync way
    """
    # All hooks under one name must agree on `blockable`, because
    # `block_hooks` blocks by name, not per-hook.
    for hook in self.__hooks[name]:
        if hook['blockable'] != blockable:
            qualname = hook['method'].__qualname__
            method_qualname = method.__qualname__
            raise RuntimeError(
                f'Hook {name!r}: {qualname!r} has blockable={hook["blockable"]!r}, but {method_qualname!r} has '
                f'blockable={blockable!r}'
            )
    if inline:
        # Inline hooks run synchronously in the caller's context, so they
        # cannot be coroutines and must be sync.
        if asyncio.iscoroutinefunction(method):
            raise RuntimeError('You can\'t register coroutine function as inline hook')
        if not sync:
            raise RuntimeError('Inline hooks are always called in a sync way')
    if raise_error:
        if not sync:
            raise RuntimeError('Hooks that raise error must be called in a sync way')
    self.__hooks[name].append({
        'method': method,
        'blockable': blockable,
        'inline': inline,
        'order': order,
        'raise_error': raise_error,
        'sync': sync,
    })
    # Keep hooks sorted by `order` so execution order is deterministic.
    self.__hooks[name] = sorted(self.__hooks[name], key=lambda hook: hook['order'])
@contextlib.contextmanager
def block_hooks(self, *names):
    """
    Context manager that temporarily prevents the hooks registered under
    `names` from running (`_call_hook_base` yields nothing while blocked).

    Every name must refer to an existing hook registered with
    `blockable=True`, otherwise RuntimeError is raised.
    """
    for name in names:
        if not self.__hooks[name]:
            raise RuntimeError(f'Hook {name!r} does not exist')
        if not self.__hooks[name][0]['blockable']:
            raise RuntimeError(f'Hook {name!r} is not blockable')
    with self.__blocked_hooks_lock:
        for name in names:
            self.__blocked_hooks[name] += 1
    # The decrement MUST run even if the `with` body raises; without
    # try/finally an exception would leave the hooks blocked forever.
    try:
        yield
    finally:
        with self.__blocked_hooks_lock:
            for name in names:
                self.__blocked_hooks[name] -= 1
def _call_hook_base(self, name, *args, **kwargs):
    """
    Generator shared by call_hook/call_hook_inline: start every registered
    hook under `name` and yield (hook, fut) pairs, where `fut` is an
    un-awaited coroutine for async hooks, the direct return value for
    inline hooks, or a `run_in_thread` coroutine for sync functions.
    """
    if self.__blocked_hooks[name] > 0:
        # Hook is currently blocked via `block_hooks`; run nothing.
        return
    for hook in self.__hooks[name]:
        try:
            if asyncio.iscoroutinefunction(hook['method']) or hook['inline']:
                # Inline hooks execute right here; coroutine functions only
                # build the coroutine — the caller decides how to await it.
                fut = hook['method'](self, *args, **kwargs)
            else:
                fut = self.run_in_thread(hook['method'], self, *args, **kwargs)
            yield hook, fut
        except Exception:
            if hook['raise_error']:
                raise
            self.logger.error(
                'Failed to run hook {}:{}(*{}, **{})'.format(name, hook['method'], args, kwargs), exc_info=True
            )
async def call_hook(self, name, *args, **kwargs):
    """
    Call all hooks registered under `name` passing *args and **kwargs.

    Args:
        name(str): name of the hook, e.g. service.hook_name
    """
    for hook, fut in self._call_hook_base(name, *args, **kwargs):
        try:
            if hook['inline']:
                raise RuntimeError('Inline hooks should be called with call_hook_inline')
            elif hook['sync']:
                await fut
            else:
                # Fire-and-forget: run the hook as a loop task.
                self.create_task(fut)
        except Exception:
            if hook['raise_error']:
                raise
            self.logger.error(
                'Failed to run hook {}:{}(*{}, **{})'.format(name, hook['method'], args, kwargs), exc_info=True
            )
def call_hook_sync(self, name, *args, **kwargs):
    """Blocking wrapper around `call_hook` for threads outside the event loop."""
    return self.run_coroutine(self.call_hook(name, *args, **kwargs))
def call_hook_inline(self, name, *args, **kwargs):
    """Run hooks under `name` synchronously in the current thread; all of them must be registered `inline`."""
    # Inline hooks already executed inside `_call_hook_base`; we only
    # validate that no non-inline hook was registered under this name.
    for hook, fut in self._call_hook_base(name, *args, **kwargs):
        if not hook['inline']:
            raise RuntimeError('Only inline hooks can be called with call_hook_inline')
def register_event_source(self, name, event_source, roles=None):
    """Register an event source class under `name`, restricted to `roles` (defaults to no role restriction list)."""
    roles = roles or []
    self.event_source_manager.register(name, event_source, roles)
async def run_in_executor(self, pool, method, *args, **kwargs):
    """
    Runs method in a native thread using concurrent.futures.Pool.
    This prevents a CPU intensive or non-asyncio friendly method
    to block the event loop indefinitely.
    Also used to run non thread safe libraries (using a ProcessPool)
    """
    # We are always invoked from coroutine context, so get_running_loop()
    # is correct and avoids the DeprecationWarning that get_event_loop()
    # emits when called this way on modern Python.
    loop = asyncio.get_running_loop()
    # functools.partial lets us forward **kwargs, which run_in_executor
    # itself does not accept.
    return await loop.run_in_executor(pool, functools.partial(method, *args, **kwargs))
async def run_in_thread(self, method, *args, **kwargs):
    """Run `method` in the shared I/O thread pool and await its result."""
    return await self.run_in_executor(io_thread_pool_executor, method, *args, **kwargs)
def __init_procpool(self):
    """(Re)create the worker process pool used by process-pool services; also called to replace a broken pool."""
    self.__procpool = concurrent.futures.ProcessPoolExecutor(
        max_workers=5,
        # Workers are recycled after 20 tasks — presumably to bound
        # per-worker memory growth; TODO confirm rationale.
        max_tasks_per_child=20,
        initializer=functools.partial(worker_init, self.debug_level, self.log_handler)
    )
async def run_in_proc(self, method, *args, **kwargs):
    """Run `method` in the process pool; if the pool broke, rebuild it and retry once."""
    retries = 2
    for i in range(retries):
        try:
            return await self.run_in_executor(self.__procpool, method, *args, **kwargs)
        except concurrent.futures.process.BrokenProcessPool:
            # A dead worker poisons the entire pool; on the last attempt
            # propagate, otherwise recreate the pool and try again.
            if i == retries - 1:
                raise
            self.__init_procpool()
def pipe(self, buffered=False):
    """
    Create a new `Pipe` bound to this middleware instance.

    :param buffered: Please see :class:`middlewared.pipe.Pipe` documentation for information on unbuffered and
        buffered pipes.
    """
    return Pipe(self, buffered)
def _call_prepare(
    self, name, serviceobj, methodobj, params, *, app=None, audit_callback=None, job_on_progress_cb=None,
    pipes=None, in_event_loop: bool = True,
):
    """
    Build a PreparedCall for method `name`: either a queued Job (for
    @job-decorated methods) or the positional args plus the executor the
    plain method should run in.

    :param in_event_loop: Whether we are in the event loop thread.
    :return: PreparedCall with either `job` or (`args`, `executor`) set.
    """
    audit_callback = audit_callback or (lambda message: None)
    params = list(params)
    args = []
    # Methods decorated to receive the app/audit_callback get them
    # prepended before the user-supplied params.
    if hasattr(methodobj, '_pass_app'):
        if methodobj._pass_app['require'] and app is None:
            raise CallError('`app` is required')
        args.append(app)
    if getattr(methodobj, 'audit_callback', None):
        args.append(audit_callback)
    args.extend(params)
    # If the method is marked as a @job we need to create a new
    # entry to keep track of its state.
    job_options = getattr(methodobj, '_job', None)
    if job_options:
        if serviceobj._config.process_pool:
            job_options['process'] = True
        # Create a job instance with required args
        job = Job(self, name, serviceobj, methodobj, params, job_options, pipes, job_on_progress_cb, app,
                  audit_callback)
        # Add the job to the queue.
        # At this point an `id` is assigned to the job.
        # Job might be replaced with an already existing job if `lock_queue_size` is used.
        if in_event_loop:
            job = self.jobs.add(job)
        else:
            # jobs.add must run on the loop thread; marshal the call over
            # and wait for it to complete.
            event = threading.Event()

            def cb():
                nonlocal job
                job = self.jobs.add(job)
                event.set()

            self.loop.call_soon_threadsafe(cb)
            event.wait()
        return PreparedCall(job=job)
    # Non-job methods: pick the executor (method-specific pool, the
    # service's configured pool, or the shared I/O pool).
    if hasattr(methodobj, '_thread_pool'):
        executor = methodobj._thread_pool
    elif serviceobj._config.thread_pool:
        executor = serviceobj._config.thread_pool
    else:
        executor = io_thread_pool_executor
    return PreparedCall(args=args, executor=executor)
async def _call(self, name, serviceobj, methodobj, params, **kwargs):
    """Dispatch a prepared call: return the Job, await the coroutine, run in the process pool, or in an executor."""
    prepared_call = self._call_prepare(name, serviceobj, methodobj, params, **kwargs)

    if prepared_call.job:
        return prepared_call.job

    if asyncio.iscoroutinefunction(methodobj):
        self.logger.trace('Calling %r in current IO loop', name)
        return await methodobj(*prepared_call.args)

    # Mocked methods never go to the process pool — the worker process
    # would resolve the real method by name, bypassing the mock.
    if not self.mocks.get(name) and serviceobj._config.process_pool:
        self.logger.trace('Calling %r in process pool', name)
        if isinstance(serviceobj, middlewared.service.CRUDService):
            service_name, method_name = name.rsplit('.', 1)
            if method_name in ['create', 'update', 'delete']:
                # Presumably the worker looks up the concrete do_* method
                # by name — TODO confirm against main_worker.
                name = f'{service_name}.do_{method_name}'
        return await self._call_worker(name, *prepared_call.args)

    self.logger.trace('Calling %r in executor %r', name, prepared_call.executor)
    return await self.run_in_executor(prepared_call.executor, methodobj, *prepared_call.args)
async def _call_worker(self, name, *args, job=None):
    """Execute method `name` with `args` inside a worker process via `main_worker`."""
    return await self.run_in_proc(main_worker, name, args, job)
def dump_args(self, args, method=None, method_name=None):
    """
    Serialize call arguments through the method's `accepts` schemas
    (which redact secrets); arguments without a schema pass through
    unchanged. Returns `args` untouched when no schema info is available.
    """
    if method is None and method_name is not None:
        try:
            method = self.get_method(method_name)[1]
        except Exception:
            return args
        mock = self._mock_method(method_name, args)
        if mock:
            method = mock

    if not hasattr(method, 'accepts'):
        crud_method = real_crud_method(method)
        if crud_method:
            method = crud_method

    # New-style API methods carry a single pydantic-like model.
    if hasattr(method, 'new_style_accepts'):
        return dump_params(method.new_style_accepts, args, False)

    if not hasattr(method, 'accepts'):
        return args

    schemas = method.accepts
    dumped = []
    for idx, arg in enumerate(args):
        if idx < len(schemas):
            dumped.append(schemas[idx].dump(arg))
        else:
            # More args than schemas: pass the extras through as-is.
            dumped.append(arg)
    return dumped
def dump_result(self, method, result, expose_secrets):
    """Serialize `result` through the method's returns schema, redacting secrets unless `expose_secrets`."""
    if isinstance(result, Job):
        # Jobs are returned as-is; their result is handled by the job machinery.
        return result

    if hasattr(method, "new_style_returns"):
        return serialize_result(method.new_style_returns, result, expose_secrets)

    if not expose_secrets and hasattr(method, "returns") and method.returns:
        schema = method.returns[0]
        if isinstance(schema, OROperator):
            result = schema.dump(result, False)
        else:
            result = schema.dump(result)

    return result
async def authorize_method_call(self, app, method_name, methodobj, params):
    """
    Enforce authentication/authorization for `method_name` called by `app`,
    raising CallError when the call must be rejected. Unauthenticated
    calls to `_no_auth_required` methods are rate limited per origin.
    """
    if hasattr(methodobj, '_no_auth_required'):
        if app.authenticated:
            # Do not rate limit authenticated users
            return

        if not getattr(methodobj, 'rate_limit', True):
            # The method is not subjected to rate limit.
            return

        ip_added = await RateLimitCache.add(method_name, app.origin)
        if ip_added is None:
            # the origin of the request for the unauthenticated method is an
            # internal call or comes from the other controller on an HA system
            return

        if any((
            RateLimitCache.max_entries_reached,
            RateLimitCache.rate_limit_exceeded(method_name, ip_added),
        )):
            # 1 of 2 things happened:
            # 1. we've hit maximum amount of entries for global rate limit
            # cache (this is an edge-case and something bad is going on)
            # 2. OR this endpoint has been hit too many times by the same
            # origin IP address
            # In either scenario, sleep a random delay and send an error
            await self.log_audit_message_for_method(method_name, methodobj, params, app, False, False, False)
            await RateLimitCache.random_sleep()
            raise CallError('Rate Limit Exceeded', errno.EBUSY)

        # was added to rate limit cache but rate limit thresholds haven't
        # been met so no error
        return

    if not app.authenticated:
        await self.log_audit_message_for_method(method_name, methodobj, params, app, False, False, False)
        raise CallError('Not authenticated', ErrnoMixin.ENOTAUTHENTICATED)

    # Some methods require authentication to the NAS (a valid account)
    # but not explicit authorization. In this case the authorization
    # check is bypassed as long as it is a user session. API keys
    # explicitly whitelist particular methods and are used for targeted
    # purposes, and so authorization is _always_ enforced.
    if app.authenticated_credentials.is_user_session and hasattr(methodobj, '_no_authz_required'):
        return

    if not app.authenticated_credentials.authorize('CALL', method_name):
        await self.log_audit_message_for_method(method_name, methodobj, params, app, True, False, False)
        # Distinguish an expired session from a plain authorization failure.
        if app.authenticated_credentials.is_user_session and not app.authenticated_credentials.is_valid():
            raise CallError('Session is expired', errno.EACCES)
        raise CallError('Not authorized', errno.EACCES)
def can_subscribe(self, app, name):
    """
    Decide whether `app` may subscribe to event `name`.

    Events flagged `no_auth_required` are open to everyone; otherwise the
    app must be authenticated, and unless the event is flagged
    `no_authz_required`, its credentials must authorize SUBSCRIBE.
    """
    event = self.events.get_event(name)
    if event and event['no_auth_required']:
        return True
    if not app.authenticated:
        return False
    if event and event['no_authz_required']:
        return True
    return app.authenticated_credentials.authorize('SUBSCRIBE', name)
async def call_with_audit(self, method, serviceobj, methodobj, params, app, **kwargs):
    """
    Execute `method` and emit an audit record for the call. For jobs the
    audit message is deferred until the job finishes (via its on-finish
    callback); for plain calls it is logged in the `finally` clause.
    """
    audit_callback_messages = []

    async def log_audit_message_for_method(success):
        # Bind the collected per-call audit messages into the record.
        await self.log_audit_message_for_method(method, methodobj, params, app, True, True, success,
                                                audit_callback_messages)

    async def job_on_finish_cb(job):
        await log_audit_message_for_method(job.state == State.SUCCESS)

    success = False
    job = None
    try:
        result = await self._call(method, serviceobj, methodobj, params, app=app,
                                  audit_callback=audit_callback_messages.append, **kwargs)
        success = True
        if isinstance(result, Job):
            job = result
            await job.set_on_finish_cb(job_on_finish_cb)

        # Secrets are only exposed to full admins or sessions holding the
        # service's *_WRITE role; everyone else gets a redacted result.
        expose_secrets = True
        if app and app.authenticated_credentials:
            if app.authenticated_credentials.is_user_session and not (
                credential_has_full_admin(app.authenticated_credentials) or
                (
                    serviceobj._config.role_prefix and
                    app.authenticated_credentials.has_role(f'{serviceobj._config.role_prefix}_WRITE')
                )
            ):
                expose_secrets = False

        result = self.dump_result(methodobj, result, expose_secrets)
    finally:
        # If the method is a job, audit message will be logged by `job_on_finish_cb`
        if job is None:
            await log_audit_message_for_method(success)

    return result
async def log_audit_message_for_method(self, method, methodobj, params, app, authenticated, authorized, success,
                                       callback_messages=None):
    """
    Emit one METHOD_CALL audit record per audit description attached to
    `methodobj` (or to its underlying CRUD method). Methods without an
    `audit` attribute produce no audit record.
    """
    callback_messages = callback_messages or []

    audit = getattr(methodobj, 'audit', None)
    audit_extended = getattr(methodobj, 'audit_extended', None)
    if audit is None:
        # Fall back to the real do_create/do_update/... method's audit info.
        if crud_method := real_crud_method(methodobj):
            audit = getattr(crud_method, 'audit', None)
            audit_extended = getattr(crud_method, 'audit_extended', None)
    if audit:
        audits = [audit]
        if callback_messages:
            audits = [f'{audit} {callback_message}' for callback_message in callback_messages]
        elif audit_extended:
            # `audit_extended` derives extra description text from the call
            # params; failures here must never break the call itself.
            try:
                audits[0] = f'{audit} {audit_extended(*params)}'
            except Exception:
                pass

        for description in audits:
            await self.log_audit_message(app, 'METHOD_CALL', {
                'method': method,
                'params': self.dump_args(params, methodobj),
                'description': description,
                'authenticated': authenticated,
                'authorized': authorized,
            }, success)
async def log_audit_message(self, app, event, event_data, success):
    """Build a TNAUDIT record and ship it to syslog via the /dev/log unix datagram socket."""
    remote_addr, origin = "127.0.0.1", None
    if app is not None and app.origin is not None:
        origin = app.origin.repr
        if app.origin.is_tcp_ip_family:
            remote_addr = origin

    # "@cee:" prefix marks the payload as structured (JSON) for the
    # syslog pipeline consuming these records.
    message = "@cee:" + json.dumps({
        "TNAUDIT": {
            "aid": str(uuid.uuid4()),
            "vers": {
                "major": 0,
                "minor": 1
            },
            "addr": remote_addr,
            "user": audit_username_from_session(app.authenticated_credentials),
            "sess": app.session_id,
            "time": utc_now().strftime('%Y-%m-%d %H:%M:%S.%f'),
            "svc": "MIDDLEWARE",
            # svc_data/event_data are nested JSON strings, not objects.
            "svc_data": json.dumps({
                "vers": {
                    "major": 0,
                    "minor": 1,
                },
                "origin": origin,
                "protocol": "WEBSOCKET" if app.websocket else "REST",
                "credentials": {
                    "credentials": app.authenticated_credentials.class_name(),
                    "credentials_data": app.authenticated_credentials.dump(),
                } if app.authenticated_credentials else None,
            }),
            "event": event,
            "event_data": json.dumps(event_data),
            "success": success,
        }
    })

    async with await create_connected_unix_datagram_socket("/dev/log") as s:
        await s.send(syslog_message(message))
async def call(self, name, *params, app=None, audit_callback=None, job_on_progress_cb=None, pipes=None,
               profile=False):
    """Async entry point: resolve method `name`, apply any registered mock and optional profiling, then dispatch."""
    serviceobj, methodobj = self.get_method(name)

    if mock := self._mock_method(name, params):
        methodobj = mock

    if profile:
        methodobj = profile_wrap(methodobj)

    return await self._call(
        name, serviceobj, methodobj, params,
        app=app, audit_callback=audit_callback, job_on_progress_cb=job_on_progress_cb, pipes=pipes,
    )
def call_sync(self, name, *params, job_on_progress_cb=None, app=None, audit_callback=None, background=False):
    """
    Synchronous counterpart of `call` for worker threads. Must never be
    used from the event-loop thread (it would deadlock waiting on itself).
    With `background=True` the call is scheduled fire-and-forget.
    """
    if threading.get_ident() == self.__thread_id:
        raise RuntimeError('You cannot use call_sync from main thread')

    if background:
        return self.loop.call_soon_threadsafe(lambda: self.create_task(self.call(name, *params, app=app)))

    serviceobj, methodobj = self.get_method(name)

    if mock := self._mock_method(name, params):
        methodobj = mock

    prepared_call = self._call_prepare(name, serviceobj, methodobj, params, app=app, audit_callback=audit_callback,
                                       job_on_progress_cb=job_on_progress_cb, in_event_loop=False)

    if prepared_call.job:
        return prepared_call.job

    if asyncio.iscoroutinefunction(methodobj):
        self.logger.trace('Calling %r in main IO loop', name)
        return self.run_coroutine(methodobj(*prepared_call.args))

    if serviceobj._config.process_pool:
        self.logger.trace('Calling %r in process pool', name)
        return self.run_coroutine(self._call_worker(name, *prepared_call.args))

    # If we are already a worker of the target executor, call directly
    # instead of re-dispatching into the pool we currently occupy.
    if not self._in_executor(prepared_call.executor):
        self.logger.trace('Calling %r in executor %r', name, prepared_call.executor)
        return self.run_coroutine(self.run_in_executor(prepared_call.executor, methodobj, *prepared_call.args))

    self.logger.trace('Calling %r in current thread', name)
    return methodobj(*prepared_call.args)
def _in_executor(self, executor):
    """Return whether the current thread is one of `executor`'s worker threads."""
    if isinstance(executor, concurrent.futures.thread.ThreadPoolExecutor):
        return threading.current_thread() in executor._threads
    elif isinstance(executor, IoThreadPoolExecutor):
        # I/O pool workers are identified by their thread-name prefix.
        return threading.current_thread().name.startswith(("IoThread", "ExtraIoThread"))
    else:
        raise RuntimeError(f"Unknown executor: {executor!r}")
def run_coroutine(self, coro, wait=True):
    """
    Schedule `coro` on the main event loop from another thread. With
    `wait=True` (default) block until it finishes and return its result;
    otherwise return the concurrent future immediately.
    """
    if threading.get_ident() == self.__thread_id:
        raise RuntimeError('You cannot use run_coroutine from main thread')

    fut = asyncio.run_coroutine_threadsafe(coro, self.loop)
    if not wait:
        return fut

    event = threading.Event()

    def done(_):
        event.set()

    fut.add_done_callback(done)

    # In case middleware dies while we are waiting for a `call_sync` result
    while not event.wait(1):
        if not self.loop.is_running():
            raise RuntimeError('Middleware is terminating')
    return fut.result()
def get_events(self):
    """Iterate (name, metadata) pairs for all registered events plus registered event sources."""
    return itertools.chain(
        self.events, map(
            # Adapt each (name, event_source_class) into the same metadata
            # shape regular events use.
            lambda n: (
                n[0],
                {
                    'description': inspect.getdoc(n[1]),
                    'private': False,
                    'wildcard_subscription': False,
                    'accepts': n[1].ACCEPTS,
                    'returns': n[1].RETURNS,
                    'new_style_returns': None,
                }
            ),
            self.event_source_manager.event_sources.items()
        )
    )
def event_subscribe(self, name, handler):
    """
    Internal way for middleware/plugins to subscribe to events.

    `handler` is an async callable invoked as handler(middleware, event_type, kwargs).
    """
    self.__event_subs[name].append(handler)
def event_register(self, name, description, *, private=False, returns=None, new_style_returns=None,
                   no_auth_required=False, no_authz_required=False, roles=None):
    """
    All events middleware can send should be registered, so they are properly documented
    and can be browsed in documentation page without source code inspection.
    """
    roles = roles or []
    self.events.register(name, description, private, returns, new_style_returns, no_auth_required,
                         no_authz_required, roles)
def send_event(self, name, event_type: str, **kwargs):
    """Fan out event `name` of `event_type` to all websocket clients and internal subscribers."""
    # Optional predicate deciding per-client whether to deliver.
    should_send_event = kwargs.pop('should_send_event', None)

    if name not in self.events:
        # We should eventually deny events that are not registered to ensure every event is
        # documented but for backward-compatibility and safety just log it for now.
        self.logger.warning(f'Event {name!r} not registered.')

    assert event_type in ('ADDED', 'CHANGED', 'REMOVED')

    self.logger.trace(f'Sending event {name!r}:{event_type!r}:{kwargs!r}')
    # Copy the client map: clients may register/unregister concurrently.
    for session_id, wsclient in list(self.__wsclients.items()):
        try:
            if should_send_event is None or should_send_event(wsclient):
                wsclient.send_event(name, event_type, **kwargs)
        except Exception:
            self.logger.warn('Failed to send event {} to {}'.format(name, session_id), exc_info=True)

    async def wrap(handler):
        try:
            await handler(self, event_type, kwargs)
        except Exception:
            self.logger.error('Unhandled exception in event handler', exc_info=True)

    # Send event also for internally subscribed plugins
    for handler in self.__event_subs.get(name, []):
        asyncio.run_coroutine_threadsafe(wrap(handler), loop=self.loop)
def pdb(self):
    """Drop into the interactive debugger (wired to SIGUSR1 in __initialize)."""
    import pdb
    pdb.set_trace()
def log_threads_stacks(self):
    """Log the stack of every thread (wired to SIGUSR2 in __initialize)."""
    for thread_id, stack in get_threads_stacks().items():
        self.logger.debug('Thread %d stack:\n%s', thread_id, ''.join(stack))
def _tracemalloc_start(self, limit, interval):
    """
    Run an endless loop grabbing snapshots of allocated memory using
    the python's builtin "tracemalloc" module.

    `limit` integer representing number of lines to print showing
    highest memory consumer

    `interval` integer representing the time in seconds to wait
    before taking another memory snapshot
    """
    # set the thread name
    set_thread_name('tracemalloc_monitor')
    # initalize tracemalloc
    tracemalloc.start()
    # if given bogus numbers, default both of them respectively
    if limit <= 0:
        limit = 5
    if interval <= 0:
        interval = 5
    # filters for the snapshots so we can
    # ignore modules that we don't care about
    filters = (
        tracemalloc.Filter(False, '<frozen importlib._bootstrap>'),
        tracemalloc.Filter(False, '<frozen importlib._bootstrap_external>'),
        tracemalloc.Filter(False, '<unknown>'),
        tracemalloc.Filter(False, '*tracemalloc.py'),
    )
    # start the loop
    prev = None
    while True:
        if prev is None:
            # First pass: only establish the baseline snapshot.
            prev = tracemalloc.take_snapshot().filter_traces(filters)
        else:
            curr = tracemalloc.take_snapshot().filter_traces(filters)
            diff = curr.compare_to(prev, 'lineno')
            prev = curr
            curr = None
            # Newline after the header so consumer #1 starts on its own
            # line (previously the header and #1 ran together).
            stats = f'\nTop {limit} consumers:\n'
            for idx, stat in enumerate(diff[:limit], 1):
                stats += f'#{idx}: {stat}\n'
            # print the memory used by the tracemalloc module itself.
            # get_tracemalloc_memory() returns bytes; convert to KiB so the
            # value matches its label (previously raw bytes were logged).
            tm_mem = tracemalloc.get_tracemalloc_memory() / 1024
            # add a newline at end of output to make logs more readable
            stats += f'Memory used by tracemalloc module: {tm_mem:.1f} KiB\n'
            self.logger.debug(stats)
        time.sleep(interval)
def set_mock(self, name, args, mock):
    """Install `mock` for method `name` when called with exactly `args` (args=None acts as a catch-all)."""
    for _args, _mock in self.mocks[name]:
        if args == _args:
            raise ValueError(f'{name!r} is already mocked with {args!r}')

    serviceobj, methodobj = self.get_method(name)

    # Wrap the mock so it receives the service object as its first
    # argument, mirroring a bound method.
    if inspect.iscoroutinefunction(mock):
        async def f(*args, **kwargs):
            return await mock(serviceobj, *args, **kwargs)
    else:
        def f(*args, **kwargs):
            return mock(serviceobj, *args, **kwargs)

    if hasattr(methodobj, '_job'):
        # Preserve job semantics so mocked @job methods still go through
        # the job machinery.
        f._job = methodobj._job
    copy_function_metadata(mock, f)

    self.mocks[name].append((args, f))
def remove_mock(self, name, args):
    """Remove the mock registered for method `name` with argument list `args`, if any."""
    entries = self.mocks[name]
    for idx, (mock_args, _mock) in enumerate(entries):
        if mock_args == args:
            del entries[idx]
            break
def _mock_method(self, name, params):
    """Return the mock for `name` matching `params` exactly, else a catch-all (args=None) mock, else None."""
    if mocks := self.mocks.get(name):
        # Exact-argument mocks take precedence over catch-all mocks.
        for args, mock in mocks:
            if args == list(params):
                return mock
        for args, mock in mocks:
            if args is None:
                return mock
async def create_and_prepare_ws(self, request):
    """Create a WebSocketResponse for `request`; returns (ws, prepared) where prepared=False on early disconnect."""
    ws = web.WebSocketResponse()
    prepared = False
    try:
        await ws.prepare(request)
        prepared = True
    except ConnectionResetError:
        # happens when we're preparing a new session
        # and during the time we prepare, the server
        # is stopped/killed/restarted etc. Ignore these
        # to prevent log spam
        pass

    return ws, prepared
async def ws_handler(self, request):
    """
    Main /websocket endpoint: authenticate the origin, then read and
    dispatch messages until the client disconnects or a protocol
    violation forces a close.
    """
    ws, prepared = await self.create_and_prepare_ws(request)
    if not prepared:
        return ws

    handler = BaseWebSocketHandler(self)
    origin = await handler.get_origin(request)
    if not await self.ws_can_access(ws, origin):
        return ws

    connection = Application(self, origin, self.loop, request, ws)
    connection.on_open()

    try:
        async for msg in ws:
            if msg.type == web.WSMsgType.ERROR:
                self.logger.error('Websocket error: %r', msg.data)
                break

            if msg.type != web.WSMsgType.TEXT:
                await ws.close(
                    code=WSCloseCode.UNSUPPORTED_DATA,
                    message=f'Invalid websocket message type: {msg.type!r}'.encode('utf-8'),
                )
                break

            try:
                message = parse_message(connection.authenticated, msg.data)
            except MsgSizeError as err:
                # Oversize messages from unauthenticated clients are closed
                # silently; authenticated offenders are logged with details.
                if err.limit is not MsgSizeLimit.UNAUTHENTICATED:
                    origin = connection.origin.repr if connection.origin else None
                    if connection.authenticated_credentials:
                        creds = connection.authenticated_credentials.dump()
                    else:
                        creds = None
                    self.logger.error(
                        'Client using credentials [%s] at [%s] sent message with payload size [%d bytes] '
                        'exceeding limit of %d for method %s',
                        creds, origin, err.datalen, err.limit, err.method_name
                    )
                await ws.close(
                    code=err.ws_close_code,
                    message=err.ws_errmsg.encode('utf-8'),
                )
                break
            except ValueError as f:
                await ws.close(
                    code=WSCloseCode.INVALID_TEXT,
                    message=f'{f}'.encode('utf-8'),
                )
                break

            try:
                await connection.on_message(message)
            except Exception as e:
                self.logger.error('Connection closed unexpectedly', exc_info=True)
                await ws.close(
                    code=WSCloseCode.INTERNAL_ERROR,
                    message=str(e).encode('utf-8'),
                )
                break
    finally:
        # Always run teardown, regardless of how the loop exited.
        await connection.on_close()
    return ws
async def ws_can_access(self, ws, origin):
    """Close `ws` with POLICY_VIOLATION and return False if `origin` is not allowed; True otherwise."""
    if not await BaseWebSocketHandler(self).can_access(origin):
        await ws.close(
            code=WSCloseCode.POLICY_VIOLATION,
            message='You are not allowed to access this resource'.encode('utf-8'),
        )
        return False
    return True
# Stack patterns the loop monitor should not report as blocking: awaits
# routed through run_in_thread and subprocess creation are expected to
# appear in long-running task stacks.
_loop_monitor_ignore_frames = (
    LoopMonitorIgnoreFrame(
        re.compile(r'\s+File ".+/middlewared/main\.py", line [0-9]+, in run_in_thread\s+'
                   'return await self.loop.run_in_executor'),
        'run_in_thread',
    ),
    LoopMonitorIgnoreFrame(
        re.compile(r'\s+File ".+/asyncio/subprocess\.py", line [0-9]+, in create_subprocess_(exec|shell)'),
        cut_below=True,
    ),
)
def _loop_monitor_thread(self):
    """
    Thread responsible for checking current tasks that are taking too long
    to finish and printing the stack.

    DISCLAIMER/TODO: This is not free of race condition so it may show
    false positives.
    """
    set_thread_name('loop_monitor')
    last = None
    while True:
        time.sleep(2)
        current = asyncio.current_task(loop=self.loop)
        if current is None:
            last = None
            continue
        # Same task still running after 2s: consider it blocked.
        if last == current:
            frame = sys._current_frames()[self.__thread_id]
            stack = traceback.format_stack(frame, limit=10)
            skip = False
            for ignore in self._loop_monitor_ignore_frames:
                for i, s in enumerate(stack):
                    if ignore.regex.match(s):
                        break
                else:
                    # No frame matched this ignore entry; try the next one.
                    continue
                if ignore.substitute:
                    self.logger.warn('%s seems to be blocking event loop', ignore.substitute)
                    skip = True
                elif ignore.cut_below:
                    stack = stack[:i + 1] + [f' ... + {len(stack)} lines below ...']
                break
            if not skip:
                self.logger.warn(''.join(['Task seems blocked:\n'] + stack))
        last = current
def run(self):
    """Blocking entry point: initialize on the event loop, run forever, then hard-kill the process group."""
    self._console_write('starting')

    set_thread_name('asyncio_loop')
    self.loop = asyncio.get_event_loop()

    if self.loop_debug:
        self.loop.set_debug(True)
        self.loop.slow_callback_duration = 0.2

    self.loop.run_until_complete(self.__initialize())

    try:
        self.loop.run_forever()
    except RuntimeError as e:
        if e.args[0] != "Event loop is closed":
            raise

    # As we don't do clean shutdown (which will terminate multiprocessing children gracefully),
    # let's just kill our entire process group
    os.killpg(os.getpgid(os.getpid()), signal.SIGKILL)

    # We use "_exit" specifically as otherwise process pool executor won't let middlewared process die because
    # it is still active. We don't initiate a shutdown for it because it may hang forever for any reason
    os._exit(0)
async def __initialize(self):
    """
    One-time async startup: build the aiohttp application, register all
    routes/signal handlers, start the process pool and listeners, run
    plugin setup, and notify systemd we are ready.
    """
    self.app = app = web.Application(middlewares=[
        normalize_path_middleware(redirect_class=HTTPPermanentRedirect)
    ], loop=self.loop)
    self.app['middleware'] = self

    api_versions = self._load_api_versions()
    api_versions_adapter = APIVersionsAdapter(api_versions)

    # Needs to happen after setting debug or may cause race condition
    # http://bugs.python.org/issue30805
    setup_funcs = self.__plugins_load()

    self._console_write('registering services')

    if self.loop_monitor:
        # Start monitor thread after plugins have been loaded
        # because of the time spent doing I/O
        t = threading.Thread(target=self._loop_monitor_thread)
        t.setDaemon(True)
        t.start()

    self.loop.add_signal_handler(signal.SIGINT, self.terminate)
    self.loop.add_signal_handler(signal.SIGTERM, self.terminate)
    self.loop.add_signal_handler(signal.SIGUSR1, self.pdb)
    self.loop.add_signal_handler(signal.SIGUSR2, self.log_threads_stacks)

    # The latest API version is served both at /api/current and at its
    # own versioned path.
    current_rpc_ws_handler = create_rpc_ws_handler(self, Method)
    app.router.add_route('GET', '/api/current', current_rpc_ws_handler)
    app.router.add_route('GET', f'/api/{api_versions[-1].version}', current_rpc_ws_handler)
    for version in api_versions[:-1]:
        app.router.add_route(
            'GET',
            f'/api/{version.version}',
            create_rpc_ws_handler(
                self,
                # Bind the loop variable (and adapter) as defaults: a
                # late-binding closure here would make every legacy
                # endpoint adapt to the *last* iterated version.
                lambda middleware, method_name, version=version, adapter=api_versions_adapter: LegacyAPIMethod(
                    middleware,
                    method_name,
                    version.version,
                    adapter,
                )
            ),
        )

    app.router.add_route('GET', '/websocket', self.ws_handler)
    app.router.add_routes(apidocs_routes)
    app.router.add_route('*', '/ui{path_info:.*}', WebUIAuth(self))

    self.fileapp = FileApplication(self, self.loop)
    app.router.add_route('*', '/_download{path_info:.*}', self.fileapp.download)
    app.router.add_route('*', '/_upload{path_info:.*}', self.fileapp.upload)

    shellapp = ShellApplication(self)
    app.router.add_route('*', '/_shell{path_info:.*}', shellapp.ws_handler)

    restful_api = RESTfulAPI(self, app)
    await restful_api.register_resources()
    self.create_task(self.jobs.run())

    # Start up middleware worker process pool
    self.__procpool._start_executor_manager_thread()

    self.runner = web.AppRunner(app, handle_signals=False, access_log=None)
    await self.runner.setup()
    # Internal socket is started before plugin setup so internal callers
    # can connect during setup.
    await web.UnixSite(self.runner, os.path.join(MIDDLEWARE_RUN_DIR, 'middlewared-internal.sock')).start()

    await self.__plugins_setup(setup_funcs)

    if await self.call('system.state') == 'READY':
        self._setup_periodic_tasks()

    unix_socket_path = os.path.join(MIDDLEWARE_RUN_DIR, 'middlewared.sock')
    await self.start_tcp_site('127.0.0.1')
    await web.UnixSite(self.runner, unix_socket_path).start()
    os.chmod(unix_socket_path, 0o666)

    if self.trace_malloc:
        limit = self.trace_malloc[0]
        interval = self.trace_malloc[1]
        _thr = threading.Thread(target=self._tracemalloc_start, args=(limit, interval,))
        _thr.setDaemon(True)
        _thr.start()

    self.logger.debug('Accepting connections')
    self._console_write('loading completed\n')

    self.__notify_startup_complete()
async def start_tcp_site(self, host):
    """Start a TCP listener for the web app on `host`, port 6000; returns the started site."""
    site = web.TCPSite(self.runner, host, 6000, reuse_address=True, reuse_port=True)
    await site.start()
    return site
def terminate(self):
    """Begin asynchronous shutdown (installed as the SIGINT/SIGTERM handler in __initialize)."""
    self.logger.info('Terminating')
    self.__terminate_task = self.create_task(self.__terminate())
async def __terminate(self):
    """Terminate every service that defines `terminate`, cancel remaining tasks, then stop the loop."""
    for service_name, service in self.get_services().items():
        # We're using this instead of having no-op `terminate`
        # in base class to reduce number of awaits
        if hasattr(service, "terminate"):
            self.logger.trace("Terminating %r", service)
            timeout = None
            if hasattr(service, 'terminate_timeout'):
                # Ask the service for its own timeout, but never wait more
                # than 5 seconds for the answer itself.
                try:
                    timeout = await asyncio.wait_for(
                        self.create_task(self.call(f'{service_name}.terminate_timeout')), 5
                    )
                except Exception:
                    self.logger.error(
                        'Failed to retrieve terminate timeout value for %s', service_name, exc_info=True
                    )

            # This is to ensure if some service returns 0 as a timeout value meaning it is probably not being
            # used, we still give it the standard default 10 seconds timeout to ensure a clean exit
            timeout = timeout or 10
            try:
                await asyncio.wait_for(self.create_task(service.terminate()), timeout)
            except Exception:
                self.logger.error('Failed to terminate %s', service_name, exc_info=True)

    for task in asyncio.all_tasks(loop=self.loop):
        if task != self.__terminate_task:
            self.logger.trace("Canceling %r", task)
            task.cancel()

    self.loop.stop()
def main():
    """CLI entry point: parse arguments, optionally signal a running instance, set up logging, and run Middleware."""
    # Workaround for development
    modpath = os.path.realpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '..',
    ))
    if modpath not in sys.path:
        sys.path.insert(0, modpath)

    parser = argparse.ArgumentParser()
    parser.add_argument('restart', nargs='?')
    parser.add_argument('--pidfile', '-P', action='store_true')
    parser.add_argument('--disable-loop-monitor', '-L', action='store_true')
    parser.add_argument('--loop-debug', action='store_true')
    parser.add_argument('--trace-malloc', '-tm', action='store', nargs=2, type=int, default=False)
    parser.add_argument('--overlay-dirs', '-o', action='append')
    parser.add_argument('--debug-level', choices=[
        'TRACE',
        'DEBUG',
        'INFO',
        'WARN',
        'ERROR',
    ], default='DEBUG')
    parser.add_argument('--log-handler', choices=[
        'console',
        'file',
    ], default='console')
    args = parser.parse_args()

    os.makedirs(MIDDLEWARE_RUN_DIR, exist_ok=True)
    pidpath = os.path.join(MIDDLEWARE_RUN_DIR, 'middlewared.pid')

    # "restart" mode: SIGTERM (15) the currently-running instance first.
    if args.restart:
        if os.path.exists(pidpath):
            with open(pidpath, 'r') as f:
                pid = int(f.read().strip())
            try:
                os.kill(pid, 15)
            except ProcessLookupError as e:
                if e.errno != errno.ESRCH:
                    raise

    logger.setup_logging('middleware', args.debug_level, args.log_handler)

    setproctitle.setproctitle('middlewared')

    if args.pidfile:
        with open(pidpath, "w") as _pidfile:
            _pidfile.write(f"{str(os.getpid())}\n")

    Middleware(
        loop_debug=args.loop_debug,
        loop_monitor=not args.disable_loop_monitor,
        trace_malloc=args.trace_malloc,
        debug_level=args.debug_level,
        log_handler=args.log_handler,
    ).run()
# Script entry point.
if __name__ == '__main__':
    main()
| 88,150 | Python | .py | 1,885 | 33.73687 | 126 | 0.569801 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,698 | console_formatter.py | truenas_middleware/src/middlewared/middlewared/logging/console_formatter.py | import logging
class ConsoleLogFormatter(logging.Formatter):
    """logging.Formatter that wraps the record's level name in an ANSI color chosen by severity."""

    CONSOLE_COLOR_FORMATTER = {
        'YELLOW': '\033[1;33m',  # (warning)
        'GREEN': '\033[1;32m',  # (info)
        'RED': '\033[1;31m',  # (error)
        'HIGHRED': '\033[1;41m',  # (critical)
        'RESET': '\033[1;m',  # Reset
    }
    LOGGING_LEVEL = {
        'CRITICAL': 50,
        'ERROR': 40,
        'WARNING': 30,
        'INFO': 20,
        'DEBUG': 10,
        'NOTSET': 0
    }

    def format(self, record):
        """Colorize record.levelname in place, then delegate to the base formatter.

        Returns the fully formatted log string.
        """
        # Severity -> color-name dispatch (replaces the original if/elif chain).
        color_by_level = {
            self.LOGGING_LEVEL['CRITICAL']: 'HIGHRED',
            self.LOGGING_LEVEL['ERROR']: 'HIGHRED',
            self.LOGGING_LEVEL['WARNING']: 'RED',
            self.LOGGING_LEVEL['INFO']: 'GREEN',
            self.LOGGING_LEVEL['DEBUG']: 'YELLOW',
        }
        color_name = color_by_level.get(record.levelno, 'RESET')
        color_start = self.CONSOLE_COLOR_FORMATTER[color_name]
        color_reset = self.CONSOLE_COLOR_FORMATTER['RESET']
        record.levelname = color_start + record.levelname + color_reset
        return logging.Formatter.format(self, record)
| 1,560 | Python | .py | 38 | 31.921053 | 71 | 0.588235 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
23,699 | pytest.py | truenas_middleware/src/middlewared/middlewared/test/integration/utils/pytest.py | __all__ = ["fail"]
# Holds the reason for a premature suite-wide failure (None while healthy).
failed = [None]


def fail(reason):
    """
    Prematurely abort the whole test suite execution, failing the test where this function is called
    (as opposed to just using `pytest.exit` which will not fail the test, and, if no previous tests failed, junit
    Jenkins plugin will display the test suite as green)
    """
    failed[0] = reason
    # Plain `assert` (not `raise`) so pytest's assertion rewriting reports
    # `reason` as a normal test failure.
    assert False, reason
| 392 | Python | .py | 10 | 35.1 | 113 | 0.707124 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |