id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15,700
|
contact.py
|
translate_pootle/tests/forms/contact.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from __future__ import absolute_import
import pytest
from django.conf import settings
from django.template.defaultfilters import escape
from django.template.loader import render_to_string
from contact.forms import ContactForm, ReportForm
from pootle_store.models import Unit
@pytest.mark.django_db
def test_contact_form(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
recipient_email = settings.POOTLE_CONTACT_EMAIL
specified_subject = "My subject"
subject = "[%s] %s" % (settings.POOTLE_TITLE, specified_subject)
data = {
'name': admin.full_name,
'email': admin.email,
'email_subject': specified_subject,
'body': "First paragraph of message\n\nSecond paragraph of message.",
}
form = ContactForm(request=request, data=data)
assert form.is_valid()
form.save()
assert len(mailoutbox) == 1
message = mailoutbox[0]
assert message.from_email == settings.DEFAULT_FROM_EMAIL
reply_to = u'%s <%s>' % (data['name'], data['email'])
assert reply_to == message.extra_headers['Reply-To']
assert [recipient_email] == message.recipients()
assert subject == message.subject
assert data['body'] in message.body
assert "Your question or comment:" not in message.body
@pytest.mark.django_db
def test_contact_form_escaped_tags(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
recipient_email = settings.POOTLE_CONTACT_EMAIL
specified_subject = "My <tag> subject"
subject = "[%s] %s" % (settings.POOTLE_TITLE, specified_subject)
data = {
'name': admin.full_name,
'email': admin.email,
'email_subject': specified_subject,
'body': "First <tag> of message.",
}
form = ContactForm(request=request, data=data)
assert form.is_valid()
form.save()
assert len(mailoutbox) == 1
message = mailoutbox[0]
assert message.from_email == settings.DEFAULT_FROM_EMAIL
reply_to = u'%s <%s>' % (data['name'], data['email'])
assert reply_to == message.extra_headers['Reply-To']
assert [recipient_email] == message.recipients()
assert escape(subject) == message.subject
assert escape(data['body']) in message.body
assert "Your question or comment:" not in message.body
@pytest.mark.django_db
def test_contact_form_subject(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
data = {
'name': admin.full_name,
'email': admin.email,
'email_subject': "a" * 101,
'body': "Whatever",
}
form = ContactForm(request=request, data=data)
assert not form.is_valid()
data['email_subject'] = "a" * 100
form = ContactForm(request=request, data=data)
assert form.is_valid()
@pytest.mark.django_db
def test_contact_form_required_fields(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
form = ContactForm(request=request, data={})
assert not form.is_valid()
assert 'email' in form.errors
assert form.errors['email'] == [u'This field is required.']
assert 'name' in form.errors
assert form.errors['name'] == [u'This field is required.']
assert 'email_subject' in form.errors
assert form.errors['email_subject'] == [u'This field is required.']
assert 'body' in form.errors
assert form.errors['body'] == [u'This field is required.']
def _test_report_form(unit, recipient_email, user, rf, mailoutbox):
request = rf.request()
request.user = user
request.META['REMOTE_ADDR'] = '127.0.0.1'
# Get initial data for the form.
subject_ctx = {
'server_name': settings.POOTLE_TITLE,
'unit': unit.pk,
'language': unit.store.translation_project.language.code,
'project': unit.store.translation_project.project.code,
}
subject = render_to_string('contact_form/report_form_subject.txt',
context=subject_ctx)
subject = subject.strip()
context_ctx = {
'unit': unit,
'unit_absolute_url':
request.build_absolute_uri(unit.get_translate_url()),
}
context = render_to_string('contact_form/report_form_context.txt',
context=context_ctx)
context = context.strip()
translator_comment = "The string is wrong"
data = {
'name': user.full_name,
'email': user.email,
'context': context,
'body': translator_comment,
}
email_body_ctx = {
'request': request,
'context': context,
'ip_address': '127.0.0.1',
'body': translator_comment,
}
email_body = render_to_string('contact_form/report_form.txt',
context=email_body_ctx)
# Instantiate form and test.
form = ReportForm(request=request, initial=data, data=data, unit=unit)
assert form.is_valid()
form.save()
assert len(mailoutbox) == 1
message = mailoutbox[0]
assert message.from_email == settings.DEFAULT_FROM_EMAIL
reply_to = u'%s <%s>' % (data['name'], data['email'])
assert reply_to == message.extra_headers['Reply-To']
assert [recipient_email] == message.recipients()
assert message.subject.startswith(u'[%s] ' % settings.POOTLE_TITLE)
assert subject == message.subject
assert email_body in message.body
@pytest.mark.django_db
def test_report_error_form_settings_email(admin, rf, mailoutbox):
unit = Unit.objects.select_related(
'store__translation_project__project',
'store__translation_project__language',
).last()
recipient_email = getattr(settings, 'POOTLE_CONTACT_REPORT_EMAIL',
settings.POOTLE_CONTACT_EMAIL)
_test_report_form(unit, recipient_email, admin, rf, mailoutbox)
@pytest.mark.django_db
def test_report_error_form_project_email(admin, rf, mailoutbox):
unit = Unit.objects.select_related(
'store__translation_project__project',
'store__translation_project__language',
).last()
project = unit.store.translation_project.project
project.report_email = "errors@example.net"
project.save()
_test_report_form(unit, project.report_email, admin, rf, mailoutbox)
@pytest.mark.django_db
def test_report_error_form_context_cannot_be_altered(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
unit = Unit.objects.select_related(
'store__translation_project__project',
'store__translation_project__language',
).last()
context_ctx = {
'unit': unit,
'unit_absolute_url':
request.build_absolute_uri(unit.get_translate_url()),
}
context = render_to_string('contact_form/report_form_context.txt',
context=context_ctx)
context = context.strip()
initial = {
'name': admin.full_name,
'email': admin.email,
'context': context,
'body': "The string is wrong",
}
data = initial.copy()
sent_context = "Different context"
data['context'] = sent_context
# Instantiate form and test.
form = ReportForm(request=request, initial=initial, data=data, unit=unit)
assert form.is_valid()
form.save()
assert len(mailoutbox) == 1
message = mailoutbox[0]
assert sent_context not in message.body
@pytest.mark.django_db
def test_report_error_form_escaped_tags(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
unit_target = "some <tag>"
unit = Unit.objects.select_related(
'store__translation_project__project',
'store__translation_project__language',
).last()
unit.target = unit_target
unit.save()
context_ctx = {
'unit': unit,
'unit_absolute_url':
request.build_absolute_uri(unit.get_translate_url()),
}
context = render_to_string('contact_form/report_form_context.txt',
context=context_ctx)
context = context.strip()
data = {
'name': admin.full_name,
'email': admin.email,
'context': context,
'body': "The string <tag> is wrong",
}
# Instantiate form and test.
form = ReportForm(request=request, initial=data, data=data, unit=unit)
assert form.is_valid()
form.save()
assert len(mailoutbox) == 1
message = mailoutbox[0]
assert escape(unit_target) in message.body
assert escape(data['body']) in message.body
@pytest.mark.django_db
def test_report_error_form_required_fields(admin, rf, mailoutbox):
request = rf.request()
request.user = admin
request.META['REMOTE_ADDR'] = '127.0.0.1'
unit = Unit.objects.select_related(
'store__translation_project__project',
'store__translation_project__language',
).last()
# Instantiate form and test.
form = ReportForm(request=request, initial={}, data={}, unit=unit)
assert not form.is_valid()
assert 'email' in form.errors
assert form.errors['email'] == [u'This field is required.']
assert 'name' in form.errors
assert form.errors['name'] == [u'This field is required.']
assert 'context' in form.errors
assert form.errors['context'] == [u'This field is required.']
assert 'body' in form.errors
assert form.errors['body'] == [u'This field is required.']
| 9,854
|
Python
|
.tac
| 253
| 32.83004
| 77
| 0.65806
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,701
|
contact_form.txt
|
translate_pootle/pootle/templates/contact_form/contact_form.txt
|
Username: {% if request.user.is_authenticated %}{{ request.user.username }}{% else %}anonymous user{% endif %}
Current URL: {{ request.META.HTTP_REFERER }}
IP address: {{ ip_address }}
User-Agent: {{ request.META.HTTP_USER_AGENT }}
{% block email_body %}{{ body|escape }}{% endblock %}
| 287
|
Python
|
.tac
| 5
| 56.2
| 110
| 0.683274
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,702
|
xhr_contact_form.html
|
translate_pootle/pootle/templates/contact_form/xhr_contact_form.html
|
{% load i18n locale %}
{% get_current_language as LANGUAGE_CODE %}
<div id="contact" class="popup-form" dir="{% locale_dir %}">
<div id="js-contact">
<h1>{{ contact_form_title }}</h1>
{% block contact_preamble %}
<p>
{% blocktrans trimmed %}
Please fill in all the fields.
{% endblocktrans %}
</p>
{% endblock %}
<form method="post" action="{{ contact_form_url }}">
{% csrf_token %}
{{ form.as_p }}
<p class="buttons">
<input type="submit" name="send" id="js-contact-submit"
class="btn btn-primary"
value="{% trans 'Send Message' %}" />
</p>
</form>
</div>
<div id="js-sent" class="hide">
<h1>{% trans "Thanks!" %}</h1>
<p>
{% blocktrans trimmed %}
We have received your message and will try to respond as soon as possible.
{% endblocktrans %}
</p>
</div>
</div>
| 909
|
Python
|
.tac
| 31
| 23.612903
| 80
| 0.552392
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,703
|
contact_form.html
|
translate_pootle/pootle/templates/contact_form/contact_form.html
|
{% extends 'welcome.html' %}
{% block scripts_extra %}
{{ block.super }}
<script type="text/javascript">
$(function () {
PTL.contact.open({url: '{% url "pootle-contact-xhr" %}'});
});
</script>
{% endblock %}
| 218
|
Python
|
.tac
| 9
| 22.222222
| 62
| 0.610577
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,704
|
contact.js
|
translate_pootle/pootle/static/js/contact.js
|
/*
* Copyright (C) Pootle contributors.
*
* This file is a part of the Pootle project. It is distributed under the GPL3
* or later license. See the LICENSE file for a copy of the license and the
* AUTHORS file for copyright and authorship information.
*/
import $ from 'jquery';
import 'jquery-magnific-popup';
import 'jquery-serializeObject';
const sel = {
data: {
target: '[data-action="contact"]',
subjectPrefix: 'subject-prefix',
subject: 'subject',
body: 'body',
},
trigger: '.js-contact',
wrapper: '#js-contact',
form: '#js-contact form',
formSent: '#js-sent',
subject: '#js-contact #id_email_subject',
body: '#js-contact #id_body',
};
const contact = {
url: null,
init(options) {
if (options) {
$.extend(this, options);
}
$(document).on('click', sel.trigger, (e) => {
e.preventDefault();
this.open();
});
$(document).on('click', sel.data.target, this.onClick.bind(this));
$(document).on('submit', sel.form, this.onSubmit.bind(this));
},
onClick(e) {
e.preventDefault();
const $el = $(e.target);
const sP = $el.data(sel.data.subjectPrefix);
const subjectPrefix = sP ? ['[', sP, '] '].join('') : sP;
const subject = $el.data(sel.data.subject);
const body = $el.data(sel.data.body);
this.open({
subjectPrefix,
subject,
body,
});
},
open(opts = {}) {
const contactUrl = opts.url || this.url;
if (contactUrl === null) {
return false;
}
$.magnificPopup.open({
items: {
src: contactUrl,
type: 'ajax',
},
callbacks: {
ajaxContentAdded() {
const newSubject = [];
if (opts.subjectPrefix) {
newSubject.push(opts.subjectPrefix);
}
if (opts.subject) {
newSubject.push(opts.subject);
}
if (newSubject.length) {
$(sel.subject).val(newSubject.join(''));
}
if (opts.body) {
$(sel.body).val(opts.body);
}
},
},
mainClass: 'popup-ajax',
});
return true;
},
onSubmit(e) {
e.preventDefault();
const $form = $(sel.form);
const url = $form.attr('action');
const data = $form.serializeObject();
const captchaCallbacks = {
sfn: 'PTL.contact.onSubmit',
efn: 'PTL.contact.onError',
};
$.extend(data, captchaCallbacks);
this.sendMessage(url, data);
},
sendMessage(url, data) {
const that = this;
$.ajax({
url,
data,
type: 'POST',
dataType: 'json',
success: that.onSuccess.bind(that),
error: that.onError.bind(that),
});
},
onSuccess() {
// Display thank you message
$(sel.wrapper).hide();
$(sel.formSent).show();
},
onError(xhr) {
this.displayErrors(xhr.responseJSON.errors);
},
/* Displays errors returned by the contact request */
displayErrors(errors) {
$('ul.errorlist').remove();
for (const fieldName in errors) {
if (!errors.hasOwnProperty(fieldName)) {
continue;
}
this.validationError(fieldName, errors[fieldName]);
}
},
/* Injects a form validation error next to the input it failed to
* validate */
validationError(fieldName, msgs) {
const $field = $(`#id_${fieldName}`);
const errorList = ['<ul class="errorlist">'];
for (let i = 0; i < msgs.length; i++) {
errorList.push(['<li>', msgs[i], '</li>'].join(''));
}
errorList.push(['</ul>']);
$field.after(errorList.join(''));
},
};
export default contact;
| 3,601
|
Python
|
.tac
| 135
| 21.17037
| 78
| 0.587962
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,705
|
jquery.flot.stack.js
|
translate_pootle/pootle/static/js/vendor/jquery/jquery.flot.stack.js
|
/* Flot plugin for stacking bars (mix of positive and negative).
Copyright (c) 2007-2012 IOLA and Ole Laursen.
Licensed under the MIT license.
Based on the jquery.flot.stack plugin and stackpercent plugin by skeleton9
Modified by ericliao 2013-1-29 to support stacking of positive and negative values
IMPORTANT: Only tested to work properly with bar charts, for other chart types
please use the official stacking plugin.
*/
(function ($) {
var options = {
series: { stack: null } // or number/string
};
function init(plot) {
var stackBases_pos = {};
var stackBases_neg = {};
function stackData(plot, s, datapoints) {
var i;
var allseries = plot.getData();
var all_bars = true;
for (i = 0; i < allseries.length; i++) {
if (!allseries[i].bars.show && (allseries[i].lines && allseries[i].lines.show)) {
all_bars = false;
break;
}
}
if (s.stack == null || !all_bars || !s.bars.show)
return;
var newPoints = [];
for (i = 0; i < datapoints.points.length; i += datapoints.pointsize) {
if (!stackBases_pos[datapoints.points[i]]) {
stackBases_pos[datapoints.points[i]] = 0;
}
if (!stackBases_neg[datapoints.points[i]]) {
stackBases_neg[datapoints.points[i]] = 0;
}
newPoints[i] = datapoints.points[i];
if (datapoints.points[i + 1] > 0) {
newPoints[i + 1] = datapoints.points[i + 1] + stackBases_pos[datapoints.points[i]];
newPoints[i + 2] = stackBases_pos[datapoints.points[i]];
stackBases_pos[datapoints.points[i]] += datapoints.points[i + 1];
} else {
newPoints[i + 1] = datapoints.points[i + 1] + stackBases_neg[datapoints.points[i]];
newPoints[i + 2] = stackBases_neg[datapoints.points[i]];
stackBases_neg[datapoints.points[i]] += datapoints.points[i + 1];
}
}
datapoints.points = newPoints;
}
plot.hooks.processDatapoints.push(stackData);
}
$.plot.plugins.push({
init: init,
options: options,
name: 'stackbars',
version: '1.0'
});
})(jQuery);
| 2,464
|
Python
|
.tac
| 57
| 31.263158
| 103
| 0.542415
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,706
|
contact.css
|
translate_pootle/pootle/static/css/contact.css
|
/*
* Copyright (C) Pootle contributors.
*
* This file is a part of the Pootle project. It is distributed under the GPL3
* or later license. See the LICENSE file for a copy of the license and the
* AUTHORS file for copyright and authorship information.
*/
/*
* CONTACT PAGE
*/
#contact label[for="id_captcha_answer"]
{
display: inline;
}
#contact #id_name,
#contact #id_email
{
width: 25em;
}
#contact #id_email_subject,
#contact #id_context,
#contact #id_body
{
width: 40em;
}
| 501
|
Python
|
.tac
| 25
| 18.08
| 78
| 0.722458
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,707
|
pootle.wsgi
|
translate_pootle/docker/app/wsgi/pootle.wsgi
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
from django.core.wsgi import get_wsgi_application
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
os.environ['POOTLE_SETTINGS'] = '/app/pootle.conf'
# import newrelic.agent
# NEW_RELIC_CONFIG = os.path.join(os.path.dirname(__file__), "newrelic.ini")
# print "LOADING NEWRELIC..."
# print "from %s" % NEW_RELIC_CONFIG
# print newrelic.agent.initialize(NEW_RELIC_CONFIG)
application = get_wsgi_application()
| 729
|
Python
|
.wsgi
| 18
| 39.111111
| 77
| 0.755682
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,708
|
pootle-uwsgi.ini
|
translate_pootle/docker/app/wsgi/pootle-uwsgi.ini
|
[uwsgi]
chdir = /app
virtualenv = /app
socket = /var/run/pootle/pootle.sock
wsgi-file = wsgi/pootle.wsgi
pythonpath = /app/src
pythonpath = /app/lib/python2.7/site-packages/pootle/apps
env = DJANGO_SETTINGS_MODULE=pootle.settings
POOTLE_SETTINGS=/app/pootle.conf
master = true
vacuum = true
processes = 2
enable-threads = true
threads = 3
uid = pootle
gid = pootle
chmod-socket = 666
thread-stacksize = 512
skip-atexit = true
max-requests = 500
manage-script-name = true
single-interpreter = true
ignore-sigpipe = true
ignore-write-errors = true
disable-write-exception = true
buffer-size=65535
stats=/var/run/pootle/stats.sock
memory-report=true
logto=/app/log/uwsgi.log
| 676
|
Python
|
.wsgi
| 29
| 22.172414
| 57
| 0.795981
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,709
|
apache-wsgi.py
|
translate_pootle/docs/server/apache-wsgi.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import site
import sys
# You probably will need to change these paths to match your deployment,
# most likely because of the Python version you are using.
ALLDIRS = [
'/var/www/pootle/env/lib/python2.7/site-packages',
'/var/www/pootle/env/lib/python2.7/site-packages/pootle/apps',
]
# Remember original sys.path.
prev_sys_path = list(sys.path)
# Add each new site-packages directory.
for directory in ALLDIRS:
site.addsitedir(directory)
# Reorder sys.path so new directories at the front.
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
# Set the Pootle settings module as DJANGO_SETTINGS_MODULE.
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
# Set the WSGI application.
def application(environ, start_response):
"""Wrapper for Django's WSGIHandler().
This allows to get values specified by SetEnv in the Apache
configuration or interpose other changes to that environment, like
installing middleware.
"""
try:
os.environ['POOTLE_SETTINGS'] = environ['POOTLE_SETTINGS']
except KeyError:
pass
from django.core.wsgi import get_wsgi_application
_wsgi_application = get_wsgi_application()
return _wsgi_application(environ, start_response)
| 1,407
|
Python
|
.wsgi
| 39
| 32.487179
| 72
| 0.735793
|
translate/pootle
| 1,486
| 288
| 526
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,710
|
goal.py
|
rpm-software-management_dnf/dnf/goal.py
|
# goal.py
# Customized hawkey.Goal
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from hawkey import Goal
| 1,101
|
Python
|
.py
| 22
| 48.954545
| 77
| 0.787372
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,711
|
const.py.in
|
rpm-software-management_dnf/dnf/const.py.in
|
# const.py
# dnf constants.
#
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
CONF_FILENAME='/etc/dnf/dnf.conf' # :api
CONF_AUTOMATIC_FILENAME='/etc/dnf/automatic.conf'
DISTROVERPKG=('system-release(releasever)', 'system-release',
'distribution-release(releasever)', 'distribution-release',
'redhat-release', 'suse-release')
GROUP_PACKAGE_TYPES = ('mandatory', 'default', 'conditional') # :api
INSTALLONLYPKGS=['kernel', 'kernel-PAE',
'installonlypkg(kernel)',
'installonlypkg(kernel-module)',
'installonlypkg(vm)',
'multiversion(kernel)']
LOG='dnf.log'
LOG_HAWKEY='hawkey.log'
LOG_LIBREPO='dnf.librepo.log'
LOG_MARKER='--- logging initialized ---'
LOG_RPM='dnf.rpm.log'
NAME='DNF'
PERSISTDIR='/var/lib/dnf' # :api
PID_FILENAME = '/var/run/dnf.pid'
RUNDIR='/run'
USER_RUNDIR='/run/user'
SYSTEM_CACHEDIR='/var/cache/dnf'
TMPDIR='/var/tmp/'
# CLI verbose values greater or equal to this are considered "verbose":
VERBOSE_LEVEL=6
PREFIX=NAME.lower()
PROGRAM_NAME=NAME.lower() # Deprecated - no longer used, Argparser prints program name based on sys.argv
PLUGINCONFPATH = '/etc/dnf/plugins' # :api
PLUGINPATH = '@PYTHON_INSTALL_DIR@/dnf-plugins'
VERSION='@DNF_VERSION@'
USER_AGENT = "dnf/%s" % VERSION
BUGTRACKER_COMPONENT=NAME.lower()
BUGTRACKER='https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora&component=%s' % BUGTRACKER_COMPONENT
| 2,393
|
Python
|
.py
| 53
| 42.264151
| 105
| 0.738442
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,712
|
util.py
|
rpm-software-management_dnf/dnf/util.py
|
# util.py
# Basic dnf utils.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .pycomp import PY3, basestring
from dnf.i18n import _, ucd
import argparse
import dnf
import dnf.callback
import dnf.const
import dnf.pycomp
import errno
import functools
import hawkey
import itertools
import locale
import logging
import os
import pwd
import shutil
import sys
import tempfile
import time
import libdnf.repo
import libdnf.transaction
logger = logging.getLogger('dnf')
MAIN_PROG = argparse.ArgumentParser().prog if argparse.ArgumentParser().prog == "yum" else "dnf"
MAIN_PROG_UPPER = MAIN_PROG.upper()
"""DNF Utilities."""
def _parse_specs(namespace, values):
"""
Categorize :param values list into packages, groups and filenames
:param namespace: argparse.Namespace, where specs will be stored
:param values: list of specs, whether packages ('foo') or groups/modules ('@bar')
or filenames ('*.rmp', 'http://*', ...)
To access packages use: specs.pkg_specs,
to access groups use: specs.grp_specs,
to access filenames use: specs.filenames
"""
setattr(namespace, "filenames", [])
setattr(namespace, "grp_specs", [])
setattr(namespace, "pkg_specs", [])
tmp_set = set()
for value in values:
if value in tmp_set:
continue
tmp_set.add(value)
schemes = dnf.pycomp.urlparse.urlparse(value)[0]
if value.endswith('.rpm'):
namespace.filenames.append(value)
elif schemes and schemes in ('http', 'ftp', 'file', 'https'):
namespace.filenames.append(value)
elif value.startswith('@'):
namespace.grp_specs.append(value[1:])
else:
namespace.pkg_specs.append(value)
def _urlopen_progress(url, conf, progress=None):
if progress is None:
progress = dnf.callback.NullDownloadProgress()
pload = dnf.repo.RemoteRPMPayload(url, conf, progress)
est_remote_size = sum([pload.download_size])
progress.start(1, est_remote_size)
targets = [pload._librepo_target()]
try:
libdnf.repo.PackageTarget.downloadPackages(libdnf.repo.VectorPPackageTarget(targets), True)
except RuntimeError as e:
if conf.strict:
raise IOError(str(e))
logger.error(str(e))
return pload.local_path
def _urlopen(url, conf=None, repo=None, mode='w+b', **kwargs):
"""
Open the specified absolute url, return a file object
which respects proxy setting even for non-repo downloads
"""
if PY3 and 'b' not in mode:
kwargs.setdefault('encoding', 'utf-8')
fo = tempfile.NamedTemporaryFile(mode, **kwargs)
try:
if repo:
repo._repo.downloadUrl(url, fo.fileno())
else:
libdnf.repo.Downloader.downloadURL(conf._config if conf else None, url, fo.fileno())
except RuntimeError as e:
raise IOError(str(e))
fo.seek(0)
return fo
def rtrim(s, r):
if s.endswith(r):
s = s[:-len(r)]
return s
def am_i_root():
# used by ansible (lib/ansible/modules/packaging/os/dnf.py)
return os.geteuid() == 0
def clear_dir(path):
"""Remove all files and dirs under `path`
Also see rm_rf()
"""
for entry in os.listdir(path):
contained_path = os.path.join(path, entry)
rm_rf(contained_path)
def ensure_dir(dname):
# used by ansible (lib/ansible/modules/packaging/os/dnf.py)
try:
os.makedirs(dname, mode=0o755)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(dname):
raise e
def split_path(path):
"""
Split path by path separators.
Use os.path.join() to join the path back to string.
"""
result = []
head = path
while True:
head, tail = os.path.split(head)
if not tail:
if head or not result:
# if not result: make sure result is [""] so os.path.join(*result) can be called
result.insert(0, head)
break
result.insert(0, tail)
return result
def empty(iterable):
try:
l = len(iterable)
except TypeError:
l = len(list(iterable))
return l == 0
def first(iterable):
"""Returns the first item from an iterable or None if it has no elements."""
it = iter(iterable)
try:
return next(it)
except StopIteration:
return None
def first_not_none(iterable):
it = iter(iterable)
try:
return next(item for item in it if item is not None)
except StopIteration:
return None
def file_age(fn):
return time.time() - file_timestamp(fn)
def file_timestamp(fn):
return os.stat(fn).st_mtime
def get_effective_login():
try:
return pwd.getpwuid(os.geteuid())[0]
except KeyError:
return "UID: %s" % os.geteuid()
def get_in(dct, keys, not_found):
"""Like dict.get() for nested dicts."""
for k in keys:
dct = dct.get(k)
if dct is None:
return not_found
return dct
def group_by_filter(fn, iterable):
def splitter(acc, item):
acc[not bool(fn(item))].append(item)
return acc
return functools.reduce(splitter, iterable, ([], []))
def insert_if(item, iterable, condition):
"""Insert an item into an iterable by a condition."""
for original_item in iterable:
if condition(original_item):
yield item
yield original_item
def is_exhausted(iterator):
"""Test whether an iterator is exhausted."""
try:
next(iterator)
except StopIteration:
return True
else:
return False
def is_glob_pattern(pattern):
if is_string_type(pattern):
pattern = [pattern]
return (isinstance(pattern, list) and any(set(p) & set("*[?") for p in pattern))
def is_string_type(obj):
if PY3:
return isinstance(obj, str)
else:
return isinstance(obj, basestring)
def lazyattr(attrname):
"""Decorator to get lazy attribute initialization.
Composes with @property. Force reinitialization by deleting the <attrname>.
"""
def get_decorated(fn):
def cached_getter(obj):
try:
return getattr(obj, attrname)
except AttributeError:
val = fn(obj)
setattr(obj, attrname, val)
return val
return cached_getter
return get_decorated
def mapall(fn, *seq):
"""Like functools.map(), but return a list instead of an iterator.
This means all side effects of fn take place even without iterating the
result.
"""
return list(map(fn, *seq))
def normalize_time(timestamp):
"""Convert time into locale aware datetime string object."""
t = time.strftime("%c", time.localtime(timestamp))
if not dnf.pycomp.PY3:
current_locale_setting = locale.getlocale()[1]
if current_locale_setting:
t = t.decode(current_locale_setting)
return t
def on_ac_power():
"""Decide whether we are on line power.
Returns True if we are on line power, False if not, None if it can not be
decided.
"""
try:
ps_folder = "/sys/class/power_supply"
ac_nodes = [node for node in os.listdir(ps_folder) if node.startswith("AC")]
if len(ac_nodes) > 0:
ac_node = ac_nodes[0]
with open("{}/{}/online".format(ps_folder, ac_node)) as ac_status:
data = ac_status.read()
return int(data) == 1
return None
except (IOError, ValueError):
return None
def on_metered_connection():
    """Decide whether we are on metered connection.

    Returns:
      True: if on metered connection
      False: if not
      None: if it can not be decided (no dbus bindings, no NetworkManager)
    """
    try:
        import dbus
    except ImportError:
        return None
    try:
        bus = dbus.SystemBus()
        proxy = bus.get_object("org.freedesktop.NetworkManager",
                               "/org/freedesktop/NetworkManager")
        iface = dbus.Interface(proxy, "org.freedesktop.DBus.Properties")
        metered = iface.Get("org.freedesktop.NetworkManager", "Metered")
    except dbus.DBusException:
        # NetworkManager not running or not on the system bus: undecidable.
        return None
    if metered == 0:  # NM_METERED_UNKNOWN
        return None
    elif metered in (1, 3):  # NM_METERED_YES, NM_METERED_GUESS_YES
        return True
    elif metered in (2, 4):  # NM_METERED_NO, NM_METERED_GUESS_NO
        return False
    else:  # Something undocumented (at least at this moment)
        # BUG FIX: interpolate the value into the message; previously the
        # unformatted pair ("...%r", metered) was passed to ValueError.
        raise ValueError("Unknown value for metered property: %r" % metered)
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries.
    Credit: Python library itertools' documentation.

    Returns a pair of lazy iterators ``(falses, trues)`` built over
    independent tees of *iterable*.
    """
    t1, t2 = itertools.tee(iterable)
    return dnf.pycomp.filterfalse(pred, t1), filter(pred, t2)
def rm_rf(path):
    """Best-effort recursive removal of *path*; OS-level errors (including
    the path not existing) are silently swallowed."""
    try:
        shutil.rmtree(path)
    except OSError:
        return
def split_by(iterable, condition):
    """Split an iterable into tuples by a condition.
    Inserts a separator before each item which meets the condition and then
    cuts the iterable by these separators.

    Yields one tuple per segment; the first yielded tuple is empty when the
    very first item meets *condition*.
    """
    separator = object() # A unique object.
    # Create a function returning tuple of objects before the separator.
    def next_subsequence(it):
        return tuple(itertools.takewhile(lambda e: e != separator, it))

    # Mark each place where the condition is met by the separator.
    marked = insert_if(separator, iterable, condition)

    # The 1st subsequence may be empty if the 1st item meets the condition.
    yield next_subsequence(marked)

    while True:
        subsequence = next_subsequence(marked)
        if not subsequence:
            # takewhile returned nothing: the marked stream is exhausted.
            break
        yield subsequence
def strip_prefix(s, prefix):
    """Return *s* without the leading *prefix*, or None when *s* does not
    start with it."""
    if not s.startswith(prefix):
        return None
    return s[len(prefix):]
def touch(path, no_create=False):
    """Bump *path*'s access/modification times, first creating an empty file
    unless it already exists or *no_create* is True."""
    already_there = os.access(path, os.F_OK)
    if no_create or already_there:
        return os.utime(path, None)
    with open(path, 'a'):
        pass
def _terminal_messenger(tp='write', msg="", out=sys.stdout):
    """Emit *msg* to stream *out* using operation *tp*.

    :param tp: one of 'write', 'flush', 'write_flush' or 'print'
    :param msg: text to emit (unused for 'flush')
    :param out: file-like target stream
    :raises ValueError: for an unrecognized *tp* (not caught below)

    An IOError from the stream (e.g. a closed pipe) is logged as critical
    and swallowed rather than propagated.
    """
    try:
        if tp == 'write':
            out.write(msg)
        elif tp == 'flush':
            out.flush()
        elif tp == 'write_flush':
            out.write(msg)
            out.flush()
        elif tp == 'print':
            print(msg, file=out)
        else:
            raise ValueError('Unsupported type: ' + tp)
    except IOError as e:
        logger.critical('{}: {}'.format(type(e).__name__, ucd(e)))
        pass
def _format_resolve_problems(resolve_problems):
    """
    Format string about problems in resolve
    :param resolve_problems: list with list of strings (output of goal.problem_rules())
    :return: string
    """
    chunks = []
    numbered = len(resolve_problems) > 1
    for index, rules in enumerate(resolve_problems, start=1):
        if numbered:
            header = "\n " + _("Problem") + " %d: " % index
        else:
            header = "\n " + _("Problem") + ": "
        chunks.append(header + "\n - ".join(rules))
    return "".join(chunks)
def _te_nevra(te):
nevra = te.N() + '-'
if te.E() is not None and te.E() != '0':
nevra += te.E() + ':'
return nevra + te.V() + '-' + te.R() + '.' + te.A()
def _log_rpm_trans_with_swdb(rpm_transaction, swdb_transaction):
    """Debug-log every rpm transaction element alongside the software
    database (swdb) transaction items; a post-mortem aid when the two got
    out of sync."""
    logger.debug("Logging transaction elements")
    for rpm_el in rpm_transaction:
        tsi = rpm_el.Key()
        tsi_state = None
        if tsi is not None:
            tsi_state = tsi.state
        msg = "RPM element: '{}', Key(): '{}', Key state: '{}', Failed() '{}': ".format(
            _te_nevra(rpm_el), tsi, tsi_state, rpm_el.Failed())
        logger.debug(msg)
    for tsi in swdb_transaction:
        msg = "SWDB element: '{}', State: '{}', Action: '{}', From repo: '{}', Reason: '{}', " \
              "Get reason: '{}'".format(str(tsi), tsi.state, tsi.action, tsi.from_repo, tsi.reason,
                                        tsi.get_reason())
        logger.debug(msg)
def _sync_rpm_trans_with_swdb(rpm_transaction, swdb_transaction):
    """Propagate per-element results of the rpm transaction into the swdb
    transaction items (state DONE on success, ERROR on failure).

    Elements whose swdb counterpart cannot be matched are logged as
    critical; when any mismatch is found, the full pairing is dumped via
    _log_rpm_trans_with_swdb().
    """
    revert_actions = {libdnf.transaction.TransactionItemAction_DOWNGRADED,
                      libdnf.transaction.TransactionItemAction_OBSOLETED,
                      libdnf.transaction.TransactionItemAction_REMOVE,
                      libdnf.transaction.TransactionItemAction_UPGRADED,
                      libdnf.transaction.TransactionItemAction_REINSTALLED}
    cached_tsi = [tsi for tsi in swdb_transaction]
    el_not_found = False
    error = False
    for rpm_el in rpm_transaction:
        te_nevra = _te_nevra(rpm_el)
        tsi = rpm_el.Key()
        if tsi is None or not hasattr(tsi, "pkg"):
            # The rpm element has no usable key; fall back to matching a
            # still-UNKNOWN swdb item of a "revert" action by NEVRA string.
            for tsi_candidate in cached_tsi:
                if tsi_candidate.state != libdnf.transaction.TransactionItemState_UNKNOWN:
                    continue
                if tsi_candidate.action not in revert_actions:
                    continue
                if str(tsi_candidate) == te_nevra:
                    tsi = tsi_candidate
                    break
        if tsi is None or not hasattr(tsi, "pkg"):
            logger.critical(_("TransactionItem not found for key: {}").format(te_nevra))
            el_not_found = True
            continue
        if rpm_el.Failed():
            tsi.state = libdnf.transaction.TransactionItemState_ERROR
            error = True
        else:
            tsi.state = libdnf.transaction.TransactionItemState_DONE
    # Anything still UNKNOWN was never touched by the rpm transaction.
    for tsi in cached_tsi:
        if tsi.state == libdnf.transaction.TransactionItemState_UNKNOWN:
            logger.critical(_("TransactionSWDBItem not found for key: {}").format(str(tsi)))
            el_not_found = True
    if error:
        logger.debug(_('Errors occurred during transaction.'))
    if el_not_found:
        _log_rpm_trans_with_swdb(rpm_transaction, cached_tsi)
class tmpdir(object):
    """Context manager creating a temporary directory that is recursively
    removed on exit."""
    # used by subscription-manager (src/dnf-plugins/product-id.py)
    def __init__(self):
        prefix = '%s-' % dnf.const.PREFIX
        self.path = tempfile.mkdtemp(prefix=prefix)
    def __enter__(self):
        # The with-statement target receives the directory path.
        return self.path
    def __exit__(self, exc_type, exc_value, traceback):
        rm_rf(self.path)
class Bunch(dict):
    """Dictionary whose keys are also reachable as attributes.

    In DNF, prefer using this over dnf.yum.misc.GenericHolder.
    Credit: Alex Martelli, Doug Hudgeon
    """

    def __init__(self, *args, **kwds):
        dict.__init__(self, *args, **kwds)
        # Aliasing __dict__ to the mapping itself makes item access and
        # attribute access interchangeable.
        self.__dict__ = self

    def __hash__(self):
        # Identity-based hash: the contents are mutable.
        return id(self)
class MultiCallList(list):
    """List that broadcasts attribute access to its elements.

    Reading ``mcl.meth`` yields a callable invoking ``meth`` on every
    element and collecting the results in a list; assigning ``mcl.attr``
    assigns the attribute on every element.
    """

    def __init__(self, iterable):
        super(MultiCallList, self).__init__()
        self.extend(iterable)

    def __getattr__(self, what):
        def broadcast(*args, **kwargs):
            return [getattr(member, what)(*args, **kwargs) for member in self]
        return broadcast

    def __setattr__(self, what, val):
        return [setattr(member, what, val) for member in self]
def _make_lists(transaction):
    """Bucket transaction items into a Bunch keyed by outcome.

    Failed items land in 'failed' regardless of their action; installs and
    removals are further split by reason (user/group/dependency/...).
    """
    b = Bunch({
        'downgraded': [],
        'erased': [],
        'erased_clean': [],
        'erased_dep': [],
        'installed': [],
        'installed_group': [],
        'installed_dep': [],
        'installed_weak': [],
        'reinstalled': [],
        'upgraded': [],
        'failed': [],
    })

    for tsi in transaction:
        if tsi.state == libdnf.transaction.TransactionItemState_ERROR:
            b.failed.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADE:
            b.downgraded.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_INSTALL:
            if tsi.reason == libdnf.transaction.TransactionItemReason_GROUP:
                b.installed_group.append(tsi)
            elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY:
                b.installed_dep.append(tsi)
            elif tsi.reason == libdnf.transaction.TransactionItemReason_WEAK_DEPENDENCY:
                b.installed_weak.append(tsi)
            else:
                # TransactionItemReason_USER
                b.installed.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL:
            b.reinstalled.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_REMOVE:
            if tsi.reason == libdnf.transaction.TransactionItemReason_CLEAN:
                b.erased_clean.append(tsi)
            elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY:
                b.erased_dep.append(tsi)
            else:
                b.erased.append(tsi)
        elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADE:
            b.upgraded.append(tsi)

    return b
def _post_transaction_output(base, transaction, action_callback):
    """Returns a human-readable summary of the results of the
    transaction.
    :param action_callback: function generating output for specific action. It
    takes two parameters - action as a string and list of affected packages for
    this action
    :return: a list of lines containing a human-readable summary of the
    results of the transaction
    """
    def _tsi_or_pkg_nevra_cmp(item1, item2):
        """Compares two transaction items or packages by nevra.
        Used as a fallback when tsi does not contain package object.
        """
        ret = (item1.name > item2.name) - (item1.name < item2.name)
        if ret != 0:
            return ret
        nevra1 = hawkey.NEVRA(name=item1.name, epoch=item1.epoch, version=item1.version,
                              release=item1.release, arch=item1.arch)
        nevra2 = hawkey.NEVRA(name=item2.name, epoch=item2.epoch, version=item2.version,
                              release=item2.release, arch=item2.arch)
        ret = nevra1.evr_cmp(nevra2, base.sack)
        if ret != 0:
            return ret
        return (item1.arch > item2.arch) - (item1.arch < item2.arch)

    list_bunch = dnf.util._make_lists(transaction)
    # Packages the solver dropped (conflicting or with broken dependencies)
    # are reported under 'Skipped'.
    skipped_conflicts, skipped_broken = base._skipped_packages(
        report_problems=False, transaction=transaction)
    skipped = skipped_conflicts.union(skipped_broken)

    out = []
    for (action, tsis) in [(_('Upgraded'), list_bunch.upgraded),
                           (_('Downgraded'), list_bunch.downgraded),
                           (_('Installed'), list_bunch.installed +
                            list_bunch.installed_group +
                            list_bunch.installed_weak +
                            list_bunch.installed_dep),
                           (_('Reinstalled'), list_bunch.reinstalled),
                           (_('Skipped'), skipped),
                           (_('Removed'), list_bunch.erased +
                            list_bunch.erased_dep +
                            list_bunch.erased_clean),
                           (_('Failed'), list_bunch.failed)]:
        out.extend(action_callback(
            action, sorted(tsis, key=functools.cmp_to_key(_tsi_or_pkg_nevra_cmp))))

    return out
def _name_unset_wrapper(input_name):
    """Return *input_name*, substituting a translated "<name-unset>" marker
    for anything falsy (None, empty string, ...)."""
    if input_name:
        return input_name
    return _("<name-unset>")
def _is_file_pattern_present(specs):
    """Return True when any spec in *specs* parses to a filename pattern
    according to dnf.subject.Subject."""
    return any(dnf.subject.Subject(spec)._filename_pattern for spec in specs)
def _is_bootc_host():
    """Return True if the system is managed as an immutable (ostree/bootc)
    container, False otherwise."""
    ostree_booted = '/run/ostree-booted'
    usr = '/usr/'
    # Check if usr is writable and we are in a running ostree system.
    # We want this code to return true only when the system is in locked state. If someone ran
    # bootc overlay or ostree admin unlock we would want normal DNF path to be ran as it will be
    # temporary changes (until reboot).
    return os.path.isfile(ostree_booted) and not os.access(usr, os.W_OK)
| 21,294
|
Python
|
.py
| 546
| 31.091575
| 99
| 0.628549
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,713
|
lock.py
|
rpm-software-management_dnf/dnf/lock.py
|
# lock.py
# DNF Locking Subsystem.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.exceptions import ProcessLockError, ThreadLockError, LockError
from dnf.i18n import _
from dnf.yum import misc
import dnf.logging
import dnf.util
import errno
import fcntl
import hashlib
import logging
import os
import threading
import time
logger = logging.getLogger("dnf")
def _fit_lock_dir(dir_):
    """Return the directory lock files for *dir_* should live in.

    Root locks directly in *dir_*; a regular user gets a private subtree
    under their cache dir, keyed by the SHA-1 of *dir_*.
    """
    if not dnf.util.am_i_root():
        # for regular users the best we currently do is not to clash with
        # another DNF process of the same user. Since dir_ is quite definitely
        # not writable for us, yet significant, use its hash:
        hexdir = hashlib.sha1(dir_.encode('utf-8')).hexdigest()
        dir_ = os.path.join(misc.getCacheDir(), 'locks', hexdir)
    return dir_
def build_download_lock(cachedir, exit_on_lock):
    # Lock serializing package downloads for a given cachedir; blocks on
    # contention unless exit_on_lock is set.
    return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'download_lock.pid'),
                       'cachedir', not exit_on_lock)
def build_metadata_lock(cachedir, exit_on_lock):
    # Lock serializing repository metadata updates for a given cachedir.
    return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'metadata_lock.pid'),
                       'metadata', not exit_on_lock)
def build_rpmdb_lock(persistdir, exit_on_lock):
    # Lock guarding RPMDB access; lives in persistdir rather than cachedir.
    return ProcessLock(os.path.join(_fit_lock_dir(persistdir), 'rpmdb_lock.pid'),
                       'RPMDB', not exit_on_lock)
def build_log_lock(logdir, exit_on_lock):
    # Lock guarding log file rotation/writing in logdir.
    return ProcessLock(os.path.join(_fit_lock_dir(logdir), 'log_lock.pid'),
                       'log', not exit_on_lock)
class ProcessLock(object):
    """Inter-process lock backed by a pid file protected with flock(2),
    combined with a non-blocking per-process thread lock.

    Use as a context manager.  With blocking=True, __enter__ polls until a
    competing process releases the lock; otherwise it raises
    ProcessLockError immediately.
    """

    def __init__(self, target, description, blocking=False):
        self.blocking = blocking
        self.count = 0
        self.description = description
        self.target = target
        self.thread_lock = threading.RLock()

    def _lock_thread(self):
        # Fail fast instead of deadlocking when another thread of this
        # process already holds the lock.
        if not self.thread_lock.acquire(blocking=False):
            msg = '%s already locked by a different thread' % self.description
            raise ThreadLockError(msg)
        self.count += 1

    def _try_lock(self, pid):
        """Attempt to take the pid-file lock on behalf of *pid*.

        Returns *pid* on success (including when this process already owns
        the file), -1 when flock() is currently held elsewhere, or the
        recorded owner's pid when that process is still alive.
        :raises LockError: if the lock file content is not a valid pid.
        """
        fd = os.open(self.target, os.O_CREAT | os.O_RDWR, 0o644)

        try:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except OSError as e:
                if e.errno == errno.EWOULDBLOCK:
                    return -1
                raise

            old_pid = os.read(fd, 20)
            if len(old_pid) == 0:
                # empty file, write our pid
                os.write(fd, str(pid).encode('utf-8'))
                return pid

            try:
                old_pid = int(old_pid)
            except ValueError:
                msg = _('Malformed lock file found: %s.\n'
                        'Ensure no other dnf/yum process is running and '
                        'remove the lock file manually or run '
                        'systemd-tmpfiles --remove dnf.conf.') % (self.target)
                raise LockError(msg)

            if old_pid == pid:
                # already locked by this process
                return pid

            if not os.access('/proc/%d/stat' % old_pid, os.F_OK):
                # locked by a dead process, write our pid
                os.lseek(fd, 0, os.SEEK_SET)
                os.ftruncate(fd, 0)
                os.write(fd, str(pid).encode('utf-8'))
                return pid

            return old_pid
        finally:
            os.close(fd)

    def _unlock_thread(self):
        self.count -= 1
        self.thread_lock.release()

    def __enter__(self):
        dnf.util.ensure_dir(os.path.dirname(self.target))
        self._lock_thread()
        prev_pid = -1
        my_pid = os.getpid()
        pid = self._try_lock(my_pid)
        while pid != my_pid:
            if pid != -1:
                if not self.blocking:
                    self._unlock_thread()
                    msg = '%s already locked by %d' % (self.description, pid)
                    raise ProcessLockError(msg, pid)
                # Log the owning pid only once per distinct owner.
                if prev_pid != pid:
                    msg = _('Waiting for process with pid %d to finish.') % (pid)
                    logger.info(msg)
                    prev_pid = pid
            time.sleep(1)
            pid = self._try_lock(my_pid)

    def __exit__(self, *exc_args):
        if self.count == 1:
            # outermost holder in this process: remove the pid file as well
            os.unlink(self.target)
        self._unlock_thread()
| 5,322
|
Python
|
.py
| 125
| 33.136
| 82
| 0.607074
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,714
|
transaction.py
|
rpm-software-management_dnf/dnf/transaction.py
|
# -*- coding: utf-8 -*-
# transaction.py
# Managing the transaction to be passed to RPM.
#
# Copyright (C) 2013-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import libdnf.transaction
from dnf.i18n import _, C_
# :api - all action constants are considered an API
# per-package actions - from libdnf
PKG_DOWNGRADE = libdnf.transaction.TransactionItemAction_DOWNGRADE
PKG_DOWNGRADED = libdnf.transaction.TransactionItemAction_DOWNGRADED
PKG_INSTALL = libdnf.transaction.TransactionItemAction_INSTALL
PKG_OBSOLETE = libdnf.transaction.TransactionItemAction_OBSOLETE
PKG_OBSOLETED = libdnf.transaction.TransactionItemAction_OBSOLETED
PKG_REINSTALL = libdnf.transaction.TransactionItemAction_REINSTALL
PKG_REINSTALLED = libdnf.transaction.TransactionItemAction_REINSTALLED
PKG_REMOVE = libdnf.transaction.TransactionItemAction_REMOVE
PKG_UPGRADE = libdnf.transaction.TransactionItemAction_UPGRADE
PKG_UPGRADED = libdnf.transaction.TransactionItemAction_UPGRADED

# compatibility alias kept for external API users
PKG_ERASE = PKG_REMOVE

# per-package actions - additional
# NOTE(review): values 101+ presumably chosen to stay clear of the libdnf
# action constants above — confirm against libdnf.
PKG_CLEANUP = 101
PKG_VERIFY = 102
PKG_SCRIPTLET = 103

# transaction-wide actions
TRANS_PREPARATION = 201
TRANS_POST = 202

# packages that appeared on the system
FORWARD_ACTIONS = [
    libdnf.transaction.TransactionItemAction_INSTALL,
    libdnf.transaction.TransactionItemAction_DOWNGRADE,
    libdnf.transaction.TransactionItemAction_OBSOLETE,
    libdnf.transaction.TransactionItemAction_UPGRADE,
    libdnf.transaction.TransactionItemAction_REINSTALL,
]
# packages that got removed from the system
BACKWARD_ACTIONS = [
    libdnf.transaction.TransactionItemAction_DOWNGRADED,
    libdnf.transaction.TransactionItemAction_OBSOLETED,
    libdnf.transaction.TransactionItemAction_UPGRADED,
    libdnf.transaction.TransactionItemAction_REMOVE,
    # TODO: REINSTALLED may and may not belong here; the same NEVRA is in FORWARD_ACTIONS already
    # libdnf.transaction.TransactionItemAction_REINSTALLED,
]

# Translated, user-facing progress labels keyed by action constant.
ACTIONS = {
    # TRANSLATORS: This is for a single package currently being downgraded.
    PKG_DOWNGRADE: C_('currently', 'Downgrading'),
    PKG_DOWNGRADED: _('Cleanup'),
    # TRANSLATORS: This is for a single package currently being installed.
    PKG_INSTALL: C_('currently', 'Installing'),
    PKG_OBSOLETE: _('Obsoleting'),
    PKG_OBSOLETED: _('Obsoleting'),
    # TRANSLATORS: This is for a single package currently being reinstalled.
    PKG_REINSTALL: C_('currently', 'Reinstalling'),
    PKG_REINSTALLED: _('Cleanup'),
    # TODO: 'Removing'?
    PKG_REMOVE: _('Erasing'),
    # TRANSLATORS: This is for a single package currently being upgraded.
    PKG_UPGRADE: C_('currently', 'Upgrading'),
    PKG_UPGRADED: _('Cleanup'),
    PKG_CLEANUP: _('Cleanup'),
    PKG_VERIFY: _('Verifying'),
    PKG_SCRIPTLET: _('Running scriptlet'),
    TRANS_PREPARATION: _('Preparing'),
    # TODO: TRANS_POST
}

# untranslated strings, logging to /var/log/dnf/dnf.rpm.log
FILE_ACTIONS = {
    PKG_DOWNGRADE: 'Downgrade',
    PKG_DOWNGRADED: 'Downgraded',
    PKG_INSTALL: 'Installed',
    PKG_OBSOLETE: 'Obsolete',
    PKG_OBSOLETED: 'Obsoleted',
    PKG_REINSTALL: 'Reinstall',
    PKG_REINSTALLED: 'Reinstalled',
    # TODO: 'Removed'?
    PKG_REMOVE: 'Erase',
    PKG_UPGRADE: 'Upgrade',
    PKG_UPGRADED: 'Upgraded',
    PKG_CLEANUP: 'Cleanup',
    PKG_VERIFY: 'Verified',
    PKG_SCRIPTLET: 'Running scriptlet',
    TRANS_PREPARATION: 'Preparing',
    # TODO: TRANS_POST
}
| 4,397
|
Python
|
.py
| 103
| 39.737864
| 93
| 0.770185
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,715
|
persistor.py
|
rpm-software-management_dnf/dnf/persistor.py
|
# persistor.py
# Persistence data container.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# The current implementation is storing to files in persistdir. Do not depend on
# specific files existing, instead use the persistor API. The underlying
# implementation can change, e.g. for one general file with a serialized dict of
# data etc.
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
import dnf.util
import errno
import fnmatch
import json
import logging
import os
import re
logger = logging.getLogger("dnf")
class JSONDB(object):
    """Minimal JSON-file-backed storage helper shared by the persistors."""

    def _check_json_db(self, json_path):
        """Ensure *json_path* exists, initializing it with an empty list."""
        if not os.path.isfile(json_path):
            # initialize new db
            dnf.util.ensure_dir(os.path.dirname(json_path))
            self._write_json_db(json_path, [])

    def _get_json_db(self, json_path, default=None):
        """Load and return the JSON content of *json_path*.

        An empty file is invalid JSON: it is logged, rewritten with
        *default* (an empty list when not given) and *default* is returned.
        Malformed JSON is logged and also falls back to *default*.
        """
        # BUG FIX: the old signature used a mutable default (default=[]),
        # which was shared between calls.
        if default is None:
            default = []
        with open(json_path, 'r') as f:
            content = f.read()
        if content == "":
            # empty file is invalid json format
            logger.warning(_("%s is empty file"), json_path)
            self._write_json_db(json_path, default)
        else:
            try:
                default = json.loads(content)
            except ValueError as e:
                logger.warning(e)
        return default

    @staticmethod
    def _write_json_db(json_path, content):
        """Serialize *content* as JSON into *json_path* (overwrites)."""
        with open(json_path, 'w') as f:
            json.dump(content, f)
class RepoPersistor(JSONDB):
    """Persistent data kept for repositories.
    Is arch/releasever specific and stores to cachedir.
    """

    def __init__(self, cachedir):
        self.cachedir = cachedir
        # JSON list of repo ids whose metadata is known to be expired.
        self.db_path = os.path.join(self.cachedir, "expired_repos.json")
        self.expired_to_add = set()
        self.reset_last_makecache = False

    @property
    def _last_makecache_path(self):
        # Timestamp stamp file touched after each successful makecache run.
        return os.path.join(self.cachedir, "last_makecache")

    def get_expired_repos(self):
        """Return the set of expired repo ids, or None when the cache could
        not be read."""
        try:
            self._check_json_db(self.db_path)
            return set(self._get_json_db(self.db_path))
        except OSError as e:
            logger.warning(_("Failed to load expired repos cache: %s"), e)
            return None

    def save(self):
        """Persist the expired-repos set and optionally refresh the
        makecache stamp.

        Returns False on failure, True when the stamp was refreshed, and
        None when nothing beyond the repo list needed doing.
        """
        try:
            self._check_json_db(self.db_path)
            self._write_json_db(self.db_path, list(self.expired_to_add))
        except OSError as e:
            logger.warning(_("Failed to store expired repos cache: %s"), e)
            return False
        if self.reset_last_makecache:
            try:
                dnf.util.touch(self._last_makecache_path)
                return True
            except IOError:
                logger.warning(_("Failed storing last makecache time."))
                return False

    def since_last_makecache(self):
        """Seconds elapsed since the last recorded makecache run, or None."""
        try:
            return int(dnf.util.file_age(self._last_makecache_path))
        except OSError:
            logger.warning(_("Failed determining last makecache time."))
            return None
class TempfilePersistor(JSONDB):
    """Tracks temporary (downloaded) files in a JSON db under cachedir."""

    def __init__(self, cachedir):
        self.db_path = os.path.join(cachedir, "tempfiles.json")
        self.tempfiles_to_add = set()
        self._empty = False

    def get_saved_tempfiles(self):
        """Return the list of tempfile paths recorded in the db."""
        self._check_json_db(self.db_path)
        return self._get_json_db(self.db_path)

    def save(self):
        """Flush pending additions, or clear the db if empty() was called."""
        if not (self._empty or self.tempfiles_to_add):
            return
        self._check_json_db(self.db_path)
        if self._empty:
            self._write_json_db(self.db_path, [])
            return
        # Merge pending paths with what is already on disk.
        merged = set(self._get_json_db(self.db_path)) | self.tempfiles_to_add
        self._write_json_db(self.db_path, list(merged))

    def empty(self):
        """Mark the db to be cleared on the next save()."""
        self._empty = True
| 4,694
|
Python
|
.py
| 117
| 32.632479
| 80
| 0.649978
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,716
|
sack.py
|
rpm-software-management_dnf/dnf/sack.py
|
# sack.py
# The dnf.Sack class, derived from hawkey.Sack
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf.util
import dnf.package
import dnf.query
import logging
import hawkey
import os
from dnf.pycomp import basestring
from dnf.i18n import _
logger = logging.getLogger("dnf")
class Sack(hawkey.Sack):
    # :api
    """DNF's package sack: a thin wrapper over hawkey.Sack that carries
    selected configuration and produces DNF Query objects."""

    def __init__(self, *args, **kwargs):
        super(Sack, self).__init__(*args, **kwargs)

    def _configure(self, installonly=None, installonly_limit=0, allow_vendor_change=None):
        # Push selected configuration values down onto the hawkey sack.
        if installonly:
            self.installonly = installonly
        self.installonly_limit = installonly_limit
        if allow_vendor_change is not None:
            self.allow_vendor_change = allow_vendor_change
            if allow_vendor_change is False:
                logger.warning(_("allow_vendor_change is disabled. This option is currently not supported for downgrade and distro-sync commands"))

    def query(self, flags=0):
        # :api
        """Factory function returning a DNF Query."""
        return dnf.query.Query(self, flags)
def _build_sack(base):
    """Construct a Sack wired to *base*'s configuration (arch, cachedir,
    installroot, hawkey logging)."""
    cachedir = base.conf.cachedir
    # create the dir ourselves so we have the permissions under control:
    dnf.util.ensure_dir(cachedir)
    # NOTE(review): dnf.const is used but not imported by this module;
    # presumably made reachable via the dnf package import — confirm.
    return Sack(pkgcls=dnf.package.Package, pkginitval=base,
                arch=base.conf.substitutions["arch"],
                cachedir=cachedir, rootdir=base.conf.installroot,
                logfile=os.path.join(base.conf.logdir, dnf.const.LOG_HAWKEY),
                logdebug=base.conf.logfilelevel > 9)
def _rpmdb_sack(base):
    """Return a new Sack loaded only with installed packages (@System repo)."""
    # used by subscription-manager (src/dnf-plugins/product-id.py)
    sack = _build_sack(base)
    try:
        # It can fail if rpmDB is not present
        sack.load_system_repo(build_cache=False)
    except IOError:
        pass
    return sack
def rpmdb_sack(base):
    # :api
    """
    Returns a new instance of sack containing only installed packages (@System repo)
    Useful to get list of the installed RPMs after transaction.
    """
    # Thin public wrapper around the private helper.
    return _rpmdb_sack(base)
| 3,043
|
Python
|
.py
| 71
| 37.859155
| 147
| 0.717905
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,717
|
drpm.py
|
rpm-software-management_dnf/dnf/drpm.py
|
# drpm.py
# Delta RPM support
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from binascii import hexlify
from dnf.yum.misc import unlink_f
from dnf.i18n import _
import dnf.callback
import dnf.logging
import dnf.repo
import hawkey
import logging
import libdnf.repo
import os
APPLYDELTA = '/usr/bin/applydeltarpm'
logger = logging.getLogger("dnf")
class DeltaPayload(dnf.repo.PackagePayload):
    """Download payload for a delta RPM; on a successful transfer it queues
    itself with its DeltaInfo for rebuild into the full RPM."""

    def __init__(self, delta_info, delta, pkg, progress):
        super(DeltaPayload, self).__init__(pkg, progress)
        self.delta_info = delta_info
        self.delta = delta

    def __str__(self):
        return os.path.basename(self.delta.location)

    def _end_cb(self, cbdata, lr_status, msg):
        """Transfer-finished callback; enqueue the delta for rebuild unless
        the transfer errored."""
        super(DeltaPayload, self)._end_cb(cbdata, lr_status, msg)
        if lr_status != libdnf.repo.PackageTargetCB.TransferStatus_ERROR:
            self.delta_info.enqueue(self)

    def _target_params(self):
        """Build the libdnf PackageTarget keyword arguments for the delta."""
        delta = self.delta
        ctype, csum = delta.chksum
        ctype = hawkey.chksum_name(ctype)
        chksum = hexlify(csum).decode()

        ctype_code = libdnf.repo.PackageTarget.checksumType(ctype)
        if ctype_code == libdnf.repo.PackageTarget.ChecksumType_UNKNOWN:
            logger.warning(_("unsupported checksum type: %s"), ctype)

        return {
            'relative_url' : delta.location,
            'checksum_type' : ctype_code,
            'checksum' : chksum,
            'expectedsize' : delta.downloadsize,
            'base_url' : delta.baseurl,
        }

    @property
    def download_size(self):
        # Size of the delta itself — what is actually transferred.
        return self.delta.downloadsize

    @property
    def _full_size(self):
        # Size of the full package the delta rebuilds into.
        return self.pkg.downloadsize

    def localPkg(self):
        # The delta is downloaded next to where the full package would go.
        location = self.delta.location
        return os.path.join(self.pkg.repo.pkgdir, os.path.basename(location))
class DeltaInfo(object):
    """Coordinates delta-RPM lookup and parallel applydeltarpm rebuilds."""

    def __init__(self, query, progress, deltarpm_percentage=None):
        '''A delta lookup and rebuild context
        query -- installed packages to use when looking up deltas
        progress -- progress obj to display finished delta rebuilds
        '''
        self.deltarpm_installed = False
        if os.access(APPLYDELTA, os.X_OK):
            self.deltarpm_installed = True
            # One concurrent rebuild per online CPU, 4 as a fallback.
            try:
                self.deltarpm_jobs = os.sysconf('SC_NPROCESSORS_ONLN')
            except (TypeError, ValueError):
                self.deltarpm_jobs = 4
        if deltarpm_percentage is None:
            # NOTE(review): dnf.conf is used but not imported by this module;
            # presumably reachable through the dnf package — confirm.
            self.deltarpm_percentage = dnf.conf.Conf().deltarpm_percentage
        else:
            self.deltarpm_percentage = deltarpm_percentage
        self.query = query
        self.progress = progress
        self.queue = []  # payloads waiting for a free rebuild slot
        self.jobs = {}  # pid -> payload for running applydeltarpm processes
        self.err = {}  # pkg -> list of error message strings

    def delta_factory(self, po, progress):
        '''Turn a po to Delta RPM po, if possible'''
        if not self.deltarpm_installed:
            # deltarpm is not installed
            return None
        if not po.repo.deltarpm or not self.deltarpm_percentage:
            # drpm disabled
            return None
        if po._is_local_pkg():
            # drpm disabled for local
            return None
        if os.path.exists(po.localPkg()):
            # already there
            return None
        # Only worthwhile when the delta is smaller than the configured
        # percentage of the full package size.
        best = po._size * self.deltarpm_percentage / 100
        best_delta = None
        for ipo in self.query.filter(name=po.name, arch=po.arch):
            delta = po.get_delta_from_evr(ipo.evr)
            if delta and delta.downloadsize < best:
                best = delta.downloadsize
                best_delta = delta
        if best_delta:
            return DeltaPayload(self, best_delta, po, progress)
        return None

    def job_done(self, pid, code):
        # handle a finished delta rebuild
        logger.log(dnf.logging.SUBDEBUG, 'drpm: %d: return code: %d, %d', pid,
                   code >> 8, code & 0xff)
        pload = self.jobs.pop(pid)
        pkg = pload.pkg
        if code != 0:
            unlink_f(pload.pkg.localPkg())
            self.err[pkg] = [_('Delta RPM rebuild failed')]
        elif not pload.pkg.verifyLocalPkg():
            self.err[pkg] = [_('Checksum of the delta-rebuilt RPM failed')]
        else:
            # Rebuild succeeded: the downloaded delta file is no longer needed.
            os.unlink(pload.localPkg())
            self.progress.end(pload, dnf.callback.STATUS_DRPM, _('done'))

    def start_job(self, pload):
        # spawn a delta rebuild job
        spawn_args = [APPLYDELTA, APPLYDELTA,
                      '-a', pload.pkg.arch,
                      pload.localPkg(), pload.pkg.localPkg()]
        pid = os.spawnl(os.P_NOWAIT, *spawn_args)
        logger.log(dnf.logging.SUBDEBUG, 'drpm: spawned %d: %s', pid,
                   ' '.join(spawn_args[1:]))
        self.jobs[pid] = pload

    def enqueue(self, pload):
        # process finished jobs, start new ones
        while self.jobs:
            pid, code = os.waitpid(-1, os.WNOHANG)
            if not pid:
                break
            self.job_done(pid, code)
        self.queue.append(pload)
        while len(self.jobs) < self.deltarpm_jobs:
            self.start_job(self.queue.pop(0))
            if not self.queue:
                break

    def wait(self):
        '''Wait until all jobs have finished'''
        while self.jobs:
            pid, code = os.wait()
            self.job_done(pid, code)
            # A freed slot lets the next queued rebuild start.
            if self.queue:
                self.start_job(self.queue.pop(0))
| 6,352
|
Python
|
.py
| 156
| 32.205128
| 78
| 0.629294
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,718
|
match_counter.py
|
rpm-software-management_dnf/dnf/match_counter.py
|
# match_counter.py
# Implements class MatchCounter.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from functools import reduce
# Relative worth of a search hit per package attribute: a hit on the
# package name outweighs hits in summary, description or url.
WEIGHTS = {
    'name'		: 7,
    'summary'		: 4,
    'description'	: 2,
    'url'		: 1,
}
def _canonize_string_set(sset, length):
""" Ordered sset with empty strings prepended. """
current = len(sset)
l = [''] * (length - current) + sorted(sset)
return l
class MatchCounter(dict):
    """Map packages to which of their attributes matched in a search against
    what values.

    The mapping is: ``package -> [(key, needle), ... ]``.
    """

    @staticmethod
    def _eval_weights(pkg, matches):
        # how much is each match worth and return their sum:
        def weight(match):
            key = match[0]
            needle = match[1]
            haystack = getattr(pkg, key)
            if key == "name" and haystack == needle:
                # if package matches exactly by name, increase weight
                return 2 * WEIGHTS[key]
            return WEIGHTS[key]
        return sum(map(weight, matches))

    def _key_func(self):
        """Get the key function used for sorting matches.

        Returned function is:

            pkg -> (negated_weights_sum, package_name)

        i.e. packages with the heaviest matches first, ties broken
        alphabetically by package name.
        """
        def get_key(pkg):
            return (
                # use negative value to make sure packages with the highest weight come first
                - self._eval_weights(pkg, self[pkg]),
                # then order packages alphabetically
                pkg.name,
            )
        return get_key

    def _max_needles(self):
        """Return the max count of needles of all packages."""
        if self:
            return max(len(self.matched_needles(pkg)) for pkg in self)
        return 0

    def add(self, pkg, key, needle):
        # Record that *needle* matched attribute *key* of *pkg*.
        self.setdefault(pkg, []).append((key, needle))

    def dump(self):
        # Debugging aid: print every package with its raw match list.
        for pkg in self:
            print('%s\t%s' % (pkg, self[pkg]))

    def matched_haystacks(self, pkg):
        # The attribute values (e.g. the summary text) that were hit.
        return set(getattr(pkg, m[0]) for m in self[pkg])

    def matched_keys(self, pkg):
        # return keys in the same order they appear in the list
        result = []
        for i in self[pkg]:
            if i[0] in result:
                continue
            result.append(i[0])
        return result

    def matched_needles(self, pkg):
        # The distinct search terms that hit this package.
        return set(m[1] for m in self[pkg])

    def sorted(self, reverse=False, limit_to=None):
        # NOTE(review): the *reverse* parameter is accepted but never
        # applied to the sort below -- presumably historical; confirm
        # with callers before relying on it.
        keys = limit_to if limit_to else self.keys()
        return sorted(keys, key=self._key_func())

    def total(self):
        # Total number of recorded (key, needle) hits across all packages.
        return reduce(lambda total, pkg: total + len(self[pkg]), self, 0)
| 3,924
|
Python
|
.py
| 95
| 34.221053
| 93
| 0.644865
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,719
|
plugin.py
|
rpm-software-management_dnf/dnf/plugin.py
|
# plugin.py
# The interface for building DNF plugins.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import glob
import importlib
import inspect
import logging
import operator
import os
import rpm
import sys
import traceback
import libdnf
import dnf.logging
import dnf.pycomp
import dnf.util
from dnf.i18n import _
logger = logging.getLogger('dnf')

# Synthetic package name that dynamically loaded plugin modules are
# attached to; its presence in sys.modules marks plugins as loaded.
DYNAMIC_PACKAGE = 'dnf.plugin.dynamic'
class Plugin(object):
    """The base class custom plugins must derive from. #:api

    Subclasses override the hook methods (pre_config, config, resolved,
    sack, pre_transaction, transaction); the defaults are no-ops.
    """

    name = '<invalid>'
    config_name = None

    @classmethod
    def read_config(cls, conf):
        # :api
        # Parse every "<name>.conf" found under conf.pluginconfpath.
        parser = libdnf.conf.ConfigParser()
        section = cls.config_name or cls.name
        candidates = ('%s/%s.conf' % (path, section) for path in conf.pluginconfpath)
        for candidate in candidates:
            if not os.path.isfile(candidate):
                continue
            try:
                parser.read(candidate)
            except Exception as e:
                raise dnf.exceptions.ConfigError(_("Parsing file failed: %s") % str(e))
        return parser

    def __init__(self, base, cli):
        # :api
        self.base = base
        self.cli = cli

    def pre_config(self):
        # :api
        pass

    def config(self):
        # :api
        pass

    def resolved(self):
        # :api
        pass

    def sack(self):
        # :api
        pass

    def pre_transaction(self):
        # :api
        pass

    def transaction(self):
        # :api
        pass
class Plugins(object):
    """Registry that discovers, instantiates and drives all DNF plugins."""

    def __init__(self):
        # plugin_cls: loaded Plugin subclasses; plugins: their instances.
        self.plugin_cls = []
        self.plugins = []

    def __del__(self):
        self._unload()

    def _caller(self, method):
        # Invoke hook *method* on every plugin instance. dnf's own errors
        # propagate; anything else is logged so one broken plugin cannot
        # abort the whole run.
        for plugin in self.plugins:
            try:
                getattr(plugin, method)()
            except dnf.exceptions.Error:
                raise
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                except_list = traceback.format_exception(exc_type, exc_value, exc_traceback)
                logger.critical(''.join(except_list))

    def _check_enabled(self, conf, enable_plugins):
        """Checks whether plugins are enabled or disabled in configuration files
        and removes disabled plugins from list"""
        # Iterate over a copy because we remove from plugin_cls inside.
        for plug_cls in self.plugin_cls[:]:
            name = plug_cls.name
            # an explicit enable pattern overrides the config file
            if any(fnmatch.fnmatch(name, pattern) for pattern in enable_plugins):
                continue
            parser = plug_cls.read_config(conf)
            # has it enabled = False?
            disabled = (parser.has_section('main')
                        and parser.has_option('main', 'enabled')
                        and not parser.getboolean('main', 'enabled'))
            if disabled:
                self.plugin_cls.remove(plug_cls)

    def _load(self, conf, skips, enable_plugins):
        """Dynamically load relevant plugin modules."""
        # DYNAMIC_PACKAGE in sys.modules doubles as the "already loaded" flag.
        if DYNAMIC_PACKAGE in sys.modules:
            raise RuntimeError("load_plugins() called twice")
        sys.modules[DYNAMIC_PACKAGE] = package = dnf.pycomp.ModuleType(DYNAMIC_PACKAGE)
        package.__path__ = []

        files = _get_plugins_files(conf.pluginpath, skips, enable_plugins)
        _import_modules(package, files)
        # importing registers subclasses of Plugin; snapshot them
        self.plugin_cls = _plugin_classes()[:]
        self._check_enabled(conf, enable_plugins)
        if len(self.plugin_cls) > 0:
            names = sorted(plugin.name for plugin in self.plugin_cls)
            logger.debug(_('Loaded plugins: %s'), ', '.join(names))

    def _run_pre_config(self):
        self._caller('pre_config')

    def _run_config(self):
        self._caller('config')

    def _run_init(self, base, cli=None):
        # Instantiate every surviving plugin class.
        for p_cls in self.plugin_cls:
            plugin = p_cls(base, cli)
            self.plugins.append(plugin)

    def run_sack(self):
        self._caller('sack')

    def run_resolved(self):
        self._caller('resolved')

    def run_pre_transaction(self):
        self._caller('pre_transaction')

    def run_transaction(self):
        self._caller('transaction')

    def _unload(self):
        if DYNAMIC_PACKAGE in sys.modules:
            logger.log(dnf.logging.DDEBUG, 'Plugins were unloaded.')
            del sys.modules[DYNAMIC_PACKAGE]

    def unload_removed_plugins(self, transaction):
        """
        Unload plugins that were removed in the `transaction`.
        """
        if not transaction.remove_set:
            return

        # gather all installed plugins and their files
        plugins = dict()
        for plugin in self.plugins:
            plugins[inspect.getfile(plugin.__class__)] = plugin

        # gather all removed files that are plugin files
        plugin_files = set(plugins.keys())
        erased_plugin_files = set()
        for pkg in transaction.remove_set:
            erased_plugin_files.update(plugin_files.intersection(pkg.files))
        if not erased_plugin_files:
            return

        # check whether removed plugin file is added at the same time (upgrade of a plugin)
        for pkg in transaction.install_set:
            erased_plugin_files.difference_update(pkg._header[rpm.RPMTAG_FILENAMES])

        # unload plugins that were removed in transaction
        for plugin_file in erased_plugin_files:
            self.plugins.remove(plugins[plugin_file])
def _plugin_classes():
    # Every class derived from Plugin, i.e. every plugin class registered
    # by having had its module imported.
    return Plugin.__subclasses__()
def _import_modules(package, py_files):
    """Import each plugin file in *py_files* into *package*, logging failures."""
    for path in py_files:
        directory, filename = os.path.split(path)
        package.__path__.append(directory)
        modname, _ext = os.path.splitext(filename)
        qualified = '%s.%s' % (package.__name__, modname)
        try:
            importlib.import_module(qualified)
        except Exception as e:
            # a broken plugin must not abort dnf; log and continue
            logger.error(_('Failed loading plugin "%s": %s'), modname, e)
            logger.log(dnf.logging.SUBDEBUG, '', exc_info=True)
def _get_plugins_files(paths, disable_plugins, enable_plugins):
    """Return plugin ``*.py`` paths under *paths*, honoring dis/enable patterns.

    A plugin is taken unless it matches a disable pattern that is not
    overridden by an enable pattern. Patterns (fnmatch-style) that never
    matched any plugin are reported with a warning.
    """
    plugins = []
    disable_plugins = set(disable_plugins)
    enable_plugins = set(enable_plugins)
    # track which patterns hit at least one plugin, to warn about typos below
    pattern_enable_found = set()
    pattern_disable_found = set()
    for p in paths:
        for fn in glob.glob('%s/*.py' % p):
            (plugin_name, dummy) = os.path.splitext(os.path.basename(fn))
            matched = True
            enable_pattern_tested = False
            for pattern_skip in disable_plugins:
                if _plugin_name_matches_pattern(plugin_name, pattern_skip):
                    pattern_disable_found.add(pattern_skip)
                    matched = False
                    # an enable pattern overrides a matching disable pattern
                    for pattern_enable in enable_plugins:
                        if _plugin_name_matches_pattern(plugin_name, pattern_enable):
                            matched = True
                            pattern_enable_found.add(pattern_enable)
                    enable_pattern_tested = True
            if not enable_pattern_tested:
                # still record enable-pattern hits for the warning below
                for pattern_enable in enable_plugins:
                    if _plugin_name_matches_pattern(plugin_name, pattern_enable):
                        pattern_enable_found.add(pattern_enable)
            if matched:
                plugins.append(fn)
    enable_not_found = enable_plugins.difference(pattern_enable_found)
    if enable_not_found:
        logger.warning(_("No matches found for the following enable plugin patterns: {}").format(
            ", ".join(sorted(enable_not_found))))
    disable_not_found = disable_plugins.difference(pattern_disable_found)
    if disable_not_found:
        logger.warning(_("No matches found for the following disable plugin patterns: {}").format(
            ", ".join(sorted(disable_not_found))))
    return plugins
def _plugin_name_matches_pattern(plugin_name, pattern):
"""
Checks plugin name matches the pattern.
The alternative plugin name using dashes instead of underscores is tried
in case of original name is not matched.
(see https://bugzilla.redhat.com/show_bug.cgi?id=1980712)
"""
try_names = set((plugin_name, plugin_name.replace('_', '-')))
return any(fnmatch.fnmatch(name, pattern) for name in try_names)
def register_command(command_class):
    # :api
    """A class decorator for automatic command registration."""
    # __init__ of the generated plugin class: registers the command with
    # the CLI (cli is None when dnf runs without a command line).
    def __init__(self, base, cli):
        if cli:
            cli.register_command(command_class)
    # Dynamically create a Plugin subclass named "<Command>Plugin" whose
    # only job is registering the command; plugin discovery then picks it
    # up via Plugin.__subclasses__().
    plugin_class = type(str(command_class.__name__ + 'Plugin'),
                        (dnf.Plugin,),
                        {"__init__": __init__,
                         "name": command_class.aliases[0]})
    command_class._plugin = plugin_class
    return command_class
| 9,593
|
Python
|
.py
| 233
| 32.403433
| 98
| 0.626638
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,720
|
transaction_sr.py
|
rpm-software-management_dnf/dnf/transaction_sr.py
|
# Copyright (C) 2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import libdnf
import hawkey
from dnf.i18n import _
import dnf.exceptions
import json
VERSION_MAJOR = 0
VERSION_MINOR = 0
VERSION = "%s.%s" % (VERSION_MAJOR, VERSION_MINOR)
"""
The version of the stored transaction.
MAJOR version denotes backwards incompatible changes (old dnf won't work with
new transaction JSON).
MINOR version denotes extending the format without breaking backwards
compatibility (old dnf can work with new transaction JSON). Forwards
compatibility needs to be handled by being able to process the old format as
well as the new one.
"""
class TransactionError(dnf.exceptions.Error):
    # A single problem detected while building or replaying a stored
    # transaction; callers aggregate these into TransactionReplayError.
    def __init__(self, msg):
        super(TransactionError, self).__init__(msg)
class TransactionReplayError(dnf.exceptions.Error):
    """Aggregate error raised when replaying a stored transaction fails."""

    def __init__(self, filename, errors):
        """
        :param filename: The name of the transaction file being replayed
        :param errors: a list of error classes or a string with an error description
        """
        # store args in case someone wants to read them from a caught exception
        self.filename = filename
        if isinstance(errors, (list, tuple)):
            self.errors = errors
        else:
            self.errors = [errors]
        if filename:
            # BUG FIX: the template must contain the "{filename}" placeholder,
            # otherwise .format(filename=...) is a no-op and the file name
            # never appears in the message.
            msg = _('The following problems occurred while replaying the transaction from file "{filename}":').format(filename=filename)
        else:
            msg = _('The following problems occurred while running a transaction:')
        # append each individual problem on its own indented line
        for error in self.errors:
            msg += "\n " + str(error)
        super(TransactionReplayError, self).__init__(msg)
class IncompatibleTransactionVersionError(TransactionReplayError):
    # Raised when the stored transaction's major version differs from
    # VERSION_MAJOR, i.e. this code cannot process the format at all.
    def __init__(self, filename, msg):
        super(IncompatibleTransactionVersionError, self).__init__(filename, msg)
def _check_version(version, filename):
    """Validate the "version" string of a stored transaction.

    :param version: version string in "MAJOR.MINOR" form
    :param filename: transaction file name, used in error messages
    :raises TransactionReplayError: if the string is malformed
    :raises IncompatibleTransactionVersionError: on a major-version mismatch
    """
    # BUG FIX: version.split('.') used to be unpacked directly into two
    # variables, so a string without exactly one dot (e.g. "1" or "1.2.3")
    # escaped as an unhandled ValueError instead of a TransactionReplayError.
    parts = version.split('.')
    if len(parts) != 2:
        raise TransactionReplayError(
            filename,
            _('Invalid version "{version}", "MAJOR.MINOR" expected.').format(version=version)
        )
    major, minor = parts
    try:
        major = int(major)
    except ValueError:
        raise TransactionReplayError(
            filename,
            _('Invalid major version "{major}", number expected.').format(major=major)
        )
    try:
        int(minor)  # minor is unused, just check it's a number
    except ValueError:
        raise TransactionReplayError(
            filename,
            _('Invalid minor version "{minor}", number expected.').format(minor=minor)
        )
    if major != VERSION_MAJOR:
        raise IncompatibleTransactionVersionError(
            filename,
            _('Incompatible major version "{major}", supported major version is "{major_supp}".')
            .format(major=major, major_supp=VERSION_MAJOR)
        )
def serialize_transaction(transaction):
    """
    Serializes a transaction to a data structure that is equivalent to the stored JSON format.

    :param transaction: the transaction to serialize (an instance of dnf.db.history.TransactionWrapper)
    """
    data = {
        "version": VERSION,
    }
    rpms = []
    groups = []
    environments = []
    # a missing transaction serializes to just the version stamp
    if transaction is None:
        return data
    for tsi in transaction.packages():
        if tsi.is_package():
            # plain rpm item: action + NEVRA + install reason + source repo
            rpms.append({
                "action": tsi.action_name,
                "nevra": tsi.nevra,
                "reason": libdnf.transaction.TransactionItemReasonToString(tsi.reason),
                "repo_id": tsi.from_repo
            })
        elif tsi.is_group():
            # comps group item: serialize the group plus its package list
            group = tsi.get_group()
            group_data = {
                "action": tsi.action_name,
                "id": group.getGroupId(),
                "packages": [],
                "package_types": libdnf.transaction.compsPackageTypeToString(group.getPackageTypes())
            }
            for pkg in group.getPackages():
                group_data["packages"].append({
                    "name": pkg.getName(),
                    "installed": pkg.getInstalled(),
                    "package_type": libdnf.transaction.compsPackageTypeToString(pkg.getPackageType())
                })
            groups.append(group_data)
        elif tsi.is_environment():
            # comps environment item: serialize the environment plus its groups
            env = tsi.get_environment()
            env_data = {
                "action": tsi.action_name,
                "id": env.getEnvironmentId(),
                "groups": [],
                "package_types": libdnf.transaction.compsPackageTypeToString(env.getPackageTypes())
            }
            for grp in env.getGroups():
                env_data["groups"].append({
                    "id": grp.getGroupId(),
                    "installed": grp.getInstalled(),
                    "group_type": libdnf.transaction.compsPackageTypeToString(grp.getGroupType())
                })
            environments.append(env_data)
    # only emit non-empty sections, matching what the loader expects
    if rpms:
        data["rpms"] = rpms
    if groups:
        data["groups"] = groups
    if environments:
        data["environments"] = environments
    return data
class TransactionReplay(object):
"""
A class that encapsulates replaying a transaction. The transaction data are
loaded and stored when the class is initialized. The transaction is run by
calling the `run()` method, after the transaction is created (but before it is
performed), the `post_transaction()` method needs to be called to verify no
extra packages were pulled in and also to fix the reasons.
"""
def __init__(
    self,
    base,
    filename="",
    data=None,
    ignore_extras=False,
    ignore_installed=False,
    skip_unavailable=False
):
    """
    :param base: the dnf base
    :param filename: the filename to load the transaction from (conflicts with the 'data' argument)
    :param data: the dictionary to load the transaction from (conflicts with the 'filename' argument)
    :param ignore_extras: whether to ignore extra package pulled into the transaction
    :param ignore_installed: whether to ignore installed versions of packages
    :param skip_unavailable: whether to skip transaction packages that aren't available
    """
    self._base = base
    self._filename = filename
    self._ignore_installed = ignore_installed
    self._ignore_extras = ignore_extras
    self._skip_unavailable = skip_unavailable
    # non-strict mode implies skipping unavailable packages
    if not self._base.conf.strict:
        self._skip_unavailable = True
    # nevras seen in the stored transaction; post_transaction() uses this
    # to detect extra packages pulled into the new transaction
    self._nevra_cache = set()
    # nevra -> reason mapping applied to history in post_transaction()
    self._nevra_reason_cache = {}
    self._warnings = []
    if filename and data:
        raise ValueError(_("Conflicting TransactionReplay arguments have been specified: filename, data"))
    elif filename:
        self._load_from_file(filename)
    else:
        self._load_from_data(data)
def _load_from_file(self, fn):
    """Parse the JSON transaction file *fn* and load its contents."""
    self._filename = fn
    with open(fn, "r") as handle:
        try:
            parsed = json.load(handle)
        except json.decoder.JSONDecodeError as e:
            raise TransactionReplayError(fn, str(e) + ".")
    try:
        self._load_from_data(parsed)
    except TransactionError as e:
        # wrap so the file name appears in the final error message
        raise TransactionReplayError(fn, e)
def _load_from_data(self, data):
    """Validate *data* and cache its rpms/groups/environments sections."""
    self._replay_data = data
    self._verify_toplevel_json(self._replay_data)
    # each section is optional and must be a JSON array when present
    for attr, key in (("_rpms", "rpms"),
                      ("_groups", "groups"),
                      ("_environments", "environments")):
        section = data.get(key, [])
        self._assert_type(section, list, key, "array")
        setattr(self, attr, section)
def _raise_or_warn(self, warn_only, msg):
    """Collect *msg* as a warning when *warn_only*, else fail with it."""
    if not warn_only:
        raise TransactionError(msg)
    self._warnings.append(msg)
def _assert_type(self, value, t, id, expected):
    """Raise TransactionError unless *value* is an instance of *t*.

    *id* and *expected* only feed the error message (JSON key name and
    the JSON type the user should have supplied).
    """
    if isinstance(value, t):
        return
    raise TransactionError(_('Unexpected type of "{id}", {exp} expected.').format(id=id, exp=expected))
def _verify_toplevel_json(self, replay_data):
    """Check that *replay_data* carries a compatible "version" key.

    :raises TransactionReplayError: on a missing, ill-typed or
        incompatible version
    """
    fn = self._filename
    if "version" not in replay_data:
        # BUG FIX: format the *translated* template, not the msgid -- the
        # original called .format() on the literal inside _(), so the
        # already-formatted string was used for the translation lookup and
        # could never match a catalog entry.
        raise TransactionReplayError(fn, _('Missing key "{key}".').format(key="version"))
    self._assert_type(replay_data["version"], str, "version", "string")
    _check_version(replay_data["version"], fn)
def _replay_pkg_action(self, pkg_data):
    """Queue one stored rpm item (install/upgrade/remove/...) on the goal.

    :param pkg_data: one entry of the stored "rpms" array
    :raises TransactionError: on malformed data or an unexpected action
    """
    try:
        action = pkg_data["action"]
        nevra = pkg_data["nevra"]
        repo_id = pkg_data["repo_id"]
        reason = libdnf.transaction.StringToTransactionItemReason(pkg_data["reason"])
    except KeyError as e:
        raise TransactionError(
            _('Missing object key "{key}" in an rpm.').format(key=e.args[0])
        )
    except IndexError as e:
        raise TransactionError(
            _('Unexpected value of package reason "{reason}" for rpm nevra "{nevra}".')
            .format(reason=pkg_data["reason"], nevra=nevra)
        )
    subj = hawkey.Subject(nevra)
    parsed_nevras = subj.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA])
    if len(parsed_nevras) != 1:
        raise TransactionError(_('Cannot parse NEVRA for package "{nevra}".').format(nevra=nevra))
    parsed_nevra = parsed_nevras[0]
    na = "%s.%s" % (parsed_nevra.name, parsed_nevra.arch)
    query_na = self._base.sack.query().filter(name=parsed_nevra.name, arch=parsed_nevra.arch)
    # a missing epoch parses as None; treat it as epoch 0 for the query
    epoch = parsed_nevra.epoch if parsed_nevra.epoch is not None else 0
    query = query_na.filter(epoch=epoch, version=parsed_nevra.version, release=parsed_nevra.release)
    # In case the package is found in the same repo as in the original
    # transaction, limit the query to that plus installed packages. IOW
    # remove packages with the same NEVRA in case they are found in
    # multiple repos and the repo the package came from originally is one
    # of them.
    # This can e.g. make a difference in the system-upgrade plugin, in case
    # the same NEVRA is in two repos, this makes sure the same repo is used
    # for both download and upgrade steps of the plugin.
    if repo_id:
        query_repo = query.filter(reponame=repo_id)
        if query_repo:
            query = query_repo.union(query.installed())
    if not query:
        self._raise_or_warn(self._skip_unavailable, _('Cannot find rpm nevra "{nevra}".').format(nevra=nevra))
        return
    # a cache to check no extra packages were pulled into the transaction
    if action != "Reason Change":
        self._nevra_cache.add(nevra)
    # store reasons for forward actions and "Removed", the rest of the
    # actions reasons should stay as they were determined by the transaction
    if action in ("Install", "Upgrade", "Downgrade", "Reinstall", "Removed"):
        self._nevra_reason_cache[nevra] = reason
    if action in ("Install", "Upgrade", "Downgrade"):
        if action == "Install" and query_na.installed() and not self._base._get_installonly_query(query_na):
            self._raise_or_warn(self._ignore_installed,
                                _('Package "{na}" is already installed for action "{action}".').format(na=na, action=action))
        sltr = dnf.selector.Selector(self._base.sack).set(pkg=query)
        self._base.goal.install(select=sltr, optional=not self._base.conf.strict)
    elif action == "Reinstall":
        query = query.available()
        if not query:
            self._raise_or_warn(self._skip_unavailable,
                                _('Package nevra "{nevra}" not available in repositories for action "{action}".')
                                .format(nevra=nevra, action=action))
            return
        sltr = dnf.selector.Selector(self._base.sack).set(pkg=query)
        self._base.goal.install(select=sltr, optional=not self._base.conf.strict)
    elif action in ("Upgraded", "Downgraded", "Reinstalled", "Removed", "Obsoleted"):
        query = query.installed()
        if not query:
            self._raise_or_warn(self._ignore_installed,
                                _('Package nevra "{nevra}" not installed for action "{action}".').format(nevra=nevra, action=action))
            return
        # erasing the original version (the reverse part of an action like
        # e.g. upgrade) is more robust, but we can't do it if
        # skip_unavailable is True, because if the forward part of the
        # action is skipped, we would simply remove the package here
        if not self._skip_unavailable or action == "Removed":
            for pkg in query:
                self._base.goal.erase(pkg, clean_deps=False)
    elif action == "Reason Change":
        self._base.history.set_reason(query[0], reason)
    else:
        raise TransactionError(
            _('Unexpected value of package action "{action}" for rpm nevra "{nevra}".')
            .format(action=action, nevra=nevra)
        )
def _create_swdb_group(self, group_id, pkg_types, pkgs):
    """Build a swdb group record from stored data; None if group unavailable.

    :raises TransactionError: on malformed package entries
    """
    comps_group = self._base.comps._group_by_id(group_id)
    if not comps_group:
        self._raise_or_warn(self._skip_unavailable, _("Group id '%s' is not available.") % group_id)
        return None
    swdb_group = self._base.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types)
    try:
        for pkg in pkgs:
            name = pkg["name"]
            self._assert_type(name, str, "groups.packages.name", "string")
            installed = pkg["installed"]
            self._assert_type(installed, bool, "groups.packages.installed", "boolean")
            package_type = pkg["package_type"]
            self._assert_type(package_type, str, "groups.packages.package_type", "string")
            try:
                swdb_group.addPackage(name, installed, libdnf.transaction.stringToCompsPackageType(package_type))
            except libdnf.error.Error as e:
                # unknown package_type string
                raise TransactionError(str(e))
    except KeyError as e:
        raise TransactionError(
            _('Missing object key "{key}" in groups.packages.').format(key=e.args[0])
        )
    return swdb_group
def _swdb_group_install(self, group_id, pkg_types, pkgs):
    """Record installation of a comps group in the history database."""
    record = self._create_swdb_group(group_id, pkg_types, pkgs)
    if record is None:
        return
    self._base.history.group.install(record)
def _swdb_group_upgrade(self, group_id, pkg_types, pkgs):
    """Record a comps group upgrade; warn/raise if the group isn't installed."""
    if not self._base.history.group.get(group_id):
        self._raise_or_warn(self._ignore_installed,
                            _("Group id '%s' is not installed.") % group_id)
        return
    record = self._create_swdb_group(group_id, pkg_types, pkgs)
    if record is not None:
        self._base.history.group.upgrade(record)
def _swdb_group_downgrade(self, group_id, pkg_types, pkgs):
    """Record a comps group downgrade; warn/raise if the group isn't installed."""
    if not self._base.history.group.get(group_id):
        self._raise_or_warn(self._ignore_installed,
                            _("Group id '%s' is not installed.") % group_id)
        return
    record = self._create_swdb_group(group_id, pkg_types, pkgs)
    if record is not None:
        self._base.history.group.downgrade(record)
def _swdb_group_remove(self, group_id, pkg_types, pkgs):
    """Record a comps group removal; warn/raise if the group isn't installed."""
    if not self._base.history.group.get(group_id):
        self._raise_or_warn(self._ignore_installed,
                            _("Group id '%s' is not installed.") % group_id)
        return
    record = self._create_swdb_group(group_id, pkg_types, pkgs)
    if record is not None:
        self._base.history.group.remove(record)
def _create_swdb_environment(self, env_id, pkg_types, groups):
    """Build a swdb environment record; None if the environment is unavailable.

    :raises TransactionError: on malformed group entries
    """
    comps_env = self._base.comps._environment_by_id(env_id)
    if not comps_env:
        self._raise_or_warn(self._skip_unavailable, _("Environment id '%s' is not available.") % env_id)
        return None
    swdb_env = self._base.history.env.new(env_id, comps_env.name, comps_env.ui_name, pkg_types)
    try:
        for grp in groups:
            id = grp["id"]
            self._assert_type(id, str, "environments.groups.id", "string")
            installed = grp["installed"]
            self._assert_type(installed, bool, "environments.groups.installed", "boolean")
            group_type = grp["group_type"]
            self._assert_type(group_type, str, "environments.groups.group_type", "string")
            try:
                group_type = libdnf.transaction.stringToCompsPackageType(group_type)
            except libdnf.error.Error as e:
                raise TransactionError(str(e))
            # environment members may only be mandatory or optional groups
            if group_type not in (
                libdnf.transaction.CompsPackageType_MANDATORY,
                libdnf.transaction.CompsPackageType_OPTIONAL
            ):
                raise TransactionError(
                    _('Invalid value "{group_type}" of environments.groups.group_type, '
                      'only "mandatory" or "optional" is supported.'
                    ).format(group_type=grp["group_type"])
                )
            swdb_env.addGroup(id, installed, group_type)
    except KeyError as e:
        raise TransactionError(
            _('Missing object key "{key}" in environments.groups.').format(key=e.args[0])
        )
    return swdb_env
def _swdb_environment_install(self, env_id, pkg_types, groups):
    """Record installation of a comps environment in the history database."""
    record = self._create_swdb_environment(env_id, pkg_types, groups)
    if record is None:
        return
    self._base.history.env.install(record)
def _swdb_environment_upgrade(self, env_id, pkg_types, groups):
    """Record an environment upgrade; warn/raise if it isn't installed."""
    if not self._base.history.env.get(env_id):
        self._raise_or_warn(self._ignore_installed,
                            _("Environment id '%s' is not installed.") % env_id)
        return
    record = self._create_swdb_environment(env_id, pkg_types, groups)
    if record is not None:
        self._base.history.env.upgrade(record)
def _swdb_environment_downgrade(self, env_id, pkg_types, groups):
    """Record an environment downgrade; warn/raise if it isn't installed."""
    if not self._base.history.env.get(env_id):
        self._raise_or_warn(self._ignore_installed,
                            _("Environment id '%s' is not installed.") % env_id)
        return
    record = self._create_swdb_environment(env_id, pkg_types, groups)
    if record is not None:
        self._base.history.env.downgrade(record)
def _swdb_environment_remove(self, env_id, pkg_types, groups):
    """Record an environment removal; warn/raise if it isn't installed."""
    if not self._base.history.env.get(env_id):
        self._raise_or_warn(self._ignore_installed,
                            _("Environment id '%s' is not installed.") % env_id)
        return
    record = self._create_swdb_environment(env_id, pkg_types, groups)
    if record is not None:
        self._base.history.env.remove(record)
def get_data(self):
    """Return the raw parsed data of the loaded transaction.

    :returns: the loaded data of the transaction
    """
    return self._replay_data
def get_warnings(self):
    """Return the warnings collected so far during the replay.

    :returns: an array of warnings gathered during the transaction replay
    """
    return self._warnings
def run(self):
    """
    Replays the transaction.

    Problems are collected per item so all of them can be reported in a
    single TransactionReplayError at the end.
    """
    fn = self._filename
    errors = []
    for pkg_data in self._rpms:
        try:
            self._replay_pkg_action(pkg_data)
        except TransactionError as e:
            errors.append(e)
    for group_data in self._groups:
        try:
            action = group_data["action"]
            group_id = group_data["id"]
            try:
                pkg_types = libdnf.transaction.stringToCompsPackageType(group_data["package_types"])
            except libdnf.error.Error as e:
                errors.append(TransactionError(str(e)))
                continue
            if action == "Install":
                self._swdb_group_install(group_id, pkg_types, group_data["packages"])
            elif action == "Removed":
                self._swdb_group_remove(group_id, pkg_types, group_data["packages"])
            # Groups are not versioned, but a reverse transaction could be applied,
            # therefore we treat both actions the same way
            elif action == "Upgrade" or action == "Upgraded":
                self._swdb_group_upgrade(group_id, pkg_types, group_data["packages"])
            elif action == "Downgrade" or action == "Downgraded":
                self._swdb_group_downgrade(group_id, pkg_types, group_data["packages"])
            else:
                errors.append(TransactionError(
                    _('Unexpected value of group action "{action}" for group "{group}".')
                    .format(action=action, group=group_id)
                ))
        except KeyError as e:
            errors.append(TransactionError(
                _('Missing object key "{key}" in a group.').format(key=e.args[0])
            ))
        except TransactionError as e:
            errors.append(e)
    for env_data in self._environments:
        try:
            action = env_data["action"]
            env_id = env_data["id"]
            try:
                pkg_types = libdnf.transaction.stringToCompsPackageType(env_data["package_types"])
            except libdnf.error.Error as e:
                errors.append(TransactionError(str(e)))
                continue
            if action == "Install":
                self._swdb_environment_install(env_id, pkg_types, env_data["groups"])
            elif action == "Removed":
                self._swdb_environment_remove(env_id, pkg_types, env_data["groups"])
            # Environments are not versioned, but a reverse transaction could be applied,
            # therefore we treat both actions the same way
            elif action == "Upgrade" or action == "Upgraded":
                self._swdb_environment_upgrade(env_id, pkg_types, env_data["groups"])
            elif action == "Downgrade" or action == "Downgraded":
                self._swdb_environment_downgrade(env_id, pkg_types, env_data["groups"])
            else:
                errors.append(TransactionError(
                    _('Unexpected value of environment action "{action}" for environment "{env}".')
                    .format(action=action, env=env_id)
                ))
        except KeyError as e:
            errors.append(TransactionError(
                _('Missing object key "{key}" in an environment.').format(key=e.args[0])
            ))
        except TransactionError as e:
            errors.append(e)
    if errors:
        raise TransactionReplayError(fn, errors)
def post_transaction(self):
    """
    Sets reasons in the transaction history to values from the stored transaction.

    Also serves to check whether additional packages were pulled in by the
    transaction, which results in an error (unless ignore_extras is True).
    """
    if not self._base.transaction:
        return
    errors = []
    for tsi in self._base.transaction:
        try:
            pkg = tsi.pkg
        except KeyError as e:
            # the transaction item has no package, happens for action == "Reason Change"
            continue
        nevra = str(pkg)
        if nevra not in self._nevra_cache:
            # if ignore_installed is True, we don't want to check for
            # Upgraded/Downgraded/Reinstalled extras in the transaction,
            # basically those may be installed and we are ignoring them
            if not self._ignore_installed or not tsi.action in (
                libdnf.transaction.TransactionItemAction_UPGRADED,
                libdnf.transaction.TransactionItemAction_DOWNGRADED,
                libdnf.transaction.TransactionItemAction_REINSTALLED
            ):
                msg = _('Package nevra "{nevra}", which is not present in the transaction file, was pulled '
                        'into the transaction.'
                       ).format(nevra=nevra)
                if not self._ignore_extras:
                    errors.append(TransactionError(msg))
                else:
                    self._warnings.append(msg)
        try:
            replay_reason = self._nevra_reason_cache[nevra]
            # apply the stored reason for plain installs/removes, or when the
            # reason comparison prefers the stored one over the computed one
            if tsi.action in (
                libdnf.transaction.TransactionItemAction_INSTALL,
                libdnf.transaction.TransactionItemAction_REMOVE
            ) or libdnf.transaction.TransactionItemReasonCompare(replay_reason, tsi.reason) > 0:
                tsi.reason = replay_reason
        except KeyError as e:
            # if the pkg nevra wasn't found, we don't want to change the reason
            pass
    if errors:
        raise TransactionReplayError(self._filename, errors)
| 26,209
|
Python
|
.py
| 529
| 37.557656
| 136
| 0.598497
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,721
|
history.py
|
rpm-software-management_dnf/dnf/history.py
|
# history.py
# Interfaces to the history of transactions.
#
# Copyright (C) 2013-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""Interfaces to the history of transactions."""
from __future__ import absolute_import
from __future__ import unicode_literals
| 1,150
|
Python
|
.py
| 22
| 51.136364
| 77
| 0.784889
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,722
|
logging.py
|
rpm-software-management_dnf/dnf/logging.py
|
# logging.py
# DNF Logging Subsystem.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf.exceptions
import dnf.const
import dnf.lock
import dnf.util
import libdnf.repo
import logging
import logging.handlers
import os
import sys
import time
import warnings
import gzip
# :api loggers are: 'dnf', 'dnf.plugin', 'dnf.rpm'
SUPERCRITICAL = 100 # do not use this for logging
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
DDEBUG = 8 # used by anaconda (pyanaconda/payload/dnfpayload.py)
SUBDEBUG = 6
TRACE = 4
ALL = 2
def only_once(func):
    """Method decorator turning the method into noop on second or later calls."""
    def _noop(*_args, **_kwargs):
        pass

    def _first_call_only(self, *args, **kwargs):
        func(self, *args, **kwargs)
        # Shadow the bound method on the instance so later calls are noops.
        setattr(self, func.__name__, _noop)
    return _first_call_only
class _MaxLevelFilter(object):
def __init__(self, max_level):
self.max_level = max_level
def filter(self, record):
if record.levelno >= self.max_level:
return 0
return 1
_VERBOSE_VAL_MAPPING = {
0 : SUPERCRITICAL,
1 : logging.INFO,
2 : logging.INFO, # the default
3 : logging.DEBUG,
4 : logging.DEBUG,
5 : logging.DEBUG,
6 : logging.DEBUG, # verbose value
7 : DDEBUG,
8 : SUBDEBUG,
9 : TRACE,
10: ALL, # more verbous librepo and hawkey
}
def _cfg_verbose_val2level(cfg_errval):
    """Map a 'debuglevel'/'logfilelevel' config value (0-10) to a log level."""
    assert 0 <= cfg_errval <= 10
    try:
        return _VERBOSE_VAL_MAPPING[cfg_errval]
    except KeyError:
        return TRACE
# Both the DNF default and the verbose default are WARNING. Note that ERROR has
# no specific level.
_ERR_VAL_MAPPING = {
0: SUPERCRITICAL,
1: logging.CRITICAL,
2: logging.ERROR
}
def _cfg_err_val2level(cfg_errval):
    """Map an 'errorlevel' config value (0-10) to a log level."""
    assert 0 <= cfg_errval <= 10
    try:
        return _ERR_VAL_MAPPING[cfg_errval]
    except KeyError:
        return logging.WARNING
def compression_namer(name):
    """Rotated-log namer callback: append the gzip suffix to the file name."""
    return "{}.gz".format(name)
CHUNK_SIZE = 128 * 1024 # 128 KB
def compression_rotator(source, dest):
    """Rotated-log rotator callback: gzip *source* into *dest* chunk by chunk,
    then remove the uncompressed original."""
    with open(source, "rb") as src:
        with gzip.open(dest, 'wb') as dst:
            for chunk in iter(lambda: src.read(CHUNK_SIZE), b""):
                dst.write(chunk)
    os.remove(source)
class MultiprocessRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """RotatingFileHandler that serializes rotation between dnf processes.

    Several dnf processes may append to the same log file; the actual
    rollover is guarded by an inter-process lock so only one of them
    rotates at a time.
    """

    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        super(MultiprocessRotatingFileHandler, self).__init__(
            filename, mode, maxBytes, backupCount, encoding, delay)
        # Inter-process lock kept under /var/log/ (second arg True; presumably
        # "blocking" -- TODO confirm against dnf.lock.build_log_lock).
        self.rotate_lock = dnf.lock.build_log_lock("/var/log/", True)

    def emit(self, record):
        """Emit `record`, performing a locked rollover first if the file grew
        past maxBytes.  Busy-waits while another process/thread holds the
        rotation lock; any other failure is passed to handleError()."""
        while True:
            try:
                if self.shouldRollover(record):
                    with self.rotate_lock:
                        # Do rollover while preserving the mode of the new log file
                        mode = os.stat(self.baseFilename).st_mode
                        self.doRollover()
                        os.chmod(self.baseFilename, mode)
                logging.FileHandler.emit(self, record)
                return
            except (dnf.exceptions.ProcessLockError, dnf.exceptions.ThreadLockError):
                # Lock is held elsewhere; retry shortly.
                time.sleep(0.01)
            except Exception:
                self.handleError(record)
                return
def _create_filehandler(logfile, log_size, log_rotate, log_compress):
    """Create a size-rotating file handler for *logfile*.

    Creates the file (and its directory) when missing.  `log_size` is the
    rotation threshold in bytes, `log_rotate` the number of kept backups;
    when `log_compress` is true, rotated files are gzipped.
    """
    if not os.path.exists(logfile):
        dnf.util.ensure_dir(os.path.dirname(logfile))
        dnf.util.touch(logfile)
    handler = MultiprocessRotatingFileHandler(logfile, maxBytes=log_size, backupCount=log_rotate)
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s",
                                  "%Y-%m-%dT%H:%M:%S%z")
    # Timestamps are rendered in local time.
    formatter.converter = time.localtime
    handler.setFormatter(formatter)
    if log_compress:
        handler.rotator = compression_rotator
        handler.namer = compression_namer
    return handler
def _paint_mark(logger):
    # Write the session-start marker (dnf.const.LOG_MARKER) into the log.
    logger.log(INFO, dnf.const.LOG_MARKER)
class Logging(object):
    """dnf's logging setup: console handlers, rotating file logs, and the
    librepo / RPM side channels.  The _setup* methods are @only_once, so
    repeated configuration attempts are no-ops."""

    def __init__(self):
        self.stdout_handler = self.stderr_handler = None
        # Register names for dnf's custom verbosity levels.
        logging.addLevelName(DDEBUG, "DDEBUG")
        logging.addLevelName(SUBDEBUG, "SUBDEBUG")
        logging.addLevelName(TRACE, "TRACE")
        logging.addLevelName(ALL, "ALL")
        logging.captureWarnings(True)
        # Never let logging-internal errors propagate into the application.
        logging.raiseExceptions = False

    @only_once
    def _presetup(self):
        """Attach console handlers to the "dnf" logger: stdout gets INFO up
        to (but excluding) WARNING, stderr gets WARNING and above."""
        logger_dnf = logging.getLogger("dnf")
        logger_dnf.setLevel(TRACE)
        # setup stdout
        stdout = logging.StreamHandler(sys.stdout)
        stdout.setLevel(INFO)
        stdout.addFilter(_MaxLevelFilter(logging.WARNING))
        logger_dnf.addHandler(stdout)
        self.stdout_handler = stdout
        # setup stderr
        stderr = logging.StreamHandler(sys.stderr)
        stderr.setLevel(WARNING)
        logger_dnf.addHandler(stderr)
        self.stderr_handler = stderr

    @only_once
    def _setup_file_loggers(self, logfile_level, logdir, log_size, log_rotate, log_compress):
        """Install rotating file handlers in *logdir* for the dnf, Python
        warnings, librepo and RPM loggers."""
        logger_dnf = logging.getLogger("dnf")
        logger_dnf.setLevel(TRACE)
        # setup file logger
        logfile = os.path.join(logdir, dnf.const.LOG)
        handler = _create_filehandler(logfile, log_size, log_rotate, log_compress)
        handler.setLevel(logfile_level)
        logger_dnf.addHandler(handler)
        # setup Python warnings
        logger_warnings = logging.getLogger("py.warnings")
        logger_warnings.addHandler(handler)
        # librepo writes to its own file; native debug output only at ALL.
        logger_librepo = logging.getLogger("librepo")
        logger_librepo.setLevel(TRACE)
        logfile = os.path.join(logdir, dnf.const.LOG_LIBREPO)
        handler = _create_filehandler(logfile, log_size, log_rotate, log_compress)
        logger_librepo.addHandler(handler)
        libdnf.repo.LibrepoLog.addHandler(logfile, logfile_level <= ALL)
        # setup RPM callbacks logger (does not propagate to the "dnf" logger)
        logger_rpm = logging.getLogger("dnf.rpm")
        logger_rpm.propagate = False
        logger_rpm.setLevel(SUBDEBUG)
        logfile = os.path.join(logdir, dnf.const.LOG_RPM)
        handler = _create_filehandler(logfile, log_size, log_rotate, log_compress)
        logger_rpm.addHandler(handler)

    @only_once
    def _setup(self, verbose_level, error_level, logfile_level, logdir, log_size, log_rotate, log_compress):
        """Full setup: console + file handlers, then write session markers
        into the files without echoing them to the console."""
        self._presetup()
        self._setup_file_loggers(logfile_level, logdir, log_size, log_rotate, log_compress)
        logger_warnings = logging.getLogger("py.warnings")
        logger_warnings.addHandler(self.stderr_handler)
        # setup RPM callbacks logger
        logger_rpm = logging.getLogger("dnf.rpm")
        logger_rpm.addHandler(self.stdout_handler)
        logger_rpm.addHandler(self.stderr_handler)
        logger_dnf = logging.getLogger("dnf")
        # temporarily turn off stdout/stderr handlers:
        self.stdout_handler.setLevel(WARNING)
        self.stderr_handler.setLevel(WARNING)
        _paint_mark(logger_dnf)
        _paint_mark(logger_rpm)
        # bring std handlers to the preferred level
        self.stdout_handler.setLevel(verbose_level)
        self.stderr_handler.setLevel(error_level)

    def _setup_from_dnf_conf(self, conf, file_loggers_only=False):
        """Configure logging from a dnf config object.  With
        file_loggers_only=True only the file handlers are installed."""
        verbose_level_r = _cfg_verbose_val2level(conf.debuglevel)
        error_level_r = _cfg_err_val2level(conf.errorlevel)
        logfile_level_r = _cfg_verbose_val2level(conf.logfilelevel)
        logdir = conf.logdir
        log_size = conf.log_size
        log_rotate = conf.log_rotate
        log_compress = conf.log_compress
        if file_loggers_only:
            return self._setup_file_loggers(logfile_level_r, logdir, log_size, log_rotate, log_compress)
        else:
            return self._setup(
                verbose_level_r, error_level_r, logfile_level_r, logdir, log_size, log_rotate, log_compress)
class Timer(object):
    """Wall-clock stopwatch: remembers its creation time and, when called,
    logs the elapsed milliseconds at DDEBUG level."""

    def __init__(self, what):
        self.what = what
        self.start = time.time()

    def __call__(self):
        elapsed = time.time() - self.start
        message = 'timer: %s: %d ms' % (self.what, elapsed * 1000)
        logging.getLogger("dnf").log(DDEBUG, message)
_LIBDNF_TO_DNF_LOGLEVEL_MAPPING = {
libdnf.utils.Logger.Level_CRITICAL: CRITICAL,
libdnf.utils.Logger.Level_ERROR: ERROR,
libdnf.utils.Logger.Level_WARNING: WARNING,
libdnf.utils.Logger.Level_NOTICE: INFO,
libdnf.utils.Logger.Level_INFO: INFO,
libdnf.utils.Logger.Level_DEBUG: DEBUG,
libdnf.utils.Logger.Level_TRACE: TRACE
}
class LibdnfLoggerCB(libdnf.utils.Logger):
    """Bridge forwarding libdnf/librepo native log messages into Python
    logging ("librepo" or "dnf" logger, by message source)."""

    def __init__(self):
        super(LibdnfLoggerCB, self).__init__()
        self._dnf_logger = logging.getLogger("dnf")
        self._librepo_logger = logging.getLogger("librepo")

    def write(self, source, *args):
        """Log message.
        source -- integer, defines origin (libdnf, librepo, ...) of message, 0 - unknown
        """
        # Called with either (level, message) or (time, pid, level, message);
        # only level and message are used.
        # NOTE(review): any other arity leaves 'level'/'message' unbound and
        # would raise NameError -- presumably libdnf guarantees one of the
        # two shapes; confirm.
        if len(args) == 2:
            level, message = args
        elif len(args) == 4:
            time, pid, level, message = args
        if source == libdnf.utils.Logger.LOG_SOURCE_LIBREPO:
            self._librepo_logger.log(_LIBDNF_TO_DNF_LOGLEVEL_MAPPING[level], message)
        else:
            self._dnf_logger.log(_LIBDNF_TO_DNF_LOGLEVEL_MAPPING[level], message)
libdnfLoggerCB = LibdnfLoggerCB()
libdnf.utils.Log.setLogger(libdnfLoggerCB)
| 10,354
|
Python
|
.py
| 250
| 34.256
| 108
| 0.669286
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,723
|
repodict.py
|
rpm-software-management_dnf/dnf/repodict.py
|
# repodict.py
# Managing repo configuration in DNF.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
from dnf.exceptions import ConfigError
from dnf.i18n import _
import dnf.util
import libdnf.conf
import fnmatch
import os
logger = dnf.util.logger
class RepoDict(dict):
    # :api
    """Mapping of repo id -> repo object whose iteration order follows
    (priority, cost) rather than insertion order."""

    def add(self, repo):
        # :api
        """Add *repo* after verifying its configuration; raise ConfigError
        on a duplicate id or an invalid configuration."""
        id_ = repo.id
        if id_ in self:
            msg = 'Repository %s is listed more than once in the configuration'
            raise ConfigError(msg % id_)
        try:
            repo._repo.verify()
        except RuntimeError as e:
            raise ConfigError("{0}".format(e))
        self[id_] = repo

    def all(self):
        # :api
        """Return a MultiCallList proxying every repo at once."""
        return dnf.util.MultiCallList(self.values())

    def _any_enabled(self):
        # True when at least one repo is enabled.
        return not dnf.util.empty(self.iter_enabled())

    def _enable_sub_repos(self, sub_name_fn):
        """Enable repos whose id is sub_name_fn(<id of an enabled repo>)."""
        for repo in self.iter_enabled():
            for found in self.get_matching(sub_name_fn(repo.id)):
                if not found.enabled:
                    logger.info(_('enabling %s repository'), found.id)
                    found.enable()

    def add_new_repo(self, repoid, conf, baseurl=(), **kwargs):
        # :api
        """
        Creates new repo object and add it into RepoDict. Variables in provided values will be
        automatically substituted using conf.substitutions (like $releasever, ...)
        @param repoid: Repo ID - string
        @param conf: dnf Base().conf object
        @param baseurl: List of strings
        @param kwargs: keys and values that will be used to setattr on dnf.repo.Repo() object
        @return: dnf.repo.Repo() object
        """
        def substitute(values):
            # Substitute config variables in a string, or in each string of a
            # list/tuple; non-string values are returned unchanged.
            if isinstance(values, str):
                return libdnf.conf.ConfigParser.substitute(values, conf.substitutions)
            elif isinstance(values, list) or isinstance(values, tuple):
                substituted = []
                for value in values:
                    if isinstance(value, str):
                        substituted.append(
                            libdnf.conf.ConfigParser.substitute(value, conf.substitutions))
                if substituted:
                    return substituted
            return values

        # NOTE(review): this module imports dnf.util but not dnf.repo; the
        # attribute access presumably works because dnf.repo is imported
        # elsewhere in the package -- verify.
        repo = dnf.repo.Repo(repoid, conf)
        for path in baseurl:
            if '://' not in path:
                # Bare filesystem paths become local file:// URLs.
                path = 'file://{}'.format(os.path.abspath(path))
            repo.baseurl += [substitute(path)]
        for (key, value) in kwargs.items():
            setattr(repo, key, substitute(value))
        self.add(repo)
        logger.info(_("Added %s repo from %s"), repoid, ', '.join(baseurl))
        return repo

    def enable_debug_repos(self):
        # :api
        """enable debug repos corresponding to already enabled binary repos"""
        def debug_name(name):
            return ("{}-debug-rpms".format(name[:-5]) if name.endswith("-rpms")
                    else "{}-debuginfo".format(name))
        self._enable_sub_repos(debug_name)

    def enable_source_repos(self):
        # :api
        """enable source repos corresponding to already enabled binary repos"""
        def source_name(name):
            return ("{}-source-rpms".format(name[:-5]) if name.endswith("-rpms")
                    else "{}-source".format(name))
        self._enable_sub_repos(source_name)

    def get_matching(self, key):
        # :api
        """Return a MultiCallList of repos whose id matches *key* (glob
        pattern or exact id); empty list proxy when nothing matches."""
        if dnf.util.is_glob_pattern(key):
            l = [self[k] for k in self if fnmatch.fnmatch(k, key)]
            return dnf.util.MultiCallList(l)
        repo = self.get(key, None)
        if repo is None:
            return dnf.util.MultiCallList([])
        return dnf.util.MultiCallList([repo])

    def iter_enabled(self):
        # :api
        """Iterate over enabled repos only (in priority order)."""
        return (r for r in self.values() if r.enabled)

    def items(self):
        """return repos sorted by priority"""
        return (item for item in sorted(super(RepoDict, self).items(),
                                        key=lambda x: (x[1].priority, x[1].cost)))

    def __iter__(self):
        # Iteration follows the (priority, cost) ordering defined by items().
        return self.keys()

    def keys(self):
        return (k for k, v in self.items())

    def values(self):
        return (v for k, v in self.items())
| 5,158
|
Python
|
.py
| 120
| 34.025
| 94
| 0.619342
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,724
|
i18n.py
|
rpm-software-management_dnf/dnf/i18n.py
|
# i18n.py
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import unicode_literals
from dnf.pycomp import unicode
import dnf
import locale
import os
import signal
import sys
import unicodedata
"""
Centralize i18n stuff here. Must be unittested.
"""
class UnicodeStream(object):
    """File-like wrapper making writes of arbitrary text safe: characters the
    underlying stream cannot encode are replaced/escaped instead of raising."""

    def __init__(self, stream, encoding):
        self.stream = stream
        self.encoding = encoding

    def write(self, s):
        if not isinstance(s, str):
            # PY3: decode bytes into str; PY2: encode unicode into bytes.
            s = (s.decode(self.encoding, 'replace') if dnf.pycomp.PY3 else
                 s.encode(self.encoding, 'replace'))
        try:
            self.stream.write(s)
        except UnicodeEncodeError:
            # Stream can't encode some characters: backslash-escape and write
            # raw bytes, or a lossy re-decoded string if there is no buffer.
            s_bytes = s.encode(self.stream.encoding, 'backslashreplace')
            if hasattr(self.stream, 'buffer'):
                self.stream.buffer.write(s_bytes)
            else:
                s = s_bytes.decode(self.stream.encoding, 'ignore')
                self.stream.write(s)

    def __getattr__(self, name):
        # Everything else is delegated to the wrapped stream.
        return getattr(self.stream, name)
def _full_ucd_support(encoding):
"""Return true if encoding can express any Unicode character.
Even if an encoding can express all accented letters in the given language,
we can't generally settle for it in DNF since sometimes we output special
characters like the registered trademark symbol (U+00AE) and surprisingly
many national non-unicode encodings, including e.g. ASCII and ISO-8859-2,
don't contain it.
"""
if encoding is None:
return False
lower = encoding.lower()
if lower.startswith('utf-') or lower.startswith('utf_'):
return True
return False
def _guess_encoding():
""" Take the best shot at the current system's string encoding. """
encoding = locale.getpreferredencoding(False)
return 'utf-8' if encoding.startswith("ANSI") else encoding
def setup_locale():
    """Initialize the process locale from the environment.

    Falls back to C.UTF-8 and then plain C when the configured locale is
    unusable, exporting LC_ALL accordingly and warning on stderr.
    """
    try:
        dnf.pycomp.setlocale(locale.LC_ALL, '')
    except locale.Error:
        # default to C.UTF-8 or C locale if we got a failure.
        try:
            dnf.pycomp.setlocale(locale.LC_ALL, 'C.UTF-8')
            os.environ['LC_ALL'] = 'C.UTF-8'
        except locale.Error:
            dnf.pycomp.setlocale(locale.LC_ALL, 'C')
            os.environ['LC_ALL'] = 'C'
        print('Failed to set locale, defaulting to {}'.format(os.environ['LC_ALL']),
              file=sys.stderr)
def setup_stdout():
    """ Check that stdout is of suitable encoding and handle the situation if
    not.
    Returns True if stdout was of suitable encoding already and no changes
    were needed.
    """
    stdout = sys.stdout
    if not stdout.isatty():
        # Piped output: die quietly on a broken pipe instead of tracebacking.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    try:
        encoding = stdout.encoding
    except AttributeError:
        encoding = None
    if not _full_ucd_support(encoding):
        # Wrap stdout so arbitrary Unicode can be written without crashing.
        sys.stdout = UnicodeStream(stdout, _guess_encoding())
        return False
    return True
def ucd_input(ucstring):
    # :api, deprecated in 2.0.0, will be erased when python2 is abandoned
    """ It uses print instead of passing the prompt to raw_input.
    raw_input doesn't encode the passed string and the output
    goes into stderr
    """
    print(ucstring, end='')
    return dnf.pycomp.raw_input()
def ucd(obj):
    # :api, deprecated in 2.0.0, will be erased when python2 is abandoned
    """ Like the builtin unicode() but tries to use a reasonable encoding. """
    if dnf.pycomp.PY3:
        if dnf.pycomp.is_py3bytes(obj):
            # Decode bytes with the guessed system encoding, dropping
            # undecodable bytes.
            return str(obj, _guess_encoding(), errors='ignore')
        elif isinstance(obj, str):
            return obj
        return str(obj)
    else:
        if isinstance(obj, dnf.pycomp.unicode):
            return obj
        if hasattr(obj, '__unicode__'):
            # see the doc for the unicode() built-in. The logic here is: if obj
            # implements __unicode__, let it take a crack at it, but handle the
            # situation if it fails:
            try:
                return dnf.pycomp.unicode(obj)
            except UnicodeError:
                pass
        return dnf.pycomp.unicode(str(obj), _guess_encoding(), errors='ignore')
# functions for formatting output according to terminal width,
# They should be used instead of build-in functions to count on different
# widths of Unicode characters
def _exact_width_char(uchar):
return 2 if unicodedata.east_asian_width(uchar) in ('W', 'F') else 1
def chop_str(msg, chop=None):
    """ Return the textual width of a Unicode string, chopping it to
    a specified value. This is what you want to use instead of %.*s, as it
    does the "right" thing with regard to different Unicode character width
    Eg. "%.*s" % (10, msg) <= becomes => "%s" % (chop_str(msg, 10)) """
    if chop is None:
        return exact_width(msg), msg
    width = 0
    kept = []
    for char in msg:
        char_width = _exact_width_char(char)
        # Stop before a character that would push us past the limit.
        if width + char_width > chop:
            break
        kept.append(char)
        width += char_width
    return width, ''.join(kept)
def exact_width(msg):
    """ Calculates width of char at terminal screen
    (Asian char counts for two) """
    total = 0
    for char in msg:
        total += _exact_width_char(char)
    return total
def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
    """ Expand a msg to a specified "width" or chop to same.
    Expansion can be left or right. This is what you want to use instead of
    %*.*s, as it does the "right" thing with regard to different Unicode
    character width.
    prefix and suffix should be used for "invisible" bytes, like
    highlighting.
    Examples:
    ``"%-*.*s" % (10, 20, msg)`` becomes
        ``"%s" % (fill_exact_width(msg, 10, 20))``.
    ``"%20.10s" % (msg)`` becomes
        ``"%s" % (fill_exact_width(msg, 20, 10, left=False))``.
    ``"%s%.10s%s" % (pre, msg, suf)`` becomes
        ``"%s" % (fill_exact_width(msg, 0, 10, prefix=pre, suffix=suf))``.
    """
    width, msg = chop_str(msg, chop)
    # Already wide enough: no padding, just wrap in prefix/suffix if any.
    if width >= fill:
        if prefix or suffix:
            return ''.join([prefix, msg, suffix])
        return msg
    padding = " " * (fill - width)
    if left:
        return ''.join([prefix, msg, suffix, padding])
    return ''.join([padding, prefix, msg, suffix])
def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
    """ Works like we want textwrap.wrap() to work, uses Unicode strings
    and doesn't screw up lists/blocks/etc. """
    def _indent_at_beg(line):
        # Returns (number of leading spaces, continuation indent when the
        # line starts a list item, else 0).
        count = 0
        byte = 'X'
        for byte in line:
            if byte != ' ':
                break
            count += 1
        # '\xe2' presumably catches the first element of UTF-8 bullet
        # characters from the py2 era -- TODO confirm.
        if byte not in ("-", "*", ".", "o", '\xe2'):
            return count, 0
        list_chr = chop_str(line[count:], 1)[1]
        if list_chr in ("-", "*", ".", "o",
                        "\u2022", "\u2023", "\u2218"):
            nxt = _indent_at_beg(line[count+len(list_chr):])
            nxt = nxt[1] or nxt[0]
            if nxt:
                return count, count + 1 + nxt
        return count, 0

    text = text.rstrip('\n')
    lines = text.replace('\t', ' ' * 8).split('\n')
    ret = []
    indent = initial_indent
    wrap_last = False
    csab = 0          # current line: spaces at beginning
    cspc_indent = 0   # current line: list-continuation indent
    for line in lines:
        line = line.rstrip(' ')
        (lsab, lspc_indent) = (csab, cspc_indent)
        (csab, cspc_indent) = _indent_at_beg(line)
        force_nl = False  # We want to stop wrapping under "certain" conditions:
        if wrap_last and cspc_indent:        # if line starts a list or
            force_nl = True
        if wrap_last and csab == len(line):  # is empty line
            force_nl = True
        # if line doesn't continue a list and is "block indented"
        if wrap_last and not lspc_indent:
            if csab >= 4 and csab != lsab:
                force_nl = True
        if force_nl:
            # Flush the pending wrapped line before starting fresh.
            ret.append(indent.rstrip(' '))
            indent = subsequent_indent
            wrap_last = False
        if csab == len(line):  # empty line, remove spaces to make it easier.
            line = ''
        if wrap_last:
            # Continue wrapping the previous line: join and keep list indent.
            line = line.lstrip(' ')
            cspc_indent = lspc_indent
        if exact_width(indent + line) <= width:
            # Fits as-is; emit and move on.
            wrap_last = False
            ret.append(indent + line)
            indent = subsequent_indent
            continue
        wrap_last = True
        words = line.split(' ')
        line = indent
        spcs = cspc_indent
        if not spcs and csab >= 4:
            spcs = csab
        for word in words:
            # Break before a word that overflows, unless the line is still
            # nothing but the indent itself.
            if (width < exact_width(line + word)) and \
               (exact_width(line) > exact_width(subsequent_indent)):
                ret.append(line.rstrip(' '))
                line = subsequent_indent + ' ' * spcs
            line += word
            line += ' '
        indent = line.rstrip(' ') + ' '
    if wrap_last:
        ret.append(indent.rstrip(' '))
    return '\n'.join(ret)
def select_short_long(width, msg_short, msg_long):
    """ Automatically selects the short (abbreviated) or long (full) message
    depending on whether we have enough screen space to display the full
    message or not. If a caller by mistake passes a long string as
    msg_short and a short string as a msg_long this function recognizes
    the mistake and swaps the arguments. This function is especially useful
    in the i18n context when you cannot predict how long are the translated
    messages.
    Limitations:
    1. If msg_short is longer than width you will still get an overflow.
       This function does not abbreviate the string.
    2. You are not obliged to provide an actually abbreviated string, it is
       perfectly correct to pass the same string twice if you don't want
       any abbreviation. However, if you provide two different strings but
       having the same width this function is unable to recognize which one
       is correct and you should assume that it is unpredictable which one
       is returned.
    Example:
    ``select_short_long (10, _("Repo"), _("Repository"))``
    will return "Repository" in English but the results in other languages
    may be different. """
    width_short = exact_width(msg_short)
    width_long = exact_width(msg_long)
    if width_short == width_long:
        # Same width: arbitrarily prefer the "long" argument.
        return msg_long
    if width_short > width_long:
        # Arguments were swapped by the caller; treat msg_short as the long one.
        return msg_short if width_short <= width else msg_long
    # The regular case: take the long form only when it fits.
    return msg_long if width_long <= width else msg_short
def translation(name):
    # :api, deprecated in 2.0.0, will be erased when python2 is abandoned
    """ Easy gettext translations setup based on given domain name """
    setup_locale()
    def ucd_wrapper(fnc):
        # Ensure translated strings come back as unicode even on python2.
        return lambda *w: ucd(fnc(*w))
    t = dnf.pycomp.gettext.translation(name, fallback=True)
    return map(ucd_wrapper, dnf.pycomp.gettext_setup(t))
def pgettext(context, message):
    """Context-aware gettext lookup: msgctxt and msgid joined with EOT."""
    candidate = _(context + chr(4) + message)
    # An untranslated lookup returns the joined key (still containing the
    # EOT separator); fall back to the plain message in that case.
    if "\004" in candidate:
        return message
    return candidate
# setup translations
_, P_ = translation("dnf")
C_ = pgettext
| 12,321
|
Python
|
.py
| 297
| 33.666667
| 84
| 0.622712
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,725
|
selector.py
|
rpm-software-management_dnf/dnf/selector.py
|
# selector.py
# DNF specific hawkey.Selector handling.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from hawkey import Selector
| 1,125
|
Python
|
.py
| 22
| 50.045455
| 77
| 0.789282
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,726
|
callback.py
|
rpm-software-management_dnf/dnf/callback.py
|
# callbacks.py
# Abstract interfaces to communicate progress on tasks.
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
import dnf.yum.rpmtrans
import dnf.transaction
PKG_DOWNGRADE = dnf.transaction.PKG_DOWNGRADE # :api
PKG_DOWNGRADED = dnf.transaction.PKG_DOWNGRADED # :api
PKG_INSTALL = dnf.transaction.PKG_INSTALL # :api
PKG_OBSOLETE = dnf.transaction.PKG_OBSOLETE # :api
PKG_OBSOLETED = dnf.transaction.PKG_OBSOLETED # :api
PKG_REINSTALL = dnf.transaction.PKG_REINSTALL # :api
PKG_REINSTALLED = dnf.transaction.PKG_REINSTALLED # :api
PKG_REMOVE = dnf.transaction.PKG_ERASE # :api
PKG_ERASE = PKG_REMOVE # deprecated, use PKG_REMOVE instead
PKG_UPGRADE = dnf.transaction.PKG_UPGRADE # :api
PKG_UPGRADED = dnf.transaction.PKG_UPGRADED # :api
PKG_CLEANUP = dnf.transaction.PKG_CLEANUP # :api
PKG_VERIFY = dnf.transaction.PKG_VERIFY # :api
PKG_SCRIPTLET = dnf.transaction.PKG_SCRIPTLET # :api
TRANS_PREPARATION = dnf.transaction.TRANS_PREPARATION # :api
TRANS_POST = dnf.transaction.TRANS_POST # :api
STATUS_OK = None # :api
STATUS_FAILED = 1 # :api
STATUS_ALREADY_EXISTS = 2 # :api
STATUS_MIRROR = 3 # :api
STATUS_DRPM = 4 # :api
class KeyImport(object):
    """Default GPG key-import policy: decline every key."""

    def _confirm(self, id, userid, fingerprint, url, timestamp):
        """Ask the user if the key should be imported."""
        return False
class Payload(object):
    # :api
    """Abstract wrapper around something downloadable, bound to a progress
    reporter.  Subclasses override __str__ and download_size; the base
    implementations return None.
    NOTE(review): str() on a base instance would raise TypeError since
    __str__ returns None -- presumably only subclasses are instantiated.
    """

    def __init__(self, progress):
        self.progress = progress

    def __str__(self):
        """Nice, human-readable representation. :api"""
        pass

    @property
    def download_size(self):
        """Total size of the download. :api"""
        pass
class DownloadProgress(object):
    # :api
    """Abstract observer of payload downloads; all callbacks are no-ops."""

    def end(self, payload, status, msg):
        """Communicate the information that `payload` has finished downloading.
        :api, `status` is a constant denoting the type of outcome, `err_msg` is an
        error message in case the outcome was an error.
        """
        # NOTE(review): the docstring says `err_msg` but the parameter is
        # named `msg` -- keep in mind when overriding by keyword.
        pass

    def message(self, msg):
        # Free-form informational message from the downloader.
        pass

    def progress(self, payload, done):
        """Update the progress display. :api
        `payload` is the payload this call reports progress for, `done` is how
        many bytes of this payload are already downloaded.
        """
        pass

    def start(self, total_files, total_size, total_drpms=0):
        """Start new progress metering. :api
        `total_files` the number of files that will be downloaded,
        `total_size` total size of all files.
        """
        pass
class NullDownloadProgress(DownloadProgress):
    """Progress reporter that silently discards all download events."""
    pass
class Depsolve(object):
    """Depsolve progress callback interface; override what you need."""

    def start(self):
        """Called when dependency resolution begins."""

    def pkg_added(self, pkg, mode):
        """Called for each package added to the resolved transaction."""

    def end(self):
        """Called when dependency resolution has finished."""
TransactionProgress = dnf.yum.rpmtrans.TransactionDisplay # :api
| 3,724
|
Python
|
.py
| 90
| 37.011111
| 82
| 0.714643
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,727
|
subject.py
|
rpm-software-management_dnf/dnf/subject.py
|
# subject.py
# Implements Subject.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from hawkey import Subject # :api
| 1,150
|
Python
|
.py
| 23
| 48.913043
| 77
| 0.784889
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,728
|
repo.py
|
rpm-software-management_dnf/dnf/repo.py
|
# repo.py
# DNF Repository objects.
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import ucd, _
import dnf.callback
import dnf.conf
import dnf.conf.substitutions
import dnf.const
import dnf.crypto
import dnf.exceptions
import dnf.logging
import dnf.pycomp
import dnf.util
import dnf.yum.misc
import libdnf.error
import libdnf.repo
import functools
import hashlib
import hawkey
import logging
import operator
import os
import re
import shutil
import string
import sys
import time
import traceback
import urllib
_PACKAGES_RELATIVE_DIR = "packages"
_MIRRORLIST_FILENAME = "mirrorlist"
# Chars allowed in a repo ID
_REPOID_CHARS = string.ascii_letters + string.digits + '-_.:'
# Regex pattern that matches a repo cachedir and captures the repo ID
_CACHEDIR_RE = r'(?P<repoid>[%s]+)\-[%s]{16}' % (re.escape(_REPOID_CHARS),
string.hexdigits)
# Regex patterns matching any filename that is repo-specific cache data of a
# particular type. The filename is expected to not contain the base cachedir
# path components.
CACHE_FILES = {
'metadata': r'^%s\/.*((xml|yaml)(\.gz|\.xz|\.bz2|\.zck|\.zst)?|asc|cachecookie|%s)$' %
(_CACHEDIR_RE, _MIRRORLIST_FILENAME),
'packages': r'^%s\/%s\/.+rpm$' % (_CACHEDIR_RE, _PACKAGES_RELATIVE_DIR),
'dbcache': r'^.+(solv|solvx)$',
}
logger = logging.getLogger("dnf")
def repo_id_invalid(repo_id):
# :api
"""Return index of an invalid character in the repo ID (if present)."""
first_invalid = libdnf.repo.Repo.verifyId(repo_id)
return None if first_invalid < 0 else first_invalid
def _pkg2payload(pkg, progress, *factories):
for fn in factories:
pload = fn(pkg, progress)
if pload is not None:
return pload
raise ValueError(_('no matching payload factory for %s') % pkg)
def _download_payloads(payloads, drpm, fail_fast=True):
# download packages
def _download_sort_key(payload):
return not hasattr(payload, 'delta')
drpm.err.clear()
targets = [pload._librepo_target()
for pload in sorted(payloads, key=_download_sort_key)]
errs = _DownloadErrors()
try:
libdnf.repo.PackageTarget.downloadPackages(libdnf.repo.VectorPPackageTarget(targets), fail_fast)
except RuntimeError as e:
errs._fatal = str(e)
drpm.wait()
# process downloading errors
errs._recoverable = drpm.err.copy()
for tgt in targets:
err = tgt.getErr()
if err is None or err.startswith('Not finished'):
continue
callbacks = tgt.getCallbacks()
payload = callbacks.package_pload
pkg = payload.pkg
if err == 'Already downloaded':
errs._skipped.add(pkg)
continue
pkg.repo._repo.expire()
errs._pkg_irrecoverable[pkg] = [err]
return errs
def _update_saving(saving, payloads, errs):
real, full = saving
for pload in payloads:
pkg = pload.pkg
if pkg in errs:
real += pload.download_size
continue
real += pload.download_size
full += pload._full_size
return real, full
class _DownloadErrors(object):
def __init__(self):
self._pkg_irrecoverable = {}
self._val_recoverable = {}
self._fatal = None
self._skipped = set()
def _irrecoverable(self):
if self._pkg_irrecoverable:
return self._pkg_irrecoverable
if self._fatal:
return {'': [self._fatal]}
return {}
@property
def _recoverable(self):
return self._val_recoverable
@_recoverable.setter
def _recoverable(self, new_dct):
self._val_recoverable = new_dct
def _bandwidth_used(self, pload):
if pload.pkg in self._skipped:
return 0
return pload.download_size
class _DetailedLibrepoError(Exception):
def __init__(self, librepo_err, source_url):
Exception.__init__(self)
self.librepo_code = librepo_err.args[0]
self.librepo_msg = librepo_err.args[1]
self.source_url = source_url
class _NullKeyImport(dnf.callback.KeyImport):
def _confirm(self, id, userid, fingerprint, url, timestamp):
return True
class Metadata(object):
def __init__(self, repo):
self._repo = repo
@property
def fresh(self):
# :api
return self._repo.fresh()
class PackageTargetCallbacks(libdnf.repo.PackageTargetCB):
def __init__(self, package_pload):
super(PackageTargetCallbacks, self).__init__()
self.package_pload = package_pload
def end(self, status, msg):
self.package_pload._end_cb(None, status, msg)
return 0
def progress(self, totalToDownload, downloaded):
self.package_pload._progress_cb(None, totalToDownload, downloaded)
return 0
def mirrorFailure(self, msg, url):
self.package_pload._mirrorfail_cb(None, msg, url)
return 0
class PackagePayload(dnf.callback.Payload):
def __init__(self, pkg, progress):
super(PackagePayload, self).__init__(progress)
self.callbacks = PackageTargetCallbacks(self)
self.pkg = pkg
def _end_cb(self, cbdata, lr_status, msg):
"""End callback to librepo operation."""
status = dnf.callback.STATUS_FAILED
if msg is None:
status = dnf.callback.STATUS_OK
elif msg.startswith('Not finished'):
return
elif lr_status == libdnf.repo.PackageTargetCB.TransferStatus_ALREADYEXISTS:
status = dnf.callback.STATUS_ALREADY_EXISTS
self.progress.end(self, status, msg)
def _mirrorfail_cb(self, cbdata, err, url):
self.progress.end(self, dnf.callback.STATUS_MIRROR, err)
def _progress_cb(self, cbdata, total, done):
try:
self.progress.progress(self, done)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
except_list = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.critical(''.join(except_list))
@property
def _full_size(self):
return self.download_size
def _librepo_target(self):
pkg = self.pkg
pkgdir = pkg.pkgdir
dnf.util.ensure_dir(pkgdir)
target_dct = {
'dest': pkgdir,
'resume': True,
'cbdata': self,
'progresscb': self._progress_cb,
'endcb': self._end_cb,
'mirrorfailurecb': self._mirrorfail_cb,
}
target_dct.update(self._target_params())
return libdnf.repo.PackageTarget(
pkg.repo._repo,
target_dct['relative_url'],
target_dct['dest'], target_dct['checksum_type'], target_dct['checksum'],
target_dct['expectedsize'], target_dct['base_url'], target_dct['resume'],
0, 0, self.callbacks)
class RPMPayload(PackagePayload):
def __str__(self):
return os.path.basename(self.pkg.location)
def _target_params(self):
pkg = self.pkg
ctype, csum = pkg.returnIdSum()
ctype_code = libdnf.repo.PackageTarget.checksumType(ctype)
if ctype_code == libdnf.repo.PackageTarget.ChecksumType_UNKNOWN:
logger.warning(_("unsupported checksum type: %s"), ctype)
return {
'relative_url': pkg.location,
'checksum_type': ctype_code,
'checksum': csum,
'expectedsize': pkg.downloadsize,
'base_url': pkg.baseurl,
}
@property
def download_size(self):
"""Total size of the download."""
return self.pkg.downloadsize
class RemoteRPMPayload(PackagePayload):
def __init__(self, remote_location, conf, progress):
super(RemoteRPMPayload, self).__init__("unused_object", progress)
self.remote_location = remote_location
self.remote_size = 0
self.conf = conf
s = (self.conf.releasever or "") + self.conf.substitutions.get('basearch')
digest = hashlib.sha256(s.encode('utf8')).hexdigest()[:16]
repodir = "commandline-" + digest
self.pkgdir = os.path.join(self.conf.cachedir, repodir, "packages")
dnf.util.ensure_dir(self.pkgdir)
self.local_path = os.path.join(self.pkgdir, self.__str__().lstrip("/"))
def __str__(self):
return os.path.basename(urllib.parse.unquote(self.remote_location))
def _progress_cb(self, cbdata, total, done):
self.remote_size = total
try:
self.progress.progress(self, done)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
except_list = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.critical(''.join(except_list))
def _librepo_target(self):
return libdnf.repo.PackageTarget(
self.conf._config, self.remote_location,
self.pkgdir, 0, None, 0, None,
True, 0, 0, self.callbacks)
@property
def download_size(self):
"""Total size of the download."""
return self.remote_size
class MDPayload(dnf.callback.Payload):
def __init__(self, progress):
super(MDPayload, self).__init__(progress)
self._text = ""
self._download_size = 0
self.fastest_mirror_running = False
self.mirror_failures = set()
def __str__(self):
if dnf.pycomp.PY3:
return self._text
else:
return self._text.encode('utf-8')
def __unicode__(self):
return self._text
def _progress_cb(self, cbdata, total, done):
self._download_size = total
self.progress.progress(self, done)
def _fastestmirror_cb(self, cbdata, stage, data):
if stage == libdnf.repo.RepoCB.FastestMirrorStage_DETECTION:
# pinging mirrors, this might take a while
msg = _('determining the fastest mirror (%s hosts).. ') % data
self.fastest_mirror_running = True
elif stage == libdnf.repo.RepoCB.FastestMirrorStage_STATUS and self.fastest_mirror_running:
# done.. report but ignore any errors
msg = 'error: %s\n' % data if data else 'done.\n'
else:
return
self.progress.message(msg)
def _mirror_failure_cb(self, cbdata, msg, url, metadata):
self.mirror_failures.add(msg)
msg = 'error: %s (%s).' % (msg, url)
logger.debug(msg)
@property
def download_size(self):
return self._download_size
@property
def progress(self):
return self._progress
@progress.setter
def progress(self, progress):
if progress is None:
progress = dnf.callback.NullDownloadProgress()
self._progress = progress
def start(self, text):
self._text = text
self.progress.start(1, 0)
def end(self):
self._download_size = 0
self.progress.end(self, None, None)
# use the local cache even if it's expired. download if there's no cache.
SYNC_LAZY = libdnf.repo.Repo.SyncStrategy_LAZY
# use the local cache, even if it's expired, never download.
SYNC_ONLY_CACHE = libdnf.repo.Repo.SyncStrategy_ONLY_CACHE
# try the cache, if it is expired download new md.
SYNC_TRY_CACHE = libdnf.repo.Repo.SyncStrategy_TRY_CACHE
class RepoCallbacks(libdnf.repo.RepoCB):
def __init__(self, repo):
super(RepoCallbacks, self).__init__()
self._repo = repo
self._md_pload = repo._md_pload
def start(self, what):
self._md_pload.start(what)
def end(self):
self._md_pload.end()
def progress(self, totalToDownload, downloaded):
self._md_pload._progress_cb(None, totalToDownload, downloaded)
return 0
def fastestMirror(self, stage, ptr):
self._md_pload._fastestmirror_cb(None, stage, ptr)
def handleMirrorFailure(self, msg, url, metadata):
self._md_pload._mirror_failure_cb(None, msg, url, metadata)
return 0
def repokeyImport(self, id, userid, fingerprint, url, timestamp):
return self._repo._key_import._confirm(id, userid, fingerprint, url, timestamp)
class Repo(dnf.conf.RepoConf):
# :api
DEFAULT_SYNC = SYNC_TRY_CACHE
def __init__(self, name=None, parent_conf=None):
# :api
super(Repo, self).__init__(section=name, parent=parent_conf)
self._config.this.disown() # _repo will be the owner of _config
self._repo = libdnf.repo.Repo(name if name else "", self._config)
self._md_pload = MDPayload(dnf.callback.NullDownloadProgress())
self._callbacks = RepoCallbacks(self)
self._callbacks.this.disown() # _repo will be the owner of callbacks
self._repo.setCallbacks(self._callbacks)
self._pkgdir = None
self._key_import = _NullKeyImport()
self.metadata = None # :api
self._repo.setSyncStrategy(SYNC_ONLY_CACHE if parent_conf and parent_conf.cacheonly else self.DEFAULT_SYNC)
if parent_conf:
self._repo.setSubstitutions(parent_conf.substitutions)
self._substitutions = dnf.conf.substitutions.Substitutions()
self._check_config_file_age = parent_conf.check_config_file_age \
if parent_conf is not None else True
@property
def id(self):
# :api
return self._repo.getId()
@property
def repofile(self):
# :api
return self._repo.getRepoFilePath()
@repofile.setter
def repofile(self, value):
self._repo.setRepoFilePath(value)
@property
def pkgdir(self):
# :api
if self._repo.isLocal():
return self._repo.getLocalBaseurl()
return self.cache_pkgdir()
def cache_pkgdir(self):
if self._pkgdir is not None:
return self._pkgdir
return os.path.join(self._repo.getCachedir(), _PACKAGES_RELATIVE_DIR)
@pkgdir.setter
def pkgdir(self, val):
# :api
self._pkgdir = val
@property
def _pubring_dir(self):
return os.path.join(self._repo.getCachedir(), 'pubring')
@property
def load_metadata_other(self):
return self._repo.getLoadMetadataOther()
@load_metadata_other.setter
def load_metadata_other(self, val):
self._repo.setLoadMetadataOther(val)
def __lt__(self, other):
return self.id < other.id
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.id)
def __setattr__(self, name, value):
super(Repo, self).__setattr__(name, value)
def disable(self):
# :api
self._repo.disable()
def enable(self):
# :api
self._repo.enable()
def add_metadata_type_to_download(self, metadata_type):
# :api
"""Ask for additional repository metadata type to download.
Given metadata_type is appended to the default metadata set when
repository is downloaded.
Parameters
----------
metadata_type: string
Example: add_metadata_type_to_download("productid")
"""
self._repo.addMetadataTypeToDownload(metadata_type)
def remove_metadata_type_from_download(self, metadata_type):
# :api
"""Stop asking for this additional repository metadata type
in download.
Given metadata_type is no longer downloaded by default
when this repository is downloaded.
Parameters
----------
metadata_type: string
Example: remove_metadata_type_from_download("productid")
"""
self._repo.removeMetadataTypeFromDownload(metadata_type)
def get_metadata_path(self, metadata_type):
# :api
"""Return path to the file with downloaded repository metadata of given type.
Parameters
----------
metadata_type: string
"""
return self._repo.getMetadataPath(metadata_type)
def get_metadata_content(self, metadata_type):
# :api
"""Return content of the file with downloaded repository metadata of given type.
Content of compressed metadata file is returned uncompressed.
Parameters
----------
metadata_type: string
"""
return self._repo.getMetadataContent(metadata_type)
def load(self):
# :api
"""Load the metadata for this repo.
Depending on the configuration and the age and consistence of data
available on the disk cache, either loads the metadata from the cache or
downloads them from the mirror, baseurl or metalink.
This method will by default not try to refresh already loaded data if
called repeatedly.
Returns True if this call to load() caused a fresh metadata download.
"""
ret = False
try:
ret = self._repo.load()
except (libdnf.error.Error, RuntimeError) as e:
if self._md_pload.mirror_failures:
msg = "Errors during downloading metadata for repository '%s':" % self.id
for failure in self._md_pload.mirror_failures:
msg += "\n - %s" % failure
logger.warning(msg)
raise dnf.exceptions.RepoError(str(e))
finally:
self._md_pload.mirror_failures = set()
self.metadata = Metadata(self._repo)
return ret
def _metadata_expire_in(self):
"""Get the number of seconds after which the cached metadata will expire.
Returns a tuple, boolean whether there even is cached metadata and the
number of seconds it will expire in. Negative number means the metadata
has expired already, None that it never expires.
"""
if not self.metadata:
self._repo.loadCache(False)
if self.metadata:
if self.metadata_expire == -1:
return True, None
expiration = self._repo.getExpiresIn()
if self._repo.isExpired():
expiration = min(0, expiration)
return True, expiration
return False, 0
def _set_key_import(self, key_import):
self._key_import = key_import
def set_progress_bar(self, progress):
# :api
self._md_pload.progress = progress
def get_http_headers(self):
# :api
"""Returns user defined http headers.
Returns
-------
headers : tuple of strings
"""
return self._repo.getHttpHeaders()
def set_http_headers(self, headers):
# :api
"""Sets http headers.
Sets new http headers and rewrites existing ones.
Parameters
----------
headers : tuple or list of strings
Example: set_http_headers(["User-Agent: Agent007", "MyFieldName: MyFieldValue"])
"""
self._repo.setHttpHeaders(headers)
def remote_location(self, location, schemes=('http', 'ftp', 'file', 'https')):
"""
:param location: relative location inside the repo
:param schemes: list of allowed protocols. Default is ('http', 'ftp', 'file', 'https')
:return: absolute url (string) or None
"""
def schemes_filter(url_list):
for url in url_list:
if schemes:
s = dnf.pycomp.urlparse.urlparse(url)[0]
if s in schemes:
return os.path.join(url, location.lstrip('/'))
else:
return os.path.join(url, location.lstrip('/'))
return None
if not location:
return None
mirrors = self._repo.getMirrors()
if mirrors:
return schemes_filter(mirrors)
elif self.baseurl:
return schemes_filter(self.baseurl)
| 20,804
|
Python
|
.py
| 527
| 31.455408
| 115
| 0.635393
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,729
|
__init__.py
|
rpm-software-management_dnf/dnf/__init__.py
|
# __init__.py
# The toplevel DNF package.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
import warnings
import dnf.pycomp
warnings.filterwarnings('once', category=DeprecationWarning, module=r'^dnf\..*$')
from dnf.const import VERSION
__version__ = VERSION # :api
import dnf.base
Base = dnf.base.Base # :api
import dnf.plugin
Plugin = dnf.plugin.Plugin # :api
# setup libraries
dnf.pycomp.urlparse.uses_fragment.append("media")
| 1,389
|
Python
|
.py
| 31
| 43.612903
| 81
| 0.781065
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,730
|
package.py
|
rpm-software-management_dnf/dnf/package.py
|
# package.py
# Module defining the dnf.Package class.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
""" Contains the dnf.Package class. """
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
import binascii
import dnf.exceptions
import dnf.rpm
import dnf.yum.misc
import hawkey
import libdnf.error
import libdnf.utils
import logging
import os
import rpm
logger = logging.getLogger("dnf")
class Package(hawkey.Package):
""" Represents a package. #:api """
DEBUGINFO_SUFFIX = "-debuginfo" # :api
DEBUGSOURCE_SUFFIX = "-debugsource" # :api
def __init__(self, initobject, base):
super(Package, self).__init__(initobject)
self.base = base
self._priv_chksum = None
self._repo = None
self._priv_size = None
@property
def _chksum(self):
if self._priv_chksum:
return self._priv_chksum
if self._from_cmdline:
chksum_type = dnf.yum.misc.get_default_chksum_type()
try:
chksum_val = libdnf.utils.checksum_value(chksum_type, self.location)
except libdnf.error.Error as e:
raise dnf.exceptions.MiscError(str(e))
return (hawkey.chksum_type(chksum_type),
binascii.unhexlify(chksum_val))
return super(Package, self).chksum
@_chksum.setter
def _chksum(self, val):
self._priv_chksum = val
@property
def _from_cmdline(self):
return self.reponame == hawkey.CMDLINE_REPO_NAME
@property
def _from_system(self):
return self.reponame == hawkey.SYSTEM_REPO_NAME
@property
def _from_repo(self):
"""
For installed packages returns id of repository from which the package was installed
prefixed with '@' (if such information is available in the history database). Otherwise
returns id of repository the package belongs to (@System for installed packages of unknown
origin)
"""
pkgrepo = None
if self._from_system:
pkgrepo = self.base.history.repo(self)
if pkgrepo:
return '@' + pkgrepo
return self.reponame
@property
def from_repo(self):
# :api
if self._from_system:
return self.base.history.repo(self)
return ""
@property
def _header(self):
"""
Returns the header of a locally present rpm package file. As opposed to
self.get_header(), which retrieves the header of an installed package
from rpmdb.
"""
return dnf.rpm._header(self.localPkg())
@property
def _size(self):
if self._priv_size:
return self._priv_size
return super(Package, self).size
@_size.setter
def _size(self, val):
self._priv_size = val
@property
def _pkgid(self):
if self.hdr_chksum is None:
return None
(_, chksum) = self.hdr_chksum
return binascii.hexlify(chksum)
@property
def source_name(self):
# :api
"""
returns name of source package
e.g. krb5-libs -> krb5
"""
if self.sourcerpm is not None:
# trim suffix first
srcname = dnf.util.rtrim(self.sourcerpm, ".src.rpm")
# sourcerpm should be in form of name-version-release now, so we
# will strip the two rightmost parts separated by dash.
# Using rtrim with version and release of self is not sufficient
# because the package can have different version to the source
# package.
srcname = srcname.rsplit('-', 2)[0]
else:
srcname = None
return srcname
@property
def debug_name(self):
# :api
"""
Returns name of the debuginfo package for this package.
If this package is a debuginfo package, returns its name.
If this package is a debugsource package, returns the debuginfo package
for the base package.
e.g. kernel-PAE -> kernel-PAE-debuginfo
"""
if self.name.endswith(self.DEBUGINFO_SUFFIX):
return self.name
name = self.name
if self.name.endswith(self.DEBUGSOURCE_SUFFIX):
name = name[:-len(self.DEBUGSOURCE_SUFFIX)]
return name + self.DEBUGINFO_SUFFIX
@property
def debugsource_name(self):
# :api
"""
Returns name of the debugsource package for this package.
e.g. krb5-libs -> krb5-debugsource
"""
# assuming self.source_name is None only for a source package
src_name = self.source_name if self.source_name is not None else self.name
return src_name + self.DEBUGSOURCE_SUFFIX
def get_header(self):
"""
Returns the rpm header of the package if it is installed. If not
installed, returns None. The header is not cached, it is retrieved from
rpmdb on every call. In case of a failure (e.g. when the rpmdb changes
between loading the data and calling this method), raises an instance
of PackageNotFoundError.
"""
if not self._from_system:
return None
try:
# RPMDBI_PACKAGES stands for the header of the package
return next(self.base._ts.dbMatch(rpm.RPMDBI_PACKAGES, self.rpmdbid))
except StopIteration:
raise dnf.exceptions.PackageNotFoundError("Package not found when attempting to retrieve header", str(self))
@property
def source_debug_name(self):
# :api
"""
returns name of debuginfo package for source package of given package
e.g. krb5-libs -> krb5-debuginfo
"""
# assuming self.source_name is None only for a source package
src_name = self.source_name if self.source_name is not None else self.name
return src_name + self.DEBUGINFO_SUFFIX
@property # yum compatibility attribute
def idx(self):
""" Always type it to int, rpm bindings expect it like that. """
return int(self.rpmdbid)
@property # yum compatibility attribute
def repoid(self):
return self.reponame
@property # yum compatibility attribute
def pkgtup(self):
return (self.name, self.arch, str(self.e), self.v, self.r)
@property # yum compatibility attribute
def repo(self):
if self._repo:
return self._repo
return self.base.repos[self.reponame]
@repo.setter
def repo(self, val):
self._repo = val
@property
def reason(self):
if self.repoid != hawkey.SYSTEM_REPO_NAME:
return None
return self.base.history.rpm.get_reason_name(self)
@property # yum compatibility attribute
def relativepath(self):
return self.location
@property # yum compatibility attribute
def a(self):
return self.arch
@property # yum compatibility attribute
def e(self):
return self.epoch
@property # yum compatibility attribute
def v(self):
return self.version
@property # yum compatibility attribute
def r(self):
return self.release
@property # yum compatibility attribute
def ui_from_repo(self):
return self.reponame
# yum compatibility method
def evr_eq(self, pkg):
return self.evr_cmp(pkg) == 0
# yum compatibility method
def evr_gt(self, pkg):
return self.evr_cmp(pkg) > 0
# yum compatibility method
def evr_lt(self, pkg):
return self.evr_cmp(pkg) < 0
# yum compatibility method
def getDiscNum(self):
return self.medianr
# yum compatibility method
def localPkg(self):
""" Package's location in the filesystem.
For packages in remote repo returns where the package will be/has
been downloaded.
"""
if self._from_cmdline:
return self.location
loc = self.location
if self.repo._repo.isLocal() and self.baseurl and self.baseurl.startswith('file://'):
return os.path.join(self.get_local_baseurl(), loc.lstrip("/"))
if not self._is_local_pkg():
loc = os.path.basename(loc)
return os.path.join(self.pkgdir, loc.lstrip("/"))
def remote_location(self, schemes=('http', 'ftp', 'file', 'https')):
# :api
"""
The location from where the package can be downloaded from. Returns None for installed and
commandline packages.
:param schemes: list of allowed protocols. Default is ('http', 'ftp', 'file', 'https')
:return: location (string) or None
"""
if self._from_system or self._from_cmdline:
return None
if self.baseurl:
return os.path.join(self.baseurl, self.location.lstrip("/"))
return self.repo.remote_location(self.location, schemes)
def _is_local_pkg(self):
if self._from_system:
return True
if '://' in self.location and not self.location.startswith('file://'):
# the package has a remote URL as its location
return False
return self._from_cmdline or \
(self.repo._repo.isLocal() and (not self.baseurl or self.baseurl.startswith('file://')))
@property
def pkgdir(self):
if (self.repo._repo.isLocal() and not self._is_local_pkg()):
return self.repo.cache_pkgdir()
else:
return self.repo.pkgdir
# yum compatibility method
def returnIdSum(self):
""" Return the chksum type and chksum string how the legacy yum expects
it.
"""
if self._chksum is None:
return (None, None)
(chksum_type, chksum) = self._chksum
return (hawkey.chksum_name(chksum_type), binascii.hexlify(chksum).decode())
# yum compatibility method
def verifyLocalPkg(self):
if self._from_system:
raise ValueError("Can not verify an installed package.")
if self._from_cmdline:
return True # local package always verifies against itself
(chksum_type, chksum) = self.returnIdSum()
try:
return libdnf.utils.checksum_check(chksum_type, self.localPkg(), chksum)
except libdnf.error.Error as e:
raise dnf.exceptions.MiscError(str(e))
| 11,302
|
Python
|
.py
| 292
| 30.791096
| 120
| 0.642212
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,731
|
base.py
|
rpm-software-management_dnf/dnf/base.py
|
# Copyright 2005 Duke University
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Supplies the Base class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import dnf
import libdnf.transaction
from copy import deepcopy
from dnf.comps import CompsQuery
from dnf.i18n import _, P_, ucd
from dnf.util import _parse_specs
from dnf.db.history import SwdbInterface
from dnf.yum import misc
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
import datetime
import dnf.callback
import dnf.comps
import dnf.conf
import dnf.conf.read
import dnf.crypto
import dnf.dnssec
import dnf.drpm
import dnf.exceptions
import dnf.goal
import dnf.history
import dnf.lock
import dnf.logging
# WITH_MODULES is used by ansible (lib/ansible/modules/packaging/os/dnf.py)
try:
import dnf.module.module_base
WITH_MODULES = True
except ImportError:
WITH_MODULES = False
import dnf.persistor
import dnf.plugin
import dnf.query
import dnf.repo
import dnf.repodict
import dnf.rpm.connection
import dnf.rpm.miscutils
import dnf.rpm.transaction
import dnf.sack
import dnf.selector
import dnf.subject
import dnf.transaction
import dnf.util
import dnf.yum.rpmtrans
import functools
import gc
import hawkey
import itertools
import logging
import math
import os
import operator
import re
import rpm
import tempfile
import time
import shutil
logger = logging.getLogger("dnf")
class Base(object):
def __init__(self, conf=None):
# :api
self._closed = False
self._conf = conf or self._setup_default_conf()
self._goal = None
self._repo_persistor = None
self._sack = None
self._transaction = None
self._priv_ts = None
self._comps = None
self._comps_trans = dnf.comps.TransactionBunch()
self._history = None
self._tempfiles = set()
self._trans_tempfiles = set()
self._ds_callback = dnf.callback.Depsolve()
self._logging = dnf.logging.Logging()
self._repos = dnf.repodict.RepoDict()
self._rpm_probfilter = set([rpm.RPMPROB_FILTER_OLDPACKAGE])
self._plugins = dnf.plugin.Plugins()
self._trans_success = False
self._trans_install_set = False
self._tempfile_persistor = None
# self._update_security_filters is used by ansible
self._update_security_filters = []
self._update_security_options = {}
self._allow_erasing = False
self._repo_set_imported_gpg_keys = set()
self.output = None
def __enter__(self):
return self
def __exit__(self, *exc_args):
self.close()
def __del__(self):
self.close()
def _add_tempfiles(self, files):
if self._transaction:
self._trans_tempfiles.update(files)
elif self.conf.destdir:
pass
else:
self._tempfiles.update(files)
def _add_repo_to_sack(self, repo):
repo.load()
mdload_flags = dict(load_presto=repo.deltarpm,
load_updateinfo=True)
if 'filelists' in self.conf.optional_metadata_types:
mdload_flags["load_filelists"] = True
if repo.load_metadata_other:
mdload_flags["load_other"] = True
try:
self._sack.load_repo(repo._repo, build_cache=True, **mdload_flags)
except hawkey.Exception as e:
logger.debug(_("loading repo '{}' failure: {}").format(repo.id, e))
raise dnf.exceptions.RepoError(
_("Loading repository '{}' has failed").format(repo.id))
@staticmethod
def _setup_default_conf():
conf = dnf.conf.Conf()
subst = conf.substitutions
if 'releasever' not in subst:
subst['releasever'] = \
dnf.rpm.detect_releasever(conf.installroot)
return conf
def _setup_modular_excludes(self):
hot_fix_repos = [i.id for i in self.repos.iter_enabled() if i.module_hotfixes]
try:
solver_errors = self.sack.filter_modules(
self._moduleContainer, hot_fix_repos, self.conf.installroot,
self.conf.module_platform_id, update_only=False, debugsolver=self.conf.debug_solver,
module_obsoletes=self.conf.module_obsoletes)
except hawkey.Exception as e:
raise dnf.exceptions.Error(ucd(e))
if solver_errors:
logger.warning(
dnf.module.module_base.format_modular_solver_errors(solver_errors[0]))
    def _setup_excludes_includes(self, only_main=False):
        """Apply includepkgs/excludepkgs from repos and main config to the sack.

        :param only_main: skip per-repo includes/excludes, apply only the
            global (main) configuration.

        'disable_excludes' may name repo ids, 'main', or 'all'; 'all'
        short-circuits everything except modular excludes.
        """
        disabled = set(self.conf.disable_excludes)
        if 'all' in disabled and WITH_MODULES:
            self._setup_modular_excludes()
            return
        repo_includes = []
        repo_excludes = []
        # first evaluate repo specific includes/excludes
        if not only_main:
            for r in self.repos.iter_enabled():
                if r.id in disabled:
                    continue
                if len(r.includepkgs) > 0:
                    incl_query = self.sack.query().filterm(empty=True)
                    for incl in set(r.includepkgs):
                        subj = dnf.subject.Subject(incl)
                        incl_query = incl_query.union(subj.get_best_query(
                            self.sack, with_nevra=True, with_provides=False, with_filenames=False))
                    # limit the matches to packages from this repo only
                    incl_query.filterm(reponame=r.id)
                    repo_includes.append((incl_query.apply(), r.id))
                excl_query = self.sack.query().filterm(empty=True)
                for excl in set(r.excludepkgs):
                    subj = dnf.subject.Subject(excl)
                    excl_query = excl_query.union(subj.get_best_query(
                        self.sack, with_nevra=True, with_provides=False, with_filenames=False))
                excl_query.filterm(reponame=r.id)
                if excl_query:
                    repo_excludes.append((excl_query, r.id))
        # then main (global) includes/excludes because they can mask
        # repo specific settings
        if 'main' not in disabled:
            include_query = self.sack.query().filterm(empty=True)
            if len(self.conf.includepkgs) > 0:
                for incl in set(self.conf.includepkgs):
                    subj = dnf.subject.Subject(incl)
                    include_query = include_query.union(subj.get_best_query(
                        self.sack, with_nevra=True, with_provides=False, with_filenames=False))
            exclude_query = self.sack.query().filterm(empty=True)
            for excl in set(self.conf.excludepkgs):
                subj = dnf.subject.Subject(excl)
                exclude_query = exclude_query.union(subj.get_best_query(
                    self.sack, with_nevra=True, with_provides=False, with_filenames=False))
            if len(self.conf.includepkgs) > 0:
                self.sack.add_includes(include_query)
                self.sack.set_use_includes(True)
            if exclude_query:
                self.sack.add_excludes(exclude_query)
        # repo-specific rules are pushed after main so repoid scoping applies
        if repo_includes:
            for query, repoid in repo_includes:
                self.sack.add_includes(query)
                self.sack.set_use_includes(True, repoid)
        if repo_excludes:
            for query, repoid in repo_excludes:
                self.sack.add_excludes(query)
        if not only_main and WITH_MODULES:
            self._setup_modular_excludes()
    def _store_persistent_data(self):
        # Persist which enabled repos are expired (skipped in cacheonly mode)
        # and which downloaded temp files should survive this run.
        if self._repo_persistor and not self.conf.cacheonly:
            expired = [r.id for r in self.repos.iter_enabled()
                       if (r.metadata and r._repo.isExpired())]
            self._repo_persistor.expired_to_add.update(expired)
            self._repo_persistor.save()
        if self._tempfile_persistor:
            self._tempfile_persistor.save()
    @property
    def comps(self):
        # :api
        """Comps (package groups) metadata; loaded lazily on first access."""
        if self._comps is None:
            self.read_comps(arch_filter=True)
        return self._comps
    @property
    def conf(self):
        # :api
        """The dnf configuration object for this Base."""
        return self._conf
    @property
    def repos(self):
        # :api
        """RepoDict of all configured repositories."""
        return self._repos
    @repos.deleter
    def repos(self):
        # :api
        # Drop the repo dict entirely; reset(repos=True) recreates a fresh one.
        self._repos = None
    @property
    @dnf.util.lazyattr("_priv_rpmconn")
    def _rpmconn(self):
        # RPM connection for the configured installroot; lazyattr caches the
        # created instance on self._priv_rpmconn.
        return dnf.rpm.connection.RpmConnection(self.conf.installroot)
    @property
    def sack(self):
        # :api
        """The package sack; None until fill_sack() has been called."""
        return self._sack
    @property
    def _moduleContainer(self):
        # Lazily create the libdnf modular metadata container. It is cached
        # on the sack, so an initialized sack is a hard requirement.
        if self.sack is None:
            raise dnf.exceptions.Error("Sack was not initialized")
        if self.sack._moduleContainer is None:
            self.sack._moduleContainer = libdnf.module.ModulePackageContainer(
                False, self.conf.installroot, self.conf.substitutions["arch"], self.conf.persistdir)
        return self.sack._moduleContainer
    @property
    def transaction(self):
        # :api
        """The resolved transaction, or None before resolve() succeeds."""
        return self._transaction
    @transaction.setter
    def transaction(self, value):
        # :api
        # A transaction may only be installed once per resolve cycle.
        if self._transaction:
            raise ValueError('transaction already set')
        self._transaction = value
    def _activate_persistor(self):
        # Create the persistor that remembers repo expiry state across runs.
        self._repo_persistor = dnf.persistor.RepoPersistor(self.conf.cachedir)
    def init_plugins(self, disabled_glob=(), enable_plugins=(), cli=None):
        # :api
        """Load plugins and run their __init__()."""
        # Loading is skipped when plugins are disabled in config, but
        # _run_init is invoked unconditionally (it is a no-op with no
        # plugins loaded).
        if self.conf.plugins:
            self._plugins._load(self.conf, disabled_glob, enable_plugins)
        self._plugins._run_init(self, cli)
    def pre_configure_plugins(self):
        # :api
        """Run plugins pre_configure() method."""
        self._plugins._run_pre_config()
    def configure_plugins(self):
        # :api
        """Run plugins configure() method."""
        self._plugins._run_config()
    def unload_plugins(self):
        # :api
        """Run plugins unload() method."""
        self._plugins._unload()
    def update_cache(self, timer=False):
        # :api
        """Refresh repository metadata ("makecache").

        :param timer: when True, behave as the periodic timer job: bail out
            on metered connections / battery power / a recent cache, and
            limit mirror retries.
        :return: True when metadata was synced, False when refresh was
            skipped.
        """
        period = self.conf.metadata_timer_sync
        if self._repo_persistor is None:
            self._activate_persistor()
        persistor = self._repo_persistor
        if timer:
            if dnf.util.on_metered_connection():
                msg = _('Metadata timer caching disabled '
                        'when running on metered connection.')
                logger.info(msg)
                return False
            if dnf.util.on_ac_power() is False:
                msg = _('Metadata timer caching disabled '
                        'when running on a battery.')
                logger.info(msg)
                return False
            if period <= 0:
                msg = _('Metadata timer caching disabled.')
                logger.info(msg)
                return False
            since_last_makecache = persistor.since_last_makecache()
            if since_last_makecache is not None and since_last_makecache < period:
                logger.info(_('Metadata cache refreshed recently.'))
                return False
            # timer job: don't hammer mirrors, one try per repo is enough
            for repo in self.repos.values():
                repo._repo.setMaxMirrorTries(1)
        if not self.repos._any_enabled():
            logger.info(_('There are no enabled repositories in "{}".').format(
                '", "'.join(self.conf.reposdir)))
            return False
        for r in self.repos.iter_enabled():
            (is_cache, expires_in) = r._metadata_expire_in()
            if expires_in is None:
                logger.info(_('%s: will never be expired and will not be refreshed.'), r.id)
            elif not is_cache or expires_in <= 0:
                logger.debug(_('%s: has expired and will be refreshed.'), r.id)
                r._repo.expire()
            elif timer and expires_in < period:
                # expires within the checking period:
                msg = _("%s: metadata will expire after %d seconds and will be refreshed now")
                logger.debug(msg, r.id, expires_in)
                r._repo.expire()
            else:
                logger.debug(_('%s: will expire after %d seconds.'), r.id,
                             expires_in)
        if timer:
            persistor.reset_last_makecache = True
        self.fill_sack(load_system_repo=False, load_available_repos=True)  # performs the md sync
        logger.info(_('Metadata cache created.'))
        return True
    def fill_sack(self, load_system_repo=True, load_available_repos=True):
        # :api
        """Prepare the Sack and the Goal objects.

        :param load_system_repo: load the installed-package (@System) repo;
            the special value 'auto' tolerates an IOError while loading it.
        :param load_available_repos: load all enabled remote repositories;
            when False, all repos are disabled instead.
        :return: the newly built sack.
        """
        timer = dnf.logging.Timer('sack setup')
        self.reset(sack=True, goal=True)
        self._sack = dnf.sack._build_sack(self)
        # metadata downloads are serialized between dnf processes
        lock = dnf.lock.build_metadata_lock(self.conf.cachedir, self.conf.exit_on_lock)
        with lock:
            if load_system_repo is not False:
                try:
                    # FIXME: If build_cache=True, @System.solv is incorrectly updated in install-
                    # remove loops
                    self._sack.load_system_repo(build_cache=False)
                except IOError:
                    if load_system_repo != 'auto':
                        raise
            if load_available_repos:
                error_repos = []
                mts = 0
                age = time.time()
                # Iterate over installed GPG keys and check their validity using DNSSEC
                if self.conf.gpgkey_dns_verification:
                    dnf.dnssec.RpmImportedKeys.check_imported_keys_validity()
                for r in self.repos.iter_enabled():
                    try:
                        self._add_repo_to_sack(r)
                        # track newest timestamp and oldest age across repos
                        if r._repo.getTimestamp() > mts:
                            mts = r._repo.getTimestamp()
                        if r._repo.getAge() < age:
                            age = r._repo.getAge()
                        logger.debug(_("%s: using metadata from %s."), r.id,
                                     dnf.util.normalize_time(
                                         r._repo.getMaxTimestamp()))
                    except dnf.exceptions.RepoError as e:
                        r._repo.expire()
                        if r.skip_if_unavailable is False:
                            raise
                        # skip_if_unavailable: warn, remember and disable repo
                        logger.warning("Error: %s", e)
                        error_repos.append(r.id)
                        r.disable()
                if error_repos:
                    logger.warning(
                        _("Ignoring repositories: %s"), ', '.join(error_repos))
                if self.repos._any_enabled():
                    if age != 0 and mts != 0:
                        logger.info(_("Last metadata expiration check: %s ago on %s."),
                                    datetime.timedelta(seconds=int(age)),
                                    dnf.util.normalize_time(mts))
            else:
                self.repos.all().disable()
        conf = self.conf
        self._sack._configure(conf.installonlypkgs, conf.installonly_limit, conf.allow_vendor_change)
        self._setup_excludes_includes()
        timer()
        self._goal = dnf.goal.Goal(self._sack)
        self._goal.protect_running_kernel = conf.protect_running_kernel
        self._plugins.run_sack()
        return self._sack
    def fill_sack_from_repos_in_cache(self, load_system_repo=True):
        # :api
        """
        Prepare Sack and Goal objects and also load all enabled repositories from cache only,
        it doesn't download anything and it doesn't check if metadata are expired.
        If there is not enough metadata present (repomd.xml or both primary.xml and solv file
        are missing) given repo is either skipped or it throws a RepoError exception depending
        on skip_if_unavailable configuration.
        """
        timer = dnf.logging.Timer('sack setup')
        self.reset(sack=True, goal=True)
        self._sack = dnf.sack._build_sack(self)
        lock = dnf.lock.build_metadata_lock(self.conf.cachedir, self.conf.exit_on_lock)
        with lock:
            if load_system_repo is not False:
                try:
                    # FIXME: If build_cache=True, @System.solv is incorrectly updated in install-
                    # remove loops
                    self._sack.load_system_repo(build_cache=False)
                except IOError:
                    if load_system_repo != 'auto':
                        raise
            error_repos = []
            # Iterate over installed GPG keys and check their validity using DNSSEC
            if self.conf.gpgkey_dns_verification:
                dnf.dnssec.RpmImportedKeys.check_imported_keys_validity()
            for repo in self.repos.iter_enabled():
                try:
                    # cache-only load: no downloads, missing pieces tolerated
                    repo._repo.loadCache(throwExcept=True, ignoreMissing=True)
                    mdload_flags = dict(load_presto=repo.deltarpm,
                                        load_updateinfo=True)
                    if 'filelists' in self.conf.optional_metadata_types:
                        mdload_flags["load_filelists"] = True
                    if repo.load_metadata_other:
                        mdload_flags["load_other"] = True
                    self._sack.load_repo(repo._repo, **mdload_flags)
                    logger.debug(_("%s: using metadata from %s."), repo.id,
                                 dnf.util.normalize_time(
                                     repo._repo.getMaxTimestamp()))
                except (RuntimeError, hawkey.Exception) as e:
                    if repo.skip_if_unavailable is False:
                        raise dnf.exceptions.RepoError(
                            _("loading repo '{}' failure: {}").format(repo.id, e))
                    else:
                        logger.debug(_("loading repo '{}' failure: {}").format(repo.id, e))
                    error_repos.append(repo.id)
                    repo.disable()
            if error_repos:
                logger.warning(
                    _("Ignoring repositories: %s"), ', '.join(error_repos))
        conf = self.conf
        self._sack._configure(conf.installonlypkgs, conf.installonly_limit, conf.allow_vendor_change)
        self._setup_excludes_includes()
        timer()
        self._goal = dnf.goal.Goal(self._sack)
        self._goal.protect_running_kernel = conf.protect_running_kernel
        self._plugins.run_sack()
        return self._sack
    def _finalize_base(self):
        """Clean up package caches and persist state at shutdown.

        Called from close(); decides which downloaded packages to delete
        based on keepcache config and transaction success.
        """
        self._tempfile_persistor = dnf.persistor.TempfilePersistor(
            self.conf.cachedir)
        if not self.conf.keepcache:
            self._clean_packages(self._tempfiles)
            if self._trans_success:
                # also delete files remembered from earlier failed runs
                self._trans_tempfiles.update(
                    self._tempfile_persistor.get_saved_tempfiles())
                self._tempfile_persistor.empty()
                if self._trans_install_set:
                    self._clean_packages(self._trans_tempfiles)
            else:
                # transaction failed: keep the files for a retry
                self._tempfile_persistor.tempfiles_to_add.update(
                    self._trans_tempfiles)
        if self._tempfile_persistor.tempfiles_to_add:
            logger.info(_("The downloaded packages were saved in cache "
                          "until the next successful transaction."))
            logger.info(_("You can remove cached packages by executing "
                          "'%s'."), "{prog} clean packages".format(prog=dnf.util.MAIN_PROG))
        # Do not trigger the lazy creation:
        if self._history is not None:
            self.history.close()
        self._store_persistent_data()
        self._closeRpmDB()
        self._trans_success = False
    def close(self):
        # :api
        """Close all potential handles and clean cache.

        Typically the handles are to data sources and sinks.
        """
        # Idempotent: subsequent calls are no-ops thanks to _closed.
        if self._closed:
            return
        logger.log(dnf.logging.DDEBUG, 'Cleaning up.')
        self._closed = True
        self._finalize_base()
        self.reset(sack=True, repos=True, goal=True)
        self._plugins = None
def read_all_repos(self, opts=None):
# :api
"""Read repositories from the main conf file and from .repo files."""
reader = dnf.conf.read.RepoReader(self.conf, opts)
for repo in reader:
try:
self.repos.add(repo)
except dnf.exceptions.ConfigError as e:
logger.warning(e)
    def reset(self, sack=False, repos=False, goal=False):
        # :api
        """Make the Base object forget about various things.

        :param sack: drop the sack.
        :param repos: replace the repo dict with an empty one.
        :param goal: drop the goal, module-container state, history handle,
            comps transaction and the resolved transaction.
        """
        if sack:
            self._sack = None
        if repos:
            self._repos = dnf.repodict.RepoDict()
        if goal:
            self._goal = None
            if self._sack is not None:
                # keep a usable (empty) goal when a sack survives the reset
                self._goal = dnf.goal.Goal(self._sack)
                self._goal.protect_running_kernel = self.conf.protect_running_kernel
            if self._sack and self._moduleContainer:
                # sack must be set to enable operations on moduleContainer
                self._moduleContainer.rollback()
            if self._history is not None:
                self.history.close()
            self._comps_trans = dnf.comps.TransactionBunch()
            self._transaction = None
        self._update_security_filters = []
        if sack and goal:
            # We've just done this, above:
            #
            #      _sack            _goal
            #        |                |
            #   -- [CUT] --      -- [CUT] --
            #        |                |
            #        v |              v
            # +----------------+  [C] +-------------+
            # | DnfSack object | <-[U]- | Goal object |
            # +----------------+  [T] +-------------+
            #   |^     |^    |^       |
            #   ||     ||    ||
            #   ||     ||    ||       |
            # +--||----||----||---+  [C]
            # |  v|    v|    v|   | <--[U]-- _transaction
            # |  Pkg1  Pkg2  PkgN |  [T]
            # |                   |  |
            # | Transaction object |
            # +-------------------+
            #
            # At this point, the DnfSack object would be released only
            # eventually, by Python's generational garbage collector, due to the
            # cyclic references DnfSack<->Pkg1 ... DnfSack<->PkgN.
            #
            # The delayed release is a problem: the DnfSack object may
            # (indirectly) own "page file" file descriptors in libsolv, via
            # libdnf. For example,
            #
            #   sack->priv->pool->repos[1]->repodata[1]->store.pagefd = 7
            #   sack->priv->pool->repos[1]->repodata[2]->store.pagefd = 8
            #
            # These file descriptors are closed when the DnfSack object is
            # eventually released, that is, when dnf_sack_finalize() (in libdnf)
            # calls pool_free() (in libsolv).
            #
            # We need that to happen right now, as callers may want to unmount
            # the filesystems which those file descriptors refer to immediately
            # after reset() returns. Therefore, force a garbage collection here.
            gc.collect()
    def _closeRpmDB(self):
        """Closes down the instances of rpmdb that could be open."""
        # Delegates to the _ts deleter, which closes and clears the cached
        # transaction set.
        del self._ts
    # Mapping of 'tsflags' config option values to librpm transaction flags.
    _TS_FLAGS_TO_RPM = {'noscripts': rpm.RPMTRANS_FLAG_NOSCRIPTS,
                        'notriggers': rpm.RPMTRANS_FLAG_NOTRIGGERS,
                        'nodocs': rpm.RPMTRANS_FLAG_NODOCS,
                        'test': rpm.RPMTRANS_FLAG_TEST,
                        'justdb': rpm.RPMTRANS_FLAG_JUSTDB,
                        'nocontexts': rpm.RPMTRANS_FLAG_NOCONTEXTS,
                        'nocrypto': rpm.RPMTRANS_FLAG_NOFILEDIGEST,
                        'deploops': rpm.RPMTRANS_FLAG_DEPLOOPS}
    if hasattr(rpm, 'RPMTRANS_FLAG_NOCAPS'):
        # Introduced in rpm-4.14
        _TS_FLAGS_TO_RPM['nocaps'] = rpm.RPMTRANS_FLAG_NOCAPS
    # 'nocrypto' additionally disables signature and digest verification.
    _TS_VSFLAGS_TO_RPM = {'nocrypto': rpm._RPMVSF_NOSIGNATURES |
                          rpm._RPMVSF_NODIGESTS}
    @property
    def goal(self):
        # Current hawkey Goal (depsolver input); rebuilt by fill_sack()/reset().
        return self._goal
    @property
    def _ts(self):
        """Set up the RPM transaction set that will be used
        for all the work."""
        # Lazily created and cached on self._priv_ts; released via `del self._ts`.
        if self._priv_ts is not None:
            return self._priv_ts
        self._priv_ts = dnf.rpm.transaction.TransactionWrapper(
            self.conf.installroot)
        self._priv_ts.setFlags(0)  # reset everything.
        for flag in self.conf.tsflags:
            rpm_flag = self._TS_FLAGS_TO_RPM.get(flag)
            if rpm_flag is None:
                logger.critical(_('Invalid tsflag in config file: %s'), flag)
                continue
            self._priv_ts.addTsFlag(rpm_flag)
            # some tsflags also imply verification-signature flags
            vs_flag = self._TS_VSFLAGS_TO_RPM.get(flag)
            if vs_flag is not None:
                self._priv_ts.pushVSFlags(vs_flag)
        if not self.conf.diskspacecheck:
            self._rpm_probfilter.add(rpm.RPMPROB_FILTER_DISKSPACE)
        if self.conf.ignorearch:
            self._rpm_probfilter.add(rpm.RPMPROB_FILTER_IGNOREARCH)
        # OR all accumulated problem-filter bits into one mask
        probfilter = functools.reduce(operator.or_, self._rpm_probfilter, 0)
        self._priv_ts.setProbFilter(probfilter)
        return self._priv_ts
    @_ts.deleter
    def _ts(self):
        """Releases the RPM transaction set. """
        # Safe to call when no transaction set was ever created.
        if self._priv_ts is None:
            return
        self._priv_ts.close()
        del self._priv_ts
        self._priv_ts = None
    def read_comps(self, arch_filter=False):
        # :api
        """Create the groups object to access the comps metadata.

        :param arch_filter: restrict the loaded comps data to the
            configured basearch.
        :return: the populated Comps object (also kept on self._comps).
        """
        timer = dnf.logging.Timer('loading comps')
        self._comps = dnf.comps.Comps()
        logger.log(dnf.logging.DDEBUG, 'Getting group metadata')
        for repo in self.repos.iter_enabled():
            if not repo.enablegroups:
                continue
            if not repo.metadata:
                continue
            comps_fn = repo._repo.getCompsFn()
            if not comps_fn:
                continue
            logger.log(dnf.logging.DDEBUG,
                       'Adding group file from repository: %s', repo.id)
            gen_dir = os.path.join(os.path.dirname(comps_fn), 'gen')
            gen_file = os.path.join(gen_dir, 'groups.xml')
            temp_file = None
            try:
                if not os.path.exists(gen_dir):
                    os.makedirs(gen_dir, mode=0o755)
                misc.decompress(comps_fn, dest=gen_file, check_timestamps=True)
            except (PermissionError, dnf.exceptions.MiscError):
                # cache dir not writable: fall back to a temporary file
                temp_file = tempfile.NamedTemporaryFile()
                gen_file = temp_file.name
                misc.decompress(comps_fn, dest=gen_file, check_timestamps=False)
            try:
                self._comps._add_from_xml_filename(gen_file)
            except dnf.exceptions.CompsError as e:
                # broken comps in one repo must not abort the whole load
                msg = _('Failed to add groups file for repository: %s - %s')
                logger.critical(msg, repo.id, e)
            if temp_file:
                temp_file.close()
        if arch_filter:
            self._comps._i.arch_filter(
                [self._conf.substitutions['basearch']])
        timer()
        return self._comps
def _getHistory(self):
"""auto create the history object that to access/append the transaction
history information. """
if self._history is None:
releasever = self.conf.releasever
self._history = SwdbInterface(self.conf.persistdir, releasever=releasever)
return self._history
history = property(fget=lambda self: self._getHistory(),
fset=lambda self, value: setattr(
self, "_history", value),
fdel=lambda self: setattr(self, "_history", None),
doc="DNF SWDB Interface Object")
    def _goal2transaction(self, goal):
        """Translate a solved hawkey goal into an SWDB transaction.

        Walks downgrades, reinstalls, installs, upgrades and erasures from
        the goal, feeding the depsolve callback and building the history
        transaction object that is returned.
        """
        ts = self.history.rpm
        all_obsoleted = set(goal.list_obsoleted())
        installonly_query = self._get_installonly_query()
        installonly_query.apply()
        installonly_query_installed = installonly_query.installed().apply()
        for pkg in goal.list_downgrades():
            obs = goal.obsoleted_by_package(pkg)
            downgraded = obs[0]
            self._ds_callback.pkg_added(downgraded, 'dd')
            self._ds_callback.pkg_added(pkg, 'd')
            ts.add_downgrade(pkg, downgraded, obs[1:])
        for pkg in goal.list_reinstalls():
            self._ds_callback.pkg_added(pkg, 'r')
            obs = goal.obsoleted_by_package(pkg)
            nevra_pkg = str(pkg)
            # reinstall could obsolete multiple packages with the same NEVRA or different NEVRA
            # Set the package with the same NEVRA as reinstalled
            obsoletes = []
            for obs_pkg in obs:
                if str(obs_pkg) == nevra_pkg:
                    obsoletes.insert(0, obs_pkg)
                else:
                    obsoletes.append(obs_pkg)
            reinstalled = obsoletes[0]
            ts.add_reinstall(pkg, reinstalled, obsoletes[1:])
        for pkg in goal.list_installs():
            self._ds_callback.pkg_added(pkg, 'i')
            obs = goal.obsoleted_by_package(pkg)
            # Skip obsoleted packages that are not part of all_obsoleted,
            # they are handled as upgrades/downgrades.
            # Also keep RPMs with the same name - they're not always in all_obsoleted.
            obs = [i for i in obs if i in all_obsoleted or i.name == pkg.name]
            reason = goal.get_reason(pkg)
            # Inherit reason if package is installonly and a package with same name is installed
            # Use the same logic like upgrade
            # Upgrade of installonly packages result in install or install and remove step
            if pkg in installonly_query and installonly_query_installed.filter(name=pkg.name):
                reason = ts.get_reason(pkg)
            # inherit the best reason from obsoleted packages
            for obsolete in obs:
                reason_obsolete = ts.get_reason(obsolete)
                if libdnf.transaction.TransactionItemReasonCompare(reason, reason_obsolete) == -1:
                    reason = reason_obsolete
            ts.add_install(pkg, obs, reason)
            cb = lambda pkg: self._ds_callback.pkg_added(pkg, 'od')
            dnf.util.mapall(cb, obs)
        for pkg in goal.list_upgrades():
            obs = goal.obsoleted_by_package(pkg)
            upgraded = None
            for i in obs:
                # try to find a package with matching name as the upgrade
                if i.name == pkg.name:
                    upgraded = i
                    break
            if upgraded is None:
                # no matching name -> pick the first one
                upgraded = obs.pop(0)
            else:
                obs.remove(upgraded)
            # Skip obsoleted packages that are not part of all_obsoleted,
            # they are handled as upgrades/downgrades.
            # Also keep RPMs with the same name - they're not always in all_obsoleted.
            obs = [i for i in obs if i in all_obsoleted or i.name == pkg.name]
            cb = lambda pkg: self._ds_callback.pkg_added(pkg, 'od')
            dnf.util.mapall(cb, obs)
            if pkg in installonly_query:
                # installonly packages are installed side by side, not upgraded
                ts.add_install(pkg, obs)
            else:
                ts.add_upgrade(pkg, upgraded, obs)
                self._ds_callback.pkg_added(upgraded, 'ud')
            self._ds_callback.pkg_added(pkg, 'u')
        erasures = goal.list_erasures()
        if erasures:
            remaining_installed_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES).installed()
            remaining_installed_query.filterm(pkg__neq=erasures)
            remaining_installed_query.apply()
            for pkg in erasures:
                # if another copy (same name.arch) stays installed, make sure
                # its recorded reason is preserved in history
                tmp_remaining_installed_query = remaining_installed_query.filter(name=pkg.name, arch=pkg.arch)
                if tmp_remaining_installed_query:
                    remaining = tmp_remaining_installed_query[0]
                    ts.get_reason(remaining)
                    self.history.set_reason(remaining, ts.get_reason(remaining))
                self._ds_callback.pkg_added(pkg, 'e')
                reason = goal.get_reason(pkg)
                ts.add_erase(pkg, reason)
        return ts
def _query_matches_installed(self, q):
""" See what packages in the query match packages (also in older
versions, but always same architecture) that are already installed.
Unlike in case of _sltr_matches_installed(), it is practical here
to know even the packages in the original query that can still be
installed.
"""
inst = q.installed()
inst_per_arch = inst._na_dict()
avail_per_arch = q.available()._na_dict()
avail_l = []
inst_l = []
for na in avail_per_arch:
if na in inst_per_arch:
inst_l.append(inst_per_arch[na][0])
else:
avail_l.append(avail_per_arch[na])
return inst_l, avail_l
    def _sltr_matches_installed(self, sltr):
        """ See if sltr matches a package that is (in an older version or
            different architecture perhaps) already installed.
        """
        inst = self.sack.query().installed().filterm(pkg=sltr.matches())
        return list(inst)
def iter_userinstalled(self):
"""Get iterator over the packages installed by the user."""
return (pkg for pkg in self.sack.query().installed()
if self.history.user_installed(pkg))
def _run_hawkey_goal(self, goal, allow_erasing):
ret = goal.run(
allow_uninstall=allow_erasing, force_best=self.conf.best,
ignore_weak_deps=(not self.conf.install_weak_deps))
if self.conf.debug_solver:
goal.write_debugdata('./debugdata/rpms')
return ret
    def _set_excludes_from_weak_to_goal(self):
        """
        Apply 'exclude_from_weak' from configuration, and optionally
        autodetect unmet weak dependencies, excluding the matches from the
        candidates used to satisfy weak dependencies.
        """
        self._goal.reset_exclude_from_weak()
        if self.conf.exclude_from_weak_autodetect:
            self._goal.exclude_from_weak_autodetect()
        for weak_exclude in self.conf.exclude_from_weak:
            subj = dnf.subject.Subject(weak_exclude)
            query = subj.get_best_query(self.sack, with_nevra=True, with_provides=False, with_filenames=False)
            # only available packages are relevant as weak-dep candidates
            query = query.available()
            self._goal.add_exclude_from_weak(query)
    def resolve(self, allow_erasing=False):
        # :api
        """Build the transaction set.

        :param allow_erasing: let the solver remove installed packages to
            satisfy the request.
        :return: True when the resolved transaction is non-empty.
        :raises dnf.exceptions.DepsolveError: on solver failure.
        :raises dnf.exceptions.Error: on RPM-level transaction limitations.
        """
        exc = None
        self._finalize_comps_trans()
        timer = dnf.logging.Timer('depsolve')
        self._ds_callback.start()
        goal = self._goal
        if goal.req_has_erase():
            goal.push_userinstalled(self.sack.query().installed(),
                                    self.history)
        elif not self.conf.upgrade_group_objects_upgrade:
            # exclude packages installed from groups
            # these packages will be marked to installation
            # which could prevent them from upgrade, downgrade
            # to prevent "conflicting job" error it's not applied
            # to "remove" and "reinstall" commands
            solver = self._build_comps_solver()
            solver._exclude_packages_from_installed_groups(self)
        goal.add_protected(self.sack.query().filterm(
            name=self.conf.protected_packages))
        self._set_excludes_from_weak_to_goal()
        if not self._run_hawkey_goal(goal, allow_erasing):
            if self.conf.debuglevel >= 6:
                goal.log_decisions()
            msg = dnf.util._format_resolve_problems(goal.problem_rules())
            exc = dnf.exceptions.DepsolveError(msg)
        else:
            self._transaction = self._goal2transaction(goal)
        # always run end()/timer(), even when depsolving failed
        self._ds_callback.end()
        timer()
        got_transaction = self._transaction is not None and \
            len(self._transaction) > 0
        if got_transaction:
            msg = self._transaction._rpm_limitations()
            if msg:
                exc = dnf.exceptions.Error(msg)
        if exc is not None:
            raise exc
        self._plugins.run_resolved()
        # auto-enable module streams based on installed RPMs
        new_pkgs = self._goal.list_installs()
        new_pkgs += self._goal.list_upgrades()
        new_pkgs += self._goal.list_downgrades()
        new_pkgs += self._goal.list_reinstalls()
        self.sack.set_modules_enabled_by_pkgset(self._moduleContainer, new_pkgs)
        return got_transaction
    def do_transaction(self, display=()):
        # :api
        """Execute the resolved transaction.

        :param display: one display object or a sequence of them; a logging
            display is always prepended.
        :return: history database transaction ID or None.
        :raises dnf.exceptions.TransactionCheckError: rpm check failure.
        :raises dnf.exceptions.Error: rpm transaction test failure.
        """
        if not isinstance(display, Sequence):
            display = [display]
        display = \
            [dnf.yum.rpmtrans.LoggingTransactionDisplay()] + list(display)
        if not self.transaction:
            # packages are not changed, but comps and modules changes need to be committed
            self._moduleContainer.save()
            self._moduleContainer.updateFailSafeData()
            if self._history and (self._history.group or self._history.env):
                cmdline = None
                if hasattr(self, 'args') and self.args:
                    cmdline = ' '.join(self.args)
                elif hasattr(self, 'cmds') and self.cmds:
                    cmdline = ' '.join(self.cmds)
                old = self.history.last()
                if old is None:
                    rpmdb_version = self._ts.dbCookie()
                else:
                    rpmdb_version = old.end_rpmdb_version
                # record an empty (comps/module-only) transaction in history
                self.history.beg(rpmdb_version, [], [], cmdline)
                self.history.end(rpmdb_version)
            self._plugins.run_pre_transaction()
            self._plugins.run_transaction()
            self._trans_success = True
            return
        tid = None
        logger.info(_('Running transaction check'))
        lock = dnf.lock.build_rpmdb_lock(self.conf.persistdir,
                                         self.conf.exit_on_lock)
        with lock:
            self.transaction._populate_rpm_ts(self._ts)
            msgs = self._run_rpm_check()
            if msgs:
                msg = _('Error: transaction check vs depsolve:')
                logger.error(msg)
                for msg in msgs:
                    logger.error(msg)
                raise dnf.exceptions.TransactionCheckError(msg)
            logger.info(_('Transaction check succeeded.'))
            timer = dnf.logging.Timer('transaction test')
            logger.info(_('Running transaction test'))
            self._ts.order()  # order the transaction
            self._ts.clean()  # release memory not needed beyond this point
            testcb = dnf.yum.rpmtrans.RPMTransaction(self, test=True)
            tserrors = self._ts.test(testcb)
            if len(tserrors) > 0:
                for msg in testcb.messages():
                    logger.critical(_('RPM: {}').format(msg))
                errstring = _('Transaction test error:') + '\n'
                for descr in tserrors:
                    errstring += ' %s\n' % ucd(descr)
                summary = self._trans_error_summary(errstring)
                if summary:
                    errstring += '\n' + summary
                raise dnf.exceptions.Error(errstring)
            del testcb
            logger.info(_('Transaction test succeeded.'))
            # With RPMTRANS_FLAG_TEST return just before anything is stored permanently
            if self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
                return
        timer()
        # save module states on disk right before entering rpm transaction,
        # because we want system in recoverable state if transaction gets interrupted
        self._moduleContainer.save()
        self._moduleContainer.updateFailSafeData()
        # unset the sigquit handler
        timer = dnf.logging.Timer('transaction')
        # setup our rpm ts callback
        cb = dnf.yum.rpmtrans.RPMTransaction(self, displays=display)
        if self.conf.debuglevel < 2:
            for display_ in cb.displays:
                display_.output = False
        self._plugins.run_pre_transaction()
        logger.info(_('Running transaction'))
        tid = self._run_transaction(cb=cb)
        timer()
        self._plugins.unload_removed_plugins(self.transaction)
        self._plugins.run_transaction()
        # log post transaction summary
        def _pto_callback(action, tsis):
            msgs = []
            for tsi in tsis:
                msgs.append('{}: {}'.format(action, str(tsi)))
            return msgs
        for msg in dnf.util._post_transaction_output(self, self.transaction, _pto_callback):
            logger.debug(msg)
        return tid
def _trans_error_summary(self, errstring):
"""Parse the error string for 'interesting' errors which can
be grouped, such as disk space issues.
:param errstring: the error string
:return: a string containing a summary of the errors
"""
summary = ''
# do disk space report first
p = re.compile(r'needs (\d+)(K|M)B(?: more space)? on the (\S+) filesystem')
disk = {}
for m in p.finditer(errstring):
size_in_mb = int(m.group(1)) if m.group(2) == 'M' else math.ceil(
int(m.group(1)) / 1024.0)
if m.group(3) not in disk:
disk[m.group(3)] = size_in_mb
if disk[m.group(3)] < size_in_mb:
disk[m.group(3)] = size_in_mb
if disk:
summary += _('Disk Requirements:') + "\n"
for k in disk:
summary += " " + P_(
'At least {0}MB more space needed on the {1} filesystem.',
'At least {0}MB more space needed on the {1} filesystem.',
disk[k]).format(disk[k], k) + '\n'
if not summary:
return None
summary = _('Error Summary') + '\n-------------\n' + summary
return summary
    def _record_history(self):
        # History is recorded unless disabled in config or this is a
        # test-only (RPMTRANS_FLAG_TEST) transaction.
        return self.conf.history_record and \
            not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST)
    def _run_transaction(self, cb):
        """
        Perform the RPM transaction.

        :param cb: the RPMTransaction callback driving rpm's ts.run().
        :return: history database transaction ID or None
        """
        tid = None
        if self._record_history():
            using_pkgs_pats = list(self.conf.history_record_packages)
            installed_query = self.sack.query().installed()
            using_pkgs = installed_query.filter(name=using_pkgs_pats).run()
            rpmdbv = self._ts.dbCookie()
            lastdbv = self.history.last()
            if lastdbv is not None:
                lastdbv = lastdbv.end_rpmdb_version
            if lastdbv is None or rpmdbv != lastdbv:
                logger.debug(_("RPMDB altered outside of {prog}.").format(
                    prog=dnf.util.MAIN_PROG_UPPER))
            cmdline = None
            if hasattr(self, 'args') and self.args:
                cmdline = ' '.join(self.args)
            elif hasattr(self, 'cmds') and self.cmds:
                cmdline = ' '.join(self.cmds)
            comment = self.conf.comment if self.conf.comment else ""
            tid = self.history.beg(rpmdbv, using_pkgs, [], cmdline, comment)
        if self.conf.reset_nice:
            # temporarily undo any niceness so the transaction runs at
            # normal priority; restored below
            onice = os.nice(0)
            if onice:
                try:
                    os.nice(-onice)
                # NOTE(review): bare except — presumably guarding EPERM from
                # os.nice; narrowing to OSError looks safe but is unverified here
                except:
                    onice = 0
        logger.log(dnf.logging.DDEBUG, 'RPM transaction start.')
        errors = self._ts.run(cb.callback, '')
        logger.log(dnf.logging.DDEBUG, 'RPM transaction over.')
        # ts.run() exit codes are, hmm, "creative": None means all ok, empty
        # list means some errors happened in the transaction and non-empty
        # list that there were errors preventing the ts from starting...
        if self.conf.reset_nice:
            try:
                os.nice(onice)
            except:
                pass
        dnf.util._sync_rpm_trans_with_swdb(self._ts, self._transaction)
        if errors is None:
            pass
        elif len(errors) == 0:
            # If there is no failing element it means that some "global" error
            # occurred (like rpm failed to obtain the transaction lock). Just pass
            # the rpm logs on to the user and raise an Error.
            # If there are failing elements the problem is related to those
            # elements and the Error is raised later, after saving the failure
            # to the history and printing out the transaction table to user.
            failed = [el for el in self._ts if el.Failed()]
            if not failed:
                for msg in cb.messages():
                    logger.critical(_('RPM: {}').format(msg))
                msg = _('Could not run transaction.')
                raise dnf.exceptions.Error(msg)
        else:
            logger.critical(_("Transaction couldn't start:"))
            for e in errors:
                logger.critical(ucd(e[0]))
            if self._record_history() and not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
                self.history.end(rpmdbv)
            msg = _("Could not run transaction.")
            raise dnf.exceptions.Error(msg)
        # remove leftover callback bookkeeping files, best effort
        for i in ('ts_all_fn', 'ts_done_fn'):
            if hasattr(cb, i):
                fn = getattr(cb, i)
                try:
                    misc.unlink_f(fn)
                except (IOError, OSError):
                    msg = _('Failed to remove transaction file %s')
                    logger.critical(msg, fn)
        # keep install_set status because _verify_transaction will clean it
        self._trans_install_set = bool(self._transaction.install_set)
        # sync up what just happened versus what is in the rpmdb
        if not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
            self._verify_transaction()
        return tid
    def _verify_transaction(self, verify_pkg_cb=None):
        """Finish the transaction: update history/group state, show the
        verification banner.

        :param verify_pkg_cb: optional callback(pkg, count, total) invoked
            once per transaction item.
        """
        transaction_items = [
            tsi for tsi in self.transaction
            if tsi.action != libdnf.transaction.TransactionItemAction_REASON_CHANGE]
        total = len(transaction_items)
        def display_banner(pkg, count):
            count += 1
            if verify_pkg_cb is not None:
                verify_pkg_cb(pkg, count, total)
            return count
        timer = dnf.logging.Timer('verify transaction')
        count = 0
        rpmdb_sack = dnf.sack.rpmdb_sack(self)
        # mark group packages that are installed on the system as installed in the db
        q = rpmdb_sack.query().installed()
        names = set([i.name for i in q])
        for ti in self.history.group:
            g = ti.getCompsGroupItem()
            for p in g.getPackages():
                if p.getName() in names:
                    p.setInstalled(True)
                    p.save()
        # TODO: installed groups in environments
        # Post-transaction verification is no longer needed,
        # because DNF trusts error codes returned by RPM.
        # Verification banner is displayed to preserve UX.
        # TODO: drop in future DNF
        for tsi in transaction_items:
            count = display_banner(tsi.pkg, count)
        rpmdbv = self._ts.dbCookie()
        self.history.end(rpmdbv)
        timer()
        self._trans_success = True
def _download_remote_payloads(self, payloads, drpm, progress, callback_total, fail_fast=True):
    """Download the given payloads under the download lock, retrying
    recoverable failures up to ``conf.retries`` times (0 = retry forever).

    :param payloads: payload objects to fetch
    :param drpm: DeltaInfo used when rebuilding failed delta payloads
    :param progress: DownloadProgress-like object
    :param callback_total: optional callback(remote_size, start_time) run
        after all downloads finish
    :param fail_fast: passed through to the repo download machinery
    :raises dnf.exceptions.DownloadError: on any irrecoverable error
    """
    lock = dnf.lock.build_download_lock(self.conf.cachedir, self.conf.exit_on_lock)
    with lock:
        beg_download = time.time()
        est_remote_size = sum(pload.download_size for pload in payloads)
        total_drpm = len(
            [payload for payload in payloads if isinstance(payload, dnf.drpm.DeltaPayload)])
        # compatibility part for tools that do not accept total_drpms keyword
        if progress.start.__code__.co_argcount == 4:
            progress.start(len(payloads), est_remote_size, total_drpms=total_drpm)
        else:
            progress.start(len(payloads), est_remote_size)
        errors = dnf.repo._download_payloads(payloads, drpm, fail_fast)
        if errors._irrecoverable():
            raise dnf.exceptions.DownloadError(errors._irrecoverable())
        remote_size = sum(errors._bandwidth_used(pload)
                          for pload in payloads)
        saving = dnf.repo._update_saving((0, 0), payloads,
                                         errors._recoverable)
        # retries == 0 means retry without limit.
        retries = self.conf.retries
        forever = retries == 0
        while errors._recoverable and (forever or retries > 0):
            if retries > 0:
                retries -= 1
            msg = _("Some packages were not downloaded. Retrying.")
            logger.info(msg)
            # Failed deltas are retried as plain full-RPM payloads.
            remaining_pkgs = [pkg for pkg in errors._recoverable]
            payloads = \
                [dnf.repo._pkg2payload(pkg, progress, dnf.repo.RPMPayload)
                 for pkg in remaining_pkgs]
            est_remote_size = sum(pload.download_size
                                  for pload in payloads)
            progress.start(len(payloads), est_remote_size)
            errors = dnf.repo._download_payloads(payloads, drpm, fail_fast)
            if errors._irrecoverable():
                raise dnf.exceptions.DownloadError(errors._irrecoverable())
            remote_size += \
                sum(errors._bandwidth_used(pload) for pload in payloads)
            saving = dnf.repo._update_saving(saving, payloads, {})
        if errors._recoverable:
            msg = dnf.exceptions.DownloadError.errmap2str(
                errors._recoverable)
            logger.info(msg)
    if callback_total is not None:
        callback_total(remote_size, beg_download)
    # Report the delta-RPM bandwidth saving (or waste) summary.
    (real, full) = saving
    if real != full:
        if real < full:
            msg = _("Delta RPMs reduced %.1f MB of updates to %.1f MB "
                    "(%.1f%% saved)")
            percent = 100 - real / full * 100
        elif real > full:
            msg = _("Failed Delta RPMs increased %.1f MB of updates to %.1f MB "
                    "(%.1f%% wasted)")
            percent = 100 - full / real * 100
        logger.info(msg, full / 1024 ** 2, real / 1024 ** 2, percent)
def download_packages(self, pkglist, progress=None, callback_total=None):
    # :api
    """Download the packages specified by the given list of packages.

    `pkglist` is a list of packages to download, `progress` is an optional
     DownloadProgress instance, `callback_total` an optional callback to
     output messages about the download operation.
    """
    remote_pkgs, local_pkgs = self._select_remote_pkgs(pkglist)
    if remote_pkgs:
        if progress is None:
            progress = dnf.callback.NullDownloadProgress()
        drpm = dnf.drpm.DeltaInfo(self.sack.query().installed(),
                                  progress, self.conf.deltarpm_percentage)
        # Register target files as tempfiles so a failed transaction can
        # clean them up later.
        self._add_tempfiles([pkg.localPkg() for pkg in remote_pkgs])
        payloads = [dnf.repo._pkg2payload(pkg, progress, drpm.delta_factory,
                                          dnf.repo.RPMPayload)
                    for pkg in remote_pkgs]
        self._download_remote_payloads(payloads, drpm, progress, callback_total)

    # Already-local packages are copied into destdir when one is configured.
    if self.conf.destdir:
        for pkg in local_pkgs:
            if pkg.baseurl:
                location = os.path.join(pkg.get_local_baseurl(),
                                        pkg.location.lstrip("/"))
            else:
                location = os.path.join(pkg.repo.pkgdir, pkg.location.lstrip("/"))
            try:
                shutil.copy(location, self.conf.destdir)
            except shutil.SameFileError:
                # Source and destination are the same file; nothing to do.
                pass
def add_remote_rpms(self, path_list, strict=True, progress=None):
    # :api
    """Add RPM files (local paths or URLs) to the sack as command-line packages.

    :param path_list: paths or URLs of the rpm files
    :param strict: when True, raise IOError if any file could not be opened
    :param progress: optional progress object for URL downloads
    :return: list of package objects added to the sack
    :raises dnf.exceptions.Error: if a transaction job already exists
    :raises IOError: in strict mode when some file could not be opened
    """
    pkgs = []
    if not path_list:
        return pkgs
    # Adding cmdline packages rebuilds sack state; refuse once jobs exist.
    if self._goal.req_length():
        raise dnf.exceptions.Error(
            _("Cannot add local packages, because transaction job already exists"))
    pkgs_error = []
    for path in path_list:
        if not os.path.exists(path) and '://' in path:
            # download remote rpm to a tempfile
            path = dnf.util._urlopen_progress(path, self.conf, progress)
            self._add_tempfiles([path])
        try:
            pkgs.append(self.sack.add_cmdline_package(path))
        except IOError as e:
            logger.warning(e)
            pkgs_error.append(path)
    # Re-apply excludes/includes since the sack contents changed.
    self._setup_excludes_includes(only_main=True)
    if pkgs_error and strict:
        raise IOError(_("Could not open: {}").format(' '.join(pkgs_error)))
    return pkgs
def _sig_check_pkg(self, po):
"""Verify the GPG signature of the given package object.
:param po: the package object to verify the signature of
:return: (result, error_string)
where result is::
0 = GPG signature verifies ok or verification is not required.
1 = GPG verification failed but installation of the right GPG key
might help.
2 = Fatal GPG verification error, give up.
"""
if po._from_cmdline:
check = self.conf.localpkg_gpgcheck
hasgpgkey = 0
else:
repo = self.repos[po.repoid]
check = repo.gpgcheck
hasgpgkey = not not repo.gpgkey
if check:
root = self.conf.installroot
ts = dnf.rpm.transaction.initReadOnlyTransaction(root)
sigresult = dnf.rpm.miscutils.checkSig(ts, po.localPkg())
localfn = os.path.basename(po.localPkg())
del ts
if sigresult == 0:
result = 0
msg = ''
elif sigresult == 1:
if hasgpgkey:
result = 1
else:
result = 2
msg = _('Public key for %s is not installed') % localfn
elif sigresult == 2:
result = 2
msg = _('Problem opening package %s') % localfn
elif sigresult == 3:
if hasgpgkey:
result = 1
else:
result = 2
result = 1
msg = _('Public key for %s is not trusted') % localfn
elif sigresult == 4:
result = 2
msg = _('Package %s is not signed') % localfn
else:
result = 0
msg = ''
return result, msg
def package_signature_check(self, pkg):
    # :api
    """Check the GPG signature of *pkg*.

    :param pkg: the package object whose signature is checked
    :return: (result, error_string) where result is 0 (ok / not required),
        1 (failed, importing the right GPG key may help) or
        2 (fatal verification error)
    """
    outcome = self._sig_check_pkg(pkg)
    return outcome
def _clean_packages(self, packages):
for fn in packages:
if not os.path.exists(fn):
continue
try:
misc.unlink_f(fn)
except OSError:
logger.warning(_('Cannot remove %s'), fn)
continue
else:
logger.log(dnf.logging.DDEBUG,
_('%s removed'), fn)
def _do_package_lists(self, pkgnarrow='all', patterns=None, showdups=None,
ignore_case=False, reponame=None):
"""Return a :class:`misc.GenericHolder` containing
lists of package objects. The contents of the lists are
specified in various ways by the arguments.
:param pkgnarrow: a string specifying which types of packages
lists to produces, such as updates, installed, available,
etc.
:param patterns: a list of names or wildcards specifying
packages to list
:param showdups: whether to include duplicate packages in the
lists
:param ignore_case: whether to ignore case when searching by
package names
:param reponame: limit packages list to the given repository
:return: a :class:`misc.GenericHolder` instance with the
following lists defined::
available = list of packageObjects
installed = list of packageObjects
upgrades = tuples of packageObjects (updating, installed)
extras = list of packageObjects
obsoletes = tuples of packageObjects (obsoleting, installed)
recent = list of packageObjects
"""
if showdups is None:
showdups = self.conf.showdupesfromrepos
if patterns is None:
return self._list_pattern(
pkgnarrow, patterns, showdups, ignore_case, reponame)
assert not dnf.util.is_string_type(patterns)
list_fn = functools.partial(
self._list_pattern, pkgnarrow, showdups=showdups,
ignore_case=ignore_case, reponame=reponame)
if patterns is None or len(patterns) == 0:
return list_fn(None)
yghs = map(list_fn, patterns)
return functools.reduce(lambda a, b: a.merge_lists(b), yghs)
def _list_pattern(self, pkgnarrow, pattern, showdups, ignore_case,
                  reponame=None):
    """Build the GenericHolder behind :meth:`_do_package_lists` for a single
    narrow and (optional) pattern.

    :param pkgnarrow: which list to fill: 'all', 'upgrades', 'installed',
        'available', 'autoremove', 'extras', 'obsoletes' or 'recent'
    :param pattern: optional name/wildcard pre-filter for the sack query
    :param showdups: include non-latest versions in the listings
    :param ignore_case: case-insensitive pattern matching
    :param reponame: restrict results to the given repository
    :return: a ``misc.GenericHolder`` with all list attributes set (those
        not relevant to *pkgnarrow* stay empty)
    """
    def is_from_repo(package):
        """Test whether given package originates from the repository."""
        if reponame is None:
            return True
        return self.history.repo(package) == reponame

    def pkgs_from_repo(packages):
        """Filter out the packages which do not originate from the repo."""
        return (package for package in packages if is_from_repo(package))

    def query_for_repo(query):
        """Filter out the packages which do not originate from the repo."""
        if reponame is None:
            return query
        return query.filter(reponame=reponame)

    ygh = misc.GenericHolder(iter=pkgnarrow)

    installed = []
    available = []
    reinstall_available = []
    old_available = []
    updates = []
    obsoletes = []
    obsoletesTuples = []
    recent = []
    extras = []
    autoremove = []

    # do the initial pre-selection
    ic = ignore_case
    q = self.sack.query()
    if pattern is not None:
        subj = dnf.subject.Subject(pattern, ignore_case=ic)
        q = subj.get_best_query(self.sack, with_provides=False)

    # list all packages - those installed and available:
    if pkgnarrow == 'all':
        dinst = {}
        ndinst = {}  # Newest versions by name.arch
        for po in q.installed():
            dinst[po.pkgtup] = po
            if showdups:
                continue
            key = (po.name, po.arch)
            if key not in ndinst or po > ndinst[key]:
                ndinst[key] = po
        installed = list(pkgs_from_repo(dinst.values()))

        avail = query_for_repo(q.available())
        if not showdups:
            avail = avail.filterm(latest_per_arch_by_priority=True)
        # Classify each available package as new, reinstallable (exact
        # version installed) or older than what is installed.
        for pkg in avail:
            if showdups:
                if pkg.pkgtup in dinst:
                    reinstall_available.append(pkg)
                else:
                    available.append(pkg)
            else:
                key = (pkg.name, pkg.arch)
                if pkg.pkgtup in dinst:
                    reinstall_available.append(pkg)
                elif key not in ndinst or pkg.evr_gt(ndinst[key]):
                    available.append(pkg)
                else:
                    old_available.append(pkg)

    # produce the updates list of tuples
    elif pkgnarrow == 'upgrades':
        updates = query_for_repo(q).filterm(upgrades_by_priority=True)
        # reduce a query to security upgrades if they are specified
        updates = self._merge_update_filters(updates, upgrade=True)
        # reduce a query to remove src RPMs
        updates.filterm(arch__neq=['src', 'nosrc'])
        # reduce a query to latest packages
        updates = updates.latest().run()

    # installed only
    elif pkgnarrow == 'installed':
        installed = list(pkgs_from_repo(q.installed()))

    # available in a repository
    elif pkgnarrow == 'available':
        if showdups:
            avail = query_for_repo(q).available()
            installed_dict = q.installed()._na_dict()
            for avail_pkg in avail:
                key = (avail_pkg.name, avail_pkg.arch)
                installed_pkgs = installed_dict.get(key, [])
                same_ver = [pkg for pkg in installed_pkgs
                            if pkg.evr == avail_pkg.evr]
                if len(same_ver) > 0:
                    reinstall_available.append(avail_pkg)
                else:
                    available.append(avail_pkg)
        else:
            # we will only look at the latest versions of packages:
            available_dict = query_for_repo(
                q).available().filterm(latest_per_arch_by_priority=True)._na_dict()
            installed_dict = q.installed().latest()._na_dict()
            for (name, arch) in available_dict:
                avail_pkg = available_dict[(name, arch)][0]
                inst_pkg = installed_dict.get((name, arch), [None])[0]
                if not inst_pkg or avail_pkg.evr_gt(inst_pkg):
                    available.append(avail_pkg)
                elif avail_pkg.evr_eq(inst_pkg):
                    reinstall_available.append(avail_pkg)
                else:
                    old_available.append(avail_pkg)

    # packages to be removed by autoremove
    elif pkgnarrow == 'autoremove':
        autoremove_q = query_for_repo(q)._unneeded(self.history.swdb)
        autoremove = autoremove_q.run()

    # not in a repo but installed
    elif pkgnarrow == 'extras':
        extras = [pkg for pkg in q.extras() if is_from_repo(pkg)]

    # obsoleting packages (and what they obsolete)
    elif pkgnarrow == 'obsoletes':
        inst = q.installed()
        obsoletes = query_for_repo(
            self.sack.query()).filter(obsoletes_by_priority=inst)
        # reduce a query to security upgrades if they are specified
        obsoletes = self._merge_update_filters(obsoletes, warning=False, upgrade=True)
        # reduce a query to remove src RPMs
        obsoletes.filterm(arch__neq=['src', 'nosrc'])
        obsoletesTuples = []
        for new in obsoletes:
            obsoleted_reldeps = new.obsoletes
            obsoletesTuples.extend(
                [(new, old) for old in
                 inst.filter(provides=obsoleted_reldeps)])

    # packages recently added to the repositories
    elif pkgnarrow == 'recent':
        avail = q.available()
        if not showdups:
            avail = avail.filterm(latest_per_arch_by_priority=True)
        recent = query_for_repo(avail)._recent(self.conf.recent)

    ygh.installed = installed
    ygh.available = available
    ygh.reinstall_available = reinstall_available
    ygh.old_available = old_available
    ygh.updates = updates
    ygh.obsoletes = obsoletes
    ygh.obsoletesTuples = obsoletesTuples
    ygh.recent = recent
    ygh.extras = extras
    ygh.autoremove = autoremove

    return ygh
def _add_comps_trans(self, trans):
self._comps_trans += trans
return len(trans)
def _remove_if_unneeded(self, query):
    """
    Mark to remove packages that are not required by any user installed package (reason group
    or user)
    :param query: dnf.query.Query() object
    """
    query = query.installed()
    if not query:
        return

    # Packages the solver deems safe to remove vs. packages history marks
    # as removable (not pinned by group/user reason).
    unneeded_pkgs = query._safe_to_remove(self.history.swdb, debug_solver=False)
    unneeded_pkgs_history = query.filter(
        pkg=[i for i in query if self.history.group.is_removable_pkg(i.name)])
    pkg_with_dependent_pkgs = unneeded_pkgs_history.difference(unneeded_pkgs)

    # mark packages with dependent packages as a dependency to allow removal with dependent
    # package
    for pkg in pkg_with_dependent_pkgs:
        self.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_DEPENDENCY)
    # Only erase packages that both the solver and history agree on.
    unneeded_pkgs = unneeded_pkgs.intersection(unneeded_pkgs_history)

    remove_packages = query.intersection(unneeded_pkgs)
    if remove_packages:
        for pkg in remove_packages:
            self._goal.erase(pkg, clean_deps=self.conf.clean_requirements_on_remove)
def _finalize_comps_trans(self):
    """Translate the accumulated comps transaction (``self._comps_trans``)
    into goal jobs: installs, optional installs, upgrades and removals.

    Removals are collected into a single query and only performed for
    packages no longer needed by any user-installed package.
    """
    trans = self._comps_trans
    basearch = self.conf.substitutions['basearch']

    def trans_upgrade(query, remove_query, comps_pkg):
        # Upgrade everything matching the group package's name query.
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=query)
        self._goal.upgrade(select=sltr)
        return remove_query

    def trans_install(query, remove_query, comps_pkg, strict):
        if self.conf.multilib_policy == "all":
            if not comps_pkg.requires:
                self._install_multiarch(query, strict=strict)
            else:
                # it installs only one arch for conditional packages
                installed_query = query.installed().apply()
                self._report_already_installed(installed_query)
                sltr = dnf.selector.Selector(self.sack)
                sltr.set(provides="({} if {})".format(comps_pkg.name, comps_pkg.requires))
                self._goal.install(select=sltr, optional=not strict)

        else:
            sltr = dnf.selector.Selector(self.sack)
            if comps_pkg.requires:
                sltr.set(provides="({} if {})".format(comps_pkg.name, comps_pkg.requires))
            else:
                if self.conf.obsoletes:
                    # If there is no installed package in the pkgs_list, add only
                    # obsoleters of the latest versions. Otherwise behave
                    # consistently with upgrade and add all obsoleters.
                    # See https://bugzilla.redhat.com/show_bug.cgi?id=2176263
                    # for details of the problem.
                    if query.installed():
                        query = query.union(self.sack.query().filterm(obsoletes=query))
                    else:
                        query = query.union(self.sack.query().filterm(
                            obsoletes=query.filter(latest_per_arch_by_priority=True)))
                sltr.set(pkg=query)
            self._goal.install(select=sltr, optional=not strict)
        return remove_query

    def trans_remove(query, remove_query, comps_pkg):
        # Defer actual erasure: just accumulate candidates.
        remove_query = remove_query.union(query)
        return remove_query

    remove_query = self.sack.query().filterm(empty=True)
    attr_fn = ((trans.install, functools.partial(trans_install, strict=True)),
               (trans.install_opt, functools.partial(trans_install, strict=False)),
               (trans.upgrade, trans_upgrade),
               (trans.remove, trans_remove))

    for (attr, fn) in attr_fn:
        for comps_pkg in attr:
            query_args = {'name': comps_pkg.name}
            if (comps_pkg.basearchonly):
                query_args.update({'arch': basearch})
            q = self.sack.query().filterm(**query_args).apply()
            # Source packages are never group-install candidates.
            q.filterm(arch__neq=["src", "nosrc"])
            if not q:
                package_string = comps_pkg.name
                if comps_pkg.basearchonly:
                    package_string += '.' + basearch
                logger.warning(_('No match for group package "{}"').format(package_string))
                continue
            remove_query = fn(q, remove_query, comps_pkg)
            self._goal.group_members.add(comps_pkg.name)

    self._remove_if_unneeded(remove_query)
def _build_comps_solver(self):
    """Create a comps Solver wired to look up package install reasons
    from the history database."""
    def lookup_reason(pkgname):
        # Reasons only exist for installed packages of that name.
        installed = self.sack.query().installed().filterm(name=pkgname)
        if not installed:
            return None
        try:
            return self.history.rpm.get_reason(installed[0])
        except AttributeError:
            return libdnf.transaction.TransactionItemReason_UNKNOWN

    return dnf.comps.Solver(self.history, self._comps, lookup_reason)
def environment_install(self, env_id, types, exclude=None, strict=True, exclude_groups=None):
    # :api
    """Install the packages belonging to the environment group *env_id*.

    :param types: Types of packages to install. Either an integer as a
       logical conjunction of CompsPackageType ids or a list of string
       package type ids (conditional, default, mandatory, optional).
    :return: number of comps transaction entries queued
    """
    assert dnf.util.is_string_type(env_id)
    if not isinstance(types, int):
        types = libdnf.transaction.listToCompsPackageType(types)
    solver = self._build_comps_solver()
    trans = solver._environment_install(
        env_id, types, exclude or set(), strict, exclude_groups)
    return self._add_comps_trans(trans) if trans else 0
def environment_remove(self, env_id):
    # :api
    """Schedule removal of the environment group *env_id*; returns the
    number of comps transaction entries queued."""
    assert dnf.util.is_string_type(env_id)
    removal_trans = self._build_comps_solver()._environment_remove(env_id)
    return self._add_comps_trans(removal_trans)
def group_install(self, grp_id, pkg_types, exclude=None, strict=True):
    # :api
    """Install the packages of the group identified by *grp_id*.

    :param pkg_types: Types of packages to install. Either an integer as a
       logical conjunction of CompsPackageType ids or a list of string
       package type ids (conditional, default, mandatory, optional).
    :param exclude: list of package name glob patterns
       that will be excluded from install set
    :param strict: boolean indicating whether group packages that
       exist but are non-installable due to e.g. dependency
       issues should be skipped (False) or cause transaction to
       fail to resolve (True)
    :return: number of comps transaction entries queued
    """
    def expand_exclude(pattern):
        # A glob expands to every matching package name in the sack; a
        # plain name stands for itself.
        if not dnf.util.is_glob_pattern(pattern):
            return (pattern,)
        matches = self.sack.query().filterm(name__glob=pattern)
        return (p.name for p in matches)

    assert dnf.util.is_string_type(grp_id)
    exclude_pkgnames = None
    if exclude:
        exclude_pkgnames = itertools.chain.from_iterable(
            expand_exclude(p) for p in exclude)

    solver = self._build_comps_solver()
    if not isinstance(pkg_types, int):
        pkg_types = libdnf.transaction.listToCompsPackageType(pkg_types)
    trans = solver._group_install(grp_id, pkg_types, exclude_pkgnames, strict)
    if not trans:
        return 0
    instlog = trans.install if strict else trans.install_opt
    logger.debug(_("Adding packages from group '%s': %s"),
                 grp_id, instlog)
    return self._add_comps_trans(trans)
def env_group_install(self, patterns, types, strict=True, exclude=None, exclude_groups=None):
    """Install all available groups and environments matching *patterns*.

    :return: total number of comps transaction entries queued
    :raises dnf.exceptions.Error: in strict mode when some pattern did
        not resolve to any group or environment
    """
    finder = CompsQuery(self.comps, self.history,
                        CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
                        CompsQuery.AVAILABLE)
    total = 0
    all_resolved = True
    for pattern in patterns:
        try:
            matches = finder.get(pattern)
        except dnf.exceptions.CompsError as err:
            logger.error(ucd(err))
            all_resolved = False
            continue
        for group_id in matches.groups:
            if exclude_groups and group_id in exclude_groups:
                continue
            total += self.group_install(group_id, types, exclude=exclude, strict=strict)
        for env_id in matches.environments:
            total += self.environment_install(env_id, types, exclude=exclude, strict=strict,
                                              exclude_groups=exclude_groups)
    if not all_resolved and strict:
        raise dnf.exceptions.Error(_('Nothing to do.'))
    return total
def group_remove(self, grp_id):
    # :api
    """Schedule removal of the group *grp_id*; returns the number of comps
    transaction entries queued."""
    assert dnf.util.is_string_type(grp_id)
    removal_trans = self._build_comps_solver()._group_remove(grp_id)
    return self._add_comps_trans(removal_trans)
def env_group_remove(self, patterns):
    """Remove installed groups and environments matching *patterns*.

    :return: total number of comps transaction entries queued
    :raises dnf.exceptions.Error: when no pattern matched anything installed
    """
    finder = CompsQuery(self.comps, self.history,
                        CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
                        CompsQuery.INSTALLED)
    try:
        matches = finder.get(*patterns)
    except dnf.exceptions.CompsError as err:
        logger.error("Warning: %s", ucd(err))
        raise dnf.exceptions.Error(_('No groups marked for removal.'))
    total = sum(self.environment_remove(env) for env in matches.environments)
    total += sum(self.group_remove(grp) for grp in matches.groups)
    return total
def env_group_upgrade(self, patterns):
    """Upgrade installed groups and environments matching *patterns*.

    Comps resolution errors are logged and skipped; raises CliError when
    nothing at all could be marked for upgrade.
    """
    finder = CompsQuery(self.comps, self.history,
                        CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS,
                        CompsQuery.INSTALLED)
    upgraded_any = False
    for pattern in patterns:
        try:
            matches = finder.get(pattern)
        except dnf.exceptions.CompsError as err:
            logger.error(ucd(err))
            continue
        for env in matches.environments:
            try:
                self.environment_upgrade(env)
                upgraded_any = True
            except dnf.exceptions.CompsError as err:
                logger.error(ucd(err))
        for grp in matches.groups:
            try:
                self.group_upgrade(grp)
                upgraded_any = True
            except dnf.exceptions.CompsError as err:
                logger.error(ucd(err))
    if not upgraded_any:
        raise dnf.cli.CliError(_('No group marked for upgrade.'))
def environment_upgrade(self, env_id):
    # :api
    """Schedule an upgrade of the environment group *env_id*; returns the
    number of comps transaction entries queued."""
    assert dnf.util.is_string_type(env_id)
    upgrade_trans = self._build_comps_solver()._environment_upgrade(env_id)
    return self._add_comps_trans(upgrade_trans)
def group_upgrade(self, grp_id):
    # :api
    """Schedule an upgrade of the group *grp_id*; returns the number of
    comps transaction entries queued."""
    assert dnf.util.is_string_type(grp_id)
    upgrade_trans = self._build_comps_solver()._group_upgrade(grp_id)
    return self._add_comps_trans(upgrade_trans)
def _gpg_key_check(self):
"""Checks for the presence of GPG keys in the rpmdb.
:return: 0 if there are no GPG keys in the rpmdb, and 1 if
there are keys
"""
gpgkeyschecked = self.conf.cachedir + '/.gpgkeyschecked.yum'
if os.path.exists(gpgkeyschecked):
return 1
installroot = self.conf.installroot
myts = dnf.rpm.transaction.initReadOnlyTransaction(root=installroot)
myts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))
idx = myts.dbMatch('name', 'gpg-pubkey')
keys = len(idx)
del idx
del myts
if keys == 0:
return 0
else:
mydir = os.path.dirname(gpgkeyschecked)
if not os.path.exists(mydir):
os.makedirs(mydir)
fo = open(gpgkeyschecked, 'w')
fo.close()
del fo
return 1
def _install_multiarch(self, query, reponame=None, strict=True):
    """Queue install jobs for every arch-group of packages in *query*
    (multilib_policy == "all" path).

    :param query: query of candidate packages
    :param reponame: optionally restrict the selector to one repository
    :param strict: when False, jobs are added as optional
    :return: the number of arch-groups processed
    """
    already_inst, available = self._query_matches_installed(query)
    self._report_already_installed(already_inst)
    for packages in available:
        sltr = dnf.selector.Selector(self.sack)
        q = self.sack.query().filterm(pkg=packages)
        if self.conf.obsoletes:
            # use only obsoletes of the latest versions
            # See https://bugzilla.redhat.com/show_bug.cgi?id=2176263
            # for details of the problem.
            q = q.union(self.sack.query().filterm(
                obsoletes=q.filter(latest_per_arch_by_priority=True)))
        sltr = sltr.set(pkg=q)
        if reponame is not None:
            sltr = sltr.set(reponame=reponame)
        self._goal.install(select=sltr, optional=(not strict))
    return len(available)
def _categorize_specs(self, install, exclude):
    """
    Categorize :param install and :param exclude list into two groups each (packages and groups)

    :param install: list of specs, whether packages ('foo') or groups/modules ('@bar')
    :param exclude: list of specs, whether packages ('foo') or groups/modules ('@bar')
    :return: categorized install and exclude specs (stored in argparse.Namespace class)

    To access packages use: specs.pkg_specs,
    to access groups use: specs.grp_specs
    """
    def categorize(specs):
        # _parse_specs fills pkg_specs/grp_specs attributes on the namespace.
        ns = argparse.Namespace()
        _parse_specs(ns, specs)
        return ns

    return categorize(install), categorize(exclude)
def _exclude_package_specs(self, exclude_specs):
    """Register the package specs from *exclude_specs* as sack excludes,
    splitting plain names from glob patterns."""
    plain_names = []
    glob_patterns = []
    for spec in exclude_specs.pkg_specs:
        target = glob_patterns if dnf.util.is_glob_pattern(spec) else plain_names
        target.append(spec)
    self.sack.add_excludes(self.sack.query().filter(name=plain_names))
    self.sack.add_excludes(self.sack.query().filter(name__glob=glob_patterns))
def _expand_groups(self, group_specs):
    """Resolve group/environment specs to the flat list of ids they cover,
    including every member group of each matched environment."""
    expanded = set()
    finder = CompsQuery(self.comps, self.history,
                        CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
                        CompsQuery.AVAILABLE | CompsQuery.INSTALLED)

    for pattern in group_specs:
        try:
            matches = finder.get(pattern)
        except dnf.exceptions.CompsError as err:
            logger.error("Warning: Module or %s", ucd(err))
            continue

        expanded.update(matches.groups)
        expanded.update(matches.environments)

        # An environment implicitly pulls in all of its member groups.
        for environment_id in matches.environments:
            environment = self.comps._environment_by_id(environment_id)
            expanded.update(group.id for group in environment.groups_iter())

    return list(expanded)
def _install_groups(self, group_specs, excludes, skipped, strict=True):
for group_spec in group_specs:
try:
types = self.conf.group_package_types
if '/' in group_spec:
split = group_spec.split('/')
group_spec = split[0]
types = split[1].split(',')
self.env_group_install([group_spec], types, strict, excludes.pkg_specs,
excludes.grp_specs)
except dnf.exceptions.Error:
skipped.append("@" + group_spec)
def install_specs(self, install, exclude=None, reponame=None, strict=True, forms=None):
    # :api
    """Mark the given specs (packages 'foo' and groups/modules '@bar') for
    installation, applying *exclude* specs first.

    :param install: list of specs to install
    :param exclude: list of specs to exclude, or None
    :param reponame: limit marking to the given repository
    :param strict: whether unresolvable specs fail the transaction
    :param forms: optional NEVRA forms passed through to install()
    :raises dnf.exceptions.MarkingErrors: aggregating every spec that
        could not be matched or marked
    """
    if exclude is None:
        exclude = []
    no_match_group_specs = []
    error_group_specs = []
    no_match_pkg_specs = []
    error_pkg_specs = []
    install_specs, exclude_specs = self._categorize_specs(install, exclude)
    self._exclude_package_specs(exclude_specs)
    # Plain package specs first; failures are collected, not raised.
    for spec in install_specs.pkg_specs:
        try:
            self.install(spec, reponame=reponame, strict=strict, forms=forms)
        except dnf.exceptions.MarkingError as e:
            logger.error(str(e))
            no_match_pkg_specs.append(spec)
    no_match_module_specs = []
    module_depsolv_errors = ()
    # Group specs are tried as modules first (when module support is built
    # in); whatever modules don't match falls through to comps groups.
    if WITH_MODULES and install_specs.grp_specs:
        try:
            module_base = dnf.module.module_base.ModuleBase(self)
            module_base.install(install_specs.grp_specs, strict)
        except dnf.exceptions.MarkingErrors as e:
            if e.no_match_group_specs:
                for e_spec in e.no_match_group_specs:
                    no_match_module_specs.append(e_spec)
            if e.error_group_specs:
                for e_spec in e.error_group_specs:
                    error_group_specs.append("@" + e_spec)
            module_depsolv_errors = e.module_depsolv_errors
    else:
        no_match_module_specs = install_specs.grp_specs

    if no_match_module_specs:
        exclude_specs.grp_specs = self._expand_groups(exclude_specs.grp_specs)
        self._install_groups(no_match_module_specs, exclude_specs, no_match_group_specs, strict)

    # Raise one aggregated error if anything failed along the way.
    if no_match_group_specs or error_group_specs or no_match_pkg_specs or error_pkg_specs \
            or module_depsolv_errors:
        raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_group_specs,
                                           error_group_specs=error_group_specs,
                                           no_match_pkg_specs=no_match_pkg_specs,
                                           error_pkg_specs=error_pkg_specs,
                                           module_depsolv_errors=module_depsolv_errors)
def install(self, pkg_spec, reponame=None, strict=True, forms=None):
    # :api
    """Mark package(s) given by pkg_spec and reponame for installation.

    :param pkg_spec: package specification (name, NEVRA, glob, ...)
    :param reponame: limit candidates to this repository
    :param strict: when False the goal job is optional
    :param forms: optional NEVRA forms restricting spec interpretation
    :return: 1 when a selector-based job was added, otherwise the count
        from the multiarch path
    :raises dnf.exceptions.PackageNotFoundError: when nothing matches
    """
    kwargs = {'forms': forms, 'with_src': False}
    if forms:
        # With explicit forms, resolve strictly by NEVRA.
        kwargs['with_nevra'] = True
        kwargs['with_provides'] = False
        kwargs['with_filenames'] = False
    subj = dnf.subject.Subject(pkg_spec)
    solution = subj.get_best_solution(self.sack, **kwargs)

    if self.conf.multilib_policy == "all" or subj._is_arch_specified(solution):
        # Install every matching arch (or the one explicitly requested).
        q = solution['query']
        if reponame is not None:
            q.filterm(reponame=reponame)
        if not q:
            self._raise_package_not_found_error(pkg_spec, forms, reponame)
        return self._install_multiarch(q, reponame=reponame, strict=strict)

    elif self.conf.multilib_policy == "best":
        sltrs = subj._get_best_selectors(self,
                                         forms=forms,
                                         obsoletes=self.conf.obsoletes,
                                         reponame=reponame,
                                         reports=True,
                                         solution=solution)
        if not sltrs:
            self._raise_package_not_found_error(pkg_spec, forms, reponame)

        for sltr in sltrs:
            self._goal.install(select=sltr, optional=(not strict))
        return 1
    return 0
def package_downgrade(self, pkg, strict=False):
    # :api
    """Mark *pkg* (an available package object) as a downgrade target.

    :param pkg: available package object to downgrade to
    :param strict: when True the goal job is mandatory
    :return: 1 when a job was added, 0 when the installed version is
        already lower
    :raises NotImplementedError: if *pkg* comes from the system repo
    :raises dnf.exceptions.MarkingError: if no version of the package
        is installed
    """
    if pkg._from_system:
        msg = 'downgrade_package() for an installed package.'
        raise NotImplementedError(msg)

    q = self.sack.query().installed().filterm(name=pkg.name, arch=[pkg.arch, "noarch"])
    if not q:
        msg = _("Package %s not installed, cannot downgrade it.")
        logger.warning(msg, pkg.name)
        raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name)
    elif sorted(q)[0] > pkg:
        # Lowest installed version is still newer than pkg -> valid downgrade.
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=[pkg])
        self._goal.install(select=sltr, optional=(not strict))
        return 1
    else:
        msg = _("Package %s of lower version already installed, "
                "cannot downgrade it.")
        logger.warning(msg, pkg.name)
        return 0
def package_install(self, pkg, strict=True):
    # :api
    """Mark the exact package object *pkg* for installation.

    :param pkg: package object to install
    :param strict: when False the goal job is optional
    :return: 1 (a report is logged instead when already installed)
    :raises dnf.exceptions.PackageNotFoundError: when the exact NEVRA is
        not available in the sack
    """
    q = self.sack.query()._nevra(pkg.name, pkg.evr, pkg.arch)
    already_inst, available = self._query_matches_installed(q)
    if pkg in already_inst:
        self._report_already_installed([pkg])
    elif pkg not in itertools.chain.from_iterable(available):
        raise dnf.exceptions.PackageNotFoundError(_('No match for argument: %s') % pkg.location)
    else:
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=[pkg])
        self._goal.install(select=sltr, optional=(not strict))
    return 1
def package_reinstall(self, pkg):
    """Mark *pkg* for reinstall.

    :param pkg: package object whose exact NEVRA must already be installed
    :return: 1 when the job was added
    :raises dnf.exceptions.MarkingError: when the package is not installed
    """
    installed_match = self.sack.query().installed().filterm(
        name=pkg.name, evr=pkg.evr, arch=pkg.arch)
    if not installed_match:
        logger.warning(_("Package %s not installed, cannot reinstall it."), str(pkg))
        raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name)
    self._goal.install(pkg)
    return 1
def package_remove(self, pkg):
    """Mark *pkg* for erasure in the goal.

    :param pkg: package object to remove
    :return: 1 (one job added)
    """
    goal = self._goal
    goal.erase(pkg)
    return 1
def package_upgrade(self, pkg):
    # :api
    """Mark *pkg* (an available package object) as an upgrade target.

    :param pkg: available package object to upgrade to
    :return: 1 when a job was added, 0 when ignored (src package, or the
        same/higher version already installed)
    :raises NotImplementedError: if *pkg* comes from the system repo
    :raises dnf.exceptions.MarkingError: if no version of the package
        is installed
    """
    if pkg._from_system:
        msg = 'upgrade_package() for an installed package.'
        raise NotImplementedError(msg)

    if pkg.arch == 'src':
        msg = _("File %s is a source package and cannot be updated, ignoring.")
        logger.info(msg, pkg.location)
        return 0

    installed = self.sack.query().installed().apply()
    # An obsoleter upgrades whatever it obsoletes regardless of version math.
    if self.conf.obsoletes and self.sack.query().filterm(pkg=[pkg]).filterm(obsoletes=installed):
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=[pkg])
        self._goal.upgrade(select=sltr)
        return 1

    # do not filter by arch if the package is noarch
    if pkg.arch == "noarch":
        q = installed.filter(name=pkg.name)
    else:
        q = installed.filter(name=pkg.name, arch=[pkg.arch, "noarch"])
    if not q:
        msg = _("Package %s not installed, cannot update it.")
        logger.warning(msg, pkg.name)
        raise dnf.exceptions.MarkingError(
            _('No match for argument: %s') % pkg.location, pkg.name)
    elif sorted(q)[-1] < pkg:
        # Highest installed version is older than pkg -> valid upgrade.
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=[pkg])
        self._goal.upgrade(select=sltr)
        return 1
    else:
        msg = _("The same or higher version of %s is already installed, "
                "cannot update it.")
        logger.warning(msg, pkg.name)
        return 0
def _upgrade_internal(self, query, obsoletes, reponame, pkg_spec=None):
    """Shared worker for upgrade()/upgrade_all(): build an upgrade selector
    from *query* and add it to the goal.

    :param query: candidate packages for upgrade
    :param obsoletes: whether to pull obsoleters of installed packages in
    :param reponame: limit the upgrade to this repository, or None
    :param pkg_spec: the original user spec (for advisory filter messages)
    :return: 1 (kept for API symmetry with the other mark methods)
    """
    installed_all = self.sack.query().installed()
    # Add only relevant obsoletes to transaction => installed, upgrades
    q = query.intersection(self.sack.query().filterm(name=[pkg.name for pkg in installed_all]))
    installed_query = q.installed()
    if obsoletes:
        obsoletes = self.sack.query().available().filterm(
            obsoletes=installed_query.union(q.upgrades()))
        # add obsoletes into transaction
        query = query.union(obsoletes)
    if reponame is not None:
        query.filterm(reponame=reponame)
    query = self._merge_update_filters(query, pkg_spec=pkg_spec, upgrade=True)
    if query:
        # Given that we use libsolv's targeted transactions, we need to ensure that the transaction contains both
        # the new targeted version and also the current installed version (for the upgraded package). This is
        # because if it only contained the new version, libsolv would decide to reinstall the package even if it
        # had just a different buildtime or vendor but the same version
        # (https://github.com/openSUSE/libsolv/issues/287)
        #   - In general, the query already contains both the new and installed versions but not always.
        #     If repository-packages command is used, the installed packages are filtered out because they are from
        #     the @system repo. We need to add them back in.
        #   - However we need to add installed versions of just the packages that are being upgraded. We don't want
        #     to add all installed packages because it could increase the number of solutions for the transaction
        #     (especially without --best) and since libsolv prefers the smallest possible upgrade it could result
        #     in no upgrade even if there is one available. This is a problem in general but its critical with
        #     --security transactions (https://bugzilla.redhat.com/show_bug.cgi?id=2097757)
        #   - We want to add only the latest versions of installed packages, this is specifically for installonly
        #     packages. Otherwise if for example kernel-1 and kernel-3 were installed and present in the
        #     transaction libsolv could decide to install kernel-2 because it is an upgrade for kernel-1 even
        #     though we don't want it because there already is a newer version present.
        query = query.union(installed_all.latest().filter(name=[pkg.name for pkg in query]))
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=query)
        self._goal.upgrade(select=sltr)
    return 1
def upgrade(self, pkg_spec, reponame=None):
    # :api
    """Mark packages matching *pkg_spec* for upgrade.

    :param pkg_spec: package specification (name, NEVRA form, or glob)
    :param reponame: if given, restrict upgrade candidates to this repository
    :return: 1 on success
    :raises dnf.exceptions.MarkingError: if nothing matches *pkg_spec*
    :raises dnf.exceptions.PackagesNotInstalledError: if the matched package
        is available but not installed (and nothing obsoletes an installed one)
    """
    subj = dnf.subject.Subject(pkg_spec)
    solution = subj.get_best_solution(self.sack)
    q = solution["query"]
    if q:
        wildcard = dnf.util.is_glob_pattern(pkg_spec)
        # wildcard shouldn't print not installed packages
        # only solution with nevra.name provide packages with same name
        if not wildcard and solution['nevra'] and solution['nevra'].name:
            pkg_name = solution['nevra'].name
            installed = self.sack.query().installed().apply()
            # With obsoletes enabled, an upgrade spec may name a package that
            # obsoletes an installed one rather than an installed one itself.
            obsoleters = q.filter(obsoletes=installed) \
                if self.conf.obsoletes else self.sack.query().filterm(empty=True)
            if not obsoleters:
                installed_name = installed.filter(name=pkg_name).apply()
                if not installed_name:
                    msg = _('Package %s available, but not installed.')
                    logger.warning(msg, pkg_name)
                    raise dnf.exceptions.PackagesNotInstalledError(
                        _('No match for argument: %s') % pkg_spec, pkg_spec)
                elif solution['nevra'].arch and not dnf.util.is_glob_pattern(solution['nevra'].arch):
                    # Exact arch requested: warn when only a foreign arch is installed.
                    if not installed_name.filterm(arch=solution['nevra'].arch):
                        msg = _('Package %s available, but installed for different architecture.')
                        logger.warning(msg, "{}.{}".format(pkg_name, solution['nevra'].arch))
        # Obsoletes handling only applies to plain-name specs.
        obsoletes = self.conf.obsoletes and solution['nevra'] \
            and solution['nevra'].has_just_name()
        return self._upgrade_internal(q, obsoletes, reponame, pkg_spec)
    raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg_spec, pkg_spec)
def upgrade_all(self, reponame=None):
    # :api
    """Mark every upgradable installed package for upgrade.

    Only available packages are handed to the solver so libsolv's targeted
    upgrade is triggered; "possibilities" are ignored, and using selectors
    keeps the behavior consistent with the other upgrade methods.

    :param reponame: if given, restrict upgrade candidates to this repository
    :return: 1 on success
    """
    everything = self.sack.query()
    return self._upgrade_internal(everything, self.conf.obsoletes, reponame,
                                  pkg_spec=None)
def distro_sync(self, pkg_spec=None):
    """Mark packages for distro-sync (synchronize to the versions in the
    enabled repositories).

    :param pkg_spec: limit the sync to packages matching this spec; when
        None, the whole system is distro-synced
    :return: 1 when something was marked, 0 when pkg_spec matched nothing
    """
    if pkg_spec is None:
        self._goal.distupgrade_all()
        return 1
    subj = dnf.subject.Subject(pkg_spec)
    best = subj.get_best_solution(self.sack, with_src=False)
    # Installed copies live in the @System repo; drop them from candidates.
    best["query"].filterm(reponame__neq=hawkey.SYSTEM_REPO_NAME)
    selectors = subj._get_best_selectors(self, solution=best,
                                         obsoletes=self.conf.obsoletes,
                                         reports=True)
    if not selectors:
        logger.info(_('No match for argument: %s'), pkg_spec)
        return 0
    for selector in selectors:
        self._goal.distupgrade(select=selector)
    return 1
def autoremove(self, forms=None, pkg_specs=None, grp_specs=None, filenames=None):
    # :api
    """Removes all 'leaf' packages from the system that were originally
    installed as dependencies of user-installed packages but which are
    no longer required by any such package.

    When any of *pkg_specs*, *grp_specs* or *filenames* is given, remove
    those specs instead of the unneeded leaf packages.

    :param forms: hawkey NEVRA forms to limit spec interpretation
    :param pkg_specs: package specifications to remove
    :param grp_specs: group specifications to remove
    :param filenames: file-name specs, treated the same as pkg_specs
    """
    if any([grp_specs, pkg_specs, filenames]):
        # Build a fresh list: the original in-place `pkg_specs += filenames`
        # raised TypeError when only one of the two was provided and mutated
        # the caller's list as a side effect.
        pkg_specs = list(pkg_specs or []) + list(filenames or [])
        done = False
        # Remove groups.
        if grp_specs and forms:
            # Explicit NEVRA forms make no sense for group specs.
            for grp_spec in grp_specs:
                msg = _('Not a valid form: %s')
                logger.warning(msg, grp_spec)
        elif grp_specs:
            if self.env_group_remove(grp_specs):
                done = True

        for pkg_spec in pkg_specs:
            try:
                self.remove(pkg_spec, forms=forms)
            except dnf.exceptions.MarkingError as e:
                # Best-effort: report specs that matched nothing and continue.
                logger.info(str(e))
            else:
                done = True

        if not done:
            logger.warning(_('No packages marked for removal.'))
    else:
        # No specs: remove everything the history marks as no longer needed.
        pkgs = self.sack.query()._unneeded(self.history.swdb,
                                           debug_solver=self.conf.debug_solver)
        for pkg in pkgs:
            self.package_remove(pkg)
def remove(self, pkg_spec, reponame=None, forms=None):
    # :api
    """Mark the specified package for removal.

    :param pkg_spec: package specification to remove
    :param reponame: only remove packages installed from this repository
    :param forms: hawkey NEVRA forms restricting how pkg_spec is parsed
    :return: number of packages marked for removal
    """
    query_kwargs = {'forms': forms}
    if forms:
        # Explicit forms limit matching to NEVRA interpretation only.
        query_kwargs.update(with_nevra=True, with_provides=False,
                            with_filenames=False)
    matches = dnf.subject.Subject(pkg_spec).get_best_query(self.sack, **query_kwargs)

    installed = []
    for pkg in matches.installed():
        if reponame is None or self.history.repo(pkg) == reponame:
            installed.append(pkg)
    if not installed:
        # Raises with the most precise "not installed" message.
        self._raise_package_not_installed_error(pkg_spec, forms, reponame)

    clean_deps = self.conf.clean_requirements_on_remove
    for pkg in installed:
        self._goal.erase(pkg, clean_deps=clean_deps)
    return len(installed)
def reinstall(self, pkg_spec, old_reponame=None, new_reponame=None,
              new_reponame_neq=None, remove_na=False):
    """Mark packages matching *pkg_spec* for reinstall.

    :param pkg_spec: package specification
    :param old_reponame: only reinstall packages whose installed copy came
        from this repository
    :param new_reponame: take replacement packages only from this repository
    :param new_reponame_neq: never take replacements from this repository
    :param remove_na: if True, erase installed packages that have no exact
        available counterpart instead of skipping them
    :return: number of packages processed
    :raises dnf.exceptions.PackagesNotInstalledError: nothing installed matches
    :raises dnf.exceptions.PackagesNotAvailableError: nothing could be replaced
    """
    subj = dnf.subject.Subject(pkg_spec)
    q = subj.get_best_query(self.sack)
    installed_pkgs = [
        pkg for pkg in q.installed()
        if old_reponame is None or
        self.history.repo(pkg) == old_reponame]
    available_q = q.available()
    if new_reponame is not None:
        available_q.filterm(reponame=new_reponame)
    if new_reponame_neq is not None:
        available_q.filterm(reponame__neq=new_reponame_neq)
    # Full NEVRA string -> available packages with exactly that NEVRA.
    available_nevra2pkg = dnf.query._per_nevra_dict(available_q)

    if not installed_pkgs:
        raise dnf.exceptions.PackagesNotInstalledError(
            'no package matched', pkg_spec, available_q.run())

    cnt = 0
    clean_deps = self.conf.clean_requirements_on_remove
    strict = self.conf.strict
    for installed_pkg in installed_pkgs:
        try:
            available_pkgs = available_nevra2pkg[ucd(installed_pkg)]
        except KeyError:
            # No available package with the same NEVRA.
            if not remove_na:
                continue
            self._goal.erase(installed_pkg, clean_deps=clean_deps)
        else:
            sltr = dnf.selector.Selector(self.sack)
            sltr.set(pkg=available_pkgs)
            self._goal.install(select=sltr, optional=(not strict))
        cnt += 1

    if cnt == 0:
        raise dnf.exceptions.PackagesNotAvailableError(
            'no package matched', pkg_spec, installed_pkgs)

    return cnt
def downgrade(self, pkg_spec):
    # :api
    """Mark a package for downgrade.

    Equivalent to removing the currently installed version and installing
    the next-lower available one; the work is delegated to
    :meth:`downgrade_to`.

    :param pkg_spec: package specification to downgrade
    :return: whatever :meth:`downgrade_to` returns (1 if marked, else 0)
    """
    return self.downgrade_to(pkg_spec)
def downgrade_to(self, pkg_spec, strict=False):
    """Downgrade to specific version if specified otherwise downgrades
    to one version lower than the package installed.

    :param pkg_spec: package specification, optionally carrying a version
    :param strict: if True, the selected packages are mandatory for the solver
    :return: 1 if at least one downgrade was marked, otherwise 0
    :raises dnf.exceptions.PackageNotFoundError: nothing matches pkg_spec
    :raises dnf.exceptions.PackagesNotInstalledError: matches exist but none
        of them is installed
    """
    subj = dnf.subject.Subject(pkg_spec)
    q = subj.get_best_query(self.sack)
    if not q:
        msg = _('No match for argument: %s') % pkg_spec
        raise dnf.exceptions.PackageNotFoundError(msg, pkg_spec)
    done = 0
    available_pkgs = q.available()
    available_pkg_names = list(available_pkgs._name_dict().keys())
    # Installed packages sharing a name with an available candidate.
    q_installed = self.sack.query().installed().filterm(name=available_pkg_names)
    if len(q_installed) == 0:
        msg = _('Packages for argument %s available, but not installed.') % pkg_spec
        raise dnf.exceptions.PackagesNotInstalledError(msg, pkg_spec, available_pkgs)
    for pkg_name in q_installed._name_dict().keys():
        downgrade_pkgs = available_pkgs.downgrades().filter(name=pkg_name)
        if not downgrade_pkgs:
            # Already at the lowest available version: warn and move on.
            msg = _("Package %s of lowest version already installed, cannot downgrade it.")
            logger.warning(msg, pkg_name)
            continue
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(pkg=downgrade_pkgs)
        self._goal.install(select=sltr, optional=(not strict))
        done = 1
    return done
def provides(self, provides_spec):
    """Return packages providing *provides_spec*.

    Tries, in order: a file glob, a provide name, a /usr-merged path for
    /bin and /sbin specs, and finally the spec as a bare command name in
    the standard binary directories.

    :param provides_spec: a file glob, provide name, or command name
    :return: tuple (query of providing packages, list of specs searched)
    """
    providers = self.sack.query().filterm(file__glob=provides_spec)
    if providers:
        return providers, [provides_spec]
    providers = dnf.query._by_provides(self.sack, provides_spec)
    if providers:
        return providers, [provides_spec]
    if provides_spec.startswith('/bin/') or provides_spec.startswith('/sbin/'):
        # compatibility for packages that didn't do UsrMove
        binary_provides = ['/usr' + provides_spec]
    elif provides_spec.startswith('/'):
        # provides_spec is some other file path; nothing more to try,
        # return the (empty) providers query
        return providers, [provides_spec]
    else:
        # suppose provides_spec is a bare command name; search all the
        # standard binary directories
        binary_provides = [prefix + provides_spec
                           for prefix in ['/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/']]
    return self.sack.query().filterm(file__glob=binary_provides), binary_provides
def add_security_filters(self, cmp_type, types=(), advisory=(), bugzilla=(), cves=(), severity=()):
    # :api
    """
    It modifies results of install, upgrade, and distrosync methods according to provided
    filters.

    :param cmp_type: only 'eq' or 'gte' allowed
    :param types: List or tuple with strings. E.g. 'bugfix', 'enhancement', 'newpackage',
        'security'
    :param advisory: List or tuple with strings. E.g. FEDORA-2201-123
    :param bugzilla: List or tuple with strings. Include packages that fix a Bugzilla ID,
        e.g. 123123.
    :param cves: List or tuple with strings. Include packages that fix a CVE
        (Common Vulnerabilities and Exposures) ID. E.g. CVE-2201-0123
    :param severity: List or tuple with strings. Includes packages that provide a fix
        for an issue of the specified severity.
    :raises ValueError: when cmp_type is neither 'eq' nor 'gte'
    """
    cmp_dict = {'eq': '__eqg', 'gte': '__eqg__gt'}
    if cmp_type not in cmp_dict:
        raise ValueError("Unsupported value for `cmp_type`")
    suffix = cmp_dict[cmp_type]
    # (advisory filter key prefix, values supplied by the caller)
    option_pairs = (
        ('advisory_type', types),
        ('advisory', advisory),
        ('advisory_bug', bugzilla),
        ('advisory_cve', cves),
        ('advisory_severity', severity),
    )
    for base_key, values in option_pairs:
        if values:
            self._update_security_options.setdefault(base_key + suffix, set()).update(values)
def reset_security_filters(self):
    # :api
    """Drop every security filter previously registered via
    :meth:`add_security_filters`."""
    self._update_security_options = {}
def _merge_update_filters(self, q, pkg_spec=None, warning=True, upgrade=False):
    """
    Merge Queries in _update_filters and return intersection with q Query

    :param q: base Query to restrict
    :param pkg_spec: original user spec, used only for warning messages
    :param warning: when True and nothing security-relevant remains, log how
        many plain updates would otherwise be available
    :param upgrade: append '__upgrade' to each advisory filter key
    :return: Query
    """
    # Fast path: no security filters configured, or nothing to filter.
    if not (self._update_security_options or self._update_security_filters) or not q:
        return q
    merged_queries = self.sack.query().filterm(empty=True)
    if self._update_security_filters:
        for query in self._update_security_filters:
            merged_queries = merged_queries.union(query)
        # Collapse the accumulated filters into a single pre-merged query.
        self._update_security_filters = [merged_queries]
    if self._update_security_options:
        for filter_name, values in self._update_security_options.items():
            if upgrade:
                filter_name = filter_name + '__upgrade'
            kwargs = {filter_name: values}
            merged_queries = merged_queries.union(q.filter(**kwargs))

    merged_queries = q.intersection(merged_queries)
    if not merged_queries:
        if warning:
            # Nothing security-relevant; count plain upgrades for the message.
            q = q.upgrades()
            count = len(q._name_dict().keys())
            if count > 0:
                if pkg_spec is None:
                    msg1 = _("No security updates needed, but {} update "
                             "available").format(count)
                    msg2 = _("No security updates needed, but {} updates "
                             "available").format(count)
                    logger.warning(P_(msg1, msg2, count))
                else:
                    msg1 = _('No security updates needed for "{}", but {} '
                             'update available').format(pkg_spec, count)
                    msg2 = _('No security updates needed for "{}", but {} '
                             'updates available').format(pkg_spec, count)
                    logger.warning(P_(msg1, msg2, count))
    return merged_queries
def _get_key_for_package(self, po, askcb=None, fullaskcb=None):
    """Retrieve a key for a package. If needed, use the given
    callback to prompt whether the key should be imported.

    :param po: the package object to retrieve the key of
    :param askcb: Callback function to use to ask permission to
        import a key. The arguments *askcb* should take are the
        package object, the userid of the key, and the keyid
    :param fullaskcb: Callback function to use to ask permission to
        import a key. This differs from *askcb* in that it gets
        passed a dictionary so that we can expand the values passed.
    :raises: :class:`dnf.exceptions.Error` if there are errors
        retrieving the keys
    """
    if po._from_cmdline:
        # raise an exception, because po.repoid is not in self.repos
        msg = _('Unable to retrieve a key for a commandline package: %s')
        raise ValueError(msg % po)

    repo = self.repos[po.repoid]
    # Only fetch key URLs once per repo within this Base's lifetime.
    key_installed = repo.id in self._repo_set_imported_gpg_keys
    keyurls = [] if key_installed else repo.gpgkey

    def _prov_key_data(msg):
        # Append package and configured key URLs to an error message.
        msg += _('. Failing package is: %s') % (po) + '\n '
        msg += _('GPG Keys are configured as: %s') % \
               (', '.join(repo.gpgkey))
        return msg

    user_cb_fail = False
    self._repo_set_imported_gpg_keys.add(repo.id)
    for keyurl in keyurls:
        keys = dnf.crypto.retrieve(keyurl, repo)

        for info in keys:
            # Check if key is already installed
            if misc.keyInstalled(self._ts, info.rpm_id, info.timestamp) >= 0:
                msg = _('GPG key at %s (0x%s) is already installed')
                logger.info(msg, keyurl, info.short_id)
                continue

            # DNS Extension: create a key object, pass it to the verification class
            # and print its result as an advice to the user.
            if self.conf.gpgkey_dns_verification:
                dns_input_key = dnf.dnssec.KeyInfo.from_rpm_key_object(info.userid,
                                                                       info.raw_key)
                dns_result = dnf.dnssec.DNSSECKeyVerification.verify(dns_input_key)
                logger.info(dnf.dnssec.nice_user_msg(dns_input_key, dns_result))

            # Try installing/updating GPG key
            info.url = keyurl
            if self.conf.gpgkey_dns_verification:
                dnf.crypto.log_dns_key_import(info, dns_result)
            else:
                dnf.crypto.log_key_import(info)

            rc = False
            if self.conf.assumeno:
                rc = False
            elif self.conf.assumeyes:
                # DNS Extension: We assume, that the key is trusted in case it is valid,
                # its existence is explicitly denied or in case the domain is not signed
                # and therefore there is no way to know for sure (this is mainly for
                # backward compatibility)
                # FAQ:
                # * What is PROVEN_NONEXISTENCE?
                #    In DNSSEC, your domain does not need to be signed, but this state
                #    (not signed) has to be proven by the upper domain. e.g. when example.com.
                #    is not signed, com. servers have to sign the message, that example.com.
                #    does not have any signing key (KSK to be more precise).
                if self.conf.gpgkey_dns_verification:
                    if dns_result in (dnf.dnssec.Validity.VALID,
                                      dnf.dnssec.Validity.PROVEN_NONEXISTENCE):
                        rc = True
                        logger.info(dnf.dnssec.any_msg(_("The key has been approved.")))
                    else:
                        rc = False
                        logger.info(dnf.dnssec.any_msg(_("The key has been rejected.")))
                else:
                    rc = True

            # grab the .sig/.asc for the keyurl, if it exists if it
            # does check the signature on the key if it is signed by
            # one of our ca-keys for this repo or the global one then
            # rc = True else ask as normal.
            elif fullaskcb:
                rc = fullaskcb({"po": po, "userid": info.userid,
                                "hexkeyid": info.short_id,
                                "keyurl": keyurl,
                                "fingerprint": info.fingerprint,
                                "timestamp": info.timestamp})
            elif askcb:
                rc = askcb(po, info.userid, info.short_id)

            if not rc:
                # Remember the refusal so we can report it after the loop.
                user_cb_fail = True
                continue

            # Import the key
            # If rpm.RPMTRANS_FLAG_TEST in self._ts, gpg keys cannot be imported successfully
            # therefore the flag was removed for import operation
            test_flag = self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST)
            if test_flag:
                orig_flags = self._ts.getTsFlags()
                self._ts.setFlags(orig_flags - rpm.RPMTRANS_FLAG_TEST)
            result = self._ts.pgpImportPubkey(misc.procgpgkey(info.raw_key))
            if test_flag:
                self._ts.setFlags(orig_flags)
            if result != 0:
                msg = _('Key import failed (code %d)') % result
                raise dnf.exceptions.Error(_prov_key_data(msg))
            logger.info(_('Key imported successfully'))
            key_installed = True

    if not key_installed and user_cb_fail:
        raise dnf.exceptions.Error(_("Didn't install any keys"))

    if not key_installed:
        msg = _('The GPG keys listed for the "%s" repository are '
                'already installed but they are not correct for this '
                'package.\n'
                'Check that the correct key URLs are configured for '
                'this repository.') % repo.name
        raise dnf.exceptions.Error(_prov_key_data(msg))

    # Check if the newly installed keys helped
    result, errmsg = self._sig_check_pkg(po)
    if result != 0:
        if keyurls:
            msg = _("Import of key(s) didn't help, wrong key(s)?")
            logger.info(msg)
        errmsg = ucd(errmsg)
        raise dnf.exceptions.Error(_prov_key_data(errmsg))
def package_import_key(self, pkg, askcb=None, fullaskcb=None):
    # :api
    """Public wrapper around :meth:`_get_key_for_package`.

    Retrieve a GPG key for *pkg*, prompting through the given callback
    when confirmation is needed.

    :param pkg: the package object to retrieve the key of
    :param askcb: callback asking permission to import a key; receives the
        package object, the key's userid and the keyid
    :param fullaskcb: like *askcb* but receives a dictionary of expanded
        values instead of positional arguments
    :raises: :class:`dnf.exceptions.Error` if there are errors retrieving
        the keys
    """
    self._get_key_for_package(pkg, askcb, fullaskcb)
def _run_rpm_check(self):
    """Run the rpm transaction check and return its problems as text.

    :return: list of problem strings (possibly empty)
    """
    self._ts.check()
    # Newer rpm (4.8.0+) yields problem objects, older just strings; ucd()
    # normalizes both to text. Should probably move to the new objects when
    # we can; for now stay compatible.
    return [ucd(problem) for problem in self._ts.problems()]
def urlopen(self, url, repo=None, mode='w+b', **kwargs):
    # :api
    """Open an absolute *url* and return a file object.

    Honors the configured proxy settings even for non-repo downloads by
    delegating to dnf.util._urlopen with this Base's configuration.
    """
    return dnf.util._urlopen(url, self.conf, repo, mode, **kwargs)
def _get_installonly_query(self, q=None):
if q is None:
q = self._sack.query(flags=hawkey.IGNORE_EXCLUDES)
installonly = q.filter(provides=self.conf.installonlypkgs)
return installonly
def _report_icase_hint(self, pkg_spec):
    """Log a "maybe you meant" hint when *pkg_spec* matches a package only
    case-insensitively."""
    subj = dnf.subject.Subject(pkg_spec, ignore_case=True)
    solution = subj.get_best_solution(self.sack, with_nevra=True,
                                      with_provides=False, with_filenames=False)
    found = solution['query']
    nevra = solution['nevra']
    # Only hint when the best match's name actually differs from the spec.
    if found and nevra and nevra.name and pkg_spec != found[0].name:
        logger.info(_(" * Maybe you meant: {}").format(found[0].name))
def _select_remote_pkgs(self, install_pkgs):
    """ Check checksum of packages from local repositories and returns list packages from remote
    repositories that will be downloaded. Packages from commandline are skipped.

    :param install_pkgs: list of packages
    :return: tuple (remote packages to download, verified local-repo packages)
    :raises dnf.exceptions.Error: when a local (or, with cacheonly, cached)
        package fails checksum verification
    """
    def _verification_of_packages(pkg_list, logger_msg):
        # True only when every package passes local verification; failures
        # are logged with logger_msg, not raised, so all are reported.
        all_packages_verified = True
        for pkg in pkg_list:
            pkg_successfully_verified = False
            try:
                pkg_successfully_verified = pkg.verifyLocalPkg()
            except Exception as e:
                logger.critical(str(e))
            if pkg_successfully_verified is not True:
                logger.critical(logger_msg.format(pkg, pkg.reponame))
                all_packages_verified = False
        return all_packages_verified

    remote_pkgs = []
    local_repository_pkgs = []
    for pkg in install_pkgs:
        if pkg._is_local_pkg():
            # Commandline packages are intentionally skipped entirely.
            if pkg.reponame != hawkey.CMDLINE_REPO_NAME:
                local_repository_pkgs.append(pkg)
        else:
            remote_pkgs.append(pkg)

    msg = _('Package "{}" from local repository "{}" has incorrect checksum')
    if not _verification_of_packages(local_repository_pkgs, msg):
        raise dnf.exceptions.Error(
            _("Some packages from local repository have incorrect checksum"))

    if self.conf.cacheonly:
        # With --cacheonly nothing may be downloaded, so the cached copies of
        # "remote" packages must verify; the download list becomes empty.
        msg = _('Package "{}" from repository "{}" has incorrect checksum')
        if not _verification_of_packages(remote_pkgs, msg):
            raise dnf.exceptions.Error(
                _('Some packages have invalid cache, but cannot be downloaded due to '
                  '"--cacheonly" option'))
        remote_pkgs = []

    return remote_pkgs, local_repository_pkgs
def _report_already_installed(self, packages):
    """Log an 'already installed' info message for every given package."""
    for already_present in packages:
        _msg_installed(already_present)
def _raise_package_not_found_error(self, pkg_spec, forms, reponame):
    """Raise PackageNotFoundError for *pkg_spec* with the most precise
    message: no match at all, all matches excluded by regular exclude
    filtering, or all matches excluded by modular filtering.

    Always raises; never returns normally.
    """
    # Search with excludes ignored to tell "doesn't exist" from "excluded".
    all_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES)
    subject = dnf.subject.Subject(pkg_spec)
    solution = subject.get_best_solution(
        self.sack, forms=forms, with_src=False, query=all_query)

    if reponame is not None:
        solution['query'].filterm(reponame=reponame)

    if not solution['query']:
        raise dnf.exceptions.PackageNotFoundError(_('No match for argument'), pkg_spec)
    else:
        with_regular_query = self.sack.query(flags=hawkey.IGNORE_REGULAR_EXCLUDES)
        with_regular_query = solution['query'].intersection(with_regular_query)
        # Modular filtering is applied on a package set that already has regular excludes
        # filtered out. So if a package wasn't filtered out by regular excludes, it must have
        # been filtered out by modularity.
        if with_regular_query:
            msg = _('All matches were filtered out by exclude filtering for argument')
        else:
            msg = _('All matches were filtered out by modular filtering for argument')
        raise dnf.exceptions.PackageNotFoundError(msg, pkg_spec)
def _raise_package_not_installed_error(self, pkg_spec, forms, reponame):
    """Raise PackagesNotInstalledError for *pkg_spec* with the most precise
    message: no installed match at all, matches installed from another
    repository, or matches removed by exclude filtering.

    Always raises; never returns normally.
    """
    # Search installed packages with excludes ignored so we can distinguish
    # "not installed" from "installed but excluded".
    installed_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES).installed()
    solution = dnf.subject.Subject(pkg_spec).get_best_solution(
        self.sack, forms=forms, with_src=False, query=installed_query)
    matches = solution['query']

    if not matches:
        raise dnf.exceptions.PackagesNotInstalledError(_('No match for argument'), pkg_spec)

    if reponame is None:
        from_repo = matches
    else:
        from_repo = [pkg for pkg in matches if self.history.repo(pkg) == reponame]

    if from_repo:
        msg = _('All matches were filtered out by exclude filtering for argument')
    else:
        msg = _('All matches were installed from a different repository for argument')
    raise dnf.exceptions.PackagesNotInstalledError(msg, pkg_spec)
def setup_loggers(self):
    # :api
    """Configure DNF's file loggers from this Base's configuration.

    The loggers end up set up exactly as if DNF had been run from the CLI.
    """
    self._logging._setup_from_dnf_conf(self.conf, file_loggers_only=True)
def _skipped_packages(self, report_problems, transaction):
    """returns set of conflicting packages and set of packages with broken dependency that would
    be additionally installed when --best and --allowerasing

    :param report_problems: when True, log the resolve problems of the probe run
    :param transaction: the resolved transaction to compare against
    :return: tuple (skipped_conflicts, skipped_dependency)
    """
    # force_best only makes sense when the goal actually installs/upgrades.
    if self._goal.actions & (hawkey.INSTALL | hawkey.UPGRADE | hawkey.UPGRADE_ALL):
        best = True
    else:
        best = False
    # Probe on a copy of the goal so the real goal's state stays untouched.
    ng = deepcopy(self._goal)
    params = {"allow_uninstall": self._allow_erasing,
              "force_best": best,
              "ignore_weak": True}
    ret = ng.run(**params)
    if not ret and report_problems:
        msg = dnf.util._format_resolve_problems(ng.problem_rules())
        logger.warning(msg)
    problem_conflicts = set(ng.problem_conflicts(available=True))
    problem_dependency = set(ng.problem_broken_dependency(available=True)) - problem_conflicts

    def _nevra(item):
        return hawkey.NEVRA(name=item.name, epoch=item.epoch, version=item.version,
                            release=item.release, arch=item.arch)

    # Sometimes, pkg is not in transaction item, therefore, comparing by nevra
    transaction_nevras = [_nevra(tsi) for tsi in transaction]
    skipped_conflicts = set(
        [pkg for pkg in problem_conflicts if _nevra(pkg) not in transaction_nevras])
    skipped_dependency = set(
        [pkg for pkg in problem_dependency if _nevra(pkg) not in transaction_nevras])

    return skipped_conflicts, skipped_dependency
def reboot_needed(self):
"""Check whether a system reboot is recommended following the transaction
:return: bool
"""
if not self.transaction:
return False
# List taken from DNF needs-restarting
need_reboot = frozenset(('kernel', 'kernel-core', 'kernel-rt', 'glibc',
'linux-firmware', 'systemd', 'dbus',
'dbus-broker', 'dbus-daemon', 'microcode_ctl'))
changed_pkgs = self.transaction.install_set | self.transaction.remove_set
return any(pkg.name in need_reboot for pkg in changed_pkgs)
def _msg_installed(pkg):
    """Log an informational 'already installed' message for *pkg*."""
    logger.info(_('Package %s is already installed.'), ucd(pkg))
| 120,876
|
Python
|
.py
| 2,510
| 34.959363
| 119
| 0.571878
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,732
|
comps.py
|
rpm-software-management_dnf/dnf/comps.py
|
# comps.py
# Interface to libcomps.
#
# Copyright (C) 2013-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import libdnf.transaction
from dnf.exceptions import CompsError
from dnf.i18n import _, ucd
from functools import reduce
import dnf.i18n
import dnf.util
import fnmatch
import gettext
import itertools
import libcomps
import locale
import logging
import operator
import re
import sys
logger = logging.getLogger("dnf")
# :api :binformat
CONDITIONAL = libdnf.transaction.CompsPackageType_CONDITIONAL
DEFAULT = libdnf.transaction.CompsPackageType_DEFAULT
MANDATORY = libdnf.transaction.CompsPackageType_MANDATORY
OPTIONAL = libdnf.transaction.CompsPackageType_OPTIONAL
ALL_TYPES = CONDITIONAL | DEFAULT | MANDATORY | OPTIONAL
def _internal_comps_length(comps):
collections = (comps.categories, comps.groups, comps.environments)
return reduce(operator.__add__, map(len, collections))
def _first_if_iterable(seq):
    """Return the first element of *seq*, or None when *seq* itself is None."""
    return None if seq is None else dnf.util.first(seq)
def _by_pattern(pattern, case_sensitive, sqn):
    """Return items from sqn matching either exactly or glob-wise.

    Exact name/id matches win; only when there are none is the pattern
    treated as a glob against id, name and ui_name.
    """
    pattern = dnf.i18n.ucd(pattern)

    exact = {g for g in sqn if g.name == pattern or g.id == pattern}
    if exact:
        return exact

    flags = 0 if case_sensitive else re.I
    match = re.compile(fnmatch.translate(pattern), flags=flags).match

    matched = set()
    for g in sqn:
        if match(g.id) \
                or (g.name is not None and match(g.name)) \
                or (g.ui_name is not None and match(g.ui_name)):
            matched.add(g)
    return matched
def _fn_display_order(group):
return sys.maxsize if group.display_order is None else group.display_order
def install_or_skip(install_fnc, grp_or_env_id, types, exclude=None,
                    strict=True, exclude_groups=None):
    """
    Installs a group or an environment identified by grp_or_env_id.

    This method is preserved for API compatibility. It used to catch an
    exception thrown when a group or env was already installed, which is no
    longer thrown, so it is now a plain delegation.

    `install_fnc` has to be Solver._group_install or
    Solver._environment_install.
    """
    return install_fnc(grp_or_env_id, types, exclude, strict, exclude_groups)
class _Langs(object):
"""Get all usable abbreviations for the current language."""
def __init__(self):
self.last_locale = None
self.cache = None
@staticmethod
def _dotted_locale_str():
lcl = locale.getlocale(locale.LC_MESSAGES)
if lcl == (None, None):
return 'C'
return '.'.join(lcl)
def get(self):
current_locale = self._dotted_locale_str()
if self.last_locale == current_locale:
return self.cache
self.cache = []
locales = [current_locale]
if current_locale != 'C':
locales.append('C')
for l in locales:
for nlang in gettext._expand_lang(l):
if nlang not in self.cache:
self.cache.append(nlang)
self.last_locale = current_locale
return self.cache
class CompsQuery(object):
    """Resolve patterns to comps group/environment ids, searching either
    available comps metadata, the installed history, or both."""

    # bitmask values for `status`
    AVAILABLE = 1
    INSTALLED = 2

    # bitmask values for `kinds`
    ENVIRONMENTS = 1
    GROUPS = 2

    def __init__(self, comps, history, kinds, status):
        # comps: Comps instance searched for available items
        # history: swdb history wrapper (provides .env/.group pattern search)
        # kinds: ENVIRONMENTS | GROUPS bitmask selecting what to look up
        # status: AVAILABLE | INSTALLED bitmask selecting where to look
        self.comps = comps
        self.history = history
        self.kinds = kinds
        self.status = status

    def _get_groups(self, available, installed):
        """Merge group ids from available comps objects and installed swdb items."""
        result = set()
        if self.status & self.AVAILABLE:
            result.update({i.id for i in available})
        if self.status & self.INSTALLED:
            for i in installed:
                group = i.getCompsGroupItem()
                if not group:
                    continue
                result.add(group.getGroupId())
        return result

    def _get_envs(self, available, installed):
        """Merge environment ids from available comps objects and installed swdb items."""
        result = set()
        if self.status & self.AVAILABLE:
            result.update({i.id for i in available})
        if self.status & self.INSTALLED:
            for i in installed:
                env = i.getCompsEnvironmentItem()
                if not env:
                    continue
                result.add(env.getEnvironmentId())
        return result

    def get(self, *patterns):
        """Resolve *patterns* and return a Bunch with `.environments` and
        `.groups` id lists.

        :raises CompsError: when a pattern matches neither an environment
            nor a group
        """
        res = dnf.util.Bunch()
        res.environments = []
        res.groups = []
        for pat in patterns:
            envs = grps = []
            if self.kinds & self.ENVIRONMENTS:
                available = self.comps.environments_by_pattern(pat)
                installed = self.history.env.search_by_pattern(pat)
                envs = self._get_envs(available, installed)
                res.environments.extend(envs)
            if self.kinds & self.GROUPS:
                available = self.comps.groups_by_pattern(pat)
                installed = self.history.group.search_by_pattern(pat)
                grps = self._get_groups(available, installed)
                res.groups.extend(grps)

            if not envs and not grps:
                # Choose the error message matching the search mode.
                if self.status == self.INSTALLED:
                    msg = _("Module or Group '%s' is not installed.") % ucd(pat)
                elif self.status == self.AVAILABLE:
                    msg = _("Module or Group '%s' is not available.") % ucd(pat)
                else:
                    msg = _("Module or Group '%s' does not exist.") % ucd(pat)
                raise CompsError(msg)
        return res
class Forwarder(object):
    """Base wrapper that delegates attribute access to a wrapped libcomps
    object and adds locale-aware ``ui_*`` text lookups."""

    def __init__(self, iobj, langs):
        self._i = iobj
        self._langs = langs

    def __getattr__(self, name):
        # Anything not defined on the wrapper falls through to the
        # wrapped libcomps object.
        return getattr(self._i, name)

    def _ui_text(self, default, dct):
        # The first translation found in the user's language preference
        # order wins; otherwise fall back to the untranslated default.
        for lang in self._langs.get():
            translated = dct.get(lang)
            if translated is not None:
                return translated
        return default

    @property
    def ui_description(self):
        return self._ui_text(self.desc, self.desc_by_lang)

    @property
    def ui_name(self):
        return self._ui_text(self.name, self.name_by_lang)
class Category(Forwarder):
    # :api
    """A comps category; resolves its group ids to Group objects lazily."""

    def __init__(self, iobj, langs, group_factory):
        super(Category, self).__init__(iobj, langs)
        self._group_factory = group_factory

    def _build_group(self, grp_id):
        group = self._group_factory(grp_id.name)
        if group is None:
            # The category references a group id the comps data lacks.
            msg = "no group '%s' from category '%s'"
            raise ValueError(msg % (grp_id.name, self.id))
        return group

    def groups_iter(self):
        return (self._build_group(gid) for gid in self.group_ids)

    @property
    def groups(self):
        return list(self.groups_iter())
class Environment(Forwarder):
    # :api
    """A comps environment; resolves mandatory (group_ids) and optional
    (option_ids) group references to Group objects."""

    def __init__(self, iobj, langs, group_factory):
        super(Environment, self).__init__(iobj, langs)
        self._group_factory = group_factory

    def _build_group(self, grp_id):
        group = self._group_factory(grp_id.name)
        if group is None:
            # The environment references a group id the comps data lacks.
            msg = "no group '%s' from environment '%s'"
            raise ValueError(msg % (grp_id.name, self.id))
        return group

    def _build_groups(self, ids):
        # Unresolvable groups are logged and skipped rather than aborting.
        groups = []
        for gid in ids:
            try:
                groups.append(self._build_group(gid))
            except ValueError as e:
                logger.error(e)
        return groups

    def groups_iter(self):
        for gid in itertools.chain(self.group_ids, self.option_ids):
            try:
                yield self._build_group(gid)
            except ValueError as e:
                logger.error(e)

    @property
    def mandatory_groups(self):
        return self._build_groups(self.group_ids)

    @property
    def optional_groups(self):
        return self._build_groups(self.option_ids)
class Group(Forwarder):
    # :api
    """A comps group; partitions its packages by comps package type."""

    def __init__(self, iobj, langs, pkg_factory):
        super(Group, self).__init__(iobj, langs)
        self._pkg_factory = pkg_factory
        # Groups flagged default in the comps data start out selected.
        self.selected = iobj.default

    def _packages_of_type(self, pkg_type):
        return [p for p in self.packages if p.type == pkg_type]

    @property
    def conditional_packages(self):
        return self._packages_of_type(libcomps.PACKAGE_TYPE_CONDITIONAL)

    @property
    def default_packages(self):
        return self._packages_of_type(libcomps.PACKAGE_TYPE_DEFAULT)

    @property
    def mandatory_packages(self):
        return self._packages_of_type(libcomps.PACKAGE_TYPE_MANDATORY)

    @property
    def optional_packages(self):
        return self._packages_of_type(libcomps.PACKAGE_TYPE_OPTIONAL)

    def packages_iter(self):
        # :api
        return map(self._pkg_factory, self.packages)

    @property
    def visible(self):
        return self._i.uservisible
class Package(Forwarder):
    """Represents comps package data. :api"""

    # libcomps package-type constant -> dnf comps option-type constant
    _OPT_MAP = {
        libcomps.PACKAGE_TYPE_CONDITIONAL: CONDITIONAL,
        libcomps.PACKAGE_TYPE_DEFAULT: DEFAULT,
        libcomps.PACKAGE_TYPE_MANDATORY: MANDATORY,
        libcomps.PACKAGE_TYPE_OPTIONAL: OPTIONAL,
    }

    def __init__(self, ipkg):
        # No langs needed here: comps packages carry no translatable text,
        # so Forwarder.__init__ is deliberately not called.
        self._i = ipkg

    @property
    def name(self):
        # :api
        return self._i.name

    @property
    def option_type(self):
        # :api
        return self._OPT_MAP[self.type]
class Comps(object):
    # :api
    """Aggregated comps (category/environment/group) metadata.

    Wraps a libcomps.Comps instance and exposes lazily-built wrapper
    objects (Category, Environment, Group, Package) with locale-aware
    ``ui_*`` text.
    """

    def __init__(self):
        self._i = libcomps.Comps()
        self._langs = _Langs()

    def __len__(self):
        # Total count of categories + groups + environments.
        return _internal_comps_length(self._i)

    def _build_category(self, icategory):
        return Category(icategory, self._langs, self._group_by_id)

    def _build_environment(self, ienvironment):
        return Environment(ienvironment, self._langs, self._group_by_id)

    def _build_group(self, igroup):
        return Group(igroup, self._langs, self._build_package)

    def _build_package(self, ipkg):
        return Package(ipkg)

    def _add_from_xml_filename(self, fn):
        """Parse comps XML file *fn* and merge it into this instance.

        :raises CompsError: when the file cannot be parsed
        """
        # Parse into a fresh Comps first so a parse error leaves self._i intact.
        comps = libcomps.Comps()
        try:
            comps.fromxml_f(fn)
        except libcomps.ParserError:
            errors = comps.get_last_errors()
            raise CompsError(' '.join(errors))
        self._i += comps

    @property
    def categories(self):
        # :api
        return list(self.categories_iter())

    def category_by_pattern(self, pattern, case_sensitive=False):
        # :api
        # First category matching *pattern*, or None.
        assert dnf.util.is_string_type(pattern)
        cats = self.categories_by_pattern(pattern, case_sensitive)
        return _first_if_iterable(cats)

    def categories_by_pattern(self, pattern, case_sensitive=False):
        # :api
        assert dnf.util.is_string_type(pattern)
        return _by_pattern(pattern, case_sensitive, self.categories)

    def categories_iter(self):
        # :api
        return (self._build_category(c) for c in self._i.categories)

    @property
    def environments(self):
        # :api
        # Sorted by display_order (None sorts last).
        return sorted(self.environments_iter(), key=_fn_display_order)

    def _environment_by_id(self, id):
        assert dnf.util.is_string_type(id)
        return dnf.util.first(g for g in self.environments_iter() if g.id == id)

    def environment_by_pattern(self, pattern, case_sensitive=False):
        # :api
        # First environment matching *pattern*, or None.
        assert dnf.util.is_string_type(pattern)
        envs = self.environments_by_pattern(pattern, case_sensitive)
        return _first_if_iterable(envs)

    def environments_by_pattern(self, pattern, case_sensitive=False):
        # :api
        assert dnf.util.is_string_type(pattern)
        envs = list(self.environments_iter())
        found_envs = _by_pattern(pattern, case_sensitive, envs)
        return sorted(found_envs, key=_fn_display_order)

    def environments_iter(self):
        # :api
        return (self._build_environment(e) for e in self._i.environments)

    @property
    def groups(self):
        # :api
        # Sorted by display_order (None sorts last).
        return sorted(self.groups_iter(), key=_fn_display_order)

    def _group_by_id(self, id_):
        assert dnf.util.is_string_type(id_)
        return dnf.util.first(g for g in self.groups_iter() if g.id == id_)

    def group_by_pattern(self, pattern, case_sensitive=False):
        # :api
        # First group matching *pattern*, or None.
        assert dnf.util.is_string_type(pattern)
        grps = self.groups_by_pattern(pattern, case_sensitive)
        return _first_if_iterable(grps)

    def groups_by_pattern(self, pattern, case_sensitive=False):
        # :api
        assert dnf.util.is_string_type(pattern)
        grps = _by_pattern(pattern, case_sensitive, list(self.groups_iter()))
        return sorted(grps, key=_fn_display_order)

    def groups_iter(self):
        # :api
        return (self._build_group(g) for g in self._i.groups)
class CompsTransPkg(object):
    """Normalized view of a package participating in a comps
    transaction.  Can be built from a plain package name, a swdb
    CompsGroupPackage, or a comps package object; always exposes
    ``name``, ``basearchonly``, ``optional`` and ``requires``."""

    def __init__(self, pkg_or_name):
        if dnf.util.is_string_type(pkg_or_name):
            # built from a bare package name
            self.basearchonly = False
            self.name = pkg_or_name
            self.optional = True
            self.requires = None
        elif isinstance(pkg_or_name, libdnf.transaction.CompsGroupPackage):
            # built from a swdb package record
            # TODO:
            self.basearchonly = False
            # self.basearchonly = pkg_or_name.basearchonly
            self.name = pkg_or_name.getName()
            self.optional = pkg_or_name.getPackageType() & libcomps.PACKAGE_TYPE_OPTIONAL
            # TODO:
            self.requires = None
            # self.requires = pkg_or_name.requires
        else:
            # built from a comps package object
            self.basearchonly = pkg_or_name.basearchonly
            self.name = pkg_or_name.name
            self.optional = pkg_or_name.type & libcomps.PACKAGE_TYPE_OPTIONAL
            self.requires = pkg_or_name.requires

    def __eq__(self, other):
        # Compare the same attribute tuple that __hash__ is based on.
        return ((self.name, self.basearchonly, self.optional, self.requires) ==
                (other.name, other.basearchonly, other.optional, other.requires))

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash((self.name,
                     self.basearchonly,
                     self.optional,
                     self.requires))
class TransactionBunch(object):
    """Accumulates the package operations (install, optional install,
    remove, upgrade) a comps operation expands to.  Bunches can be
    merged together with ``+=``."""

    def __init__(self):
        self._install = set()
        self._install_opt = set()
        self._remove = set()
        self._upgrade = set()

    def __iadd__(self, other):
        for mine, theirs in ((self._install, other._install),
                             (self._install_opt, other._install_opt),
                             (self._upgrade, other._upgrade)):
            mine.update(theirs)
        # A package scheduled for (optional) install or upgrade must not
        # simultaneously be scheduled for removal.
        self._remove = (self._remove | other._remove).difference(
            self._install, self._install_opt, self._upgrade)
        return self

    def __len__(self):
        return sum(len(part) for part in
                   (self.install, self.install_opt, self.upgrade, self.remove))

    @staticmethod
    def _set_value(param, val):
        # Coerce every item into a CompsTransPkg before adding it.
        for entry in val:
            if isinstance(entry, CompsTransPkg):
                param.add(entry)
            else:
                param.add(CompsTransPkg(entry))

    @property
    def install(self):
        """
        Packages installed with strict=True: the transaction fails if
        they cannot be installed (dependency errors etc.).
        """
        return self._install

    @install.setter
    def install(self, value):
        self._set_value(self._install, value)

    @property
    def install_opt(self):
        """
        Packages installed with strict=False: silently skipped when
        they cannot be installed.
        """
        return self._install_opt

    @install_opt.setter
    def install_opt(self, value):
        self._set_value(self._install_opt, value)

    @property
    def remove(self):
        """Packages scheduled for removal."""
        return self._remove

    @remove.setter
    def remove(self, value):
        self._set_value(self._remove, value)

    @property
    def upgrade(self):
        """Packages scheduled for upgrade."""
        return self._upgrade

    @upgrade.setter
    def upgrade(self, value):
        self._set_value(self._upgrade, value)
class Solver(object):
    """Resolves comps environment/group operations (install, remove,
    upgrade) into a TransactionBunch of package operations, recording
    group/environment state in the history (swdb) database."""

    def __init__(self, history, comps, reason_fn):
        self.history = history
        self.comps = comps
        self._reason_fn = reason_fn

    @staticmethod
    def _mandatory_group_set(env):
        """Return the ids of the environment's mandatory groups."""
        return {grp.id for grp in env.mandatory_groups}

    @staticmethod
    def _full_package_set(grp):
        """Return every package name in the group, regardless of type."""
        return {pkg.getName() for pkg in grp.mandatory_packages +
                grp.default_packages + grp.optional_packages +
                grp.conditional_packages}

    @staticmethod
    def _pkgs_of_type(group, pkg_types, exclude=None):
        """Return the group's packages whose type matches the *pkg_types*
        bitmask, skipping names listed in *exclude*.

        Fix: the default for *exclude* used to be a mutable list
        (``exclude=[]``); it is now None — same behavior, safer idiom.
        """
        excluded_names = frozenset(exclude or ())

        def _keep(pkgs):
            # renamed from 'filter' to avoid shadowing the builtin
            return [pkg for pkg in pkgs if pkg.name not in excluded_names]

        pkgs = set()
        if pkg_types & MANDATORY:
            pkgs.update(_keep(group.mandatory_packages))
        if pkg_types & DEFAULT:
            pkgs.update(_keep(group.default_packages))
        if pkg_types & OPTIONAL:
            pkgs.update(_keep(group.optional_packages))
        if pkg_types & CONDITIONAL:
            pkgs.update(_keep(group.conditional_packages))
        return pkgs

    def _removable_pkg(self, pkg_name):
        """True when history says the package can be removed."""
        assert dnf.util.is_string_type(pkg_name)
        return self.history.group.is_removable_pkg(pkg_name)

    def _removable_grp(self, group_id):
        """True when history says the group can be removed."""
        assert dnf.util.is_string_type(group_id)
        return self.history.env.is_removable_group(group_id)

    def _environment_install(self, env_id, pkg_types, exclude=None, strict=True, exclude_groups=None):
        """Install an environment: installs its mandatory groups and
        registers its optional groups; returns the TransactionBunch."""
        assert dnf.util.is_string_type(env_id)
        comps_env = self.comps._environment_by_id(env_id)
        if not comps_env:
            raise CompsError(_("Environment id '%s' does not exist.") % ucd(env_id))
        swdb_env = self.history.env.new(env_id, comps_env.name, comps_env.ui_name, pkg_types)
        self.history.env.install(swdb_env)
        trans = TransactionBunch()
        for comps_group in comps_env.mandatory_groups:
            if exclude_groups and comps_group.id in exclude_groups:
                continue
            trans += self._group_install(comps_group.id, pkg_types, exclude, strict)
            swdb_env.addGroup(comps_group.id, True, MANDATORY)
        for comps_group in comps_env.optional_groups:
            if exclude_groups and comps_group.id in exclude_groups:
                continue
            # optional groups are only registered, not installed
            swdb_env.addGroup(comps_group.id, False, OPTIONAL)
        # TODO: if a group is already installed, mark it as installed?
        return trans

    def _environment_remove(self, env_id):
        """Remove an installed environment and those of its groups that
        are still removable."""
        # consistency: dropped the redundant 'is True' from this assert
        assert dnf.util.is_string_type(env_id)
        swdb_env = self.history.env.get(env_id)
        if not swdb_env:
            raise CompsError(_("Environment id '%s' is not installed.") % env_id)
        self.history.env.remove(swdb_env)
        trans = TransactionBunch()
        group_ids = set([i.getGroupId() for i in swdb_env.getGroups()])
        for group_id in group_ids:
            if not self._removable_grp(group_id):
                continue
            trans += self._group_remove(group_id)
        return trans

    def _environment_upgrade(self, env_id):
        """Upgrade an installed environment: upgrade groups already
        present, install newly-added mandatory ones."""
        assert dnf.util.is_string_type(env_id)
        comps_env = self.comps._environment_by_id(env_id)
        swdb_env = self.history.env.get(env_id)
        if not swdb_env:
            raise CompsError(_("Environment '%s' is not installed.") % env_id)
        if not comps_env:
            raise CompsError(_("Environment '%s' is not available.") % env_id)
        old_set = set([i.getGroupId() for i in swdb_env.getGroups()])
        pkg_types = swdb_env.getPackageTypes()
        # create a new record for current transaction
        swdb_env = self.history.env.new(comps_env.id, comps_env.name, comps_env.ui_name, pkg_types)
        trans = TransactionBunch()
        for comps_group in comps_env.mandatory_groups:
            if comps_group.id in old_set:
                if self.history.group.get(comps_group.id):
                    # upgrade installed group
                    trans += self._group_upgrade(comps_group.id)
            else:
                # install new group
                trans += self._group_install(comps_group.id, pkg_types)
            swdb_env.addGroup(comps_group.id, True, MANDATORY)
        for comps_group in comps_env.optional_groups:
            if comps_group.id in old_set and self.history.group.get(comps_group.id):
                # upgrade installed group
                trans += self._group_upgrade(comps_group.id)
            swdb_env.addGroup(comps_group.id, False, OPTIONAL)
        # TODO: if a group is already installed, mark it as installed?
        self.history.env.upgrade(swdb_env)
        return trans

    def _group_install(self, group_id, pkg_types, exclude=None, strict=True, exclude_groups=None):
        """Install a group's packages of the requested types; strict
        controls whether failures abort the transaction."""
        assert dnf.util.is_string_type(group_id)
        comps_group = self.comps._group_by_id(group_id)
        if not comps_group:
            raise CompsError(_("Group id '%s' does not exist.") % ucd(group_id))
        swdb_group = self.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types)
        for pkg in comps_group.packages_iter():
            pkg_installed = self.history.swdb.getPackageCompsGroups(pkg.name) != ()
            swdb_group.addPackage(pkg.name, pkg_installed, Package._OPT_MAP[pkg.type])
        self.history.group.install(swdb_group)
        trans = TransactionBunch()
        # TODO: remove exclude
        if strict:
            trans.install.update(self._pkgs_of_type(comps_group, pkg_types, exclude=[]))
        else:
            trans.install_opt.update(self._pkgs_of_type(comps_group, pkg_types, exclude=[]))
        return trans

    def _group_remove(self, group_id):
        """Remove an installed group's removable packages."""
        assert dnf.util.is_string_type(group_id)
        swdb_group = self.history.group.get(group_id)
        if not swdb_group:
            raise CompsError(_("Module or Group '%s' is not installed.") % group_id)
        self.history.group.remove(swdb_group)
        trans = TransactionBunch()
        trans.remove = {pkg for pkg in swdb_group.getPackages() if self._removable_pkg(pkg.getName())}
        return trans

    def _group_upgrade(self, group_id):
        """Upgrade an installed group: install added packages, remove
        dropped ones, upgrade those that remain."""
        assert dnf.util.is_string_type(group_id)
        comps_group = self.comps._group_by_id(group_id)
        swdb_group = self.history.group.get(group_id)
        exclude = []
        if not swdb_group:
            argument = comps_group.ui_name if comps_group else group_id
            raise CompsError(_("Module or Group '%s' is not installed.") % argument)
        if not comps_group:
            raise CompsError(_("Module or Group '%s' is not available.") % group_id)
        pkg_types = swdb_group.getPackageTypes()
        old_set = set([i.getName() for i in swdb_group.getPackages()])
        new_set = self._pkgs_of_type(comps_group, pkg_types, exclude)
        # create a new record for current transaction
        swdb_group = self.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types)
        for pkg in comps_group.packages_iter():
            pkg_installed = self.history.swdb.getPackageCompsGroups(pkg.name) != ()
            swdb_group.addPackage(pkg.name, pkg_installed, Package._OPT_MAP[pkg.type])
        self.history.group.upgrade(swdb_group)
        trans = TransactionBunch()
        trans.install = {pkg for pkg in new_set if pkg.name not in old_set}
        trans.remove = {name for name in old_set
                        if name not in [pkg.name for pkg in new_set]}
        trans.upgrade = {pkg for pkg in new_set if pkg.name in old_set}
        return trans

    def _exclude_packages_from_installed_groups(self, base):
        # NOTE(review): Solver.__init__ never sets `self.persistor`;
        # presumably it is assigned externally or this method is dead
        # code — confirm before relying on it.
        for group in self.persistor.groups:
            p_grp = self.persistor.group(group)
            if p_grp.installed:
                installed_pkg_names = \
                    set(p_grp.full_list) - set(p_grp.pkg_exclude)
                installed_pkgs = base.sack.query().installed().filterm(name=installed_pkg_names)
                for pkg in installed_pkgs:
                    base._goal.install(pkg)
| 25,019
|
Python
|
.py
| 601
| 32.710483
| 103
| 0.62484
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,733
|
pycomp.py
|
rpm-software-management_dnf/dnf/pycomp.py
|
# pycomp.py
# Python 2 and Python 3 compatibility module
#
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from gettext import NullTranslations
from sys import version_info
import base64
import email.mime.text
import gettext
import itertools
import locale
import sys
import types
# True when running under Python 3; selects which compatibility
# aliases below are installed.
PY3 = version_info.major >= 3
if PY3:
    from io import StringIO
    from configparser import ConfigParser
    import queue
    import urllib.parse
    import shlex
    # functions renamed in py3
    Queue = queue.Queue
    basestring = unicode = str
    filterfalse = itertools.filterfalse
    long = int
    # restore the py2-era (u)gettext/(u)ngettext method names
    NullTranslations.ugettext = NullTranslations.gettext
    NullTranslations.ungettext = NullTranslations.ngettext
    xrange = range
    raw_input = input
    base64_decodebytes = base64.decodebytes
    urlparse = urllib.parse
    urllib_quote = urlparse.quote
    shlex_quote = shlex.quote
    sys_maxsize = sys.maxsize
    def gettext_setup(t):
        # Return the (gettext, ngettext) pair for translations object t.
        _ = t.gettext
        P_ = t.ngettext
        return (_, P_)
    # string helpers
    def is_py2str_py3bytes(o):
        return isinstance(o, bytes)
    def is_py3bytes(o):
        return isinstance(o, bytes)
    # functions that don't take unicode arguments in py2
    ModuleType = lambda m: types.ModuleType(m)
    format = locale.format_string
    def setlocale(category, loc=None):
        locale.setlocale(category, loc)
    def write_to_file(f, content):
        f.write(content)
    def email_mime(body):
        return email.mime.text.MIMEText(body)
else:
    # Python 2 branch: same names, byte-string aware implementations.
    # functions renamed in py3
    from __builtin__ import unicode, basestring, long, xrange, raw_input
    from StringIO import StringIO
    from ConfigParser import ConfigParser
    import Queue
    import urllib
    import urlparse
    import pipes
    Queue = Queue.Queue
    filterfalse = itertools.ifilterfalse
    base64_decodebytes = base64.decodestring
    urllib_quote = urllib.quote
    shlex_quote = pipes.quote
    sys_maxsize = sys.maxint
    def gettext_setup(t):
        # Return the (ugettext, ungettext) pair for translations object t.
        _ = t.ugettext
        P_ = t.ungettext
        return (_, P_)
    # string helpers
    def is_py2str_py3bytes(o):
        return isinstance(o, str)
    def is_py3bytes(o):
        return False
    # functions that don't take unicode arguments in py2
    ModuleType = lambda m: types.ModuleType(m.encode('utf-8'))
    def format(percent, *args, **kwargs):
        return locale.format(percent.encode('utf-8'), *args, **kwargs)
    def setlocale(category, loc=None):
        locale.setlocale(category, loc.encode('utf-8'))
    def write_to_file(f, content):
        f.write(content.encode('utf-8'))
    def email_mime(body):
        return email.mime.text.MIMEText(body.encode('utf-8'))
| 3,604
|
Python
|
.py
| 101
| 31.118812
| 77
| 0.721283
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,734
|
query.py
|
rpm-software-management_dnf/dnf/query.py
|
# query.py
# Implements Query.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import hawkey
from hawkey import Query
from dnf.i18n import ucd
from dnf.pycomp import basestring
def _by_provides(sack, patterns, ignore_case=False, get_query=False):
    """Find packages whose provides match the given glob pattern(s).

    Returns the hawkey Query itself when get_query is True, otherwise
    the list of matching packages."""
    if isinstance(patterns, basestring):
        patterns = [patterns]
    flags = [hawkey.ICASE] if ignore_case else []
    q = sack.query()
    q.filterm(*flags, provides__glob=patterns)
    return q if get_query else q.run()
def _per_nevra_dict(pkg_list):
    """Group packages by their unicode (NEVRA) string representation."""
    grouped = {}
    for package in pkg_list:
        key = ucd(package)
        if key not in grouped:
            grouped[key] = []
        grouped[key].append(package)
    return grouped
| 1,661
|
Python
|
.py
| 41
| 37.560976
| 77
| 0.748139
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,735
|
exceptions.py
|
rpm-software-management_dnf/dnf/exceptions.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Copyright 2004 Duke University
"""
Core DNF Errors.
"""
from __future__ import unicode_literals
from dnf.i18n import ucd, _, P_
import dnf.util
import libdnf
import warnings
class DeprecationWarning(DeprecationWarning):
    # :api
    # Subclasses the builtin DeprecationWarning; used by this codebase
    # when emitting its own deprecation warnings via warnings.warn().
    pass
class Error(Exception):
    # :api
    """Base Error. All other Errors thrown by DNF should inherit from this.
    :api
    """
    def __init__(self, value=None):
        super(Error, self).__init__()
        # The message is normalized to a unicode string (or kept None).
        self.value = None if value is None else ucd(value)

    def __str__(self):
        return "{}".format(self.value)

    def __unicode__(self):
        # Python 2 compatibility.
        return ucd(self.__str__())
class CompsError(Error):
    # :api
    # Raised for comps (group/environment) related failures.
    pass
class ConfigError(Error):
    """Configuration problem; *raw_error* optionally carries the
    original low-level error text."""
    def __init__(self, value=None, raw_error=None):
        super(ConfigError, self).__init__(value)
        self.raw_error = ucd(raw_error) if raw_error is not None else None
class DatabaseError(Error):
    """Database-related error."""
    pass
class DepsolveError(Error):
    # :api
    # Raised when dependency resolution fails.
    pass
class DownloadError(Error):
    # :api
    """Download failure; carries *errmap*, a mapping from error source
    (may be empty) to a list of error messages."""

    def __init__(self, errmap):
        super(DownloadError, self).__init__()
        self.errmap = errmap

    @staticmethod
    def errmap2str(errmap):
        """Flatten an error map into a single printable string."""
        lines = []
        for source, errors in errmap.items():
            for error in errors:
                lines.append('%s: %s' % (source, error) if source else '%s' % error)
        return '\n'.join(lines)

    def __str__(self):
        return self.errmap2str(self.errmap)
class LockError(Error):
    """Base class for locking errors (see ProcessLockError,
    ThreadLockError)."""
    pass
class MarkingError(Error):
    # :api
    # Error while marking a package spec for a transaction; remembers
    # the offending spec so __str__ can include it.
    def __init__(self, value=None, pkg_spec=None):
        """Initialize the marking error instance."""
        super(MarkingError, self).__init__(value)
        self.pkg_spec = None if pkg_spec is None else ucd(pkg_spec)

    def __str__(self):
        string = super(MarkingError, self).__str__()
        if self.pkg_spec:
            string += ': ' + self.pkg_spec
        return string
class MarkingErrors(Error):
    # :api
    # Aggregates all the spec/group/module problems found while
    # building a transaction request into one error with a combined
    # message.
    def __init__(self, no_match_group_specs=(), error_group_specs=(), no_match_pkg_specs=(),
                 error_pkg_specs=(), module_depsolv_errors=()):
        """Initialize the marking error instance."""
        msg = _("Problems in request:")
        if (no_match_pkg_specs):
            msg += "\n" + _("missing packages: ") + ", ".join(no_match_pkg_specs)
        if (error_pkg_specs):
            msg += "\n" + _("broken packages: ") + ", ".join(error_pkg_specs)
        if (no_match_group_specs):
            msg += "\n" + _("missing groups or modules: ") + ", ".join(no_match_group_specs)
        if (error_group_specs):
            msg += "\n" + _("broken groups or modules: ") + ", ".join(error_group_specs)
        if (module_depsolv_errors):
            # module_depsolv_errors is a pair: (problems, error-type)
            msg_mod = dnf.util._format_resolve_problems(module_depsolv_errors[0])
            if module_depsolv_errors[1] == \
                    libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS:
                msg += "\n" + "\n".join([P_('Modular dependency problem with Defaults:',
                                            'Modular dependency problems with Defaults:',
                                            len(module_depsolv_errors)),
                                        msg_mod])
            else:
                msg += "\n" + "\n".join([P_('Modular dependency problem:',
                                            'Modular dependency problems:',
                                            len(module_depsolv_errors)),
                                        msg_mod])
        super(MarkingErrors, self).__init__(msg)
        self.no_match_group_specs = no_match_group_specs
        self.error_group_specs = error_group_specs
        self.no_match_pkg_specs = no_match_pkg_specs
        self.error_pkg_specs = error_pkg_specs
        self.module_depsolv_errors = module_depsolv_errors

    @property
    def module_debsolv_errors(self):
        # Deprecated misspelled alias kept for backward compatibility.
        msg = "Attribute module_debsolv_errors is deprecated. Use module_depsolv_errors " \
              "attribute instead."
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        return self.module_depsolv_errors
class MetadataError(Error):
    """Repository metadata error."""
    pass
class MiscError(Error):
    """Miscellaneous error with no more specific category."""
    pass
class PackagesNotAvailableError(MarkingError):
    """Marking failed because the packages are not available; keeps the
    list of affected packages."""
    def __init__(self, value=None, pkg_spec=None, packages=None):
        super(PackagesNotAvailableError, self).__init__(value, pkg_spec)
        self.packages = packages or []
class PackageNotFoundError(MarkingError):
    """Marking failed because no package matched the spec."""
    pass
class PackagesNotInstalledError(MarkingError):
    """Marking failed because the packages are not installed; keeps the
    list of affected packages."""
    def __init__(self, value=None, pkg_spec=None, packages=None):
        super(PackagesNotInstalledError, self).__init__(value, pkg_spec)
        self.packages = packages or []
class ProcessLockError(LockError):
    """Lock held by another process; *pid* identifies the holder."""
    def __init__(self, value, pid):
        super(ProcessLockError, self).__init__(value)
        self.pid = pid

    def __reduce__(self):
        """Pickling support."""
        return (ProcessLockError, (self.value, self.pid))
class ReadOnlyVariableError(Error):
    """Attempt to modify a read-only variable; remembers its name."""
    def __init__(self, value, variable_name):
        super(ReadOnlyVariableError, self).__init__(value)
        self.variable_name = variable_name
class RepoError(Error):
    # :api
    # Raised for repository-level failures.
    pass
class ThreadLockError(LockError):
    """Lock held by another thread of this process."""
    pass
class TransactionCheckError(Error):
    """Transaction failed its pre-run check."""
    pass
| 6,035
|
Python
|
.py
| 148
| 32.736486
| 92
| 0.623715
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,736
|
dnssec.py
|
rpm-software-management_dnf/dnf/dnssec.py
|
# dnssec.py
# DNS extension for automatic GPG key verification
#
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from enum import Enum
import base64
import hashlib
import logging
import re
from dnf.i18n import _
import dnf.rpm
import dnf.exceptions
logger = logging.getLogger("dnf")
# DNS resource record type number for OPENPGPKEY records (RFC 7929).
RR_TYPE_OPENPGPKEY = 61
class DnssecError(dnf.exceptions.Error):
    """
    Exception used in the dnssec module
    """
    def __repr__(self):
        # self.value may be None when raised without a message.
        return "<DnssecError, value='{}'>"\
            .format(self.value if self.value is not None else "Not specified")
def email2location(email_address, tag="_openpgpkey"):
    # type: (str, str) -> str
    """
    Implements RFC 7929, section 3
    https://tools.ietf.org/html/rfc7929#section-3

    :param email_address: address in "local@domain" form (split on the
        last '@', so the local part itself may contain '@')
    :param tag: DNS label inserted between the hash and the domain
    :return: "<hex sha256(local) truncated to 28 bytes>.<tag>.<domain>"
    :raises DnssecError: when the address contains no '@' at all
    """
    split = email_address.rsplit("@", 1)
    if len(split) != 2:
        msg = "Email address must contain exactly one '@' sign."
        raise DnssecError(msg)
    local, domain = split
    # Renamed from 'hash' to avoid shadowing the builtin.
    hasher = hashlib.sha256()
    hasher.update(local.encode('utf-8'))
    # RFC 7929: SHA2-256 of the local part, truncated to 28 octets,
    # presented as lowercase hex.
    digest = base64.b16encode(hasher.digest()[0:28])\
        .decode("utf-8")\
        .lower()
    return digest + "." + tag + "." + domain
class Validity(Enum):
    """
    Output of the verification algorithm.
    TODO: this type might be simplified in order to less reflect the underlying DNS layer.
    TODO: more specifically the variants from 3 to 5 should have more understandable names
    """
    VALID = 1                 # key in DNS matches the queried key
    REVOKED = 2               # key in DNS differs from the queried key
    PROVEN_NONEXISTENCE = 3   # DNSSEC proved no such record exists
    RESULT_NOT_SECURE = 4     # DNS answer was not DNSSEC-protected
    BOGUS_RESULT = 5          # DNSSEC signature validation failed
    ERROR = 9                 # lookup could not be performed
class NoKey:
    """
    This class represents an absence of a key in the cache. It is an expression of non-existence
    using the Python's type system.
    """
    pass
class KeyInfo:
    """
    Pairs an e-mail address with its verification key; both are kept as
    strings (the key as ASCII bytes without armor/checksum).
    """
    def __init__(self, email=None, key=None):
        self.email = email
        self.key = key

    def __repr__(self):
        return 'KeyInfo("{}", "{}...")'.format(self.email, self.key.decode('ascii')[:6])

    @staticmethod
    def from_rpm_key_object(userid, raw_key):
        # type: (str, bytes) -> KeyInfo
        """
        Convert an RPM-style ASCII-armored key (and its userid) into the
        bare base64 payload format used by DNS RRs.
        """
        matched = re.search('<(.*@.*)>', userid)
        if matched is None:
            raise DnssecError
        address = matched.group(1)

        armor_lines = raw_key.decode('ascii').split('\n')
        begin = end = 0
        for idx, line in enumerate(armor_lines):
            if line == '-----BEGIN PGP PUBLIC KEY BLOCK-----':
                begin = idx
            if line == '-----END PGP PUBLIC KEY BLOCK-----':
                end = idx
        # Skip the BEGIN marker plus the armor-headers line, and drop
        # the trailing checksum line just before the END marker.
        payload = ''.join(armor_lines[begin + 2:end - 1]).encode('ascii')
        return KeyInfo(address, payload)
class DNSSECKeyVerification:
    """
    The main class when it comes to verification itself. It wraps Unbound context and a cache with
    already obtained results.
    """

    # Mapping from email address to b64 encoded public key or NoKey in case of proven nonexistence
    _cache = {}
    # type: Dict[str, Union[str, NoKey]]

    @staticmethod
    def _cache_hit(key_union, input_key_string):
        # type: (Union[str, NoKey], str) -> Validity
        """
        Compare the key in case it was found in the cache.
        """
        if key_union == input_key_string:
            logger.debug("Cache hit, valid key")
            return Validity.VALID
        elif isinstance(key_union, NoKey):
            logger.debug("Cache hit, proven non-existence")
            return Validity.PROVEN_NONEXISTENCE
        else:
            # Cached key differs from the one being checked.
            logger.debug("Key in cache: {}".format(key_union))
            logger.debug("Input key   : {}".format(input_key_string))
            return Validity.REVOKED

    @staticmethod
    def _cache_miss(input_key):
        # type: (KeyInfo) -> Validity
        """
        In case the key was not found in the cache, create an Unbound context and contact the DNS
        system
        """
        try:
            import unbound
        except ImportError as e:
            msg = _("Configuration option 'gpgkey_dns_verification' requires "
                    "python3-unbound ({})".format(e))
            raise dnf.exceptions.Error(msg)

        # Configure a DNSSEC-validating resolver context.
        ctx = unbound.ub_ctx()
        if ctx.set_option("verbosity:", "0") != 0:
            logger.debug("Unbound context: Failed to set verbosity")

        if ctx.set_option("qname-minimisation:", "yes") != 0:
            logger.debug("Unbound context: Failed to set qname minimisation")

        if ctx.resolvconf() != 0:
            logger.debug("Unbound context: Failed to read resolv.conf")

        if ctx.add_ta_file("/var/lib/unbound/root.key") != 0:
            logger.debug("Unbound context: Failed to add trust anchor file")

        if input_key.email is None:
            logger.debug("A key has no associated e-mail address")
            return Validity.ERROR

        status, result = ctx.resolve(email2location(input_key.email),
                                     RR_TYPE_OPENPGPKEY, unbound.RR_CLASS_IN)
        if status != 0:
            logger.debug("Communication with DNS servers failed")
            return Validity.ERROR
        if result.bogus:
            logger.debug("DNSSEC signatures are wrong ({})".format(result.why_bogus))
            return Validity.BOGUS_RESULT
        if not result.secure:
            logger.debug("Result is not secured with DNSSEC")
            return Validity.RESULT_NOT_SECURE
        if result.nxdomain or (result.rcode == unbound.RCODE_NOERROR and not result.havedata):
            logger.debug("Non-existence of this record was proven by DNSSEC")
            return Validity.PROVEN_NONEXISTENCE
        if not result.havedata:
            # TODO: This is weird result, but there is no way to perform validation, so just return
            # an error
            # Should handle only SERVFAIL, REFUSED and similar rcodes
            logger.debug("Unknown error in DNS communication: {}".format(result.rcode_str))
            return Validity.ERROR
        else:
            # Compare the DNS-published key with the locally known one.
            data = result.data.as_raw_data()[0]
            dns_data_b64 = base64.b64encode(data)
            if dns_data_b64 == input_key.key:
                return Validity.VALID
            else:
                # In case it is different, print the keys for further examination in debug mode
                logger.debug("Key from DNS: {}".format(dns_data_b64))
                logger.debug("Input key   : {}".format(input_key.key))
                return Validity.REVOKED

    @staticmethod
    def verify(input_key):
        # type: (KeyInfo) -> Validity
        """
        Public API. Use this method to verify a KeyInfo object.
        """
        logger.debug("Running verification for key with id: {}".format(input_key.email))
        key_union = DNSSECKeyVerification._cache.get(input_key.email)
        if key_union is not None:
            return DNSSECKeyVerification._cache_hit(key_union, input_key.key)
        else:
            result = DNSSECKeyVerification._cache_miss(input_key)
            # Only definite outcomes are cached; transient errors are not.
            if result == Validity.VALID:
                DNSSECKeyVerification._cache[input_key.email] = input_key.key
            elif result == Validity.PROVEN_NONEXISTENCE:
                DNSSECKeyVerification._cache[input_key.email] = NoKey()
            return result
def nice_user_msg(ki, v):
    # type: (KeyInfo, Validity) -> str
    """
    Build a human-readable sentence about the key's validity.
    """
    status = _("is valid.") if v == Validity.VALID else _("has unknown status.")
    return _("DNSSEC extension: Key for user ") + ki.email + " " + status
def any_msg(m):
    # type: (str) -> str
    """
    Label any given message with DNSSEC extension tag
    """
    return _("DNSSEC extension: ") + m
class RpmImportedKeys:
    """
    Wrapper around keys, that are imported in the RPM database.
    The keys are stored in packages with name gpg-pubkey, where the version and
    release is different for each of them. The key content itself is stored as
    an ASCII armored string in the package description, so it needs to be parsed
    before it can be used.
    """
    @staticmethod
    def _query_db_for_gpg_keys():
        # type: () -> List[KeyInfo]
        # TODO: base.conf.installroot ?? -----------------------\
        transaction_set = dnf.rpm.transaction.TransactionWrapper()
        packages = transaction_set.dbMatch("name", "gpg-pubkey")
        return_list = []
        for pkg in packages:
            packager = dnf.rpm.getheader(pkg, 'packager')
            # Fix: re.search() returns None when the packager field has no
            # '<email>' part; the old code crashed there with AttributeError.
            match = re.search('<(.*@.*)>', packager) if packager is not None else None
            email = match.group(1) if match is not None else None
            if email is None:
                logger.debug(any_msg(_(
                    "Exempting key package {} from a validation "
                    "because it's not bound to any e-mail address").format(
                    dnf.rpm.getheader(pkg, 'nevra'))))
                continue
            description = dnf.rpm.getheader(pkg, 'description')
            # Extract Radix-64-encoded PGP key. Without armor headers and
            # a checksum.
            key_lines = []
            in_headers = True
            for line in description.split('\n')[0:-3]:
                if in_headers:
                    # First blank line terminates the armor headers.
                    # (re.NOFLAG dropped: it requires Python >= 3.11 and
                    # passing no flags is equivalent.)
                    if re.match(r'\A\s*\Z', line):
                        in_headers = False
                else:
                    key_lines.append(line)
            key_str = ''.join(key_lines)
            return_list += [KeyInfo(email, key_str.encode('ascii'))]
        return return_list

    @staticmethod
    def check_imported_keys_validity():
        """Run DNSSEC verification over every imported gpg-pubkey and
        log the outcome of each check."""
        keys = RpmImportedKeys._query_db_for_gpg_keys()
        logger.info(any_msg(_("Testing already imported keys for their validity.")))
        for key in keys:
            try:
                result = DNSSECKeyVerification.verify(key)
            except DnssecError as e:
                # Errors in this exception should not be fatal, print it and just continue
                logger.warning("DNSSEC extension error (email={}): {}"
                               .format(key.email, e.value))
                continue
            # TODO: remove revoked keys automatically and possibly ask user to confirm
            if result == Validity.VALID:
                logger.debug(any_msg("GPG Key {} is valid".format(key.email)))
                pass
            elif result == Validity.PROVEN_NONEXISTENCE:
                logger.debug(any_msg("GPG Key {} does not support DNS"
                                     " verification".format(key.email)))
            elif result == Validity.BOGUS_RESULT:
                logger.info(any_msg("GPG Key {} could not be verified, because DNSSEC signatures"
                                    " are bogus. Possible causes: wrong configuration of the DNS"
                                    " server, MITM attack".format(key.email)))
            elif result == Validity.REVOKED:
                logger.info(any_msg("GPG Key {} has been revoked and should"
                                    " be removed immediately".format(key.email)))
            else:
                logger.debug(any_msg("GPG Key {} could not be tested".format(key.email)))
| 12,524
|
Python
|
.py
| 290
| 33.851724
| 99
| 0.608564
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,737
|
crypto.py
|
rpm-software-management_dnf/dnf/crypto.py
|
# crypto.py
# Keys and signatures.
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
import libdnf.repo
import contextlib
import dnf.pycomp
import dnf.util
import dnf.yum.misc
import logging
import os
import warnings
GPG_HOME_ENV = 'GNUPGHOME'
logger = logging.getLogger('dnf')
def _extract_signing_subkey(key):
# :deprecated, undocumented
""" It was used internally and is no longer used. """
msg = "Function `_extract_signing_subkey` is deprecated. Will be removed after 2023-10-30."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return dnf.util.first(subkey for subkey in key.subkeys if subkey.can_sign)
def _printable_fingerprint(fpr_hex):
segments = (fpr_hex[i:i + 4] for i in range(0, len(fpr_hex), 4))
return " ".join(segments)
def import_repo_keys(repo):
# :deprecated, undocumented
""" Deprecated function. Please do not use.
It was used internally. In 2018, the code was rewritten into libdnf. This code is no longer used.
It was broken in 2018 - the `repo._key_import._confirm` method now needs 5 arguments.
It is now fixed and marked as deprecated. Please do not use. """
msg = "Function `import_repo_keys` is deprecated. Will be removed after 2023-10-30."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
gpgdir = repo._pubring_dir
known_keys = keyids_from_pubring(gpgdir)
for keyurl in repo.gpgkey:
for keyinfo in retrieve(keyurl, repo):
keyid = keyinfo.id_
if keyid in known_keys:
logger.debug(_('repo %s: 0x%s already imported'), repo.id, keyid)
continue
if not repo._key_import._confirm(
keyid, keyinfo.userid, keyinfo.fingerprint, keyinfo.url, keyinfo.timestamp):
continue
dnf.yum.misc.import_key_to_pubring(
keyinfo.raw_key, keyinfo.short_id, gpgdir=gpgdir,
make_ro_copy=False)
logger.debug(_('repo %s: imported key 0x%s.'), repo.id, keyid)
def keyids_from_pubring(gpgdir):
# :deprecated, undocumented
""" It is used internally by deprecated function `import_repo_keys`. """
msg = "Function `keyids_from_pubring` is deprecated. Will be removed after 2023-10-30."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
keyids = []
for keyid in libdnf.repo.keyidsFromPubring(gpgdir):
keyids.append(keyid)
return keyids
def log_key_import(keyinfo):
msg = (_('Importing GPG key 0x%s:\n'
' Userid : "%s"\n'
' Fingerprint: %s\n'
' From : %s') %
(keyinfo.short_id, keyinfo.userid,
_printable_fingerprint(keyinfo.fingerprint),
keyinfo.url.replace("file://", "")))
logger.critical("%s", msg)
def log_dns_key_import(keyinfo, dns_result):
log_key_import(keyinfo)
if dns_result == dnf.dnssec.Validity.VALID:
logger.critical(_('Verified using DNS record with DNSSEC signature.'))
else:
logger.critical(_('NOT verified using DNS record.'))
@contextlib.contextmanager
def pubring_dir(pubring_dir):
# :deprecated, undocumented
""" It was used internally and is no longer used. """
msg = "Function `pubring_dir` is deprecated. Will be removed after 2023-10-30."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
orig = os.environ.get(GPG_HOME_ENV, None)
os.environ[GPG_HOME_ENV] = pubring_dir
try:
yield
finally:
if orig is None:
del os.environ[GPG_HOME_ENV]
else:
os.environ[GPG_HOME_ENV] = orig
def rawkey2infos(key_fo):
keyinfos = []
keys = libdnf.repo.Key.keysFromFd(key_fo.fileno())
for key in keys:
keyinfos.append(Key(key))
return keyinfos
def retrieve(keyurl, repo=None):
if keyurl.startswith('http:'):
logger.warning(_("retrieving repo key for %s unencrypted from %s"), repo.id, keyurl)
with dnf.util._urlopen(keyurl, repo=repo) as handle:
keyinfos = rawkey2infos(handle)
for keyinfo in keyinfos:
keyinfo.url = keyurl
return keyinfos
class Key(object):
def __init__(self, repokey):
self.id_ = repokey.getId()
self.fingerprint = repokey.getFingerprint()
self.raw_key = bytes(repokey.getAsciiArmoredKey(), 'utf-8')
self.timestamp = repokey.getTimestamp()
self.url = repokey.getUrl()
self.userid = repokey.getUserId()
@property
def short_id(self):
rj = '0' if dnf.pycomp.PY3 else b'0'
return self.id_[-8:].rjust(8, rj)
@property
def rpm_id(self):
return self.short_id.lower()
| 5,705
|
Python
|
.py
| 133
| 36.992481
| 105
| 0.682294
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,738
|
history.py
|
rpm-software-management_dnf/dnf/db/history.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009, 2012-2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import calendar
import os
import time
import libdnf.transaction
import libdnf.utils
from dnf.i18n import ucd
from dnf.yum import misc
from dnf.exceptions import DatabaseError
from .group import GroupPersistor, EnvironmentPersistor, RPMTransaction
class RPMTransactionItemWrapper(object):
def __init__(self, swdb, item):
assert item is not None
self._swdb = swdb
self._item = item
def __str__(self):
return self._item.getItem().toStr()
def __lt__(self, other):
return self._item < other._item
def __eq__(self, other):
return self._item == other._item
def __hash__(self):
return self._item.__hash__()
def match(self, pattern):
return True
def is_package(self):
return self._item.getRPMItem() is not None
def is_group(self):
return self._item.getCompsGroupItem() is not None
def is_environment(self):
return self._item.getCompsEnvironmentItem() is not None
def get_group(self):
return self._item.getCompsGroupItem()
def get_environment(self):
return self._item.getCompsEnvironmentItem()
@property
def name(self):
return self._item.getRPMItem().getName()
@property
def epoch(self):
return self._item.getRPMItem().getEpoch()
@property
def version(self):
return self._item.getRPMItem().getVersion()
@property
def release(self):
return self._item.getRPMItem().getRelease()
@property
def arch(self):
return self._item.getRPMItem().getArch()
@property
def evr(self):
if self.epoch:
return "{}:{}-{}".format(self.epoch, self.version, self.release)
return "{}-{}".format(self.version, self.release)
@property
def nevra(self):
return self._item.getRPMItem().getNEVRA()
@property
def action(self):
return self._item.getAction()
@action.setter
def action(self, value):
self._item.setAction(value)
@property
def reason(self):
return self._item.getReason()
@reason.setter
def reason(self, value):
return self._item.setReason(value)
@property
def action_name(self):
try:
return self._item.getActionName()
except AttributeError:
return ""
@property
def action_short(self):
try:
return self._item.getActionShort()
except AttributeError:
return ""
@property
def state(self):
return self._item.getState()
@state.setter
def state(self, value):
self._item.setState(value)
@property
def from_repo(self):
return self._item.getRepoid()
def ui_from_repo(self):
if not self._item.getRepoid():
return ""
return "@" + self._item.getRepoid()
@property
def obsoleting(self):
return None
def get_reason(self):
# TODO: get_history_reason
return self._swdb.rpm.get_reason(self)
@property
def pkg(self):
return self._swdb.rpm._swdb_ti_pkg[self._item]
@property
def files(self):
return self.pkg.files
@property
def _active(self):
return self.pkg
class TransactionWrapper(object):
altered_lt_rpmdb = False
altered_gt_rpmdb = False
def __init__(self, trans):
self._trans = trans
@property
def tid(self):
return self._trans.getId()
@property
def cmdline(self):
return self._trans.getCmdline()
@property
def releasever(self):
return self._trans.getReleasever()
@property
def beg_timestamp(self):
return self._trans.getDtBegin()
@property
def end_timestamp(self):
return self._trans.getDtEnd()
@property
def beg_rpmdb_version(self):
return self._trans.getRpmdbVersionBegin()
@property
def end_rpmdb_version(self):
return self._trans.getRpmdbVersionEnd()
@property
def return_code(self):
return int(self._trans.getState() != libdnf.transaction.TransactionItemState_DONE)
@property
def loginuid(self):
return self._trans.getUserId()
@property
def data(self):
return self.packages
@property
def is_output(self):
output = self._trans.getConsoleOutput()
return bool(output)
@property
def comment(self):
return self._trans.getComment()
def tids(self):
return [self._trans.getId()]
def performed_with(self):
return []
def packages(self):
result = self._trans.getItems()
return [RPMTransactionItemWrapper(self, i) for i in result]
def output(self):
return [i[1] for i in self._trans.getConsoleOutput()]
def error(self):
return []
def compare_rpmdbv(self, rpmdbv):
self.altered_gt_rpmdb = self._trans.getRpmdbVersionEnd() != rpmdbv
class MergedTransactionWrapper(TransactionWrapper):
def __init__(self, trans):
self._trans = libdnf.transaction.MergedTransaction(trans._trans)
def merge(self, trans):
self._trans.merge(trans._trans)
@property
def loginuid(self):
return self._trans.listUserIds()
def tids(self):
return self._trans.listIds()
@property
def return_code(self):
return [int(i != libdnf.transaction.TransactionItemState_DONE) for i in self._trans.listStates()]
@property
def cmdline(self):
return self._trans.listCmdlines()
@property
def releasever(self):
return self._trans.listReleasevers()
@property
def comment(self):
return self._trans.listComments()
def output(self):
return [i[1] for i in self._trans.getConsoleOutput()]
class SwdbInterface(object):
def __init__(self, db_dir, releasever=""):
# TODO: record all vars
# TODO: remove relreasever from options
self.releasever = str(releasever)
self._rpm = None
self._group = None
self._env = None
self._addon_data = None
self._swdb = None
self._db_dir = db_dir
self._output = []
def __del__(self):
self.close()
@property
def rpm(self):
if self._rpm is None:
self._rpm = RPMTransaction(self)
return self._rpm
@property
def group(self):
if self._group is None:
self._group = GroupPersistor(self)
return self._group
@property
def env(self):
if self._env is None:
self._env = EnvironmentPersistor(self)
return self._env
@property
def dbpath(self):
return os.path.join(self._db_dir, libdnf.transaction.Swdb.defaultDatabaseName)
@property
def swdb(self):
""" Lazy initialize Swdb object """
if not self._swdb:
# _db_dir == persistdir which is prepended with installroot already
try:
self._swdb = libdnf.transaction.Swdb(self.dbpath)
except RuntimeError as ex:
raise DatabaseError(str(ex))
self._swdb.initTransaction()
# TODO: vars -> libdnf
return self._swdb
def transform(self, input_dir):
transformer = libdnf.transaction.Transformer(input_dir, self.dbpath)
transformer.transform()
def close(self):
try:
del self._tid
except AttributeError:
pass
self._rpm = None
self._group = None
self._env = None
if self._swdb:
self._swdb.closeTransaction()
self._swdb.closeDatabase()
self._swdb = None
self._output = []
@property
def path(self):
return self.swdb.getPath()
def reset_db(self):
return self.swdb.resetDatabase()
# TODO: rename to get_last_transaction?
def last(self, complete_transactions_only=True):
# TODO: complete_transactions_only
t = self.swdb.getLastTransaction()
if not t:
return None
return TransactionWrapper(t)
# TODO: rename to: list_transactions?
def old(self, tids=None, limit=0, complete_transactions_only=False):
tids = tids or []
tids = [int(i) for i in tids]
result = self.swdb.listTransactions()
result = [TransactionWrapper(i) for i in result]
# TODO: move to libdnf
if tids:
result = [i for i in result if i.tid in tids]
# populate altered_lt_rpmdb and altered_gt_rpmdb
for i, trans in enumerate(result):
if i == 0:
continue
prev_trans = result[i-1]
if trans._trans.getRpmdbVersionBegin() != prev_trans._trans.getRpmdbVersionEnd():
trans.altered_lt_rpmdb = True
prev_trans.altered_gt_rpmdb = True
return result[::-1]
def get_current(self):
return TransactionWrapper(self.swdb.getCurrent())
def set_reason(self, pkg, reason):
"""Set reason for package"""
rpm_item = self.rpm._pkg_to_swdb_rpm_item(pkg)
repoid = self.repo(pkg)
action = libdnf.transaction.TransactionItemAction_REASON_CHANGE
ti = self.swdb.addItem(rpm_item, repoid, action, reason)
ti.setState(libdnf.transaction.TransactionItemState_DONE)
return ti
'''
def package(self, pkg):
"""Get SwdbPackage from package"""
return self.swdb.package(str(pkg))
'''
def repo(self, pkg):
"""Get repository of package"""
return self.swdb.getRPMRepo(str(pkg))
def package_data(self, pkg):
"""Get package data for package"""
# trans item is returned
result = self.swdb.getRPMTransactionItem(str(pkg))
if result is None:
return result
result = RPMTransactionItemWrapper(self, result)
return result
# def reason(self, pkg):
# """Get reason for package"""
# result = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1)
# return result
# TODO: rename to begin_transaction?
def beg(self, rpmdb_version, using_pkgs, tsis, cmdline=None, comment=""):
try:
self.swdb.initTransaction()
except:
pass
tid = self.swdb.beginTransaction(
int(calendar.timegm(time.gmtime())),
str(rpmdb_version),
cmdline or "",
int(misc.getloginuid()),
comment)
self.swdb.setReleasever(self.releasever)
self._tid = tid
return tid
def pkg_to_swdb_rpm_item(self, po):
rpm_item = self.swdb.createRPMItem()
rpm_item.setName(po.name)
rpm_item.setEpoch(po.epoch or 0)
rpm_item.setVersion(po.version)
rpm_item.setRelease(po.release)
rpm_item.setArch(po.arch)
return rpm_item
def log_scriptlet_output(self, msg):
if not hasattr(self, '_tid'):
return
if not msg:
return
for line in msg.splitlines():
line = ucd(line)
# logging directly to database fails if transaction runs in a background process
self._output.append((1, line))
'''
def _log_errors(self, errors):
for error in errors:
error = ucd(error)
self.swdb.log_error(self._tid, error)
'''
def end(self, end_rpmdb_version="", return_code=None, errors=None):
if not hasattr(self, '_tid'):
return # Failed at beg() time
if return_code is None:
# return_code/state auto-detection
return_code = libdnf.transaction.TransactionState_DONE
for tsi in self.rpm:
if tsi.state == libdnf.transaction.TransactionItemState_ERROR:
return_code = libdnf.transaction.TransactionState_ERROR
break
for file_descriptor, line in self._output:
self.swdb.addConsoleOutputLine(file_descriptor, line)
self._output = []
self.swdb.endTransaction(
int(time.time()),
str(end_rpmdb_version),
return_code,
)
# Closing and cleanup is done in the close() method.
# It is important to keep data around after the transaction ends
# because it's needed by plugins to report installed packages etc.
# TODO: ignore_case, more patterns
def search(self, patterns, ignore_case=True):
""" Search for history transactions which contain specified
packages al. la. "yum list". Returns transaction ids. """
return self.swdb.searchTransactionsByRPM(patterns)
def user_installed(self, pkg):
"""Returns True if package is user installed"""
reason = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1)
if reason == libdnf.transaction.TransactionItemReason_USER:
return True
# if reason is not known, consider a package user-installed
# because it was most likely installed via rpm
if reason == libdnf.transaction.TransactionItemReason_UNKNOWN:
return True
return False
def get_erased_reason(self, pkg, first_trans, rollback):
"""Get reason of package before transaction being undone. If package
is already installed in the system, keep his reason.
:param pkg: package being installed
:param first_trans: id of first transaction being undone
:param rollback: True if transaction is performing a rollback"""
if rollback:
# return the reason at the point of rollback; we're setting that reason
result = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, first_trans)
else:
result = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1)
# consider unknown reason as user-installed
if result == libdnf.transaction.TransactionItemReason_UNKNOWN:
result = libdnf.transaction.TransactionItemReason_USER
return result
| 14,885
|
Python
|
.py
| 414
| 28.014493
| 105
| 0.633616
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,739
|
__init__.py
|
rpm-software-management_dnf/dnf/db/__init__.py
|
# Copyright (C) 2017 Red Hat, Inc.
#
# DNF database subpackage
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
| 778
|
Python
|
.py
| 17
| 44.764706
| 79
| 0.780552
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,740
|
group.py
|
rpm-software-management_dnf/dnf/db/group.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017-2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import libdnf.transaction
import dnf.db.history
import dnf.transaction
import dnf.exceptions
from dnf.i18n import _
from dnf.util import logger
import rpm
class PersistorBase(object):
def __init__(self, history):
assert isinstance(history, dnf.db.history.SwdbInterface), str(type(history))
self.history = history
self._installed = {}
self._removed = {}
self._upgraded = {}
self._downgraded = {}
def __len__(self):
return len(self._installed) + len(self._removed) + len(self._upgraded) + len(self._downgraded)
def clean(self):
self._installed = {}
self._removed = {}
self._upgraded = {}
self._downgraded = {}
def _get_obj_id(self, obj):
raise NotImplementedError
def _add_to_history(self, item, action):
ti = self.history.swdb.addItem(item, "", action, libdnf.transaction.TransactionItemReason_USER)
ti.setState(libdnf.transaction.TransactionItemState_DONE)
def install(self, obj):
self._installed[self._get_obj_id(obj)] = obj
self._add_to_history(obj, libdnf.transaction.TransactionItemAction_INSTALL)
def remove(self, obj):
self._removed[self._get_obj_id(obj)] = obj
self._add_to_history(obj, libdnf.transaction.TransactionItemAction_REMOVE)
def upgrade(self, obj):
self._upgraded[self._get_obj_id(obj)] = obj
self._add_to_history(obj, libdnf.transaction.TransactionItemAction_UPGRADE)
def downgrade(self, obj):
self._downgraded[self._get_obj_id(obj)] = obj
self._add_to_history(obj, libdnf.transaction.TransactionItemAction_DOWNGRADE)
def new(self, obj_id, name, translated_name, pkg_types):
raise NotImplementedError
def get(self, obj_id):
raise NotImplementedError
def search_by_pattern(self, pattern):
raise NotImplementedError
class GroupPersistor(PersistorBase):
def __iter__(self):
items = self.history.swdb.getItems()
items = [i for i in items if i.getCompsGroupItem()]
return iter(items)
def _get_obj_id(self, obj):
return obj.getGroupId()
def new(self, obj_id, name, translated_name, pkg_types):
swdb_group = self.history.swdb.createCompsGroupItem()
swdb_group.setGroupId(obj_id)
if name is not None:
swdb_group.setName(name)
if translated_name is not None:
swdb_group.setTranslatedName(translated_name)
swdb_group.setPackageTypes(pkg_types)
return swdb_group
def get(self, obj_id):
swdb_group = self.history.swdb.getCompsGroupItem(obj_id)
if not swdb_group:
return None
swdb_group = swdb_group.getCompsGroupItem()
return swdb_group
def search_by_pattern(self, pattern):
return self.history.swdb.getCompsGroupItemsByPattern(pattern)
def get_package_groups(self, pkg_name):
return self.history.swdb.getPackageCompsGroups(pkg_name)
def is_removable_pkg(self, pkg_name):
# for group removal and autoremove
reason = self.history.swdb.resolveRPMTransactionItemReason(pkg_name, "", -2)
if reason != libdnf.transaction.TransactionItemReason_GROUP:
return False
# TODO: implement lastTransId == -2 in libdnf
package_groups = set(self.get_package_groups(pkg_name))
for group_id, group in self._removed.items():
for pkg in group.getPackages():
if pkg.getName() != pkg_name:
continue
if not pkg.getInstalled():
continue
package_groups.remove(group_id)
for group_id, group in self._installed.items():
for pkg in group.getPackages():
if pkg.getName() != pkg_name:
continue
if not pkg.getInstalled():
continue
package_groups.add(group_id)
if package_groups:
return False
return True
class EnvironmentPersistor(PersistorBase):
def __iter__(self):
items = self.history.swdb.getItems()
items = [i for i in items if i.getCompsEnvironmentItem()]
return iter(items)
def _get_obj_id(self, obj):
return obj.getEnvironmentId()
def new(self, obj_id, name, translated_name, pkg_types):
swdb_env = self.history.swdb.createCompsEnvironmentItem()
swdb_env.setEnvironmentId(obj_id)
if name is not None:
swdb_env.setName(name)
if translated_name is not None:
swdb_env.setTranslatedName(translated_name)
swdb_env.setPackageTypes(pkg_types)
return swdb_env
def get(self, obj_id):
swdb_env = self.history.swdb.getCompsEnvironmentItem(obj_id)
if not swdb_env:
return None
swdb_env = swdb_env.getCompsEnvironmentItem()
return swdb_env
def search_by_pattern(self, pattern):
return self.history.swdb.getCompsEnvironmentItemsByPattern(pattern)
def get_group_environments(self, group_id):
return self.history.swdb.getCompsGroupEnvironments(group_id)
def is_removable_group(self, group_id):
# for environment removal
swdb_group = self.history.group.get(group_id)
if not swdb_group:
return False
# TODO: implement lastTransId == -2 in libdnf
group_environments = set(self.get_group_environments(group_id))
for env_id, env in self._removed.items():
for group in env.getGroups():
if group.getGroupId() != group_id:
continue
if not group.getInstalled():
continue
group_environments.remove(env_id)
for env_id, env in self._installed.items():
for group in env.getGroups():
if group.getGroupId() != group_id:
continue
if not group.getInstalled():
continue
group_environments.add(env_id)
if group_environments:
return False
return True
class RPMTransaction(object):
def __init__(self, history, transaction=None):
self.history = history
self.transaction = transaction
if not self.transaction:
try:
self.history.swdb.initTransaction()
except:
pass
self._swdb_ti_pkg = {}
# TODO: close trans if needed
def __iter__(self):
# :api
if self.transaction:
items = self.transaction.getItems()
else:
items = self.history.swdb.getItems()
items = [dnf.db.history.RPMTransactionItemWrapper(self.history, i) for i in items if i.getRPMItem()]
return iter(items)
def __len__(self):
if self.transaction:
items = self.transaction.getItems()
else:
items = self.history.swdb.getItems()
items = [dnf.db.history.RPMTransactionItemWrapper(self.history, i) for i in items if i.getRPMItem()]
return len(items)
def _pkg_to_swdb_rpm_item(self, pkg):
rpm_item = self.history.swdb.createRPMItem()
rpm_item.setName(pkg.name)
rpm_item.setEpoch(pkg.epoch or 0)
rpm_item.setVersion(pkg.version)
rpm_item.setRelease(pkg.release)
rpm_item.setArch(pkg.arch)
return rpm_item
def new(self, pkg, action, reason=None, replaced_by=None):
rpm_item = self._pkg_to_swdb_rpm_item(pkg)
repoid = self.get_repoid(pkg)
if reason is None:
reason = self.get_reason(pkg)
result = self.history.swdb.addItem(rpm_item, repoid, action, reason)
if replaced_by:
result.addReplacedBy(replaced_by)
self._swdb_ti_pkg[result] = pkg
return result
def get_repoid(self, pkg):
result = getattr(pkg, "_force_swdb_repoid", None)
if result:
return result
return pkg.reponame
def get_reason(self, pkg):
"""Get reason for package"""
return self.history.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1)
def get_reason_name(self, pkg):
"""Get reason for package"""
return libdnf.transaction.TransactionItemReasonToString(self.get_reason(pkg))
def _add_obsoleted(self, obsoleted, replaced_by=None):
obsoleted = obsoleted or []
for obs in obsoleted:
ti = self.new(obs, libdnf.transaction.TransactionItemAction_OBSOLETED)
if replaced_by:
ti.addReplacedBy(replaced_by)
def add_downgrade(self, new, old, obsoleted=None):
ti_new = self.new(new, libdnf.transaction.TransactionItemAction_DOWNGRADE)
ti_old = self.new(old, libdnf.transaction.TransactionItemAction_DOWNGRADED, replaced_by=ti_new)
self._add_obsoleted(obsoleted, replaced_by=ti_new)
def add_erase(self, old, reason=None):
self.add_remove(old, reason)
def add_install(self, new, obsoleted=None, reason=None):
if reason is None:
reason = libdnf.transaction.TransactionItemReason_USER
ti_new = self.new(new, libdnf.transaction.TransactionItemAction_INSTALL, reason)
self._add_obsoleted(obsoleted, replaced_by=ti_new)
def add_reinstall(self, new, old, obsoleted=None):
ti_new = self.new(new, libdnf.transaction.TransactionItemAction_REINSTALL)
ti_old = self.new(old, libdnf.transaction.TransactionItemAction_REINSTALLED, replaced_by=ti_new)
self._add_obsoleted(obsoleted, replaced_by=ti_new)
def add_remove(self, old, reason=None):
reason = reason or libdnf.transaction.TransactionItemReason_USER
ti_old = self.new(old, libdnf.transaction.TransactionItemAction_REMOVE, reason)
def add_upgrade(self, new, old, obsoleted=None):
ti_new = self.new(new, libdnf.transaction.TransactionItemAction_UPGRADE)
ti_old = self.new(old, libdnf.transaction.TransactionItemAction_UPGRADED, replaced_by=ti_new)
self._add_obsoleted(obsoleted, replaced_by=ti_new)
def _test_fail_safe(self, hdr, pkg):
if pkg._from_cmdline:
return 0
if pkg.repo.module_hotfixes:
return 0
try:
if hdr['modularitylabel'] and not pkg._is_in_active_module():
logger.critical(_("No available modular metadata for modular package '{}', "
"it cannot be installed on the system").format(pkg))
return 1
except ValueError:
return 0
return 0
def _populate_rpm_ts(self, ts):
"""Populate the RPM transaction set."""
modular_problems = 0
for tsi in self:
try:
if tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADE:
hdr = tsi.pkg._header
modular_problems += self._test_fail_safe(hdr, tsi.pkg)
ts.addInstall(hdr, tsi, 'u')
elif tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADED:
ts.addErase(tsi.pkg.idx)
elif tsi.action == libdnf.transaction.TransactionItemAction_INSTALL:
hdr = tsi.pkg._header
modular_problems += self._test_fail_safe(hdr, tsi.pkg)
ts.addInstall(hdr, tsi, 'i')
elif tsi.action == libdnf.transaction.TransactionItemAction_OBSOLETE:
hdr = tsi.pkg._header
modular_problems += self._test_fail_safe(hdr, tsi.pkg)
ts.addInstall(hdr, tsi, 'u')
elif tsi.action == libdnf.transaction.TransactionItemAction_OBSOLETED:
ts.addErase(tsi.pkg.idx)
elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL:
# note: in rpm 4.12 there should not be set
# rpm.RPMPROB_FILTER_REPLACEPKG to work
hdr = tsi.pkg._header
modular_problems += self._test_fail_safe(hdr, tsi.pkg)
ts.addReinstall(hdr, tsi)
elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALLED:
# Required when multiple packages with the same NEVRA marked as installed
ts.addErase(tsi.pkg.idx)
elif tsi.action == libdnf.transaction.TransactionItemAction_REMOVE:
ts.addErase(tsi.pkg.idx)
elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADE:
hdr = tsi.pkg._header
modular_problems += self._test_fail_safe(hdr, tsi.pkg)
ts.addInstall(hdr, tsi, 'u')
elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADED:
ts.addErase(tsi.pkg.idx)
elif tsi.action == libdnf.transaction.TransactionItemAction_REASON_CHANGE:
pass
else:
raise RuntimeError("TransactionItemAction not handled: %s" % tsi.action)
except rpm.error as e:
raise dnf.exceptions.Error(_("An rpm exception occurred: %s" % e))
if modular_problems:
raise dnf.exceptions.Error(_("No available modular metadata for modular package"))
return ts
@property
def install_set(self):
# :api
result = set()
for tsi in self:
if tsi.action in dnf.transaction.FORWARD_ACTIONS:
try:
result.add(tsi.pkg)
except KeyError:
raise RuntimeError("TransactionItem is has no RPM attached: %s" % tsi)
return result
@property
def remove_set(self):
# :api
result = set()
for tsi in self:
if tsi.action in dnf.transaction.BACKWARD_ACTIONS + [libdnf.transaction.TransactionItemAction_REINSTALLED]:
try:
result.add(tsi.pkg)
except KeyError:
raise RuntimeError("TransactionItem is has no RPM attached: %s" % tsi)
return result
def _rpm_limitations(self):
""" Ensures all the members can be passed to rpm as they are to perform
the transaction.
"""
src_installs = [pkg for pkg in self.install_set if pkg.arch == 'src']
if len(src_installs):
return _("Will not install a source rpm package (%s).") % \
src_installs[0]
return None
def _get_items(self, action):
return [tsi for tsi in self if tsi.action == action]
| 15,494
|
Python
|
.py
| 338
| 35.254438
| 119
| 0.625149
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,741
|
emitter.py
|
rpm-software-management_dnf/dnf/automatic/emitter.py
|
# emitter.py
# Emitters for dnf-automatic.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from dnf.i18n import _
import logging
import dnf.pycomp
import smtplib
import email.utils
import subprocess
import time
APPLIED = _("The following updates have been applied on '%s':")
APPLIED_TIMESTAMP = _("Updates completed at %s")
AVAILABLE = _("The following updates are available on '%s':")
DOWNLOADED = _("The following updates were downloaded on '%s':")
ERROR = _("An error has occured on: '%s'")
logger = logging.getLogger('dnf')
class Emitter(object):
    """Collects dnf-automatic status notifications and renders them as text.

    Subclasses implement ``commit()`` to deliver the rendered message.
    """

    def __init__(self, system_name):
        # Exactly one branch of _prepare_msg() fires, checked in priority
        # order: error > applied > downloaded > available.
        self._applied = False
        self._available_msg = None
        self._downloaded = False
        self._system_name = system_name
        self._trans_msg = None
        self._error = False
        self._error_msg = None

    def _prepare_msg(self):
        """Return the notification body as a string, or None when there is
        nothing to report."""
        if self._error:
            parts = [ERROR % self._system_name, self._error_msg]
        elif self._applied:
            parts = [APPLIED % self._system_name,
                     self._available_msg,
                     APPLIED_TIMESTAMP % time.strftime("%c")]
        elif self._downloaded:
            parts = [DOWNLOADED % self._system_name, self._available_msg]
        elif self._available_msg:
            parts = [AVAILABLE % self._system_name, self._available_msg]
        else:
            return None
        return '\n'.join(parts)

    def notify_applied(self):
        """Mark that the previously announced updates were installed."""
        assert self._available_msg
        self._applied = True

    def notify_available(self, msg):
        """Record the human-readable list of available updates."""
        self._available_msg = msg

    def notify_downloaded(self):
        """Mark that the previously announced updates were downloaded."""
        assert self._available_msg
        self._downloaded = True

    def notify_error(self, msg):
        """Record an error message; takes precedence over all other states."""
        self._error = True
        self._error_msg = msg
class EmailEmitter(Emitter):
    """Emitter that delivers the dnf-automatic report by e-mail over SMTP."""

    def __init__(self, system_name, conf):
        super(EmailEmitter, self).__init__(system_name)
        # conf is the [email] configuration section (EmailConfig):
        # from/to addresses, host, port and TLS mode.
        self._conf = conf

    def _prepare_msg(self):
        """Return a (subject, body) pair; (None, None) when nothing to report."""
        if self._error:
            subj = _("An error has occured on '%s'.") % self._system_name
        elif self._applied:
            subj = _("Updates applied on '%s'.") % self._system_name
        elif self._downloaded:
            subj = _("Updates downloaded on '%s'.") % self._system_name
        elif self._available_msg:
            subj = _("Updates available on '%s'.") % self._system_name
        else:
            return None, None
        return subj, super(EmailEmitter, self)._prepare_msg()

    def commit(self):
        """Build the MIME message and send it through the configured SMTP host.

        Sending failures are logged, not raised.
        """
        # NOTE(review): if no notification was recorded, subj and body are
        # both None here and the message is sent with empty content -- confirm
        # that commit() is only reached after a notify_*() call.
        subj, body = self._prepare_msg()
        message = dnf.pycomp.email_mime(body)
        message.set_charset('utf-8')
        email_from = self._conf.email_from
        email_to = self._conf.email_to
        email_host = self._conf.email_host
        email_port = self._conf.email_port
        email_tls = self._conf.email_tls
        message['Date'] = email.utils.formatdate()
        message['From'] = email_from
        message['Subject'] = subj
        message['To'] = ','.join(email_to)
        message['Message-ID'] = email.utils.make_msgid()
        # Send the email
        try:
            # 'yes' means implicit TLS (SMTPS); 'starttls' upgrades a plain
            # connection; anything else sends in the clear.
            if email_tls == 'yes':
                smtp = smtplib.SMTP_SSL(email_host, email_port, timeout=300)
            else:
                smtp = smtplib.SMTP(email_host, email_port, timeout=300)
                if email_tls == 'starttls':
                    smtp.starttls()
            smtp.sendmail(email_from, email_to, message.as_string())
            smtp.close()
        except OSError as exc:
            # smtplib.SMTPException subclasses OSError (Python 3.4+), so this
            # also covers SMTP protocol errors.
            msg = _("Failed to send an email via '%s': %s") % (
                email_host, exc)
            logger.error(msg)
class CommandEmitterMixIn(object):
    """
    Executes a desired command, and pushes data into its stdin.
    Both data and command can be formatted according to user preference.
    For this reason, this class expects a {str:str} dictionary as _prepare_msg
    return value.
    Meant for mixing with Emitter classes, as it does not define any names used
    for formatting on its own.
    """

    def commit(self):
        """Run the configured command, feeding the rendered message to its
        stdin; a non-zero exit status is logged, not raised."""
        command_fmt = self._conf.command_format
        stdin_fmt = self._conf.stdin_format
        msg = self._prepare_msg()
        # all strings passed to shell should be quoted to avoid accidental code
        # execution
        quoted_msg = dict((key, dnf.pycomp.shlex_quote(val))
                          for key, val in msg.items())
        command = command_fmt.format(**quoted_msg)
        stdin_feed = stdin_fmt.format(**msg).encode('utf-8')
        # Execute the command
        subp = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
        # communicate() writes the feed, closes stdin and waits for the
        # process to exit, so the previous explicit stdin.close()/wait()
        # calls were redundant; returncode is set once communicate() returns.
        subp.communicate(stdin_feed)
        if subp.returncode != 0:
            logger.error(_("Failed to execute command '%s': returned %d")
                         % (command, subp.returncode))
class CommandEmitter(CommandEmitterMixIn, Emitter):
    """Emitter that pipes the plain-text report into a user-defined command."""

    def __init__(self, system_name, conf):
        super(CommandEmitter, self).__init__(system_name)
        self._conf = conf

    def _prepare_msg(self):
        """Wrap the plain-text body in the dict the mix-in's commit() expects."""
        body = super(CommandEmitter, self)._prepare_msg()
        return {'body': body}
class CommandEmailEmitter(CommandEmitterMixIn, EmailEmitter):
    """Emitter that hands the e-mail subject, body and addressing fields to a
    user-defined command."""

    def _prepare_msg(self):
        """Expose subject/body plus the configured addresses for formatting."""
        subject, body = super(CommandEmailEmitter, self)._prepare_msg()
        fields = {'subject': subject, 'body': body}
        fields['email_from'] = self._conf.email_from
        fields['email_to'] = ' '.join(self._conf.email_to)
        return fields
class StdIoEmitter(Emitter):
    """Emitter that prints the report to standard output."""

    def commit(self):
        print(self._prepare_msg())
class MotdEmitter(Emitter):
    """Emitter that overwrites /etc/motd with the report."""

    def commit(self):
        """Write the rendered message to /etc/motd.

        When there is no message, /etc/motd is left untouched; previously
        ``fobj.write(None)`` would have raised TypeError.
        """
        msg = self._prepare_msg()
        if msg is None:
            # nothing to report
            return
        with open('/etc/motd', 'w') as fobj:
            fobj.write(msg)
| 6,840
|
Python
|
.py
| 166
| 33.457831
| 79
| 0.63241
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,742
|
__init__.py
|
rpm-software-management_dnf/dnf/automatic/__init__.py
|
# __init__.py
# dnf.automatic toplevel.
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
| 997
|
Python
|
.py
| 19
| 51.473684
| 77
| 0.780164
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,743
|
main.py
|
rpm-software-management_dnf/dnf/automatic/main.py
|
# __init__.py
# dnf.automatic CLI
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import random
import socket
import time
from dnf.i18n import _, ucd, P_
import dnf
import dnf.automatic.emitter
import dnf.cli
import dnf.cli.cli
import dnf.cli.output
import dnf.conf
import dnf.const
import dnf.exceptions
import dnf.util
import dnf.logging
import dnf.pycomp
import libdnf.conf
logger = logging.getLogger('dnf')
def build_emitters(conf):
    """Instantiate one emitter for each name in the ``emit_via`` setting.

    :param conf: an AutomaticConfig instance
    :return: a dnf.util.MultiCallList of emitter objects
    :raises dnf.exceptions.ConfigError: on an unrecognized emitter name
    """
    emitters = dnf.util.MultiCallList([])
    system_name = conf.emitters.system_name
    emit_via = conf.emitters.emit_via
    # map each supported name to a zero-argument factory
    factories = {
        'email': lambda: dnf.automatic.emitter.EmailEmitter(
            system_name, conf.email),
        'stdio': lambda: dnf.automatic.emitter.StdIoEmitter(system_name),
        'motd': lambda: dnf.automatic.emitter.MotdEmitter(system_name),
        'command': lambda: dnf.automatic.emitter.CommandEmitter(
            system_name, conf.command),
        'command_email': lambda: dnf.automatic.emitter.CommandEmailEmitter(
            system_name, conf.command_email),
    }
    if emit_via:
        for name in emit_via:
            if name not in factories:
                raise dnf.exceptions.ConfigError("Unknown emitter option: %s" % name)
            emitters.append(factories[name]())
    return emitters
def parse_arguments(args):
    """Parse the dnf-automatic command line.

    :param args: argument strings (excluding the program name)
    :return: a (namespace, parser) tuple
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('conf_path', nargs='?')
    parser.add_argument('--timer', action='store_true')
    # install/download switches are tri-state: None means "not given on the
    # command line", which lets the configuration file decide.
    parser.add_argument('--installupdates', dest='installupdates',
                        action='store_true')
    parser.add_argument('--downloadupdates', dest='downloadupdates',
                        action='store_true')
    parser.add_argument('--no-installupdates', dest='installupdates',
                        action='store_false')
    parser.add_argument('--no-downloadupdates', dest='downloadupdates',
                        action='store_false')
    parser.set_defaults(installupdates=None)
    parser.set_defaults(downloadupdates=None)
    return parser.parse_args(args), parser
class AutomaticConfig(object):
    """In-memory representation of automatic.conf.

    Holds one Config subsection object per INI section and applies
    command-line overrides for download/install behaviour.
    """

    def __init__(self, filename=None, downloadupdates=None,
                 installupdates=None):
        # :param filename: path to the config file; falls back to the
        #     packaged default when not given
        # :param downloadupdates: tri-state CLI override (None = not given)
        # :param installupdates: tri-state CLI override (None = not given)
        if filename:
            # A specific config file was explicitly requested. Check that it
            # exists and is readable.
            if os.access(filename, os.F_OK):
                if not os.access(filename, os.R_OK):
                    raise dnf.exceptions.Error(
                        "Configuration file \"{}\" is not readable.".format(filename))
            else:
                raise dnf.exceptions.Error(
                    "Configuration file \"{}\" not found.".format(filename))
        else:
            filename = dnf.const.CONF_AUTOMATIC_FILENAME
        self.commands = CommandsConfig()
        self.email = EmailConfig()
        self.emitters = EmittersConfig()
        self.command = CommandConfig()
        self.command_email = CommandEmailConfig()
        self._parser = None
        self._load(filename)

        # Command-line switches override the file; None leaves the file
        # values untouched.
        if downloadupdates:
            self.commands.download_updates = True
        elif downloadupdates is False:
            self.commands.download_updates = False
        if installupdates:
            self.commands.apply_updates = True
        elif installupdates is False:
            self.commands.apply_updates = False

        # apply_updates implies download_updates
        self.commands.imply()
        self.filename = filename

    def _load(self, filename):
        """Read *filename* and populate every config section from it."""
        parser = libdnf.conf.ConfigParser()
        try:
            parser.read(filename)
        except RuntimeError as e:
            raise dnf.exceptions.ConfigError('Parsing file "%s" failed: %s' % (filename, e))
        except IOError as e:
            # a missing/unreadable default file is only a warning
            logger.warning(e)
        self.commands.populate(parser, 'commands', filename,
                               libdnf.conf.Option.Priority_AUTOMATICCONFIG)
        self.email.populate(parser, 'email', filename, libdnf.conf.Option.Priority_AUTOMATICCONFIG)
        self.emitters.populate(parser, 'emitters', filename,
                               libdnf.conf.Option.Priority_AUTOMATICCONFIG)
        self.command.populate(parser, 'command', filename,
                              libdnf.conf.Option.Priority_AUTOMATICCONFIG)
        self.command_email.populate(parser, 'command_email', filename,
                                    libdnf.conf.Option.Priority_AUTOMATICCONFIG)
        self._parser = parser

    def update_baseconf(self, baseconf):
        """Overlay the [base] section of automatic.conf onto a dnf base config."""
        baseconf._populate(self._parser, 'base', self.filename, dnf.conf.PRIO_AUTOMATICCONFIG)
class Config(object):
    """Thin wrapper exposing libdnf option objects as Python attributes."""

    def __init__(self):
        self._options = {}

    def add_option(self, name, optionobj):
        """Register *optionobj* under *name* and expose it as a property.

        Reads go through getValue(); writes use RUNTIME priority.
        """
        self._options[name] = optionobj

        def prop_get(obj):
            return obj._options[name].getValue()

        def prop_set(obj, val):
            obj._options[name].set(libdnf.conf.Option.Priority_RUNTIME, val)

        # NOTE(review): the property is installed on type(self), i.e. the
        # concrete subclass -- it is shared by all instances of that subclass,
        # though each instance reads its own _options entry.
        setattr(type(self), name, property(prop_get, prop_set))

    def populate(self, parser, section, filename, priority):
        """Set option values from an INI file section."""
        if parser.hasSection(section):
            for name in parser.options(section):
                value = parser.getValue(section, name)
                # normalize empty / literal "None" values to empty string
                if not value or value == 'None':
                    value = ''
                opt = self._options.get(name, None)
                if opt:
                    try:
                        opt.set(priority, value)
                    except RuntimeError as e:
                        # value rejected by the option's validation
                        logger.debug(_('Unknown configuration value: %s=%s in %s; %s'),
                                     ucd(name), ucd(value), ucd(filename), str(e))
                else:
                    # option name not registered for this section
                    logger.debug(
                        _('Unknown configuration option: %s = %s in %s'),
                        ucd(name), ucd(value), ucd(filename))
class CommandsConfig(Config):
    """Options of the [commands] section of automatic.conf."""

    def __init__(self):
        super(CommandsConfig, self).__init__()
        self.add_option('apply_updates', libdnf.conf.OptionBool(False))
        self.add_option('base_config_file', libdnf.conf.OptionString('/etc/dnf/dnf.conf'))
        self.add_option('download_updates', libdnf.conf.OptionBool(False))
        self.add_option('upgrade_type', libdnf.conf.OptionEnumString('default',
                        libdnf.conf.VectorString(['default', 'security'])))
        # maximal random delay (seconds) applied when started from a timer
        self.add_option('random_sleep', libdnf.conf.OptionNumberInt32(300))
        self.add_option('network_online_timeout', libdnf.conf.OptionNumberInt32(60))
        self.add_option('reboot', libdnf.conf.OptionEnumString('never',
                        libdnf.conf.VectorString(['never', 'when-changed', 'when-needed'])))
        self.add_option('reboot_command', libdnf.conf.OptionString(
            'shutdown -r +5 \'Rebooting after applying package updates\''))

    def imply(self):
        """Derive dependent settings: applying updates implies downloading."""
        if self.apply_updates:
            self.download_updates = True
class EmailConfig(Config):
    """Options of the [email] section of automatic.conf."""

    def __init__(self):
        super(EmailConfig, self).__init__()
        self.add_option('email_to',
                        libdnf.conf.OptionStringList(libdnf.conf.VectorString(["root"])))
        self.add_option('email_from', libdnf.conf.OptionString("root"))
        self.add_option('email_host', libdnf.conf.OptionString("localhost"))
        self.add_option('email_port', libdnf.conf.OptionNumberInt32(25))
        # 'no', 'yes' (implicit TLS) or 'starttls' -- consumed by EmailEmitter
        self.add_option('email_tls', libdnf.conf.OptionString("no"))
class CommandConfig(Config):
    """Options of the [command] section of automatic.conf."""

    # subclasses override these to change the defaults
    _default_command_format = "cat"
    _default_stdin_format = "{body}"

    def __init__(self):
        super(CommandConfig, self).__init__()
        self.add_option('command_format',
                        libdnf.conf.OptionString(self._default_command_format))
        self.add_option('stdin_format',
                        libdnf.conf.OptionString(self._default_stdin_format))
class CommandEmailConfig(CommandConfig):
    """Options of the [command_email] section of automatic.conf."""

    # default pipes the report through mail(1)
    _default_command_format = "mail -Ssendwait -s {subject} -r {email_from} {email_to}"

    def __init__(self):
        super(CommandEmailConfig, self).__init__()
        self.add_option('email_to',
                        libdnf.conf.OptionStringList(libdnf.conf.VectorString(["root"])))
        self.add_option('email_from', libdnf.conf.OptionString("root"))
class EmittersConfig(Config):
    """Options of the [emitters] section of automatic.conf."""

    def __init__(self):
        super(EmittersConfig, self).__init__()
        # names consumed by build_emitters()
        self.add_option('emit_via', libdnf.conf.OptionStringList(
            libdnf.conf.VectorString(['email', 'stdio'])))
        self.add_option('output_width', libdnf.conf.OptionNumberInt32(80))
        self.add_option('system_name', libdnf.conf.OptionString(socket.gethostname()))
        self.add_option('send_error_messages', libdnf.conf.OptionBool(False))
def gpgsigcheck(base, pkgs):
    """Verify the GPG signature of every package in *pkgs*.

    Every failure is logged at critical level before raising, so the user
    sees all problems at once.

    :raises dnf.exceptions.Error: if any package failed the check
    """
    failed = False
    for pkg in pkgs:
        result, errmsg = base.package_signature_check(pkg)
        if result == 0:
            continue
        failed = True
        logger.critical(errmsg)
    if failed:
        raise dnf.exceptions.Error(_("GPG check FAILED"))
def wait_for_network(repos, timeout):
    '''
    Wait up to <timeout> seconds for network connection to be available.
    if <timeout> is 0 the network availability detection will be skipped.
    Returns True if any remote repository is accessible or remote repositories are not enabled.
    Returns False if none of remote repositories is accessible.
    '''
    if timeout <= 0:
        return True

    # default ports used when a URL carries no explicit port
    remote_schemes = {
        'http': 80,
        'https': 443,
        'ftp': 21,
        'socks': 1080,
        'socks5': 1080,
    }

    def remote_address(url_list):
        # yield (host, port) pairs for every URL we know how to probe
        for url in url_list:
            parsed_url = dnf.pycomp.urlparse.urlparse(url)
            if (not parsed_url.hostname) \
                    or (not parsed_url.port and parsed_url.scheme not in remote_schemes):
                # skip urls without hostname or without recognized port
                continue
            yield (parsed_url.hostname,
                   parsed_url.port or remote_schemes[parsed_url.scheme])

    # collect possible remote repositories urls
    addresses = set()
    for repo in repos.iter_enabled():
        if repo.proxy:
            # all traffic goes through the proxy; probing it is sufficient
            addresses.update(remote_address([repo.proxy]))
        else:
            addresses.update(remote_address(repo.baseurl))
            addresses.update(remote_address([repo.mirrorlist]))
            addresses.update(remote_address([repo.metalink]))

    if not addresses:
        # there is no remote repository enabled so network connection should not be needed
        return True

    logger.debug(_('Waiting for internet connection...'))
    time_0 = time.time()
    while time.time() - time_0 < timeout:
        # one successful TCP connect to any collected address is enough
        for host, port in addresses:
            try:
                s = socket.create_connection((host, port), 1)
                s.close()
                return True
            except socket.error:
                pass
        time.sleep(1)
    return False
def main(args):
    """Entry point of dnf-automatic.

    Downloads and optionally applies updates according to configuration and
    command-line overrides, reporting through the configured emitters.

    :param args: command-line argument strings (excluding the program name)
    :return: 0 on success, 1 on error
    """
    (opts, parser) = parse_arguments(args)

    conf = None
    emitters = None
    try:
        conf = AutomaticConfig(opts.conf_path, opts.downloadupdates,
                               opts.installupdates)
        with dnf.Base() as base:
            cli = dnf.cli.Cli(base)
            cli._read_conf_file()
            # Although dnf-automatic does not use demands, the versionlock
            # plugin uses this demand to decide whether its rules should
            # be applied.
            # https://bugzilla.redhat.com/show_bug.cgi?id=1746562
            cli.demands.resolving = True
            conf.update_baseconf(base.conf)
            base.init_plugins(cli=cli)
            logger.debug(_('Started dnf-automatic.'))

            if opts.timer:
                # randomized delay spreads mirror load across timer-driven runs
                sleeper = random.randint(0, conf.commands.random_sleep)
                logger.debug(P_('Sleep for {} second', 'Sleep for {} seconds', sleeper).format(sleeper))
                time.sleep(sleeper)

            base.pre_configure_plugins()
            base.read_all_repos()
            if not wait_for_network(base.repos, conf.commands.network_online_timeout):
                # proceed anyway; metadata fetch will fail with its own error
                logger.warning(_('System is off-line.'))
            base.configure_plugins()
            base.fill_sack()
            upgrade(base, conf.commands.upgrade_type)
            base.resolve()
            output = dnf.cli.output.Output(base, base.conf)
            trans = base.transaction
            if not trans:
                # nothing to do
                return 0

            lst = output.list_transaction(trans, total_width=80)
            emitters = build_emitters(conf)
            emitters.notify_available(lst)
            if not conf.commands.download_updates:
                # announce only
                emitters.commit()
                return 0

            base.download_packages(trans.install_set)
            emitters.notify_downloaded()
            if not conf.commands.apply_updates:
                # download only
                emitters.commit()
                return 0

            gpgsigcheck(base, trans.install_set)
            base.do_transaction()

            # In case of no global error occurred within the transaction,
            # we need to check state of individual transaction items.
            # NOTE(review): libdnf.transaction is not imported by this module
            # directly; relies on a transitive import -- confirm.
            for tsi in trans:
                if tsi.state == libdnf.transaction.TransactionItemState_ERROR:
                    raise dnf.exceptions.Error(_('Transaction failed'))

            emitters.notify_applied()
            emitters.commit()

        if (conf.commands.reboot == 'when-changed' or
                (conf.commands.reboot == 'when-needed' and base.reboot_needed())):
            exit_code = os.waitstatus_to_exitcode(os.system(conf.commands.reboot_command))
            if exit_code != 0:
                raise dnf.exceptions.Error('reboot command returned nonzero exit code: %d', exit_code)
    except dnf.exceptions.Error as exc:
        logger.error(_('Error: %s'), ucd(exc))
        # only report the error through emitters when configured to do so and
        # when the failure happened after the emitters were built
        if conf is not None and conf.emitters.send_error_messages and emitters is not None:
            emitters.notify_error(_('Error: %s') % str(exc))
            emitters.commit()
        return 1
    return 0
def upgrade(base, upgrade_type):
    """Mark packages for upgrade according to *upgrade_type*.

    'security' restricts the upgrade to security advisories; 'default'
    upgrades everything.

    :raises dnf.exceptions.Error: on an unrecognized upgrade_type
    """
    if upgrade_type == 'default':
        base.upgrade_all()
        return
    if upgrade_type == 'security':
        base.add_security_filters("gte", ("security",))
        base.upgrade_all()
        return
    raise dnf.exceptions.Error(
        'Unsupported upgrade_type "{}", only "default" and "security" supported'.format(
            upgrade_type))
| 15,682
|
Python
|
.py
| 341
| 35.72434
| 106
| 0.624894
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,744
|
__init__.py
|
rpm-software-management_dnf/dnf/yum/__init__.py
|
# __init__.py
# The legacy YUM subpackage.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
| 1,000
|
Python
|
.py
| 19
| 51.631579
| 77
| 0.779817
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,745
|
rpmtrans.py
|
rpm-software-management_dnf/dnf/yum/rpmtrans.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Copyright 2005 Duke University
# Parts Copyright 2007 Red Hat, Inc
from __future__ import print_function, absolute_import
from __future__ import unicode_literals
import libdnf.transaction
from dnf.i18n import _, ucd
import dnf.callback
import dnf.transaction
import dnf.util
import rpm
import os
import logging
import sys
import tempfile
import traceback
import warnings
# TODO: merge w/ libdnf
# transaction set states
TS_UPDATE = 10
TS_INSTALL = 20
TS_ERASE = 40
TS_OBSOLETED = 50
TS_OBSOLETING = 60
TS_AVAILABLE = 70
TS_UPDATED = 90
TS_FAILED = 100

# groupings used to tell install-like from remove-like states
TS_INSTALL_STATES = [TS_INSTALL, TS_UPDATE, TS_OBSOLETING]
TS_REMOVE_STATES = [TS_ERASE, TS_OBSOLETED, TS_UPDATED]

# libdnf transaction-item actions that correspond to actual rpm-level work;
# used by RPMTransaction._extract_cbkey to filter candidate items.
RPM_ACTIONS_SET = {libdnf.transaction.TransactionItemAction_INSTALL,
                   libdnf.transaction.TransactionItemAction_DOWNGRADE,
                   libdnf.transaction.TransactionItemAction_DOWNGRADED,
                   libdnf.transaction.TransactionItemAction_OBSOLETE,
                   libdnf.transaction.TransactionItemAction_OBSOLETED,
                   libdnf.transaction.TransactionItemAction_UPGRADE,
                   libdnf.transaction.TransactionItemAction_UPGRADED,
                   libdnf.transaction.TransactionItemAction_REMOVE,
                   libdnf.transaction.TransactionItemAction_REINSTALLED}

logger = logging.getLogger('dnf')
def _add_deprecated_action(name):
"""
Wrapper to return a deprecated action constant
while printing a deprecation warning.
"""
@property
def _func(self):
msg = "%s.%s is deprecated. Use dnf.callback.%s instead." \
% (self.__class__.__name__, name, name)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
value = getattr(dnf.callback, name)
return value
return _func
class TransactionDisplay(object):
    # :api
    """Base class for objects reporting RPM transaction progress. :api"""

    def __init__(self):
        # :api
        pass

    # use constants from dnf.callback which are the official API.
    # Each of the following is a property (built by _add_deprecated_action)
    # that emits a DeprecationWarning and forwards to dnf.callback.<NAME>.
    PKG_CLEANUP = _add_deprecated_action("PKG_CLEANUP")
    PKG_DOWNGRADE = _add_deprecated_action("PKG_DOWNGRADE")
    PKG_REMOVE = _add_deprecated_action("PKG_REMOVE")
    PKG_ERASE = PKG_REMOVE
    PKG_INSTALL = _add_deprecated_action("PKG_INSTALL")
    PKG_OBSOLETE = _add_deprecated_action("PKG_OBSOLETE")
    PKG_REINSTALL = _add_deprecated_action("PKG_REINSTALL")
    PKG_UPGRADE = _add_deprecated_action("PKG_UPGRADE")
    PKG_VERIFY = _add_deprecated_action("PKG_VERIFY")
    TRANS_PREPARATION = _add_deprecated_action("TRANS_PREPARATION")
    PKG_SCRIPTLET = _add_deprecated_action("PKG_SCRIPTLET")
    TRANS_POST = _add_deprecated_action("TRANS_POST")

    def progress(self, package, action, ti_done, ti_total, ts_done, ts_total):
        """Report ongoing progress on a transaction item. :api

        :param package: a package being processed
        :param action: the action being performed
        :param ti_done: number of processed bytes of the transaction
           item being processed
        :param ti_total: total number of bytes of the transaction item
           being processed
        :param ts_done: number of actions processed in the whole
           transaction
        :param ts_total: total number of actions in the whole
           transaction
        """
        pass

    def scriptout(self, msgs):
        """Hook for reporting an rpm scriptlet output.

        :param msgs: the scriptlet output
        """
        pass

    def error(self, message):
        """Report an error that occurred during the transaction. :api"""
        pass

    def filelog(self, package, action):
        # check package object type - if it is a string - just output it
        """package is the same as in progress() - a package object or simple
        string action is also the same as in progress()"""
        pass

    def verify_tsi_package(self, pkg, count, total):
        # TODO: replace with verify_tsi?
        self.progress(pkg, dnf.transaction.PKG_VERIFY, 100, 100, count, total)
class ErrorTransactionDisplay(TransactionDisplay):
    """An RPMTransaction display that prints errors to the standard error
    stream (the original docstring said standard output, but the code writes
    to sys.stderr)."""

    def error(self, message):
        super(ErrorTransactionDisplay, self).error(message)
        dnf.util._terminal_messenger('print', message, sys.stderr)
class LoggingTransactionDisplay(TransactionDisplay):
    '''
    Base class for a RPMTransaction display callback class; routes errors,
    file actions and scriptlet output to the 'dnf.rpm' logger.
    '''

    def __init__(self):
        super(LoggingTransactionDisplay, self).__init__()
        self.rpm_logger = logging.getLogger('dnf.rpm')

    def error(self, message):
        self.rpm_logger.error(message)

    def filelog(self, package, action):
        # map the numeric action to its human-readable name
        action_str = dnf.transaction.FILE_ACTIONS[action]
        msg = '%s: %s' % (action_str, package)
        # NOTE(review): dnf.logging is not imported by this module directly;
        # relies on a transitive import through the dnf package -- confirm.
        self.rpm_logger.log(dnf.logging.SUBDEBUG, msg)

    def scriptout(self, msgs):
        if msgs:
            self.rpm_logger.info(ucd(msgs))
class RPMTransaction(object):
def __init__(self, base, test=False, displays=()):
if not displays:
displays = [ErrorTransactionDisplay()]
self.displays = displays
self.base = base
self.test = test # are we a test?
self.trans_running = False
self.fd = None
self.total_actions = 0
self.total_installed = 0
self.complete_actions = 0
self.installed_pkg_names = set()
self.total_removed = 0
self._setupOutputLogging(base.conf.rpmverbosity)
self._te_list = []
# Index in _te_list of the transaction element being processed (for use
# in callbacks)
self._te_index = 0
self._tsi_cache = None
def _setupOutputLogging(self, rpmverbosity="info"):
# UGLY... set up the transaction to record output from scriptlets
io_r = tempfile.NamedTemporaryFile()
self._readpipe = io_r
self._writepipe = open(io_r.name, 'w+b')
self.base._ts.setScriptFd(self._writepipe)
rpmverbosity = {'critical' : 'crit',
'emergency' : 'emerg',
'error' : 'err',
'information' : 'info',
'warn' : 'warning'}.get(rpmverbosity, rpmverbosity)
rpmverbosity = 'RPMLOG_' + rpmverbosity.upper()
if not hasattr(rpm, rpmverbosity):
rpmverbosity = 'RPMLOG_INFO'
rpm.setVerbosity(getattr(rpm, rpmverbosity))
rpm.setLogFile(self._writepipe)
def _shutdownOutputLogging(self):
# reset rpm bits from recording output
rpm.setVerbosity(rpm.RPMLOG_NOTICE)
rpm.setLogFile(sys.stderr)
try:
self._writepipe.close()
except:
pass
def _scriptOutput(self):
try:
# XXX ugly workaround of problem which started after upgrading glibc
# from glibc-2.27-32.fc28.x86_64 to glibc-2.28-9.fc29.x86_64
# After this upgrade nothing is read from _readpipe, so every
# posttrans and postun scriptlet output is lost. The problem
# only occurs when using dnf-2, dnf-3 is OK.
# I did not find the root cause of this error yet.
self._readpipe.seek(self._readpipe.tell())
out = self._readpipe.read()
if not out:
return None
return out
except IOError:
pass
def messages(self):
messages = self._scriptOutput()
if messages:
for line in messages.splitlines():
yield ucd(line)
def _scriptout(self):
msgs = self._scriptOutput()
for display in self.displays:
display.scriptout(msgs)
self.base.history.log_scriptlet_output(msgs)
def __del__(self):
self._shutdownOutputLogging()
def _extract_cbkey(self, cbkey):
"""Obtain the package related to the calling callback."""
if hasattr(cbkey, "pkg"):
tsi = cbkey
return [tsi]
te = self._te_list[self._te_index]
te_nevra = dnf.util._te_nevra(te)
if self._tsi_cache:
if str(self._tsi_cache[0]) == te_nevra:
return self._tsi_cache
items = []
for tsi in self.base.transaction:
if tsi.action not in RPM_ACTIONS_SET:
# skip REINSTALL in order to return REINSTALLED, or REASON_CHANGE to avoid crash
continue
if str(tsi) == te_nevra:
items.append(tsi)
if items:
self._tsi_cache = items
return items
raise RuntimeError("TransactionItem not found for key: %s" % cbkey)
def callback(self, what, amount, total, key, client_data):
try:
if isinstance(key, str):
key = ucd(key)
if what == rpm.RPMCALLBACK_TRANS_START:
self._transStart(total)
elif what == rpm.RPMCALLBACK_TRANS_STOP:
pass
elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
self._trans_progress(amount, total)
elif what == rpm.RPMCALLBACK_ELEM_PROGRESS:
# This callback type is issued every time the next transaction
# element is about to be processed by RPM, before any other
# callbacks are issued. "amount" carries the index of the element.
self._elemProgress(key, amount)
elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
return self._instOpenFile(key)
elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
self._instCloseFile(key)
elif what == rpm.RPMCALLBACK_INST_START:
self._inst_start(key)
elif what == rpm.RPMCALLBACK_INST_STOP:
self._inst_stop(key)
elif what == rpm.RPMCALLBACK_INST_PROGRESS:
self._instProgress(amount, total, key)
elif what == rpm.RPMCALLBACK_UNINST_START:
self._uninst_start(key)
elif what == rpm.RPMCALLBACK_UNINST_STOP:
self._unInstStop(key)
elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
self._uninst_progress(amount, total, key)
elif what == rpm.RPMCALLBACK_CPIO_ERROR:
self._cpioError(key)
elif what == rpm.RPMCALLBACK_UNPACK_ERROR:
self._unpackError(key)
elif what == rpm.RPMCALLBACK_SCRIPT_ERROR:
self._scriptError(amount, total, key)
elif what == rpm.RPMCALLBACK_SCRIPT_START:
self._script_start(key)
elif what == rpm.RPMCALLBACK_SCRIPT_STOP:
self._scriptStop()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
except_list = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.critical(''.join(except_list))
def _transStart(self, total):
self.total_actions = total
if self.test: return
self.trans_running = True
self._te_list = list(self.base._ts)
def _trans_progress(self, amount, total):
action = dnf.transaction.TRANS_PREPARATION
for display in self.displays:
display.progress('', action, amount + 1, total, 1, 1)
def _elemProgress(self, key, index):
self._te_index = index
self.complete_actions += 1
if not self.test:
transaction_list = self._extract_cbkey(key)
for display in self.displays:
display.filelog(transaction_list[0].pkg, transaction_list[0].action)
def _instOpenFile(self, key):
self.lastmsg = None
transaction_list = self._extract_cbkey(key)
pkg = transaction_list[0].pkg
rpmloc = pkg.localPkg()
try:
self.fd = open(rpmloc)
except IOError as e:
for display in self.displays:
display.error("Error: Cannot open file %s: %s" % (rpmloc, e))
else:
if self.trans_running:
self.total_installed += 1
self.installed_pkg_names.add(pkg.name)
return self.fd.fileno()
def _instCloseFile(self, key):
self.fd.close()
self.fd = None
def _inst_start(self, key):
pass
def _inst_stop(self, key):
if self.test or not self.trans_running:
return
self._scriptout()
if self.complete_actions == self.total_actions:
# RPM doesn't explicitly report when post-trans phase starts
action = dnf.transaction.TRANS_POST
for display in self.displays:
display.progress(None, action, None, None, None, None)
def _instProgress(self, amount, total, key):
transaction_list = self._extract_cbkey(key)
pkg = transaction_list[0].pkg
action = transaction_list[0].action
for display in self.displays:
display.progress(pkg, action, amount, total, self.complete_actions, self.total_actions)
def _uninst_start(self, key):
self.total_removed += 1
def _uninst_progress(self, amount, total, key):
transaction_list = self._extract_cbkey(key)
pkg = transaction_list[0].pkg
action = transaction_list[0].action
for display in self.displays:
display.progress(pkg, action, amount, total, self.complete_actions, self.total_actions)
def _unInstStop(self, key):
if self.test:
return
self._scriptout()
def _cpioError(self, key):
transaction_list = self._extract_cbkey(key)
msg = "Error in cpio payload of rpm package %s" % transaction_list[0].pkg
for display in self.displays:
display.error(msg)
def _unpackError(self, key):
self._scriptout()
transaction_list = self._extract_cbkey(key)
msg = "Error unpacking rpm package %s" % transaction_list[0].pkg
for display in self.displays:
display.error(msg)
def _scriptError(self, amount, total, key):
# "amount" carries the failed scriptlet tag,
# "total" carries fatal/non-fatal status
scriptlet_name = rpm.tagnames.get(amount, "<unknown>")
transaction_list = self._extract_cbkey(key)
name = transaction_list[0].pkg.name
msg = ("Error in %s scriptlet in rpm package %s" % (scriptlet_name, name))
for display in self.displays:
display.error(msg)
def _script_start(self, key):
# TODO: this doesn't fit into libdnf TransactionItem use cases
action = dnf.transaction.PKG_SCRIPTLET
if key is None and self._te_list == []:
pkg = 'None'
else:
transaction_list = self._extract_cbkey(key)
pkg = transaction_list[0].pkg
complete = self.complete_actions if self.total_actions != 0 and self.complete_actions != 0 \
else 1
total = self.total_actions if self.total_actions != 0 and self.complete_actions != 0 else 1
for display in self.displays:
display.progress(pkg, action, 100, 100, complete, total)
def _scriptStop(self):
    # rpm callback: scriptlet finished; flush its captured output.
    self._scriptout()
def verify_tsi_package(self, pkg, count, total):
    """Relay per-package transaction-verification progress to all displays."""
    for sink in self.displays:
        sink.verify_tsi_package(pkg, count, total)
| 16,037
|
Python
|
.py
| 371
| 33.851752
| 100
| 0.62944
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,746
|
misc.py
|
rpm-software-management_dnf/dnf/yum/misc.py
|
# misc.py
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
Assorted utility functions for yum.
"""
from __future__ import print_function, absolute_import
from __future__ import unicode_literals
from dnf.pycomp import base64_decodebytes, basestring, unicode
from stat import *
import libdnf.repo
import libdnf.utils
import dnf.const
import dnf.crypto
import dnf.exceptions
import dnf.i18n
import errno
import glob
import io
import os
import os.path
import pwd
import re
import shutil
import tempfile
import warnings
_default_checksums = ['sha256']


_re_compiled_glob_match = None


def re_glob(s):
    """Return a truthy match object if *s* contains shell wildcards.

    The compiled matcher is built lazily on first use and cached in a
    module-level global.
    """
    global _re_compiled_glob_match
    matcher = _re_compiled_glob_match
    if matcher is None:
        matcher = re.compile(r'[*?]|\[.+\]').search
        _re_compiled_glob_match = matcher
    return matcher(s)
_re_compiled_full_match = None


def re_full_search_needed(s):
    """Return True when *s* must be matched as a full NEVRA, not just a name."""
    global _re_compiled_full_match
    if _re_compiled_full_match is None:
        # A glob, or a "." or "-" separator, followed by something (the ".")
        nevra_sep = re.compile(r'.*([-.*?]|\[.+\]).').match
        # Any epoch prefix, for envra forms
        epoch = re.compile('[0-9]+:').match
        _re_compiled_full_match = (nevra_sep, epoch)
    return any(rec(s) for rec in _re_compiled_full_match)
def get_default_chksum_type():
    """Return the name of the checksum algorithm used by default."""
    return next(iter(_default_checksums))
class GenericHolder(object):
    """Ad-hoc container used to carry named collections between functions.

    Callers set arbitrary attributes; ``holder[key]`` is equivalent to
    ``holder.key``, and iteration (when a default list name was given to
    the constructor) walks that list.
    """

    def __init__(self, iter=None):
        # Name of the attribute to iterate over by default (may be None).
        self.__iter = iter

    def __iter__(self):
        # NOTE: when no default list name was configured this returns
        # None, which makes iteration raise TypeError (historical
        # behaviour, preserved).
        if self.__iter is not None:
            return iter(self[self.__iter])

    def __getitem__(self, item):
        try:
            return getattr(self, item)
        except AttributeError:
            raise KeyError(item)

    def all_lists(self):
        """Return a dictionary of all attributes whose value is a plain list."""
        return {name: value for name, value in vars(self).items()
                if type(value) is list}

    def merge_lists(self, other):
        """Append the list attributes of *other* onto ours; return self."""
        for name, value in other.all_lists().items():
            vars(self).setdefault(name, []).extend(value)
        return self
def procgpgkey(rawkey):
    """Convert an ASCII-armored GPG key (bytes) to its binary form."""
    # Normalise newlines to '\n'
    rawkey = re.sub(b'\r\n?', b'\n', rawkey)
    # Collect the base64 payload found between the armor headers and the
    # trailing CRC ("=....") / END marker.
    body = io.BytesIO()
    in_armor = False
    past_headers = False
    for line in rawkey.split(b'\n'):
        if line.startswith(b'-----BEGIN PGP PUBLIC KEY BLOCK-----'):
            in_armor = True
        elif in_armor and line.strip() == b'':
            # Blank line separates armor headers from the payload.
            past_headers = True
        elif in_armor and line.startswith(b'-----END PGP PUBLIC KEY BLOCK-----'):
            break
        elif past_headers and line.startswith(b'='):
            # CRC line terminates the payload.
            break
        elif past_headers:
            body.write(line + b'\n')
    return base64_decodebytes(body.getvalue())
def keyInstalled(ts, keyid, timestamp):
    """Check whether the GPG key (keyid, timestamp) is in the rpmdb.

    The keyid and timestamp should both be passed as integers; *ts* is an
    rpm transaction set object.

    Return values:
      - -1 key is not installed
      - 0 key with matching ID and timestamp is installed
      - 1 key with matching ID is installed but has an older timestamp
      - 2 key with matching ID is installed but has a newer timestamp

    No effort is made to handle duplicates: the first header whose keyid
    matches decides the result.
    """
    for hdr in ts.dbMatch('name', 'gpg-pubkey'):
        if hdr['version'] != keyid:
            continue
        # The release field holds the key timestamp in hex.
        installed_ts = int(hdr['release'], 16)
        if installed_ts == timestamp:
            return 0
        return 1 if installed_ts < timestamp else 2
    return -1
def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True):
    # :deprecated, undocumented
    """ It is used internally by deprecated function `import_repo_keys`.

    Imports *rawkey* (UTF-8 bytes of an armored key) into the pubring at
    *gpgdir*, optionally mirroring the result into a read-only
    "<gpgdir>-ro" copy.  *keyid* is accepted but unused here.

    NOTE(review): the default ``gpgdir=None`` would make os.path.exists()
    below raise TypeError - callers presumably always pass a path; confirm
    before relying on the default.
    """
    msg = "Function `import_key_to_pubring` is deprecated. Will be removed after 2023-10-30."
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    if not os.path.exists(gpgdir):
        os.makedirs(gpgdir)
    # import the key
    libdnf.repo.importKeyToPubring(str(rawkey, 'utf-8'), gpgdir)
    if make_ro_copy:
        # Mirror the keyring into a world-readable "-ro" sibling directory
        # (only populated when it is first created).
        rodir = gpgdir + '-ro'
        if not os.path.exists(rodir):
            os.makedirs(rodir, mode=0o755)
            for f in glob.glob(gpgdir + '/*'):
                basename = os.path.basename(f)
                ro_f = rodir + '/' + basename
                shutil.copy(f, ro_f)
                os.chmod(ro_f, 0o755)
    return True
def getCacheDir():
    """return a path to a valid and safe cachedir - only used when not running
    as root or when --tempcache is set"""
    uid = os.geteuid()
    try:
        # Prefix per-user cache dirs with "<prefix>-<username>-" so
        # multiple users can share TMPDIR without collisions.
        usertup = pwd.getpwuid(uid)
        username = dnf.i18n.ucd(usertup[0])
        prefix = '%s-%s-' % (dnf.const.PREFIX, username)
    except KeyError:
        # No passwd entry for this uid; fall back to the numeric uid.
        prefix = '%s-%s-' % (dnf.const.PREFIX, uid)
    # check for /var/tmp/prefix-* -
    dirpath = '%s/%s*' % (dnf.const.TMPDIR, prefix)
    cachedirs = sorted(glob.glob(dirpath))
    for thisdir in cachedirs:
        stats = os.lstat(thisdir)
        # Reuse only a directory with mode 0o700 (== 448) owned by us:
        # stats[0] is st_mode, stats[4] is st_uid.
        if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid:
            return thisdir
    # make the dir (tempfile.mkdtemp())
    cachedir = tempfile.mkdtemp(prefix=prefix, dir=dnf.const.TMPDIR)
    return cachedir
def seq_max_split(seq, max_entries):
    """Split *seq* into a list of lists, each at most *max_entries* long.

    The final chunk holds the remainder and may be shorter; for an empty
    *seq* the result is ``[[]]`` (historical behaviour).
    """
    items = list(seq)  # Trying to use a set/etc. here is bad
    chunks = []
    start = 0
    remaining = len(items)
    while remaining > max_entries:
        chunks.append(items[start:start + max_entries])
        start += max_entries
        remaining -= max_entries
    chunks.append(items[start:])
    return chunks
def unlink_f(filename):
    """Remove *filename*, ignoring a missing file.

    Behaves like "rm -f" rather than plain "rm": ENOENT is swallowed,
    every other OSError propagates.
    """
    try:
        os.unlink(filename)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return
        raise
def stat_f(filename, ignore_EACCES=False):
    """os.stat() that returns None for missing paths instead of raising.

    EACCES is also mapped to None when *ignore_EACCES* is set; any other
    OSError propagates.
    """
    try:
        return os.stat(filename)
    except OSError as err:
        missing = err.errno in (errno.ENOENT, errno.ENOTDIR)
        if missing or (ignore_EACCES and err.errno == errno.EACCES):
            return None
        raise
def _getloginuid():
""" Get the audit-uid/login-uid, if available. os.getuid() is returned
instead if there was a problem. Note that no caching is done here. """
# We might normally call audit.audit_getloginuid(), except that requires
# importing all of the audit module. And it doesn't work anyway: BZ 518721
try:
with open("/proc/self/loginuid") as fo:
data = fo.read()
return int(data)
except (IOError, ValueError):
return os.getuid()
_cached_getloginuid = None


def getloginuid():
    """Cached variant of _getloginuid(): audit/login uid, or os.getuid().

    The first call computes the value and memoizes it at module level,
    so callers don't have to save it.
    """
    global _cached_getloginuid
    value = _cached_getloginuid
    if value is None:
        value = _getloginuid()
        _cached_getloginuid = value
    return value
def decompress(filename, dest=None, check_timestamps=False):
    """take a filename and decompress it into the same relative location.
    When the compression type is not recognized (or file is not compressed),
    the content of the file is copied to the destination

    :param filename: path of the (possibly) compressed input file
    :param dest: explicit output path; when None the path is derived by
        stripping a known compression suffix from *filename*
    :param check_timestamps: when True, skip the work if the destination
        already exists with the same mtime, and propagate the source
        mtime onto the freshly written destination
    :raises dnf.exceptions.MiscError: when no destination can be derived
        or libdnf fails to decompress
    """
    if dest:
        out = dest
    else:
        out = None
        dot_pos = filename.rfind('.')
        if dot_pos > 0:
            ext = filename[dot_pos:]
            if ext in ('.zck', '.xz', '.bz2', '.gz', '.lzma', '.zst'):
                out = filename[:dot_pos]
        if out is None:
            raise dnf.exceptions.MiscError("Could not determine destination filename")
    if check_timestamps:
        fi = stat_f(filename)
        fo = stat_f(out)
        if fi and fo and fo.st_mtime == fi.st_mtime:
            # Destination already up to date; nothing to do.
            return out
    try:
        # libdnf.utils.decompress either decompress file to the destination or
        # copy the content if the compression type is not recognized
        libdnf.utils.decompress(filename, out, 0o644)
    except RuntimeError as e:
        raise dnf.exceptions.MiscError(str(e))
    if check_timestamps and fi:
        # Mirror the source mtime so future check_timestamps calls short-circuit.
        os.utime(out, (fi.st_mtime, fi.st_mtime))
    return out
def read_in_items_from_dot_dir(thisglob, line_as_list=True):
    """Collect the non-comment lines of every file matching *thisglob*.

    Blank lines and lines starting with '#' are skipped.  With
    *line_as_list* (the default) each line is split at commas and
    whitespace and the pieces are appended individually; otherwise the
    stripped line is appended whole.
    """
    results = []
    for fname in glob.glob(thisglob):
        with open(fname) as handle:
            for raw in handle:
                if re.match(r'\s*(#|$)', raw):
                    continue
                text = raw.strip()  # drop trailing \n and be nice about spaces
                if not text:
                    continue
                if line_as_list:
                    pieces = text.replace('\n', ' ').replace(',', ' ')
                    results.extend(pieces.split())
                else:
                    results.append(text)
    return results
| 11,030
|
Python
|
.py
| 279
| 32.344086
| 93
| 0.631692
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,747
|
option_parser.py
|
rpm-software-management_dnf/dnf/cli/option_parser.py
|
# optparse.py
# CLI options parser.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.util import _parse_specs
import argparse
import dnf.exceptions
import dnf.util
import dnf.rpm
import dnf.yum.misc
import logging
import os.path
import re
import sys
logger = logging.getLogger("dnf")
class MultilineHelpFormatter(argparse.HelpFormatter):
    """Help formatter that preserves explicit newlines in help texts.

    Help strings containing '\\n' are emitted line-for-line; any other
    text falls back to argparse's standard wrapping.
    """

    def _split_lines(self, text, width):
        if '\n' not in text:
            return super(MultilineHelpFormatter, self)._split_lines(text, width)
        return text.splitlines()
class OptionParser(argparse.ArgumentParser):
    """ArgumentParser like class to do things the "yum way"."""

    def __init__(self, reset_usage=True):
        super(OptionParser, self).__init__(add_help=False,
                                           formatter_class=MultilineHelpFormatter)
        # Parser/group holding the currently attached command's positional
        # arguments and command-specific options (set by _add_command_options).
        self.command_positional_parser = None
        self.command_group = None
        self._add_general_options()
        if reset_usage:
            self._cmd_usage = {}  # names, summary for dnf commands, to build usage
            self._cmd_groups = set()  # cmd groups added (main, plugin)

    def error(self, msg):
        """Output an error message, and exit the program.

        This method overrides standard argparser's error
        so that error output goes to the logger.

        :param msg: the error message to output
        """
        self.print_usage()
        logger.critical(_("Command line error: %s"), msg)
        sys.exit(1)

    class _RepoCallback(argparse.Action):
        # Record --enablerepo/--disablerepo as (pattern, operation) pairs,
        # preserving command-line order so later options win.
        def __call__(self, parser, namespace, values, opt_str):
            operation = 'disable' if opt_str == '--disablerepo' else 'enable'
            l = getattr(namespace, self.dest)
            l.extend((x, operation) for x in re.split(r'\s*[,\s]\s*', values))

    class _RepoCallbackEnable(argparse.Action):
        # Enable a single repository and remember its name on the namespace.
        def __call__(self, parser, namespace, values, opt_str):
            namespace.repos_ed.append((values[0], 'enable'))
            setattr(namespace, 'reponame', values)

    class _SplitCallback(argparse._AppendAction):
        """ Split all strings in seq, at "," and whitespace.
        Returns a new list. """
        SPLITTER = r'\s*[,\s]\s*'

        def __call__(self, parser, namespace, values, opt_str):
            first = True
            for val in re.split(self.SPLITTER, values):
                if first or val:
                    # Empty values are sometimes used to clear existing content of the option.
                    # Only the first value in the parsed string can be empty. Other empty values
                    # are ignored.
                    super(OptionParser._SplitCallback,
                          self).__call__(parser, namespace, val, opt_str)
                first = False

    class _SplitExtendDictCallback(argparse.Action):
        """ Split string at "," or whitespace to (key, value).
        Extends dict with {key: value}."""
        def __call__(self, parser, namespace, values, opt_str):
            try:
                key, val = values.split(',')
                if not key or not val:
                    raise ValueError
            except ValueError:
                msg = _('bad format: %s') % values
                raise argparse.ArgumentError(self, msg)
            dct = getattr(namespace, self.dest)
            dct[key] = val

    class _SetoptsCallback(argparse.Action):
        """ Parse setopts arguments and put them into main_<setopts>
        and repo_<setopts>."""
        def __call__(self, parser, namespace, values, opt_str):
            vals = values.split('=', maxsplit=1)
            if len(vals) < 2:
                logger.warning(_("Setopt argument has no value: %s"), values)
                return
            k, v = vals
            period = k.rfind('.')
            if period != -1:
                # "repoid.option=value" form: store under repo_setopts.
                repo = k[:period]
                k = k[period+1:]
                if hasattr(namespace, 'repo_setopts'):
                    repoopts = namespace.repo_setopts
                else:
                    repoopts = {}
                repoopts.setdefault(repo, {}).setdefault(k, []).append(v)
                setattr(namespace, 'repo_' + self.dest, repoopts)
            else:
                # Plain "option=value" form: store under main_setopts.
                if hasattr(namespace, 'main_setopts'):
                    mainopts = namespace.main_setopts
                else:
                    mainopts = {}
                mainopts.setdefault(k, []).append(v)
                setattr(namespace, 'main_' + self.dest, mainopts)

    class ParseSpecGroupFileCallback(argparse.Action):
        # Delegate spec classification (packages/groups/files) to
        # dnf.util._parse_specs, which fills the namespace buckets.
        def __call__(self, parser, namespace, values, opt_str):
            _parse_specs(namespace, values)

    class PkgNarrowCallback(argparse.Action):
        # Accepts an optional leading "narrow" keyword (from 'choices')
        # followed by arbitrary package specs; stores the narrow under
        # "<dest>_action" and the remaining specs under "<dest>".
        def __init__(self, *args, **kwargs):
            self.pkgnarrow = {}
            try:
                for k in ['choices', 'default']:
                    self.pkgnarrow[k] = kwargs[k]
                    del kwargs[k]
            except KeyError as e:
                raise TypeError("%s() missing mandatory argument %s"
                                % (self.__class__.__name__, e))
            kwargs['default'] = []
            super(OptionParser.PkgNarrowCallback, self).__init__(*args, **kwargs)

        def __call__(self, parser, namespace, values, opt_str):
            dest_action = self.dest + '_action'
            if not values or values[0] not in self.pkgnarrow['choices']:
                narrow = self.pkgnarrow['default']
            else:
                narrow = values.pop(0)
            setattr(namespace, dest_action, narrow)
            setattr(namespace, self.dest, values)

    class ForceArchAction(argparse.Action):
        # --forcearch implies skipping the arch compatibility check.
        def __call__(self, parser, namespace, values, opt_str):
            namespace.ignorearch = True
            namespace.arch = values

    def _add_general_options(self):
        """ Standard options known to all dnf subcommands. """
        # All defaults need to be a None, so we can always tell whether the user
        # has set something or whether we are getting a default.
        general_grp = self.add_argument_group(_('General {prog} options'.format(
            prog=dnf.util.MAIN_PROG_UPPER)))
        general_grp.add_argument("-c", "--config", dest="config_file_path",
                                 default=None, metavar='[config file]',
                                 help=_("config file location"))
        general_grp.add_argument("-q", "--quiet", dest="quiet",
                                 action="store_true", default=None,
                                 help=_("quiet operation"))
        general_grp.add_argument("-v", "--verbose", action="store_true",
                                 default=None, help=_("verbose operation"))
        general_grp.add_argument("--version", action="store_true", default=None,
                                 help=_("show {prog} version and exit").format(
                                     prog=dnf.util.MAIN_PROG_UPPER))
        general_grp.add_argument("--installroot", help=_("set install root"),
                                 metavar='[path]')
        general_grp.add_argument("--nodocs", action="store_const", const=['nodocs'], dest='tsflags',
                                 help=_("do not install documentations"))
        general_grp.add_argument("--noplugins", action="store_false",
                                 default=None, dest='plugins',
                                 help=_("disable all plugins"))
        general_grp.add_argument("--enableplugin", dest="enableplugin",
                                 default=[], action=self._SplitCallback,
                                 help=_("enable plugins by name"),
                                 metavar='[plugin]')
        general_grp.add_argument("--disableplugin", dest="disableplugin",
                                 default=[], action=self._SplitCallback,
                                 help=_("disable plugins by name"),
                                 metavar='[plugin]')
        general_grp.add_argument("--releasever", default=None,
                                 help=_("override the value of $releasever"
                                        " in config and repo files"))
        general_grp.add_argument("--setopt", dest="setopts", default=[],
                                 action=self._SetoptsCallback,
                                 help=_("set arbitrary config and repo options"))
        general_grp.add_argument("--skip-broken", dest="skip_broken", action="store_true",
                                 default=None,
                                 help=_("resolve depsolve problems by skipping packages"))
        general_grp.add_argument('-h', '--help', '--help-cmd',
                                 action="store_true", dest='help',
                                 help=_("show command help"))
        general_grp.add_argument('--allowerasing', action='store_true',
                                 default=None,
                                 help=_('allow erasing of installed packages to '
                                        'resolve dependencies'))
        best_group = general_grp.add_mutually_exclusive_group()
        best_group.add_argument("-b", "--best", action="store_true", dest='best', default=None,
                                help=_("try the best available package versions in transactions."))
        best_group.add_argument("--nobest", action="store_false", dest='best',
                                help=_("do not limit the transaction to the best candidate"))
        general_grp.add_argument("-C", "--cacheonly", dest="cacheonly",
                                 action="store_true", default=None,
                                 help=_("run entirely from system cache, "
                                        "don't update cache"))
        general_grp.add_argument("-R", "--randomwait", dest="sleeptime", type=int,
                                 default=None, metavar='[minutes]',
                                 help=_("maximum command wait time"))
        general_grp.add_argument("-d", "--debuglevel", dest="debuglevel",
                                 metavar='[debug level]', default=None,
                                 help=_("debugging output level"), type=int)
        general_grp.add_argument("--debugsolver",
                                 action="store_true", default=None,
                                 help=_("dumps detailed solving results into"
                                        " files"))
        general_grp.add_argument("--showduplicates", dest="showdupesfromrepos",
                                 action="store_true", default=None,
                                 help=_("show duplicates, in repos, "
                                        "in list/search commands"))
        general_grp.add_argument("-e", "--errorlevel", default=None, type=int,
                                 help=_("error output level"))
        general_grp.add_argument("--obsoletes", default=None, dest="obsoletes",
                                 action="store_true",
                                 help=_("enables {prog}'s obsoletes processing logic "
                                        "for upgrade or display capabilities that "
                                        "the package obsoletes for info, list and "
                                        "repoquery").format(prog=dnf.util.MAIN_PROG))
        general_grp.add_argument("--rpmverbosity", default=None,
                                 help=_("debugging output level for rpm"),
                                 metavar='[debug level name]')
        general_grp.add_argument("-y", "--assumeyes", action="store_true",
                                 default=None, help=_("automatically answer yes"
                                                      " for all questions"))
        general_grp.add_argument("--assumeno", action="store_true",
                                 default=None, help=_("automatically answer no"
                                                      " for all questions"))
        general_grp.add_argument("--enablerepo", action=self._RepoCallback,
                                 dest='repos_ed', default=[], metavar='[repo]',
                                 help=_("Temporarily enable repositories for the purpose "
                                        "of the current dnf command. Accepts an id, a "
                                        "comma-separated list of ids, or a glob of ids. "
                                        "This option can be specified multiple times."))
        repo_group = general_grp.add_mutually_exclusive_group()
        repo_group.add_argument("--disablerepo", action=self._RepoCallback,
                                dest='repos_ed', default=[], metavar='[repo]',
                                help=_("Temporarily disable active repositories for the "
                                       "purpose of the current dnf command. Accepts an id, "
                                       "a comma-separated list of ids, or a glob of ids. "
                                       "This option can be specified multiple times, but "
                                       "is mutually exclusive with `--repo`."))
        repo_group.add_argument('--repo', '--repoid', metavar='[repo]', dest='repo',
                                action=self._SplitCallback, default=[],
                                help=_('enable just specific repositories by an id or a glob, '
                                       'can be specified multiple times'))
        enable_group = general_grp.add_mutually_exclusive_group()
        enable_group.add_argument("--enable", default=False,
                                  dest="set_enabled", action="store_true",
                                  help=_("enable repos with config-manager "
                                         "command (automatically saves)"))
        enable_group.add_argument("--disable", default=False,
                                  dest="set_disabled", action="store_true",
                                  help=_("disable repos with config-manager "
                                         "command (automatically saves)"))
        general_grp.add_argument("-x", "--exclude", "--excludepkgs", default=[],
                                 dest='excludepkgs', action=self._SplitCallback,
                                 help=_("exclude packages by name or glob"),
                                 metavar='[package]')
        general_grp.add_argument("--disableexcludes", "--disableexcludepkgs",
                                 default=[], dest="disable_excludes",
                                 action=self._SplitCallback,
                                 help=_("disable excludepkgs"),
                                 metavar='[repo]')
        general_grp.add_argument("--repofrompath", default={},
                                 action=self._SplitExtendDictCallback,
                                 metavar='[repo,path]',
                                 help=_("label and path to an additional repository to use (same "
                                        "path as in a baseurl), can be specified multiple times."))
        general_grp.add_argument("--noautoremove", action="store_false",
                                 default=None, dest='clean_requirements_on_remove',
                                 help=_("disable removal of dependencies that are no longer used"))
        general_grp.add_argument("--nogpgcheck", action="store_false",
                                 default=None, dest='gpgcheck',
                                 help=_("disable gpg signature checking (if RPM policy allows)"))
        general_grp.add_argument("--color", dest="color", default=None,
                                 help=_("control whether color is used"))
        general_grp.add_argument("--refresh", dest="freshest_metadata",
                                 action="store_true",
                                 help=_("set metadata as expired before running"
                                        " the command"))
        general_grp.add_argument("-4", dest="ip_resolve", default=None,
                                 help=_("resolve to IPv4 addresses only"),
                                 action="store_const", const='ipv4')
        general_grp.add_argument("-6", dest="ip_resolve", default=None,
                                 help=_("resolve to IPv6 addresses only"),
                                 action="store_const", const='ipv6')
        general_grp.add_argument("--destdir", "--downloaddir", dest="destdir", default=None,
                                 help=_("set directory to copy packages to"))
        general_grp.add_argument("--downloadonly", dest="downloadonly",
                                 action="store_true", default=False,
                                 help=_("only download packages"))
        general_grp.add_argument("--comment", dest="comment", default=None,
                                 help=_("add a comment to transaction"))
        # Updateinfo options...
        general_grp.add_argument("--bugfix", action="store_true",
                                 help=_("Include bugfix relevant packages, "
                                        "in updates"))
        general_grp.add_argument("--enhancement", action="store_true",
                                 help=_("Include enhancement relevant packages,"
                                        " in updates"))
        general_grp.add_argument("--newpackage", action="store_true",
                                 help=_("Include newpackage relevant packages,"
                                        " in updates"))
        general_grp.add_argument("--security", action="store_true",
                                 help=_("Include security relevant packages, "
                                        "in updates"))
        general_grp.add_argument("--advisory", "--advisories", dest="advisory",
                                 default=[], action=self._SplitCallback,
                                 help=_("Include packages needed to fix the "
                                        "given advisory, in updates"))
        general_grp.add_argument("--bz", "--bzs", default=[], dest="bugzilla",
                                 action=self._SplitCallback, help=_(
                                     "Include packages needed to fix the given BZ, in updates"))
        general_grp.add_argument("--cve", "--cves", default=[], dest="cves",
                                 action=self._SplitCallback,
                                 help=_("Include packages needed to fix the given CVE, in updates"))
        general_grp.add_argument(
            "--sec-severity", "--secseverity",
            choices=['Critical', 'Important', 'Moderate', 'Low'], default=[],
            dest="severity", action=self._SplitCallback, help=_(
                "Include security relevant packages matching the severity, "
                "in updates"))
        general_grp.add_argument("--forcearch", metavar="ARCH",
                                 dest=argparse.SUPPRESS,
                                 action=self.ForceArchAction,
                                 choices=sorted(dnf.rpm._BASEARCH_MAP.keys()),
                                 help=_("Force the use of an architecture"))
        general_grp.add_argument('command', nargs='?', help=argparse.SUPPRESS)

    def _add_cmd_usage(self, cmd, group):
        """ store usage info about a single dnf command."""
        summary = dnf.i18n.ucd(cmd.summary)
        name = dnf.i18n.ucd(cmd.aliases[0])
        if not name in self._cmd_usage:
            self._cmd_usage[name] = (group, summary)
            self._cmd_groups.add(group)

    def add_commands(self, cli_cmds, group):
        """ store name & summary for dnf commands

        The stored information is used build usage information
        grouped by build-in & plugin commands.
        """
        for cmd in set(cli_cmds.values()):
            self._add_cmd_usage(cmd, group)

    def get_usage(self):
        """ get the usage information to show the user. """
        desc = {'main': _('List of Main Commands:'),
                'plugin': _('List of Plugin Commands:')}
        usage = '%s [options] COMMAND\n' % dnf.util.MAIN_PROG
        for grp in ['main', 'plugin']:
            if not grp in self._cmd_groups:
                # dont add plugin usage, if we dont have plugins
                continue
            usage += "\n%s\n\n" % desc[grp]
            for name in sorted(self._cmd_usage.keys()):
                group, summary = self._cmd_usage[name]
                if group == grp:
                    usage += "%-25s %s\n" % (name, summary)
        return usage

    def _add_command_options(self, command):
        # Attach *command*'s specific options and positionals to this parser.
        self.prog = "%s %s" % (dnf.util.MAIN_PROG, command._basecmd)
        self.description = command.summary
        self.command_positional_parser = argparse.ArgumentParser(self.prog, add_help=False)
        self.command_positional_parser.print_usage = self.print_usage
        self.command_positional_parser._positionals.title = None
        self.command_group = self.add_argument_group(
            '{} command-specific options'.format(command._basecmd.capitalize()))
        self.command_group.add_argument = self.cmd_add_argument
        self.command_group._command = command._basecmd
        command.set_argparser(self.command_group)

    def cmd_add_argument(self, *args, **kwargs):
        # Route optional arguments to the command group and positionals to
        # the separate positional parser (parsed in a second pass).
        if all([(arg[0] in self.prefix_chars) for arg in args]):
            return type(self.command_group).add_argument(self.command_group, *args, **kwargs)
        else:
            return self.command_positional_parser.add_argument(*args, **kwargs)

    def _check_encoding(self, args):
        # Reject arguments that cannot be encoded as UTF-8.
        for arg in args:
            try:
                arg.encode('utf-8')
            except UnicodeEncodeError as e:
                raise dnf.exceptions.ConfigError(
                    _("Cannot encode argument '%s': %s") % (arg, str(e)))

    def parse_main_args(self, args):
        # First pass: only the general options; unknown args are ignored.
        self._check_encoding(args)
        namespace, _unused_args = self.parse_known_args(args)
        return namespace

    def parse_command_args(self, command, args):
        # Second pass: parse again with the command's options attached,
        # then feed the leftovers to the positional parser.
        self._add_command_options(command)
        namespace, unused_args = self.parse_known_args(args)
        namespace = self.command_positional_parser.parse_args(unused_args, namespace)
        command.opts = namespace
        return command.opts

    def print_usage(self, file_=None):
        # Include the attached command's positionals in the usage line.
        if self.command_positional_parser:
            self._actions += self.command_positional_parser._actions
        super(OptionParser, self).print_usage(file_)

    def print_help(self, command=None):
        # pylint: disable=W0212
        if command:
            if not self.command_group or self.command_group._command != command._basecmd:
                self._add_command_options(command)
            self._actions += self.command_positional_parser._actions
            self._action_groups.append(self.command_positional_parser._positionals)
        else:
            self.usage = self.get_usage()
        super(OptionParser, self).print_help()
| 24,187
|
Python
|
.py
| 411
| 41.082725
| 100
| 0.532935
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,748
|
aliases.py
|
rpm-software-management_dnf/dnf/cli/aliases.py
|
# aliases.py
# Resolving aliases in CLI arguments.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
import collections
import dnf.cli
from dnf.conf.config import PRIO_DEFAULT
import dnf.exceptions
import libdnf.conf
import logging
import os
import os.path
logger = logging.getLogger('dnf')
# Drop-in directory for alias definitions; ALIASES.conf holds the global
# switch, USER.conf overrides everything else (appended last by Aliases).
ALIASES_DROPIN_DIR = '/etc/dnf/aliases.d/'
ALIASES_CONF_PATH = os.path.join(ALIASES_DROPIN_DIR, 'ALIASES.conf')
ALIASES_USER_PATH = os.path.join(ALIASES_DROPIN_DIR, 'USER.conf')


class AliasesConfig(object):
    """Read-only view of one aliases config file parsed by libdnf."""

    def __init__(self, path):
        self._path = path
        self._parser = libdnf.conf.ConfigParser()
        self._parser.read(self._path)

    @property
    def enabled(self):
        # "[main] enabled" as a boolean; defaults to True when the key is
        # missing (libdnf raises IndexError for an absent section/key).
        option = libdnf.conf.OptionBool(True)
        try:
            option.set(PRIO_DEFAULT, self._parser.getData()["main"]["enabled"])
        except IndexError:
            pass
        return option.getValue()

    @property
    def aliases(self):
        # Ordered mapping of alias name -> whitespace-split expansion taken
        # from the "[aliases]" section; empty values are skipped.
        result = collections.OrderedDict()
        section = "aliases"
        if not self._parser.hasSection(section):
            return result
        for key in self._parser.options(section):
            value = self._parser.getValue(section, key)
            if not value:
                continue
            result[key] = value.split()
        return result
class Aliases(object):
    """Resolver that expands dnf command-line aliases.

    Aliases come from drop-in files under ALIASES_DROPIN_DIR; resolution
    can be switched off globally (ALIASES.conf) or via the
    DNF_DISABLE_ALIASES environment variable.
    """

    def __init__(self):
        self.aliases = collections.OrderedDict()
        self.conf = None
        self.enabled = True

        if self._disabled_by_environ():
            self.enabled = False
            return

        self._load_main()

        if not self.enabled:
            return

        self._load_aliases()

    def _disabled_by_environ(self):
        # DNF_DISABLE_ALIASES is parsed as a boolean: unset means "not
        # disabled"; an unparsable value disables aliases with a warning.
        option = libdnf.conf.OptionBool(True)
        try:
            option.set(PRIO_DEFAULT, os.environ['DNF_DISABLE_ALIASES'])
            return option.getValue()
        except KeyError:
            return False
        except RuntimeError:
            logger.warning(
                _('Unexpected value of environment variable: '
                  'DNF_DISABLE_ALIASES=%s'), os.environ['DNF_DISABLE_ALIASES'])
            return True

    def _load_conf(self, path):
        """Parse one aliases config file; wrap parser/IO failures."""
        try:
            return AliasesConfig(path)
        except RuntimeError as e:
            raise dnf.exceptions.ConfigError(
                _('Parsing file "%s" failed: %s') % (path, e))
        except IOError as e:
            raise dnf.exceptions.ConfigError(
                _('Cannot read file "%s": %s') % (path, e))

    def _load_main(self):
        # ALIASES.conf controls the global on/off switch; a missing or
        # broken file simply leaves aliases enabled.
        try:
            self.conf = self._load_conf(ALIASES_CONF_PATH)
            self.enabled = self.conf.enabled
        except dnf.exceptions.ConfigError as e:
            logger.debug(_('Config error: %s'), e)

    def _load_aliases(self, filenames=None):
        # Later files override earlier ones via dict.update(); USER.conf
        # is appended last by _dropin_dir_filenames() and so wins.
        if filenames is None:
            try:
                filenames = self._dropin_dir_filenames()
            except dnf.exceptions.ConfigError:
                return
        for filename in filenames:
            try:
                conf = self._load_conf(filename)
                if conf.enabled:
                    self.aliases.update(conf.aliases)
            except dnf.exceptions.ConfigError as e:
                logger.warning(_('Config error: %s'), e)

    def _dropin_dir_filenames(self):
        # Get default aliases config filenames:
        # all files from ALIASES_DROPIN_DIR,
        # and ALIASES_USER_PATH as the last one (-> override all others)
        ignored_filenames = [os.path.basename(ALIASES_CONF_PATH),
                             os.path.basename(ALIASES_USER_PATH)]

        def _ignore_filename(filename):
            # Skip the two special files, hidden files and non-*.conf files.
            return filename in ignored_filenames or\
                filename.startswith('.') or\
                not filename.endswith(('.conf', '.CONF'))

        filenames = []
        try:
            if not os.path.exists(ALIASES_DROPIN_DIR):
                os.mkdir(ALIASES_DROPIN_DIR)
            for fn in sorted(os.listdir(ALIASES_DROPIN_DIR)):
                if _ignore_filename(fn):
                    continue
                filenames.append(os.path.join(ALIASES_DROPIN_DIR, fn))
        except (IOError, OSError) as e:
            raise dnf.exceptions.ConfigError(e)
        if os.path.exists(ALIASES_USER_PATH):
            filenames.append(ALIASES_USER_PATH)
        return filenames

    def _resolve(self, args):
        # Recursively expand aliases at the head of *args*.  Leading
        # "-"-prefixed words are collected into self.prefix_options and
        # re-attached to the final expansion.
        stack = []
        self.prefix_options = []

        def store_prefix(args):
            # Strip leading option words, remember them, return the rest.
            num = 0
            for arg in args:
                if arg and arg[0] != '-':
                    break
                num += 1
            self.prefix_options += args[:num]
            return args[num:]

        def subresolve(args):
            suffix = store_prefix(args)
            if (not suffix or  # Current alias on stack is resolved
                    suffix[0] not in self.aliases or  # End resolving
                    suffix[0].startswith('\\')):  # End resolving
                try:
                    stack.pop()
                    # strip the '\' if it exists
                    if suffix[0].startswith('\\'):
                        suffix[0] = suffix[0][1:]
                except IndexError:
                    pass
                return suffix

            if suffix[0] in stack:  # Infinite recursion detected
                raise dnf.exceptions.Error(
                    _('Aliases contain infinite recursion'))

            # Next word must be an alias
            stack.append(suffix[0])
            current_alias_result = subresolve(self.aliases[suffix[0]])
            if current_alias_result:  # We reached non-alias or '\'
                return current_alias_result + suffix[1:]
            else:  # Need to resolve aliases in the rest
                return subresolve(suffix[1:])

        suffix = subresolve(args)
        return self.prefix_options + suffix

    def resolve(self, args):
        """Return *args* with aliases expanded; on resolution errors log
        the problem and fall back to the original arguments."""
        if self.enabled:
            try:
                args = self._resolve(args)
            except dnf.exceptions.Error as e:
                logger.error(_('%s, using original arguments.'), e)
        return args
| 7,133
|
Python
|
.py
| 177
| 30.033898
| 79
| 0.594829
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,749
|
cli.py
|
rpm-software-management_dnf/dnf/cli/cli.py
|
# Copyright 2005 Duke University
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Seth Vidal
"""
Command line interface yum class and related.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
import datetime
import logging
import operator
import os
import random
import rpm
import sys
import time
import hawkey
import libdnf.transaction
from . import output
from dnf.cli import CliError
from dnf.i18n import ucd, _
import dnf
import dnf.cli.aliases
import dnf.cli.commands
import dnf.cli.commands.alias
import dnf.cli.commands.autoremove
import dnf.cli.commands.check
import dnf.cli.commands.clean
import dnf.cli.commands.deplist
import dnf.cli.commands.distrosync
import dnf.cli.commands.downgrade
import dnf.cli.commands.group
import dnf.cli.commands.history
import dnf.cli.commands.install
import dnf.cli.commands.makecache
import dnf.cli.commands.mark
import dnf.cli.commands.module
import dnf.cli.commands.reinstall
import dnf.cli.commands.remove
import dnf.cli.commands.repolist
import dnf.cli.commands.repoquery
import dnf.cli.commands.search
import dnf.cli.commands.shell
import dnf.cli.commands.swap
import dnf.cli.commands.updateinfo
import dnf.cli.commands.upgrade
import dnf.cli.commands.upgrademinimal
import dnf.cli.demand
import dnf.cli.format
import dnf.cli.option_parser
import dnf.conf
import dnf.conf.substitutions
import dnf.const
import dnf.db.history
import dnf.exceptions
import dnf.logging
import dnf.persistor
import dnf.plugin
import dnf.rpm
import dnf.sack
import dnf.transaction
import dnf.util
import dnf.yum.misc
logger = logging.getLogger('dnf')
def _add_pkg_simple_list_lens(data, pkg, indent=''):
    """ Accumulate the display width of each of *pkg*'s columns into *data*.
    This "knows" about simpleList and printVer. """
    widths = (
        ('na', len(pkg.name) + 1 + len(pkg.arch) + len(indent)),
        ('ver', len(pkg.evr)),
        ('rid', len(pkg._from_repo)),
    )
    for column, width in widths:
        counts = data[column]
        counts[width] = counts.get(width, 0) + 1
def _list_cmd_calc_columns(output, ypl):
    """ Work out the dynamic size of the columns to pass to fmtColumns. """
    data = {'na': {}, 'ver': {}, 'rid': {}}
    simple_lists = (ypl.installed, ypl.available, ypl.extras, ypl.autoremove,
                    ypl.updates, ypl.recent)
    for pkg_list in simple_lists:
        for pkg in pkg_list:
            _add_pkg_simple_list_lens(data, pkg)
    if len(ypl.obsoletes) > 0:
        # obsoleted packages are shown indented under their replacement
        for (new_pkg, old_pkg) in ypl.obsoletesTuples:
            _add_pkg_simple_list_lens(data, new_pkg)
            _add_pkg_simple_list_lens(data, old_pkg, indent=" " * 4)
    columns = output.calcColumns([data['na'], data['ver'], data['rid']],
                                 remainder_column=1)
    return (-columns[0], -columns[1], -columns[2])
def print_versions(pkgs, base, output):
    """Print install and build details for installed packages named in *pkgs*."""
    def sm_ui_time(x):
        # Render an epoch timestamp in the locale's format (UTC).
        return time.strftime("%c", time.gmtime(x))

    rpmdb_sack = dnf.sack.rpmdb_sack(base)
    done = False
    for pkg in rpmdb_sack.query().installed().filterm(name=pkgs):
        # blank line between consecutive packages
        if done:
            print("")
        done = True
        if pkg.epoch == '0':
            ver = '%s-%s.%s' % (pkg.version, pkg.release, pkg.arch)
        else:
            ver = '%s:%s-%s.%s' % (pkg.epoch,
                                   pkg.version, pkg.release, pkg.arch)
        name = output.term.bold(pkg.name)
        print(_("  Installed: %s-%s at %s") %(name, ver,
                                              sm_ui_time(pkg.installtime)))
        print(_("  Built    : %s at %s") % (pkg.packager if pkg.packager else "",
                                            sm_ui_time(pkg.buildtime)))
        # :hawkey, no changelist information yet
        # print(_("  Committed: %s at %s") % (pkg.committer,
        #                                    sm_ui_date(pkg.committime)))
def report_module_switch(switchedModules):
    """Warn about every module whose enabled stream would be switched.

    :param switchedModules: mapping of module name -> streams, where
        streams[0] is the current stream and streams[1] the new one
    """
    warning_tmpl = _("The operation would result in switching of module '{0}' stream '{1}' to "
                     "stream '{2}'")
    for module_name, streams in switchedModules.items():
        logger.warning(warning_tmpl.format(module_name, streams[0], streams[1]))
class BaseCli(dnf.Base):
    """This is the base class for yum cli."""

    def __init__(self, conf=None):
        """Initialize with *conf*; a fresh dnf.conf.Conf is used when omitted."""
        conf = conf or dnf.conf.Conf()
        super(BaseCli, self).__init__(conf=conf)
        self.output = output.Output(self, self.conf)

    def do_transaction(self, display=()):
        """Take care of package downloading, checking, user
        confirmation and actually running the transaction.
        :param display: `rpm.callback.TransactionProgress` object(s)
        :return: history database transaction ID or None
        """
        if dnf.base.WITH_MODULES:
            if not self.conf.module_stream_switch:
                switchedModules = dict(self._moduleContainer.getSwitchedStreams())
                if switchedModules:
                    report_module_switch(switchedModules)
                    msg = _("It is not possible to switch enabled streams of a module unless explicitly "
                            "enabled via configuration option module_stream_switch.\n"
                            "It is recommended to rather remove all installed content from the module, and "
                            "reset the module using '{prog} module reset <module_name>' command. After "
                            "you reset the module, you can install the other stream.").format(
                        prog=dnf.util.MAIN_PROG)
                    raise dnf.exceptions.Error(msg)
        trans = self.transaction
        pkg_str = self.output.list_transaction(trans)
        if pkg_str:
            logger.info(pkg_str)
        if trans:
            # Check which packages have to be downloaded
            install_pkgs = []
            rmpkgs = []
            install_only = True
            for tsi in trans:
                if tsi.action in dnf.transaction.FORWARD_ACTIONS:
                    install_pkgs.append(tsi.pkg)
                elif tsi.action in dnf.transaction.BACKWARD_ACTIONS:
                    install_only = False
                    rmpkgs.append(tsi.pkg)
            # Close the connection to the rpmdb so that rpm doesn't hold the
            # SIGINT handler during the downloads.
            del self._ts
            # report the total download size to the user
            if not install_pkgs:
                self.output.reportRemoveSize(rmpkgs)
            else:
                self.output.reportDownloadSize(install_pkgs, install_only)
        if trans or self._moduleContainer.isChanged() or \
                (self._history and (self._history.group or self._history.env)):
            # confirm with user
            if self.conf.downloadonly:
                logger.info(_("{prog} will only download packages for the transaction.").format(
                    prog=dnf.util.MAIN_PROG_UPPER))
            elif 'test' in self.conf.tsflags:
                logger.info(_("{prog} will only download packages, install gpg keys, and check the "
                              "transaction.").format(prog=dnf.util.MAIN_PROG_UPPER))
            # refuse real transactions on an image-based (bootc) read-only host
            if dnf.util._is_bootc_host() and \
                    os.path.realpath(self.conf.installroot) == "/" and \
                    not self.conf.downloadonly:
                _bootc_host_msg = _("""
*** Error: system is configured to be read-only; for more
*** information run `bootc --help`.
""")
                logger.info(_bootc_host_msg)
                raise CliError(_("Operation aborted."))
            if self._promptWanted():
                if self.conf.assumeno or not self.output.userconfirm():
                    raise CliError(_("Operation aborted."))
        else:
            logger.info(_('Nothing to do.'))
            return
        if trans:
            if install_pkgs:
                logger.info(_('Downloading Packages:'))
                try:
                    total_cb = self.output.download_callback_total_cb
                    self.download_packages(install_pkgs, self.output.progress, total_cb)
                except dnf.exceptions.DownloadError as e:
                    specific = dnf.cli.format.indent_block(ucd(e))
                    errstr = _('Error downloading packages:') + '\n%s' % specific
                    # setting the new line to prevent next chars being eaten up
                    # by carriage returns
                    print()
                    raise dnf.exceptions.Error(errstr)
            # Check GPG signatures
            self.gpgsigcheck(install_pkgs)
        if self.conf.downloadonly:
            return
        if not isinstance(display, Sequence):
            display = [display]
        display = [output.CliTransactionDisplay()] + list(display)
        tid = super(BaseCli, self).do_transaction(display)
        # display last transaction (which was closed during do_transaction())
        if tid is not None:
            trans = self.history.old([tid])[0]
            trans = dnf.db.group.RPMTransaction(self.history, trans._trans)
        else:
            trans = None
        if trans:
            # the post transaction summary is already written to log during
            # Base.do_transaction() so here only print the messages to the
            # user arranged in columns
            print()
            print('\n'.join(self.output.post_transaction_output(trans)))
            print()
            for tsi in trans:
                if tsi.state == libdnf.transaction.TransactionItemState_ERROR:
                    raise dnf.exceptions.Error(_('Transaction failed'))
        return tid

    def gpgsigcheck(self, pkgs):
        """Perform GPG signature verification on the given packages,
        installing keys if possible.
        :param pkgs: a list of package objects to verify the GPG
           signatures of
        :raises: Will raise :class:`Error` if there's a problem
        """
        error_messages = []
        for po in pkgs:
            result, errmsg = self._sig_check_pkg(po)
            if result == 0:
                # Verified ok, or verify not req'd
                continue
            elif result == 1:
                ay = self.conf.assumeyes and not self.conf.assumeno
                if (not sys.stdin or not sys.stdin.isatty()) and not ay:
                    raise dnf.exceptions.Error(_('Refusing to automatically import keys when running ' \
                            'unattended.\nUse "-y" to override.'))
                # the callback here expects to be able to take options which
                # userconfirm really doesn't... so fake it
                fn = lambda x, y, z: self.output.userconfirm()
                try:
                    self._get_key_for_package(po, fn)
                except (dnf.exceptions.Error, ValueError) as e:
                    error_messages.append(str(e))
            else:
                # Fatal error
                error_messages.append(errmsg)
        if error_messages:
            for msg in error_messages:
                logger.critical(msg)
            raise dnf.exceptions.Error(_("GPG check FAILED"))

    def latest_changelogs(self, package):
        """Return list of changelogs for package newer then installed version"""
        newest = None
        # find the date of the newest changelog for installed version of package
        # stored in rpmdb
        for mi in self._rpmconn.readonly_ts.dbMatch('name', package.name):
            changelogtimes = mi[rpm.RPMTAG_CHANGELOGTIME]
            if changelogtimes:
                newest = datetime.date.fromtimestamp(changelogtimes[0])
                break
        chlogs = [chlog for chlog in package.changelogs
                  if newest is None or chlog['timestamp'] > newest]
        return chlogs

    def format_changelog(self, changelog):
        """Return changelog formatted as in spec file"""
        chlog_str = '* %s %s\n%s\n' % (
            changelog['timestamp'].strftime("%c"),
            dnf.i18n.ucd(changelog['author']),
            dnf.i18n.ucd(changelog['text']))
        return chlog_str

    def print_changelogs(self, packages):
        """Print the new changelogs for *packages*, grouped by source RPM."""
        # group packages by src.rpm to avoid showing duplicate changelogs
        bysrpm = dict()
        for p in packages:
            # there are packages without source_name, use name then.
            bysrpm.setdefault(p.source_name or p.name, []).append(p)
        for source_name in sorted(bysrpm.keys()):
            bin_packages = bysrpm[source_name]
            print(_("Changelogs for {}").format(', '.join([str(pkg) for pkg in bin_packages])))
            for chl in self.latest_changelogs(bin_packages[0]):
                print(self.format_changelog(chl))

    def check_updates(self, patterns=(), reponame=None, print_=True, changelogs=False):
        """Check updates matching given *patterns* in selected repository."""
        ypl = self.returnPkgLists('upgrades', patterns, reponame=reponame)
        if self.conf.obsoletes or self.conf.verbose:
            typl = self.returnPkgLists('obsoletes', patterns, reponame=reponame)
            ypl.obsoletes = typl.obsoletes
            ypl.obsoletesTuples = typl.obsoletesTuples
        if print_:
            columns = _list_cmd_calc_columns(self.output, ypl)
            if len(ypl.updates) > 0:
                local_pkgs = {}
                highlight = self.output.term.MODE['bold']
                if highlight:
                    # Do the local/remote split we get in "yum updates"
                    for po in sorted(ypl.updates):
                        local = po.localPkg()
                        if os.path.exists(local) and po.verifyLocalPkg():
                            local_pkgs[(po.name, po.arch)] = po
                cul = self.conf.color_update_local
                cur = self.conf.color_update_remote
                self.output.listPkgs(ypl.updates, '', outputType='list',
                                     highlight_na=local_pkgs, columns=columns,
                                     highlight_modes={'=' : cul, 'not in' : cur})
                if changelogs:
                    self.print_changelogs(ypl.updates)
            if len(ypl.obsoletes) > 0:
                print(_('Obsoleting Packages'))
                # The tuple is (newPkg, oldPkg) ... so sort by new
                for obtup in sorted(ypl.obsoletesTuples,
                                    key=operator.itemgetter(0)):
                    self.output.updatesObsoletesList(obtup, 'obsoletes',
                                                     columns=columns)
        return ypl.updates or ypl.obsoletes

    def distro_sync_userlist(self, userlist):
        """ Upgrade or downgrade packages to match the latest versions available
        in the enabled repositories.
        :return: (exit_code, [ errors ])
        exit_code is::
            0 = we're done, exit
            1 = we've errored, exit with error string
            2 = we've got work yet to do, onto the next stage
        """
        oldcount = self._goal.req_length()
        if len(userlist) == 0:
            self.distro_sync()
        else:
            for pkg_spec in userlist:
                self.distro_sync(pkg_spec)
        cnt = self._goal.req_length() - oldcount
        if cnt <= 0 and not self._goal.req_has_distupgrade_all():
            msg = _('No packages marked for distribution synchronization.')
            raise dnf.exceptions.Error(msg)

    def downgradePkgs(self, specs=[], file_pkgs=[], strict=False):
        """Attempt to take the user specified list of packages or
        wildcards and downgrade them. If a complete version number is
        specified, attempt to downgrade them to the specified version
        :param specs: a list of names or wildcards specifying packages to downgrade
        :param file_pkgs: a list of pkg objects from local files
        """
        # NOTE(review): mutable default arguments are shared between calls;
        # harmless here because both lists are only iterated, never mutated.
        result = False
        for pkg in file_pkgs:
            try:
                self.package_downgrade(pkg, strict=strict)
                result = True
            except dnf.exceptions.MarkingError as e:
                logger.info(_('No match for argument: %s'),
                            self.output.term.bold(pkg.location))
        for arg in specs:
            try:
                self.downgrade_to(arg, strict=strict)
                result = True
            except dnf.exceptions.PackageNotFoundError as err:
                msg = _('No package %s available.')
                logger.info(msg, self.output.term.bold(arg))
            except dnf.exceptions.PackagesNotInstalledError as err:
                logger.info(_('Packages for argument %s available, but not installed.'),
                            self.output.term.bold(err.pkg_spec))
            except dnf.exceptions.MarkingError:
                # downgrade_to only raises the two subclasses handled above
                assert False
        if not result:
            raise dnf.exceptions.Error(_('No packages marked for downgrade.'))

    def output_packages(self, basecmd, pkgnarrow='all', patterns=(), reponame=None):
        """Output selection *pkgnarrow* of packages matching *patterns* and *repoid*."""
        try:
            highlight = self.output.term.MODE['bold']
            ypl = self.returnPkgLists(
                pkgnarrow, patterns, installed_available=highlight, reponame=reponame)
        except dnf.exceptions.Error as e:
            return 1, [str(e)]
        else:
            update_pkgs = {}
            inst_pkgs = {}
            local_pkgs = {}
            columns = None
            if basecmd == 'list':
                # Dynamically size the columns
                columns = _list_cmd_calc_columns(self.output, ypl)
            if highlight and ypl.installed:
                # If we have installed and available lists, then do the
                # highlighting for the installed packages so you can see what's
                # available to update, an extra, or newer than what we have.
                for pkg in (ypl.hidden_available +
                            ypl.reinstall_available +
                            ypl.old_available):
                    key = (pkg.name, pkg.arch)
                    if key not in update_pkgs or pkg > update_pkgs[key]:
                        update_pkgs[key] = pkg
            if highlight and ypl.available:
                # If we have installed and available lists, then do the
                # highlighting for the available packages so you can see what's
                # available to install vs. update vs. old.
                for pkg in ypl.hidden_installed:
                    key = (pkg.name, pkg.arch)
                    if key not in inst_pkgs or pkg > inst_pkgs[key]:
                        inst_pkgs[key] = pkg
            if highlight and ypl.updates:
                # Do the local/remote split we get in "yum updates"
                for po in sorted(ypl.updates):
                    if po.reponame != hawkey.SYSTEM_REPO_NAME:
                        local_pkgs[(po.name, po.arch)] = po
            # Output the packages:
            clio = self.conf.color_list_installed_older
            clin = self.conf.color_list_installed_newer
            clir = self.conf.color_list_installed_reinstall
            clie = self.conf.color_list_installed_extra
            rip = self.output.listPkgs(ypl.installed, _('Installed Packages'), basecmd,
                                       highlight_na=update_pkgs, columns=columns,
                                       highlight_modes={'>' : clio, '<' : clin,
                                                        '=' : clir, 'not in' : clie})
            clau = self.conf.color_list_available_upgrade
            clad = self.conf.color_list_available_downgrade
            clar = self.conf.color_list_available_reinstall
            clai = self.conf.color_list_available_install
            rap = self.output.listPkgs(ypl.available, _('Available Packages'), basecmd,
                                       highlight_na=inst_pkgs, columns=columns,
                                       highlight_modes={'<' : clau, '>' : clad,
                                                        '=' : clar, 'not in' : clai})
            raep = self.output.listPkgs(ypl.autoremove, _('Autoremove Packages'),
                                        basecmd, columns=columns)
            rep = self.output.listPkgs(ypl.extras, _('Extra Packages'), basecmd,
                                       columns=columns)
            cul = self.conf.color_update_local
            cur = self.conf.color_update_remote
            rup = self.output.listPkgs(ypl.updates, _('Available Upgrades'), basecmd,
                                       highlight_na=local_pkgs, columns=columns,
                                       highlight_modes={'=' : cul, 'not in' : cur})
            # XXX put this into the ListCommand at some point
            if len(ypl.obsoletes) > 0 and basecmd == 'list':
                # if we've looked up obsolete lists and it's a list request
                rop = len(ypl.obsoletes)
                print(_('Obsoleting Packages'))
                for obtup in sorted(ypl.obsoletesTuples,
                                    key=operator.itemgetter(0)):
                    self.output.updatesObsoletesList(obtup, 'obsoletes',
                                                     columns=columns)
            else:
                rop = self.output.listPkgs(ypl.obsoletes, _('Obsoleting Packages'),
                                           basecmd, columns=columns)
            rrap = self.output.listPkgs(ypl.recent, _('Recently Added Packages'),
                                        basecmd, columns=columns)
            # error out only when explicit patterns produced nothing at all
            if len(patterns) and \
                    rrap == 0 and rop == 0 and rup == 0 and rep == 0 and rap == 0 and raep == 0 and rip == 0:
                raise dnf.exceptions.Error(_('No matching Packages to list'))

    def returnPkgLists(self, pkgnarrow='all', patterns=None,
                       installed_available=False, reponame=None):
        """Return a :class:`dnf.yum.misc.GenericHolder` object containing
        lists of package objects that match the given names or wildcards.
        :param pkgnarrow: a string specifying which types of packages
           lists to produce, such as updates, installed, available, etc.
        :param patterns: a list of names or wildcards specifying
           packages to list
        :param installed_available: whether the available package list
           is present as .hidden_available when doing all, available,
           or installed
        :param reponame: limit packages list to the given repository
        :return: a :class:`dnf.yum.misc.GenericHolder` instance with the
           following lists defined::
             available = list of packageObjects
             installed = list of packageObjects
             upgrades = tuples of packageObjects (updating, installed)
             extras = list of packageObjects
             obsoletes = tuples of packageObjects (obsoleting, installed)
             recent = list of packageObjects
        """
        done_hidden_available = False
        done_hidden_installed = False
        if installed_available and pkgnarrow == 'installed':
            done_hidden_available = True
            pkgnarrow = 'all'
        elif installed_available and pkgnarrow == 'available':
            done_hidden_installed = True
            pkgnarrow = 'all'
        ypl = self._do_package_lists(
            pkgnarrow, patterns, ignore_case=True, reponame=reponame)
        if self.conf.showdupesfromrepos:
            for pkg in ypl.reinstall_available:
                if not pkg.installed and not done_hidden_available:
                    ypl.available.append(pkg)
        if installed_available:
            ypl.hidden_available = ypl.available
            ypl.hidden_installed = ypl.installed
        if done_hidden_available:
            ypl.available = []
        if done_hidden_installed:
            ypl.installed = []
        return ypl

    def provides(self, args):
        """Print out a list of packages that provide the given file or
        feature. This a cli wrapper to the provides methods in the
        rpmdb and pkgsack.
        :param args: the name of a file or feature to search for
        :return: (exit_code, [ errors ])
        exit_code is::
            0 = we're done, exit
            1 = we've errored, exit with error string
            2 = we've got work yet to do, onto the next stage
        """
        # always in showdups mode
        old_sdup = self.conf.showdupesfromrepos
        self.conf.showdupesfromrepos = True
        matches = []
        used_search_strings = []
        for spec in args:
            query, used_search_string = super(BaseCli, self).provides(spec)
            matches.extend(query)
            used_search_strings.extend(used_search_string)
        for pkg in sorted(matches):
            self.output.matchcallback_verbose(pkg, used_search_strings, args)
        self.conf.showdupesfromrepos = old_sdup
        if not matches:
            raise dnf.exceptions.Error(_('No matches found. If searching for a file, '
                                         'try specifying the full path or using a '
                                         'wildcard prefix ("*/") at the beginning.'))

    def _promptWanted(self):
        """Return True when the user should be asked to confirm the transaction."""
        # shortcut for the always-off/always-on options
        if self.conf.assumeyes and not self.conf.assumeno:
            return False
        return True
class Cli(object):
def __init__(self, base):
self.base = base
self.cli_commands = {}
self.command = None
self.demands = dnf.cli.demand.DemandSheet() # :api
self.register_command(dnf.cli.commands.alias.AliasCommand)
self.register_command(dnf.cli.commands.autoremove.AutoremoveCommand)
self.register_command(dnf.cli.commands.check.CheckCommand)
self.register_command(dnf.cli.commands.clean.CleanCommand)
self.register_command(dnf.cli.commands.distrosync.DistroSyncCommand)
self.register_command(dnf.cli.commands.deplist.DeplistCommand)
self.register_command(dnf.cli.commands.downgrade.DowngradeCommand)
self.register_command(dnf.cli.commands.group.GroupCommand)
self.register_command(dnf.cli.commands.history.HistoryCommand)
self.register_command(dnf.cli.commands.install.InstallCommand)
self.register_command(dnf.cli.commands.makecache.MakeCacheCommand)
self.register_command(dnf.cli.commands.mark.MarkCommand)
self.register_command(dnf.cli.commands.module.ModuleCommand)
self.register_command(dnf.cli.commands.reinstall.ReinstallCommand)
self.register_command(dnf.cli.commands.remove.RemoveCommand)
self.register_command(dnf.cli.commands.repolist.RepoListCommand)
self.register_command(dnf.cli.commands.repoquery.RepoQueryCommand)
self.register_command(dnf.cli.commands.search.SearchCommand)
self.register_command(dnf.cli.commands.shell.ShellCommand)
self.register_command(dnf.cli.commands.swap.SwapCommand)
self.register_command(dnf.cli.commands.updateinfo.UpdateInfoCommand)
self.register_command(dnf.cli.commands.upgrade.UpgradeCommand)
self.register_command(dnf.cli.commands.upgrademinimal.UpgradeMinimalCommand)
self.register_command(dnf.cli.commands.InfoCommand)
self.register_command(dnf.cli.commands.ListCommand)
self.register_command(dnf.cli.commands.ProvidesCommand)
self.register_command(dnf.cli.commands.CheckUpdateCommand)
self.register_command(dnf.cli.commands.RepoPkgsCommand)
self.register_command(dnf.cli.commands.HelpCommand)
    def _configure_repos(self, opts):
        """Apply repo-related CLI options and install progress/key callbacks.

        Reads all repos, adds --repofrompath repos, applies enable/disable
        edits in order, expires stale repos and wires up the output callbacks.
        Exits the process on a repo configuration error.
        """
        self.base.read_all_repos(opts)
        if opts.repofrompath:
            for label, path in opts.repofrompath.items():
                this_repo = self.base.repos.add_new_repo(label, self.base.conf, baseurl=[path])
                this_repo._configure_from_options(opts)
                # do not let this repo to be disabled
                opts.repos_ed.append((label, "enable"))
        if opts.repo:
            # --repo means: disable everything, then enable only the named ones
            opts.repos_ed.insert(0, ("*", "disable"))
            opts.repos_ed.extend([(r, "enable") for r in opts.repo])
        notmatch = set()
        # Process repo enables and disables in order
        try:
            for (repo, operation) in opts.repos_ed:
                repolist = self.base.repos.get_matching(repo)
                if not repolist:
                    if self.base.conf.strict and operation == "enable":
                        msg = _("Unknown repo: '%s'")
                        raise dnf.exceptions.RepoError(msg % repo)
                    notmatch.add(repo)
                if operation == "enable":
                    repolist.enable()
                else:
                    repolist.disable()
        except dnf.exceptions.ConfigError as e:
            logger.critical(e)
            self.optparser.print_help()
            sys.exit(1)
        for repo in notmatch:
            logger.warning(_("No repository match: %s"), repo)
        # expire repos the persistor marked stale (None means all of them)
        expired_repos = self.base._repo_persistor.get_expired_repos()
        if expired_repos is None:
            expired_repos = self.base.repos.keys()
        for rid in expired_repos:
            repo = self.base.repos.get(rid)
            if repo:
                repo._repo.expire()
        # setup the progress bars/callbacks
        (bar, self.base._ds_callback) = self.base.output.setup_progress_callbacks()
        self.base.repos.all().set_progress_bar(bar)
        key_import = output.CliKeyImport(self.base, self.base.output)
        self.base.repos.all()._set_key_import(key_import)
def _log_essentials(self):
logger.debug('{prog} version: %s'.format(prog=dnf.util.MAIN_PROG_UPPER),
dnf.const.VERSION)
logger.log(dnf.logging.DDEBUG,
'Command: %s', self.cmdstring)
logger.log(dnf.logging.DDEBUG,
'Installroot: %s', self.base.conf.installroot)
logger.log(dnf.logging.DDEBUG, 'Releasever: %s',
self.base.conf.releasever)
logger.debug("cachedir: %s", self.base.conf.cachedir)
    def _process_demands(self):
        """Act on the demand sheet: privileges, metadata freshness, sack setup."""
        demands = self.demands
        repos = self.base.repos
        if demands.root_user:
            if not dnf.util.am_i_root():
                raise dnf.exceptions.Error(
                    _('This command has to be run with superuser privileges '
                      '(under the root user on most systems).'))
        if demands.changelogs:
            # changelog entries live in the optional "other" repo metadata
            for repo in repos.iter_enabled():
                repo.load_metadata_other = True
        if demands.cacheonly or self.base.conf.cacheonly:
            self.base.conf.cacheonly = True
            for repo in repos.values():
                repo._repo.setSyncStrategy(dnf.repo.SYNC_ONLY_CACHE)
        else:
            if demands.freshest_metadata:
                for repo in repos.iter_enabled():
                    repo._repo.expire()
            elif not demands.fresh_metadata:
                for repo in repos.values():
                    repo._repo.setSyncStrategy(dnf.repo.SYNC_LAZY)
        if demands.sack_activation:
            self.base.fill_sack(
                load_system_repo='auto' if self.demands.load_system_repo else False,
                load_available_repos=self.demands.available_repos)
    def _parse_commands(self, opts, args):
        """Check that the requested CLI command exists.

        Instantiates the matching command class into self.command, or logs a
        hint (the command may come from a plugin) and raises CliError.
        """
        basecmd = opts.command
        command_cls = self.cli_commands.get(basecmd)
        if command_cls is None:
            logger.critical(_('No such command: %s. Please use %s --help'),
                            basecmd, sys.argv[0])
            if self.base.conf.plugins:
                logger.critical(_("It could be a {PROG} plugin command, "
                                  "try: \"{prog} install 'dnf-command(%s)'\"").format(
                    prog=dnf.util.MAIN_PROG, PROG=dnf.util.MAIN_PROG_UPPER), basecmd)
            else:
                logger.critical(_("It could be a {prog} plugin command, "
                                  "but loading of plugins is currently disabled.").format(
                    prog=dnf.util.MAIN_PROG_UPPER))
            raise CliError
        self.command = command_cls(self)
        logger.log(dnf.logging.DDEBUG, 'Base command: %s', basecmd)
        logger.log(dnf.logging.DDEBUG, 'Extra commands: %s', args)
    def configure(self, args, option_parser=None):
        """Parse command line arguments, and set up :attr:`self.base.conf` and
        :attr:`self.cmds`, as well as logger objects in base instance.
        :param args: a list of command line arguments
        :param option_parser: a class for parsing cli options
        """
        # expand user-defined aliases before any option parsing
        aliases = dnf.cli.aliases.Aliases()
        args = aliases.resolve(args)
        self.optparser = dnf.cli.option_parser.OptionParser() \
            if option_parser is None else option_parser
        opts = self.optparser.parse_main_args(args)
        # Just print out the version if that's what the user wanted
        if opts.version:
            print(dnf.const.VERSION)
            print_versions(self.base.conf.history_record_packages, self.base,
                           self.base.output)
            sys.exit(0)
        if opts.quiet:
            opts.debuglevel = 0
            opts.errorlevel = 2
        if opts.verbose:
            opts.debuglevel = opts.errorlevel = dnf.const.VERBOSE_LEVEL
        # Read up configuration options and initialize plugins
        try:
            if opts.cacheonly:
                self.base.conf._set_value("cachedir", self.base.conf.system_cachedir,
                                          dnf.conf.PRIO_DEFAULT)
                self.demands.cacheonly = True
            self.base.conf._configure_from_options(opts)
            self._read_conf_file(opts.releasever)
            if 'arch' in opts:
                self.base.conf.arch = opts.arch
            self.base.conf._adjust_conf_options()
        except (dnf.exceptions.ConfigError, ValueError) as e:
            logger.critical(_('Config error: %s'), e)
            sys.exit(1)
        except IOError as e:
            e = '%s: %s' % (ucd(str(e)), repr(e.filename))
            logger.critical(_('Config error: %s'), e)
            sys.exit(1)
        if opts.destdir is not None:
            self.base.conf.destdir = opts.destdir
            if not self.base.conf.downloadonly and opts.command not in (
                    'download', 'system-upgrade', 'reposync', 'modulesync'):
                logger.critical(_('--destdir or --downloaddir must be used with --downloadonly '
                                  'or download or system-upgrade command.')
                                )
                sys.exit(1)
        if (opts.set_enabled or opts.set_disabled) and opts.command != 'config-manager':
            logger.critical(
                _('--enable, --set-enabled and --disable, --set-disabled '
                  'must be used with config-manager command.'))
            sys.exit(1)
        # randomize start time to spread load (e.g. cron-driven runs)
        if opts.sleeptime is not None:
            time.sleep(random.randrange(opts.sleeptime * 60))
        # store the main commands & summaries, before plugins are loaded
        self.optparser.add_commands(self.cli_commands, 'main')
        # store the plugin commands & summaries
        self.base.init_plugins(opts.disableplugin, opts.enableplugin, self)
        self.optparser.add_commands(self.cli_commands,'plugin')
        # show help if no command specified
        # this is done here, because we first have the full
        # usage info after the plugins are loaded.
        if not opts.command:
            self.optparser.print_help()
            sys.exit(0)
        # save our original args out
        self.base.args = args
        # save out as a nice command string
        self.cmdstring = self.optparser.prog + ' '
        for arg in self.base.args:
            self.cmdstring += '%s ' % arg
        self._log_essentials()
        try:
            self._parse_commands(opts, args)
        except CliError:
            sys.exit(1)
        # show help for dnf <command> --help / --help-cmd
        if opts.help:
            self.optparser.print_help(self.command)
            sys.exit(0)
        opts = self.optparser.parse_command_args(self.command, args)
        if opts.allowerasing:
            self.demands.allow_erasing = opts.allowerasing
            self.base._allow_erasing = True
        if opts.freshest_metadata:
            self.demands.freshest_metadata = opts.freshest_metadata
        if opts.debugsolver:
            self.base.conf.debug_solver = True
        if opts.obsoletes:
            self.base.conf.obsoletes = True
        self.command.pre_configure()
        self.base.pre_configure_plugins()
        # with cachedir in place we can configure stuff depending on it:
        self.base._activate_persistor()
        self._configure_repos(opts)
        self.base.configure_plugins()
        self.base.conf._configure_from_options(opts)
        self.command.configure()
        if self.base.conf.destdir:
            dnf.util.ensure_dir(self.base.conf.destdir)
            self.base.repos.all().pkgdir = self.base.conf.destdir
        if self.base.conf.color != 'auto':
            self.base.output.term.reinit(color=self.base.conf.color)
        # honor the system-wide RPM signature verification policy
        if rpm.expandMacro('%_pkgverify_level') in ('signature', 'all'):
            forcing = False
            for repo in self.base.repos.iter_enabled():
                if repo.gpgcheck:
                    continue
                repo.gpgcheck = True
                forcing = True
            if not self.base.conf.localpkg_gpgcheck:
                self.base.conf.localpkg_gpgcheck = True
                forcing = True
            if forcing:
                logger.warning(
                    _("Warning: Enforcing GPG signature check globally "
                      "as per active RPM security policy (see 'gpgcheck' in "
                      "dnf.conf(5) for how to squelch this message)"
                      )
                )
    def _read_conf_file(self, releasever=None):
        """Read the main config file, update substitutions and derive releasever.

        :param releasever: release version from the command line, if any
        :return: the configured dnf.conf.Conf object (same as self.base.conf)
        :raises dnf.exceptions.ConfigError: when a command-line-given config
            file does not exist
        """
        timer = dnf.logging.Timer('config')
        conf = self.base.conf
        # replace remote config path with downloaded file
        conf._check_remote_file('config_file_path')
        # search config file inside the installroot first
        conf._search_inside_installroot('config_file_path')
        # check whether a config file is requested from command line and the file exists
        filename = conf._get_value('config_file_path')
        if (conf._get_priority('config_file_path') == dnf.conf.PRIO_COMMANDLINE) and \
                not os.path.isfile(filename):
            raise dnf.exceptions.ConfigError(_('Config file "{}" does not exist').format(filename))
        # read config
        conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
        # search reposdir file inside the installroot first
        from_root = conf._search_inside_installroot('reposdir')
        # Update vars from same root like repos were taken
        if conf._get_priority('varsdir') == dnf.conf.PRIO_COMMANDLINE:
            from_root = "/"
        subst = conf.substitutions
        subst.update_from_etc(from_root, varsdir=conf._get_value('varsdir'))
        # cachedir, logs, releasever, and gpgkey are taken from or stored in installroot
        if releasever is None and conf.releasever is None:
            releasever = dnf.rpm.detect_releasever(conf.installroot)
        elif releasever == '/':
            # '/' means: detect from the host root, not the installroot
            releasever = dnf.rpm.detect_releasever(releasever)
        if releasever is not None:
            conf.releasever = releasever
        if conf.releasever is None:
            logger.warning(_("Unable to detect release version (use '--releasever' to specify "
                             "release version)"))
        for opt in ('cachedir', 'logdir', 'persistdir'):
            conf.prepend_installroot(opt)
        self.base._logging._setup_from_dnf_conf(conf)
        timer()
        return conf
    def _populate_update_security_filter(self, opts, cmp_type='eq', all=None):
        """Install advisory-based filters into the Base from CLI options.

        :param opts: parsed command-line options carrying the boolean
            advisory-type flags (bugfix, enhancement, newpackage, security)
            and the advisory/bugzilla/cves/severity selectors
        :param cmp_type: version comparison for the filter; "eq" and "gte"
            are supported
        :param all: truthy selects every advisory type regardless of the
            individual flags (note: shadows the builtin ``all``; kept for
            interface compatibility)
        """
        if (opts is None) and (all is None):
            return
        # NOTE(review): opts is dereferenced below, so a call with opts=None
        # and a non-None ``all`` would raise AttributeError — confirm callers
        # always pass opts when ``all`` is set.
        types = []
        if opts.bugfix or all:
            types.append('bugfix')
        if opts.enhancement or all:
            types.append('enhancement')
        if opts.newpackage or all:
            types.append('newpackage')
        if opts.security or all:
            types.append('security')
        self.base.add_security_filters(cmp_type, types=types, advisory=opts.advisory,
                                       bugzilla=opts.bugzilla, cves=opts.cves,
                                       severity=opts.severity)
def redirect_logger(self, stdout=None, stderr=None):
# :api
"""
Change minimal logger level for terminal output to stdout and stderr according to specific
command requirements
@param stdout: logging.INFO, logging.WARNING, ...
@param stderr:logging.INFO, logging.WARNING, ...
"""
if stdout is not None:
self.base._logging.stdout_handler.setLevel(stdout)
if stderr is not None:
self.base._logging.stderr_handler.setLevel(stderr)
def redirect_repo_progress(self, fo=sys.stderr):
progress = dnf.cli.progress.MultiFileProgressMeter(fo)
self.base.output.progress = progress
self.base.repos.all().set_progress_bar(progress)
def _check_running_kernel(self):
kernel = self.base.sack.get_running_kernel()
if kernel is None:
return
q = self.base.sack.query().filterm(provides=kernel.name)
q = q.installed()
q.filterm(advisory_type='security')
ikpkg = kernel
for pkg in q:
if pkg > ikpkg:
ikpkg = pkg
if ikpkg > kernel:
print('Security: %s is an installed security update' % ikpkg)
print('Security: %s is the currently running version' % kernel)
def _option_conflict(self, option_string_1, option_string_2):
print(self.optparser.print_usage())
raise dnf.exceptions.Error(_("argument {}: not allowed with argument {}".format(
option_string_1, option_string_2)))
def register_command(self, command_cls):
"""Register a Command. :api"""
for name in command_cls.aliases:
if name in self.cli_commands:
raise dnf.exceptions.ConfigError(_('Command "%s" already defined') % name)
self.cli_commands[name] = command_cls
    def run(self):
        """Call the base command, and pass it the extended commands or
        arguments.
        :return: (exit_code, [ errors ])
        exit_code is::
            0 = we're done, exit
            1 = we've errored, exit with error string
            2 = we've got work yet to do, onto the next stage
        """
        # Resolve the demands the selected command registered (sack setup,
        # root checks, ...) before running it.
        self._process_demands()
        # Reports about excludes and includes (but not from plugins)
        if self.base.conf.excludepkgs:
            logger.debug(
                _('Excludes in dnf.conf: ') + ", ".join(sorted(set(self.base.conf.excludepkgs))))
        if self.base.conf.includepkgs:
            logger.debug(
                _('Includes in dnf.conf: ') + ", ".join(sorted(set(self.base.conf.includepkgs))))
        # Per-repo excludes/includes are reported separately from the global ones.
        for repo in self.base.repos.iter_enabled():
            if repo.excludepkgs:
                logger.debug(_('Excludes in repo ') + repo.id + ": "
                             + ", ".join(sorted(set(repo.excludepkgs))))
            if repo.includepkgs:
                logger.debug(_('Includes in repo ') + repo.id + ": "
                             + ", ".join(sorted(set(repo.includepkgs))))
        return self.command.run()
| 44,801
|
Python
|
.py
| 920
| 36.336957
| 109
| 0.593657
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,750
|
output.py
|
rpm-software-management_dnf/dnf/cli/output.py
|
# Copyright 2005 Duke University
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Handle actual output from the cli."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import hawkey
import itertools
import libdnf.transaction
import logging
import operator
import pwd
import re
import sys
import time
from dnf.cli.format import format_number, format_time
from dnf.i18n import _, C_, P_, ucd, fill_exact_width, textwrap_fill, exact_width, select_short_long
from dnf.pycomp import xrange, basestring, long, unicode, sys_maxsize
from dnf.yum.rpmtrans import TransactionDisplay
from dnf.db.history import MergedTransactionWrapper
import dnf.base
import dnf.callback
import dnf.cli.progress
import dnf.cli.term
import dnf.conf
import dnf.crypto
import dnf.i18n
import dnf.transaction
import dnf.util
import dnf.yum.misc
logger = logging.getLogger('dnf')
def _spread_in_columns(cols_count, label, lst):
left = itertools.chain((label,), itertools.repeat(''))
lst_length = len(lst)
right_count = cols_count - 1
missing_items = -lst_length % right_count
if not lst_length:
lst = itertools.repeat('', right_count)
elif missing_items:
lst.extend(('',) * missing_items)
lst_iter = iter(lst)
return list(zip(left, *[lst_iter] * right_count))
class Output(object):
    """Main output class for the yum command line."""
    # Indent prefixed to package names when listing group members.
    GRP_PACKAGE_INDENT = ' ' * 3
    # Matches search terms that look like file paths (optionally prefixed by
    # one or two '*' glob characters), i.e. candidates for file-provides matching.
    FILE_PROVIDE_RE = re.compile(r'^\*{0,2}/')
    def __init__(self, base, conf):
        # :param base: the dnf Base instance output is produced for
        # :param conf: the CLI configuration object (colors, verbosity, ...)
        self.conf = conf
        self.base = base
        # Terminal abstraction honoring the configured color setting.
        self.term = dnf.cli.term.Term(color=base.conf.color)
        # Progress bar; set up later via e.g. redirect_repo_progress().
        self.progress = None
def _banner(self, col_data, row):
term_width = self.term.columns
rule = '%s' % '=' * term_width
header = self.fmtColumns(zip(row, col_data), ' ')
return rule, header, rule
def _col_widths(self, rows):
col_data = [dict() for _ in rows[0]]
for row in rows:
for (i, val) in enumerate(row):
col_dct = col_data[i]
length = len(val)
col_dct[length] = col_dct.get(length, 0) + 1
cols = self.calcColumns(col_data, None, indent=' ')
# align to the left
return list(map(operator.neg, cols))
    def _highlight(self, highlight):
        """Translate a highlight spec into (begin, end) terminal escape strings.

        *highlight* may be falsy (no highlighting), a non-string truthy value
        or 'bold' (bold), 'normal' (no-op), or a comma/space separated list of
        mode names, color names, or 'fg:'/'bg:'-prefixed color names.
        Unrecognized names are silently ignored.
        """
        hibeg = ''
        hiend = ''
        if not highlight:
            pass
        elif not isinstance(highlight, basestring) or highlight == 'bold':
            hibeg = self.term.MODE['bold']
        elif highlight == 'normal':
            pass # Minor opt.
        else:
            # Turn a string into a specific output: colour, bold, etc.
            for high in highlight.replace(',', ' ').split():
                if high == 'normal':
                    # 'normal' resets everything accumulated so far.
                    hibeg = ''
                elif high in self.term.MODE:
                    hibeg += self.term.MODE[high]
                elif high in self.term.FG_COLOR:
                    hibeg += self.term.FG_COLOR[high]
                elif (high.startswith('fg:') and
                      high[3:] in self.term.FG_COLOR):
                    hibeg += self.term.FG_COLOR[high[3:]]
                elif (high.startswith('bg:') and
                      high[3:] in self.term.BG_COLOR):
                    hibeg += self.term.BG_COLOR[high[3:]]
        # Only emit a closing reset if something was actually opened.
        if hibeg:
            hiend = self.term.MODE['normal']
        return (hibeg, hiend)
def _sub_highlight(self, haystack, highlight, needles, **kwds):
hibeg, hiend = self._highlight(highlight)
return self.term.sub(haystack, hibeg, hiend, needles, **kwds)
@staticmethod
def _calc_columns_spaces_helps(current, data_tups, left):
""" Spaces left on the current field will help how many pkgs? """
ret = 0
for tup in data_tups:
if left < (tup[0] - current):
break
ret += tup[1]
return ret
@property
def history(self):
return self.base.history
@property
def sack(self):
return self.base.sack
    def calcColumns(self, data, columns=None, remainder_column=0,
                    total_width=None, indent=''):
        """Dynamically calculate the widths of the columns that the
        fields in data should be placed into for output.

        :param data: a list of dictionaries that represent the data to
            be output.  Each dictionary in the list corresponds to a
            column of output. The keys of the dictionary are the
            lengths of the items to be output, and the value associated
            with a key is the number of items of that length.
        :param columns: a list containing the minimum amount of space
            that must be allocated for each row. This can be used to
            ensure that there is space available in a column if, for
            example, the actual lengths of the items being output
            cannot be given in *data*
        :param remainder_column: number of the column to receive a few
            extra spaces that may remain after other allocation has
            taken place
        :param total_width: the total width of the output.
            self.term.real_columns is used by default
        :param indent: string that will be prefixed to a line of
            output to create e.g. an indent
        :return: a list of the widths of the columns that the fields
            in data should be placed into for output
        """
        cols = len(data)
        # Convert the data to ascending list of tuples, (field_length, pkgs)
        pdata = data
        data = [None] * cols # Don't modify the passed in data
        for d in range(0, cols):
            data[d] = sorted(pdata[d].items())
        if total_width is None:
            total_width = self.term.real_columns
        # We start allocating 1 char to everything but the last column, and a
        # space between each (again, except for the last column). Because
        # at worst we are better with:
        # |one two three|
        # |  four       |
        # ...than:
        # |one two three|
        # |            f|
        # |our          |
        # ...the later being what we get if we pre-allocate the last column, and
        # thus. the space, due to "three" overflowing it's column by 2 chars.
        if columns is None:
            columns = [1] * (cols - 1)
            columns.append(0)
        # i'm not able to get real terminal width so i'm probably
        # running in non interactive terminal (pipe to grep, redirect to file...)
        # avoid splitting lines to enable filtering output
        if not total_width:
            # Give every column room for its longest item and stop there.
            full_columns = []
            for d in xrange(0, cols):
                col = data[d]
                if col:
                    full_columns.append(col[-1][0])
                else:
                    full_columns.append(columns[d] + 1)
            full_columns[0] += len(indent) * 2
            # if possible, try to keep default width (usually 80 columns)
            default_width = self.term.columns
            if sum(full_columns) > default_width:
                return full_columns
            total_width = default_width
        # Space still distributable after minimums, separators and the indent.
        total_width -= (sum(columns) + (cols - 1) + exact_width(indent))
        if not columns[-1]:
            total_width += 1
        while total_width > 0:
            # Find which field all the spaces left will help best
            helps = 0
            val = 0
            for d in xrange(0, cols):
                thelps = self._calc_columns_spaces_helps(columns[d], data[d],
                                                         total_width)
                if not thelps:
                    continue
                # We prefer to overflow: the last column, and then earlier
                # columns. This is so that in the best case (just overflow the
                # last) ... grep still "works", and then we make it prettier.
                if helps and (d == (cols - 1)) and (thelps / 2) < helps:
                    continue
                if thelps < helps:
                    continue
                helps = thelps
                val = d
            # If we found a column to expand, move up to the next level with
            # that column and start again with any remaining space.
            if helps:
                diff = data[val].pop(0)[0] - columns[val]
                if not columns[val] and (val == (cols - 1)):
                    # If we are going from 0 => N on the last column, take 1
                    # for the space before the column.
                    total_width -= 1
                columns[val] += diff
                total_width -= diff
                continue
            # No single column benefits: spread what's left evenly.
            overflowed_columns = 0
            for d in xrange(0, cols):
                if not data[d]:
                    continue
                overflowed_columns += 1
            if overflowed_columns:
                # Split the remaining spaces among each overflowed column
                # equally
                norm = total_width // overflowed_columns
                for d in xrange(0, cols):
                    if not data[d]:
                        continue
                    columns[d] += norm
                    total_width -= norm
            # Split the remaining spaces among each column equally, except the
            # last one. And put the rest into the remainder column
            cols -= 1
            norm = total_width // cols
            for d in xrange(0, cols):
                columns[d] += norm
            columns[remainder_column] += total_width - (cols * norm)
            total_width = 0
        return columns
@staticmethod
def _fmt_column_align_width(width):
"""Returns tuple of (align_left, width)"""
if width < 0:
return (True, -width)
return (False, width)
def _col_data(self, col_data):
assert len(col_data) == 2 or len(col_data) == 3
if len(col_data) == 2:
(val, width) = col_data
hibeg = hiend = ''
if len(col_data) == 3:
(val, width, highlight) = col_data
(hibeg, hiend) = self._highlight(highlight)
return (ucd(val), width, hibeg, hiend)
    def fmtColumns(self, columns, msg=u'', end=u''):
        """Return a row of data formatted into a string for output.
        Items can overflow their columns.

        :param columns: a list of tuples containing the data to
            output.  Each tuple contains first the item to be output,
            then the amount of space allocated for the column, and then
            optionally a type of highlighting for the item
        :param msg: a string to begin the line of output with
        :param end: a string to end the line of output with
        :return: a row of data formatted into a string for output
        """
        columns = list(columns)
        # A printf-style template is accumulated in msg; the matching values
        # in data. Everything is rendered with one final '%' at the end.
        total_width = len(msg)
        data = []
        for col_data in columns[:-1]:
            (val, width, hibeg, hiend) = self._col_data(col_data)
            if not width: # Don't count this column, invisible text
                msg += u"%s"
                data.append(val)
                continue
            (align_left, width) = self._fmt_column_align_width(width)
            val_width = exact_width(val)
            if val_width <= width:
                # Don't use fill_exact_width() because it sucks performance
                # wise for 1,000s of rows. Also allows us to use len(), when
                # we can.
                msg += u"%s%s%s%s "
                if align_left:
                    data.extend([hibeg, val, " " * (width - val_width), hiend])
                else:
                    data.extend([hibeg, " " * (width - val_width), val, hiend])
            else:
                # Overflow: emit the value on its own line and indent the
                # continuation to the next column position.
                msg += u"%s%s%s\n" + " " * (total_width + width + 1)
                data.extend([hibeg, val, hiend])
            total_width += width
            total_width += 1
        # The last column is always padded exactly (it may end the line).
        (val, width, hibeg, hiend) = self._col_data(columns[-1])
        (align_left, width) = self._fmt_column_align_width(width)
        val = fill_exact_width(val, width, left=align_left,
                               prefix=hibeg, suffix=hiend)
        msg += u"%%s%s" % end
        data.append(val)
        return msg % tuple(data)
def simpleList(self, pkg, ui_overflow=False, indent='', highlight=False,
columns=None):
"""Print a package as a line.
:param pkg: the package to be printed
:param ui_overflow: unused
:param indent: string to be prefixed onto the line to provide
e.g. an indent
:param highlight: highlighting options for the name of the
package
:param columns: tuple containing the space allocated for each
column of output. The columns are the package name, version,
and repository
"""
if columns is None:
columns = (-40, -22, -16) # Old default
na = '%s%s.%s' % (indent, pkg.name, pkg.arch)
hi_cols = [highlight, 'normal', 'normal']
columns = zip((na, pkg.evr, pkg._from_repo), columns, hi_cols)
print(self.fmtColumns(columns))
def simpleEnvraList(self, pkg, ui_overflow=False,
indent='', highlight=False, columns=None):
"""Print a package as a line, with the package itself in envra
format so it can be passed to list/install/etc.
:param pkg: the package to be printed
:param ui_overflow: unused
:param indent: string to be prefixed onto the line to provide
e.g. an indent
:param highlight: highlighting options for the name of the
package
:param columns: tuple containing the space allocated for each
column of output. The columns the are the package envra and
repository
"""
if columns is None:
columns = (-63, -16) # Old default
envra = '%s%s' % (indent, ucd(pkg))
hi_cols = [highlight, 'normal', 'normal']
rid = pkg.ui_from_repo
columns = zip((envra, rid), columns, hi_cols)
print(self.fmtColumns(columns))
def simple_name_list(self, pkg):
"""Print a package as a line containing its name."""
print(ucd(pkg.name))
def simple_nevra_list(self, pkg):
"""Print a package as a line containing its NEVRA."""
print(ucd(pkg))
def fmtKeyValFill(self, key, val):
"""Return a key value pair in the common two column output
format.
:param key: the key to be formatted
:param val: the value associated with *key*
:return: the key value pair formatted in two columns for output
"""
keylen = exact_width(key)
cols = self.term.real_columns
if not cols:
cols = sys_maxsize
elif cols < 20:
cols = 20
nxt = ' ' * (keylen - 2) + ': '
if not val:
# textwrap.fill in case of empty val returns empty string
return key
val = ucd(val)
ret = textwrap_fill(val, width=cols, initial_indent=key,
subsequent_indent=nxt)
if ret.count("\n") > 1 and keylen > (cols // 3):
# If it's big, redo it again with a smaller subsequent off
ret = textwrap_fill(val, width=cols, initial_indent=key,
subsequent_indent=' ...: ')
return ret
def fmtSection(self, name, fill='='):
"""Format and return a section header. The format of the
header is a line with *name* centered, and *fill* repeated on
either side to fill an entire line on the terminal.
:param name: the name of the section
:param fill: the character to repeat on either side of *name*
to fill an entire line. *fill* must be a single character.
:return: a string formatted to be a section header
"""
name = ucd(name)
cols = self.term.columns - 2
name_len = exact_width(name)
if name_len >= (cols - 4):
beg = end = fill * 2
else:
beg = fill * ((cols - name_len) // 2)
end = fill * (cols - name_len - len(beg))
return "%s %s %s" % (beg, name, end)
    def infoOutput(self, pkg, highlight=False):
        """Build a verbose, multi-line "rpm -qi"-style description of a package.

        :param pkg: the package to print information about
        :param highlight: highlighting options for the name of the
            package
        :return: the formatted text (the caller is expected to print it)
        """
        def format_key_val(key, val):
            # One "Key          : value" line with the key padded to 12 cells.
            return " ".join([fill_exact_width(key, 12, 12), ":", str(val)])
        def format_key_val_fill(key, val):
            # Like format_key_val but word-wraps long values.
            return self.fmtKeyValFill(fill_exact_width(key, 12, 12) + " : ", val or "")
        output_list = []
        (hibeg, hiend) = self._highlight(highlight)
        # Translators: This is abbreviated 'Name'. Should be no longer
        # than 12 characters. You can use the full version if it is short
        # enough in your language.
        key = select_short_long(12, C_("short", "Name"),
                                C_("long", "Name"))
        output_list.append(format_key_val(key,
                                          "%s%s%s" % (hibeg, pkg.name, hiend)))
        if pkg.epoch:
            # Translators: This message should be no longer than 12 characters.
            output_list.append(format_key_val(_("Epoch"), pkg.epoch))
        key = select_short_long(12, C_("short", "Version"),
                                C_("long", "Version"))
        output_list.append(format_key_val(key, pkg.version))
        # Translators: This message should be no longer than 12 characters.
        output_list.append(format_key_val(_("Release"), pkg.release))
        key = select_short_long(12, C_("short", "Arch"),
                                C_("long", "Architecture"))
        output_list.append(format_key_val(key, pkg.arch))
        key = select_short_long(12, C_("short", "Size"), C_("long", "Size"))
        output_list.append(format_key_val(key,
                                          format_number(float(pkg._size))))
        # Translators: This message should be no longer than 12 characters.
        output_list.append(format_key_val(_("Source"), pkg.sourcerpm))
        key = select_short_long(12, C_("short", "Repo"),
                                C_("long", "Repository"))
        output_list.append(format_key_val(key, pkg.repoid))
        if pkg._from_system:
            # For installed packages also show the repo it was installed from.
            history_repo = self.history.repo(pkg)
            if history_repo:
                # Translators: This message should be no longer than 12 chars.
                output_list.append(format_key_val(_("From repo"), history_repo))
        if self.conf.verbose:
            # :hawkey does not support changelog information
            # print(_("Committer : %s") % ucd(pkg.committer))
            # print(_("Committime : %s") % time.ctime(pkg.committime))
            # Translators: This message should be no longer than 12 characters.
            output_list.append(format_key_val(_("Packager"), pkg.packager))
            # Translators: This message should be no longer than 12 characters.
            output_list.append(format_key_val(_("Buildtime"),
                                              dnf.util.normalize_time(pkg.buildtime)))
            if pkg.installtime:
                # Translators: This message should be no longer than 12 characters.
                output_list.append(format_key_val(_("Install time"),
                                                  dnf.util.normalize_time(pkg.installtime)))
            history_pkg = self.history.package_data(pkg)
            if history_pkg:
                try:
                    uid = int(history_pkg._item.getInstalledBy())
                except ValueError: # In case int() fails
                    uid = None
                # Translators: This message should be no longer than 12 chars.
                output_list.append(format_key_val(_("Installed by"), self._pwd_ui_username(uid)))
        # Translators: This is abbreviated 'Summary'. Should be no longer
        # than 12 characters. You can use the full version if it is short
        # enough in your language.
        key = select_short_long(12, C_("short", "Summary"),
                                C_("long", "Summary"))
        output_list.append(format_key_val_fill(key, pkg.summary))
        if pkg.url:
            output_list.append(format_key_val(_("URL"), ucd(pkg.url)))
        # Translators: This message should be no longer than 12 characters.
        output_list.append(format_key_val_fill(_("License"), pkg.license))
        # Translators: This is abbreviated 'Description'. Should be no longer
        # than 12 characters. You can use the full version if it is short
        # enough in your language.
        key = select_short_long(12, C_("short", "Description"),
                                C_("long", "Description"))
        output_list.append(format_key_val_fill(key, pkg.description))
        return "\n".join(output_list)
    def updatesObsoletesList(self, uotup, changetype, columns=None):
        """Print a simple string that explains the relationship
        between the members of an update or obsoletes tuple.

        :param uotup: an update or obsoletes tuple.  The first member
            is the new package, and the second member is the old
            package
        :param changetype: a string indicating what the change between
            the packages is, e.g. 'updates' or 'obsoletes'
        :param columns: a tuple containing information about how to
            format the columns of output.  The absolute value of each
            number in the tuple indicates how much space has been
            allocated for the corresponding column.  If the number is
            negative, the text in the column will be left justified,
            and if it is positive, the text will be right justified.
            The columns of output are the package name, version, and repository
        """
        (changePkg, instPkg) = uotup
        if columns is not None:
            # New style, output all info. for both old/new with old indented
            chi = self.conf.color_update_remote
            # NOTE(review): the SYSTEM repo check selecting color_update_local
            # for non-system repos looks inverted relative to the option
            # names — confirm against the config documentation before changing.
            if changePkg.reponame != hawkey.SYSTEM_REPO_NAME:
                chi = self.conf.color_update_local
            self.simpleList(changePkg, columns=columns, highlight=chi)
            self.simpleList(instPkg, columns=columns, indent=' ' * 4,
                            highlight=self.conf.color_update_installed)
            return
        # Old style
        c_compact = changePkg.compactPrint()
        i_compact = '%s.%s' % (instPkg.name, instPkg.arch)
        c_repo = changePkg.repoid
        print('%-35.35s [%.12s] %.10s %-20.20s' %
              (c_compact, c_repo, changetype, i_compact))
    def listPkgs(self, lst, description, outputType, highlight_na={},
                 columns=None, highlight_modes={}):
        """Prints information about the given list of packages.

        :param lst: a list of packages to print information about
        :param description: string describing what the list of
            packages contains, e.g. 'Available Packages'
        :param outputType: The type of information to be printed.
            Current options::
               'list' - simple pkg list
               'info' - similar to rpm -qi output
               'name' - simple name list
               'nevra' - simple nevra list
        :param highlight_na: a dictionary containing information about
            packages that should be highlighted in the output.  The
            dictionary keys are (name, arch) tuples for the package,
            and the associated values are the package objects
            themselves.
        :param columns: a tuple containing information about how to
            format the columns of output.  The absolute value of each
            number in the tuple indicates how much space has been
            allocated for the corresponding column.  If the number is
            negative, the text in the column will be left justified,
            and if it is positive, the text will be right justified.
            The columns of output are the package name, version, and
            repository
        :param highlight_modes: dictionary containing information
            about to highlight the packages in *highlight_na*.
            *highlight_modes* should contain the following keys::
               'not in' - highlighting used for packages not in *highlight_na*
               '=' - highlighting used when the package versions are equal
               '<' - highlighting used when the package has a lower version
                  number
               '>' - highlighting used when the package has a higher version
                  number
        :return: number of packages listed
        """
        # NOTE(review): the dict defaults are never mutated here (only .get()
        # and membership tests), so the mutable-default-argument pattern is
        # harmless — but keep it that way.
        if outputType in ['list', 'info', 'name', 'nevra']:
            if len(lst) > 0:
                print('%s' % description)
                info_set = set()
                if outputType == 'list':
                    # De-duplicate identical (nevra, repo) entries.
                    unique_item_dict = {}
                    for pkg in lst:
                        unique_item_dict[str(pkg) + str(pkg._from_repo)] = pkg
                    lst = unique_item_dict.values()
                for pkg in sorted(lst):
                    key = (pkg.name, pkg.arch)
                    highlight = False
                    if key not in highlight_na:
                        highlight = highlight_modes.get('not in', 'normal')
                    elif pkg.evr_eq(highlight_na[key]):
                        highlight = highlight_modes.get('=', 'normal')
                    elif pkg.evr_lt(highlight_na[key]):
                        # NOTE(review): a lower EVR selects the '>' highlight
                        # (and the final else '<'); this looks inverted but
                        # matches the historical behavior — confirm before
                        # changing.
                        highlight = highlight_modes.get('>', 'bold')
                    else:
                        highlight = highlight_modes.get('<', 'normal')
                    if outputType == 'list':
                        self.simpleList(pkg, ui_overflow=True,
                                        highlight=highlight, columns=columns)
                    elif outputType == 'info':
                        info_set.add(self.infoOutput(pkg, highlight=highlight) + "\n")
                    elif outputType == 'name':
                        self.simple_name_list(pkg)
                    elif outputType == 'nevra':
                        self.simple_nevra_list(pkg)
                    else:
                        pass
                if info_set:
                    print("\n".join(sorted(info_set)))
        return len(lst)
    def userconfirm(self, msg=None, defaultyes_msg=None):
        """Get a yes or no from the user, and default to No

        :msg: String for case with [y/N]
        :defaultyes_msg: String for case with [Y/n]
        :return: True if the user selects yes, and False if the user
            selects no
        """
        # Localized yes/no answers; both long and one-letter forms accepted.
        yui = (ucd(_('y')), ucd(_('yes')))
        nui = (ucd(_('n')), ucd(_('no')))
        aui = yui + nui
        while True:
            if msg is None:
                msg = _('Is this ok [y/N]: ')
            choice = ''
            if self.conf.defaultyes:
                # With defaultyes, the prompt (and empty-input default) flips.
                if defaultyes_msg is None:
                    msg = _('Is this ok [Y/n]: ')
                else:
                    msg = defaultyes_msg
            try:
                choice = dnf.i18n.ucd_input(msg)
            except EOFError:
                # EOF (e.g. closed stdin) falls through as empty input.
                pass
            except KeyboardInterrupt:
                # Ctrl-C counts as "no".
                choice = nui[0]
            choice = ucd(choice).lower()
            if len(choice) == 0:
                # Empty input takes the configured default answer.
                choice = yui[0] if self.conf.defaultyes else nui[0]
            if choice in aui:
                break
            # If the English one letter names don't mix with the translated
            # letters, allow them too:
            if u'y' == choice and u'y' not in aui:
                choice = yui[0]
                break
            if u'n' == choice and u'n' not in aui:
                choice = nui[0]
                break
        if choice in yui:
            return True
        return False
def _pkgs2name_dict(self, sections):
installed = self.sack.query().installed()._name_dict()
available = self.sack.query().available()._name_dict()
d = {}
for pkg_name in itertools.chain(*list(zip(*sections))[1]):
if pkg_name in installed:
d[pkg_name] = installed[pkg_name][0]
elif pkg_name in available:
d[pkg_name] = available[pkg_name][0]
return d
def _pkgs2col_lengths(self, sections, name_dict):
nevra_lengths = {}
repo_lengths = {}
for pkg_name in itertools.chain(*list(zip(*sections))[1]):
pkg = name_dict.get(pkg_name)
if pkg is None:
continue
nevra_l = exact_width(ucd(pkg)) + exact_width(self.GRP_PACKAGE_INDENT)
repo_l = exact_width(ucd(pkg.reponame))
nevra_lengths[nevra_l] = nevra_lengths.get(nevra_l, 0) + 1
repo_lengths[repo_l] = repo_lengths.get(repo_l, 0) + 1
return (nevra_lengths, repo_lengths)
def _display_packages(self, pkg_names):
for name in pkg_names:
print('%s%s' % (self.GRP_PACKAGE_INDENT, name))
def _display_packages_verbose(self, pkg_names, name_dict, columns):
for name in pkg_names:
try:
pkg = name_dict[name]
except KeyError:
# package not in any repo -> print only package name
print('%s%s' % (self.GRP_PACKAGE_INDENT, name))
continue
highlight = False
if not pkg._from_system:
highlight = self.conf.color_list_available_install
self.simpleEnvraList(pkg, ui_overflow=True,
indent=self.GRP_PACKAGE_INDENT,
highlight=highlight,
columns=columns)
def display_pkgs_in_groups(self, group):
"""Output information about the packages in a given group
:param group: a Group object to output information about
"""
def names(packages):
return sorted(dnf.util._name_unset_wrapper(pkg.name) for pkg in packages)
print(_('Group: %s') % dnf.util._name_unset_wrapper(group.ui_name))
verbose = self.conf.verbose
if verbose:
print(_(' Group-Id: %s') % ucd(group.id))
if group.ui_description:
print(_(' Description: %s') % ucd(group.ui_description) or "")
if group.lang_only:
print(_(' Language: %s') % group.lang_only)
sections = (
(_(' Mandatory Packages:'), names(group.mandatory_packages)),
(_(' Default Packages:'), names(group.default_packages)),
(_(' Optional Packages:'), names(group.optional_packages)),
(_(' Conditional Packages:'), names(group.conditional_packages)))
if verbose:
name_dict = self._pkgs2name_dict(sections)
col_lengths = self._pkgs2col_lengths(sections, name_dict)
columns = self.calcColumns(col_lengths)
columns = (-columns[0], -columns[1])
for (section_name, packages) in sections:
if len(packages) < 1:
continue
print(section_name)
self._display_packages_verbose(packages, name_dict, columns)
else:
for (section_name, packages) in sections:
if len(packages) < 1:
continue
print(section_name)
self._display_packages(packages)
def display_groups_in_environment(self, environment):
"""Output information about the packages in a given environment
:param environment: an Environment object to output information about
"""
def names(groups):
return sorted(dnf.util._name_unset_wrapper(group.name) for group in groups)
print(_('Environment Group: %s') % dnf.util._name_unset_wrapper(environment.ui_name))
if self.conf.verbose:
print(_(' Environment-Id: %s') % ucd(environment.id))
if environment.ui_description:
description = ucd(environment.ui_description) or ""
print(_(' Description: %s') % description)
sections = (
(_(' Mandatory Groups:'), names(environment.mandatory_groups)),
(_(' Optional Groups:'), names(environment.optional_groups)))
for (section_name, packages) in sections:
if len(packages) < 1:
continue
print(section_name)
self._display_packages(packages)
    def matchcallback(self, po, values, matchfor=None, verbose=None,
                      highlight=None):
        """Output search/provides type callback matches.

        :param po: the package object that matched the search
        :param values: the information associated with *po* that
            matched the search
        :param matchfor: a list of strings to be highlighted in the
            output
        :param verbose: whether to output extra verbose information
        :param highlight: highlighting options for the highlighted matches
        """
        def print_highlighted_key_item(key, item, printed_headline, can_overflow=False):
            # Prints the 'Matched from:' headline once, then the key/item
            # pair with the match substrings highlighted.  Note: the headline
            # is emitted before the empty-item early return below.
            if not printed_headline:
                print(_('Matched from:'))
            item = ucd(item) or ""
            if item == "":
                return
            if matchfor:
                item = self._sub_highlight(item, highlight, matchfor, ignore_case=True)
            if can_overflow:
                print(self.fmtKeyValFill(key, item))
            else:
                print(key % item)
        def print_file_provides(item, printed_match):
            # Only path-looking terms are checked against the file list;
            # returns whether anything was printed.
            if not self.FILE_PROVIDE_RE.match(item):
                return False
            key = _("Filename : %s")
            file_match = False
            for filename in po.files:
                if fnmatch.fnmatch(filename, item):
                    print_highlighted_key_item(
                        key, filename, file_match or printed_match, can_overflow=False)
                    file_match = True
            return file_match
        if self.conf.showdupesfromrepos:
            msg = '%s : ' % po
        else:
            msg = '%s.%s : ' % (po.name, po.arch)
        msg = self.fmtKeyValFill(msg, po.summary or "")
        if matchfor:
            if highlight is None:
                highlight = self.conf.color_search_match
            msg = self._sub_highlight(msg, highlight, matchfor, ignore_case=True)
        print(msg)
        if verbose is None:
            verbose = self.conf.verbose
        if not verbose:
            return
        print(_("Repo : %s") % po.ui_from_repo)
        printed_match = False
        name_match = False
        for item in set(values):
            if po.summary == item:
                name_match = True
                continue # Skip double name/summary printing
            if po.description == item:
                key = _("Description : ")
                print_highlighted_key_item(key, item, printed_match, can_overflow=True)
                printed_match = True
            elif po.url == item:
                key = _("URL : %s")
                print_highlighted_key_item(key, item, printed_match, can_overflow=False)
                printed_match = True
            elif po.license == item:
                key = _("License : %s")
                print_highlighted_key_item(key, item, printed_match, can_overflow=False)
                printed_match = True
            elif print_file_provides(item, printed_match):
                printed_match = True
            else:
                # Fall back to matching against the package's provides.
                key = _("Provide : %s")
                for provide in po.provides:
                    provide = str(provide)
                    if fnmatch.fnmatch(provide, item):
                        print_highlighted_key_item(key, provide, printed_match, can_overflow=False)
                        printed_match = True
                    else:
                        # Retry with only the name part when the term carries
                        # no version comparison characters.
                        first_provide = provide.split()[0]
                        possible = set('=<>')
                        if any((char in possible) for char in item):
                            item_new = item.split()[0]
                        else:
                            item_new = item
                        if fnmatch.fnmatch(first_provide, item_new):
                            print_highlighted_key_item(
                                key, provide, printed_match, can_overflow=False)
                            printed_match = True
        if not any([printed_match, name_match]):
            # Nothing categorized: print the raw matched values.
            for item in set(values):
                key = _("Other : %s")
                print_highlighted_key_item(key, item, printed_match, can_overflow=False)
        print()
def matchcallback_verbose(self, po, values, matchfor=None):
"""Output search/provides type callback matches. This will
output more information than :func:`matchcallback`.
:param po: the package object that matched the search
:param values: the information associated with *po* that
matched the search
:param matchfor: a list of strings to be highlighted in the
output
"""
return self.matchcallback(po, values, matchfor, verbose=True)
def reportDownloadSize(self, packages, installonly=False):
    """Report the total download size for a set of packages

    :param packages: a list of package objects
    :param installonly: whether the transaction consists only of installations
    """
    totsize = 0   # total download size (bytes) of all packages
    locsize = 0   # portion already present and verified in the local cache
    insize = 0    # total installed size (only reported when installonly)
    error = False
    for pkg in packages:
        # Just to be on the safe side, if for some reason getting
        # the package size fails, log the error and don't report download
        # size
        try:
            size = int(pkg._size)
            totsize += size
            try:
                if pkg.verifyLocalPkg():
                    locsize += size
            except Exception:
                # best-effort: an unverifiable local copy just counts as
                # "needs download"
                pass

            if not installonly:
                continue

            try:
                size = int(pkg.installsize)
            except Exception:
                pass
            # NOTE(review): if reading pkg.installsize failed above, `size`
            # still holds the download size, which is what gets added here —
            # presumably an intentional best-effort fallback; verify upstream.
            insize += size
        except Exception:
            error = True
            msg = _('There was an error calculating total download size')
            logger.error(msg)
            break

    if not error:
        if locsize:
            logger.info(_("Total size: %s"),
                        format_number(totsize))
        if locsize != totsize:
            logger.info(_("Total download size: %s"),
                        format_number(totsize - locsize))
        if installonly:
            logger.info(_("Installed size: %s"), format_number(insize))
def reportRemoveSize(self, packages):
    """Report the total size of packages being removed.

    :param packages: a list of package objects
    """
    freed = 0
    had_error = False
    for pkg in packages:
        # Be defensive: a package object that cannot report its size
        # aborts the summary instead of printing a bogus number.
        try:
            freed += pkg._size
        except Exception:
            had_error = True
            logger.error(_('There was an error calculating installed size'))
            break
    if not had_error:
        logger.info(_("Freed space: %s"), format_number(freed))
def list_group_transaction(self, comps, history, diff):
    """Render a table of group-membership changes, or None when *diff*
    is empty.  Headers are emitted per change direction; the rows are
    spread into four columns and topped with a banner."""
    if not diff:
        return None

    out = []
    rows = []

    if diff.new_groups:
        out.append(_('Marking packages as installed by the group:'))
        for grp_id in diff.new_groups:
            added = list(diff.added_packages(grp_id))
            group_object = comps._group_by_id(grp_id)
            # Fall back to the raw id when comps no longer knows the group.
            if group_object:
                label = group_object.ui_name
            else:
                label = grp_id
            rows.extend(_spread_in_columns(4, "@" + label, added))

    if diff.removed_groups:
        out.append(_('Marking packages as removed by the group:'))
        for grp_id in diff.removed_groups:
            removed = list(diff.removed_packages(grp_id))
            label = history.group.get(grp_id).ui_name
            rows.extend(_spread_in_columns(4, "@" + label, removed))

    if rows:
        col_data = self._col_widths(rows)
        out.extend(self.fmtColumns(zip(row, col_data), ' ') for row in rows)
        out[0:0] = self._banner(col_data, (_('Group'), _('Packages'), '', ''))
    return '\n'.join(out)
def list_transaction(self, transaction, total_width=None):
    """Return a string representation of the transaction in an
    easy-to-read format.

    :param transaction: the transaction to render; ``None`` is treated
       as an empty package list (module/group changes may still print)
    :param total_width: optional cap on the table width; ``None`` lets
       :meth:`calcColumns` decide
    """
    forward_actions = hawkey.UPGRADE | hawkey.UPGRADE_ALL | hawkey.DISTUPGRADE | \
        hawkey.DISTUPGRADE_ALL | hawkey.DOWNGRADE | hawkey.INSTALL
    skipped_conflicts = set()
    skipped_broken = set()

    if transaction is None:
        # set empty transaction list instead of returning None
        # in order to display module changes when RPM transaction is empty
        transaction = []

    list_bunch = dnf.util._make_lists(transaction)
    pkglist_lines = []
    # per-field histograms of value widths, consumed by calcColumns below
    data = {'n' : {}, 'v' : {}, 'r' : {}}
    a_wid = 0 # Arch can't get "that big" ... so always use the max.

    def _add_line(lines, data, a_wid, po, obsoletes=[]):
        # Append one (name, arch, evr, repo, size, obsoletes, highlight)
        # row for *po* and update the width histograms.
        (n, a, e, v, r) = po.pkgtup
        evr = po.evr
        repoid = po._from_repo
        size = format_number(po._size)

        if a is None: # gpgkeys are weird
            a = 'noarch'

        # none, partial, full?
        if po._from_system:
            hi = self.conf.color_update_installed
        elif po._from_cmdline:
            hi = self.conf.color_update_local
        else:
            hi = self.conf.color_update_remote
        lines.append((n, a, evr, repoid, size, obsoletes, hi))
        # Create a dict of field_length => number of packages, for
        # each field.
        for (d, v) in (("n", len(n)), ("v", len(evr)), ("r", len(repoid))):
            data[d].setdefault(v, 0)
            data[d][v] += 1
        a_wid = max(a_wid, len(a))
        return a_wid

    ins_group_msg = _('Installing group/module packages') if dnf.base.WITH_MODULES \
        else _('Installing group packages')

    # --- RPM package sections, one per action kind ---
    for (action, pkglist) in [
            # TRANSLATORS: This is for a list of packages to be installed.
            (C_('summary', 'Installing'), list_bunch.installed),
            # TRANSLATORS: This is for a list of packages to be upgraded.
            (C_('summary', 'Upgrading'), list_bunch.upgraded),
            # TRANSLATORS: This is for a list of packages to be reinstalled.
            (C_('summary', 'Reinstalling'), list_bunch.reinstalled),
            (ins_group_msg, list_bunch.installed_group),
            (_('Installing dependencies'), list_bunch.installed_dep),
            (_('Installing weak dependencies'), list_bunch.installed_weak),
            # TRANSLATORS: This is for a list of packages to be removed.
            (_('Removing'), list_bunch.erased),
            (_('Removing dependent packages'), list_bunch.erased_dep),
            (_('Removing unused dependencies'), list_bunch.erased_clean),
            # TRANSLATORS: This is for a list of packages to be downgraded.
            (C_('summary', 'Downgrading'), list_bunch.downgraded)]:
        lines = []

        # build a reverse mapping to 'replaced_by'
        # this is required to achieve reasonable speed
        replaces = {}
        for tsi in transaction:
            if tsi.action != libdnf.transaction.TransactionItemAction_OBSOLETED:
                continue
            for i in tsi._item.getReplacedBy():
                replaces.setdefault(i, set()).add(tsi)

        for tsi in sorted(pkglist, key=lambda x: x.pkg):
            if tsi.action not in dnf.transaction.FORWARD_ACTIONS + [libdnf.transaction.TransactionItemAction_REMOVE]:
                continue

            # get TransactionItems obsoleted by tsi
            obsoleted = sorted(replaces.get(tsi._item, []))

            a_wid = _add_line(lines, data, a_wid, tsi.pkg, obsoleted)

        pkglist_lines.append((action, lines))

    # --- modularity sections (profiles, streams, module state) ---
    installedProfiles = sorted(dict(self.base._moduleContainer.getInstalledProfiles()).items())
    if installedProfiles:
        action = _("Installing module profiles")
        lines = []
        for name, profiles in installedProfiles:
            for profile in list(profiles):
                lines.append(("%s/%s" % (name, profile), "", "", "", "", "", ""))
        pkglist_lines.append((action, lines))

    removedProfiles = sorted(dict(self.base._moduleContainer.getRemovedProfiles()).items())
    if removedProfiles:
        action = _("Disabling module profiles")
        lines = []
        for name, profiles in removedProfiles:
            for profile in list(profiles):
                lines.append(("%s/%s" % (name, profile), "", "", "", "", "", ""))
        pkglist_lines.append((action, lines))

    enabledStreams = sorted(dict(self.base._moduleContainer.getEnabledStreams()).items())
    if enabledStreams:
        action = _("Enabling module streams")
        lines = []
        for name, stream in enabledStreams:
            lines.append((name, "", stream, "", "", "", ""))
        pkglist_lines.append((action, lines))

    switchedStreams = sorted(dict(self.base._moduleContainer.getSwitchedStreams()).items())
    if switchedStreams:
        action = _("Switching module streams")
        lines = []
        for name, stream in switchedStreams:
            lines.append((name, "", "%s -> %s" % (stream[0], stream[1]), "", "", "", ""))
        pkglist_lines.append((action, lines))

    disabledModules = sorted(list(self.base._moduleContainer.getDisabledModules()))
    if disabledModules:
        action = _("Disabling modules")
        lines = []
        for name in disabledModules:
            lines.append((name, "", "", "", "", "", ""))
        pkglist_lines.append((action, lines))

    resetModules = sorted(list(self.base._moduleContainer.getResetModules()))
    if resetModules:
        action = _("Resetting modules")
        lines = []
        for name in resetModules:
            lines.append((name, "", "", "", "", "", ""))
        pkglist_lines.append((action, lines))

    # --- comps group / environment sections ---
    if self.base._history:
        def format_line(group):
            # groups render as a single name column; the rest stay blank
            return (dnf.util._name_unset_wrapper(group.getName()), "", "", "", "", "", "")

        install_env_group = self.base._history.env._installed
        if install_env_group:
            action = _("Installing Environment Groups")
            lines = []
            for group in install_env_group.values():
                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        upgrade_env_group = self.base._history.env._upgraded
        if upgrade_env_group:
            action = _("Upgrading Environment Groups")
            lines = []
            for group in upgrade_env_group.values():
                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        remove_env_group = self.base._history.env._removed
        if remove_env_group:
            action = _("Removing Environment Groups")
            lines = []
            for group in remove_env_group.values():
                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        install_group = self.base._history.group._installed
        if install_group:
            action = _("Installing Groups")
            lines = []
            for group in install_group.values():
                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        upgrade_group = self.base._history.group._upgraded
        if upgrade_group:
            action = _("Upgrading Groups")
            lines = []
            for group in upgrade_group.values():
                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        remove_group = self.base._history.group._removed
        if remove_group:
            action = _("Removing Groups")
            lines = []
            for group in remove_group.values():
                lines.append(format_line(group))
            pkglist_lines.append((action, lines))

    # show skipped conflicting packages
    if (not self.conf.best or not self.conf.strict) and self.base._goal.actions & forward_actions:
        lines = []
        skipped_conflicts, skipped_broken = self.base._skipped_packages(
            report_problems=True, transaction=transaction)
        skipped_broken = dict((str(pkg), pkg) for pkg in skipped_broken)
        for pkg in sorted(skipped_conflicts):
            a_wid = _add_line(lines, data, a_wid, pkg, [])

        recommendations = ["--best"]
        if not self.base._allow_erasing:
            recommendations.append("--allowerasing")
        skip_str = _("Skipping packages with conflicts:\n"
                     "(add '%s' to command line "
                     "to force their upgrade)") % " ".join(recommendations)
        # remove misleading green color from the "packages with conflicts" lines
        lines = [i[:-1] + ("", ) for i in lines]
        pkglist_lines.append((skip_str, lines))

        lines = []
        for nevra, pkg in sorted(skipped_broken.items()):
            a_wid = _add_line(lines, data, a_wid, pkg, [])

        skip_str = _("Skipping packages with broken dependencies%s")
        if self.base.conf.upgrade_group_objects_upgrade:
            skip_str = skip_str % ""
        else:
            skip_str = skip_str % _(" or part of a group")
        # remove misleading green color from the "broken dependencies" lines
        lines = [i[:-1] + ("", ) for i in lines]
        pkglist_lines.append((skip_str, lines))

    # --- width calculation and final rendering ---
    output_width = self.term.columns
    if not data['n'] and not self.base._moduleContainer.isChanged() and not \
            (self.base._history and (self.base._history.group or self.base._history.env)):
        # nothing at all to report
        return u''
    else:
        data = [data['n'], {}, data['v'], data['r'], {}]
        columns = [1, a_wid, 1, 1, 5]
        columns = self.calcColumns(data, indent=" ", columns=columns,
                                   remainder_column=2, total_width=total_width)
    (n_wid, a_wid, v_wid, r_wid, s_wid) = columns
    real_width = sum(columns) + 5
    output_width = output_width if output_width >= real_width else real_width

    # Do not use 'Package' without context. Using context resolves
    # RhBug 1302935 as a side effect.
    msg_package = select_short_long(n_wid,
                                    # Translators: This is the short version of 'Package'. You can
                                    # use the full (unabbreviated) term 'Package' if you think that
                                    # the translation to your language is not too long and will
                                    # always fit to limited space.
                                    C_('short', 'Package'),
                                    # Translators: This is the full (unabbreviated) term 'Package'.
                                    C_('long', 'Package'))
    msg_arch = select_short_long(a_wid,
                                 # Translators: This is abbreviated 'Architecture', used when
                                 # we have not enough space to display the full word.
                                 C_('short', 'Arch'),
                                 # Translators: This is the full word 'Architecture', used when
                                 # we have enough space.
                                 C_('long', 'Architecture'))
    msg_version = select_short_long(v_wid,
                                    # Translators: This is the short version of 'Version'. You can
                                    # use the full (unabbreviated) term 'Version' if you think that
                                    # the translation to your language is not too long and will
                                    # always fit to limited space.
                                    C_('short', 'Version'),
                                    # Translators: This is the full (unabbreviated) term 'Version'.
                                    C_('long', 'Version'))
    msg_repository = select_short_long(r_wid,
                                       # Translators: This is abbreviated 'Repository', used when
                                       # we have not enough space to display the full word.
                                       C_('short', 'Repo'),
                                       # Translators: This is the full word 'Repository', used when
                                       # we have enough space.
                                       C_('long', 'Repository'))
    msg_size = select_short_long(s_wid,
                                 # Translators: This is the short version of 'Size'. It should
                                 # not be longer than 5 characters. If the term 'Size' in your
                                 # language is not longer than 5 characters then you can use it
                                 # unabbreviated.
                                 C_('short', 'Size'),
                                 # Translators: This is the full (unabbreviated) term 'Size'.
                                 C_('long', 'Size'))

    out = [u"%s\n%s\n%s\n" % ('=' * output_width,
                              self.fmtColumns(((msg_package, -n_wid),
                                               (msg_arch, -a_wid),
                                               (msg_version, -v_wid),
                                               (msg_repository, -r_wid),
                                               (msg_size, s_wid)), u" "),
                              '=' * output_width)]

    for (action, lines) in pkglist_lines:
        if lines:
            totalmsg = u"%s:\n" % action
        for (n, a, evr, repoid, size, obsoletes, hi) in lines:
            columns = ((n, -n_wid, hi), (a, -a_wid),
                       (evr, -v_wid), (repoid, -r_wid), (size, s_wid))
            msg = self.fmtColumns(columns, u" ", u"\n")
            hibeg, hiend = self._highlight(self.conf.color_update_installed)
            for obspo in sorted(obsoletes):
                appended = ' ' + _('replacing') + ' %s%s%s.%s %s\n'
                appended %= (hibeg, obspo.name, hiend, obspo.arch, obspo.evr)
                msg += appended
            totalmsg = totalmsg + msg
        if lines:
            out.append(totalmsg)

    out.append(_("""
Transaction Summary
%s
""") % ('=' * output_width))
    summary_data = (
        (_('Install'), len(list_bunch.installed) +
         len(list_bunch.installed_group) +
         len(list_bunch.installed_weak) +
         len(list_bunch.installed_dep), 0),
        (_('Upgrade'), len(list_bunch.upgraded), 0),
        (_('Remove'), len(list_bunch.erased) + len(list_bunch.erased_dep) +
         len(list_bunch.erased_clean), 0),
        (_('Downgrade'), len(list_bunch.downgraded), 0),
        (_('Skip'), len(skipped_conflicts) + len(skipped_broken), 0))
    max_msg_action = 0
    max_msg_count = 0
    max_msg_pkgs = 0
    max_msg_depcount = 0
    # first pass: measure column widths for the summary
    for action, count, depcount in summary_data:
        if not count and not depcount:
            continue
        msg_pkgs = P_('Package', 'Packages', count)
        len_msg_action = exact_width(action)
        len_msg_count = exact_width(unicode(count))
        len_msg_pkgs = exact_width(msg_pkgs)
        if depcount:
            len_msg_depcount = exact_width(unicode(depcount))
        else:
            len_msg_depcount = 0
        max_msg_action = max(len_msg_action, max_msg_action)
        max_msg_count = max(len_msg_count, max_msg_count)
        max_msg_pkgs = max(len_msg_pkgs, max_msg_pkgs)
        max_msg_depcount = max(len_msg_depcount, max_msg_depcount)
    # second pass: emit the aligned summary lines
    for action, count, depcount in summary_data:
        msg_pkgs = P_('Package', 'Packages', count)
        if depcount:
            msg_deppkgs = P_('Dependent package', 'Dependent packages',
                             depcount)
            action_msg = fill_exact_width(action, max_msg_action)
            if count:
                msg = '%s %*d %s (+%*d %s)\n'
                out.append(msg % (action_msg,
                                  max_msg_count, count,
                                  "%-*s" % (max_msg_pkgs, msg_pkgs),
                                  max_msg_depcount, depcount, msg_deppkgs))
            else:
                msg = '%s %s ( %*d %s)\n'
                out.append(msg % (action_msg,
                                  (max_msg_count + max_msg_pkgs) * ' ',
                                  max_msg_depcount, depcount, msg_deppkgs))
        elif count:
            msg = '%s %*d %s\n'
            out.append(msg % (fill_exact_width(action, max_msg_action),
                              max_msg_count, count, msg_pkgs))
    return ''.join(out)
def _pto_callback(self, action, tsis):
    # Render one post-transaction section: a "<action>:" header followed
    # by the transaction items spread over as many columns as fit.
    # Works a bit like calcColumns, but we never overflow a column we just
    # have a dynamic number of columns.
    def _fits_in_cols(msgs, num):
        """ Work out how many columns we can use to display stuff, in
        the post trans output. """
        if len(msgs) < num:
            return []

        left = self.term.columns - ((num - 1) + 2)
        if left <= 0:
            return []

        col_lens = [0] * num
        col = 0
        for msg in msgs:
            if len(msg) > col_lens[col]:
                diff = (len(msg) - col_lens[col])
                if left <= diff:
                    return []
                left -= diff
                col_lens[col] = len(msg)
            col += 1
            col %= len(col_lens)

        # distribute the leftover width evenly; negative widths mean
        # left-aligned for fmtColumns
        for col in range(len(col_lens)):
            col_lens[col] += left // num
            col_lens[col] *= -1
        return col_lens

    if not tsis:
        # NOTE(review): empty input returns '' while the normal path
        # returns a list — callers appear to tolerate both; verify in
        # dnf.util._post_transaction_output.
        return ''
    out = []
    msgs = []
    out.append('{}:'.format(action))
    for tsi in tsis:
        msgs.append(str(tsi))
    # try the widest layout first, falling back to fewer columns
    for num in (8, 7, 6, 5, 4, 3, 2):
        cols = _fits_in_cols(msgs, num)
        if cols:
            break
    if not cols:
        # nothing fits: one full-width left-aligned column
        cols = [-(self.term.columns - 2)]
    while msgs:
        current_msgs = msgs[:len(cols)]
        out.append(' {}'.format(self.fmtColumns(zip(current_msgs, cols))))
        msgs = msgs[len(cols):]
    return out
def post_transaction_output(self, transaction):
    """Build a human-readable summary of *transaction* with packages
    arranged into columns per section.

    The heavy lifting happens in ``dnf.util._post_transaction_output``;
    this method merely supplies the per-section rendering callback.
    """
    return dnf.util._post_transaction_output(
        self.base, transaction, self._pto_callback)
def setup_progress_callbacks(self):
    """Set up the progress callbacks and various
    output bars based on debug level.

    A visible download meter is only created when the debug level is at
    least 2; otherwise downloads run silently.
    """
    meter = None
    if self.conf.debuglevel >= 2:
        meter = dnf.cli.progress.MultiFileProgressMeter(fo=sys.stdout)
        self.progress = dnf.cli.progress.MultiFileProgressMeter(fo=sys.stdout)

    # setup our depsolve progress callback
    return (meter, DepSolveProgressCallBack())
def download_callback_total_cb(self, remote_size, download_start_timestamp):
    """Outputs summary information about the download process.

    :param remote_size: the total amount of information that was
       downloaded, in bytes
    :param download_start_timestamp: the time when the download
       process started, in seconds since the epoch
    """
    if remote_size <= 0:
        return

    term_width = self.term.columns
    logger.info("-" * term_width)

    # Clamp the elapsed time so the rate below never divides by zero.
    elapsed = max(0.01, time.time() - download_start_timestamp)
    stats = ' %5sB/s | %5sB %9s ' % (format_number(remote_size // elapsed),
                                     format_number(remote_size),
                                     format_time(elapsed))
    logger.info(fill_exact_width(_("Total"), term_width - len(stats)) + stats)
def _history_uiactions(self, hpkgs):
    """Summarize the history items *hpkgs* as ``(count, ui_string)``.

    The string is a comma-joined list of short action codes when
    several distinct actions occurred, otherwise the single long name.
    """
    long_names = set()
    short_names = set()
    count = 0
    hidden = (libdnf.transaction.TransactionItemAction_UPGRADED,
              libdnf.transaction.TransactionItemAction_DOWNGRADED)
    for pkg in hpkgs:
        # skip states we don't want to display in user input
        if pkg.action in hidden:
            continue
        long_names.add(pkg.action_name)
        short_names.add(pkg.action_short)
        count += 1

    if len(long_names) > 1:
        return count, ", ".join(sorted(short_names))

    # So empty transactions work, although that "shouldn't" really happen
    return count, "".join(list(long_names))
def _pwd_ui_username(self, uid, limit=None):
    """Return a display name for *uid*, progressively shortened to fit
    *limit*.  A list of uids maps to a list of names; unset sentinel
    uids render as the "System" pseudo-user; unknown uids fall back to
    the raw value."""
    if isinstance(uid, list):
        return [self._pwd_ui_username(u, limit) for u in uid]

    # loginuid is set to -1 (0xFFFF_FFFF) on init, in newer kernels.
    # loginuid is set to INT_MAX (0x7FFF_FFFF) on init, in older kernels.
    if uid is None or uid in (0xFFFFFFFF, 0x7FFFFFFF):
        loginid = _("<unset>")
        name = _("System") + " " + loginid
        if limit is not None and len(name) > limit:
            name = loginid
        return ucd(name)

    def _head(text, *split_args):
        # Like text.split(...)[0], but '' when split() yields nothing.
        parts = text.split(*split_args)
        return parts[0] if parts else ''

    try:
        entry = pwd.getpwuid(int(uid))
    except KeyError:
        # uid not present in the password database
        return ucd(uid)

    fullname = _head(ucd(entry.pw_gecos), ';', 2)
    user_name = ucd(entry.pw_name)
    name = "%s <%s>" % (fullname, user_name)
    if limit is not None and len(name) > limit:
        # too wide: drop everything after the first name ...
        name = "%s ... <%s>" % (_head(fullname), user_name)
        if len(name) > limit:
            # ... and finally fall back to just the login name
            name = "<%s>" % user_name
    return name
def historyListCmd(self, tids, reverse=False):
    """Output a list of information about the history of yum
    transactions.

    :param tids: transaction Ids; lists all transactions if empty
    :param reverse: when True, print oldest transactions first
    """
    transactions = self.history.old(tids)
    # `uids` doubles as a switch: exactly one uid means the "Command
    # line" column is shown, otherwise the "User name" column.
    if self.conf.history_list_view == 'users':
        uids = [1, 2]
    elif self.conf.history_list_view == 'commands':
        uids = [1]
    else:
        assert self.conf.history_list_view == 'single-user-commands'
        uids = set()
        done = 0
        blanks = 0
        for transaction in transactions:
            done += 1
            if transaction.cmdline is None:
                blanks += 1
            uids.add(transaction.loginuid)

    fmt = "%s | %s | %s | %s | %s"
    if len(uids) == 1:
        name = _("Command line")
        real_cols = self.term.real_columns
        if real_cols is None:
            # if output is redirected in `less` the columns
            # detected are None value, to detect terminal size
            # use stdin file descriptor
            real_cols = dnf.cli.term._real_term_width(0)
        if real_cols is None:
            # if even stdin fd fails use 24 to fit to 80 cols
            real_cols = 24
        name_width = real_cols - 55 if real_cols > 79 else 24
    else:
        # TRANSLATORS: user names who executed transaction in history command output
        name = _("User name")
        name_width = 24
    print(fmt % (fill_exact_width(_("ID"), 6, 6),
                 fill_exact_width(name, name_width, name_width),
                 fill_exact_width(_("Date and time"), 16, 16),
                 fill_exact_width(_("Action(s)"), 14, 14),
                 fill_exact_width(_("Altered"), 7, 7)))
    # total table width: each column length +3 (padding and separator between columns)
    table_width = 6 + 3 + name_width + 3 + 16 + 3 + 14 + 3 + 7
    print("-" * table_width)
    fmt = "%6u | %s | %-16.16s | %s | %4u"

    if reverse is True:
        transactions = reversed(transactions)
    for transaction in transactions:
        if len(uids) == 1:
            name = transaction.cmdline or ''
        else:
            name = self._pwd_ui_username(transaction.loginuid, 24)
        name = ucd(name)
        # TRANSLATORS: This is the time format for dnf history list.
        # You can change it but do it with caution because the output
        # must be no longer than 16 characters. Format specifiers:
        # %Y - year number (4 digits), %m - month (00-12), %d - day
        # number (01-31), %H - hour (00-23), %M - minute (00-59).
        tm = time.strftime(_("%Y-%m-%d %H:%M"),
                           time.localtime(transaction.beg_timestamp))
        num, uiacts = self._history_uiactions(transaction.data())
        name = fill_exact_width(name, name_width, name_width)
        uiacts = fill_exact_width(uiacts, 14, 14)
        rmark = lmark = ' '
        if transaction.return_code is None:
            rmark = lmark = '*'
        elif transaction.return_code:
            rmark = lmark = '#'
            # We don't check .errors, because return_code will be non-0
        elif transaction.is_output:
            rmark = lmark = 'E'
        # angle brackets flag rpmdb changes made outside of dnf
        if transaction.altered_lt_rpmdb:
            rmark = '<'
        if transaction.altered_gt_rpmdb:
            lmark = '>'
        print(fmt % (transaction.tid, name, tm, uiacts, num), "%s%s" % (lmark, rmark))
def historyInfoCmd(self, tids, pats=[], mtids=set()):
    """Output information about a transaction in history

    :param tids: transaction Ids; prints info for the last transaction if empty
    :param pats: patterns forwarded for highlighting in the package lists
    :param mtids: set of (begin_tid, end_tid) ranges whose transactions
       are reported merged into one
    :raises dnf.exceptions.Error in case no transactions were found
    """
    tids = set(tids)
    last = self.history.last()
    if last is None:
        logger.critical(_('No transactions'))
        raise dnf.exceptions.Error(_('Failed history info'))

    lasttid = last.tid
    lastdbv = last.end_rpmdb_version

    transactions = []
    if not tids:
        # no explicit ids: fall back to the most recent transaction,
        # even an incomplete one
        last = self.history.last(complete_transactions_only=False)
        if last is not None:
            tids.add(last.tid)
            transactions.append(last)
    else:
        transactions = self.history.old(tids)

    if not tids:
        logger.critical(_('No transaction ID, or package, given'))
        raise dnf.exceptions.Error(_('Failed history info'))

    bmtid, emtid = -1, -1
    mobj = None   # accumulator for the currently-merged range
    done = False  # whether anything was printed yet (controls separators)
    if mtids:
        mtids = sorted(mtids)
        bmtid, emtid = mtids.pop()

    for trans in transactions:
        if lastdbv is not None and trans.tid == lasttid:
            # If this is the last transaction, is good and it doesn't
            # match the current rpmdb ... then mark it as bad.
            rpmdbv = self.base._ts.dbCookie()
            trans.compare_rpmdbv(str(rpmdbv))
            lastdbv = None

        merged = False

        if trans.tid >= bmtid and trans.tid <= emtid:
            # transaction falls inside the active merge range
            if mobj is None:
                mobj = MergedTransactionWrapper(trans)
            else:
                mobj.merge(trans)
            merged = True
        elif mobj is not None:
            # left the merge range: flush the accumulated merged report
            if done:
                print("-" * 79)
            done = True

            self._historyInfoCmd(mobj)
            mobj = None

            if mtids:
                bmtid, emtid = mtids.pop()
                if trans.tid >= bmtid and trans.tid <= emtid:
                    mobj = trans
                    merged = True

        if not merged:
            if done:
                print("-" * 79)
            done = True
            self._historyInfoCmd(trans, pats)

    if mobj is not None:
        # flush a merge range that ran to the end of the list
        if done:
            print("-" * 79)
        self._historyInfoCmd(mobj)
def _historyInfoCmd(self, old, pats=[]):
    """Print the detailed report for one (possibly merged) transaction.

    :param old: a transaction wrapper (or merged wrapper) to describe
    :param pats: patterns forwarded to :meth:`historyInfoCmdPkgsAltered`
       for highlighting
    """
    loginuid = old.loginuid
    if isinstance(loginuid, int):
        loginuid = [loginuid]
    name = [self._pwd_ui_username(uid) for uid in loginuid]

    # Two label sets: one for packages that were part of the transaction
    # ("installed" view) and one for auxiliary packages ("available" view).
    _pkg_states_installed = {'i' : _('Installed'), 'e' : _('Erased'),
                             'o' : _('Upgraded'), 'n' : _('Downgraded')}
    _pkg_states_available = {'i' : _('Installed'), 'e' : _('Not installed'),
                             'o' : _('Older'), 'n' : _('Newer')}
    maxlen = max([len(x) for x in (list(_pkg_states_installed.values()) +
                                   list(_pkg_states_available.values()))])
    _pkg_states_installed['maxlen'] = maxlen
    _pkg_states_available['maxlen'] = maxlen

    def _simple_pkg(pkg, prefix_len, was_installed=False, highlight=False,
                    pkg_max_len=0, show_repo=True):
        # Print one aligned "<state> <nevra> <repo>" line comparing *pkg*
        # against what is currently installed.
        prefix = " " * prefix_len
        if was_installed:
            _pkg_states = _pkg_states_installed
        else:
            _pkg_states = _pkg_states_available
        state = _pkg_states['i']

        # get installed packages with name = pkg.name
        ipkgs = self.sack.query().installed().filterm(name=pkg.name).run()

        if not ipkgs:
            state = _pkg_states['e']
        else:
            # get latest installed package from software database
            inst_pkg = self.history.package(ipkgs[0])
            if inst_pkg:
                res = pkg.compare(inst_pkg)
                # res is:
                # 0 if inst_pkg == pkg
                # > 0 when inst_pkg > pkg
                # < 0 when inst_pkg < pkg
                if res == 0:
                    pass # installed
                elif res > 0:
                    state = _pkg_states['o'] # updated
                else:
                    state = _pkg_states['n'] # downgraded
        if highlight:
            (hibeg, hiend) = self._highlight('bold')
        else:
            (hibeg, hiend) = self._highlight('normal')
        state = fill_exact_width(state, _pkg_states['maxlen'])
        ui_repo = ''
        if show_repo:
            ui_repo = pkg.ui_from_repo()
        print("%s%s%s%s %-*s %s" % (prefix, hibeg, state, hiend,
                                    pkg_max_len, str(pkg), ui_repo))

    tids = old.tids()
    if len(tids) > 1:
        # merged view: show the tid range
        print(_("Transaction ID :"), "%u..%u" % (tids[0], tids[-1]))
    else:
        print(_("Transaction ID :"), tids[0])
    begt = float(old.beg_timestamp)
    begtm = time.strftime("%c", time.localtime(begt))
    print(_("Begin time :"), begtm)
    if old.beg_rpmdb_version is not None:
        if old.altered_lt_rpmdb:
            print(_("Begin rpmdb :"), old.beg_rpmdb_version, "**")
        else:
            print(_("Begin rpmdb :"), old.beg_rpmdb_version)
    if old.end_timestamp is not None:
        endt = old.end_timestamp
        endtm = time.strftime("%c", time.localtime(endt))
        diff = endt - begt
        # render the duration in the coarsest unit that keeps it < 5 units
        if diff < 5 * 60:
            diff = _("(%u seconds)") % diff
        elif diff < 5 * 60 * 60:
            diff = _("(%u minutes)") % (diff // 60)
        elif diff < 5 * 60 * 60 * 24:
            diff = _("(%u hours)") % (diff // (60 * 60))
        else:
            diff = _("(%u days)") % (diff // (60 * 60 * 24))
        print(_("End time :"), endtm, diff)
    if old.end_rpmdb_version is not None:
        if old.altered_gt_rpmdb:
            print(_("End rpmdb :"), old.end_rpmdb_version, "**")
        else:
            print(_("End rpmdb :"), old.end_rpmdb_version)
    if isinstance(name, (list, tuple)):
        # merged transactions may involve several users; print each once
        seen = set()
        for i in name:
            if i in seen:
                continue
            seen.add(i)
            print(_("User :"), i)
    else:
        print(_("User :"), name)
    if isinstance(old.return_code, (list, tuple)):
        codes = old.return_code
        if codes[0] is None:
            print(_("Return-Code :"), "**", _("Aborted"), "**")
            codes = codes[1:]
        elif not all(codes):
            print(_("Return-Code :"), _("Success"))
        elif codes:
            print(_("Return-Code :"), _("Failures:"), ", ".join([str(i) for i in codes]))
    elif old.return_code is None:
        print(_("Return-Code :"), "**", _("Aborted"), "**")
    elif old.return_code:
        print(_("Return-Code :"), _("Failure:"), old.return_code)
    else:
        print(_("Return-Code :"), _("Success"))

    if isinstance(old.releasever, (list, tuple)):
        seen = set()
        for i in old.releasever:
            if i in seen:
                continue
            seen.add(i)
            print(_("Releasever :"), i)
    else:
        print(_("Releasever :"), old.releasever)

    if old.cmdline is not None:
        if isinstance(old.cmdline, (list, tuple)):
            for cmdline in old.cmdline:
                print(_("Command Line :"), cmdline)
        else:
            print(_("Command Line :"), old.cmdline)

    if old.comment is not None:
        if isinstance(old.comment, (list, tuple)):
            for comment in old.comment:
                print(_("Comment :"), comment)
        else:
            print(_("Comment :"), old.comment)

    perf_with = old.performed_with()
    if perf_with:
        print(_("Transaction performed with:"))
        max_len = 0
        for with_pkg in perf_with:
            str_len = len(str(with_pkg))
            if str_len > max_len:
                max_len = str_len
        for with_pkg in perf_with:
            _simple_pkg(with_pkg, 4, was_installed=True, pkg_max_len=max_len)

    print(_("Packages Altered:"))
    self.historyInfoCmdPkgsAltered(old, pats)

    t_out = old.output()
    if t_out:
        print(_("Scriptlet output:"))
        num = 0
        for line in t_out:
            num += 1
            print("%4d" % num, line)
    t_err = old.error()
    if t_err:
        print(_("Errors:"))
        num = 0
        for line in t_err:
            num += 1
            print("%4d" % num, line)
# TODO: remove
# Maps raw history action names (as stored by libdnf) to the translated
# labels used in the history UI output.
_history_state2uistate = {'True-Install' : _('Install'),
                          'Install' : _('Install'),
                          'Dep-Install' : _('Dep-Install'),
                          'Obsoleted' : _('Obsoleted'),
                          'Obsoleting' : _('Obsoleting'),
                          'Erase' : _('Erase'),
                          'Reinstall' : _('Reinstall'),
                          'Downgrade' : _('Downgrade'),
                          'Downgraded' : _('Downgraded'),
                          'Update' : _('Upgrade'),
                          'Updated' : _('Upgraded'),
                          }
def historyInfoCmdPkgsAltered(self, old, pats=[]):
    """Print information about how packages are altered in a transaction.

    :param old: the :class:`DnfSwdbTrans` to
       print information about
    :param pats: a list of patterns.  Packages that match a patten
       in *pats* will be highlighted in the output
    """
    # Note that these don't use _simple_pkg() because we are showing what
    # happened to them in the transaction ... not the difference between the
    # version in the transaction and now.
    all_uistates = self._history_state2uistate
    packages = old.packages()

    # First pass: measure the state-label and NEVRA column widths.
    state_width = 0
    nevra_width = 0
    for pkg in packages:
        label = all_uistates.get(pkg.action_name, pkg.action_name)
        state_width = max(state_width, len(label))
        nevra_width = max(nevra_width, len(str(pkg)))

    # Second pass: print one aligned row per package.
    for pkg in packages:
        prefix = " " * 4
        if pkg.state != libdnf.transaction.TransactionItemState_DONE:
            # item did not complete: flag it visibly
            prefix = " ** "

        highlight = 'normal'
        if pats:
            if any([pkg.match(pat) for pat in pats]):
                highlight = 'bold'
        (hibeg, hiend) = self._highlight(highlight)

        label = all_uistates.get(pkg.action_name, pkg.action_name)
        label = fill_exact_width(ucd(label), state_width)
        print("%s%s%s%s %-*s %s" % (prefix, hibeg, label, hiend,
                                    nevra_width, str(pkg),
                                    pkg.ui_from_repo()))
class DepSolveProgressCallBack(dnf.callback.Depsolve):
    """Provides text output callback functions for Dependency Solver callback."""

    def pkg_added(self, pkg, mode):
        """Print information about a package being added to the
        transaction set.

        :param pkg: the package being added
        :param mode: a short string indicating why the package is
           being added to the transaction set.

           Valid current values for *mode* are::

              i = the package will be installed
              u = the package will be an update
              e = the package will be erased
              r = the package will be reinstalled
              d = the package will be a downgrade
              o = the package will be obsoleting another package
              ud = the package will be updated
              od = the package will be obsoleted
        """
        # Dispatch table instead of an if/elif chain; unknown modes are
        # silently ignored, exactly as before.
        messages = {
            'i': _('---> Package %s.%s %s will be installed'),
            'u': _('---> Package %s.%s %s will be an upgrade'),
            'e': _('---> Package %s.%s %s will be erased'),
            'r': _('---> Package %s.%s %s will be reinstalled'),
            'd': _('---> Package %s.%s %s will be a downgrade'),
            'o': _('---> Package %s.%s %s will be obsoleting'),
            'ud': _('---> Package %s.%s %s will be upgraded'),
            'od': _('---> Package %s.%s %s will be obsoleted'),
        }
        output = messages.get(mode)
        if output:
            logger.debug(output, pkg.name, pkg.arch, pkg.evr)

    def start(self):
        """Perform setup at the beginning of the dependency solving
        process.
        """
        logger.debug(_('--> Starting dependency resolution'))

    def end(self):
        """Output a message stating that dependency resolution has finished."""
        logger.debug(_('--> Finished dependency resolution'))
class CliKeyImport(dnf.callback.KeyImport):
    """Interactive CLI policy for confirming GPG key imports."""

    def __init__(self, base, output):
        self.base = base
        self.output = output

    def _confirm(self, id, userid, fingerprint, url, timestamp):
        # Show the key details, then decide per configuration: assumeyes
        # accepts, assumeno rejects, otherwise ask the user.
        def short_id(key_id):
            pad = '0' if dnf.pycomp.PY3 else b'0'
            return key_id[-8:].rjust(8, pad)

        msg = (_('Importing GPG key 0x%s:\n'
                 ' Userid : "%s"\n'
                 ' Fingerprint: %s\n'
                 ' From : %s') %
               (short_id(id), userid,
                dnf.crypto._printable_fingerprint(fingerprint),
                url.replace("file://", "")))

        logger.critical("%s", msg)

        if self.base.conf.assumeyes:
            return True
        if self.base.conf.assumeno:
            return False
        return self.output.userconfirm()
class CliTransactionDisplay(TransactionDisplay):
"""A YUM specific callback class for RPM operations."""
width = property(lambda self: dnf.cli.term._term_width())
def __init__(self):
    """Initialize progress-line state and progress-bar settings."""
    super(CliTransactionDisplay, self).__init__()
    self.lastmsg = ""        # last progress line emitted, to avoid repeats
    self.lastpackage = None # name of last package we looked at
    self.output = True       # whether to emit progress output at all

    # for a progress bar
    self.mark = "="
    self.marks = 22
def progress(self, package, action, ti_done, ti_total, ts_done, ts_total):
"""Output information about an rpm operation. This may
include a text progress bar.
:param package: the package involved in the event
:param action: the type of action that is taking place. Valid
values are given by
:func:`rpmtrans.TransactionDisplay.action.keys()`
:param ti_done: a number representing the amount of work
already done in the current transaction
:param ti_total: a number representing the total amount of work
to be done in the current transaction
:param ts_done: the number of the current transaction in
transaction set
:param ts_total: the total number of transactions in the
transaction set
"""
action_str = dnf.transaction.ACTIONS.get(action)
if action_str is None:
return
wid1 = self._max_action_width()
pkgname = ucd(package)
self.lastpackage = package
if ti_total == 0:
percent = 0
else:
percent = (ti_done*long(100))//ti_total
self._out_progress(ti_done, ti_total, ts_done, ts_total,
percent, action_str, pkgname, wid1)
def _max_action_width(self):
if not hasattr(self, '_max_action_wid_cache'):
wid1 = 0
for val in dnf.transaction.ACTIONS.values():
wid_val = exact_width(val)
if wid1 < wid_val:
wid1 = wid_val
self._max_action_wid_cache = wid1
wid1 = self._max_action_wid_cache
return wid1
def _out_progress(self, ti_done, ti_total, ts_done, ts_total,
percent, process, pkgname, wid1):
if self.output and (sys.stdout.isatty() or ti_done == ti_total):
(fmt, wid1, wid2) = self._makefmt(percent, ts_done, ts_total,
progress=sys.stdout.isatty(),
pkgname=pkgname, wid1=wid1)
pkgname = ucd(pkgname)
msg = fmt % (fill_exact_width(process, wid1, wid1),
fill_exact_width(pkgname, wid2, wid2))
if msg != self.lastmsg:
dnf.util._terminal_messenger('write_flush', msg, sys.stdout)
self.lastmsg = msg
if ti_done == ti_total:
print(" ")
def _makefmt(self, percent, ts_done, ts_total, progress=True,
pkgname=None, wid1=15):
l = len(str(ts_total))
size = "%s.%s" % (l, l)
fmt_done = "%" + size + "s/%" + size + "s"
done = fmt_done % (ts_done, ts_total)
# This should probably use TerminLine, but we don't want to dep. on
# that. So we kind do an ok job by hand ... at least it's dynamic now.
if pkgname is None:
pnl = 22
else:
pnl = exact_width(pkgname)
overhead = (2 * l) + 2 # Length of done, above
overhead += 2 + wid1 +2 # Length of beginning (" " action " :")
overhead += 1 # Space between pn and done
overhead += 2 # Ends for progress
overhead += 1 # Space for end
width = self.width
if width < overhead:
width = overhead # Give up
width -= overhead
if pnl > width // 2:
pnl = width // 2
marks = self.width - (overhead + pnl)
width = "%s.%s" % (marks, marks)
fmt_bar = "[%-" + width + "s]"
# pnl = str(28 + marks + 1)
full_pnl = pnl + marks + 1
if progress and percent == 100: # Don't chop pkg name on 100%
fmt = "\r %s: %s " + done
wid2 = full_pnl
elif progress:
if marks > 5:
bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
else:
bar = ""
fmt = "\r %s: %s " + bar + " " + done
wid2 = pnl
elif percent == 100:
fmt = " %s: %s " + done
wid2 = full_pnl
else:
if marks > 5:
bar = fmt_bar % (self.mark * marks, )
else:
bar = ""
fmt = " %s: %s " + bar + " " + done
wid2 = pnl
return fmt, wid1, wid2
def progressbar(current, total, name=None):
    """Output the current status to the terminal using a simple
    text progress bar consisting of 50 # marks.

    :param current: a number representing the amount of work
       already done
    :param total: a number representing the total amount of work
       to be done
    :param name: a name to label the progress bar with
    """
    mark = '#'
    # Live progress only makes sense on a terminal.
    if not sys.stdout.isatty():
        return
    if current == 0:
        percent = 0
    else:
        if total != 0:
            percent = float(current) / total
        else:
            percent = 0
    width = dnf.cli.term._term_width()
    if name is None and current == total:
        # Completed unnamed bar gets a placeholder label.
        name = '-'
    end = ' %d/%d' % (current, total)
    width -= len(end) + 1
    if width < 0:
        width = 0
    if name is None:
        # Unnamed: the bar (minus its [ ] brackets) fills the line.
        width -= 2
        if width < 0:
            width = 0
        hashbar = mark * int(width * percent)
        output = '\r[%-*s]%s' % (width, hashbar, end)
    elif current == total: # Don't chop name on 100%
        output = '\r%s%s' % (fill_exact_width(name, width, width), end)
    else:
        # Named: split the line between the (possibly truncated) name
        # and the bar; the name gets at most half the space.
        width -= 4
        if width < 0:
            width = 0
        nwid = width // 2
        if nwid > exact_width(name):
            nwid = exact_width(name)
        width -= nwid
        hashbar = mark * int(width * percent)
        output = '\r%s: [%-*s]%s' % (fill_exact_width(name, nwid, nwid), width,
                                     hashbar, end)
    if current <= total:
        dnf.util._terminal_messenger('write', output, sys.stdout)
    if current == total:
        # Finish the line once the work is complete.
        dnf.util._terminal_messenger('write', '\n', sys.stdout)
    dnf.util._terminal_messenger('flush', out=sys.stdout)
| 89,077
|
Python
|
.py
| 1,908
| 33.280398
| 134
| 0.532409
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,751
|
format.py
|
rpm-software-management_dnf/dnf/cli/format.py
|
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
from __future__ import unicode_literals
from dnf.pycomp import long
def format_number(number, SI=0, space=' '):
    """Return a human-readable metric-like string representation
    of a number.

    :param number: the number to be converted to a human-readable form
    :param SI: If is 0, this function will use the convention
       that 1 kilobyte = 1024 bytes, otherwise, the convention
       that 1 kilobyte = 1000 bytes will be used
    :param space: string that will be placed between the number
       and the SI prefix
    :return: a human-readable metric-like string representation of
       *number*
    """
    # Metric prefixes from none up to yotta.
    prefixes = (' ', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    step = 1000.0 if SI else 1024.0
    if number is None:
        number = 0.0
    # Scale down until the value fits in three digits, or we exhaust
    # the prefix table (then the width is off but the value correct).
    depth = 0
    while number > 999 and depth < len(prefixes) - 1:
        depth += 1
        number = number / step
    if isinstance(number, int) or isinstance(number, long):
        fmt = '%i%s%s'
    elif number < 9.95:
        # 9.95 keeps '%.1f' from rounding 9.99 into the too-wide '10.0'.
        fmt = '%.1f%s%s'
    else:
        fmt = '%.0f%s%s'
    return fmt % (float(number or 0), space, prefixes[depth])
def format_time(seconds, use_hours=0):
    """Return a human-readable string representation of a number
    of seconds. The string will show seconds, minutes, and
    optionally hours.

    :param seconds: the number of seconds to convert to a
       human-readable form
    :param use_hours: If use_hours is 0, the representation will
       be in minutes and seconds. Otherwise, it will be in hours,
       minutes, and seconds
    :return: a human-readable string representation of *seconds*
    """
    # Unknown/negative durations render as dashes.
    if seconds is None or seconds < 0:
        return '--:--:--' if use_hours else '--:--'
    if seconds == float('inf'):
        return 'Infinite'
    total = int(seconds)
    mins, secs = divmod(total, 60)
    if not use_hours:
        return '%02i:%02i' % (mins, secs)
    hours, mins = divmod(mins, 60)
    return '%02i:%02i:%02i' % (hours, mins, secs)
def indent_block(s):
    """Return *s* with every line prefixed by a single space."""
    lines = s.splitlines()
    return '\n'.join(' ' + line for line in lines)
| 3,846
|
Python
|
.py
| 90
| 36.1
| 77
| 0.648049
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,752
|
demand.py
|
rpm-software-management_dnf/dnf/cli/demand.py
|
# demand.py
# Demand sheet and related classes.
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
class _BoolDefault(object):
    """Data descriptor holding a default until explicitly assigned.

    The value is stored in the owner instance's __dict__ under a key
    unique to this descriptor object.  Once a value has been stored,
    assigning a *different* value raises AttributeError; re-assigning
    the same value is allowed.
    """
    def __init__(self, default):
        self.default = default
        # Key unique per descriptor instance, so several demands can
        # coexist on one owner instance.
        self._storing_name = '__%s%x' % (self.__class__.__name__, id(self))
    def __get__(self, obj, objtype=None):
        # Fall back to the default until a value has been stored.
        return obj.__dict__.get(self._storing_name, self.default)
    def __set__(self, obj, val):
        stored = obj.__dict__
        if self._storing_name in stored and stored[self._storing_name] != val:
            raise AttributeError('Demand already set.')
        stored[self._storing_name] = val
class DemandSheet(object):
    """Collection of demands that different CLI parts have on other parts. :api"""
    # :api...
    # Each _BoolDefault demand may be set once; conflicting re-assignment
    # raises AttributeError (see _BoolDefault).
    allow_erasing = _BoolDefault(False)
    available_repos = _BoolDefault(False)
    resolving = _BoolDefault(False)
    root_user = _BoolDefault(False)
    sack_activation = _BoolDefault(False)
    load_system_repo = _BoolDefault(True)
    # Plain int, not a demand: process exit code used on success.
    success_exit_status = 0
    cacheonly = _BoolDefault(False)
    fresh_metadata = _BoolDefault(True)
    freshest_metadata = _BoolDefault(False)
    changelogs = _BoolDefault(False)
    # Optional TransactionDisplay instance; None means use the default.
    transaction_display = None
    # This demand controls applicability of the plugins that could filter
    # repositories packages (e.g. versionlock).
    # If it stays None, the demands.resolving is used as a fallback.
    plugin_filtering_enabled = _BoolDefault(None)
| 2,535
|
Python
|
.py
| 55
| 41.272727
| 82
| 0.709717
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,753
|
utils.py
|
rpm-software-management_dnf/dnf/cli/utils.py
|
# Copyright (C) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Various utility functions, and a utility class."""
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli.format import format_number
from dnf.i18n import _
import dnf.util
import logging
import os
import time
_USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
logger = logging.getLogger('dnf')
def jiffies_to_seconds(jiffies):
    """Convert a number of jiffies to seconds. How many jiffies are in a second
    is system-dependent, e.g. 100 jiffies = 1 second is common.

    :param jiffies: a number of jiffies
    :return: the equivalent number of seconds
    """
    # _USER_HZ is read once at module import via sysconf(SC_CLK_TCK).
    return int(jiffies) / _USER_HZ
def seconds_to_ui_time(seconds):
    """Return a human-readable string representation of the length of
    a time interval given in seconds.

    :param seconds: the length of the time interval in seconds
    :return: a human-readable string representation of the length of
       the time interval
    """
    minutes, secs = divmod(seconds, 60)
    hours, mins = divmod(minutes, 60)
    days, hrs = divmod(hours, 24)
    if days:
        # Day-scale intervals spell out the day count explicitly.
        return "%d day(s) %d:%02d:%02d" % (days, hrs, mins, secs)
    if hours:
        return "%d:%02d:%02d" % (hours, mins, secs)
    return "%02d:%02d" % (minutes, secs)
def get_process_info(pid):
    """Return info dict about a process.

    Reads /proc/<pid>/status, /proc/stat and /proc/<pid>/stat.  Returns
    None when /proc is unavailable, the process lacks vmrss/vmsize
    entries, or the pid/boot time cannot be parsed.
    """
    try:
        pid = int(pid)
        # Maybe true if /proc isn't mounted, or not Linux ... or something.
        if (not os.path.exists("/proc/%d/status" % pid) or
            not os.path.exists("/proc/stat") or
            not os.path.exists("/proc/%d/stat" % pid)):
            return None
        ps = {}
        with open("/proc/%d/status" % pid) as status_file:
            for line in status_file:
                # Ignore a possibly truncated last line.
                if line[-1] != '\n':
                    continue
                data = line[:-1].split(':\t', 1)
                if len(data) < 2:
                    continue
                # Strip the ' kB' unit suffix from memory fields.
                data[1] = dnf.util.rtrim(data[1], ' kB')
                ps[data[0].strip().lower()] = data[1].strip()
        if 'vmrss' not in ps:
            return None
        if 'vmsize' not in ps:
            return None
        boot_time = None
        with open("/proc/stat") as stat_file:
            for line in stat_file:
                if line.startswith("btime "):
                    boot_time = int(line[len("btime "):-1])
                    break
        if boot_time is None:
            return None
        with open('/proc/%d/stat' % pid) as stat_file:
            ps_stat = stat_file.read().split()
        # Field 21 of /proc/<pid>/stat is the start time in jiffies
        # since boot; add to boot time for an absolute timestamp.
        ps['start_time'] = boot_time + jiffies_to_seconds(ps_stat[21])
        ps['state'] = {'R' : _('Running'),
                       'S' : _('Sleeping'),
                       'D' : _('Uninterruptible'),
                       'Z' : _('Zombie'),
                       'T' : _('Traced/Stopped')
                       }.get(ps_stat[2], _('Unknown'))
        return ps
    except (OSError, ValueError) as e:
        logger.error("Failed to get process info: %s", e)
        return None
def show_lock_owner(pid):
    """Output information about process holding a lock.

    Logs (at critical level) the name, memory usage, start time and
    state of the process with *pid*, or a fallback message when the
    process info cannot be read.
    """
    ps = get_process_info(pid)
    if not ps:
        msg = _('Unable to find information about the locking process (PID %d)')
        logger.critical(msg, pid)
        return
    msg = _('  The application with PID %d is: %s') % (pid, ps['name'])
    logger.critical("%s", msg)
    # /proc reports vmrss/vmsize in kB; convert to bytes for formatting.
    logger.critical(_("    Memory : %5s RSS (%5sB VSZ)"),
                    format_number(int(ps['vmrss']) * 1024),
                    format_number(int(ps['vmsize']) * 1024))
    ago = seconds_to_ui_time(int(time.time()) - ps['start_time'])
    logger.critical(_('    Started: %s - %s ago'),
                    dnf.util.normalize_time(ps['start_time']), ago)
    logger.critical(_('    State  : %s'), ps['state'])
    return
| 4,818
|
Python
|
.py
| 110
| 34.127273
| 80
| 0.566489
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,754
|
completion_helper.py.in
|
rpm-software-management_dnf/dnf/cli/completion_helper.py.in
|
#!@PYTHON_EXECUTABLE@
#
# This file is part of dnf.
#
# Copyright 2015 (C) Igor Gnatenko <i.gnatenko.brain@gmail.com>
# Copyright 2016 (C) Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import dnf.exceptions
import dnf.cli
import dnf.cli.commands.clean
import sys
def filter_list_by_kw(kw, lst):
    """Lazily yield the entries of *lst* whose string form starts with *kw*."""
    return (entry for entry in lst if str(entry).startswith(kw))
def listpkg_to_setstr(pkgs):
    """Return the set of string representations of *pkgs*."""
    return {str(pkg) for pkg in pkgs}
class RemoveCompletionCommand(dnf.cli.commands.remove.RemoveCommand):
    """Completion variant of ``remove``: prints installed package matches."""
    def __init__(self, args):
        super(RemoveCompletionCommand, self).__init__(args)
    def configure(self):
        # Completion must run unprivileged; only installed packages are
        # needed, so no repo download is demanded.
        self.cli.demands.root_user = False
        self.cli.demands.sack_activation = True
    def run(self):
        for pkg in ListCompletionCommand.installed(self.base, self.opts.pkg_specs):
            print(str(pkg))
class InstallCompletionCommand(dnf.cli.commands.install.InstallCommand):
    """Completion variant of ``install``: prints installable package matches."""
    def __init__(self, args):
        super(InstallCompletionCommand, self).__init__(args)
    def configure(self):
        # Completion must run unprivileged and needs repository metadata.
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self):
        installed = listpkg_to_setstr(ListCompletionCommand.installed(self.base,
                                                                      self.opts.pkg_specs))
        available = listpkg_to_setstr(ListCompletionCommand.available(self.base,
                                                                      self.opts.pkg_specs))
        # Only packages not yet installed are install candidates.
        for pkg in (available - installed):
            print(str(pkg))
class ReinstallCompletionCommand(dnf.cli.commands.reinstall.ReinstallCommand):
    """Completion variant of ``reinstall``: prints reinstallable matches."""
    def __init__(self, args):
        super(ReinstallCompletionCommand, self).__init__(args)
    def configure(self):
        # Completion must run unprivileged and needs repository metadata.
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self):
        installed = listpkg_to_setstr(ListCompletionCommand.installed(self.base,
                                                                      self.opts.pkg_specs))
        available = listpkg_to_setstr(ListCompletionCommand.available(self.base,
                                                                      self.opts.pkg_specs))
        # Only packages both installed and still available can be reinstalled.
        for pkg in (installed & available):
            print(str(pkg))
class ListCompletionCommand(dnf.cli.commands.ListCommand):
    """Completion variant of ``list``: prints subcommands or package names."""
    def __init__(self, args):
        super(ListCompletionCommand, self).__init__(args)
    def run(self):
        subcmds = self.pkgnarrows
        args = self.opts.packages
        action = self.opts.packages_action
        if len(args) > 1 and args[1] not in subcmds:
            # Second word isn't a known narrowing keyword: complete it
            # against the keyword list instead of package names.
            print("\n".join(filter_list_by_kw(args[1], subcmds)))
        else:
            if action == "installed":
                pkgs = self.installed(self.base, args)
            elif action == "available":
                pkgs = self.available(self.base, args)
            elif action == "updates":
                pkgs = self.updates(self.base, args)
            else:
                # No narrowing keyword: offer the union of installed and
                # available packages.
                available = listpkg_to_setstr(self.available(self.base, args))
                installed = listpkg_to_setstr(self.installed(self.base, args))
                pkgs = (available | installed)
                if not pkgs:
                    # Nothing matched as a package; fall back to keywords.
                    print("\n".join(filter_list_by_kw(args[0], subcmds)))
                    return
            for pkg in pkgs:
                print(str(pkg))
    @staticmethod
    def installed(base, arg):
        # Installed packages whose name starts with the first argument.
        return base.sack.query().installed().filterm(name__glob="{}*".format(arg[0]))
    @staticmethod
    def available(base, arg):
        # Available packages whose name starts with the first argument.
        return base.sack.query().available().filterm(name__glob="{}*".format(arg[0]))
    @staticmethod
    def updates(base, arg):
        # Pending updates whose name starts with the first argument.
        return base.check_updates(["{}*".format(arg[0])], print_=False)
class RepoListCompletionCommand(dnf.cli.commands.repolist.RepoListCommand):
    """Completion variant of ``repolist``: prints matching repository ids."""
    def __init__(self, args):
        super(RepoListCompletionCommand, self).__init__(args)
    def run(self):
        args = self.opts
        # Filter repo ids by the enabled/disabled/all narrowing option.
        if args.repos_action == "enabled":
            print("\n".join(filter_list_by_kw(args.repos[0],
                [r.id for r in self.base.repos.iter_enabled()])))
        elif args.repos_action == "disabled":
            print("\n".join(filter_list_by_kw(args.repos[0],
                [r.id for r in self.base.repos.all() if not r.enabled])))
        elif args.repos_action == "all":
            print("\n".join(filter_list_by_kw(args.repos[0],
                [r.id for r in self.base.repos.all()])))
class UpgradeCompletionCommand(dnf.cli.commands.upgrade.UpgradeCommand):
    """Completion variant of ``upgrade``: prints packages with updates."""
    def __init__(self, args):
        super(UpgradeCompletionCommand, self).__init__(args)
    def configure(self):
        # Completion must run unprivileged and needs repository metadata.
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self):
        for pkg in ListCompletionCommand.updates(self.base, self.opts.pkg_specs):
            print(str(pkg))
class DowngradeCompletionCommand(dnf.cli.commands.downgrade.DowngradeCommand):
    """Completion variant of ``downgrade``: prints downgrade candidates."""
    def __init__(self, args):
        super(DowngradeCompletionCommand, self).__init__(args)
    def configure(self):
        # Completion must run unprivileged and needs repository metadata.
        self.cli.demands.root_user = False
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
    def run(self):
        for pkg in ListCompletionCommand.available(self.base, self.opts.pkg_specs).downgrades():
            print(str(pkg))
class CleanCompletionCommand(dnf.cli.commands.clean.CleanCommand):
    """Completion variant of ``clean``: prints matching cache-type keywords."""
    def __init__(self, args):
        super(CleanCompletionCommand, self).__init__(args)
    def run(self):
        subcmds = dnf.cli.commands.clean._CACHE_TYPES.keys()
        print("\n".join(filter_list_by_kw(self.opts.type[1], subcmds)))
def main(args):
    """Dispatch shell completion for the partial dnf command line *args*.

    args[0] is either the magic token "_cmds" (complete the command word
    itself) or a dnf subcommand whose arguments should be completed.
    """
    base = dnf.cli.cli.BaseCli()
    cli = dnf.cli.Cli(base)
    if args[0] == "_cmds":
        # Complete the command word from every registered command name.
        base.init_plugins([], [], cli)
        print("\n".join(filter_list_by_kw(args[1], cli.cli_commands)))
        return
    # Swap the normal commands for print-only completion variants so
    # running them has no side effects on the system.
    cli.cli_commands.clear()
    cli.register_command(RemoveCompletionCommand)
    cli.register_command(InstallCompletionCommand)
    cli.register_command(ReinstallCompletionCommand)
    cli.register_command(ListCompletionCommand)
    cli.register_command(RepoListCompletionCommand)
    cli.register_command(UpgradeCompletionCommand)
    cli.register_command(DowngradeCompletionCommand)
    cli.register_command(CleanCompletionCommand)
    cli.configure(args)
    try:
        cli.run()
    except (OSError, dnf.exceptions.Error):
        # Completion must never surface errors to the shell prompt.
        sys.exit(0)
# Script entry point: forward CLI arguments; exit 1 on Ctrl-C so the
# shell completion machinery can tell the run was interrupted.
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        sys.exit(1)
| 7,462
|
Python
|
.py
| 166
| 35.89759
| 96
| 0.640171
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,755
|
__init__.py
|
rpm-software-management_dnf/dnf/cli/__init__.py
|
# __init__.py
# DNF cli subpackage.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
import dnf.exceptions
class CliError(dnf.exceptions.Error):
    """CLI Exception: error specific to the command-line frontend. :api"""
    pass
from dnf.cli.cli import Cli # :api
from dnf.cli.commands import Command # :api
| 1,221
|
Python
|
.py
| 26
| 45.461538
| 77
| 0.77395
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,756
|
term.py
|
rpm-software-management_dnf/dnf/cli/term.py
|
# Copyright (C) 2013-2014 Red Hat, Inc.
# Terminal routines.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import curses
import dnf.pycomp
import fcntl
import re
import struct
import sys
import termios
def _real_term_width(fd=1):
    """Return the real width of the terminal attached to *fd*.

    :param fd: file descriptor to query (defaults to stdout)
    :return: column count from the TIOCGWINSZ ioctl, or None when the
       descriptor is not a terminal
    """
    try:
        buf = 'abcdefgh'
        buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
        # winsize is four shorts: rows, cols, xpixel, ypixel.
        ret = struct.unpack(b'hhhh', buf)[1]
        return ret
    except IOError:
        return None

def _term_width(fd=1):
    """ Compute terminal width falling to default 80 in case of trouble"""
    # Bug fix: previously this ignored its argument and always queried
    # fd 1 (_real_term_width(fd=1)); now the caller-supplied fd is used.
    tw = _real_term_width(fd=fd)
    if not tw:
        return 80
    elif tw < 20:
        # Clamp implausibly narrow terminals to a usable minimum.
        return 20
    else:
        return tw
class Term(object):
"""A class to provide some terminal "UI" helpers based on curses."""
# From initial search for "terminfo and python" got:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116
# ...it's probably not copyrightable, but if so ASPN says:
#
# Except where otherwise noted, recipes in the Python Cookbook are
# published under the Python license.
__enabled = True
real_columns = property(lambda self: _real_term_width())
columns = property(lambda self: _term_width())
__cap_names = {
'underline' : 'smul',
'reverse' : 'rev',
'normal' : 'sgr0',
}
__colors = {
'black' : 0,
'blue' : 1,
'green' : 2,
'cyan' : 3,
'red' : 4,
'magenta' : 5,
'yellow' : 6,
'white' : 7
}
__ansi_colors = {
'black' : 0,
'red' : 1,
'green' : 2,
'yellow' : 3,
'blue' : 4,
'magenta' : 5,
'cyan' : 6,
'white' : 7
}
__ansi_forced_MODE = {
'bold' : '\x1b[1m',
'blink' : '\x1b[5m',
'dim' : '',
'reverse' : '\x1b[7m',
'underline' : '\x1b[4m',
'normal' : '\x1b(B\x1b[m'
}
__ansi_forced_FG_COLOR = {
'black' : '\x1b[30m',
'red' : '\x1b[31m',
'green' : '\x1b[32m',
'yellow' : '\x1b[33m',
'blue' : '\x1b[34m',
'magenta' : '\x1b[35m',
'cyan' : '\x1b[36m',
'white' : '\x1b[37m'
}
__ansi_forced_BG_COLOR = {
'black' : '\x1b[40m',
'red' : '\x1b[41m',
'green' : '\x1b[42m',
'yellow' : '\x1b[43m',
'blue' : '\x1b[44m',
'magenta' : '\x1b[45m',
'cyan' : '\x1b[46m',
'white' : '\x1b[47m'
}
def __forced_init(self):
self.MODE = self.__ansi_forced_MODE
self.FG_COLOR = self.__ansi_forced_FG_COLOR
self.BG_COLOR = self.__ansi_forced_BG_COLOR
def reinit(self, term_stream=None, color='auto'):
"""Reinitializes the :class:`Term`.
:param term_stream: the terminal stream that the
:class:`Term` should be initialized to use. If
*term_stream* is not given, :attr:`sys.stdout` is used.
:param color: when to colorize output. Valid values are
'always', 'auto', and 'never'. 'always' will use ANSI codes
to always colorize output, 'auto' will decide whether do
colorize depending on the terminal, and 'never' will never
colorize.
"""
self.__enabled = True
self.lines = 24
if color == 'always':
self.__forced_init()
return
# Output modes:
self.MODE = {
'bold' : '',
'blink' : '',
'dim' : '',
'reverse' : '',
'underline' : '',
'normal' : ''
}
# Colours
self.FG_COLOR = {
'black' : '',
'blue' : '',
'green' : '',
'cyan' : '',
'red' : '',
'magenta' : '',
'yellow' : '',
'white' : ''
}
self.BG_COLOR = {
'black' : '',
'blue' : '',
'green' : '',
'cyan' : '',
'red' : '',
'magenta' : '',
'yellow' : '',
'white' : ''
}
if color == 'never':
self.__enabled = False
return
assert color == 'auto'
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream:
term_stream = sys.stdout
if not term_stream.isatty():
self.__enabled = False
return
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try:
curses.setupterm(fd=term_stream.fileno())
except Exception:
self.__enabled = False
return
self._ctigetstr = curses.tigetstr
self.lines = curses.tigetnum('lines')
# Look up string capabilities.
for cap_name in self.MODE:
mode = cap_name
if cap_name in self.__cap_names:
cap_name = self.__cap_names[cap_name]
self.MODE[mode] = self._tigetstr(cap_name)
# Colors
set_fg = self._tigetstr('setf').encode('utf-8')
if set_fg:
for (color, val) in self.__colors.items():
self.FG_COLOR[color] = curses.tparm(set_fg, val).decode() or ''
set_fg_ansi = self._tigetstr('setaf').encode('utf-8')
if set_fg_ansi:
for (color, val) in self.__ansi_colors.items():
fg_color = curses.tparm(set_fg_ansi, val).decode() or ''
self.FG_COLOR[color] = fg_color
set_bg = self._tigetstr('setb').encode('utf-8')
if set_bg:
for (color, val) in self.__colors.items():
self.BG_COLOR[color] = curses.tparm(set_bg, val).decode() or ''
set_bg_ansi = self._tigetstr('setab').encode('utf-8')
if set_bg_ansi:
for (color, val) in self.__ansi_colors.items():
bg_color = curses.tparm(set_bg_ansi, val).decode() or ''
self.BG_COLOR[color] = bg_color
def __init__(self, term_stream=None, color='auto'):
self.reinit(term_stream, color)
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
cap = self._ctigetstr(cap_name) or ''
if dnf.pycomp.is_py3bytes(cap):
cap = cap.decode()
return re.sub(r'\$<\d+>[/*]?', '', cap)
def color(self, color, s):
"""Colorize string with color"""
return (self.MODE[color] + str(s) + self.MODE['normal'])
def bold(self, s):
"""Make string bold."""
return self.color('bold', s)
def sub(self, haystack, beg, end, needles, escape=None, ignore_case=False):
"""Search the string *haystack* for all occurrences of any
string in the list *needles*. Prefix each occurrence with
*beg*, and postfix each occurrence with *end*, then return the
modified string. For example::
>>> yt = Term()
>>> yt.sub('spam and eggs', 'x', 'z', ['and'])
'spam xandz eggs'
This is particularly useful for emphasizing certain words
in output: for example, calling :func:`sub` with *beg* =
MODE['bold'] and *end* = MODE['normal'] will return a string
that when printed to the terminal will appear to be *haystack*
with each occurrence of the strings in *needles* in bold
face. Note, however, that the :func:`sub_mode`,
:func:`sub_bold`, :func:`sub_fg`, and :func:`sub_bg` methods
provide convenient ways to access this same emphasizing functionality.
:param haystack: the string to be modified
:param beg: the string to be prefixed onto matches
:param end: the string to be postfixed onto matches
:param needles: a list of strings to add the prefixes and
postfixes to
:param escape: a function that accepts a string and returns
the same string with problematic characters escaped. By
default, :func:`re.escape` is used.
:param ignore_case: whether case should be ignored when
searching for matches
:return: *haystack* with *beg* prefixing, and *end*
postfixing, occurrences of the strings in *needles*
"""
if not self.__enabled:
return haystack
if not escape:
escape = re.escape
render = lambda match: beg + match.group() + end
for needle in needles:
pat = escape(needle)
flags = re.I if ignore_case else 0
haystack = re.sub(pat, render, haystack, flags=flags)
return haystack
def sub_norm(self, haystack, beg, needles, **kwds):
"""Search the string *haystack* for all occurrences of any
string in the list *needles*. Prefix each occurrence with
*beg*, and postfix each occurrence with self.MODE['normal'],
then return the modified string. If *beg* is an ANSI escape
code, such as given by self.MODE['bold'], this method will
return *haystack* with the formatting given by the code only
applied to the strings in *needles*.
:param haystack: the string to be modified
:param beg: the string to be prefixed onto matches
:param end: the string to be postfixed onto matches
:param needles: a list of strings to add the prefixes and
postfixes to
:return: *haystack* with *beg* prefixing, and self.MODE['normal']
postfixing, occurrences of the strings in *needles*
"""
return self.sub(haystack, beg, self.MODE['normal'], needles, **kwds)
def sub_mode(self, haystack, mode, needles, **kwds):
"""Search the string *haystack* for all occurrences of any
string in the list *needles*. Prefix each occurrence with
self.MODE[*mode*], and postfix each occurrence with
self.MODE['normal'], then return the modified string. This
will return a string that when printed to the terminal will
appear to be *haystack* with each occurrence of the strings in
*needles* in the given *mode*.
:param haystack: the string to be modified
:param mode: the mode to set the matches to be in. Valid
values are given by self.MODE.keys().
:param needles: a list of strings to add the prefixes and
postfixes to
:return: *haystack* with self.MODE[*mode*] prefixing, and
self.MODE['normal'] postfixing, occurrences of the strings
in *needles*
"""
return self.sub_norm(haystack, self.MODE[mode], needles, **kwds)
def sub_bold(self, haystack, needles, **kwds):
"""Search the string *haystack* for all occurrences of any
string in the list *needles*. Prefix each occurrence with
self.MODE['bold'], and postfix each occurrence with
self.MODE['normal'], then return the modified string. This
will return a string that when printed to the terminal will
appear to be *haystack* with each occurrence of the strings in
*needles* in bold face.
:param haystack: the string to be modified
:param needles: a list of strings to add the prefixes and
postfixes to
:return: *haystack* with self.MODE['bold'] prefixing, and
self.MODE['normal'] postfixing, occurrences of the strings
in *needles*
"""
return self.sub_mode(haystack, 'bold', needles, **kwds)
def sub_fg(self, haystack, color, needles, **kwds):
"""Search the string *haystack* for all occurrences of any
string in the list *needles*. Prefix each occurrence with
self.FG_COLOR[*color*], and postfix each occurrence with
self.MODE['normal'], then return the modified string. This
will return a string that when printed to the terminal will
appear to be *haystack* with each occurrence of the strings in
*needles* in the given color.
:param haystack: the string to be modified
:param color: the color to set the matches to be in. Valid
values are given by self.FG_COLOR.keys().
:param needles: a list of strings to add the prefixes and
postfixes to
:return: *haystack* with self.FG_COLOR[*color*] prefixing, and
self.MODE['normal'] postfixing, occurrences of the strings
in *needles*
"""
return self.sub_norm(haystack, self.FG_COLOR[color], needles, **kwds)
def sub_bg(self, haystack, color, needles, **kwds):
"""Search the string *haystack* for all occurrences of any
string in the list *needles*. Prefix each occurrence with
self.BG_COLOR[*color*], and postfix each occurrence with
self.MODE['normal'], then return the modified string. This
will return a string that when printed to the terminal will
appear to be *haystack* with each occurrence of the strings in
*needles* highlighted in the given background color.
:param haystack: the string to be modified
:param color: the background color to set the matches to be in. Valid
values are given by self.BG_COLOR.keys().
:param needles: a list of strings to add the prefixes and
postfixes to
:return: *haystack* with self.BG_COLOR[*color*] prefixing, and
self.MODE['normal'] postfixing, occurrences of the strings
in *needles*
"""
return self.sub_norm(haystack, self.BG_COLOR[color], needles, **kwds)
| 14,682
|
Python
|
.py
| 344
| 33.680233
| 79
| 0.594347
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,757
|
progress.py
|
rpm-software-management_dnf/dnf/cli/progress.py
|
# Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
from __future__ import unicode_literals
from dnf.cli.format import format_number, format_time
from dnf.cli.term import _term_width
from dnf.pycomp import unicode
from time import time
import sys
import dnf.callback
import dnf.util
class MultiFileProgressMeter(dnf.callback.DownloadProgress):
    """Multi-file download progress meter"""
    # Map terminal download-status codes to the short labels printed in
    # the per-file completion line.
    STATUS_2_STR = {
        dnf.callback.STATUS_FAILED: 'FAILED',
        dnf.callback.STATUS_ALREADY_EXISTS: 'SKIPPED',
        dnf.callback.STATUS_MIRROR: 'MIRROR',
        dnf.callback.STATUS_DRPM: 'DRPM',
    }
    def __init__(self, fo=sys.stderr, update_period=0.3, tick_period=1.0, rate_average=5.0):
        """Creates a new progress meter instance
        update_period -- how often to update the progress bar
        tick_period -- how fast to cycle through concurrent downloads
        rate_average -- time constant for average speed calculation
        """
        self.fo = fo
        self.update_period = update_period
        self.tick_period = tick_period
        self.rate_average = rate_average
        # Cursor position of the moving "=" marker used when total size
        # is unknown (sic: attribute name is misspelled historically).
        self.unknown_progres = 0
        self.total_drpm = 0
        # NOTE(review): checks stdout while output goes to self.fo
        # (stderr by default) — presumably intentional; confirm upstream.
        self.isatty = sys.stdout.isatty()
        self.done_drpm = 0
        self.done_files = 0
        self.done_size = 0
        # self.active: display names of downloads currently in flight.
        # self.state: name -> (start_time, bytes_done_so_far).
        self.active = []
        self.state = {}
        # Rate-averaging bookkeeping (see _update()).
        self.last_time = 0
        self.last_size = 0
        self.rate = None
        self.total_files = 0
        self.total_size = 0
    def message(self, msg):
        # Write and flush through dnf's terminal messenger helper.
        dnf.util._terminal_messenger('write_flush', msg, self.fo)
    def start(self, total_files, total_size, total_drpms=0):
        # Reset all counters for a fresh batch of downloads.
        self.total_files = total_files
        self.total_size = total_size
        self.total_drpm = total_drpms
        # download state
        self.done_drpm = 0
        self.done_files = 0
        self.done_size = 0
        self.active = []
        self.state = {}
        # rate averaging
        self.last_time = 0
        self.last_size = 0
        self.rate = None
    def progress(self, payload, done):
        # Per-chunk callback: record progress for this payload and
        # redraw the meter if update_period has elapsed.
        now = time()
        text = unicode(payload)
        total = int(payload.download_size)
        done = int(done)
        # update done_size
        if text not in self.state:
            self.state[text] = now, 0
            self.active.append(text)
        start, old = self.state[text]
        self.state[text] = start, done
        self.done_size += done - old
        # update screen if enough time has elapsed
        if now - self.last_time > self.update_period:
            if total > self.total_size:
                self.total_size = total
            self._update(now)
    def _update(self, now):
        # Refresh the average rate and redraw the single-line meter.
        if self.last_time:
            delta_time = now - self.last_time
            delta_size = self.done_size - self.last_size
            if delta_time > 0 and delta_size > 0:
                # update the average rate
                # Exponential moving average with time constant
                # rate_average; weight grows with elapsed time.
                rate = delta_size / delta_time
                if self.rate is not None:
                    weight = min(delta_time/self.rate_average, 1)
                    rate = rate*weight + self.rate*(1 - weight)
                self.rate = rate
        self.last_time = now
        self.last_size = self.done_size
        # Drawing only makes sense on an interactive terminal.
        if not self.isatty:
            return
        # pick one of the active downloads
        text = self.active[int(now/self.tick_period) % len(self.active)]
        if self.total_files > 1:
            n = '%d' % (self.done_files + 1)
            if len(self.active) > 1:
                n += '-%d' % (self.done_files + len(self.active))
            text = '(%s/%d): %s' % (n, self.total_files, text)
        # average rate, total done size, estimated remaining time
        if self.rate and self.total_size:
            time_eta = format_time((self.total_size - self.done_size) / self.rate)
        else:
            time_eta = '--:--'
        msg = ' %5sB/s | %5sB %9s ETA\r' % (
            format_number(self.rate) if self.rate else '--- ',
            format_number(self.done_size),
            time_eta)
        left = _term_width() - len(msg)
        # Reserve roughly half of the remaining width for the bar.
        bl = (left - 7)//2
        if bl > 8:
            # use part of the remaining space for progress bar
            if self.total_size:
                pct = self.done_size * 100 // self.total_size
                # Half-character resolution: '=' full step, '-' half step.
                n, p = divmod(self.done_size * bl * 2 // self.total_size, 2)
                bar = '=' * n + '-' * p
                msg = '%3d%% [%-*s]%s' % (pct, bl, bar, msg)
                left -= bl + 7
            else:
                # Unknown total: animate a small marker sweeping the bar.
                n = self.unknown_progres - 3
                p = 3
                n = 0 if n < 0 else n
                bar = ' ' * n + '=' * p
                msg = ' [%-*s]%s' % (bl, bar, msg)
                left -= bl + 7
                self.unknown_progres = self.unknown_progres + 3 if self.unknown_progres + 3 < bl \
                    else 0
        self.message('%-*.*s%s' % (left, left, text, msg))
    def end(self, payload, status, err_msg):
        # Per-file completion callback: account the finished payload and
        # print its final status line.
        start = now = time()
        text = unicode(payload)
        size = int(payload.download_size)
        done = 0
        # update state
        if status == dnf.callback.STATUS_MIRROR:
            pass
        elif status == dnf.callback.STATUS_DRPM:
            self.done_drpm += 1
        elif text in self.state:
            start, done = self.state.pop(text)
            self.active.remove(text)
            # Only count the bytes not already added by progress().
            size -= done
            self.done_files += 1
            self.done_size += size
        elif status == dnf.callback.STATUS_ALREADY_EXISTS:
            self.done_files += 1
            self.done_size += size
        if status:
            # the error message, no trimming
            if status is dnf.callback.STATUS_DRPM and self.total_drpm > 1:
                msg = '[%s %d/%d] %s: ' % (self.STATUS_2_STR[status], self.done_drpm,
                                           self.total_drpm, text)
            else:
                msg = '[%s] %s: ' % (self.STATUS_2_STR[status], text)
            left = _term_width() - len(msg) - 1
            msg = '%s%-*s\n' % (msg, left, err_msg)
        else:
            if self.total_files > 1:
                text = '(%d/%d): %s' % (self.done_files, self.total_files, text)
            # average rate, file size, download time
            tm = max(now - start, 0.001)
            msg = ' %5sB/s | %5sB %9s \n' % (
                format_number(float(done) / tm),
                format_number(done),
                format_time(tm))
            left = _term_width() - len(msg)
            msg = '%-*.*s%s' % (left, left, text, msg)
        self.message(msg)
        # now there's a blank line. fill it if possible.
        if self.active:
            self._update(now)
| 7,609
|
Python
|
.py
| 181
| 31.845304
| 98
| 0.562171
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,758
|
main.py
|
rpm-software-management_dnf/dnf/cli/main.py
|
# Copyright 2005 Duke University
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Entrance point for the yum command line interface.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.conf import Conf
from dnf.cli.cli import Cli
from dnf.cli.option_parser import OptionParser
from dnf.i18n import ucd
from dnf.cli.utils import show_lock_owner
from dnf.i18n import _
import dnf.cli
import dnf.cli.cli
import dnf.cli.option_parser
import dnf.exceptions
import dnf.i18n
import dnf.logging
import dnf.util
import errno
import hawkey
import libdnf.error
import logging
import os
import os.path
import sys
logger = logging.getLogger("dnf")
def ex_IOError(e):
    """Log an I/O error and return the generic failure exit code.

    The full traceback is recorded at SUBDEBUG level; the message itself
    is logged at CRITICAL.

    :param e: the caught exception
    :return: exit code 1
    """
    # Traceback goes to the debug log only.
    logger.log(dnf.logging.SUBDEBUG, '', exc_info=True)
    message = ucd(e)
    logger.critical(message)
    return 1
def ex_Error(e):
    """Log a dnf error and return the generic failure exit code.

    The traceback is recorded at SUBDEBUG level; the error message is
    logged at CRITICAL only when the exception carries a value.

    :param e: the caught dnf.exceptions.Error
    :return: exit code 1
    """
    logger.log(dnf.logging.SUBDEBUG, '', exc_info=True)
    # Errors without a value carry nothing worth printing.
    if e.value is None:
        return 1
    logger.critical(_('Error: %s'), ucd(e))
    return 1
def main(args, conf_class=Conf, cli_class=Cli, option_parser_class=OptionParser):
    """Top-level entry point: run dnf and translate exceptions to exit codes.

    :param args: command line arguments (program name excluded)
    :param conf_class: configuration class to instantiate for the base
    :param cli_class: CLI class passed through to _main()
    :param option_parser_class: option parser class passed through to _main()
    :return: process exit code (0 success, 1 generic error, 200 lock error)
    """
    # Handler order matters: the more specific dnf exceptions must be
    # caught before the generic dnf.exceptions.Error and IOError.
    try:
        dnf.i18n.setup_stdout()
        # BaseCli is a context manager; exiting releases locks/caches.
        with dnf.cli.cli.BaseCli(conf_class()) as base:
            return _main(base, args, cli_class, option_parser_class)
    except dnf.exceptions.ProcessLockError as e:
        logger.critical(e.value)
        # Tell the user which process is holding the lock.
        show_lock_owner(e.pid)
        return 200
    except dnf.exceptions.LockError as e:
        logger.critical(e.value)
        return 200
    except dnf.exceptions.DepsolveError as e:
        # Depsolve details were already reported closer to the failure.
        return 1
    except dnf.exceptions.Error as e:
        return ex_Error(e)
    except hawkey.Exception as e:
        logger.critical(_('Error: %s'), ucd(e))
        return 1
    except libdnf.error.Error as e:
        logger.critical(_('Error: %s'), ucd(e))
        return 1
    except IOError as e:
        return ex_IOError(e)
    except KeyboardInterrupt as e:
        logger.critical('{}: {}'.format(type(e).__name__, _("Terminated.")))
        return 1
def _main(base, args, cli_class, option_parser):
    """Run the dnf program from a command line interface.

    Sets up logging, builds the CLI object, parses/validates the command
    line and configuration, then hands off to cli_run().

    :param base: the dnf Base (BaseCli) instance
    :param args: raw command-line arguments
    :param cli_class: class used to construct the CLI object
    :param option_parser: factory for the option parser
    :return: the process exit code
    """
    base._logging._presetup()
    # Core CLI object driving the rest of the run.
    cli = cli_class(base)
    # Decode arguments before parsing; configure() also sanity-checks
    # the command line and loads the configuration files.
    decoded_args = [ucd(arg) for arg in args]
    try:
        cli.configure(decoded_args, option_parser())
    except (IOError, OSError) as e:
        return ex_IOError(e)
    return cli_run(cli, base)
def cli_run(cli, base):
    """Execute the configured CLI command, resolve and run the transaction.

    On depsolve failure, log actionable hints (--allowerasing,
    --skip-broken, --nobest, filelists metadata) before re-raising.

    :param cli: the configured Cli object
    :param base: the dnf Base instance
    :return: a nonzero exit code on handled failure, otherwise the
        command's configured success exit status
    """
    # Try to open the current directory to see if we have
    # read and execute access. If not, chdir to /
    try:
        f = open(".")
    except IOError as e:
        # Other IOErrors are deliberately ignored (best-effort check).
        if e.errno == errno.EACCES:
            logger.critical(_('No read/execute access in current directory, moving to /'))
            os.chdir("/")
    else:
        f.close()
    try:
        cli.run()
    except dnf.exceptions.LockError:
        # Lock errors are handled by main()'s top-level dispatch.
        raise
    except (IOError, OSError) as e:
        return ex_IOError(e)
    if cli.demands.resolving:
        try:
            ret = resolving(cli, base)
        except dnf.exceptions.DepsolveError as e:
            # Log the error, then build a single parenthesized hint line
            # suggesting switches that could make the transaction succeed.
            ex_Error(e)
            msg = ""
            if not cli.demands.allow_erasing and base._goal.problem_conflicts(available=True):
                msg += _("try to add '{}' to command line to replace conflicting "
                         "packages").format("--allowerasing")
            if cli.base.conf.strict:
                if not msg:
                    msg += _("try to add '{}' to skip uninstallable packages").format(
                        "--skip-broken")
                else:
                    msg += _(" or '{}' to skip uninstallable packages").format("--skip-broken")
            if cli.base.conf.best:
                # Only suggest --nobest if "best" came from main config,
                # not from an explicit command-line override.
                prio = cli.base.conf._get_priority("best")
                if prio <= dnf.conf.PRIO_MAINCONFIG:
                    if not msg:
                        msg += _("try to add '{}' to use not only best candidate packages").format(
                            "--nobest")
                    else:
                        msg += _(" or '{}' to use not only best candidate packages").format(
                            "--nobest")
            if base._goal.file_dep_problem_present() and 'filelists' not in cli.base.conf.optional_metadata_types:
                if not msg:
                    msg += _("try to add '{}' to load additional filelists metadata").format(
                        "--setopt=optional_metadata_types=filelists")
                else:
                    msg += _(" or '{}' to load additional filelists metadata").format(
                        "--setopt=optional_metadata_types=filelists")
            if msg:
                logger.info("({})".format(msg))
            raise
        if ret:
            return ret
    cli.command.run_transaction()
    return cli.demands.success_exit_status
def resolving(cli, base):
    """Perform the depsolve, download and RPM transaction stage.

    :param cli: the configured Cli object
    :param base: the dnf Base instance
    :return: 0 on success, 1 on a handled transaction failure
    :raises dnf.exceptions.DepsolveError: propagated from base.resolve()
        for cli_run() to decorate with hints
    """
    # Resolve only if a transaction was not already prepared (e.g. by
    # history replay commands).
    if base.transaction is None:
        base.resolve(cli.demands.allow_erasing)
        logger.info(_('Dependencies resolved.'))
    # Give the command a chance to inspect/adjust the resolved transaction.
    cli.command.run_resolved()
    # Run the transaction
    displays = []
    if cli.demands.transaction_display is not None:
        displays.append(cli.demands.transaction_display)
    try:
        base.do_transaction(display=displays)
    except dnf.cli.CliError as exc:
        logger.error(ucd(exc))
        return 1
    except dnf.exceptions.TransactionCheckError as err:
        # A check error may carry several messages; log them all.
        for msg in cli.command.get_error_output(err):
            logger.critical(msg)
        return 1
    except IOError as e:
        return ex_IOError(e)
    else:
        logger.info(_('Complete!'))
    return 0
def user_main(args, exit_code=False):
    """Call one of the multiple main() functions based on environment variables.

    :param args: command line arguments passed into yum
    :param exit_code: if *exit_code* is True, this function will exit
       python with its exit code when it has finished executing.
       Otherwise, it will return its exit code.
    :return: the exit code from dnf.yum execution
    """
    status = main(args)
    if not exit_code:
        return status
    sys.exit(status)
# Script entry point: run dnf with this process's command-line arguments
# and terminate with the resulting exit code.
if __name__ == "__main__":
    user_main(sys.argv[1:], exit_code=True)
| 6,988
|
Python
|
.py
| 184
| 30.467391
| 114
| 0.635612
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,759
|
module.py
|
rpm-software-management_dnf/dnf/cli/commands/module.py
|
# supplies the 'module' command.
#
# Copyright (C) 2014-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from dnf.cli import commands, CliError
from dnf.i18n import _
from dnf.module.exceptions import NoModuleException
from dnf.util import logger
import dnf.util
import sys
import os
import hawkey
import libdnf
import dnf.module.module_base
import dnf.exceptions
class ModuleCommand(commands.Command):
    """The `dnf module` command: dispatches to one of the nested
    subcommand classes (list/info/enable/disable/reset/install/update/
    remove/switch-to/provides/repoquery) based on the first positional
    argument."""
    class SubCommand(commands.Command):
        """Common base for all `dnf module` subcommands; owns the shared
        ModuleBase helper and the spec/artifact resolution utilities."""
        def __init__(self, cli):
            super(ModuleCommand.SubCommand, self).__init__(cli)
            self.module_base = dnf.module.module_base.ModuleBase(self.base)
        def _get_modules_from_name_stream_specs(self):
            # Resolve the user-supplied module specs into the set of
            # matching module packages (matching on name/stream/arch only).
            modules_from_specs = set()
            for module_spec in self.opts.module_spec:
                __, nsvcap = self.module_base._get_modules(module_spec)
                # When there is no match, the problem was already reported by module_base.remove()
                if nsvcap is None:
                    continue
                name = nsvcap.name if nsvcap.name else ""
                stream = nsvcap.stream if nsvcap.stream else ""
                # Version/context parts of the spec are ignored — warn.
                if (nsvcap.version and nsvcap.version != -1) or nsvcap.context:
                    logger.info(_("Only module name, stream, architecture or profile is used. "
                                  "Ignoring unneeded information in argument: '{}'").format(
                        module_spec))
                arch = nsvcap.arch if nsvcap.arch else ""
                modules = self.base._moduleContainer.query(name, stream, "", "", arch)
                modules_from_specs.update(modules)
            return modules_from_specs
        def _get_module_artifact_names(self, use_modules, skip_modules):
            # Collect the RPM artifacts of every active module in
            # use_modules (minus skip_modules), and derive plain package
            # names from the NEVRA artifact strings.
            artifacts = set()
            pkg_names = set()
            for module in use_modules:
                if module not in skip_modules:
                    if self.base._moduleContainer.isModuleActive(module):
                        artifacts.update(module.getArtifacts())
            for artifact in artifacts:
                subj = hawkey.Subject(artifact)
                for nevra_obj in subj.get_nevra_possibilities(
                        forms=[hawkey.FORM_NEVRA]):
                    if nevra_obj.name:
                        pkg_names.add(nevra_obj.name)
            return pkg_names, artifacts
    class ListSubCommand(SubCommand):
        aliases = ('list',)
        summary = _('list all module streams, profiles and states')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
        def run_on_module(self):
            # Pick the state filter matching the --enabled/--disabled/
            # --installed switch (default: all/unknown).
            mods = self.module_base
            if self.opts.enabled:
                output = mods._get_brief_description(
                    self.opts.module_spec, libdnf.module.ModulePackageContainer.ModuleState_ENABLED)
            elif self.opts.disabled:
                output = mods._get_brief_description(
                    self.opts.module_spec,
                    libdnf.module.ModulePackageContainer.ModuleState_DISABLED)
            elif self.opts.installed:
                output = mods._get_brief_description(
                    self.opts.module_spec,
                    libdnf.module.ModulePackageContainer.ModuleState_INSTALLED)
            else:
                output = mods._get_brief_description(
                    self.opts.module_spec, libdnf.module.ModulePackageContainer.ModuleState_UNKNOWN)
            if output:
                print(output)
                return
            # Only error out when the user gave explicit specs.
            if self.opts.module_spec:
                msg = _('No matching Modules to list')
                raise dnf.exceptions.Error(msg)
    class InfoSubCommand(SubCommand):
        aliases = ('info',)
        summary = _('print detailed information about a module')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
        def run_on_module(self):
            # -v gives full info, --profile restricts to profile content.
            if self.opts.verbose:
                output = self.module_base._get_full_info(self.opts.module_spec)
            elif self.opts.profile:
                output = self.module_base._get_info_profiles(self.opts.module_spec)
            else:
                output = self.module_base._get_info(self.opts.module_spec)
            if output:
                print(output)
            else:
                raise dnf.exceptions.Error(_('No matching Modules to list'))
    class EnableSubCommand(SubCommand):
        aliases = ('enable',)
        summary = _('enable a module stream')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True
        def run_on_module(self):
            try:
                self.module_base.enable(self.opts.module_spec)
            except dnf.exceptions.MarkingErrors as e:
                # In strict mode re-raise real failures; errors that stem
                # only from broken module defaults are just logged.
                if self.base.conf.strict:
                    if e.no_match_group_specs or e.error_group_specs:
                        raise e
                    if e.module_depsolv_errors and e.module_depsolv_errors[1] != \
                            libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS:
                        raise e
                logger.error(str(e))
    class DisableSubCommand(SubCommand):
        aliases = ('disable',)
        summary = _('disable a module with all its streams')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True
        def run_on_module(self):
            try:
                self.module_base.disable(self.opts.module_spec)
            except dnf.exceptions.MarkingErrors as e:
                # Same strictness policy as EnableSubCommand.
                if self.base.conf.strict:
                    if e.no_match_group_specs or e.error_group_specs:
                        raise e
                    if e.module_depsolv_errors and e.module_depsolv_errors[1] != \
                            libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS:
                        raise e
                logger.error(str(e))
    class ResetSubCommand(SubCommand):
        aliases = ('reset',)
        summary = _('reset a module')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True
        def run_on_module(self):
            try:
                self.module_base.reset(self.opts.module_spec)
            except dnf.exceptions.MarkingErrors as e:
                if self.base.conf.strict:
                    if e.no_match_group_specs:
                        raise e
                logger.error(str(e))
    class InstallSubCommand(SubCommand):
        aliases = ('install',)
        summary = _('install a module profile including its packages')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True
        def run_on_module(self):
            try:
                self.module_base.install(self.opts.module_spec, self.base.conf.strict)
            except dnf.exceptions.MarkingErrors as e:
                if self.base.conf.strict:
                    if e.no_match_group_specs or e.error_group_specs:
                        raise e
                logger.error(str(e))
    class UpdateSubCommand(SubCommand):
        aliases = ('update',)
        summary = _('update packages associated with an active stream')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True
        def run_on_module(self):
            # upgrade() returns specs it could not match; report them.
            module_specs = self.module_base.upgrade(self.opts.module_spec)
            if module_specs:
                raise NoModuleException(", ".join(module_specs))
    class RemoveSubCommand(SubCommand):
        aliases = ('remove', 'erase',)
        summary = _('remove installed module profiles and their packages')
        def configure(self):
            demands = self.cli.demands
            demands.allow_erasing = True
            demands.available_repos = True
            demands.fresh_metadata = False
            demands.resolving = True
            demands.root_user = True
            demands.sack_activation = True
        def run_on_module(self):
            skipped_groups = self.module_base.remove(self.opts.module_spec)
            if self.opts.all:
                # --all: also erase every installed package belonging to
                # the matched modules, except packages shared with other
                # (kept) modules.
                modules_from_specs = self._get_modules_from_name_stream_specs()
                remove_names_from_spec, __ = self._get_module_artifact_names(
                    modules_from_specs, set())
                keep_names, __ = self._get_module_artifact_names(
                    self.base._moduleContainer.getModulePackages(), modules_from_specs)
                remove_query = self.base.sack.query().installed().filterm(
                    name=remove_names_from_spec)
                keep_query = self.base.sack.query().installed().filterm(name=keep_names)
                for pkg in remove_query:
                    if pkg in keep_query:
                        msg = _("Package {} belongs to multiple modules, skipping").format(pkg)
                        logger.info(msg)
                    else:
                        self.base.goal.erase(
                            pkg, clean_deps=self.base.conf.clean_requirements_on_remove)
            if not skipped_groups:
                return
            logger.error(dnf.exceptions.MarkingErrors(no_match_group_specs=skipped_groups))
    class SwitchToSubCommand(SubCommand):
        aliases = ('switch-to',)
        summary = _('switch a module to a stream and distrosync rpm packages')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True
            # Flag the stream switch so depsolving allows it.
            self.base.conf.module_stream_switch = True
        def run_on_module(self):
            try:
                self.module_base.switch_to(self.opts.module_spec, strict=self.base.conf.strict)
            except dnf.exceptions.MarkingErrors as e:
                if self.base.conf.strict:
                    if e.no_match_group_specs or e.error_group_specs:
                        raise e
                logger.error(str(e))
    class ProvidesSubCommand(SubCommand):
        aliases = ("provides", )
        summary = _('locate a module the modular packages belong to')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
        def run_on_module(self):
            output = self.module_base._what_provides(self.opts.module_spec)
            if output:
                print(output)
    class RepoquerySubCommand(SubCommand):
        aliases = ("repoquery", )
        summary = _('list packages belonging to a module')
        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
        def run_on_module(self):
            # List available packages by artifact NEVRA and/or installed
            # packages by name, depending on --available/--installed.
            modules_from_specs = set()
            for module_spec in self.opts.module_spec:
                modules, __ = self.module_base._get_modules(module_spec)
                modules_from_specs.update(modules)
            names_from_spec, spec_artifacts = self._get_module_artifact_names(
                modules_from_specs, set())
            package_strings = set()
            if self.opts.available or not self.opts.installed:
                query = self.base.sack.query().available().filterm(nevra_strict=spec_artifacts)
                for pkg in query:
                    package_strings.add(str(pkg))
            if self.opts.installed:
                query = self.base.sack.query().installed().filterm(name=names_from_spec)
                for pkg in query:
                    package_strings.add(str(pkg))
            output = "\n".join(sorted(package_strings))
            print(output)
    # All known subcommand classes; ListSubCommand alone may run without
    # a module-spec argument.
    SUBCMDS = {ListSubCommand, InfoSubCommand, EnableSubCommand,
               DisableSubCommand, ResetSubCommand, InstallSubCommand, UpdateSubCommand,
               RemoveSubCommand, SwitchToSubCommand, ProvidesSubCommand, RepoquerySubCommand}
    SUBCMDS_NOT_REQUIRED_ARG = {ListSubCommand}
    aliases = ("module",)
    summary = _("Interact with Modules.")
    def __init__(self, cli):
        super(ModuleCommand, self).__init__(cli)
        # Build an alias -> subcommand-instance lookup table.
        subcmd_objs = (subcmd(cli) for subcmd in self.SUBCMDS)
        self.subcmd = None
        self._subcmd_name2obj = {
            alias: subcmd for subcmd in subcmd_objs for alias in subcmd.aliases}
    def set_argparser(self, parser):
        # Display filters are mutually exclusive; --available is not.
        narrows = parser.add_mutually_exclusive_group()
        narrows.add_argument('--enabled', dest='enabled',
                             action='store_true',
                             help=_("show only enabled modules"))
        narrows.add_argument('--disabled', dest='disabled',
                             action='store_true',
                             help=_("show only disabled modules"))
        narrows.add_argument('--installed', dest='installed',
                             action='store_true',
                             help=_("show only installed modules or packages"))
        narrows.add_argument('--profile', dest='profile',
                             action='store_true',
                             help=_("show profile content"))
        parser.add_argument('--available', dest='available', action='store_true',
                            help=_("show only available packages"))
        narrows.add_argument('--all', dest='all',
                             action='store_true',
                             help=_("remove all modular packages"))
        # One choice per subcommand, sorted by primary alias, with a
        # per-line help summary.
        subcommand_choices = []
        subcommand_help = []
        for subcmd in sorted(self.SUBCMDS, key=lambda x: x.aliases[0]):
            subcommand_choices.append(subcmd.aliases[0])
            subcommand_help.append('{}: {}'.format(subcmd.aliases[0], subcmd.summary or ''))
        parser.add_argument('subcmd', nargs=1, choices=subcommand_choices,
                            metavar='<modular command>',
                            help='\n'.join(subcommand_help))
        parser.add_argument('module_spec', metavar='module-spec', nargs='*',
                            help=_("Module specification"))
    def configure(self):
        # Look up the chosen subcommand and let it configure demands.
        try:
            self.subcmd = self._subcmd_name2obj[self.opts.subcmd[0]]
        except (CliError, KeyError):
            self.cli.optparser.print_usage()
            raise CliError
        self.subcmd.opts = self.opts
        self.subcmd.configure()
    def run(self):
        self.check_required_argument()
        self.subcmd.run_on_module()
    def check_required_argument(self):
        # Every subcommand except those in SUBCMDS_NOT_REQUIRED_ARG
        # needs at least one module-spec argument.
        not_required_argument = [alias
                                 for subcmd in self.SUBCMDS_NOT_REQUIRED_ARG
                                 for alias in subcmd.aliases]
        if self.opts.subcmd[0] not in not_required_argument:
            if not self.opts.module_spec:
                raise CliError(
                    _("{} {} {}: too few arguments").format(dnf.util.MAIN_PROG,
                                                            self.opts.command,
                                                            self.opts.subcmd[0]))
| 16,882
|
Python
|
.py
| 348
| 34.692529
| 100
| 0.579149
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,760
|
downgrade.py
|
rpm-software-management_dnf/dnf/cli/commands/downgrade.py
|
# downgrade.py
# Downgrade CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _
class DowngradeCommand(commands.Command):
    """The `dnf downgrade` command: replace installed packages with
    older available versions."""
    aliases = ('downgrade', 'dg')
    summary = _("Downgrade a package")
    @staticmethod
    def set_argparser(parser):
        # Accepts package specs, @group specs and local rpm filenames.
        parser.add_argument('package', nargs='*', help=_('Package to downgrade'),
                            action=OptionParser.ParseSpecGroupFileCallback)
    def configure(self):
        # Downgrading needs repos, a sack, depsolving and root rights.
        demands = self.cli.demands
        demands.root_user = True
        demands.resolving = True
        demands.available_repos = True
        demands.sack_activation = True
        commands._checkGPGKey(self.base, self.cli)
        # Local rpm files work without any enabled repository.
        if not self.opts.filenames:
            commands._checkEnabledRepo(self.base)
    def run(self):
        # Register any local rpm files first, tolerating failures.
        local_pkgs = self.base.add_remote_rpms(self.opts.filenames, strict=False,
                                               progress=self.base.output.progress)
        group_specs = ['@' + grp for grp in self.opts.grp_specs]
        return self.base.downgradePkgs(
            specs=self.opts.pkg_specs + group_specs,
            file_pkgs=local_pkgs,
            strict=self.base.conf.strict)
| 2,325
|
Python
|
.py
| 50
| 40.58
| 81
| 0.708425
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,761
|
deplist.py
|
rpm-software-management_dnf/dnf/cli/commands/deplist.py
|
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.cli.commands.repoquery import RepoQueryCommand
class DeplistCommand(RepoQueryCommand):
    """Deprecated alias for ``dnf repoquery --deplist``."""
    aliases = ('deplist',)
    summary = _("[deprecated, use repoquery --deplist] List package's dependencies and what packages provide them")
    def configure(self):
        # Reuse repoquery's full setup, then force --deplist output mode.
        super(DeplistCommand, self).configure()
        self.opts.deplist = True
| 1,510
|
Python
|
.py
| 31
| 46.258065
| 115
| 0.7673
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,762
|
repolist.py
|
rpm-software-management_dnf/dnf/cli/commands/repolist.py
|
# repolist.py
# repolist CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.i18n import _, ucd, fill_exact_width, exact_width
from dnf.cli.option_parser import OptionParser
import dnf.cli.format
import dnf.pycomp
import dnf.util
import fnmatch
import hawkey
import logging
import operator
logger = logging.getLogger('dnf')
def _expire_str(repo, md):
    """Describe *repo*'s metadata expiry policy, including the last refresh.

    *md* is the repo's loaded metadata (or None); without it the last
    refresh time is reported as "unknown".
    """
    if md:
        last = dnf.util.normalize_time(repo._repo.getTimestamp())
    else:
        last = _("unknown")
    expire = repo.metadata_expire
    if expire <= -1:
        # Negative expiry means the metadata never expires.
        return _("Never (last: %s)") % last
    if not expire:
        # Zero expiry means the metadata is always considered stale.
        return _("Instant (last: %s)") % last
    return _("%s second(s) (last: %s)") % (_num2ui_num(expire), last)
def _num2ui_num(num):
    """Format *num* with digit grouping and return it as a unicode string."""
    grouped = dnf.pycomp.format("%d", num, True)
    return ucd(grouped)
def _repo_match(repo, patterns):
rid = repo.id.lower()
rnm = repo.name.lower()
for pat in patterns:
if fnmatch.fnmatch(rid, pat):
return True
if fnmatch.fnmatch(rnm, pat):
return True
return False
def _repo_size(sack, repo):
    """Return the total size of *repo*'s packages as a human-readable string.

    Excludes are ignored so the figure reflects everything the repo ships.
    """
    pkgs = sack.query(flags=hawkey.IGNORE_EXCLUDES).filterm(reponame__eq=repo.id)
    total = sum(pkg._size for pkg in pkgs)
    return dnf.cli.format.format_number(total)
class RepoListCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    repolist command.

    'repolist' prints a terse table of repositories; the 'repoinfo'
    alias (or --verbose) prints a detailed multi-line block per repo.
    """
    aliases = ('repolist', 'repoinfo')
    summary = _('display the configured software repositories')
    @staticmethod
    def set_argparser(parser):
        """Register repolist-specific command line options on *parser*."""
        # --all / --enabled / --disabled are mutually exclusive selectors.
        repolimit = parser.add_mutually_exclusive_group()
        repolimit.add_argument('--all', dest='_repos_action',
                               action='store_const', const='all', default=None,
                               help=_("show all repos"))
        repolimit.add_argument('--enabled', dest='_repos_action',
                               action='store_const', const='enabled',
                               help=_("show enabled repos (default)"))
        repolimit.add_argument('--disabled', dest='_repos_action',
                               action='store_const', const='disabled',
                               help=_("show disabled repos"))
        # Positional repo patterns; the 'enabled-default' default marks
        # "user made no explicit choice".  The callback splits literal
        # all/enabled/disabled words out of the positional arguments.
        parser.add_argument('repos', nargs='*', default='enabled-default', metavar="REPOSITORY",
                            choices=['all', 'enabled', 'disabled'],
                            action=OptionParser.PkgNarrowCallback,
                            help=_("Repository specification"))
    def pre_configure(self):
        # Unless --quiet, route logger output so repo listing stays clean.
        if not self.opts.quiet:
            self.cli.redirect_logger(stdout=logging.WARNING, stderr=logging.INFO)
    def configure(self):
        if not self.opts.quiet:
            self.cli.redirect_repo_progress()
        demands = self.cli.demands
        # Package counts/sizes require loaded repos and an activated sack.
        if self.base.conf.verbose or self.opts.command == 'repoinfo':
            demands.available_repos = True
            demands.sack_activation = True
        if self.opts._repos_action:
            self.opts.repos_action = self.opts._repos_action
    def run(self):
        """Print the repository listing in terse or verbose form."""
        arg = self.opts.repos_action
        extcmds = [x.lower() for x in self.opts.repos]
        verbose = self.base.conf.verbose
        repos = list(self.base.repos.values())
        repos.sort(key=operator.attrgetter('id'))
        term = self.output.term
        # Terminal highlight sequences: green/bold = enabled, red = disabled.
        on_ehibeg = term.FG_COLOR['green'] + term.MODE['bold']
        on_dhibeg = term.FG_COLOR['red']
        on_hiend = term.MODE['normal']
        tot_num = 0
        cols = []
        if not repos:
            logger.warning(_('No repositories available'))
            return
        # Show a status column only when enabled and disabled repos can mix.
        include_status = arg == 'all' or (arg == 'enabled-default' and extcmds)
        repoinfo_output = []
        for repo in repos:
            if len(extcmds) and not _repo_match(repo, extcmds):
                continue
            (ehibeg, dhibeg, hiend) = '', '', ''
            ui_enabled = ''
            ui_endis_wid = 0
            # NOTE(review): ui_excludes_num is never assigned below, so the
            # "Repo-excluded" line can't trigger — looks vestigial; confirm.
            ui_excludes_num = ''
            if include_status:
                (ehibeg, dhibeg, hiend) = (on_ehibeg, on_dhibeg, on_hiend)
            if repo.enabled:
                enabled = True
                if arg == 'disabled':
                    continue
                if include_status or verbose or self.opts.command == 'repoinfo':
                    ui_enabled = ehibeg + _('enabled') + hiend
                    ui_endis_wid = exact_width(_('enabled'))
                if verbose or self.opts.command == 'repoinfo':
                    ui_size = _repo_size(self.base.sack, repo)
            else:
                enabled = False
                if arg == 'enabled' or (arg == 'enabled-default' and not extcmds):
                    continue
                ui_enabled = dhibeg + _('disabled') + hiend
                ui_endis_wid = exact_width(_('disabled'))
            if not (verbose or self.opts.command == 'repoinfo'):
                # Terse mode: just collect one row per repo; printed below.
                rid = ucd(repo.id)
                cols.append((rid, repo.name, (ui_enabled, ui_endis_wid)))
            else:
                # Verbose / repoinfo mode: build a multi-line block per repo.
                if enabled:
                    md = repo.metadata
                else:
                    md = None
                out = [self.output.fmtKeyValFill(_("Repo-id            : "), repo.id),
                       self.output.fmtKeyValFill(_("Repo-name          : "), repo.name)]
                if include_status:
                    out += [self.output.fmtKeyValFill(_("Repo-status        : "),
                                                      ui_enabled)]
                if md and repo._repo.getRevision():
                    out += [self.output.fmtKeyValFill(_("Repo-revision      : "),
                                                      repo._repo.getRevision())]
                if md and repo._repo.getContentTags():
                    tags = repo._repo.getContentTags()
                    out += [self.output.fmtKeyValFill(_("Repo-tags          : "),
                                                      ", ".join(sorted(tags)))]
                if md and repo._repo.getDistroTags():
                    distroTagsDict = {k: v for (k, v) in repo._repo.getDistroTags()}
                    for (distro, tags) in distroTagsDict.items():
                        out += [self.output.fmtKeyValFill(
                            _("Repo-distro-tags      : "),
                            "[%s]: %s" % (distro, ", ".join(sorted(tags))))]
                if md:
                    # Package counts with excludes ignored vs. applied.
                    num = len(self.base.sack.query(flags=hawkey.IGNORE_EXCLUDES).filterm(
                        reponame__eq=repo.id))
                    num_available = len(self.base.sack.query().filterm(reponame__eq=repo.id))
                    ui_num = _num2ui_num(num)
                    ui_num_available = _num2ui_num(num_available)
                    tot_num += num
                    out += [
                        self.output.fmtKeyValFill(
                            _("Repo-updated       : "),
                            dnf.util.normalize_time(repo._repo.getMaxTimestamp())),
                        self.output.fmtKeyValFill(_("Repo-pkgs          : "), ui_num),
                        self.output.fmtKeyValFill(_("Repo-available-pkgs: "), ui_num_available),
                        self.output.fmtKeyValFill(_("Repo-size          : "), ui_size)]
                if repo.metalink:
                    out += [self.output.fmtKeyValFill(_("Repo-metalink      : "),
                                                      repo.metalink)]
                    if enabled:
                        ts = repo._repo.getTimestamp()
                        out += [self.output.fmtKeyValFill(
                            _("  Updated          : "), dnf.util.normalize_time(ts))]
                elif repo.mirrorlist:
                    out += [self.output.fmtKeyValFill(_("Repo-mirrors       : "),
                                                      repo.mirrorlist)]
                baseurls = repo.baseurl
                if baseurls:
                    out += [self.output.fmtKeyValFill(_("Repo-baseurl       : "),
                                                      ", ".join(baseurls))]
                elif enabled:
                    # No configured baseurl: show the first resolved mirror.
                    mirrors = repo._repo.getMirrors()
                    if mirrors:
                        url = "%s (%d more)" % (mirrors[0], len(mirrors) - 1)
                        out += [self.output.fmtKeyValFill(_("Repo-baseurl       : "), url)]
                expire = _expire_str(repo, md)
                out += [self.output.fmtKeyValFill(_("Repo-expire        : "), expire)]
                if repo.excludepkgs:
                    # TRANSLATORS: Packages that are excluded - their names like (dnf systemd)
                    out += [self.output.fmtKeyValFill(_("Repo-exclude       : "),
                                                      ", ".join(repo.excludepkgs))]
                if repo.includepkgs:
                    out += [self.output.fmtKeyValFill(_("Repo-include       : "),
                                                      ", ".join(repo.includepkgs))]
                if ui_excludes_num:
                    # TRANSLATORS: Number of packages that where excluded (5)
                    out += [self.output.fmtKeyValFill(_("Repo-excluded      : "),
                                                      ui_excludes_num)]
                if repo.repofile:
                    out += [self.output.fmtKeyValFill(_("Repo-filename      : "),
                                                      repo.repofile)]
                repoinfo_output.append("\n".join(map(ucd, out)))
        if repoinfo_output:
            print("\n\n".join(repoinfo_output))
        if not verbose and cols:
            # Work out the first (id) and last (enabled/disabled/count),
            # then chop the middle (name)...
            id_len = exact_width(_('repo id'))
            nm_len = 0
            st_len = 0
            for (rid, rname, (ui_enabled, ui_endis_wid)) in cols:
                if id_len < exact_width(rid):
                    id_len = exact_width(rid)
                if nm_len < exact_width(rname):
                    nm_len = exact_width(rname)
                if st_len < ui_endis_wid:
                    st_len = ui_endis_wid
            # Need this as well as above for: fill_exact_width()
            if include_status:
                if exact_width(_('status')) > st_len:
                    left = term.columns - (id_len + len(_('status')) + 2)
                else:
                    left = term.columns - (id_len + st_len + 2)
            else:  # Don't output a status column.
                left = term.columns - (id_len + 1)
            if left < nm_len:  # Name gets chopped
                nm_len = left
            else:  # Share the extra...
                left -= nm_len
                id_len += left // 2
                nm_len += left - (left // 2)
            txt_rid = fill_exact_width(_('repo id'), id_len)
            if include_status:
                txt_rnam = fill_exact_width(_('repo name'), nm_len, nm_len)
            else:
                txt_rnam = _('repo name')
            if not include_status:  # Don't output a status column.
                print("%s %s" % (txt_rid, txt_rnam))
            else:
                print("%s %s %s" % (txt_rid, txt_rnam, _('status')))
            for (rid, rname, (ui_enabled, ui_endis_wid)) in cols:
                if not include_status:  # Don't output a status column.
                    print("%s %s" % (fill_exact_width(rid, id_len), rname))
                    continue
                print("%s %s %s" % (fill_exact_width(rid, id_len),
                                    fill_exact_width(rname, nm_len, nm_len),
                                    ui_enabled))
        if verbose or self.opts.command == 'repoinfo':
            msg = _('Total packages: {}')
            print(msg.format(_num2ui_num(tot_num)))
| 12,922
|
Python
|
.py
| 256
| 35.066406
| 96
| 0.508709
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,763
|
updateinfo.py
|
rpm-software-management_dnf/dnf/cli/commands/updateinfo.py
|
# updateinfo.py
# UpdateInfo CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""UpdateInfo CLI command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import collections
import fnmatch
import hawkey
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _, exact_width
from dnf.pycomp import unicode
def _maxlen(iterable):
    """Return the display width of the widest item in a non-empty iterable."""
    return max(map(exact_width, iterable))
class UpdateInfoCommand(commands.Command):
    """Implementation of the UpdateInfo command."""
    # Advisory type constant -> human readable label.
    TYPE2LABEL = {hawkey.ADVISORY_BUGFIX: _('bugfix'),
                  hawkey.ADVISORY_ENHANCEMENT: _('enhancement'),
                  hawkey.ADVISORY_SECURITY: _('security'),
                  hawkey.ADVISORY_UNKNOWN: _('unknown'),
                  hawkey.ADVISORY_NEWPACKAGE: _('newpackage')}
    # Security severity string -> short label used in list output.
    SECURITY2LABEL = {'Critical': _('Critical/Sec.'),
                      'Important': _('Important/Sec.'),
                      'Moderate': _('Moderate/Sec.'),
                      'Low': _('Low/Sec.')}
    # Aliases that directly imply an output mode (yum compatibility).
    direct_commands = {'list-updateinfo'    : 'list',
                       'list-security'      : 'list',
                       'list-sec'           : 'list',
                       'info-updateinfo'    : 'info',
                       'info-security'      : 'info',
                       'info-sec'           : 'info',
                       'summary-updateinfo' : 'summary'}
    aliases = ['updateinfo', 'upif'] + list(direct_commands.keys())
    summary = _('display advisories about packages')
    availability_default = 'available'
    availabilities = ['installed', 'updates', 'all', availability_default]
    def __init__(self, cli):
        """Initialize the command."""
        super(UpdateInfoCommand, self).__init__(cli)
        # Lazily built cache of the installed-packages query.
        self._installed_query = None
    @staticmethod
    def set_argparser(parser):
        """Register updateinfo-specific command line options on *parser*."""
        # Which package versions the advisories should relate to.
        availability = parser.add_mutually_exclusive_group()
        availability.add_argument(
            "--available", dest='_availability', const='available', action='store_const',
            help=_("advisories about newer versions of installed packages (default)"))
        availability.add_argument(
            "--installed", dest='_availability', const='installed', action='store_const',
            help=_("advisories about equal and older versions of installed packages"))
        availability.add_argument(
            "--updates", dest='_availability', const='updates', action='store_const',
            help=_("advisories about newer versions of those installed packages "
                   "for which a newer version is available"))
        availability.add_argument(
            "--all", dest='_availability', const='all', action='store_const',
            help=_("advisories about any versions of installed packages"))
        cmds = ['summary', 'list', 'info']
        # How the matched advisories should be presented.
        output_format = parser.add_mutually_exclusive_group()
        output_format.add_argument("--summary", dest='_spec_action', const='summary',
                                   action='store_const',
                                   help=_('show summary of advisories (default)'))
        output_format.add_argument("--list", dest='_spec_action', const='list',
                                   action='store_const',
                                   help=_('show list of advisories'))
        output_format.add_argument("--info", dest='_spec_action', const='info',
                                   action='store_const',
                                   help=_('show info of advisories'))
        parser.add_argument("--with-cve", dest='with_cve', default=False,
                            action='store_true',
                            help=_('show only advisories with CVE reference'))
        parser.add_argument("--with-bz", dest='with_bz', default=False,
                            action='store_true',
                            help=_('show only advisories with bugzilla reference'))
        parser.add_argument('spec', nargs='*', metavar='SPEC',
                            choices=cmds, default=cmds[0],
                            action=OptionParser.PkgNarrowCallback,
                            help=_("Package specification"))
    def configure(self):
        """Do any command-specific configuration based on command arguments."""
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
        if self.opts.command in self.direct_commands:
            # we were called with direct command
            self.opts.spec_action = self.direct_commands[self.opts.command]
        else:
            if self.opts._spec_action:
                self.opts.spec_action = self.opts._spec_action
        if self.opts._availability:
            self.opts.availability = self.opts._availability
        else:
            # yum compatibility - search for all|available|installed|updates in spec[0]
            if not self.opts.spec or self.opts.spec[0] not in self.availabilities:
                self.opts.availability = self.availability_default
            else:
                self.opts.availability = self.opts.spec.pop(0)
        # filtering by advisory types (security/bugfix/enhancement/newpackage)
        self.opts._advisory_types = set()
        if self.opts.bugfix:
            self.opts._advisory_types.add(hawkey.ADVISORY_BUGFIX)
        if self.opts.enhancement:
            self.opts._advisory_types.add(hawkey.ADVISORY_ENHANCEMENT)
        if self.opts.newpackage:
            self.opts._advisory_types.add(hawkey.ADVISORY_NEWPACKAGE)
        if self.opts.security:
            self.opts._advisory_types.add(hawkey.ADVISORY_SECURITY)
        # yum compatibility - yum accepts types also as positional arguments
        if self.opts.spec:
            spec = self.opts.spec.pop(0)
            if spec == 'bugfix':
                self.opts._advisory_types.add(hawkey.ADVISORY_BUGFIX)
            elif spec == 'enhancement':
                self.opts._advisory_types.add(hawkey.ADVISORY_ENHANCEMENT)
            elif spec in ('security', 'sec'):
                self.opts._advisory_types.add(hawkey.ADVISORY_SECURITY)
            elif spec == 'newpackage':
                self.opts._advisory_types.add(hawkey.ADVISORY_NEWPACKAGE)
            elif spec in ('bugzillas', 'bzs'):
                self.opts.with_bz = True
            elif spec == 'cves':
                self.opts.with_cve = True
            else:
                # Not a recognized keyword - treat it as a package spec again.
                self.opts.spec.insert(0, spec)
        if self.opts.advisory:
            self.opts.spec.extend(self.opts.advisory)
    def run(self):
        """Execute the command with arguments."""
        # Pick the triplet generator matching the requested availability.
        if self.opts.availability == 'installed':
            apkg_adv_insts = self.installed_apkg_adv_insts(self.opts.spec)
            description = _('installed')
        elif self.opts.availability == 'updates':
            apkg_adv_insts = self.updating_apkg_adv_insts(self.opts.spec)
            description = _('updates')
        elif self.opts.availability == 'all':
            apkg_adv_insts = self.all_apkg_adv_insts(self.opts.spec)
            description = _('all')
        else:
            apkg_adv_insts = self.available_apkg_adv_insts(self.opts.spec)
            description = _('available')
        if self.opts.spec_action == 'list':
            self.display_list(apkg_adv_insts)
        elif self.opts.spec_action == 'info':
            self.display_info(apkg_adv_insts)
        else:
            self.display_summary(apkg_adv_insts, description)
    def _newer_equal_installed(self, apackage):
        """Return True when a package >= *apackage* is installed."""
        if self._installed_query is None:
            self._installed_query = self.base.sack.query().installed().apply()
        q = self._installed_query.filter(name=apackage.name, evr__gte=apackage.evr)
        return len(q) > 0
    def _advisory_matcher(self, advisory):
        """Return True when *advisory* passes the user's filters.

        With no filters at all every advisory matches; otherwise any single
        matching filter (type, id glob, severity, bug, CVE, reference kind)
        is sufficient.
        """
        if not self.opts._advisory_types \
                and not self.opts.spec \
                and not self.opts.severity \
                and not self.opts.bugzilla \
                and not self.opts.cves \
                and not self.opts.with_cve \
                and not self.opts.with_bz:
            return True
        if advisory.type in self.opts._advisory_types:
            return True
        if any(fnmatch.fnmatchcase(advisory.id, pat) for pat in self.opts.spec):
            return True
        if self.opts.severity and advisory.severity in self.opts.severity:
            return True
        if self.opts.bugzilla and any([advisory.match_bug(bug) for bug in self.opts.bugzilla]):
            return True
        if self.opts.cves and any([advisory.match_cve(cve) for cve in self.opts.cves]):
            return True
        if self.opts.with_cve:
            if any([ref.type == hawkey.REFERENCE_CVE for ref in advisory.references]):
                return True
        if self.opts.with_bz:
            if any([ref.type == hawkey.REFERENCE_BUGZILLA for ref in advisory.references]):
                return True
        return False
    def _apackage_advisory_installed(self, pkgs_query, cmptype, specs):
        """Return (adv. package, advisory, installed) triplets."""
        for apackage in pkgs_query.get_advisory_pkgs(cmptype):
            advisory = apackage.get_advisory(self.base.sack)
            advisory_match = self._advisory_matcher(advisory)
            apackage_match = any(fnmatch.fnmatchcase(apackage.name, pat)
                                 for pat in self.opts.spec)
            if advisory_match or apackage_match:
                installed = self._newer_equal_installed(apackage)
                yield apackage, advisory, installed
    def running_kernel_pkgs(self):
        """Return query containing packages of currently running kernel"""
        sack = self.base.sack
        q = sack.query().filterm(empty=True)
        kernel = sack.get_running_kernel()
        if kernel:
            q = q.union(sack.query().filterm(sourcerpm=kernel.sourcerpm))
        return q
    def available_apkg_adv_insts(self, specs):
        """Return available (adv. package, adv., inst.) triplets"""
        # check advisories for the latest installed packages
        q = self.base.sack.query().installed().latest(1)
        # plus packages of the running kernel
        q = q.union(self.running_kernel_pkgs().installed())
        return self._apackage_advisory_installed(q, hawkey.GT, specs)
    def installed_apkg_adv_insts(self, specs):
        """Return installed (adv. package, adv., inst.) triplets"""
        return self._apackage_advisory_installed(
            self.base.sack.query().installed(), hawkey.LT | hawkey.EQ, specs)
    def updating_apkg_adv_insts(self, specs):
        """Return updating (adv. package, adv., inst.) triplets"""
        return self._apackage_advisory_installed(
            self.base.sack.query().filterm(upgradable=True), hawkey.GT, specs)
    def all_apkg_adv_insts(self, specs):
        """Return installed (adv. package, adv., inst.) triplets"""
        return self._apackage_advisory_installed(
            self.base.sack.query().installed(), hawkey.LT | hawkey.EQ | hawkey.GT, specs)
    def _summary(self, apkg_adv_insts):
        """Make the summary of advisories."""
        # Remove duplicate advisory IDs. We assume that the ID is unique within
        # a repository and two advisories with the same IDs in different
        # repositories must have the same type.
        id2type = {}
        for (apkg, advisory, installed) in apkg_adv_insts:
            id2type[advisory.id] = advisory.type
            # Security advisories are additionally counted per severity.
            if advisory.type == hawkey.ADVISORY_SECURITY:
                id2type[(advisory.id, advisory.severity)] = (advisory.type, advisory.severity)
        return collections.Counter(id2type.values())
    def display_summary(self, apkg_adv_insts, description):
        """Display the summary of advisories."""
        typ2cnt = self._summary(apkg_adv_insts)
        if typ2cnt:
            print(_('Updates Information Summary: ') + description)
            # Convert types to strings and order the entries.
            # First tuple item is the indent level of the printed row.
            label_counts = [
                (0, _('New Package notice(s)'), typ2cnt[hawkey.ADVISORY_NEWPACKAGE]),
                (0, _('Security notice(s)'), typ2cnt[hawkey.ADVISORY_SECURITY]),
                (1, _('Critical Security notice(s)'),
                 typ2cnt[(hawkey.ADVISORY_SECURITY, 'Critical')]),
                (1, _('Important Security notice(s)'),
                 typ2cnt[(hawkey.ADVISORY_SECURITY, 'Important')]),
                (1, _('Moderate Security notice(s)'),
                 typ2cnt[(hawkey.ADVISORY_SECURITY, 'Moderate')]),
                (1, _('Low Security notice(s)'),
                 typ2cnt[(hawkey.ADVISORY_SECURITY, 'Low')]),
                (1, _('Unknown Security notice(s)'),
                 typ2cnt[(hawkey.ADVISORY_SECURITY, None)]),
                (0, _('Bugfix notice(s)'), typ2cnt[hawkey.ADVISORY_BUGFIX]),
                (0, _('Enhancement notice(s)'), typ2cnt[hawkey.ADVISORY_ENHANCEMENT]),
                (0, _('other notice(s)'), typ2cnt[hawkey.ADVISORY_UNKNOWN])]
            width = _maxlen(unicode(v[2]) for v in label_counts if v[2])
            for indent, label, count in label_counts:
                if not count:
                    continue
                print('    %*s %s' % (width + 4 * indent, unicode(count), label))
        if self.base.conf.autocheck_running_kernel:
            self.cli._check_running_kernel()
    def display_list(self, apkg_adv_insts):
        """Display the list of advisories."""
        def inst2mark(inst):
            # Installed marker column; only shown in --all mode.
            if not self.opts.availability == 'all':
                return ''
            elif inst:
                return 'i '
            else:
                return '  '
        def type2label(typ, sev):
            if typ == hawkey.ADVISORY_SECURITY:
                return self.SECURITY2LABEL.get(sev, _('Unknown/Sec.'))
            else:
                return self.TYPE2LABEL.get(typ, _('unknown'))
        # Keyed by (nevra, installed, updated); values map advisory/ref id
        # to (type, severity) for later labeling.
        nevra_inst_dict = dict()
        for apkg, advisory, installed in apkg_adv_insts:
            nevra = '%s-%s.%s' % (apkg.name, apkg.evr, apkg.arch)
            if self.opts.with_cve or self.opts.with_bz:
                # List one row per matching reference instead of per advisory.
                for ref in advisory.references:
                    if ref.type == hawkey.REFERENCE_BUGZILLA and not self.opts.with_bz:
                        continue
                    elif ref.type == hawkey.REFERENCE_CVE and not self.opts.with_cve:
                        continue
                    nevra_inst_dict.setdefault((nevra, installed, advisory.updated), dict())[ref.id] = (
                        advisory.type, advisory.severity)
            else:
                nevra_inst_dict.setdefault((nevra, installed, advisory.updated), dict())[advisory.id] = (
                    advisory.type, advisory.severity)
        advlist = []
        # convert types to labels, find max len of advisory IDs and types
        idw = tlw = nw = 0
        for (nevra, inst, aupdated), id2type in sorted(nevra_inst_dict.items(), key=lambda x: x[0]):
            nw = max(nw, len(nevra))
            for aid, atypesev in id2type.items():
                idw = max(idw, len(aid))
                label = type2label(*atypesev)
                tlw = max(tlw, len(label))
                advlist.append((inst2mark(inst), aid, label, nevra, aupdated))
        for (inst, aid, label, nevra, aupdated) in advlist:
            if self.base.conf.verbose:
                print('%s%-*s %-*s %-*s %s' % (inst, idw, aid, tlw, label, nw, nevra, aupdated))
            else:
                print('%s%-*s %-*s %s' % (inst, idw, aid, tlw, label, nevra))
    def display_info(self, apkg_adv_insts):
        """Display the details about available advisories."""
        arches = self.base.sack.list_arches()
        verbose = self.base.conf.verbose
        labels = (_('Update ID'), _('Type'), _('Updated'), _('Bugs'),
                  _('CVEs'), _('Description'), _('Severity'), _('Rights'),
                  _('Files'), _('Installed'))
        def advisory2info(advisory, installed):
            # Attribute lists are positionally parallel to *labels* above;
            # None means "suppress this row".
            attributes = [
                [advisory.id],
                [self.TYPE2LABEL.get(advisory.type, _('unknown'))],
                [unicode(advisory.updated)],
                [],
                [],
                (advisory.description or '').splitlines(),
                [advisory.severity],
                (advisory.rights or '').splitlines(),
                sorted(set(pkg.filename for pkg in advisory.packages
                           if pkg.arch in arches)),
                None]
            for ref in advisory.references:
                if ref.type == hawkey.REFERENCE_BUGZILLA:
                    attributes[3].append('{} - {}'.format(ref.id, ref.title or ''))
                elif ref.type == hawkey.REFERENCE_CVE:
                    attributes[4].append(ref.id)
            attributes[3].sort()
            attributes[4].sort()
            if not verbose:
                # Rights and Files rows only appear in verbose mode.
                attributes[7] = None
                attributes[8] = None
            if self.opts.availability == 'all':
                attributes[9] = [_('true') if installed else _('false')]
            width = _maxlen(labels)
            lines = []
            lines.append('=' * 79)
            lines.append('  ' + advisory.title)
            lines.append('=' * 79)
            for label, atr_lines in zip(labels, attributes):
                if atr_lines in (None, [None]):
                    continue
                for i, line in enumerate(atr_lines):
                    # Label only on the first line of a multi-line value.
                    key = label if i == 0 else ''
                    key_padding = width - exact_width(key)
                    lines.append('%*s%s: %s' % (key_padding, "", key, line))
            return '\n'.join(lines)
        advisories = set()
        for apkg, advisory, installed in apkg_adv_insts:
            advisories.add(advisory2info(advisory, installed))
        print("\n\n".join(sorted(advisories, key=lambda x: x.lower())))
| 19,002
|
Python
|
.py
| 363
| 39.699725
| 105
| 0.582997
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,764
|
remove.py
|
rpm-software-management_dnf/dnf/cli/commands/remove.py
|
# remove_command.py
# Remove CLI command.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.i18n import _
from dnf.cli.option_parser import OptionParser
import dnf.base
import argparse
import hawkey
import dnf.exceptions
import logging
logger = logging.getLogger("dnf")
class RemoveCommand(commands.Command):
    """Remove command."""
    # Alias suffix -> hawkey NEVRA parsing form forced for package specs.
    nevra_forms = {'remove-n': hawkey.FORM_NAME,
                   'remove-na': hawkey.FORM_NA,
                   'remove-nevra': hawkey.FORM_NEVRA,
                   'erase-n': hawkey.FORM_NAME,
                   'erase-na': hawkey.FORM_NA,
                   'erase-nevra': hawkey.FORM_NEVRA}
    aliases = ('remove', 'erase', 'rm') + tuple(nevra_forms.keys())
    summary = _('remove a package or packages from your system')
    @staticmethod
    def set_argparser(parser):
        """Register remove-specific command line options on *parser*."""
        # --duplicates and --oldinstallonly are alternative bulk modes.
        mgroup = parser.add_mutually_exclusive_group()
        mgroup.add_argument('--duplicates', action='store_true',
                            dest='duplicated',
                            help=_('remove duplicated packages'))
        # Hidden legacy spelling of --duplicates.
        mgroup.add_argument('--duplicated', action='store_true',
                            help=argparse.SUPPRESS)
        mgroup.add_argument('--oldinstallonly', action='store_true',
                            help=_(
                                'remove installonly packages over the limit'))
        parser.add_argument('packages', nargs='*', help=_('Package to remove'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            metavar=_('PACKAGE'))
    def configure(self):
        demands = self.cli.demands
        # disable all available repos to delete whole dependency tree
        # instead of replacing removable package with available packages
        demands.resolving = True
        demands.root_user = True
        demands.sack_activation = True
        if self.opts.duplicated:
            # --duplicates reinstalls the kept copy, so repos are needed.
            demands.available_repos = True
        elif dnf.base.WITH_MODULES and self.opts.grp_specs:
            # Module/group removal needs repo (module) metadata.
            demands.available_repos = True
            demands.fresh_metadata = False
            demands.allow_erasing = True
        else:
            demands.allow_erasing = True
            demands.available_repos = False
    def run(self):
        """Mark the requested packages/groups/duplicates for removal."""
        forms = []
        if self.opts.command in self.nevra_forms:
            forms = [self.nevra_forms[self.opts.command]]
        # local pkgs not supported in erase command
        self.opts.pkg_specs += self.opts.filenames
        done = False
        if self.opts.duplicated:
            # Keep the newest copy of each duplicated NA, remove the rest.
            q = self.base.sack.query()
            instonly = self.base._get_installonly_query(q.installed())
            dups = q.duplicated().difference(instonly)
            if not dups:
                logger.info(_('No duplicated packages found for removal.'))
                return
            for (name, arch), pkgs_list in dups._na_dict().items():
                if len(pkgs_list) < 2:
                    continue
                pkgs_list.sort(reverse=True)
                try:
                    # Reinstall the newest so its files are intact afterwards.
                    self.base.reinstall(str(pkgs_list[0]))
                except dnf.exceptions.PackagesNotAvailableError:
                    xmsg = ''
                    msg = _('Installed package %s%s not available.')
                    logger.warning(msg, self.base.output.term.bold(str(pkgs_list[0])), xmsg)
                for pkg in pkgs_list[1:]:
                    self.base.package_remove(pkg)
            return
        if self.opts.oldinstallonly:
            q = self.base.sack.query()
            # latest(-1) selects everything except the newest of each name.
            instonly = self.base._get_installonly_query(q.installed()).latest(-1)
            # also remove running kernel from the set
            kernel = self.base.sack.get_running_kernel()
            if kernel is not None:
                running_installonly = instonly.filter(
                    epoch=kernel.epoch, version=kernel.version, release=kernel.release)
                if running_installonly:
                    instonly = instonly.difference(running_installonly)
            if instonly:
                for pkg in instonly:
                    self.base.package_remove(pkg)
            else:
                logger.info(_('No old installonly packages found for removal.'))
            return
        # Remove groups.
        if self.opts.grp_specs and forms:
            # Group specs make no sense with a forced NEVRA form.
            for grp_spec in self.opts.grp_specs:
                msg = _('Not a valid form: %s')
                logger.warning(msg, self.base.output.term.bold(grp_spec))
        elif self.opts.grp_specs:
            if dnf.base.WITH_MODULES:
                # NOTE(review): dnf.module.module_base is not imported in this
                # file; this relies on it being importable via the dnf package
                # at runtime — confirm.
                module_base = dnf.module.module_base.ModuleBase(self.base)
                skipped_grps = module_base.remove(self.opts.grp_specs)
                if len(self.opts.grp_specs) != len(skipped_grps):
                    done = True
            else:
                skipped_grps = self.opts.grp_specs
            if skipped_grps:
                # Fall back to comps group removal for specs modules skipped.
                for group in skipped_grps:
                    try:
                        if self.base.env_group_remove([group]):
                            done = True
                    except dnf.exceptions.Error:
                        pass
        for pkg_spec in self.opts.pkg_specs:
            try:
                self.base.remove(pkg_spec, forms=forms)
            except dnf.exceptions.MarkingError as e:
                msg = '{}: {}'.format(e.value, self.base.output.term.bold(pkg_spec))
                logger.info(msg)
            else:
                done = True
        if not done:
            logger.warning(_('No packages marked for removal.'))
| 6,606
|
Python
|
.py
| 143
| 34.216783
| 92
| 0.593731
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,765
|
check.py
|
rpm-software-management_dnf/dnf/cli/commands/check.py
|
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.cli import commands
import argparse
import dnf.exceptions
class CheckCommand(commands.Command):
    """A class containing methods needed by the cli to execute the check
    command.

    Inspects the installed package database for missing dependencies,
    duplicates, obsoleted packages and dangling provides.
    """

    aliases = ('check',)
    summary = _('check for problems in the packagedb')

    @staticmethod
    def set_argparser(parser):
        """Register the command line options understood by 'dnf check'."""
        parser.add_argument('--all', dest='check_types',
                            action='append_const', const='all',
                            help=_('show all problems; default'))
        parser.add_argument('--dependencies', dest='check_types',
                            action='append_const', const='dependencies',
                            help=_('show dependency problems'))
        parser.add_argument('--duplicates', dest='check_types',
                            action='append_const', const='duplicates',
                            help=_('show duplicate problems'))
        parser.add_argument('--obsoleted', dest='check_types',
                            action='append_const', const='obsoleted',
                            help=_('show obsoleted packages'))
        parser.add_argument('--provides', dest='check_types',
                            action='append_const', const='provides',
                            help=_('show problems with provides'))
        # Add compatibility with yum but invisible in help
        # In choices [] allows to return empty list if no argument otherwise it fails
        parser.add_argument('check_yum_types', nargs='*', choices=[
            'all', 'dependencies', 'duplicates', 'obsoleted', 'provides', []],
            help=argparse.SUPPRESS)

    def configure(self):
        """Merge option-style and yum-style check types into one set."""
        self.cli.demands.sack_activation = True
        if self.opts.check_yum_types:
            if self.opts.check_types:
                self.opts.check_types = self.opts.check_types + \
                    self.opts.check_yum_types
            else:
                self.opts.check_types = self.opts.check_yum_types
        if not self.opts.check_types:
            self.opts.check_types = {'all'}
        else:
            self.opts.check_types = set(self.opts.check_types)
        # the consistency check must see every installed package, even those
        # that would normally be excluded
        self.base.conf.disable_excludes += ["all"]

    def run(self):
        """Run the requested checks.

        Raises dnf.exceptions.Error when at least one problem was found.
        """
        output_set = set()
        q = self.base.sack.query().installed()

        if self.opts.check_types.intersection({'all', 'dependencies'}):
            sack = None
            for pkg in q:
                for require in set(pkg.regular_requires) | set(set(pkg.requires_pre) - set(pkg.prereq_ignoreinst)):
                    if str(require).startswith('rpmlib'):
                        # rpmlib() requires are satisfied by rpm itself
                        continue
                    if not len(q.filter(provides=[require])):
                        if str(require).startswith('('):
                            # rich deps can be only tested by solver
                            if sack is None:
                                sack = dnf.sack.rpmdb_sack(self.base)
                            selector = dnf.selector.Selector(sack)
                            selector.set(provides=str(require))
                            goal = dnf.goal.Goal(sack)
                            goal.protect_running_kernel = self.base.conf.protect_running_kernel
                            goal.install(select=selector, optional=False)
                            solved = goal.run()
                            # only the @System repo is in the sack, therefore the
                            # goal is solvable only when the rich dep doesn't
                            # require any additional package
                            if solved:
                                continue
                        msg = _("{} has missing requires of {}")
                        output_set.add(msg.format(
                            self.base.output.term.bold(pkg),
                            self.base.output.term.bold(require)))
                for conflict in pkg.conflicts:
                    conflicted = q.filter(provides=[conflict],
                                          name=str(conflict).split()[0])
                    for conflict_pkg in conflicted:
                        # BUG FIX: mark the message for translation with _()
                        # for consistency with every other check message
                        msg = _('{} has installed conflict "{}": {}')
                        output_set.add(msg.format(
                            self.base.output.term.bold(pkg),
                            self.base.output.term.bold(conflict),
                            self.base.output.term.bold(conflict_pkg)))

        if self.opts.check_types.intersection({'all', 'duplicates'}):
            # install-only packages (e.g. kernels) may legitimately have
            # multiple versions installed
            installonly = self.base._get_installonly_query(q)
            dups = q.duplicated().difference(installonly)._name_dict()
            for name, pkgs in dups.items():
                pkgs.sort()
                for dup in pkgs[1:]:
                    msg = _("{} is a duplicate with {}").format(
                        self.base.output.term.bold(pkgs[0]),
                        self.base.output.term.bold(dup))
                    output_set.add(msg)

        if self.opts.check_types.intersection({'all', 'obsoleted'}):
            for pkg in q:
                for obsolete in pkg.obsoletes:
                    obsoleted = q.filter(provides=[obsolete],
                                         name=str(obsolete).split()[0])
                    if len(obsoleted):
                        msg = _("{} is obsoleted by {}").format(
                            self.base.output.term.bold(obsoleted[0]),
                            self.base.output.term.bold(pkg))
                        output_set.add(msg)

        if self.opts.check_types.intersection({'all', 'provides'}):
            for pkg in q:
                for provide in pkg.provides:
                    if pkg not in q.filter(provides=[provide]):
                        msg = _("{} provides {} but it cannot be found")
                        output_set.add(msg.format(
                            self.base.output.term.bold(pkg),
                            self.base.output.term.bold(provide)))

        for msg in sorted(output_set):
            print(msg)

        if output_set:
            raise dnf.exceptions.Error(
                'Check discovered {} problem(s)'.format(len(output_set)))
| 7,231
|
Python
|
.py
| 134
| 37.641791
| 115
| 0.542426
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,766
|
alias.py
|
rpm-software-management_dnf/dnf/cli/commands/alias.py
|
# alias.py
# Alias CLI command.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os.path
import dnf.cli
import dnf.cli.aliases
from dnf.cli import commands
import dnf.conf
import dnf.exceptions
from dnf.i18n import _
logger = logging.getLogger('dnf')
class AliasCommand(commands.Command):
    """Implement 'dnf alias': list, add and delete command aliases and
    enable/disable alias resolving.
    """

    aliases = ('alias',)
    summary = _('List or create command aliases')

    @staticmethod
    def set_argparser(parser):
        """Register the command line options understood by 'dnf alias'."""
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable-resolving', default=False, action='store_true',
            help=_('enable aliases resolving'))
        enable_group.add_argument(
            '--disable-resolving', default=False, action='store_true',
            help=_('disable aliases resolving'))
        parser.add_argument("subcommand", nargs='?', default='list',
                            choices=['add', 'list', 'delete'],
                            help=_("action to do with aliases"))
        parser.add_argument("alias", nargs="*", metavar="command[=result]",
                            help=_("alias definition"))

    def configure(self):
        demands = self.cli.demands
        if self.opts.subcommand in ('add', 'delete'):
            # modifying the alias files requires root
            demands.root_user = True
        self.aliases_base = dnf.cli.aliases.Aliases()
        self.aliases_base._load_aliases()
        self.resolving_enabled = self.aliases_base.enabled
        self._update_config_from_options()

    def _update_config_from_options(self):
        """Persist --enable-resolving/--disable-resolving to the config file."""
        enabled = None
        if self.opts.enable_resolving:
            enabled = True
            logger.info(_("Aliases are now enabled"))
        if self.opts.disable_resolving:
            enabled = False
            logger.info(_("Aliases are now disabled"))
        if enabled is not None:
            if not os.path.exists(dnf.cli.aliases.ALIASES_CONF_PATH):
                open(dnf.cli.aliases.ALIASES_CONF_PATH, 'w').close()
            dnf.conf.BaseConfig.write_raw_configfile(
                dnf.cli.aliases.ALIASES_CONF_PATH,
                'main', None, {'enabled': enabled})
            if not self.aliases_base._disabled_by_environ():
                self.aliases_base.enabled = enabled

    def _parse_option_alias(self):
        """Parse 'name=expansion' arguments into {name: [expansion tokens]}.

        Invalid definitions are warned about and skipped.
        """
        new_aliases = {}
        for alias in self.opts.alias:
            alias = alias.split('=', 1)
            cmd = alias[0].strip()
            if len(cmd.split()) != 1:
                logger.warning(_("Invalid alias key: %s"), cmd)
                continue
            if cmd.startswith('-'):
                logger.warning(_("Invalid alias key: %s"), cmd)
                continue
            if len(alias) == 1:
                logger.warning(_("Alias argument has no value: %s"), cmd)
                continue
            new_aliases[cmd] = alias[1].split()
        return new_aliases

    def _load_user_aliases(self):
        """Return the user AliasesConfig, or None if it cannot be parsed."""
        if not os.path.exists(dnf.cli.aliases.ALIASES_USER_PATH):
            open(dnf.cli.aliases.ALIASES_USER_PATH, 'w').close()
        try:
            conf = dnf.cli.aliases.AliasesConfig(
                dnf.cli.aliases.ALIASES_USER_PATH)
        except dnf.exceptions.ConfigError as e:
            logger.warning(_('Config error: %s'), e)
            return None
        return conf

    def _store_user_aliases(self, user_aliases, enabled):
        """Write the user alias file from the given mapping."""
        # BUG FIX: use a context manager so the file is flushed and closed
        # even if writing fails (the original leaked the file object)
        with open(dnf.cli.aliases.ALIASES_USER_PATH, 'w') as fileobj:
            output = "[main]\n"
            output += "enabled = {}\n\n".format(enabled)
            output += "[aliases]\n"
            for key, value in user_aliases.items():
                output += "{} = {}\n".format(key, ' '.join(value))
            fileobj.write(output)

    def add_aliases(self, aliases):
        conf = self._load_user_aliases()
        # BUG FIX: _load_user_aliases() returns None on a config error;
        # dereferencing conf.aliases then raised AttributeError
        if conf is None:
            return
        user_aliases = conf.aliases
        if user_aliases is None:
            return
        user_aliases.update(aliases)
        self._store_user_aliases(user_aliases, conf.enabled)
        logger.info(_("Aliases added: %s"), ', '.join(aliases.keys()))

    def remove_aliases(self, cmds):
        conf = self._load_user_aliases()
        # BUG FIX: guard against a None config (see add_aliases)
        if conf is None:
            return
        user_aliases = conf.aliases
        if user_aliases is None:
            return
        valid_cmds = []
        for cmd in cmds:
            try:
                del user_aliases[cmd]
                valid_cmds.append(cmd)
            except KeyError:
                logger.info(_("Alias not found: %s"), cmd)
        self._store_user_aliases(user_aliases, conf.enabled)
        logger.info(_("Aliases deleted: %s"), ', '.join(valid_cmds))

    def list_alias(self, cmd):
        """Print the full resolution of one alias, or the error hit."""
        args = [cmd]
        try:
            args = self.aliases_base._resolve(args)
        except dnf.exceptions.Error as e:
            logger.error(
                _('%s, alias %s="%s"'), e, cmd, (' ').join(self.aliases_base.aliases[cmd]))
        else:
            print(_("Alias %s='%s'") % (cmd, " ".join(args)))

    def run(self):
        if not self.aliases_base.enabled:
            logger.warning(_("Aliases resolving is disabled."))

        if self.opts.subcommand == 'add':  # Add new alias
            aliases = self._parse_option_alias()
            if not aliases:
                raise dnf.exceptions.Error(_("No aliases specified."))
            self.add_aliases(aliases)
            return

        if self.opts.subcommand == 'delete':  # Remove alias
            cmds = self.opts.alias
            if cmds == []:
                raise dnf.exceptions.Error(_("No alias specified."))
            self.remove_aliases(cmds)
            return

        if not self.opts.alias:  # List all aliases
            if not self.aliases_base.aliases:
                logger.info(_("No aliases defined."))
                return
            for cmd in self.aliases_base.aliases:
                self.list_alias(cmd)
        else:  # List alias by key
            for cmd in self.opts.alias:
                if cmd not in self.aliases_base.aliases:
                    logger.info(_("No match for alias: %s") % cmd)
                    continue
                self.list_alias(cmd)
| 7,088
|
Python
|
.py
| 164
| 33.341463
| 91
| 0.597391
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,767
|
history.py
|
rpm-software-management_dnf/dnf/cli/commands/history.py
|
# Copyright 2006 Duke University
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import libdnf
import hawkey
from dnf.i18n import _, ucd
from dnf.cli import commands
from dnf.transaction_sr import TransactionReplay, serialize_transaction
import dnf.cli
import dnf.exceptions
import dnf.transaction
import dnf.util
import json
import logging
import os
logger = logging.getLogger('dnf')
class HistoryCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    history command.
    """

    aliases = ('history', 'hist')
    summary = _('display, or use, the transaction history')

    _CMDS = ['list', 'info', 'redo', 'replay', 'rollback', 'store', 'undo', 'userinstalled']

    def __init__(self, *args, **kw):
        super(HistoryCommand, self).__init__(*args, **kw)
        self._require_one_transaction_id = False

    @staticmethod
    def set_argparser(parser):
        """Register the command line options understood by 'dnf history'."""
        parser.add_argument('transactions_action', nargs='?', metavar="COMMAND",
                            help="Available commands: {} (default), {}".format(
                                HistoryCommand._CMDS[0],
                                ", ".join(HistoryCommand._CMDS[1:])))
        parser.add_argument('--reverse', action='store_true',
                            help="display history list output reversed")
        parser.add_argument("-o", "--output", default=None,
                            help=_("For the store command, file path to store the transaction to"))
        parser.add_argument("--ignore-installed", action="store_true",
                            help=_("For the replay command, don't check for installed packages matching "
                                   "those in transaction"))
        parser.add_argument("--ignore-extras", action="store_true",
                            help=_("For the replay command, don't check for extra packages pulled "
                                   "into the transaction"))
        parser.add_argument("--skip-unavailable", action="store_true",
                            help=_("For the replay command, skip packages that are not available or have "
                                   "missing dependencies"))
        parser.add_argument('transactions', nargs='*', metavar="TRANSACTION",
                            help="For commands working with history transactions, "
                                 "Transaction ID (<number>, 'last' or 'last-<number>' "
                                 "for one transaction, <transaction-id>..<transaction-id> "
                                 "for a range)")
        parser.add_argument('transaction_filename', nargs='?', metavar="TRANSACTION_FILE",
                            help="For the replay command, path to the stored "
                                 "transaction file to replay")

    def configure(self):
        """Validate the subcommand arguments and set cli demands."""
        if not self.opts.transactions_action:
            # no positional argument given
            self.opts.transactions_action = self._CMDS[0]
        elif self.opts.transactions_action not in self._CMDS:
            # first positional argument is not a command
            self.opts.transactions.insert(0, self.opts.transactions_action)
            self.opts.transactions_action = self._CMDS[0]

        self._require_one_transaction_id_msg = _("Found more than one transaction ID.\n"
                                                 "'{}' requires one transaction ID or package name."
                                                 ).format(self.opts.transactions_action)

        demands = self.cli.demands
        if self.opts.transactions_action == 'replay':
            if not self.opts.transactions:
                raise dnf.cli.CliError(_('No transaction file name given.'))
            if len(self.opts.transactions) > 1:
                raise dnf.cli.CliError(_('More than one argument given as transaction file name.'))
            # in case of replay, copy over the file name to it's appropriate variable
            # (the arg parser can't distinguish here)
            self.opts.transaction_filename = os.path.abspath(self.opts.transactions[0])
            self.opts.transactions = []

            demands.available_repos = True
            demands.resolving = True
            demands.root_user = True
            # Override configuration options that affect how the transaction is resolved
            self.base.conf.clean_requirements_on_remove = False
            self.base.conf.install_weak_deps = False
            dnf.cli.commands._checkGPGKey(self.base, self.cli)
        elif self.opts.transactions_action == 'store':
            self._require_one_transaction_id = True
            if not self.opts.transactions:
                raise dnf.cli.CliError(_('No transaction ID or package name given.'))
        elif self.opts.transactions_action in ['redo', 'undo', 'rollback']:
            demands.available_repos = True
            demands.resolving = True
            demands.root_user = True
            self._require_one_transaction_id = True
            if not self.opts.transactions:
                msg = _('No transaction ID or package name given.')
                logger.critical(msg)
                raise dnf.cli.CliError(msg)
            elif len(self.opts.transactions) > 1:
                logger.critical(self._require_one_transaction_id_msg)
                raise dnf.cli.CliError(self._require_one_transaction_id_msg)
            demands.available_repos = True
            dnf.cli.commands._checkGPGKey(self.base, self.cli)
        else:
            demands.fresh_metadata = False
        demands.sack_activation = True
        if self.base.history.path != ":memory:" and not os.access(self.base.history.path, os.R_OK):
            # BUG FIX: interpolate AFTER translation — "%s" % path inside _()
            # produced a string that can never match the message catalog
            msg = _("You don't have access to the history DB: %s") % self.base.history.path
            logger.critical(msg)
            raise dnf.cli.CliError(msg)

    def get_error_output(self, error):
        """Get suggestions for resolving the given error."""
        if isinstance(error, dnf.exceptions.TransactionCheckError):
            if self.opts.transactions_action == 'undo':
                id_, = self.opts.transactions
                return (_('Cannot undo transaction %s, doing so would result '
                          'in an inconsistent package database.') % id_,)
            elif self.opts.transactions_action == 'rollback':
                id_, = (self.opts.transactions if self.opts.transactions[0] != 'force'
                        else self.opts.transactions[1:])
                return (_('Cannot rollback transaction %s, doing so would '
                          'result in an inconsistent package database.') % id_,)
        return dnf.cli.commands.Command.get_error_output(self, error)

    def _hcmd_redo(self, extcmds):
        """Repeat a stored transaction by replaying its serialized form."""
        old = self._history_get_transaction(extcmds)
        data = serialize_transaction(old)
        self.replay = TransactionReplay(
            self.base,
            data=data,
            ignore_installed=True,
            ignore_extras=True,
            skip_unavailable=self.opts.skip_unavailable
        )
        self.replay.run()

    def _history_get_transactions(self, extcmds):
        """Return the history transactions matching the given IDs, or raise."""
        if not extcmds:
            raise dnf.cli.CliError(_('No transaction ID given'))

        old = self.base.history.old(extcmds)
        if not old:
            raise dnf.cli.CliError(_('Transaction ID "{0}" not found.').format(extcmds[0]))

        return old

    def _history_get_transaction(self, extcmds):
        """Return exactly one matching history transaction, or raise."""
        old = self._history_get_transactions(extcmds)
        if len(old) > 1:
            raise dnf.cli.CliError(_('Found more than one transaction ID!'))
        return old[0]

    def _hcmd_undo(self, extcmds):
        old = self._history_get_transaction(extcmds)
        self._revert_transaction(old)

    def _hcmd_rollback(self, extcmds):
        """Revert every transaction performed after the given one."""
        old = self._history_get_transaction(extcmds)
        last = self.base.history.last()

        merged_trans = None
        if old.tid != last.tid:
            # history.old([]) returns all transactions and we don't want that
            # so skip merging the transactions when trying to rollback to the last transaction
            # which is the current system state and rollback is not applicable
            for trans in self.base.history.old(list(range(old.tid + 1, last.tid + 1))):
                if trans.altered_lt_rpmdb:
                    logger.warning(_('Transaction history is incomplete, before %u.'), trans.tid)
                elif trans.altered_gt_rpmdb:
                    logger.warning(_('Transaction history is incomplete, after %u.'), trans.tid)

                if merged_trans is None:
                    merged_trans = dnf.db.history.MergedTransactionWrapper(trans)
                else:
                    merged_trans.merge(trans)

        self._revert_transaction(merged_trans)

    def _revert_transaction(self, trans):
        """Replay *trans* with every action inverted (undo/rollback helper)."""
        action_map = {
            "Install": "Removed",
            "Removed": "Install",
            "Upgrade": "Downgraded",
            "Upgraded": "Downgrade",
            "Downgrade": "Upgraded",
            "Downgraded": "Upgrade",
            "Reinstalled": "Reinstall",
            "Reinstall": "Reinstalled",
            "Obsoleted": "Install",
            "Obsolete": "Obsoleted",
            "Reason Change": "Reason Change",
        }

        data = serialize_transaction(trans)

        # revert actions in the serialized transaction data to perform rollback/undo
        for content_type in ("rpms", "groups", "environments"):
            for ti in data.get(content_type, []):
                ti["action"] = action_map[ti["action"]]

                if ti["action"] == "Install" and ti.get("reason", None) == "clean":
                    ti["reason"] = "dependency"

                if ti["action"] == "Reason Change" and "nevra" in ti:
                    subj = hawkey.Subject(ti["nevra"])
                    nevra = subj.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA])[0]
                    reason = self.output.history.swdb.resolveRPMTransactionItemReason(
                        nevra.name,
                        nevra.arch,
                        trans.tids()[0] - 1
                    )
                    ti["reason"] = libdnf.transaction.TransactionItemReasonToString(reason)

                if ti.get("repo_id") == hawkey.SYSTEM_REPO_NAME:
                    # erase repo_id, because it's not possible to perform forward actions from the @System repo
                    ti["repo_id"] = None

        self.replay = TransactionReplay(
            self.base,
            data=data,
            ignore_installed=True,
            ignore_extras=True,
            skip_unavailable=self.opts.skip_unavailable
        )
        self.replay.run()

    def _hcmd_userinstalled(self):
        """Execute history userinstalled command."""
        pkgs = tuple(self.base.iter_userinstalled())
        n_listed = self.output.listPkgs(pkgs, 'Packages installed by user', 'nevra')
        if n_listed == 0:
            raise dnf.cli.CliError(_('No packages to list'))

    def _args2transaction_ids(self):
        """Convert commandline arguments to transaction ids"""

        def str2transaction_id(s):
            if s == 'last':
                s = '0'
            elif s.startswith('last-'):
                # 'last-<n>' -> '-<n>': n transactions before the last one
                s = s[4:]
            transaction_id = int(s)
            if transaction_id <= 0:
                transaction_id += self.output.history.last().tid
            return transaction_id

        tids = set()
        merged_tids = set()
        for t in self.opts.transactions:
            if '..' in t:
                try:
                    begin_transaction_id, end_transaction_id = t.split('..', 2)
                except ValueError:
                    logger.critical(
                        _("Invalid transaction ID range definition '{}'.\n"
                          "Use '<transaction-id>..<transaction-id>'."
                          ).format(t))
                    raise dnf.cli.CliError
                cant_convert_msg = _("Can't convert '{}' to transaction ID.\n"
                                     "Use '<number>', 'last', 'last-<number>'.")
                try:
                    begin_transaction_id = str2transaction_id(begin_transaction_id)
                except ValueError:
                    # BUG FIX: cant_convert_msg is already translated; wrapping
                    # it in _() again defeats the catalog lookup
                    logger.critical(cant_convert_msg.format(begin_transaction_id))
                    raise dnf.cli.CliError
                try:
                    end_transaction_id = str2transaction_id(end_transaction_id)
                except ValueError:
                    logger.critical(cant_convert_msg.format(end_transaction_id))
                    raise dnf.cli.CliError
                if self._require_one_transaction_id and begin_transaction_id != end_transaction_id:
                    logger.critical(self._require_one_transaction_id_msg)
                    raise dnf.cli.CliError
                if begin_transaction_id > end_transaction_id:
                    begin_transaction_id, end_transaction_id = \
                        end_transaction_id, begin_transaction_id
                merged_tids.add((begin_transaction_id, end_transaction_id))
                tids.update(range(begin_transaction_id, end_transaction_id + 1))
            else:
                try:
                    tids.add(str2transaction_id(t))
                except ValueError:
                    # not a transaction id, assume it's package name
                    transact_ids_from_pkgname = self.output.history.search([t])
                    if transact_ids_from_pkgname:
                        tids.update(transact_ids_from_pkgname)
                    else:
                        msg = _("No transaction which manipulates package '{}' was found."
                                ).format(t)
                        if self._require_one_transaction_id:
                            logger.critical(msg)
                            raise dnf.cli.CliError
                        else:
                            logger.info(msg)

        return sorted(tids, reverse=True), merged_tids

    def run(self):
        vcmd = self.opts.transactions_action

        if vcmd == 'replay':
            self.replay = TransactionReplay(
                self.base,
                filename=self.opts.transaction_filename,
                ignore_installed=self.opts.ignore_installed,
                ignore_extras=self.opts.ignore_extras,
                skip_unavailable=self.opts.skip_unavailable
            )
            self.replay.run()
        else:
            tids, merged_tids = self._args2transaction_ids()

            if vcmd == 'list' and (tids or not self.opts.transactions):
                self.output.historyListCmd(tids, reverse=self.opts.reverse)
            elif vcmd == 'info' and (tids or not self.opts.transactions):
                self.output.historyInfoCmd(tids, self.opts.transactions, merged_tids)
            elif vcmd == 'undo':
                self._hcmd_undo(tids)
            elif vcmd == 'redo':
                self._hcmd_redo(tids)
            elif vcmd == 'rollback':
                self._hcmd_rollback(tids)
            elif vcmd == 'userinstalled':
                self._hcmd_userinstalled()
            elif vcmd == 'store':
                tid = self._history_get_transaction(tids)
                data = serialize_transaction(tid)
                try:
                    filename = self.opts.output if self.opts.output is not None else "transaction.json"

                    # it is absolutely possible for both assumeyes and assumeno to be True, go figure
                    if (self.base.conf.assumeno or not self.base.conf.assumeyes) and os.path.isfile(filename):
                        msg = _("{} exists, overwrite?").format(filename)
                        if self.base.conf.assumeno or not self.base.output.userconfirm(
                                msg='\n{} [y/N]: '.format(msg),
                                defaultyes_msg='\n{} [Y/n]: '.format(msg)):
                            print(_("Not overwriting {}, exiting.").format(filename))
                            return

                    with open(filename, "w") as f:
                        json.dump(data, f, indent=4, sort_keys=True)
                        f.write("\n")

                    print(_("Transaction saved to {}.").format(filename))

                except OSError as e:
                    raise dnf.cli.CliError(_('Error storing transaction: {}').format(str(e)))

    def run_resolved(self):
        if self.opts.transactions_action not in ("replay", "redo", "rollback", "undo"):
            return
        self.replay.post_transaction()

    def run_transaction(self):
        if self.opts.transactions_action not in ("replay", "redo", "rollback", "undo"):
            return
        warnings = self.replay.get_warnings()
        if warnings:
            logger.log(
                dnf.logging.WARNING,
                _("Warning, the following problems occurred while running a transaction:")
            )
            for w in warnings:
                logger.log(dnf.logging.WARNING, " " + w)
| 17,957
|
Python
|
.py
| 344
| 37.854651
| 111
| 0.569061
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,768
|
search.py
|
rpm-software-management_dnf/dnf/cli/commands/search.py
|
# search.py
# Search CLI command.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import collections
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import ucd, _, C_
import dnf.i18n
import dnf.match_counter
import dnf.util
import hawkey
import logging
logger = logging.getLogger('dnf')
class SearchCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    search command.
    """

    aliases = ('search', 'se')
    summary = _('search package details for the given string')

    @staticmethod
    def set_argparser(parser):
        """Register the command line options understood by 'dnf search'."""
        parser.add_argument('--all', action='store_true',
                            help=_("search also package description and URL"))
        parser.add_argument('query_string', nargs='+', metavar=_('KEYWORD'),
                            choices=['all'], default=None,
                            action=OptionParser.PkgNarrowCallback,
                            help=_("Keyword to search for"))

    def _search(self, args):
        """Search for simple text tags in a package object."""

        TRANS_TBL = collections.OrderedDict((
            ('name', C_('long', 'Name')),
            ('summary', C_('long', 'Summary')),
            ('description', C_('long', 'Description')),
            ('url', _('URL')),
        ))

        def _translate_attr(attr):
            # BUG FIX: catch only the expected KeyError; a bare except also
            # swallowed KeyboardInterrupt/SystemExit and unrelated errors
            try:
                return TRANS_TBL[attr]
            except KeyError:
                return attr

        def _print_section_header(exact_match, attrs, keys):
            trans_attrs = map(_translate_attr, attrs)
            # TRANSLATORS: separator used between package attributes (eg. Name & Summary & URL)
            trans_attrs_str = _(' & ').join(trans_attrs)
            if exact_match:
                # TRANSLATORS: %s - translated package attributes,
                # %%s - found keys (in listed attributes)
                section_text = _('%s Exactly Matched: %%s') % trans_attrs_str
            else:
                # TRANSLATORS: %s - translated package attributes,
                # %%s - found keys (in listed attributes)
                section_text = _('%s Matched: %%s') % trans_attrs_str
            formatted = self.base.output.fmtSection(section_text % ", ".join(keys))
            print(ucd(formatted))

        counter = dnf.match_counter.MatchCounter()
        for arg in args:
            self._search_counted(counter, 'name', arg)
            self._search_counted(counter, 'summary', arg)
        if self.opts.all:
            for arg in args:
                self._search_counted(counter, 'description', arg)
                self._search_counted(counter, 'url', arg)
        else:
            # without --all, keep only packages matching every keyword
            needles = len(args)
            pkgs = list(counter.keys())
            for pkg in pkgs:
                if len(counter.matched_needles(pkg)) != needles:
                    del counter[pkg]

        used_attrs = None
        matched_needles = None
        exact_match = False
        print_section_header = False
        limit = None
        if not self.base.conf.showdupesfromrepos:
            limit = self.base.sack.query().filterm(pkg=counter.keys()).latest()

        seen = set()
        for pkg in counter.sorted(reverse=True, limit_to=limit):
            if not self.base.conf.showdupesfromrepos:
                if pkg.name + pkg.arch in seen:
                    continue
                seen.add(pkg.name + pkg.arch)

            # start a new output section whenever the matched attributes,
            # keywords or exactness change
            if used_attrs != counter.matched_keys(pkg):
                used_attrs = counter.matched_keys(pkg)
                print_section_header = True
            if matched_needles != counter.matched_needles(pkg):
                matched_needles = counter.matched_needles(pkg)
                print_section_header = True
            if exact_match != (counter.matched_haystacks(pkg) == matched_needles):
                exact_match = counter.matched_haystacks(pkg) == matched_needles
                print_section_header = True
            if print_section_header:
                _print_section_header(exact_match, used_attrs, matched_needles)
                print_section_header = False
            self.base.output.matchcallback(pkg, counter.matched_haystacks(pkg), args)

        if len(counter) == 0:
            logger.info(_('No matches found.'))

    def _search_counted(self, counter, attr, needle):
        """Add packages whose *attr* matches *needle* (substring or glob,
        case-insensitive) to *counter*."""
        fdict = {'%s__substr' % attr: needle}
        if dnf.util.is_glob_pattern(needle):
            fdict = {'%s__glob' % attr: needle}
        q = self.base.sack.query().filterm(hawkey.ICASE, **fdict)
        for pkg in q.run():
            counter.add(pkg, attr, needle)
        return counter

    def pre_configure(self):
        if not self.opts.quiet:
            self.cli.redirect_logger(stdout=logging.WARNING, stderr=logging.INFO)

    def configure(self):
        if not self.opts.quiet:
            self.cli.redirect_repo_progress()
        demands = self.cli.demands
        demands.available_repos = True
        demands.fresh_metadata = False
        demands.sack_activation = True
        # passing the literal keyword 'all' acts like --all (yum compatibility)
        self.opts.all = self.opts.all or self.opts.query_string_action

    def run(self):
        logger.debug(_('Searching Packages: '))
        return self._search(self.opts.query_string)
| 6,306
|
Python
|
.py
| 137
| 36.248175
| 95
| 0.61357
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,769
|
repoquery.py
|
rpm-software-management_dnf/dnf/cli/commands/repoquery.py
|
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
import argparse
import datetime
import logging
import re
import sys
import dnf
import dnf.cli
import dnf.exceptions
import dnf.subject
import dnf.util
import hawkey
logger = logging.getLogger('dnf')
QFORMAT_DEFAULT = '%{name}-%{epoch}:%{version}-%{release}.%{arch}'
# matches %[-][dd]{attr}
QFORMAT_MATCH = re.compile(r'%(-?\d*?){([:\w]+?)}')

ALLOWED_QUERY_TAGS = ('name', 'arch', 'epoch', 'version', 'release',
                      'reponame', 'repoid', 'from_repo', 'evr', 'debug_name',
                      'source_name', 'source_debug_name', 'installtime',
                      'buildtime', 'size', 'downloadsize', 'installsize',
                      'provides', 'requires', 'obsoletes', 'conflicts',
                      'suggests', 'recommends', 'enhances', 'supplements',
                      'sourcerpm', 'description', 'summary', 'license', 'url',
                      'reason', 'group', 'vendor', 'packager',)

OPTS_MAPPING = {
    'conflicts': 'conflicts',
    'enhances': 'enhances',
    'obsoletes': 'obsoletes',
    'provides': 'provides',
    'recommends': 'recommends',
    'requires': 'requires',
    'requires-pre': 'requires_pre',
    'suggests': 'suggests',
    'supplements': 'supplements'
}


def rpm2py_format(queryformat):
    """Convert a rpm like QUERYFMT to an python .format() string."""

    def escape_braces(text):
        # double literal braces so str.format() treats them as text
        return text.replace('{', '{{').replace('}', '}}')

    def tag_to_format(match):
        width, tag = match.groups()
        tag = tag.lower()  # we allow both uppercase and lowercase variants
        if tag not in ALLOWED_QUERY_TAGS:
            # unknown tag: keep the original text verbatim
            return escape_braces(match.group())
        spec = ''
        if width:
            # rpm's '-' (left pad) maps to format()'s '>' alignment
            spec = ':' + ('>' + width[1:] if width[0] == '-' else '<' + width)
        return '{0.' + tag + spec + '}'

    queryformat = queryformat.replace("\\n", "\n").replace("\\t", "\t")
    for alias, attr in OPTS_MAPPING.items():
        queryformat = queryformat.replace(alias, attr)

    pieces = []
    cursor = 0
    for match in QFORMAT_MATCH.finditer(queryformat):
        pieces.append(escape_braces(queryformat[cursor:match.start()]))
        pieces.append(tag_to_format(match))
        cursor = match.end()
    pieces.append(escape_braces(queryformat[cursor:]))
    return ''.join(pieces)
class _CommaSplitCallback(OptionParser._SplitCallback):
    # Variant of _SplitCallback that splits option values on commas
    # (with optional surrounding whitespace) instead of the default separator.
    SPLITTER = r'\s*,\s*'
class RepoQueryCommand(commands.Command):
    """A class containing methods needed by the cli to execute the repoquery command.
    """

    # NEVRA parsing forms keyed by command alias: invoking the command as
    # e.g. "repoquery-n" restricts argument matching to package names only.
    nevra_forms = {'repoquery-n': hawkey.FORM_NAME,
                   'repoquery-na': hawkey.FORM_NA,
                   'repoquery-nevra': hawkey.FORM_NEVRA}

    aliases = ('repoquery', 'rq') + tuple(nevra_forms.keys())
    summary = _('search for packages matching keyword')

    @staticmethod
    def filter_repo_arch(opts, query):
        """Filter query by repoid and arch options"""
        # filterm() filters in place; the query is also returned for chaining.
        if opts.repo:
            query.filterm(reponame=opts.repo)
        if opts.arches:
            query.filterm(arch=opts.arches)
        return query

    @staticmethod
    def set_argparser(parser):
        # Builds the full repoquery CLI: selection switches, dependency
        # filters, output-format switches and package-set filters.
        parser.add_argument('-a', '--all', dest='queryall', action='store_true',
                            help=_("Query all packages (shorthand for repoquery '*' "
                                   "or repoquery without argument)"))
        parser.add_argument('--show-duplicates', action='store_true',
                            help=_("Query all versions of packages (default)"))
        parser.add_argument('--arch', '--archlist', dest='arches', default=[],
                            action=_CommaSplitCallback, metavar='[arch]',
                            help=_('show only results from this ARCH'))
        parser.add_argument('-f', '--file', metavar='FILE', nargs='+',
                            help=_('show only results that owns FILE'))
        parser.add_argument('--whatconflicts', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that conflict REQ'))
        parser.add_argument('--whatdepends', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('shows results that requires, suggests, supplements, enhances, '
                                   'or recommends package provides and files REQ'))
        parser.add_argument('--whatobsoletes', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that obsolete REQ'))
        parser.add_argument('--whatprovides', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that provide REQ'))
        parser.add_argument('--whatrequires', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('shows results that requires package provides and files REQ'))
        parser.add_argument('--whatrecommends', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that recommend REQ'))
        parser.add_argument('--whatenhances', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that enhance REQ'))
        parser.add_argument('--whatsuggests', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that suggest REQ'))
        parser.add_argument('--whatsupplements', default=[], action=_CommaSplitCallback,
                            metavar='REQ',
                            help=_('show only results that supplement REQ'))
        whatrequiresform = parser.add_mutually_exclusive_group()
        whatrequiresform.add_argument("--alldeps", action="store_true",
                                      help=_("check non-explicit dependencies (files and Provides); default"))
        whatrequiresform.add_argument("--exactdeps", action="store_true",
                                      help=_('check dependencies exactly as given, opposite of --alldeps'))
        parser.add_argument("--recursive", action="store_true", help=_(
            'used with --whatrequires, and --requires --resolve, query packages recursively.'))
        parser.add_argument('--deplist', action='store_true', help=_(
            "show a list of all dependencies and what packages provide them"))
        parser.add_argument('--resolve', action='store_true',
                            help=_('resolve capabilities to originating package(s)'))
        parser.add_argument("--tree", action="store_true",
                            help=_('show recursive tree for package(s)'))
        parser.add_argument('--srpm', action='store_true',
                            help=_('operate on corresponding source RPM'))
        parser.add_argument("--latest-limit", dest='latest_limit', type=int,
                            help=_('show N latest packages for a given name.arch'
                                   ' (or latest but N if N is negative)'))
        parser.add_argument("--disable-modular-filtering", action="store_true",
                            help=_("list also packages of inactive module streams"))
        outform = parser.add_mutually_exclusive_group()
        outform.add_argument('-i', "--info", dest='queryinfo',
                             default=False, action='store_true',
                             help=_('show detailed information about the package'))
        outform.add_argument('-l', "--list", dest='queryfilelist',
                             default=False, action='store_true',
                             help=_('show list of files in the package'))
        outform.add_argument('-s', "--source", dest='querysourcerpm',
                             default=False, action='store_true',
                             help=_('show package source RPM name'))
        outform.add_argument('--changelogs', dest='querychangelogs',
                             default=False, action='store_true',
                             help=_('show changelogs of the package'))
        outform.add_argument('--qf', "--queryformat", dest='queryformat',
                             default=QFORMAT_DEFAULT,
                             help=_('display format for listing packages: '
                                    '"%%{name} %%{version} ...", '
                                    'use --querytags to view full tag list'))
        parser.add_argument('--querytags', action='store_true',
                            help=_('show available tags to use with '
                                   '--queryformat'))
        outform.add_argument("--nevra", dest='queryformat', const=QFORMAT_DEFAULT,
                             action='store_const',
                             help=_('use name-epoch:version-release.architecture format for '
                                    'displaying found packages (default)'))
        outform.add_argument("--nvr", dest='queryformat', const='%{name}-%{version}-%{release}',
                             action='store_const', help=_('use name-version-release format for '
                                                          'displaying found packages '
                                                          '(rpm query default)'))
        outform.add_argument("--envra", dest='queryformat',
                             const='%{epoch}:%{name}-%{version}-%{release}.%{arch}',
                             action='store_const',
                             help=_('use epoch:name-version-release.architecture format for '
                                    'displaying found packages'))
        outform.add_argument('--groupmember', action="store_true", help=_(
            'Display in which comps groups are presented selected packages'))
        pkgfilter = parser.add_mutually_exclusive_group()
        pkgfilter.add_argument("--duplicates", dest='pkgfilter',
                               const='duplicated', action='store_const',
                               help=_('limit the query to installed duplicate '
                                      'packages'))
        # Hidden compatibility spelling of --duplicates.
        pkgfilter.add_argument("--duplicated", dest='pkgfilter',
                               const='duplicated', action='store_const',
                               help=argparse.SUPPRESS)
        pkgfilter.add_argument("--installonly", dest='pkgfilter',
                               const='installonly', action='store_const',
                               help=_('limit the query to installed installonly packages'))
        pkgfilter.add_argument("--unsatisfied", dest='pkgfilter',
                               const='unsatisfied', action='store_const',
                               help=_('limit the query to installed packages with unsatisfied dependencies'))
        parser.add_argument('--location', action='store_true',
                            help=_('show a location from where packages can be downloaded'))
        package_attribute = parser.add_mutually_exclusive_group()
        help_msgs = {
            'conflicts': _('Display capabilities that the package conflicts with.'),
            'depends': _('Display capabilities that the package can depend on, enhance, recommend,'
                         ' suggest, and supplement.'),
            'enhances': _('Display capabilities that the package can enhance.'),
            'provides': _('Display capabilities provided by the package.'),
            'recommends': _('Display capabilities that the package recommends.'),
            'requires': _('Display capabilities that the package depends on.'),
            'requires-pre': _('If the package is not installed display capabilities that it depends on for '
                              'running %%pre and %%post scriptlets. If the package is installed display '
                              'capabilities that is depends for %%pre, %%post, %%preun and %%postun.'),
            'suggests': _('Display capabilities that the package suggests.'),
            'supplements': _('Display capabilities that the package can supplement.')
        }
        for arg, help_msg in help_msgs.items():
            name = '--%s' % arg
            package_attribute.add_argument(name, dest='packageatr', action='store_const',
                                           const=arg, help=help_msg)
        parser.add_argument('--available', action="store_true", help=_('Display only available packages.'))
        help_list = {
            'installed': _('Display only installed packages.'),
            'extras': _('Display only packages that are not present in any of available repositories.'),
            'upgrades': _('Display only packages that provide an upgrade for some already installed package.'),
            'unneeded': _('Display only packages that can be removed by "{prog} autoremove" '
                          'command.').format(prog=dnf.util.MAIN_PROG),
            'userinstalled': _('Display only packages that were installed by user.')
        }
        list_group = parser.add_mutually_exclusive_group()
        for list_arg, help_arg in help_list.items():
            switch = '--%s' % list_arg
            list_group.add_argument(switch, dest='list', action='store_const',
                                    const=list_arg, help=help_arg)
        # make --autoremove hidden compatibility alias for --unneeded
        list_group.add_argument(
            '--autoremove', dest='list', action='store_const',
            const="unneeded", help=argparse.SUPPRESS)
        parser.add_argument('--recent', action="store_true", help=_('Display only recently edited packages'))
        parser.add_argument('key', nargs='*', metavar="KEY",
                            help=_('the key to search for'))

    def pre_configure(self):
        # Keep query output clean unless the user asked for verbosity.
        if not self.opts.quiet:
            self.cli.redirect_logger(stdout=logging.WARNING, stderr=logging.INFO)

    def configure(self):
        """Validate option combinations and set up cli demands."""
        if not self.opts.quiet:
            self.cli.redirect_repo_progress()
        demands = self.cli.demands
        if self.opts.obsoletes:
            if self.opts.packageatr:
                self.cli._option_conflict("--obsoletes", "--" + self.opts.packageatr)
            else:
                self.opts.packageatr = "obsoletes"
        if self.opts.querytags:
            # --querytags only prints the tag list; no sack/repos needed.
            return
        if self.opts.resolve and not self.opts.packageatr:
            raise dnf.cli.CliError(
                _("Option '--resolve' has to be used together with one of the "
                  "'--conflicts', '--depends', '--enhances', '--provides', '--recommends', "
                  "'--requires', '--requires-pre', '--suggests' or '--supplements' options"))
        if self.opts.recursive:
            if self.opts.exactdeps:
                self.cli._option_conflict("--recursive", "--exactdeps")
            if not any([self.opts.whatrequires,
                        (self.opts.packageatr == "requires" and self.opts.resolve)]):
                raise dnf.cli.CliError(
                    _("Option '--recursive' has to be used with '--whatrequires <REQ>' "
                      "(optionally with '--alldeps', but not with '--exactdeps'), or with "
                      "'--requires <REQ> --resolve'"))
        if self.opts.alldeps or self.opts.exactdeps:
            if not (self.opts.whatrequires or self.opts.whatdepends):
                raise dnf.cli.CliError(
                    _("argument {} requires --whatrequires or --whatdepends option".format(
                        '--alldeps' if self.opts.alldeps else '--exactdeps')))
        if self.opts.srpm:
            self.base.repos.enable_source_repos()
        if (self.opts.list not in ["installed", "userinstalled"] and
                self.opts.pkgfilter != "installonly") or self.opts.available:
            demands.available_repos = True
        demands.sack_activation = True
        if self.opts.querychangelogs:
            demands.changelogs = True
        if self.opts.queryfilelist or self.opts.file or dnf.util._is_file_pattern_present(self.opts.key):
            self.base.conf.optional_metadata_types += ["filelists"]

    def build_format_fn(self, opts, pkg):
        """Render one package according to the selected output switch."""
        if opts.querychangelogs:
            out = []
            out.append('Changelog for %s' % str(pkg))
            for chlog in pkg.changelogs:
                dt = chlog['timestamp']
                out.append('* %s %s\n%s\n' % (
                    # TRANSLATORS: This is the date format for a changelog
                    # in dnf repoquery. You are encouraged to change it
                    # according to the requirements of your language. Format
                    # specifiers used here: %a - abbreviated weekday name in
                    # your language, %b - abbreviated month name in the correct
                    # grammatical form, %d - day number (01-31), %Y - year
                    # number (4 digits).
                    dt.strftime(_("%a %b %d %Y")),
                    dnf.i18n.ucd(chlog['author']),
                    dnf.i18n.ucd(chlog['text'])))
            return '\n'.join(out)
        try:
            po = PackageWrapper(pkg)
            if opts.queryinfo:
                return self.base.output.infoOutput(pkg)
            elif opts.queryfilelist:
                filelist = po.files
                if not filelist:
                    print(_('Package {} contains no files').format(pkg), file=sys.stderr)
                return filelist
            elif opts.querysourcerpm:
                return po.sourcerpm
            else:
                return rpm2py_format(opts.queryformat).format(po)
        except AttributeError as e:
            # catch that the user has specified attributes
            # there don't exist on the dnf Package object.
            raise dnf.exceptions.Error(str(e))

    def _resolve_nevras(self, nevras, base_query):
        # Resolve NEVRA strings to the matching subset of base_query
        # (no provides/filename matching -- NEVRA forms only).
        resolved_nevras_query = self.base.sack.query().filterm(empty=True)
        for nevra in nevras:
            resolved_nevras_query = resolved_nevras_query.union(base_query.intersection(
                dnf.subject.Subject(nevra).get_best_query(
                    self.base.sack,
                    with_provides=False,
                    with_filenames=False
                )
            ))
        return resolved_nevras_query

    def _do_recursive_deps(self, query_in, query_select, done=None):
        # Transitive closure of "requires" over query_in, starting from
        # query_select; `done` accumulates already-visited packages.
        done = done if done else query_select
        query_required = query_in.filter(requires=query_select)
        query_select = query_required.difference(done)
        done = query_required.union(done)
        if query_select:
            done = self._do_recursive_deps(query_in, query_select, done=done)
        return done

    def by_all_deps(self, names, query, all_dep_types=False):
        """Select packages of query whose dependencies match names/NEVRAs."""
        # in case of arguments being NEVRAs, resolve them to packages
        resolved_nevras_query = self._resolve_nevras(names, query)
        # filter the arguments directly as reldeps
        depquery = query.filter(requires__glob=names)
        # filter the resolved NEVRAs as packages
        depquery = depquery.union(query.filter(requires=resolved_nevras_query))
        if all_dep_types:
            # TODO this is very inefficient, as it resolves the `names` glob to
            # reldeps four more times, which in a reasonably wide glob like
            # `dnf repoquery --whatdepends "libdnf*"` can take roughly 50% of
            # the total execution time.
            depquery = depquery.union(query.filter(recommends__glob=names))
            depquery = depquery.union(query.filter(enhances__glob=names))
            depquery = depquery.union(query.filter(supplements__glob=names))
            depquery = depquery.union(query.filter(suggests__glob=names))
            depquery = depquery.union(query.filter(recommends=resolved_nevras_query))
            depquery = depquery.union(query.filter(enhances=resolved_nevras_query))
            depquery = depquery.union(query.filter(supplements=resolved_nevras_query))
            depquery = depquery.union(query.filter(suggests=resolved_nevras_query))
        if self.opts.recursive:
            depquery = self._do_recursive_deps(query, depquery)
        return depquery

    def _get_recursive_providers_query(self, query_in, providers, done=None):
        # Recursively collect packages providing the requirements of
        # `providers`, restricted to query_in.
        done = done if done else self.base.sack.query().filterm(empty=True)
        t = self.base.sack.query().filterm(empty=True)
        for pkg in providers.run():
            t = t.union(query_in.filter(provides=pkg.requires))
        query_select = t.difference(done)
        if query_select:
            done = self._get_recursive_providers_query(query_in, query_select, done=t.union(done))
        return t.union(done)

    def _add_add_remote_packages(self):
        # Load any positional keys that are local .rpm files or remote URLs
        # into the sack and return the resulting package objects.
        rpmnames = []
        remote_packages = []
        for key in self.opts.key:
            schemes = dnf.pycomp.urlparse.urlparse(key)[0]
            if key.endswith('.rpm'):
                rpmnames.append(key)
            elif schemes and schemes in ('http', 'ftp', 'file', 'https'):
                rpmnames.append(key)
        if rpmnames:
            remote_packages = self.base.add_remote_rpms(
                rpmnames, strict=False, progress=self.base.output.progress)
        return remote_packages

    def run(self):
        """Execute the query and print the requested report."""
        if self.opts.querytags:
            print("\n".join(sorted(ALLOWED_QUERY_TAGS)))
            return
        self.cli._populate_update_security_filter(self.opts)
        q = self.base.sack.query(
            flags=hawkey.IGNORE_MODULAR_EXCLUDES
            if self.opts.disable_modular_filtering
            else hawkey.APPLY_EXCLUDES
        )
        # --- seed the query from positional KEY arguments -------------------
        if self.opts.key:
            remote_packages = self._add_add_remote_packages()
            kwark = {'with_provides': False}
            if self.opts.command in self.nevra_forms:
                # repoquery-n/-na/-nevra aliases match NEVRA components only.
                kwark["forms"] = [self.nevra_forms[self.opts.command]]
                kwark['with_filenames'] = False
            pkgs = []
            query_results = q.filter(empty=True)
            if remote_packages:
                query_results = query_results.union(
                    self.base.sack.query().filterm(pkg=remote_packages))
            for key in self.opts.key:
                query_results = query_results.union(
                    dnf.subject.Subject(key, ignore_case=True).get_best_query(
                        self.base.sack, query=q, **kwark))
            q = query_results
        if self.opts.recent:
            q = q._recent(self.base.conf.recent)
        # --- --available / --installed / --extras / ... list filters -------
        if self.opts.available:
            if self.opts.list and self.opts.list != "installed":
                print(self.cli.optparser.print_usage())
                raise dnf.exceptions.Error(_("argument {}: not allowed with argument {}".format(
                    "--available", "--" + self.opts.list)))
        elif self.opts.list == "unneeded":
            q = q._unneeded(self.base.history.swdb)
        elif self.opts.list and self.opts.list != 'userinstalled':
            q = getattr(q, self.opts.list)()
        # --- --duplicates / --installonly / --unsatisfied -------------------
        if self.opts.pkgfilter == "duplicated":
            installonly = self.base._get_installonly_query(q)
            q = q.difference(installonly).duplicated()
        elif self.opts.pkgfilter == "installonly":
            q = self.base._get_installonly_query(q)
        elif self.opts.pkgfilter == "unsatisfied":
            rpmdb = dnf.sack.rpmdb_sack(self.base)
            rpmdb._configure(self.base.conf.installonlypkgs, self.base.conf.installonly_limit)
            goal = dnf.goal.Goal(rpmdb)
            goal.protect_running_kernel = False
            solved = goal.run(verify=True)
            if not solved:
                print(dnf.util._format_resolve_problems(goal.problem_rules()))
            return
        elif not self.opts.list:
            # do not show packages from @System repo
            q = q.available()
        # filter repo and arch
        q = self.filter_repo_arch(self.opts, q)
        orquery = q
        # --- dependency-based filters ---------------------------------------
        if self.opts.file:
            q.filterm(file__glob=self.opts.file)
        if self.opts.whatconflicts:
            rels = q.filter(conflicts__glob=self.opts.whatconflicts)
            q = rels.union(q.filter(conflicts=self._resolve_nevras(self.opts.whatconflicts, q)))
        if self.opts.whatobsoletes:
            q.filterm(obsoletes=self.opts.whatobsoletes)
        if self.opts.whatprovides:
            query_for_provide = q.filter(provides__glob=self.opts.whatprovides)
            if query_for_provide:
                q = query_for_provide
            else:
                # Fall back to file ownership when nothing provides the spec.
                q.filterm(file__glob=self.opts.whatprovides)
        if self.opts.whatrequires:
            if self.opts.exactdeps:
                q.filterm(requires__glob=self.opts.whatrequires)
            else:
                q = self.by_all_deps(self.opts.whatrequires, q)
        if self.opts.whatdepends:
            if self.opts.exactdeps:
                dependsquery = q.filter(requires__glob=self.opts.whatdepends)
                dependsquery = dependsquery.union(q.filter(recommends__glob=self.opts.whatdepends))
                dependsquery = dependsquery.union(q.filter(enhances__glob=self.opts.whatdepends))
                dependsquery = dependsquery.union(q.filter(supplements__glob=self.opts.whatdepends))
                q = dependsquery.union(q.filter(suggests__glob=self.opts.whatdepends))
            else:
                q = self.by_all_deps(self.opts.whatdepends, q, True)
        if self.opts.whatrecommends:
            rels = q.filter(recommends__glob=self.opts.whatrecommends)
            q = rels.union(q.filter(recommends=self._resolve_nevras(self.opts.whatrecommends, q)))
        if self.opts.whatenhances:
            rels = q.filter(enhances__glob=self.opts.whatenhances)
            q = rels.union(q.filter(enhances=self._resolve_nevras(self.opts.whatenhances, q)))
        if self.opts.whatsupplements:
            rels = q.filter(supplements__glob=self.opts.whatsupplements)
            q = rels.union(q.filter(supplements=self._resolve_nevras(self.opts.whatsupplements, q)))
        if self.opts.whatsuggests:
            rels = q.filter(suggests__glob=self.opts.whatsuggests)
            q = rels.union(q.filter(suggests=self._resolve_nevras(self.opts.whatsuggests, q)))
        if self.opts.latest_limit:
            q = q.latest(self.opts.latest_limit)
        # reduce a query to security upgrades if they are specified
        q = self.base._merge_update_filters(q, warning=False)
        if self.opts.srpm:
            # Map each result to its corresponding source RPM.
            pkg_list = []
            for pkg in q:
                srcname = pkg.source_name
                if srcname is not None:
                    tmp_query = self.base.sack.query().filterm(name=srcname, evr=pkg.evr,
                                                               arch='src')
                    pkg_list += tmp_query.run()
            q = self.base.sack.query().filterm(pkg=pkg_list)
        if self.opts.tree:
            if not self.opts.whatrequires and self.opts.packageatr not in (
                    'conflicts', 'enhances', 'obsoletes', 'provides', 'recommends',
                    'requires', 'suggests', 'supplements'):
                raise dnf.exceptions.Error(
                    _("No valid switch specified\nusage: {prog} repoquery [--conflicts|"
                      "--enhances|--obsoletes|--provides|--recommends|--requires|"
                      "--suggest|--supplements|--whatrequires] [key] [--tree]\n\n"
                      "description:\n For the given packages print a tree of the "
                      "packages.").format(prog=dnf.util.MAIN_PROG))
            self.tree_seed(q, orquery, self.opts)
            return
        # --- formatting and printing ----------------------------------------
        pkgs = set()
        if self.opts.packageatr:
            rels = set()
            for pkg in q.run():
                if self.opts.list != 'userinstalled' or self.base.history.user_installed(pkg):
                    if self.opts.packageatr == 'depends':
                        rels.update(pkg.requires + pkg.enhances + pkg.suggests +
                                    pkg.supplements + pkg.recommends)
                    else:
                        rels.update(getattr(pkg, OPTS_MAPPING[self.opts.packageatr]))
            if self.opts.resolve:
                # find the providing packages and show them
                if self.opts.list == "installed":
                    query = self.filter_repo_arch(self.opts, self.base.sack.query())
                else:
                    query = self.filter_repo_arch(self.opts, self.base.sack.query().available())
                providers = query.filter(provides=rels)
                if self.opts.recursive:
                    providers = providers.union(
                        self._get_recursive_providers_query(query, providers))
                pkgs = set()
                for pkg in providers.latest().run():
                    pkgs.add(self.build_format_fn(self.opts, pkg))
            else:
                pkgs.update(str(rel) for rel in rels)
        elif self.opts.location:
            for pkg in q.run():
                location = pkg.remote_location()
                if location is not None:
                    pkgs.add(location)
        elif self.opts.deplist:
            pkgs = []
            for pkg in sorted(set(q.run())):
                if self.opts.list != 'userinstalled' or self.base.history.user_installed(pkg):
                    deplist_output = []
                    deplist_output.append('package: ' + str(pkg))
                    for req in sorted([str(req) for req in pkg.requires]):
                        deplist_output.append(' dependency: ' + req)
                        subject = dnf.subject.Subject(req)
                        query = subject.get_best_query(self.base.sack)
                        query = self.filter_repo_arch(
                            self.opts, query.available())
                        if not self.opts.verbose:
                            query = query.latest()
                        for provider in query.run():
                            deplist_output.append(' provider: ' + str(provider))
                    pkgs.append('\n'.join(deplist_output))
            if pkgs:
                print('\n\n'.join(pkgs))
            return
        elif self.opts.groupmember:
            self._group_member_report(q)
            return
        else:
            for pkg in q.run():
                if self.opts.list != 'userinstalled' or self.base.history.user_installed(pkg):
                    pkgs.add(self.build_format_fn(self.opts, pkg))
        if pkgs:
            if self.opts.queryinfo:
                print("\n\n".join(sorted(pkgs)))
            else:
                print("\n".join(sorted(pkgs)))

    def _group_member_report(self, query):
        # Map each queried package to the comps groups whose package lists
        # contain it; packages in no group are printed last.
        package_conf_dict = {}
        for group in self.base.comps.groups:
            package_conf_dict[group.id] = set([pkg.name for pkg in group.packages_iter()])
        group_package_dict = {}
        pkg_not_in_group = []
        for pkg in query.run():
            group_id_list = []
            for group_id, package_name_set in package_conf_dict.items():
                if pkg.name in package_name_set:
                    group_id_list.append(group_id)
            if group_id_list:
                group_package_dict.setdefault(
                    '$'.join(sorted(group_id_list)), []).append(str(pkg))
            else:
                pkg_not_in_group.append(str(pkg))
        output = []
        for key, package_list in sorted(group_package_dict.items()):
            output.append(
                '\n'.join(sorted(package_list) + sorted([' @' + id for id in key.split('$')])))
        output.append('\n'.join(sorted(pkg_not_in_group)))
        if output:
            print('\n'.join(output))

    def grow_tree(self, level, pkg, opts):
        # Print one node of the dependency tree; level -1 is the root and is
        # printed without tree decoration.
        pkg_string = self.build_format_fn(opts, pkg)
        if level == -1:
            print(pkg_string)
            return
        spacing = " "
        for x in range(0, level):
            spacing += "| "
        requires = []
        for requirepkg in pkg.requires:
            requires.append(str(requirepkg))
        reqstr = "[" + str(len(requires)) + ": " + ", ".join(requires) + "]"
        print(spacing + r"\_ " + pkg_string + " " + reqstr)

    def tree_seed(self, query, aquery, opts, level=-1, usedpkgs=None):
        # Depth-first walk over the dependency graph; `usedpkgs` guards
        # against cycles. rpmlib()/solvable() pseudo-packages stop a branch.
        for pkg in sorted(set(query.run()), key=lambda p: p.name):
            usedpkgs = set() if usedpkgs is None or level == -1 else usedpkgs
            if pkg.name.startswith("rpmlib") or pkg.name.startswith("solvable"):
                return
            self.grow_tree(level, pkg, opts)
            if pkg not in usedpkgs:
                usedpkgs.add(pkg)
                if opts.packageatr:
                    strpkg = getattr(pkg, opts.packageatr)
                    ar = {}
                    for name in set(strpkg):
                        pkgquery = self.base.sack.query().filterm(provides=name)
                        for querypkg in pkgquery:
                            ar[querypkg.name + "." + querypkg.arch] = querypkg
                    pkgquery = self.base.sack.query().filterm(pkg=list(ar.values()))
                else:
                    pkgquery = self.by_all_deps((pkg.name, ), aquery) if opts.alldeps \
                        else aquery.filter(requires__glob=pkg.name)
                self.tree_seed(pkgquery, aquery, opts, level + 1, usedpkgs)
class PackageWrapper(object):
    """Wrapper for dnf.package.Package, so we can control formatting."""

    def __init__(self, pkg):
        self._pkg = pkg

    def __getattr__(self, attr):
        # Delegate to the wrapped package, normalizing the result for
        # display: None -> "(none)", lists -> sorted newline-joined text.
        value = getattr(self._pkg, attr)
        if value is None:
            return "(none)"
        if isinstance(value, list):
            return '\n'.join(sorted({dnf.i18n.ucd(reldep) for reldep in value}))
        return dnf.i18n.ucd(value)

    @staticmethod
    def _get_timestamp(timestamp):
        # Epoch seconds -> "YYYY-MM-DD HH:MM" (UTC); non-positive means unset.
        if timestamp > 0:
            dt = datetime.datetime.utcfromtimestamp(timestamp)
            return dt.strftime("%Y-%m-%d %H:%M")
        return ''

    @property
    def buildtime(self):
        return self._get_timestamp(self._pkg.buildtime)

    @property
    def installtime(self):
        return self._get_timestamp(self._pkg.installtime)
| 35,768
|
Python
|
.py
| 660
| 39.7
| 111
| 0.564944
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,770
|
distrosync.py
|
rpm-software-management_dnf/dnf/cli/commands/distrosync.py
|
# distrosync.py
# distro-sync CLI command.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from dnf.cli import commands
from dnf.i18n import _
class DistroSyncCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    distro-synch command.
    """

    aliases = ('distro-sync', 'distrosync', 'distribution-synchronization', 'dsync')
    summary = _('synchronize installed packages to the latest available versions')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('package', nargs='*', help=_('Package to synchronize'))

    def configure(self):
        # Requires root, an active sack, available repos and resolving,
        # plus GPG keys and at least one enabled repository.
        demands = self.cli.demands
        for flag in ('sack_activation', 'available_repos', 'resolving', 'root_user'):
            setattr(demands, flag, True)
        commands._checkGPGKey(self.base, self.cli)
        commands._checkEnabledRepo(self.base, self.opts.package)

    def run(self):
        # Delegate the actual work to the library layer.
        return self.base.distro_sync_userlist(self.opts.package)
| 1,951
|
Python
|
.py
| 41
| 43.780488
| 84
| 0.74619
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,771
|
reinstall.py
|
rpm-software-management_dnf/dnf/cli/commands/reinstall.py
|
# reinstall.py
# Reinstall CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _
import dnf.exceptions
import logging
logger = logging.getLogger('dnf')
class ReinstallCommand(commands.Command):
    """A class containing methods needed by the cli to execute the reinstall command.
    """

    aliases = ('reinstall', 'rei')
    summary = _('reinstall a package')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('packages', nargs='+', help=_('Package to reinstall'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            metavar=_('PACKAGE'))

    def configure(self):
        """Verify that conditions are met so that this command can
        run. These include that the program is being run by the root
        user, that there are enabled repositories with gpg keys, and
        that this command is called with appropriate arguments.
        """
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        commands._checkGPGKey(self.base, self.cli)
        if not self.opts.filenames:
            commands._checkEnabledRepo(self.base)

    def run(self):
        """Mark the given local files and package specs for reinstallation.

        Raises dnf.exceptions.Error when nothing could be marked.
        """
        # Reinstall files.
        done = False
        for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False,
                                             progress=self.base.output.progress):
            try:
                self.base.package_reinstall(pkg)
            except dnf.exceptions.MarkingError:
                logger.info(_('No match for argument: %s'),
                            self.base.output.term.bold(pkg.location))
            else:
                done = True

        # Reinstall packages.
        for pkg_spec in self.opts.pkg_specs + ['@' + x for x in self.opts.grp_specs]:
            try:
                self.base.reinstall(pkg_spec)
            except dnf.exceptions.PackagesNotInstalledError as err:
                for pkg in err.packages:
                    # BUG FIX: was `self.output.term.bold(...)` -- Command
                    # objects have no `output` attribute (the sibling calls in
                    # this method all use `self.base.output`), so this path
                    # raised AttributeError instead of logging.
                    logger.info(_('Package %s available, but not installed.'),
                                self.base.output.term.bold(pkg.name))
                    break
                # NOTE(review): reconstructed as in the source -- this runs
                # after the loop whether or not err.packages was empty;
                # upstream may intend a for/else here. TODO confirm against
                # dnf git history.
                logger.info(_('No match for argument: %s'),
                            self.base.output.term.bold(pkg_spec))
            except dnf.exceptions.PackagesNotAvailableError as err:
                for pkg in err.packages:
                    xmsg = ''
                    pkgrepo = self.base.history.repo(pkg)
                    if pkgrepo:
                        xmsg = _(' (from %s)') % pkgrepo
                    msg = _('Installed package %s%s not available.')
                    logger.info(msg, self.base.output.term.bold(pkg),
                                xmsg)
            except dnf.exceptions.MarkingError:
                assert False, 'Only the above marking errors are expected.'
            else:
                done = True

        if not done:
            raise dnf.exceptions.Error(_('No packages marked for reinstall.'))
| 4,189
|
Python
|
.py
| 89
| 36.573034
| 85
| 0.626223
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,772
|
upgrade.py
|
rpm-software-management_dnf/dnf/cli/commands/upgrade.py
|
# upgrade.py
# Upgrade CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import dnf.exceptions
import dnf.base
import dnf.util
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _
logger = logging.getLogger('dnf')
class UpgradeCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    update command.
    """

    aliases = ('upgrade', 'update', 'upgrade-to', 'update-to', 'localupdate', 'up')
    summary = _('upgrade a package or packages on your system')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('packages', nargs='*', help=_('Package to upgrade'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            metavar=_('PACKAGE'))

    def configure(self):
        """Verify that conditions are met so that this command can run.

        These include that there are enabled repositories with gpg
        keys, and that this command is being run by the root user.
        """
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        if dnf.util._is_file_pattern_present(self.opts.pkg_specs):
            self.base.conf.optional_metadata_types += ["filelists"]
        commands._checkGPGKey(self.base, self.cli)
        if not self.opts.filenames:
            commands._checkEnabledRepo(self.base)
        # Subclasses (upgrade-minimal, security variants) override these.
        self.upgrade_minimal = None
        self.all_security = None
        self.skipped_grp_specs = None

    def run(self):
        """Mark upgrades; raise dnf.exceptions.Error if nothing matched."""
        cmp_type = "eq" if self.upgrade_minimal else "gte"
        self.cli._populate_update_security_filter(self.opts, cmp_type=cmp_type,
                                                  all=self.all_security)
        if self.opts.filenames or self.opts.pkg_specs or self.opts.grp_specs:
            result = False
            result |= self._update_modules()
            result |= self._update_files()
            result |= self._update_packages()
            result |= self._update_groups()
            if result:
                return
        else:
            self.base.upgrade_all()
            return
        raise dnf.exceptions.Error(_('No packages marked for upgrade.'))

    def _update_modules(self):
        # Upgrade module streams first; group specs not consumed as modules
        # are kept in self.skipped_grp_specs for _update_groups().
        group_specs_num = len(self.opts.grp_specs)
        if dnf.base.WITH_MODULES:
            module_base = dnf.module.module_base.ModuleBase(self.base)
            self.skipped_grp_specs = module_base.upgrade(self.opts.grp_specs)
        else:
            self.skipped_grp_specs = self.opts.grp_specs
        return len(self.skipped_grp_specs) != group_specs_num

    def _update_files(self):
        # Upgrade from local/remote rpm files given on the command line.
        success = False
        if self.opts.filenames:
            for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False,
                                                 progress=self.base.output.progress):
                try:
                    self.base.package_upgrade(pkg)
                    success = True
                except dnf.exceptions.MarkingError:
                    # IDIOM: unused `as e` binding removed.
                    logger.info(_('No match for argument: %s'),
                                self.base.output.term.bold(pkg.location))
        return success

    def _update_packages(self):
        # Upgrade packages matching the given specs.
        success = False
        for pkg_spec in self.opts.pkg_specs:
            try:
                self.base.upgrade(pkg_spec)
                success = True
            except dnf.exceptions.MarkingError:
                # IDIOM: unused `as e` binding removed.
                logger.info(_('No match for argument: %s'),
                            self.base.output.term.bold(pkg_spec))
        return success

    def _update_groups(self):
        # Upgrade environment/group specs that modules did not handle.
        if self.skipped_grp_specs:
            self.base.env_group_upgrade(self.skipped_grp_specs)
            return True
        return False
| 4,887
|
Python
|
.py
| 109
| 35.366972
| 85
| 0.638924
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,773
|
autoremove.py
|
rpm-software-management_dnf/dnf/cli/commands/autoremove.py
|
# autoremove.py
# Autoremove CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _
import dnf.exceptions
import hawkey
import logging
logger = logging.getLogger("dnf")
class AutoremoveCommand(commands.Command):
    """CLI command that removes leaf packages installed only as dependencies."""

    nevra_forms = {'autoremove-n': hawkey.FORM_NAME,
                   'autoremove-na': hawkey.FORM_NA,
                   'autoremove-nevra': hawkey.FORM_NEVRA}
    aliases = ('autoremove',) + tuple(nevra_forms.keys())
    summary = _('remove all unneeded packages that were originally installed '
                'as dependencies')

    @staticmethod
    def set_argparser(parser):
        """Register the positional package/group/file spec arguments."""
        parser.add_argument('packages', nargs='*', help=_('Package to remove'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            metavar=_('PACKAGE'))

    def configure(self):
        demands = self.cli.demands
        demands.resolving = True
        demands.root_user = True
        demands.sack_activation = True
        specs_given = bool(self.opts.grp_specs or self.opts.pkg_specs
                           or self.opts.filenames)
        if specs_given:
            self.base.conf.clean_requirements_on_remove = True
            demands.allow_erasing = True
            # disable all available repos to delete whole dependency tree
            # instead of replacing removable package with available packages
            demands.available_repos = False
        else:
            demands.available_repos = True
            demands.fresh_metadata = False

    def run(self):
        specs_given = bool(self.opts.grp_specs or self.opts.pkg_specs
                           or self.opts.filenames)
        if not specs_given:
            # No arguments: autoremove every unneeded dependency.
            self.base.autoremove()
            return
        if self.opts.command in self.nevra_forms:
            forms = [self.nevra_forms[self.opts.command]]
        else:
            forms = []
        self.base.autoremove(forms,
                             self.opts.pkg_specs,
                             self.opts.grp_specs,
                             self.opts.filenames)
| 3,046
|
Python
|
.py
| 65
| 38.646154
| 80
| 0.671607
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,774
|
swap.py
|
rpm-software-management_dnf/dnf/cli/commands/swap.py
|
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.cli import commands
import dnf.util
import logging
logger = logging.getLogger("dnf")
class SwapCommand(commands.Command):
    """A class containing methods needed by the cli to execute the swap command.
    """
    aliases = ('swap',)
    summary = _('run an interactive {prog} mod for remove and install one spec').format(
        prog=dnf.util.MAIN_PROG_UPPER)

    @staticmethod
    def set_argparser(parser):
        """Register the two positional specs: one to remove, one to install."""
        parser.add_argument('remove_spec', action="store", help=_('The specs that will be removed'))
        parser.add_argument('install_spec', action="store", help=_(
            'The specs that will be installed'))

    def configure(self):
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        commands._checkGPGKey(self.base, self.cli)
        commands._checkEnabledRepo(self.base, [self.opts.install_spec])

    def _perform(self, cmd_str, spec):
        # Look up the registered command class for *cmd_str* and, when it
        # exists, parse and run it against the single *spec* argument.
        cmd_cls = self.cli.cli_commands.get(cmd_str)
        if cmd_cls is None:
            return
        subcommand = cmd_cls(self.cli)
        self.cli.optparser.parse_command_args(subcommand, [cmd_str, spec])
        subcommand.run()

    def run(self):
        # The install part must be performed before the remove one because it can
        # operate on local rpm files. Command line packages cannot be added
        # to the sack once the goal is created.
        self._perform('install', self.opts.install_spec)
        self._perform('remove', self.opts.remove_spec)
| 2,625
|
Python
|
.py
| 55
| 42.545455
| 100
| 0.7125
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,775
|
__init__.py
|
rpm-software-management_dnf/dnf/cli/commands/__init__.py
|
# Copyright 2006 Duke University
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Seth Vidal
"""
Classes for subcommands of the yum command line interface.
"""
from __future__ import print_function
from __future__ import unicode_literals
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _
import dnf.cli
import dnf.exceptions
import dnf.pycomp
import dnf.util
import logging
import os
logger = logging.getLogger('dnf')
_RPM_VERIFY = _("To diagnose the problem, try running: '%s'.") % \
'rpm -Va --nofiles --nodigest'
_RPM_REBUILDDB = _("You probably have corrupted RPMDB, running '%s'"
" might fix the issue.") % 'rpm --rebuilddb'
gpg_msg = \
_("""You have enabled checking of packages via GPG keys. This is a good thing.
However, you do not have any GPG public keys installed. You need to download
the keys for packages you wish to install and install them.
You can do that by running the command:
rpm --import public.gpg.key
Alternatively you can specify the url to the key you would like to use
for a repository in the 'gpgkey' option in a repository section and {prog}
will install it for you.
For more information contact your distribution or package provider.""")
def _checkGPGKey(base, cli):
    """Verify that there are gpg keys for the enabled repositories in the
    rpm database.

    :param base: a :class:`dnf.Base` object.
    :param cli: the active CLI object (unused here, kept for interface parity).
    :raises: :class:`cli.CliError`
    """
    # Nothing to verify when signature checking is globally disabled.
    if not base.conf.gpgcheck:
        return
    if not base._gpg_key_check():
        # No keys in the rpm database: report the first enabled repository
        # that requires signature checking but ships no 'gpgkey' URL, then abort.
        for repo in base.repos.iter_enabled():
            if (repo.gpgcheck or repo.repo_gpgcheck) and not repo.gpgkey:
                logger.critical("\n%s\n", gpg_msg.format(prog=dnf.util.MAIN_PROG_UPPER))
                logger.critical(_("Problem repository: %s"), repo)
                raise dnf.cli.CliError
def _checkEnabledRepo(base, possible_local_files=()):
"""Verify that there is at least one enabled repo.
:param base: a :class:`dnf.Base` object.
:param possible_local_files: the list of strings that could be a local rpms
:raises: :class:`cli.CliError`:
"""
if base.repos._any_enabled():
return
for lfile in possible_local_files:
if lfile.endswith(".rpm") and os.path.exists(lfile):
return
scheme = dnf.pycomp.urlparse.urlparse(lfile)[0]
if scheme in ('http', 'ftp', 'file', 'https'):
return
msg = _('There are no enabled repositories in "{}".').format('", "'.join(base.conf.reposdir))
raise dnf.cli.CliError(msg)
class Command(object):
    """Abstract base class for CLI commands."""

    # Stable public attributes (marked ":api" upstream): the names this
    # command responds to and the one-line help text shown in listings.
    aliases = [] # :api
    summary = "" # :api
    # Parsed argparse namespace; assigned by the option parser before
    # configure()/run() are invoked.
    opts = None

    def __init__(self, cli):
        # :api
        self.cli = cli

    @property
    def base(self):
        # :api
        return self.cli.base

    @property
    def _basecmd(self):
        # Canonical (first) alias of this command.
        return self.aliases[0]

    @property
    def output(self):
        return self.cli.base.output

    def set_argparser(self, parser):
        """Define command specific options and arguments. #:api"""
        pass

    def pre_configure(self):
        # :api
        """Do any command-specific pre-configuration."""
        pass

    def configure(self):
        # :api
        """Do any command-specific configuration."""
        pass

    def get_error_output(self, error):
        """Get suggestions for resolving the given error."""
        if isinstance(error, dnf.exceptions.TransactionCheckError):
            return (_RPM_VERIFY, _RPM_REBUILDDB)
        raise NotImplementedError('error not supported yet: %s' % error)

    def run(self):
        # :api
        """Execute the command."""
        pass

    def run_resolved(self):
        """Finalize operation after resolvement"""
        pass

    def run_transaction(self):
        """Finalize operations post-transaction."""
        pass
class InfoCommand(Command):
    """A class containing methods needed by the cli to execute the
    info command.
    """
    aliases = ('info', 'if')
    summary = _('display details about a package or group of packages')

    # Recognized package-narrowing keywords for the positional argument.
    DEFAULT_PKGNARROW = 'all'
    pkgnarrows = {'available', 'installed', 'extras', 'updates', 'upgrades',
                  'autoremove', 'recent', 'obsoletes', DEFAULT_PKGNARROW}

    @classmethod
    def set_argparser(cls, parser):
        # The narrowing switches are mutually exclusive; they all write into
        # the private '_packages_action' destination.
        narrows = parser.add_mutually_exclusive_group()
        narrows.add_argument('--all', dest='_packages_action',
                             action='store_const', const='all', default=None,
                             help=_("show all packages (default)"))
        narrows.add_argument('--available', dest='_packages_action',
                             action='store_const', const='available',
                             help=_("show only available packages"))
        narrows.add_argument('--installed', dest='_packages_action',
                             action='store_const', const='installed',
                             help=_("show only installed packages"))
        narrows.add_argument('--extras', dest='_packages_action',
                             action='store_const', const='extras',
                             help=_("show only extras packages"))
        # '--updates' is kept as a compatibility spelling of '--upgrades'.
        narrows.add_argument('--updates', dest='_packages_action',
                             action='store_const', const='upgrades',
                             help=_("show only upgrades packages"))
        narrows.add_argument('--upgrades', dest='_packages_action',
                             action='store_const', const='upgrades',
                             help=_("show only upgrades packages"))
        narrows.add_argument('--autoremove', dest='_packages_action',
                             action='store_const', const='autoremove',
                             help=_("show only autoremove packages"))
        narrows.add_argument('--recent', dest='_packages_action',
                             action='store_const', const='recent',
                             help=_("show only recently changed packages"))
        parser.add_argument('packages', nargs='*', metavar=_('PACKAGE'),
                            choices=cls.pkgnarrows, default=cls.DEFAULT_PKGNARROW,
                            action=OptionParser.PkgNarrowCallback,
                            help=_("Package name specification"))

    def configure(self):
        demands = self.cli.demands
        demands.sack_activation = True
        # A narrowing switch overrides any narrowing given positionally.
        if self.opts._packages_action:
            self.opts.packages_action = self.opts._packages_action
        if self.opts.packages_action != 'installed':
            demands.available_repos = True
        if self.opts.obsoletes:
            # '--obsoletes' conflicts with any explicit narrowing switch.
            if self.opts._packages_action:
                self.cli._option_conflict("--obsoletes", "--" + self.opts._packages_action)
            else:
                self.opts.packages_action = 'obsoletes'
        # Normalize the legacy positional keyword 'updates' to 'upgrades'.
        if self.opts.packages_action == 'updates':
            self.opts.packages_action = 'upgrades'

    def run(self):
        self.cli._populate_update_security_filter(self.opts)
        return self.base.output_packages('info', self.opts.packages_action,
                                         self.opts.packages)
class ListCommand(InfoCommand):
    """A class containing methods needed by the cli to execute the
    list command.
    """
    aliases = ('list', 'ls')
    summary = _('list a package or groups of packages')

    def run(self):
        # Same flow as InfoCommand.run, but with 'list'-style output.
        self.cli._populate_update_security_filter(self.opts)
        return self.base.output_packages(
            'list', self.opts.packages_action, self.opts.packages)
class ProvidesCommand(Command):
    """A class containing methods needed by the cli to execute the
    provides command.
    """
    aliases = ('provides', 'whatprovides', 'prov', 'wp')
    summary = _('find what package provides the given value')

    @staticmethod
    def set_argparser(parser):
        """Register one or more provide specifications to look up."""
        parser.add_argument('dependency', nargs='+', metavar=_('PROVIDE'),
                            help=_("Provide specification to search for"))

    def configure(self):
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.fresh_metadata = False
        # Provide lookups can match file names, so filelists metadata is needed.
        self.base.conf.optional_metadata_types += ["filelists"]

    def run(self):
        logger.debug(_("Searching Packages: "))
        return self.base.provides(self.opts.dependency)
class CheckUpdateCommand(Command):
    """A class containing methods needed by the cli to execute the
    check-update command.
    """
    aliases = ('check-update', 'check-upgrade')
    summary = _('check for available package upgrades')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('--changelogs', dest='changelogs',
                            default=False, action='store_true',
                            help=_('show changelogs before update'))
        parser.add_argument('packages', nargs='*', metavar=_('PACKAGE'))

    def configure(self):
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.plugin_filtering_enabled = True
        if self.opts.changelogs:
            demands.changelogs = True
        # File patterns in the specs require filelists metadata to match.
        if dnf.util._is_file_pattern_present(self.opts.packages):
            self.base.conf.optional_metadata_types += ["filelists"]
        _checkEnabledRepo(self.base)

    def run(self):
        self.cli._populate_update_security_filter(self.opts, cmp_type="gte")
        found = self.base.check_updates(self.opts.packages, print_=True,
                                        changelogs=self.opts.changelogs)
        if found:
            # Exit status 100 conventionally signals "updates are available".
            self.cli.demands.success_exit_status = 100
        if self.base.conf.autocheck_running_kernel:
            self.cli._check_running_kernel()
class RepoPkgsCommand(Command):
    """Implementation of the repository-packages command.

    Dispatches to one of the nested *SubCommand classes based on the
    SUBCOMMAND positional argument; each sub-command operates only on
    packages belonging to the repository given by REPOID.
    """

    class CheckUpdateSubCommand(Command):
        """Implementation of the check-update sub-command."""

        aliases = ('check-update',)

        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            found = self.base.check_updates(self.opts.pkg_specs,
                                            self.reponame, print_=True)
            if found:
                # Exit status 100 conventionally signals "updates available".
                self.cli.demands.success_exit_status = 100

    class InfoSubCommand(Command):
        """Implementation of the info sub-command."""

        aliases = ('info',)

        def configure(self):
            demands = self.cli.demands
            demands.sack_activation = True
            # A narrowing switch overrides the positional narrowing keyword.
            if self.opts._pkg_specs_action:
                self.opts.pkg_specs_action = self.opts._pkg_specs_action
            if self.opts.pkg_specs_action != 'installed':
                demands.available_repos = True
            if self.opts.obsoletes:
                if self.opts._pkg_specs_action:
                    self.cli._option_conflict("--obsoletes", "--" + self.opts._pkg_specs_action)
                else:
                    self.opts.pkg_specs_action = 'obsoletes'

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            self.cli._populate_update_security_filter(self.opts)
            self.base.output_packages('info', self.opts.pkg_specs_action,
                                      self.opts.pkg_specs, self.reponame)

    class InstallSubCommand(Command):
        """Implementation of the install sub-command."""

        aliases = ('install',)

        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            self.cli._populate_update_security_filter(self.opts)
            _checkGPGKey(self.base, self.cli)
            done = False
            if not self.opts.pkg_specs:
                # Install all packages.
                try:
                    self.base.install('*', self.reponame)
                except dnf.exceptions.MarkingError:
                    logger.info(_('No package available.'))
                else:
                    done = True
            else:
                # Install packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self.base.install(pkg_spec, self.reponame)
                    except dnf.exceptions.MarkingError as e:
                        msg = '{}: {}'.format(e.value, self.base.output.term.bold(pkg_spec))
                        logger.info(msg)
                    else:
                        done = True
            if not done:
                raise dnf.exceptions.Error(_('No packages marked for install.'))

    class ListSubCommand(InfoSubCommand):
        """Implementation of the list sub-command."""

        aliases = ('list',)

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            self.cli._populate_update_security_filter(self.opts)
            self.base.output_packages('list', self.opts.pkg_specs_action,
                                      self.opts.pkg_specs, self.reponame)

    class MoveToSubCommand(Command):
        """Implementation of the move-to sub-command."""

        aliases = ('move-to',)

        def configure(self):
            demands = self.cli.demands
            demands.sack_activation = True
            demands.available_repos = True
            demands.resolving = True
            demands.root_user = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            _checkGPGKey(self.base, self.cli)
            done = False
            if not self.opts.pkg_specs:
                # Reinstall all packages.
                try:
                    self.base.reinstall('*', new_reponame=self.reponame)
                except dnf.exceptions.PackagesNotInstalledError:
                    logger.info(_('No package installed.'))
                except dnf.exceptions.PackagesNotAvailableError:
                    logger.info(_('No package available.'))
                except dnf.exceptions.MarkingError:
                    assert False, 'Only the above marking errors are expected.'
                else:
                    done = True
            else:
                # Reinstall packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self.base.reinstall(pkg_spec, new_reponame=self.reponame)
                    except dnf.exceptions.PackagesNotInstalledError:
                        msg = _('No match for argument: %s')
                        logger.info(msg, pkg_spec)
                    except dnf.exceptions.PackagesNotAvailableError as err:
                        for pkg in err.packages:
                            xmsg = ''
                            # Mention the originating repo when it is recorded
                            # in the history database.
                            pkgrepo = self.base.history.repo(pkg)
                            if pkgrepo:
                                xmsg = _(' (from %s)') % pkgrepo
                            msg = _('Installed package %s%s not available.')
                            logger.info(msg, self.output.term.bold(pkg), xmsg)
                    except dnf.exceptions.MarkingError:
                        assert False, \
                            'Only the above marking errors are expected.'
                    else:
                        done = True
            if not done:
                raise dnf.exceptions.Error(_('Nothing to do.'))

    class ReinstallOldSubCommand(Command):
        """Implementation of the reinstall-old sub-command."""

        aliases = ('reinstall-old',)

        def configure(self):
            demands = self.cli.demands
            demands.sack_activation = True
            demands.available_repos = True
            demands.resolving = True
            demands.root_user = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            _checkGPGKey(self.base, self.cli)
            done = False
            if not self.opts.pkg_specs:
                # Reinstall all packages.
                try:
                    self.base.reinstall('*', self.reponame, self.reponame)
                except dnf.exceptions.PackagesNotInstalledError:
                    msg = _('No package installed from the repository.')
                    logger.info(msg)
                except dnf.exceptions.PackagesNotAvailableError:
                    logger.info(_('No package available.'))
                except dnf.exceptions.MarkingError:
                    assert False, 'Only the above marking errors are expected.'
                else:
                    done = True
            else:
                # Reinstall packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self.base.reinstall(pkg_spec, self.reponame,
                                            self.reponame)
                    except dnf.exceptions.PackagesNotInstalledError:
                        msg = _('No match for argument: %s')
                        logger.info(msg, pkg_spec)
                    except dnf.exceptions.PackagesNotAvailableError as err:
                        for pkg in err.packages:
                            xmsg = ''
                            pkgrepo = self.base.history.repo(pkg)
                            if pkgrepo:
                                xmsg = _(' (from %s)') % pkgrepo
                            msg = _('Installed package %s%s not available.')
                            logger.info(msg, self.output.term.bold(pkg), xmsg)
                    except dnf.exceptions.MarkingError:
                        assert False, \
                            'Only the above marking errors are expected.'
                    else:
                        done = True
            if not done:
                raise dnf.exceptions.Error(_('Nothing to do.'))

    class ReinstallSubCommand(Command):
        """Implementation of the reinstall sub-command.

        Tries reinstall-old first and falls back to move-to; succeeds as
        soon as one of the wrapped sub-commands succeeds.
        """

        aliases = ('reinstall',)

        def __init__(self, cli):
            """Initialize the command."""
            super(RepoPkgsCommand.ReinstallSubCommand, self).__init__(cli)
            self.wrapped_commands = (RepoPkgsCommand.ReinstallOldSubCommand(cli),
                                     RepoPkgsCommand.MoveToSubCommand(cli))

        def configure(self):
            self.cli.demands.available_repos = True
            # Propagate options/repo to the wrapped sub-commands.
            for command in self.wrapped_commands:
                command.opts = self.opts
                command.reponame = self.reponame
                command.configure()

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            _checkGPGKey(self.base, self.cli)
            for command in self.wrapped_commands:
                try:
                    command.run_on_repo()
                except dnf.exceptions.Error:
                    continue
                else:
                    break
            else:
                # Neither wrapped command succeeded.
                raise dnf.exceptions.Error(_('No packages marked for reinstall.'))

    class RemoveOrDistroSyncSubCommand(Command):
        """Implementation of the remove-or-distro-sync sub-command."""

        aliases = ('remove-or-distro-sync',)

        def configure(self):
            demands = self.cli.demands
            demands.available_repos = True
            demands.sack_activation = True
            demands.resolving = True
            demands.root_user = True

        def _replace(self, pkg_spec, reponame):
            """Synchronize a package with another repository or remove it."""
            # NOTE(review): relies on dnf.subject being importable via the dnf
            # package (no explicit import in this file) -- confirm.
            self.cli.base.sack.disable_repo(reponame)
            subject = dnf.subject.Subject(pkg_spec)
            matches = subject.get_best_query(self.cli.base.sack)
            history = self.cli.base.history
            installed = [
                pkg for pkg in matches.installed()
                if history.repo(pkg) == reponame]
            if not installed:
                raise dnf.exceptions.PackagesNotInstalledError(
                    'no package matched', pkg_spec)
            available = matches.available()
            clean_deps = self.cli.base.conf.clean_requirements_on_remove
            for package in installed:
                if available.filter(name=package.name, arch=package.arch):
                    # A replacement exists in another repo: distro-sync it.
                    self.cli.base._goal.distupgrade(package)
                else:
                    self.cli.base._goal.erase(package, clean_deps=clean_deps)

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            _checkGPGKey(self.base, self.cli)
            done = False
            if not self.opts.pkg_specs:
                # Sync all packages.
                try:
                    self._replace('*', self.reponame)
                except dnf.exceptions.PackagesNotInstalledError:
                    msg = _('No package installed from the repository.')
                    logger.info(msg)
                else:
                    done = True
            else:
                # Reinstall packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self._replace(pkg_spec, self.reponame)
                    except dnf.exceptions.PackagesNotInstalledError:
                        msg = _('No match for argument: %s')
                        logger.info(msg, pkg_spec)
                    else:
                        done = True
            if not done:
                raise dnf.exceptions.Error(_('Nothing to do.'))

    class RemoveOrReinstallSubCommand(Command):
        """Implementation of the remove-or-reinstall sub-command."""

        aliases = ('remove-or-reinstall',)

        def configure(self):
            demands = self.cli.demands
            demands.sack_activation = True
            demands.available_repos = True
            demands.resolving = True
            demands.root_user = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            _checkGPGKey(self.base, self.cli)
            done = False
            if not self.opts.pkg_specs:
                # Reinstall all packages.
                try:
                    self.base.reinstall('*', old_reponame=self.reponame,
                                        new_reponame_neq=self.reponame,
                                        remove_na=True)
                except dnf.exceptions.PackagesNotInstalledError:
                    msg = _('No package installed from the repository.')
                    logger.info(msg)
                except dnf.exceptions.MarkingError:
                    assert False, 'Only the above marking error is expected.'
                else:
                    done = True
            else:
                # Reinstall packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self.base.reinstall(
                            pkg_spec, old_reponame=self.reponame,
                            new_reponame_neq=self.reponame, remove_na=True)
                    except dnf.exceptions.PackagesNotInstalledError:
                        msg = _('No match for argument: %s')
                        logger.info(msg, pkg_spec)
                    except dnf.exceptions.MarkingError:
                        assert False, 'Only the above marking error is expected.'
                    else:
                        done = True
            if not done:
                raise dnf.exceptions.Error(_('Nothing to do.'))

    class RemoveSubCommand(Command):
        """Implementation of the remove sub-command."""

        aliases = ('remove',)

        def configure(self):
            demands = self.cli.demands
            demands.sack_activation = True
            demands.allow_erasing = True
            # Removal needs no repo metadata, only the installed sack.
            demands.available_repos = False
            demands.resolving = True
            demands.root_user = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            done = False
            if not self.opts.pkg_specs:
                # Remove all packages.
                try:
                    self.base.remove('*', self.reponame)
                except dnf.exceptions.MarkingError:
                    msg = _('No package installed from the repository.')
                    logger.info(msg)
                else:
                    done = True
            else:
                # Remove packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self.base.remove(pkg_spec, self.reponame)
                    except dnf.exceptions.MarkingError as e:
                        logger.info(str(e))
                    else:
                        done = True
            if not done:
                # Unlike the other sub-commands, a no-op removal only warns.
                logger.warning(_('No packages marked for removal.'))

    class UpgradeSubCommand(Command):
        """Implementation of the upgrade sub-command."""

        aliases = ('upgrade', 'upgrade-to')

        def configure(self):
            demands = self.cli.demands
            demands.sack_activation = True
            demands.available_repos = True
            demands.resolving = True
            demands.root_user = True

        def run_on_repo(self):
            """Execute the command with respect to given arguments *cli_args*."""
            _checkGPGKey(self.base, self.cli)
            done = False
            if not self.opts.pkg_specs:
                # Update all packages.
                self.base.upgrade_all(self.reponame)
                done = True
            else:
                # Update packages.
                for pkg_spec in self.opts.pkg_specs:
                    try:
                        self.base.upgrade(pkg_spec, self.reponame)
                    except dnf.exceptions.MarkingError:
                        logger.info(_('No match for argument: %s'), pkg_spec)
                    else:
                        done = True
            if not done:
                raise dnf.exceptions.Error(_('No packages marked for upgrade.'))

    # All sub-command classes; instantiated per-CLI in __init__ below.
    SUBCMDS = {CheckUpdateSubCommand, InfoSubCommand, InstallSubCommand,
               ListSubCommand, MoveToSubCommand, ReinstallOldSubCommand,
               ReinstallSubCommand, RemoveOrDistroSyncSubCommand,
               RemoveOrReinstallSubCommand, RemoveSubCommand,
               UpgradeSubCommand}

    aliases = ('repository-packages',
               'repo-pkgs', 'repo-packages', 'repository-pkgs')
    summary = _('run commands on top of all packages in given repository')

    def __init__(self, cli):
        """Initialize the command."""
        super(RepoPkgsCommand, self).__init__(cli)
        subcmd_objs = (subcmd(cli) for subcmd in self.SUBCMDS)
        self.subcmd = None
        # Map every alias of every sub-command to its (shared) instance.
        self._subcmd_name2obj = {
            alias: subcmd for subcmd in subcmd_objs for alias in subcmd.aliases}

    def set_argparser(self, parser):
        narrows = parser.add_mutually_exclusive_group()
        narrows.add_argument('--all', dest='_pkg_specs_action',
                             action='store_const', const='all', default=None,
                             help=_("show all packages (default)"))
        narrows.add_argument('--available', dest='_pkg_specs_action',
                             action='store_const', const='available',
                             help=_("show only available packages"))
        narrows.add_argument('--installed', dest='_pkg_specs_action',
                             action='store_const', const='installed',
                             help=_("show only installed packages"))
        narrows.add_argument('--extras', dest='_pkg_specs_action',
                             action='store_const', const='extras',
                             help=_("show only extras packages"))
        narrows.add_argument('--updates', dest='_pkg_specs_action',
                             action='store_const', const='upgrades',
                             help=_("show only upgrades packages"))
        narrows.add_argument('--upgrades', dest='_pkg_specs_action',
                             action='store_const', const='upgrades',
                             help=_("show only upgrades packages"))
        narrows.add_argument('--autoremove', dest='_pkg_specs_action',
                             action='store_const', const='autoremove',
                             help=_("show only autoremove packages"))
        narrows.add_argument('--recent', dest='_pkg_specs_action',
                             action='store_const', const='recent',
                             help=_("show only recently changed packages"))
        parser.add_argument(
            'reponame', nargs=1, action=OptionParser._RepoCallbackEnable,
            metavar=_('REPOID'), help=_("Repository ID"))
        subcommand_choices = [subcmd.aliases[0] for subcmd in self.SUBCMDS]
        subcommand_choices_all = [alias for subcmd in self.SUBCMDS for alias in subcmd.aliases]
        parser.add_argument('subcmd', nargs=1, metavar="SUBCOMMAND",
                            choices=subcommand_choices_all, help=", ".join(subcommand_choices))
        DEFAULT_PKGNARROW = 'all'
        pkgnarrows = {DEFAULT_PKGNARROW, 'installed', 'available',
                      'autoremove', 'extras', 'obsoletes', 'recent',
                      'upgrades'}
        parser.add_argument('pkg_specs', nargs='*', metavar=_('PACKAGE'),
                            choices=pkgnarrows, default=DEFAULT_PKGNARROW,
                            action=OptionParser.PkgNarrowCallback,
                            help=_("Package specification"))

    def configure(self):
        """Verify whether the command can run with given arguments."""
        # Check sub-command.
        try:
            self.subcmd = self._subcmd_name2obj[self.opts.subcmd[0]]
        except (dnf.cli.CliError, KeyError) as e:
            self.cli.optparser.print_usage()
            raise dnf.cli.CliError
        # Delegate option handling to the chosen sub-command.
        self.subcmd.opts = self.opts
        self.subcmd.reponame = self.opts.reponame[0]
        self.subcmd.configure()

    def run(self):
        """Execute the command with respect to given arguments *extcmds*."""
        self.subcmd.run_on_repo()
class HelpCommand(Command):
    """A class containing methods needed by the cli to execute the
    help command.
    """
    aliases = ('help',)
    summary = _('display a helpful usage message')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('cmd', nargs='?', metavar=_('COMMAND'),
                            help=_("{prog} command to get help for").format(
                                prog=dnf.util.MAIN_PROG_UPPER))

    def run(self):
        # With no (or an unknown) command name, fall back to the generic
        # usage message; otherwise print the matching command's own help.
        requested = self.opts.cmd
        if requested and requested in self.cli.cli_commands:
            command = self.cli.cli_commands[requested]
            self.cli.optparser.print_help(command(self))
        else:
            self.cli.optparser.print_help()
| 32,332
|
Python
|
.py
| 682
| 33.475073
| 97
| 0.561217
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,776
|
makecache.py
|
rpm-software-management_dnf/dnf/cli/commands/makecache.py
|
# makecache.py
# Makecache CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.i18n import _
import argparse
import dnf.cli
import dnf.exceptions
import dnf.util
import logging
logger = logging.getLogger("dnf")
class MakeCacheCommand(commands.Command):
    """CLI command that downloads and caches repository metadata."""

    aliases = ('makecache', 'mc')
    summary = _('generate the metadata cache')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('--timer', action='store_true', dest="timer_opt")
        # compatibility with dnf < 2.0
        parser.add_argument('timer', nargs='?', choices=['timer'],
                            metavar='timer', help=argparse.SUPPRESS)

    def run(self):
        # Timer mode is enabled by either the legacy positional form
        # ("makecache timer") or the modern --timer switch.
        timer_requested = self.opts.timer is not None or self.opts.timer_opt
        logger.debug(_("Making cache files for all metadata files."))
        return self.base.update_cache(timer_requested)
| 1,901
|
Python
|
.py
| 43
| 40.627907
| 77
| 0.737979
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,777
|
clean.py
|
rpm-software-management_dnf/dnf/cli/commands/clean.py
|
# clean.py
# Clean CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.i18n import _, P_
from dnf.yum import misc
import dnf.cli
import dnf.exceptions
import dnf.lock
import dnf.logging
import dnf.repo
import logging
import os
import re
import time
logger = logging.getLogger("dnf")
# Dict mapping cmdline arguments to actual data types to be cleaned up.
# Note that 'expire-cache' is a pseudo-type: CleanCommand.run() handles it
# as bookkeeping (marking repos expired) rather than file removal, which is
# why 'all' lists only the three real on-disk types.
_CACHE_TYPES = {
    'metadata': ['metadata', 'dbcache', 'expire-cache'],
    'packages': ['packages'],
    'dbcache': ['dbcache'],
    'expire-cache': ['expire-cache'],
    'all': ['metadata', 'packages', 'dbcache'],
}
def _tree(dirpath):
    """Walk *dirpath* recursively and yield normalized relative file paths."""
    for current_dir, _subdirs, filenames in os.walk(dirpath):
        rel_dir = os.path.relpath(current_dir, dirpath)
        for name in filenames:
            yield os.path.normpath(os.path.join(rel_dir, name))
def _filter(files, patterns):
    """Yield each filename once for every pattern it matches.

    Note: a file matching several patterns is yielded several times, exactly
    as the callers expect.
    """
    for fname in files:
        for pat in patterns:
            if re.match(pat, fname):
                yield fname
def _clean(dirpath, files):
    """Remove the given relative *files* from *dirpath*; return the count."""
    removed = 0
    for name in files:
        full_path = os.path.join(dirpath, name)
        logger.log(dnf.logging.DDEBUG, _('Removing file %s'), full_path)
        # unlink_f ignores a missing file, so concurrent removal is harmless.
        misc.unlink_f(full_path)
        removed += 1
    return removed
def _cached_repos(files):
    """Return the set of repo IDs that have cached metadata among *files*."""
    pattern = dnf.repo.CACHE_FILES['metadata']
    repoids = set()
    for fname in files:
        match = re.match(pattern, fname)
        if match:
            repoids.add(match.group('repoid'))
    return repoids
class CleanCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    clean command.
    """
    aliases = ('clean',)
    summary = _('remove cached data')
    @staticmethod
    def set_argparser(parser):
        # One or more cache categories; expanded via _CACHE_TYPES in run().
        parser.add_argument('type', nargs='+',
                            choices=_CACHE_TYPES.keys(),
                            help=_('Metadata type to clean'))
    def run(self):
        cachedir = self.base.conf.cachedir
        # Take all three locks so no concurrent dnf process reads or writes
        # the cache while files are being deleted.
        md_lock = dnf.lock.build_metadata_lock(cachedir, True)
        download_lock = dnf.lock.build_download_lock(cachedir, True)
        rpmdb_lock = dnf.lock.build_rpmdb_lock(self.base.conf.persistdir, True)
        while True:
            try:
                with md_lock, download_lock, rpmdb_lock:
                    # Expand the cmdline categories into concrete cache types.
                    types = set(t for c in self.opts.type for t in _CACHE_TYPES[c])
                    files = list(_tree(cachedir))
                    # NOTE(review): the message is concatenated *before* _()
                    # is applied, so no translation catalog entry can match;
                    # likely intended as _('Cleaning data: ') + ' '.join(types).
                    logger.debug(_('Cleaning data: ' + ' '.join(types)))
                    if 'expire-cache' in types:
                        # Expiring is bookkeeping, not file removal: remember
                        # the affected repo IDs and drop the pseudo-type so it
                        # is not looked up in dnf.repo.CACHE_FILES below.
                        expired = _cached_repos(files)
                        self.base._repo_persistor.expired_to_add.update(expired)
                        types.remove('expire-cache')
                        logger.info(_('Cache was expired'))
                    patterns = [dnf.repo.CACHE_FILES[t] for t in types]
                    count = _clean(cachedir, _filter(files, patterns))
                    logger.info(P_('%d file removed', '%d files removed', count) % count)
                    return
            except dnf.exceptions.LockError as e:
                # Another process holds one of the locks: wait and retry,
                # unless configuration asks to fail fast.
                if not self.base.conf.exit_on_lock:
                    msg = _('Waiting for process with pid %d to finish.') % (e.pid)
                    logger.info(msg)
                    time.sleep(3)
                else:
                    raise e
| 4,462
|
Python
|
.py
| 104
| 35.163462
| 89
| 0.638857
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,778
|
shell.py
|
rpm-software-management_dnf/dnf/cli/commands/shell.py
|
# shell.py
# Shell CLI command.
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dnf.cli import commands
from dnf.i18n import _, ucd
import dnf.util
import cmd
import copy
import dnf
import logging
import shlex
import sys
logger = logging.getLogger('dnf')
# only demands we'd like to override
class ShellDemandSheet(object):
    """Demands forced on for the whole shell session.

    Any demand not listed here is copied over from the default
    DemandSheet by ShellCommand.configure().
    """
    available_repos = True
    resolving = True
    root_user = True
    sack_activation = True
class ShellCommand(commands.Command, cmd.Cmd):
    """Interactive dnf shell: reads commands from stdin or a script file.

    Doubles as a cmd.Cmd interpreter; onecmd() is the single entry point
    for every line typed or read from a script.
    """

    aliases = ('shell', 'sh')
    summary = _('run an interactive {prog} shell').format(prog=dnf.util.MAIN_PROG_UPPER)

    # Shell-specific commands dispatched to the _<name> handlers below;
    # anything not listed is handled as a regular dnf command.
    MAPPING = {'repo': 'repo',
               'repository': 'repo',
               'exit': 'quit',
               'quit': 'quit',
               'run': 'ts_run',
               'ts': 'transaction',
               'transaction': 'transaction',
               'config': 'config',
               'resolvedep': 'resolve',
               'help': 'help'
               }

    def __init__(self, cli):
        commands.Command.__init__(self, cli)
        cmd.Cmd.__init__(self)
        self.prompt = '> '

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('script', nargs='?', metavar=_('SCRIPT'),
                            help=_('Script to run in {prog} shell').format(
                                prog=dnf.util.MAIN_PROG_UPPER))

    def configure(self):
        # append to ShellDemandSheet missing demands from
        # dnf.cli.demand.DemandSheet with their default values.
        default_demands = self.cli.demands
        self.cli.demands = ShellDemandSheet()
        for attr in dir(default_demands):
            if attr.startswith('__'):
                continue
            try:
                getattr(self.cli.demands, attr)
            except AttributeError:
                setattr(self.cli.demands, attr, getattr(default_demands, attr))

    def run(self):
        if self.opts.script:
            self._run_script(self.opts.script)
        else:
            self.cmdloop()

    def _clean(self):
        # Drop any pending transaction and reload the sack from scratch.
        self.base._finalize_base()
        self.base._transaction = None
        self.base.fill_sack()

    def onecmd(self, line):
        """Parse and execute one shell line (cmd.Cmd entry point)."""
        if not line or line == '\n':
            return
        if line == 'EOF':
            line = 'quit'
        try:
            s_line = shlex.split(line)
        except Exception:
            # shlex.split() raises ValueError on unbalanced quoting. This
            # was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; Exception keeps those propagating.
            self._help()
            return
        # reset option parser before each command, keep usage information
        self.cli.optparser.__init__(reset_usage=False)
        opts = self.cli.optparser.parse_main_args(s_line)
        # Disable shell recursion.
        if opts.command == 'shell':
            return
        if opts.command in self.MAPPING:
            # Shell-builtin: dispatch to the matching _<name>() handler
            # with everything after the command word as its arguments.
            getattr(self, '_' + self.MAPPING[opts.command])(s_line[1::])
        else:
            cmd_cls = self.cli.cli_commands.get(opts.command)
            if cmd_cls is not None:
                cmd = cmd_cls(self.cli)
                try:
                    opts = self.cli.optparser.parse_command_args(cmd, s_line)
                except SystemExit:
                    # argparse.ArgumentParser prints usage information and executes
                    # sys.exit() on problems with parsing command line arguments
                    return
                try:
                    # Give the command its own demand sheet so it cannot
                    # mutate the shell-wide demands.
                    cmd.cli.demands = copy.deepcopy(self.cli.demands)
                    cmd.configure()
                    cmd.run()
                except dnf.exceptions.Error as e:
                    logger.error(_("Error:") + " " + ucd(e))
                    return
            else:
                self._help()

    def _config(self, args=None):
        """Print or set a (possibly repo-scoped) configuration option."""
        def print_or_set(key, val, conf):
            if val:
                setattr(conf, key, val)
            else:
                try:
                    print('{}: {}'.format(key, getattr(conf, str(key))))
                except Exception:
                    # was a bare `except:`; narrowed so Ctrl-C still works.
                    logger.warning(_('Unsupported key value.'))
        if not args or len(args) > 2:
            self._help('config')
            return
        key = args[0]
        val = args[1] if len(args) == 2 else None
        period = key.find('.')
        if period != -1:
            # "repoid.option" form: apply to every matching repository.
            repo_name = key[:period]
            key = key[period+1:]
            repos = self.base.repos.get_matching(repo_name)
            for repo in repos:
                print_or_set(key, val, repo)
            if not repos:
                logger.warning(_('Could not find repository: %s'),
                               repo_name)
        else:
            print_or_set(key, val, self.base.conf)

    def _help(self, args=None):
        """Output help information.

        :param args: the command to output help information about. If
        *args* is an empty, general help will be output.
        """
        arg = args[0] if isinstance(args, list) and len(args) > 0 else args
        msg = None
        if arg:
            if arg == 'config':
                msg = _("""{} arg [value]
  arg: debuglevel, errorlevel, obsoletes, gpgcheck, assumeyes, exclude,
    repo_id.gpgcheck, repo_id.exclude
  If no value is given it prints the current value.
  If value is given it sets that value.""").format(arg)
            elif arg == 'help':
                msg = _("""{} [command]
    print help""").format(arg)
            elif arg in ['repo', 'repository']:
                msg = _("""{} arg [option]
  list: lists repositories and their status. option = [all | id | glob]
  enable: enable repositories. option = repository id
  disable: disable repositories. option = repository id""").format(arg)
            elif arg == 'resolvedep':
                msg = _("""{}
    resolve the transaction set""").format(arg)
            elif arg in ['transaction', 'ts']:
                msg = _("""{} arg
  list: lists the contents of the transaction
  reset: reset (zero-out) the transaction
  run: run the transaction""").format(arg)
            elif arg == 'run':
                msg = _("""{}
    run the transaction""").format(arg)
            elif arg in ['exit', 'quit']:
                msg = _("""{}
    exit the shell""").format(arg)
        if not msg:
            self.cli.optparser.print_help()
            msg = _("""Shell specific arguments:

config                   set config options
help                     print help
repository (or repo)     enable, disable or list repositories
resolvedep               resolve the transaction set
transaction (or ts)      list, reset or run the transaction set
run                      resolve and run the transaction set
exit (or quit)           exit the shell""")
        print('\n' + msg)

    def _repo(self, args=None):
        """Handle 'repo list|enable|disable' inside the shell."""
        cmd = args[0] if args else None
        if cmd in ['list', None]:
            self.onecmd('repolist ' + ' '.join(args[1:]))
        elif cmd in ['enable', 'disable']:
            repos = self.cli.base.repos
            fill_sack = False
            for repo in args[1::]:
                r = repos.get_matching(repo)
                if r:
                    getattr(r, cmd)()
                    fill_sack = True
                else:
                    logger.critical(_("Error:") + " " + _("Unknown repo: '%s'"),
                                    self.base.output.term.bold(repo))
            if fill_sack:
                self.base.fill_sack()
            # reset base._comps, as it has changed due to changing the repos
            self.base._comps = None
        else:
            self._help('repo')

    def _resolve(self, args=None):
        try:
            self.cli.base.resolve(self.cli.demands.allow_erasing)
        except dnf.exceptions.DepsolveError as e:
            print(e)

    def _run_script(self, file):
        """Execute shell commands from *file*, skipping '#' comment lines."""
        try:
            with open(file, 'r') as fd:
                lines = fd.readlines()
                for line in lines:
                    if not line.startswith('#'):
                        self.onecmd(line)
        except IOError:
            logger.info(_('Error: Cannot open %s for reading'), self.base.output.term.bold(file))
            sys.exit(1)

    def _transaction(self, args=None):
        """Handle 'transaction list|reset|run' inside the shell."""
        cmd = args[0] if args else None
        if cmd == 'reset':
            self._clean()
            return
        # 'list' and 'run' both need the transaction resolved first.
        self._resolve()
        if cmd in ['list', None]:
            if self.base._transaction:
                out = self.base.output.list_transaction(self.base._transaction)
                logger.info(out)
        elif cmd == 'run':
            try:
                self.base.do_transaction()
            except dnf.exceptions.Error as e:
                logger.error(_("Error:") + " " + ucd(e))
            else:
                logger.info(_("Complete!"))
            self._clean()
        else:
            self._help('transaction')

    def _ts_run(self, args=None):
        self._transaction(['run'])

    def _quit(self, args=None):
        logger.info(_('Leaving Shell'))
        sys.exit(0)
| 9,836
|
Python
|
.py
| 250
| 28.668
| 97
| 0.547846
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,779
|
mark.py
|
rpm-software-management_dnf/dnf/cli/commands/mark.py
|
# mark.py
# Mark CLI command.
#
# Copyright (C) 2015-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import unicode_literals
import libdnf.transaction
from dnf.i18n import _
from dnf.cli import commands
import dnf
import functools
import logging
logger = logging.getLogger("dnf")
class MarkCommand(commands.Command):
    """Mark or unmark installed packages as user/group installed.

    Writes the new reason into the history database and records an empty
    transaction so the change shows up in ``dnf history``.
    """

    aliases = ('mark',)
    summary = _('mark or unmark installed packages as installed by user.')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('mark', nargs=1, choices=['install', 'remove', 'group'],
                            help=_("install: mark as installed by user\n"
                                   "remove: unmark as installed by user\n"
                                   "group: mark as installed by group"))
        parser.add_argument('package', nargs='+', metavar="PACKAGE",
                            help=_("Package specification"))

    def _mark_install(self, pkg):
        # Record the package as explicitly requested by the user.
        self.base.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_USER)
        logger.info(_('%s marked as user installed.'), str(pkg))

    def _mark_remove(self, pkg):
        # Demote to dependency so autoremove may drop it later.
        self.base.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_DEPENDENCY)
        logger.info(_('%s unmarked as user installed.'), str(pkg))

    def _mark_group(self, pkg):
        self.base.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_GROUP)
        logger.info(_('%s marked as group installed.'), str(pkg))

    def configure(self):
        demands = self.cli.demands
        demands.sack_activation = True
        demands.root_user = True
        demands.available_repos = False
        demands.resolving = False

    def run(self):
        cmd = self.opts.mark[0]
        pkg_specs = self.opts.package
        # Plain getattr dispatch; the previous functools.partial wrapper
        # bound no arguments and was a no-op.
        mark_func = getattr(self, '_mark_' + cmd)
        notfound = []
        for pkg_spec in pkg_specs:
            # Distinct names for the user-supplied spec and the matched
            # packages; the old code reused 'pkg' for both.
            subj = dnf.subject.Subject(pkg_spec)
            q = subj.get_best_query(self.base.sack)
            for pkg in q:
                mark_func(pkg)
            if len(q) == 0:
                notfound.append(pkg_spec)
        if notfound:
            logger.error(_('Error:'))
            for pkg_spec in notfound:
                logger.error(_('Package %s is not installed.'), pkg_spec)
            raise dnf.cli.CliError
        # Record an empty transaction so the reason change is visible in
        # history; reuse the last rpmdb cookie when one exists.
        old = self.base.history.last()
        if old is None:
            rpmdb_version = self.base._ts.dbCookie()
        else:
            rpmdb_version = old.end_rpmdb_version
        self.base.history.beg(rpmdb_version, [], [])
        self.base.history.end(rpmdb_version)
| 3,534
|
Python
|
.py
| 78
| 37.525641
| 94
| 0.657841
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,780
|
upgrademinimal.py
|
rpm-software-management_dnf/dnf/cli/commands/upgrademinimal.py
|
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.cli.commands.upgrade import UpgradeCommand
class UpgradeMinimalCommand(UpgradeCommand):
    """A class containing methods needed by the cli to execute the check
    command.
    """

    aliases = ('upgrade-minimal', 'update-minimal', 'up-min')
    summary = _("upgrade, but only 'newest' package match which fixes a problem"
                " that affects your system")

    def configure(self):
        UpgradeCommand.configure(self)
        self.upgrade_minimal = True
        # When the user supplied no advisory filter at all, default to
        # considering every security update.
        requested_filters = (self.opts.bugfix, self.opts.enhancement,
                             self.opts.newpackage, self.opts.security,
                             self.opts.advisory, self.opts.bugzilla,
                             self.opts.cves, self.opts.severity)
        if not any(requested_filters):
            self.all_security = True
| 1,799
|
Python
|
.py
| 35
| 46.971429
| 80
| 0.73777
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,781
|
install.py
|
rpm-software-management_dnf/dnf/cli/commands/install.py
|
# install.py
# Install CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from itertools import chain
import hawkey
import dnf.exceptions
import dnf.util
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _
logger = logging.getLogger('dnf')
class InstallCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    install command.
    """
    # Aliases that force a specific NEVRA parsing form for the specs.
    nevra_forms = {'install-n': hawkey.FORM_NAME,
                   'install-na': hawkey.FORM_NA,
                   'install-nevra': hawkey.FORM_NEVRA}
    # Provide-string template used to look up alternatives for a failed spec.
    alternatives_provide = 'alternative-for({})'
    aliases = ('install', 'localinstall', 'in') + tuple(nevra_forms.keys())
    summary = _('install a package or packages on your system')
    @staticmethod
    def set_argparser(parser):
        # The callback splits arguments into pkg_specs / grp_specs / filenames.
        parser.add_argument('package', nargs='+', metavar=_('PACKAGE'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            help=_('Package to install'))
    def configure(self):
        """Verify that conditions are met so that this command can run.
        That there are enabled repositories with gpg keys, and that
        this command is called with appropriate arguments.
        """
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        # File-path patterns need filelists metadata to resolve.
        if dnf.util._is_file_pattern_present(self.opts.pkg_specs):
            self.base.conf.optional_metadata_types += ["filelists"]
        commands._checkGPGKey(self.base, self.cli)
        if not self.opts.filenames:
            commands._checkEnabledRepo(self.base)
    def run(self):
        """Mark files, groups/modules and packages for installation.

        Collects per-category failures and raises only at the end (and
        only in strict mode), so that every spec gets a diagnostic.
        """
        err_pkgs = []
        errs = []
        error_module_specs = []
        nevra_forms = self._get_nevra_forms_from_command()
        self.cli._populate_update_security_filter(self.opts)
        # 'localinstall' accepts only rpm file paths: reject anything parsed
        # as a plain spec or a group.
        if self.opts.command == 'localinstall' and (self.opts.grp_specs or self.opts.pkg_specs):
            self._log_not_valid_rpm_file_paths(self.opts.grp_specs)
            if self.base.conf.strict:
                raise dnf.exceptions.Error(_('Nothing to do.'))
        skipped_grp_specs = []
        if self.opts.grp_specs and self.opts.command != 'localinstall':
            if dnf.base.WITH_MODULES:
                # Try '@spec' arguments as modules first; specs that match no
                # module fall through to comps group installation below.
                try:
                    module_base = dnf.module.module_base.ModuleBase(self.base)
                    module_base.install(self.opts.grp_specs, strict=self.base.conf.strict)
                except dnf.exceptions.MarkingErrors as e:
                    if e.no_match_group_specs:
                        for e_spec in e.no_match_group_specs:
                            skipped_grp_specs.append(e_spec)
                    if e.error_group_specs:
                        for e_spec in e.error_group_specs:
                            error_module_specs.append("@" + e_spec)
                    module_depsolv_errors = e.module_depsolv_errors
                    if module_depsolv_errors:
                        logger.error(dnf.module.module_base.format_modular_solver_errors(
                            module_depsolv_errors[0]))
            else:
                skipped_grp_specs = self.opts.grp_specs
        # NEVRA-form aliases cannot be combined with rpm files or groups.
        if self.opts.filenames and nevra_forms:
            self._inform_not_a_valid_combination(self.opts.filenames)
            if self.base.conf.strict:
                raise dnf.exceptions.Error(_('Nothing to do.'))
        else:
            err_pkgs = self._install_files()
        if skipped_grp_specs and nevra_forms:
            self._inform_not_a_valid_combination(skipped_grp_specs)
            if self.base.conf.strict:
                raise dnf.exceptions.Error(_('Nothing to do.'))
        elif skipped_grp_specs and self.opts.command != 'localinstall':
            self._install_groups(skipped_grp_specs)
        if self.opts.command != 'localinstall':
            errs = self._install_packages(nevra_forms)
        if (len(errs) != 0 or len(err_pkgs) != 0 or error_module_specs) and self.base.conf.strict:
            raise dnf.exceptions.PackagesNotAvailableError(_("Unable to find a match"),
                                                           pkg_spec=' '.join(errs),
                                                           packages=err_pkgs)
    def _get_nevra_forms_from_command(self):
        # Return the forced parse form for 'install-n/-na/-nevra', else [].
        if self.opts.command in self.nevra_forms:
            return [self.nevra_forms[self.opts.command]]
        else:
            return []
    def _log_not_valid_rpm_file_paths(self, grp_specs):
        # Report every non-file argument given to 'localinstall'.
        group_names = map(lambda g: '@' + g, grp_specs)
        for pkg in chain(self.opts.pkg_specs, group_names):
            msg = _('Not a valid rpm file path: %s')
            logger.info(msg, self.base.output.term.bold(pkg))
    def _inform_not_a_valid_combination(self, forms):
        for form in forms:
            msg = _('Not a valid form: %s')
            logger.warning(msg, self.base.output.term.bold(form))
    def _install_files(self):
        """Mark local/remote rpm files for install; return failed packages."""
        err_pkgs = []
        strict = self.base.conf.strict
        for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=strict,
                                             progress=self.base.output.progress):
            try:
                self.base.package_install(pkg, strict=strict)
            except dnf.exceptions.MarkingError:
                msg = _('No match for argument: %s')
                logger.info(msg, self.base.output.term.bold(pkg.location))
                err_pkgs.append(pkg)
        return err_pkgs
    def _install_groups(self, grp_specs):
        # In non-strict mode a group install error is logged by the base
        # and deliberately swallowed here.
        try:
            self.base.env_group_install(grp_specs,
                                        tuple(self.base.conf.group_package_types),
                                        strict=self.base.conf.strict)
        except dnf.exceptions.Error:
            if self.base.conf.strict:
                raise
    def _report_alternatives(self, pkg_spec):
        # Suggest packages that declare 'alternative-for(<spec>)'.
        query = self.base.sack.query().filterm(
            provides=self.alternatives_provide.format(pkg_spec))
        if query:
            msg = _('There are following alternatives for "{0}": {1}')
            logger.info(msg.format(
                pkg_spec,
                ', '.join(sorted(set([alt.name for alt in query])))))
    def _install_packages(self, nevra_forms):
        """Mark plain package specs for install; return the failed specs."""
        errs = []
        strict = self.base.conf.strict
        for pkg_spec in self.opts.pkg_specs:
            try:
                self.base.install(pkg_spec, strict=strict, forms=nevra_forms)
            except dnf.exceptions.MarkingError as e:
                msg = '{}: {}'.format(e.value, self.base.output.term.bold(pkg_spec))
                logger.info(msg)
                self.base._report_icase_hint(pkg_spec)
                self._report_alternatives(pkg_spec)
                errs.append(pkg_spec)
        return errs
| 7,916
|
Python
|
.py
| 162
| 37.271605
| 98
| 0.607221
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,782
|
group.py
|
rpm-software-management_dnf/dnf/cli/commands/group.py
|
# group.py
# Group CLI command.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.comps import CompsQuery
from dnf.cli import commands
from dnf.i18n import _, ucd
import libdnf.transaction
import dnf.cli
import dnf.exceptions
import dnf.util
import logging
logger = logging.getLogger("dnf")
class GroupCommand(commands.Command):
""" Single sub-command interface for most groups interaction. """
direct_commands = {'grouplist' : 'list',
'groupinstall' : 'install',
'groupupdate' : 'install',
'groupremove' : 'remove',
'grouperase' : 'remove',
'groupinfo' : 'info'}
aliases = ('group', 'groups', 'grp') + tuple(direct_commands.keys())
summary = _('display, or use, the groups information')
_CMD_ALIASES = {'update' : 'upgrade',
'erase' : 'remove'}
_MARK_CMDS = ('install', 'remove')
_GROUP_SUBCOMMANDS = ('summary', 'list', 'info', 'remove', 'install', 'upgrade', 'mark')
def _canonical(self):
# were we called with direct command?
direct = self.direct_commands.get(self.opts.command)
if direct:
# canonize subcmd and args
if self.opts.subcmd is not None:
self.opts.args.insert(0, self.opts.subcmd)
self.opts.subcmd = direct
if self.opts.subcmd is None:
self.opts.subcmd = 'summary'
self.opts.subcmd = self._CMD_ALIASES.get(self.opts.subcmd,
self.opts.subcmd)
def __init__(self, cli):
super(GroupCommand, self).__init__(cli)
self._remark = False
def _assert_comps(self):
msg = _('No group data available for configured repositories.')
if not len(self.base.comps):
raise dnf.exceptions.CompsError(msg)
def _environment_lists(self, patterns):
def available_pred(env):
env_found = self.base.history.env.get(env.id)
return not(env_found)
self._assert_comps()
if patterns is None:
envs = self.base.comps.environments
else:
envs = self.base.comps.environments_by_pattern(",".join(patterns))
return dnf.util.mapall(list, dnf.util.partition(available_pred, envs))
def _group_lists(self, uservisible, patterns):
def installed_pred(group):
group_found = self.base.history.group.get(group.id)
if group_found:
return True
return False
installed = []
available = []
self._assert_comps()
if patterns is None:
grps = self.base.comps.groups
else:
grps = self.base.comps.groups_by_pattern(",".join(patterns))
for grp in grps:
tgt_list = available
if installed_pred(grp):
tgt_list = installed
if not uservisible or grp.uservisible:
tgt_list.append(grp)
return installed, available
def _info(self, userlist):
for strng in userlist:
group_matched = False
for env in self.base.comps.environments_by_pattern(strng):
self.output.display_groups_in_environment(env)
group_matched = True
for group in self.base.comps.groups_by_pattern(strng):
self.output.display_pkgs_in_groups(group)
group_matched = True
if not group_matched:
logger.error(_('Warning: Group %s does not exist.'), strng)
return 0, []
def _list(self, userlist):
uservisible = 1
showinstalled = 0
showavailable = 0
print_ids = self.base.conf.verbose or self.opts.ids
while userlist:
if userlist[0] == 'hidden':
uservisible = 0
userlist.pop(0)
elif userlist[0] == 'installed':
showinstalled = 1
userlist.pop(0)
elif userlist[0] == 'available':
showavailable = 1
userlist.pop(0)
elif userlist[0] == 'ids':
print_ids = True
userlist.pop(0)
else:
break
if self.opts.hidden:
uservisible = 0
if self.opts.installed:
showinstalled = 1
if self.opts.available:
showavailable = 1
if not userlist:
userlist = None # Match everything...
errs = False
if userlist is not None:
for group in userlist:
comps = self.base.comps
in_group = len(comps.groups_by_pattern(group)) > 0
in_environment = len(comps.environments_by_pattern(group)) > 0
if not in_group and not in_environment:
logger.error(_('Warning: No groups match:') + '\n %s',
group)
errs = True
if errs:
return 0, []
env_inst, env_avail = self._environment_lists(userlist)
installed, available = self._group_lists(uservisible, userlist)
def _out_grp(sect, group):
if not done:
print(sect)
msg = ' %s' % dnf.util._name_unset_wrapper(group.ui_name)
if print_ids:
msg += ' (%s)' % group.id
if group.lang_only:
msg += ' [%s]' % group.lang_only
print('{}'.format(msg))
def _out_env(sect, envs):
if envs:
print(sect)
for e in envs:
msg = ' %s' % dnf.util._name_unset_wrapper(e.ui_name)
if print_ids:
msg += ' (%s)' % e.id
print(msg)
if not showinstalled:
_out_env(_('Available Environment Groups:'), env_avail)
if not showavailable:
_out_env(_('Installed Environment Groups:'), env_inst)
if not showavailable:
done = False
for group in installed:
if group.lang_only:
continue
_out_grp(_('Installed Groups:'), group)
done = True
done = False
for group in installed:
if not group.lang_only:
continue
_out_grp(_('Installed Language Groups:'), group)
done = True
if showinstalled:
return 0, []
done = False
for group in available:
if group.lang_only:
continue
_out_grp(_('Available Groups:'), group)
done = True
done = False
for group in available:
if not group.lang_only:
continue
_out_grp(_('Available Language Groups:'), group)
done = True
return 0, []
def _mark_install(self, patterns):
q = CompsQuery(self.base.comps, self.base.history,
CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS,
CompsQuery.AVAILABLE | CompsQuery.INSTALLED)
solver = self.base._build_comps_solver()
res = q.get(*patterns)
if self.opts.with_optional:
types = tuple(self.base.conf.group_package_types + ['optional'])
else:
types = tuple(self.base.conf.group_package_types)
pkg_types = libdnf.transaction.listToCompsPackageType(types)
for env_id in res.environments:
solver._environment_install(env_id, pkg_types)
for group_id in res.groups:
solver._group_install(group_id, pkg_types)
def _mark_remove(self, patterns):
q = CompsQuery(self.base.comps, self.base.history,
CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS,
CompsQuery.INSTALLED)
solver = self.base._build_comps_solver()
res = q.get(*patterns)
for env_id in res.environments:
assert dnf.util.is_string_type(env_id)
solver._environment_remove(env_id)
for grp_id in res.groups:
assert dnf.util.is_string_type(grp_id)
solver._group_remove(grp_id)
def _mark_subcmd(self, extcmds):
if extcmds[0] in self._MARK_CMDS:
return extcmds[0], extcmds[1:]
return 'install', extcmds
def _summary(self, userlist):
uservisible = 1
if len(userlist) > 0:
if userlist[0] == 'hidden':
uservisible = 0
userlist.pop(0)
if self.opts.hidden:
uservisible = 0
if not userlist:
userlist = None # Match everything...
installed, available = self._group_lists(uservisible, userlist)
def _out_grp(sect, num):
if not num:
return
logger.info('%s %u', sect, num)
done = 0
for group in installed:
if group.lang_only:
continue
done += 1
_out_grp(_('Installed Groups:'), done)
done = 0
for group in installed:
if not group.lang_only:
continue
done += 1
_out_grp(_('Installed Language Groups:'), done)
done = False
for group in available:
if group.lang_only:
continue
done += 1
_out_grp(_('Available Groups:'), done)
done = False
for group in available:
if not group.lang_only:
continue
done += 1
_out_grp(_('Available Language Groups:'), done)
return 0, []
    @staticmethod
    def set_argparser(parser):
        """Register the group command's CLI options and positional args."""
        parser.add_argument('--with-optional', action='store_true',
                            help=_("include optional packages from group"))
        # Display filters are mutually exclusive.
        grpparser = parser.add_mutually_exclusive_group()
        grpparser.add_argument('--hidden', action='store_true',
                               help=_("show also hidden groups"))
        grpparser.add_argument('--installed', action='store_true',
                               help=_("show only installed groups"))
        grpparser.add_argument('--available', action='store_true',
                               help=_("show only available groups"))
        grpparser.add_argument('--ids', action='store_true',
                               help=_("show also ID of groups"))
        parser.add_argument('subcmd', nargs='?', metavar='COMMAND',
                            help=_('available subcommands: {} (default), {}').format(
                                GroupCommand._GROUP_SUBCOMMANDS[0],
                                ', '.join(GroupCommand._GROUP_SUBCOMMANDS[1:])))
        parser.add_argument('args', nargs='*', metavar='COMMAND_ARG',
                            help=_('argument for group subcommand'))
def configure(self):
self._canonical()
cmd = self.opts.subcmd
args = self.opts.args
if cmd not in self._GROUP_SUBCOMMANDS:
logger.critical(_('Invalid groups sub-command, use: %s.'),
", ".join(self._GROUP_SUBCOMMANDS))
raise dnf.cli.CliError
if cmd in ('install', 'remove', 'mark', 'info') and not args:
self.cli.optparser.print_help(self)
raise dnf.cli.CliError
demands = self.cli.demands
demands.sack_activation = True
if cmd in ('install', 'mark', 'remove', 'upgrade'):
demands.root_user = True
demands.resolving = True
if cmd == 'remove':
demands.allow_erasing = True
demands.available_repos = False
else:
demands.available_repos = True
if cmd not in ('remove'):
commands._checkEnabledRepo(self.base)
if cmd in ('install', 'upgrade'):
commands._checkGPGKey(self.base, self.cli)
    def run(self):
        """Dispatch to the handler for the chosen group sub-command."""
        cmd = self.opts.subcmd
        extcmds = self.opts.args

        if cmd == 'summary':
            return self._summary(extcmds)
        if cmd == 'list':
            return self._list(extcmds)
        if cmd == 'info':
            return self._info(extcmds)
        if cmd == 'mark':
            (subcmd, extcmds) = self._mark_subcmd(extcmds)
            if subcmd == 'remove':
                return self._mark_remove(extcmds)
            else:
                assert subcmd == 'install'
                return self._mark_install(extcmds)

        if cmd == 'install':
            if self.opts.with_optional:
                types = tuple(self.base.conf.group_package_types + ['optional'])
            else:
                types = tuple(self.base.conf.group_package_types)

            # Remember to fix up install reasons in run_transaction().
            self._remark = True
            try:
                return self.base.env_group_install(extcmds, types,
                                                   self.base.conf.strict)
            except dnf.exceptions.MarkingError as e:
                msg = _('No package %s available.')
                logger.info(msg, self.base.output.term.bold(e))
                raise dnf.exceptions.PackagesNotAvailableError(
                    _("Unable to find a mandatory group package."))
        if cmd == 'upgrade':
            return self.base.env_group_upgrade(extcmds)
        if cmd == 'remove':
            # Best effort: a failure for one group does not stop the rest.
            for arg in extcmds:
                try:
                    self.base.env_group_remove([arg])
                except dnf.exceptions.Error:
                    pass
    def run_transaction(self):
        """After the transaction, update install reasons of group members.

        Only runs when a group install happened (``self._remark`` set in
        run()).  Each installed package that belongs to the goal's group
        members gets its history reason recomputed via group_reason().
        """
        if not self._remark:
            return
        goal = self.base._goal
        history = self.base.history
        names = goal.group_members
        for pkg in self.base.sack.query().installed().filterm(name=names):
            reason = history.rpm.get_reason(pkg)
            history.set_reason(pkg, goal.group_reason(pkg, reason))
| 14,961
|
Python
|
.py
| 358
| 29.597765
| 92
| 0.552461
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,783
|
__init__.py
|
rpm-software-management_dnf/dnf/module/__init__.py
|
# Copyright (C) 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dnf.i18n import _
# Message identifiers used to look up user-facing module texts below.
DIFFERENT_STREAM_INFO = 1
NOTHING_TO_SHOW = 2
INSTALLING_NEWER_VERSION = 4
ENABLED_MODULES = 5
NO_PROFILE_SPECIFIED = 6

# Maps a message identifier to its translatable template string.
module_messages = {
    DIFFERENT_STREAM_INFO: _("Enabling different stream for '{}'."),
    NOTHING_TO_SHOW: _("Nothing to show."),
    INSTALLING_NEWER_VERSION: _("Installing newer version of '{}' than specified. Reason: {}"),
    ENABLED_MODULES: _("Enabled modules: {}."),
    NO_PROFILE_SPECIFIED: _("No profile specified for '{}', please specify profile."),
}
| 1,262
|
Python
|
.py
| 28
| 43.25
| 95
| 0.745735
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,784
|
exceptions.py
|
rpm-software-management_dnf/dnf/module/exceptions.py
|
# supplies the 'module' command.
#
# Copyright (C) 2014-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import dnf
from dnf.module import module_messages, NO_PROFILE_SPECIFIED
from dnf.i18n import _
class NoModuleException(dnf.exceptions.Error):
    """Raised when a module specification matches no known module."""

    def __init__(self, module_spec):
        super(NoModuleException, self).__init__(
            _("No such module: {}").format(module_spec))
class NoStreamException(dnf.exceptions.Error):
    """Raised when the requested module stream does not exist."""

    def __init__(self, stream):
        super(NoStreamException, self).__init__(
            _("No such stream: {}").format(stream))
class EnabledStreamException(dnf.exceptions.Error):
    """Raised when a module has no enabled stream but one is required."""

    def __init__(self, module_spec):
        super(EnabledStreamException, self).__init__(
            _("No enabled stream for module: {}").format(module_spec))
class EnableMultipleStreamsException(dnf.exceptions.Error):
    """Raised when several streams of one module would be enabled at once."""

    def __init__(self, module_spec, value=None):
        message = value
        if message is None:
            message = _("Cannot enable more streams from module '{}' at the same time").format(module_spec)
        super(EnableMultipleStreamsException, self).__init__(message)
class DifferentStreamEnabledException(dnf.exceptions.Error):
    """Raised when another stream of the module is already enabled."""

    def __init__(self, module_spec):
        super(DifferentStreamEnabledException, self).__init__(
            _("Different stream enabled for module: {}").format(module_spec))
class NoProfileException(dnf.exceptions.Error):
    """Raised when the requested profile does not exist."""

    def __init__(self, profile):
        super(NoProfileException, self).__init__(
            _("No such profile: {}").format(profile))
class ProfileNotInstalledException(dnf.exceptions.Error):
    """Raised when the named profile is not installed for the module."""

    def __init__(self, module_spec):
        super(ProfileNotInstalledException, self).__init__(
            _("Specified profile not installed for {}").format(module_spec))
class NoStreamSpecifiedException(dnf.exceptions.Error):
    """Raised when a stream is required but the spec did not name one."""

    def __init__(self, module_spec):
        super(NoStreamSpecifiedException, self).__init__(
            _("No stream specified for '{}', please specify stream").format(module_spec))
class NoProfileSpecifiedException(dnf.exceptions.Error):
    """Raised when a profile is required but the spec did not name one."""

    def __init__(self, module_spec):
        super(NoProfileSpecifiedException, self).__init__(
            module_messages[NO_PROFILE_SPECIFIED].format(module_spec))
class NoProfilesException(dnf.exceptions.Error):
    """Raised when the module provides no profiles at all."""

    def __init__(self, module_spec):
        super(NoProfilesException, self).__init__(
            _("No such profile: {}. No profiles available").format(module_spec))
class NoProfileToRemoveException(dnf.exceptions.Error):
    """Raised when a module has no installed profile to remove."""

    def __init__(self, module_spec):
        super(NoProfileToRemoveException, self).__init__(
            _("No profile to remove for '{}'").format(module_spec))
| 3,654
|
Python
|
.py
| 66
| 50.5
| 105
| 0.722861
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,785
|
module_base.py
|
rpm-software-management_dnf/dnf/module/module_base.py
|
# Copyright (C) 2017-2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from collections import OrderedDict
import hawkey
import libdnf.smartcols
import libdnf.module
import dnf.selector
import dnf.exceptions
from dnf.module.exceptions import EnableMultipleStreamsException
from dnf.util import logger
from dnf.i18n import _, P_, ucd
import functools
# Short aliases for the libdnf module-state enum values used throughout.
STATE_DEFAULT = libdnf.module.ModulePackageContainer.ModuleState_DEFAULT
STATE_ENABLED = libdnf.module.ModulePackageContainer.ModuleState_ENABLED
STATE_DISABLED = libdnf.module.ModulePackageContainer.ModuleState_DISABLED
STATE_UNKNOWN = libdnf.module.ModulePackageContainer.ModuleState_UNKNOWN

# Legends appended below the module list/info tables to explain markers.
MODULE_TABLE_HINT = _("\n\nHint: [d]efault, [e]nabled, [x]disabled, [i]nstalled")
MODULE_INFO_TABLE_HINT = _("\n\nHint: [d]efault, [e]nabled, [x]disabled, [i]nstalled, [a]ctive")
def _profile_comparison_key(profile):
    # Sort key used to order module profiles alphabetically by name.
    return profile.getName()
class ModuleBase(object):
# :api
    def __init__(self, base):
        # :api
        # *base* is the dnf.Base instance all module operations act on.
        self.base = base
    def enable(self, module_specs):
        # :api
        """Enable module streams matching *module_specs*.

        A profile part in a spec is reported and ignored.  Raises
        dnf.exceptions.MarkingErrors on any resolution, enabling or
        modular-depsolving failure.
        """
        no_match_specs, error_specs, solver_errors, module_dicts = \
            self._resolve_specs_enable_update_sack(module_specs)

        for spec, (nsvcap, module_dict) in module_dicts.items():
            if nsvcap.profile:
                logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                               error_group_specs=error_specs,
                                               module_depsolv_errors=solver_errors)
def disable(self, module_specs):
# :api
no_match_specs, solver_errors = self._modules_reset_or_disable(module_specs, STATE_DISABLED)
if no_match_specs or solver_errors:
raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
module_depsolv_errors=solver_errors)
    def install(self, module_specs, strict=True):
        # :api
        """Enable the given module specs and mark their profile packages
        for installation on the base goal.

        Raises dnf.exceptions.MarkingErrors when specs cannot be resolved
        or installed, and dnf.exceptions.Error when an install would come
        from the protective Fail-Safe repository.
        """
        no_match_specs, error_specs, solver_errors, module_dicts = \
            self._resolve_specs_enable_update_sack(module_specs)

        # <package_name, set_of_spec>
        fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
        install_dict = {}
        install_set_artifacts = set()
        fail_safe_repo_used = False
        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name, streamdict in moduledict.items():
                for stream, module_list in streamdict.items():
                    # Only modules the modular solver marked active can
                    # actually be installed.
                    install_module_list = [x for x in module_list
                                           if self.base._moduleContainer.isModuleActive(x.getId())]
                    if not install_module_list:
                        logger.error(_("All matches for argument '{0}' in module '{1}:{2}' are not "
                                       "active").format(spec, name, stream))
                        error_specs.append(spec)
                        continue
                    profiles = []
                    latest_module = self._get_latest(install_module_list)
                    if latest_module.getRepoID() == fail_safe_repo:
                        msg = _(
                            "Installing module '{0}' from Fail-Safe repository {1} is not allowed")
                        logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo))
                        fail_safe_repo_used = True
                    if nsvcap.profile:
                        # An explicit profile was requested in the spec.
                        profiles.extend(latest_module.getProfiles(nsvcap.profile))
                        if not profiles:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(sorted(
                                    [profile.getName() for profile in available_profiles]))
                                msg = _("Unable to match profile for argument {}. Available "
                                        "profiles for '{}:{}': {}").format(
                                    spec, name, stream, profile_names)
                            else:
                                msg = _("Unable to match profile for argument {}").format(spec)
                            logger.error(msg)
                            no_match_specs.append(spec)
                            continue
                    else:
                        # No profile in the spec: fall back to defaults.
                        profiles_strings = self.base._moduleContainer.getDefaultProfiles(
                            name, stream)
                        if not profiles_strings:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(sorted(
                                    [profile.getName() for profile in available_profiles]))
                                msg = _("No default profiles for module {}:{}. Available profiles"
                                        ": {}").format(
                                    name, stream, profile_names)
                            else:
                                msg = _("No profiles for module {}:{}").format(name, stream)
                            logger.error(msg)
                            error_specs.append(spec)
                        for profile in set(profiles_strings):
                            module_profiles = latest_module.getProfiles(profile)
                            if not module_profiles:
                                logger.error(
                                    _("Default profile {} not available in module {}:{}").format(
                                        profile, name, stream))
                                error_specs.append(spec)

                            profiles.extend(module_profiles)
                    for profile in profiles:
                        self.base._moduleContainer.install(latest_module, profile.getName())
                        for pkg_name in profile.getContent():
                            install_dict.setdefault(pkg_name, set()).add(spec)
                    for module in install_module_list:
                        install_set_artifacts.update(module.getArtifacts())
        if fail_safe_repo_used:
            raise dnf.exceptions.Error(_(
                "Installing module from Fail-Safe repository is not allowed"))
        __, profiles_errors = self._install_profiles_internal(
            install_set_artifacts, install_dict, strict)
        if profiles_errors:
            error_specs.extend(profiles_errors)
        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                               error_group_specs=error_specs,
                                               module_depsolv_errors=solver_errors)
def switch_to(self, module_specs, strict=True):
# :api
no_match_specs, error_specs, module_dicts = self._resolve_specs_enable(module_specs)
# collect name of artifacts from new modules for distrosync
new_artifacts_names = set()
# collect name of artifacts from active modules for distrosync before sack update
active_artifacts_names = set()
src_arches = {"nosrc", "src"}
for spec, (nsvcap, moduledict) in module_dicts.items():
for name in moduledict.keys():
for module in self.base._moduleContainer.query(name, "", "", "", ""):
if self.base._moduleContainer.isModuleActive(module):
for artifact in module.getArtifacts():
arch = artifact.rsplit(".", 1)[1]
if arch in src_arches:
continue
pkg_name = artifact.rsplit("-", 2)[0]
active_artifacts_names.add(pkg_name)
solver_errors = self._update_sack()
dependency_error_spec = self._enable_dependencies(module_dicts)
if dependency_error_spec:
error_specs.extend(dependency_error_spec)
# <package_name, set_of_spec>
fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
install_dict = {}
install_set_artifacts = set()
fail_safe_repo_used = False
# list of name: [profiles] for module profiles being removed
removed_profiles = self.base._moduleContainer.getRemovedProfiles()
for spec, (nsvcap, moduledict) in module_dicts.items():
for name, streamdict in moduledict.items():
for stream, module_list in streamdict.items():
install_module_list = [x for x in module_list
if self.base._moduleContainer.isModuleActive(x.getId())]
if not install_module_list:
"No active matches for argument '{0}' in module '{1}:{2}'"
logger.error(_("No active matches for argument '{0}' in module "
"'{1}:{2}'").format(spec, name, stream))
error_specs.append(spec)
continue
profiles = []
latest_module = self._get_latest(install_module_list)
if latest_module.getRepoID() == fail_safe_repo:
msg = _(
"Installing module '{0}' from Fail-Safe repository {1} is not allowed")
logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo))
fail_safe_repo_used = True
if nsvcap.profile:
profiles.extend(latest_module.getProfiles(nsvcap.profile))
if not profiles:
available_profiles = latest_module.getProfiles()
if available_profiles:
profile_names = ", ".join(sorted(
[profile.getName() for profile in available_profiles]))
msg = _("Unable to match profile for argument {}. Available "
"profiles for '{}:{}': {}").format(
spec, name, stream, profile_names)
else:
msg = _("Unable to match profile for argument {}").format(spec)
logger.error(msg)
no_match_specs.append(spec)
continue
elif name in removed_profiles:
for profile in removed_profiles[name]:
module_profiles = latest_module.getProfiles(profile)
if not module_profiles:
logger.warning(
_("Installed profile '{0}' is not available in module "
"'{1}' stream '{2}'").format(profile, name, stream))
continue
profiles.extend(module_profiles)
for profile in profiles:
self.base._moduleContainer.install(latest_module, profile.getName())
for pkg_name in profile.getContent():
install_dict.setdefault(pkg_name, set()).add(spec)
for module in install_module_list:
artifacts = module.getArtifacts()
install_set_artifacts.update(artifacts)
for artifact in artifacts:
arch = artifact.rsplit(".", 1)[1]
if arch in src_arches:
continue
pkg_name = artifact.rsplit("-", 2)[0]
new_artifacts_names.add(pkg_name)
if fail_safe_repo_used:
raise dnf.exceptions.Error(_(
"Installing module from Fail-Safe repository is not allowed"))
install_base_query, profiles_errors = self._install_profiles_internal(
install_set_artifacts, install_dict, strict)
if profiles_errors:
error_specs.extend(profiles_errors)
# distrosync module name
all_names = set()
all_names.update(new_artifacts_names)
all_names.update(active_artifacts_names)
remove_query = self.base.sack.query().filterm(empty=True)
base_no_source_query = self.base.sack.query().filterm(arch__neq=['src', 'nosrc']).apply()
for pkg_name in all_names:
query = base_no_source_query.filter(name=pkg_name)
installed = query.installed()
if not installed:
continue
available = query.available()
if not available:
logger.warning(_("No packages available to distrosync for package name "
"'{}'").format(pkg_name))
if pkg_name not in new_artifacts_names:
remove_query = remove_query.union(query)
continue
only_new_module = query.intersection(install_base_query)
if only_new_module:
query = only_new_module
sltr = dnf.selector.Selector(self.base.sack)
sltr.set(pkg=query)
self.base._goal.distupgrade(select=sltr)
self.base._remove_if_unneeded(remove_query)
if no_match_specs or error_specs or solver_errors:
raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
error_group_specs=error_specs,
module_depsolv_errors=solver_errors)
def reset(self, module_specs):
# :api
no_match_specs, solver_errors = self._modules_reset_or_disable(module_specs, STATE_UNKNOWN)
if no_match_specs:
raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
module_depsolv_errors=solver_errors)
    def upgrade(self, module_specs):
        # :api
        """Upgrade packages belonging to the given active modules.

        Returns the list of specs that matched nothing.  Raises
        dnf.exceptions.Error for upgrades from the Fail-Safe repository.
        """
        no_match_specs = []
        fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
        fail_safe_repo_used = False

        # Remove source packages because they cannot be installed or upgraded
        base_no_source_query = self.base.sack.query().filterm(arch__neq=['src', 'nosrc']).apply()

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            update_module_list = [x for x in module_list
                                  if self.base._moduleContainer.isModuleActive(x.getId())]
            if not update_module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                continue
            module_dict = self._create_module_dict_and_enable(update_module_list, spec, False)
            upgrade_package_set = set()
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    upgrade_package_set.update(self._get_package_name_set_and_remove_profiles(
                        module_list_from_dict, nsvcap))
                    latest_module = self._get_latest(module_list_from_dict)
                    if latest_module.getRepoID() == fail_safe_repo:
                        msg = _(
                            "Upgrading module '{0}' from Fail-Safe repository {1} is not allowed")
                        logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo))
                        fail_safe_repo_used = True
                    if nsvcap.profile:
                        profiles_set = latest_module.getProfiles(nsvcap.profile)
                        if not profiles_set:
                            continue
                        for profile in profiles_set:
                            upgrade_package_set.update(profile.getContent())
                    else:
                        # No profile requested: consider every profile's
                        # packages plus all artifact names of the module.
                        for profile in latest_module.getProfiles():
                            upgrade_package_set.update(profile.getContent())
                        for artifact in latest_module.getArtifacts():
                            subj = hawkey.Subject(artifact)
                            for nevra_obj in subj.get_nevra_possibilities(
                                    forms=[hawkey.FORM_NEVRA]):
                                upgrade_package_set.add(nevra_obj.name)
            if not upgrade_package_set:
                logger.error(_("Unable to match profile in argument {}").format(spec))
            query = base_no_source_query.filter(name=upgrade_package_set)
            if query:
                sltr = dnf.selector.Selector(self.base.sack)
                sltr.set(pkg=query)
                self.base._goal.upgrade(select=sltr)
        if fail_safe_repo_used:
            raise dnf.exceptions.Error(_(
                "Upgrading module from Fail-Safe repository is not allowed"))
        return no_match_specs
    def remove(self, module_specs):
        # :api
        """Uninstall the given modules' profiles and remove the packages
        that no longer belong to any installed profile.

        Returns the list of specs that matched nothing.
        """
        no_match_specs = []
        remove_package_set = set()

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            module_dict = self._create_module_dict_and_enable(module_list, spec, False)
            remove_packages_names = []
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    remove_packages_names.extend(self._get_package_name_set_and_remove_profiles(
                        module_list_from_dict, nsvcap, True))
            if not remove_packages_names:
                logger.error(_("Unable to match profile in argument {}").format(spec))
            remove_package_set.update(remove_packages_names)

        if remove_package_set:
            # Keep packages still owned by other installed profiles.
            keep_pkg_names = self.base._moduleContainer.getInstalledPkgNames()
            remove_package_set = remove_package_set.difference(keep_pkg_names)
            if remove_package_set:
                query = self.base.sack.query().installed().filterm(name=remove_package_set)
                if query:
                    self.base._remove_if_unneeded(query)
        return no_match_specs
    def get_modules(self, module_spec):
        # :api
        """Public wrapper around _get_modules(); returns (modules, nsvcap)."""
        return self._get_modules(module_spec)
    def _get_modules(self, module_spec):
        # used by ansible (lib/ansible/modules/packaging/os/dnf.py)
        """Resolve *module_spec* against the module container.

        Tries each NSVCAP interpretation of the spec in turn and returns
        (modules, nsvcap) for the first form that matches anything, or
        ((), None) when no form matches.
        """
        subj = hawkey.Subject(module_spec)
        for nsvcap in subj.nsvcap_possibilities():
            name = nsvcap.name if nsvcap.name else ""
            stream = nsvcap.stream if nsvcap.stream else ""
            version = ""
            context = nsvcap.context if nsvcap.context else ""
            arch = nsvcap.arch if nsvcap.arch else ""
            # -1 is the "no version" sentinel in a parsed NSVCAP.
            if nsvcap.version and nsvcap.version != -1:
                version = str(nsvcap.version)

            modules = self.base._moduleContainer.query(name, stream, version, context, arch)
            if modules:
                return modules, nsvcap
        return (), None
def _get_latest(self, module_list):
latest = None
if module_list:
latest = module_list[0]
for module in module_list[1:]:
if module.getVersionNum() > latest.getVersionNum():
latest = module
return latest
    def _create_module_dict_and_enable(self, module_list, spec, enable=True):
        """Group *module_list* into {name: {stream: [modules]}} and reduce
        each module to a single stream (enabling it when *enable* is True).

        Raises EnableMultipleStreamsException when the stream cannot be
        determined unambiguously.
        """
        moduleDict = {}
        for module in module_list:
            moduleDict.setdefault(
                module.getName(), {}).setdefault(module.getStream(), []).append(module)

        for moduleName, streamDict in moduleDict.items():
            moduleState = self.base._moduleContainer.getModuleState(moduleName)
            if len(streamDict) > 1:
                # Ambiguous: more than one stream matched the spec.
                if moduleState != STATE_DEFAULT and moduleState != STATE_ENABLED \
                        and moduleState != STATE_DISABLED:
                    streams_str = "', '".join(
                        sorted(streamDict.keys(), key=functools.cmp_to_key(self.base.sack.evr_cmp)))
                    msg = _("Argument '{argument}' matches {stream_count} streams ('{streams}') of "
                            "module '{module}', but none of the streams are enabled or "
                            "default").format(
                        argument=spec, stream_count=len(streamDict), streams=streams_str,
                        module=moduleName)
                    raise EnableMultipleStreamsException(moduleName, msg)
                # Disambiguate via the enabled or the default stream.
                if moduleState == STATE_ENABLED:
                    stream = self.base._moduleContainer.getEnabledStream(moduleName)
                else:
                    stream = self.base._moduleContainer.getDefaultStream(moduleName)
                if not stream or stream not in streamDict:
                    raise EnableMultipleStreamsException(moduleName)
                # sorted() yields a copy of the keys, so deleting from
                # streamDict inside this loop is safe.
                for key in sorted(streamDict.keys()):
                    if key == stream:
                        if enable:
                            self.base._moduleContainer.enable(moduleName, key)
                        continue
                    del streamDict[key]
            elif enable:
                for key in streamDict.keys():
                    self.base._moduleContainer.enable(moduleName, key)
            assert len(streamDict) == 1
        return moduleDict
    def _resolve_specs_enable(self, module_specs):
        """Resolve each spec to modules and enable the matching streams.

        Returns (no_match_specs, error_specs,
        {spec: (nsvcap, module_dict)}) for the successfully resolved specs.
        """
        no_match_specs = []
        error_spec = []
        module_dicts = {}
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            try:
                module_dict = self._create_module_dict_and_enable(module_list, spec, True)
                module_dicts[spec] = (nsvcap, module_dict)
            except (RuntimeError, EnableMultipleStreamsException) as e:
                error_spec.append(spec)
                logger.error(ucd(e))
                logger.error(_("Unable to resolve argument {}").format(spec))
        return no_match_specs, error_spec, module_dicts
def _update_sack(self):
hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
try:
solver_errors = self.base.sack.filter_modules(
self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot,
self.base.conf.module_platform_id, update_only=True,
debugsolver=self.base.conf.debug_solver)
except hawkey.Exception as e:
raise dnf.exceptions.Error(ucd(e))
return solver_errors
    def _enable_dependencies(self, module_dicts):
        """Enable the dependency trees of all resolved modules.

        Returns the list of specs whose dependencies could not be enabled.
        """
        error_spec = []
        for spec, (nsvcap, moduleDict) in module_dicts.items():
            for streamDict in moduleDict.values():
                for modules in streamDict.values():
                    try:
                        self.base._moduleContainer.enableDependencyTree(
                            libdnf.module.VectorModulePackagePtr(modules))
                    except RuntimeError as e:
                        error_spec.append(spec)
                        logger.error(ucd(e))
                        logger.error(_("Unable to resolve argument {}").format(spec))
        return error_spec
def _resolve_specs_enable_update_sack(self, module_specs):
no_match_specs, error_spec, module_dicts = self._resolve_specs_enable(module_specs)
solver_errors = self._update_sack()
dependency_error_spec = self._enable_dependencies(module_dicts)
if dependency_error_spec:
error_spec.extend(dependency_error_spec)
return no_match_specs, error_spec, solver_errors, module_dicts
    def _modules_reset_or_disable(self, module_specs, to_state):
        """Reset (STATE_UNKNOWN) or disable (STATE_DISABLED) whole modules.

        Only the module-name part of each spec is honoured; any extra
        stream/version/context/arch/profile information is reported and
        ignored.  Returns (no_match_specs, solver_errors).
        """
        no_match_specs = []
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                no_match_specs.append(spec)
                continue
            if nsvcap.stream or nsvcap.version or nsvcap.context or nsvcap.arch or nsvcap.profile:
                logger.info(_("Only module name is required. "
                              "Ignoring unneeded information in argument: '{}'").format(spec))
            module_names = set()
            for module in module_list:
                module_names.add(module.getName())
            for name in module_names:
                if to_state == STATE_UNKNOWN:
                    self.base._moduleContainer.reset(name)
                if to_state == STATE_DISABLED:
                    self.base._moduleContainer.disable(name)

        solver_errors = self._update_sack()
        return no_match_specs, solver_errors
    def _get_package_name_set_and_remove_profiles(self, module_list, nsvcap, remove=False):
        """Collect package names from installed profiles of the latest module.

        When *remove* is True the matching profiles are also marked
        uninstalled in the module container.  Returns an empty set when no
        relevant profile is installed.
        """
        package_name_set = set()
        latest_module = self._get_latest(module_list)
        installed_profiles_strings = set(self.base._moduleContainer.getInstalledProfiles(
            latest_module.getName()))
        if not installed_profiles_strings:
            return set()
        if nsvcap.profile:
            # Restrict to the explicitly requested profile.
            profiles_set = latest_module.getProfiles(nsvcap.profile)
            if not profiles_set:
                return set()
            for profile in profiles_set:
                if profile.getName() in installed_profiles_strings:
                    if remove:
                        self.base._moduleContainer.uninstall(latest_module, profile.getName())
                    package_name_set.update(profile.getContent())
        else:
            for profile_string in installed_profiles_strings:
                if remove:
                    self.base._moduleContainer.uninstall(latest_module, profile_string)
                for profile in latest_module.getProfiles(profile_string):
                    package_name_set.update(profile.getContent())
        return package_name_set
    def _get_info_profiles(self, module_specs):
        """Render one table per matched module listing each profile and
        the packages it contains; tables are sorted and joined by blank
        lines."""
        output = set()
        for module_spec in module_specs:
            module_list, nsvcap = self._get_modules(module_spec)
            if not module_list:
                logger.info(_("Unable to resolve argument {}").format(module_spec))
                continue

            if nsvcap.profile:
                logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
            for module in module_list:
                lines = OrderedDict()
                lines["Name"] = module.getFullIdentifier()
                for profile in sorted(module.getProfiles(), key=_profile_comparison_key):
                    lines[profile.getName()] = "\n".join(
                        [pkgName for pkgName in profile.getContent()])
                output.add(self._create_simple_table(lines).toString())
        return "\n\n".join(sorted(output))
    def _profile_report_formatter(self, modulePackage, default_profiles, enabled_str):
        """Build the comma-separated profile column for list/info output.

        Profiles are suffixed with " [d]" when default and " [i]" when
        installed -- the latter only shown when the stream is enabled
        (*enabled_str* is non-empty).
        """
        installed_profiles = self.base._moduleContainer.getInstalledProfiles(
            modulePackage.getName())
        available_profiles = modulePackage.getProfiles()
        profiles_str = ""
        for profile in sorted(available_profiles, key=_profile_comparison_key):
            profiles_str += "{}{}".format(
                profile.getName(), " [d]" if profile.getName() in default_profiles else "")
            profiles_str += " [i], " if profile.getName() in installed_profiles and enabled_str \
                else ", "
        # Drop the trailing ", " separator.
        return profiles_str[:-2]
def _summary_report_formatter(self, summary):
return summary.strip().replace("\n", " ")
    def _module_strs_formatter(self, modulePackage, markActive=False):
        """Return the (default, enabled, disabled/active) marker strings
        for a module package, e.g. " [d]", " [e]", " [x]" or " [a]".

        The leading space is only emitted by the first marker so the
        concatenated result reads "<stream> [d] [e]" etc.
        """
        default_str = ""
        enabled_str = ""
        disabled_str = ""
        if modulePackage.getStream() == self.base._moduleContainer.getDefaultStream(
                modulePackage.getName()):
            default_str = " [d]"
        if self.base._moduleContainer.isEnabled(modulePackage):
            if not default_str:
                enabled_str = " "
            enabled_str += "[e]"
        elif self.base._moduleContainer.isDisabled(modulePackage):
            if not default_str:
                disabled_str = " "
            disabled_str += "[x]"
        if markActive and self.base._moduleContainer.isModuleActive(modulePackage):
            # NOTE(review): this reassignment would drop an earlier "[x]"
            # marker if a disabled module were ever also active --
            # presumably that combination cannot occur; confirm before
            # relying on it.
            if not default_str:
                disabled_str = " "
            disabled_str += "[a]"
        return default_str, enabled_str, disabled_str
    def _get_info(self, module_specs):
        """Render detailed per-module info tables for the given specs,
        appending the marker legend when anything was produced."""
        output = set()
        for module_spec in module_specs:
            module_list, nsvcap = self._get_modules(module_spec)
            if not module_list:
                logger.info(_("Unable to resolve argument {}").format(module_spec))
                continue

            if nsvcap.profile:
                logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
            for modulePackage in module_list:
                default_str, enabled_str, disabled_str = self._module_strs_formatter(
                    modulePackage, markActive=True)
                default_profiles = self.base._moduleContainer.getDefaultProfiles(
                    modulePackage.getName(), modulePackage.getStream())
                profiles_str = self._profile_report_formatter(
                    modulePackage, default_profiles, enabled_str)
                lines = OrderedDict()
                lines["Name"] = modulePackage.getName()
                lines["Stream"] = modulePackage.getStream() + default_str + enabled_str + \
                    disabled_str
                lines["Version"] = modulePackage.getVersion()
                lines["Context"] = modulePackage.getContext()
                lines["Architecture"] = modulePackage.getArch()
                lines["Profiles"] = profiles_str
                lines["Default profiles"] = " ".join(default_profiles)
                lines["Repo"] = modulePackage.getRepoID()
                lines["Summary"] = modulePackage.getSummary()
                lines["Description"] = modulePackage.getDescription()
                # Render module requirements as "name:[stream,...]".
                req_set = set()
                for req in modulePackage.getModuleDependencies():
                    for require_dict in req.getRequires():
                        for mod_require, stream in require_dict.items():
                            req_set.add("{}:[{}]".format(mod_require, ",".join(stream)))
                lines["Requires"] = "\n".join(sorted(req_set))
                demodularized = modulePackage.getDemodularizedRpms()
                if demodularized:
                    lines["Demodularized rpms"] = "\n".join(demodularized)
                lines["Artifacts"] = "\n".join(sorted(modulePackage.getArtifacts()))
                output.add(self._create_simple_table(lines).toString())
        str_table = "\n\n".join(sorted(output))
        if str_table:
            str_table += MODULE_INFO_TABLE_HINT
        return str_table
    @staticmethod
    def _create_simple_table(lines):
        """Build a two-column "name : value" libdnf table from an ordered
        mapping; None values are rendered as empty strings."""
        table = libdnf.smartcols.Table()
        table.enableNoheadings(True)
        table.setColumnSeparator(" : ")

        column_name = table.newColumn("Name")
        column_value = table.newColumn("Value")
        column_value.setWrap(True)
        column_value.setSafechars("\n")
        column_value.setNewlineWrapFunction()

        for line_name, value in lines.items():
            if value is None:
                value = ""
            line = table.newLine()
            line.getColumnCell(column_name).setData(line_name)
            line.getColumnCell(column_value).setData(str(value))

        return table
def _get_full_info(self, module_specs):
    """Return the raw modulemd YAML for every module matching *module_specs*.

    Unresolvable specs are reported via the logger and skipped; a profile
    part in a spec is ignored (with a log message).  The YAML documents
    are de-duplicated and returned sorted, separated by blank lines.
    """
    yamls = set()
    for spec in module_specs:
        packages, nsvcap = self._get_modules(spec)
        if not packages:
            logger.info(_("Unable to resolve argument {}").format(spec))
            continue
        if nsvcap.profile:
            logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                nsvcap.name, nsvcap.profile))
        # getYaml() may return an empty document; those are skipped
        yamls.update(doc for doc in (pkg.getYaml() for pkg in packages) if doc)
    return "\n\n".join(sorted(yamls))
def _what_provides(self, rpm_specs):
    """Report which modules (and their profiles) contain packages matching *rpm_specs*.

    For each spec, packages are resolved by NEVRA (provides/filenames
    lookups are disabled), then matched against every module's artifact
    list.  Returns one formatted block per matching (package, module)
    pair, sorted and separated by blank lines.
    """
    output = set()
    modulePackages = self.base._moduleContainer.getModulePackages()
    # start from an empty query and union in the matches for each spec
    baseQuery = self.base.sack.query().filterm(empty=True).apply()
    # resolve specs ignoring modular excludes so artifacts of inactive
    # streams can still be matched
    getBestInitQuery = self.base.sack.query(flags=hawkey.IGNORE_MODULAR_EXCLUDES)
    for spec in rpm_specs:
        subj = dnf.subject.Subject(spec)
        baseQuery = baseQuery.union(subj.get_best_query(
            self.base.sack, with_nevra=True, with_provides=False, with_filenames=False,
            query=getBestInitQuery))
    baseQuery.apply()
    for modulePackage in modulePackages:
        artifacts = modulePackage.getArtifacts()
        if not artifacts:
            continue
        # packages from the spec matches that are artifacts of this module
        query = baseQuery.filter(nevra_strict=artifacts)
        if query:
            for pkg in query:
                string_output = ""
                # collect the module profiles whose content includes this package
                profiles = []
                for profile in sorted(modulePackage.getProfiles(), key=_profile_comparison_key):
                    if pkg.name in profile.getContent():
                        profiles.append(profile.getName())
                lines = OrderedDict()
                lines["Module"] = modulePackage.getFullIdentifier()
                lines["Profiles"] = " ".join(sorted(profiles))
                lines["Repo"] = modulePackage.getRepoID()
                lines["Summary"] = modulePackage.getSummary()
                table = self._create_simple_table(lines)
                string_output += "{}\n".format(self.base.output.term.bold(str(pkg)))
                string_output += "{}".format(table.toString())
                output.add(string_output)
    return "\n\n".join(sorted(output))
def _create_and_fill_table(self, latest):
    """Build the module list table from *latest* (per-repo groups of name/stream/arch lists).

    Each inner name/stream/arch group contributes one row; when a group
    holds several module packages, an active one is preferred as the row
    representative.  The Summary column is hidden unless verbose output
    is enabled.  Returns the populated libdnf smartcols table.
    """
    table = libdnf.smartcols.Table()
    table.setTermforce(libdnf.smartcols.Table.TermForce_AUTO)
    table.enableMaxout(True)
    column_name = table.newColumn("Name")
    column_stream = table.newColumn("Stream")
    column_profiles = table.newColumn("Profiles")
    column_profiles.setWrap(True)
    column_info = table.newColumn("Summary")
    column_info.setWrap(True)
    if not self.base.conf.verbose:
        column_info.hidden = True
    for latest_per_repo in latest:
        for nameStreamArch in latest_per_repo:
            if len(nameStreamArch) == 1:
                modulePackage = nameStreamArch[0]
            else:
                # prefer an active module as the representative of the group
                active = [module for module in nameStreamArch
                          if self.base._moduleContainer.isModuleActive(module)]
                if active:
                    modulePackage = active[0]
                else:
                    modulePackage = nameStreamArch[0]
            line = table.newLine()
            default_str, enabled_str, disabled_str = self._module_strs_formatter(
                modulePackage, markActive=False)
            default_profiles = self.base._moduleContainer.getDefaultProfiles(
                modulePackage.getName(), modulePackage.getStream())
            profiles_str = self._profile_report_formatter(modulePackage, default_profiles,
                                                          enabled_str)
            line.getColumnCell(column_name).setData(modulePackage.getName())
            # stream cell carries the state markers appended to the stream name
            line.getColumnCell(
                column_stream).setData(
                modulePackage.getStream() + default_str + enabled_str + disabled_str)
            line.getColumnCell(column_profiles).setData(profiles_str)
            summary_str = self._summary_report_formatter(modulePackage.getSummary())
            line.getColumnCell(column_info).setData(summary_str)
    return table
def _get_brief_description(self, module_specs, module_state):
    """Render the "module list" style table, grouped per repository.

    *module_specs* limits the listing (empty means all modules);
    *module_state* filters by state.  One table body is produced with a
    bold repository heading and a repeated header line inserted every
    time the rows cross into the next repository group.  Returns "" when
    nothing matches, otherwise the table text plus MODULE_TABLE_HINT.
    """
    modules = []
    if module_specs:
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            modules.extend(module_list)
    else:
        modules = self.base._moduleContainer.getModulePackages()
    latest = self.base._moduleContainer.getLatestModulesPerRepo(module_state, modules)
    if not latest:
        return ""
    table = self._create_and_fill_table(latest)
    current_repo_id_index = 0
    already_printed_lines = 0
    # repo display name falls back to the raw repo id when the repo is
    # not configured in self.base.repos
    try:
        repo_name = self.base.repos[latest[0][0][0].getRepoID()].name
    except KeyError:
        repo_name = latest[0][0][0].getRepoID()
    # number of table rows belonging to the current repository group
    versions = len(latest[0])
    header = self._format_header(table)
    str_table = self._format_repoid(repo_name)
    str_table += header
    for i in range(0, table.getNumberOfLines()):
        # row i has moved past the current repo group: emit next heading
        if versions + already_printed_lines <= i:
            already_printed_lines += versions
            current_repo_id_index += 1
            # Fail-Safe repository is not in self.base.repos
            try:
                repo_name = self.base.repos[
                    latest[current_repo_id_index][0][0].getRepoID()].name
            except KeyError:
                repo_name = latest[current_repo_id_index][0][0].getRepoID()
            versions = len(latest[current_repo_id_index])
            str_table += "\n"
            str_table += self._format_repoid(repo_name)
            str_table += header
        line = table.getLine(i)
        str_table += table.toString(line, line)
    return str_table + MODULE_TABLE_HINT
def _format_header(self, table):
    """Render the table's first line and return just the heading row text."""
    first = table.getLine(0)
    rendered = table.toString(first, first)
    heading, _sep, _rest = rendered.partition('\n')
    return heading + '\n'
def _format_repoid(self, repo_name):
    """Return *repo_name* rendered bold via the terminal helper, newline-terminated."""
    bold_name = self.base.output.term.bold(repo_name)
    return bold_name + "\n"
def _install_profiles_internal(self, install_set_artifacts, install_dict, strict):
    """Queue profile packages for installation on the base goal.

    *install_set_artifacts* are the NEVRA strings of the wanted module
    artifacts; *install_dict* maps package name -> set of original user
    specs (used for error reporting).  With *strict* False, individual
    packages are optional in the goal.  Returns a tuple of the query of
    installable candidates and the list of specs that matched nothing.
    """
    # Remove source packages because they cannot be installed or upgraded
    base_no_source_query = self.base.sack.query().filterm(arch__neq=['src', 'nosrc']).apply()
    install_base_query = base_no_source_query.filter(nevra_strict=install_set_artifacts)
    error_specs = []
    # add hot-fix packages
    hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
    hotfix_packages = base_no_source_query.filter(
        reponame=hot_fix_repos, name=install_dict.keys())
    install_base_query = install_base_query.union(hotfix_packages)
    for pkg_name, set_specs in install_dict.items():
        query = install_base_query.filter(name=pkg_name)
        if not query:
            # package can also be non-modular or part of another stream
            query = base_no_source_query.filter(name=pkg_name)
            if not query:
                # no candidate at all: report every spec that asked for it
                for spec in set_specs:
                    logger.error(_("Unable to resolve argument {}").format(spec))
                logger.error(_("No match for package {}").format(pkg_name))
                error_specs.extend(set_specs)
                continue
        self.base._goal.group_members.add(pkg_name)
        sltr = dnf.selector.Selector(self.base.sack)
        sltr.set(pkg=query)
        self.base._goal.install(select=sltr, optional=(not strict))
    return install_base_query, error_specs
def format_modular_solver_errors(errors):
    """Return a printable report of modular depsolve *errors*.

    The heading is pluralized by the number of errors; the formatted
    problem list follows on subsequent lines.
    """
    heading = P_('Modular dependency problem:', 'Modular dependency problems:', len(errors))
    details = dnf.util._format_resolve_problems(errors)
    return "\n".join([heading, details])
| 41,537
|
Python
|
.py
| 768
| 37.869792
| 100
| 0.559046
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,786
|
substitutions.py
|
rpm-software-management_dnf/dnf/conf/substitutions.py
|
# substitutions.py
# Config file substitutions.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import logging
import os
import re
from dnf.i18n import _
from dnf.exceptions import ReadOnlyVariableError
ENVIRONMENT_VARS_RE = re.compile(r'^DNF_VAR_[A-Za-z0-9_]+$')
READ_ONLY_VARIABLES = frozenset(("releasever_major", "releasever_minor"))

logger = logging.getLogger('dnf')


class Substitutions(dict):
    # :api
    """Mapping of dnf variable names to their values.

    Seeded from the process environment (DNF0..DNF9 and DNF_VAR_*
    variables) and optionally from drop-in files via update_from_etc().
    Assigning "releasever" automatically derives the read-only
    "releasever_major" and "releasever_minor" variables.
    """

    def __init__(self):
        super(Substitutions, self).__init__()
        self._update_from_env()

    def _update_from_env(self):
        """Seed variables from the environment."""
        # DNF0..DNF9 are taken verbatim; DNF_VAR_<name> drops the prefix
        numeric_names = frozenset('DNF%d' % i for i in range(10))
        for name, value in os.environ.items():
            if ENVIRONMENT_VARS_RE.match(name):
                self[name[len('DNF_VAR_'):]] = value
            elif name in numeric_names:
                self[name] = value

    @staticmethod
    def _split_releasever(releasever):
        # type: (str) -> tuple[str, str]
        """Split "X.Y" at the first dot; no dot yields (releasever, "")."""
        major, _sep, minor = releasever.partition(".")
        return major, minor

    def __setitem__(self, key, value):
        if Substitutions.is_read_only(key):
            raise ReadOnlyVariableError(f"Variable \"{key}\" is read-only", variable_name=key)
        store = super(Substitutions, self).__setitem__
        store(key, value)
        # keep the derived read-only variables in sync with releasever
        if key == "releasever" and value:
            major, minor = Substitutions._split_releasever(value)
            store("releasever_major", major)
            store("releasever_minor", minor)

    @staticmethod
    def is_read_only(key):
        # type: (str) -> bool
        """Return True when *key* names a derived, read-only variable."""
        return key in READ_ONLY_VARIABLES

    def update_from_etc(self, installroot, varsdir=("/etc/yum/vars/", "/etc/dnf/vars/")):
        # :api
        """Load variables from drop-in files below *installroot*.

        Every regular file in the *varsdir* directories defines one
        variable: the file name is the variable name, the first line
        (without its trailing newline) is the value.
        """
        for vars_path in varsdir:
            dir_fsvars = os.path.join(installroot, vars_path.lstrip('/'))
            try:
                fsvars = os.listdir(dir_fsvars)
            except OSError:
                # directory missing or unreadable - nothing to load here
                continue
            for fsvar in fsvars:
                filepath = os.path.join(dir_fsvars, fsvar)
                if not os.path.isfile(filepath):
                    continue
                try:
                    with open(filepath) as fp:
                        val = fp.readline()
                except (OSError, IOError, UnicodeDecodeError) as e:
                    logger.warning(_("Error when parsing a variable from file '{0}': {1}").format(filepath, e))
                    continue
                if val and val[-1] == '\n':
                    val = val[:-1]
                self[fsvar] = val
| 3,815
|
Python
|
.py
| 86
| 35.104651
| 115
| 0.616958
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,787
|
config.py
|
rpm-software-management_dnf/dnf/conf/config.py
|
# dnf configuration classes.
#
# Copyright (C) 2016-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.yum import misc
from dnf.i18n import ucd, _
from dnf.pycomp import basestring, urlparse
import fnmatch
import dnf.conf.substitutions
import dnf.const
import dnf.exceptions
import dnf.pycomp
import dnf.util
import hawkey
import logging
import os
import libdnf.conf
import libdnf.repo
import tempfile
PRIO_EMPTY = libdnf.conf.Option.Priority_EMPTY
PRIO_DEFAULT = libdnf.conf.Option.Priority_DEFAULT
PRIO_MAINCONFIG = libdnf.conf.Option.Priority_MAINCONFIG
PRIO_AUTOMATICCONFIG = libdnf.conf.Option.Priority_AUTOMATICCONFIG
PRIO_REPOCONFIG = libdnf.conf.Option.Priority_REPOCONFIG
PRIO_PLUGINDEFAULT = libdnf.conf.Option.Priority_PLUGINDEFAULT
PRIO_PLUGINCONFIG = libdnf.conf.Option.Priority_PLUGINCONFIG
PRIO_COMMANDLINE = libdnf.conf.Option.Priority_COMMANDLINE
PRIO_RUNTIME = libdnf.conf.Option.Priority_RUNTIME
logger = logging.getLogger('dnf')
class BaseConfig(object):
    """Base class for storing configuration definitions.
    Subclass when creating your own definitions.

    Wraps a libdnf configuration object (``_config``): attribute access
    is forwarded to the corresponding libdnf option where one exists,
    otherwise stored on the Python instance.
    """

    def __init__(self, config=None, section=None, parser=None):
        # stored via __dict__ directly so __setattr__ (which consults
        # _config) is not triggered before _config exists; `parser` is
        # accepted for interface compatibility but unused here
        self.__dict__["_config"] = config
        self._section = section

    def __getattr__(self, name):
        """Return the value of libdnf option *name*, or None when unreadable."""
        if "_config" not in self.__dict__:
            raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__, name))
        option = getattr(self._config, name)
        if option is None:
            return None
        try:
            value = option().getValue()
        except Exception as ex:
            # option exists but has no readable value
            return None
        if isinstance(value, str):
            return ucd(value)
        return value

    def __setattr__(self, name, value):
        """Set libdnf option *name* at runtime priority; unknown names go to the instance."""
        option = getattr(self._config, name, None)
        if option is None:
            # unknown config option, store to BaseConfig only
            return super(BaseConfig, self).__setattr__(name, value)
        self._set_value(name, value, PRIO_RUNTIME)

    def __str__(self):
        out = []
        out.append('[%s]' % self._section)
        if self._config:
            for optBind in self._config.optBinds():
                try:
                    value = optBind.second.getValueString()
                except RuntimeError:
                    # option has no value set; show it as empty
                    value = ""
                out.append('%s: %s' % (optBind.first, value))
        return '\n'.join(out)

    def _has_option(self, name):
        """Return True when the underlying libdnf config defines option *name*."""
        method = getattr(self._config, name, None)
        return method is not None

    def _get_value(self, name):
        """Return the value of option *name*, or None when it does not exist."""
        method = getattr(self._config, name, None)
        if method is None:
            return None
        return method().getValue()

    def _get_priority(self, name):
        """Return the current priority of option *name*, or None when it does not exist."""
        method = getattr(self._config, name, None)
        if method is None:
            return None
        return method().getPriority()

    def _set_value(self, name, value, priority=PRIO_RUNTIME):
        """Set option's value if priority is equal or higher
        than current priority.

        Raises a generic Exception for an unknown option and
        dnf.exceptions.ConfigError when libdnf rejects the value.
        """
        method = getattr(self._config, name, None)
        if method is None:
            raise Exception("Option \"" + name + "\" does not exists")
        option = method()
        if value is None:
            # best-effort reset; some option types cannot take None
            try:
                option.set(priority, value)
            except Exception:
                pass
        else:
            try:
                if isinstance(value, list) or isinstance(value, tuple):
                    option.set(priority, libdnf.conf.VectorString(value))
                elif (isinstance(option, libdnf.conf.OptionBool)
                      or isinstance(option, libdnf.conf.OptionChildBool)
                      ) and isinstance(value, int):
                    # allow 0/1 ints for boolean options
                    option.set(priority, bool(value))
                else:
                    option.set(priority, value)
            except RuntimeError as e:
                raise dnf.exceptions.ConfigError(_("Error parsing '%s': %s")
                                                 % (value, str(e)),
                                                 raw_error=str(e))

    def _populate(self, parser, section, filename, priority=PRIO_DEFAULT):
        """Set option values from an INI file section.

        Invalid values are logged as errors; names unknown to libdnf
        fall back to Python attributes only for 'arch', otherwise they
        are logged at debug level.
        """
        if parser.hasSection(section):
            for name in parser.options(section):
                value = parser.getSubstitutedValue(section, name)
                if not value or value == 'None':
                    value = ''
                if hasattr(self._config, name):
                    try:
                        self._config.optBinds().at(name).newString(priority, value)
                    except RuntimeError as e:
                        logger.error(_('Invalid configuration value: %s=%s in %s; %s'),
                                     ucd(name), ucd(value), ucd(filename), str(e))
                else:
                    if name == 'arch' and hasattr(self, name):
                        setattr(self, name, value)
                    else:
                        logger.debug(
                            _('Unknown configuration option: %s = %s in %s'),
                            ucd(name), ucd(value), ucd(filename))

    def dump(self):
        # :api
        """Return a string representing the values of all the
        configuration options.
        """
        output = ['[%s]' % self._section]
        if self._config:
            for optBind in self._config.optBinds():
                # if not opt._is_runtimeonly():
                try:
                    output.append('%s = %s' % (optBind.first, optBind.second.getValueString()))
                except RuntimeError:
                    # options without a readable value are omitted
                    pass
        return '\n'.join(output) + '\n'

    def set_or_append_opt_value(self, name, value_string, priority=PRIO_RUNTIME):
        # :api
        """For standard options, sets the value of the option if the priority is equal to or higher
        than the current priority.
        For "append" options, appends the values parsed from value_string to the current list of values. If the first
        parsed element of the list of values is empty and the priority is equal to or higher than the current
        priority, the current list is replaced with the new values.
        If the priority is higher than the current priority, the current priority is increased to the priority.
        Raises dnf.exceptions.ConfigError if the option with the given name does not exist or value_string contains
        an invalid value or not allowed value.
        """
        opt_binds = self._config.optBinds()
        try:
            opt_binds.at(name).newString(priority, value_string)
        except RuntimeError as e:
            raise dnf.exceptions.ConfigError(
                _('Cannot set "{}" to "{}": {}').format(name, value_string, str(e)), str(e))

    @staticmethod
    def write_raw_configfile(filename, section_id, substitutions, modify):
        # :api
        """
        filename - name of config file (.conf or .repo)
        section_id - id of modified section (e.g. main, fedora, updates)
        substitutions - instance of base.conf.substitutions
        modify - dict of modified options
        """
        parser = libdnf.conf.ConfigParser()
        parser.read(filename)

        # b/c repoids can have $values in them we need to map both ways to figure
        # out which one is which
        if not parser.hasSection(section_id):
            for sect in parser.getData():
                if libdnf.conf.ConfigParser.substitute(sect, substitutions) == section_id:
                    section_id = sect

        for name, value in modify.items():
            if isinstance(value, list):
                # list values are written space-separated
                value = ' '.join(value)
            parser.setValue(section_id, name, value)

        parser.write(filename, False)
class MainConf(BaseConfig):
    # :api
    """Configuration option definitions for dnf.conf's [main] section."""

    def __init__(self, section='main', parser=None):
        # pylint: disable=R0915
        config = libdnf.conf.ConfigMain()
        super(MainConf, self).__init__(config, section, parser)
        self._set_value('pluginpath', [dnf.const.PLUGINPATH], PRIO_DEFAULT)
        self._set_value('pluginconfpath', [dnf.const.PLUGINCONFPATH], PRIO_DEFAULT)
        self.substitutions = dnf.conf.substitutions.Substitutions()
        self.arch = hawkey.detect_arch()
        self._config.system_cachedir().set(PRIO_DEFAULT, dnf.const.SYSTEM_CACHEDIR)

        # setup different cache and log for non-privileged users
        if dnf.util.am_i_root():
            cachedir = dnf.const.SYSTEM_CACHEDIR
            logdir = '/var/log'
        else:
            try:
                cachedir = logdir = misc.getCacheDir()
            except (IOError, OSError) as e:
                msg = _('Could not set cachedir: {}').format(ucd(e))
                raise dnf.exceptions.Error(msg)

        self._config.cachedir().set(PRIO_DEFAULT, cachedir)
        self._config.logdir().set(PRIO_DEFAULT, logdir)

        # track list of temporary files created
        self.tempfiles = []

    def __del__(self):
        # best-effort cleanup of files downloaded by _check_remote_file()
        if hasattr(self, 'tempfiles'):
            for file_name in self.tempfiles:
                os.unlink(file_name)

    @property
    def get_reposdir(self):
        # :api
        """Returns the value of reposdir"""
        myrepodir = None
        # put repo file into first reposdir which exists or create it
        for rdir in self._get_value('reposdir'):
            if os.path.exists(rdir):
                myrepodir = rdir
                break

        if not myrepodir:
            myrepodir = self._get_value('reposdir')[0]
            dnf.util.ensure_dir(myrepodir)
        return myrepodir

    def _check_remote_file(self, optname):
        """
        In case the option value is a remote URL, download it to the temporary location
        and use this temporary file instead.
        """
        prio = self._get_priority(optname)
        val = self._get_value(optname)
        if isinstance(val, basestring):
            location = urlparse.urlparse(val)
            if location[0] in ('file', ''):
                # just strip the file:// prefix
                self._set_value(optname, location.path, prio)
            else:
                downloader = libdnf.repo.Downloader()
                temp_fd, temp_path = tempfile.mkstemp(prefix='dnf-downloaded-config-')
                self.tempfiles.append(temp_path)
                try:
                    downloader.downloadURL(self._config, val, temp_fd)
                except RuntimeError as e:
                    raise dnf.exceptions.ConfigError(
                        _('Configuration file URL "{}" could not be downloaded:\n'
                          '  {}').format(val, str(e)))
                else:
                    self._set_value(optname, temp_path, prio)
                finally:
                    os.close(temp_fd)

    def _search_inside_installroot(self, optname):
        """
        Return root used as prefix for option (installroot or "/"). When specified from commandline
        it returns value from conf.installroot
        """
        installroot = self._get_value('installroot')
        if installroot == "/":
            return installroot
        prio = self._get_priority(optname)
        # don't modify paths specified on commandline
        if prio >= PRIO_COMMANDLINE:
            return installroot
        val = self._get_value(optname)
        # if it exists inside installroot use it (i.e. adjust configuration)
        # for lists any component counts
        if not isinstance(val, str):
            if any(os.path.exists(os.path.join(installroot, p.lstrip('/'))) for p in val):
                self._set_value(
                    optname,
                    libdnf.conf.VectorString([self._prepend_installroot_path(p) for p in val]),
                    prio
                )
                return installroot
        elif os.path.exists(os.path.join(installroot, val.lstrip('/'))):
            self._set_value(optname, self._prepend_installroot_path(val), prio)
            return installroot
        return "/"

    def prepend_installroot(self, optname):
        # :api
        """Rewrite option *optname* so its path lives under installroot."""
        prio = self._get_priority(optname)
        new_path = self._prepend_installroot_path(self._get_value(optname))
        self._set_value(optname, new_path, prio)

    def _prepend_installroot_path(self, path):
        """Join installroot and *path*, then apply variable substitution."""
        root_path = os.path.join(self._get_value('installroot'), path.lstrip('/'))
        return libdnf.conf.ConfigParser.substitute(root_path, self.substitutions)

    def _configure_from_options(self, opts):
        """Configure parts of CLI from the opts

        Copies recognized attributes of the parsed command-line *opts*
        into the configuration at command-line priority, honoring
        "append"-type options, and applies any --setopt values for
        [main].
        """
        config_args = ['plugins', 'version', 'config_file_path',
                       'debuglevel', 'errorlevel', 'installroot',
                       'best', 'assumeyes', 'assumeno', 'clean_requirements_on_remove', 'gpgcheck',
                       'showdupesfromrepos', 'plugins', 'ip_resolve',
                       'rpmverbosity', 'disable_excludes', 'color',
                       'downloadonly', 'exclude', 'excludepkgs', 'skip_broken',
                       'tsflags', 'arch', 'basearch', 'ignorearch', 'cacheonly', 'comment']

        for name in config_args:
            value = getattr(opts, name, None)
            if value is not None and value != []:
                if self._has_option(name):
                    appendValue = False
                    if self._config:
                        try:
                            appendValue = self._config.optBinds().at(name).getAddValue()
                        except RuntimeError:
                            # fails if option with "name" does not exist in _config (libdnf)
                            pass
                    if appendValue:
                        add_priority = dnf.conf.PRIO_COMMANDLINE
                        if add_priority < self._get_priority(name):
                            add_priority = self._get_priority(name)
                        for item in value:
                            if item:
                                self._set_value(name, self._get_value(name) + [item], add_priority)
                            else:
                                # an empty item clears the accumulated list
                                self._set_value(name, [], dnf.conf.PRIO_COMMANDLINE)
                    else:
                        self._set_value(name, value, dnf.conf.PRIO_COMMANDLINE)
                elif hasattr(self, name):
                    setattr(self, name, value)
                else:
                    logger.warning(_('Unknown configuration option: %s = %s'),
                                   ucd(name), ucd(value))

        if getattr(opts, 'gpgcheck', None) is False:
            # --nogpgcheck also disables local package signature checking
            self._set_value("localpkg_gpgcheck", False, dnf.conf.PRIO_COMMANDLINE)

        if hasattr(opts, 'main_setopts'):
            # now set all the non-first-start opts from main from our setopts
            # pylint: disable=W0212
            for name, values in opts.main_setopts.items():
                for val in values:
                    if hasattr(self._config, name):
                        try:
                            # values in main_setopts are strings, try to parse it using newString()
                            self._config.optBinds().at(name).newString(PRIO_COMMANDLINE, val)
                        except RuntimeError as e:
                            raise dnf.exceptions.ConfigError(
                                _("Error parsing --setopt with key '%s', value '%s': %s")
                                % (name, val, str(e)), raw_error=str(e))
                    else:
                        # if config option with "name" doesn't exist in _config, it could be defined
                        # only in Python layer
                        if hasattr(self, name):
                            setattr(self, name, val)
                        else:
                            msg = _("Main config did not have a %s attr. before setopt")
                            logger.warning(msg, name)

    def exclude_pkgs(self, pkgs):
        # :api
        """Set the excludepkgs option to *pkgs* at command-line priority."""
        name = "excludepkgs"

        if pkgs is not None and pkgs != []:
            if self._has_option(name):
                self._set_value(name, pkgs, dnf.conf.PRIO_COMMANDLINE)
            else:
                logger.warning(_('Unknown configuration option: %s = %s'),
                               ucd(name), ucd(pkgs))

    def _adjust_conf_options(self):
        """Adjust conf options interactions"""
        skip_broken_val = self._get_value('skip_broken')
        if skip_broken_val:
            # skip_broken is the inverse of strict
            self._set_value('strict', not skip_broken_val, self._get_priority('skip_broken'))

    @property
    def releasever(self):
        # :api
        return self.substitutions.get('releasever')

    @releasever.setter
    def releasever(self, val):
        # :api
        if val is None:
            self.substitutions.pop('releasever', None)
            return
        self.substitutions['releasever'] = str(val)

    @property
    def arch(self):
        # :api
        return self.substitutions.get('arch')

    @arch.setter
    def arch(self, val):
        # :api
        # setting arch also derives and sets basearch
        if val is None:
            self.substitutions.pop('arch', None)
            return
        if val not in dnf.rpm._BASEARCH_MAP.keys():
            msg = _('Incorrect or unknown "{}": {}')
            raise dnf.exceptions.Error(msg.format("arch", val))
        self.substitutions['arch'] = val
        self.basearch = dnf.rpm.basearch(val)

    @property
    def basearch(self):
        # :api
        return self.substitutions.get('basearch')

    @basearch.setter
    def basearch(self, val):
        # :api
        if val is None:
            self.substitutions.pop('basearch', None)
            return
        if val not in dnf.rpm._BASEARCH_MAP.values():
            msg = _('Incorrect or unknown "{}": {}')
            raise dnf.exceptions.Error(msg.format("basearch", val))
        self.substitutions['basearch'] = val

    def read(self, filename=None, priority=PRIO_DEFAULT):
        # :api
        """Parse config *filename* (default: config_file_path) into this object."""
        if filename is None:
            filename = self._get_value('config_file_path')
        parser = libdnf.conf.ConfigParser()
        try:
            parser.read(filename)
        except RuntimeError as e:
            raise dnf.exceptions.ConfigError(_('Parsing file "%s" failed: %s') % (filename, e))
        except IOError as e:
            logger.warning(e)
        self._populate(parser, self._section, filename, priority)

        # update to where we read the file from
        self._set_value('config_file_path', filename, priority)

    @property
    def verbose(self):
        # True when debuglevel reaches the verbose threshold
        return self._get_value('debuglevel') >= dnf.const.VERBOSE_LEVEL
class RepoConf(BaseConfig):
    """Option definitions for repository INI file sections."""

    def __init__(self, parent, section=None, parser=None):
        # the repo config inherits defaults from the [main] config
        mainConfig = parent._config if parent else libdnf.conf.ConfigMain()
        super(RepoConf, self).__init__(libdnf.conf.ConfigRepo(mainConfig), section, parser)
        # Do not remove! Attribute is a reference holder.
        # Prevents premature removal of the mainConfig. The libdnf ConfigRepo points to it.
        self._mainConfigRefHolder = mainConfig
        if section:
            self._config.name().set(PRIO_DEFAULT, section)

    def _configure_from_options(self, opts):
        """Configure repos from the opts.

        Applies --nogpgcheck and any --setopt values whose repo-id
        pattern matches this repo's section.
        """

        if getattr(opts, 'gpgcheck', None) is False:
            for optname in ['gpgcheck', 'repo_gpgcheck']:
                self._set_value(optname, False, dnf.conf.PRIO_COMMANDLINE)

        repo_setopts = getattr(opts, 'repo_setopts', {})
        for repoid, setopts in repo_setopts.items():
            # repoid may be a glob pattern matched against this section
            if not fnmatch.fnmatch(self._section, repoid):
                continue
            for name, values in setopts.items():
                for val in values:
                    if hasattr(self._config, name):
                        try:
                            # values in repo_setopts are strings, try to parse it using newString()
                            self._config.optBinds().at(name).newString(PRIO_COMMANDLINE, val)
                        except RuntimeError as e:
                            raise dnf.exceptions.ConfigError(
                                _("Error parsing --setopt with key '%s.%s', value '%s': %s")
                                % (self._section, name, val, str(e)), raw_error=str(e))
                    else:
                        msg = _("Repo %s did not have a %s attr. before setopt")
                        logger.warning(msg, self._section, name)
| 21,634
|
Python
|
.py
| 461
| 34.401302
| 120
| 0.573425
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,788
|
__init__.py
|
rpm-software-management_dnf/dnf/conf/__init__.py
|
# conf.py
# dnf configuration classes.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
The configuration classes and routines in yum are splattered over too many
places, hard to change and debug. The new structure here will replace that. Its
goal is to:
* accept configuration options from all three sources (the main config file,
repo config files, command line switches)
* handle all the logic of storing those and producing related values.
* returning configuration values.
* optionally: asserting no value is overridden once it has been applied
somewhere (e.g. do not let a new repo be initialized with different global
cache path than an already existing one).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.conf.config import PRIO_DEFAULT, PRIO_MAINCONFIG, PRIO_AUTOMATICCONFIG
from dnf.conf.config import PRIO_REPOCONFIG, PRIO_PLUGINDEFAULT, PRIO_PLUGINCONFIG
from dnf.conf.config import PRIO_COMMANDLINE, PRIO_RUNTIME
from dnf.conf.config import BaseConfig, MainConf, RepoConf
# Public alias: dnf.conf.Conf refers to MainConf.
Conf = MainConf
| 1,977
|
Python
|
.py
| 38
| 50.657895
| 82
| 0.79855
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,789
|
read.py
|
rpm-software-management_dnf/dnf/conf/read.py
|
# read.py
# Reading configuration from files.
#
# Copyright (C) 2014-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _, ucd
import dnf.conf
import libdnf.conf
import dnf.exceptions
import dnf.repo
import glob
import logging
import os
logger = logging.getLogger('dnf')
class RepoReader(object):
def __init__(self, conf, opts):
self.conf = conf
self.opts = opts
def __iter__(self):
# get the repos from the main yum.conf file
for r in self._get_repos(self.conf.config_file_path):
yield r
# read .repo files from directories specified by conf.reposdir
repo_configs = []
for reposdir in self.conf.reposdir:
for path in glob.glob(os.path.join(reposdir, "*.repo")):
repo_configs.append(path)
# remove .conf suffix before calling the sort function
# also split the path so the separators are not treated as ordinary characters
repo_configs.sort(key=lambda x: dnf.util.split_path(x[:-5]))
for repofn in repo_configs:
try:
for r in self._get_repos(repofn):
yield r
except dnf.exceptions.ConfigError:
logger.warning(_("Warning: failed loading '%s', skipping."),
repofn)
def _build_repo(self, parser, id_, repofn):
"""Build a repository using the parsed data."""
substituted_id = libdnf.conf.ConfigParser.substitute(id_, self.conf.substitutions)
# Check the repo.id against the valid chars
invalid = dnf.repo.repo_id_invalid(substituted_id)
if invalid is not None:
if substituted_id != id_:
msg = _("Bad id for repo: {} ({}), byte = {} {}").format(substituted_id, id_,
substituted_id[invalid],
invalid)
else:
msg = _("Bad id for repo: {}, byte = {} {}").format(id_, id_[invalid], invalid)
raise dnf.exceptions.ConfigError(msg)
repo = dnf.repo.Repo(substituted_id, self.conf)
try:
repo._populate(parser, id_, repofn, dnf.conf.PRIO_REPOCONFIG)
except ValueError as e:
if substituted_id != id_:
msg = _("Repository '{}' ({}): Error parsing config: {}").format(substituted_id,
id_, e)
else:
msg = _("Repository '{}': Error parsing config: {}").format(id_, e)
raise dnf.exceptions.ConfigError(msg)
# Ensure that the repo name is set
if repo._get_priority('name') == dnf.conf.PRIO_DEFAULT:
if substituted_id != id_:
msg = _("Repository '{}' ({}) is missing name in configuration, using id.").format(
substituted_id, id_)
else:
msg = _("Repository '{}' is missing name in configuration, using id.").format(id_)
logger.warning(msg)
repo.name = ucd(repo.name)
repo._substitutions.update(self.conf.substitutions)
repo.cfg = parser
return repo
def _get_repos(self, repofn):
    """Parse and yield all repositories from a config file.

    :param repofn: path to a .repo file
    :raises dnf.exceptions.ConfigError: when the file cannot be parsed
    """
    substs = self.conf.substitutions
    parser = libdnf.conf.ConfigParser()
    parser.setSubstitutions(substs)
    try:
        parser.read(repofn)
    except RuntimeError as e:
        raise dnf.exceptions.ConfigError(_('Parsing file "{}" failed: {}').format(repofn, e))
    except IOError as e:
        # Unreadable file is only a warning; we continue with whatever parsed.
        logger.warning(e)
    # Check sections in the .repo file that was just slurped up
    for section in parser.getData():
        # 'main' holds global options, it is not a repository section.
        if section == 'main':
            continue
        try:
            thisrepo = self._build_repo(parser, ucd(section), repofn)
        except (dnf.exceptions.RepoError, dnf.exceptions.ConfigError) as e:
            # Skip a broken repo section instead of failing the whole file.
            logger.warning(e)
            continue
        else:
            thisrepo.repofile = repofn
            thisrepo._configure_from_options(self.opts)
            yield thisrepo
| 5,259
|
Python
|
.py
| 112
| 36.071429
| 99
| 0.600858
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,790
|
error.py
|
rpm-software-management_dnf/dnf/rpm/error.py
|
# error.py
# RpmUtilsError
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
class RpmUtilsError(Exception):
    """Error raised by the dnf.rpm utility helpers."""
| 1,030
|
Python
|
.py
| 21
| 47.809524
| 77
| 0.785714
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,791
|
miscutils.py
|
rpm-software-management_dnf/dnf/rpm/miscutils.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Copyright 2003 Duke University
from __future__ import print_function, absolute_import, unicode_literals
import os
import subprocess
import logging
from shutil import which
from dnf.i18n import _
_logger = logging.getLogger('dnf')
_rpmkeys_binary = None
def _find_rpmkeys_binary():
    """Locate the ``rpmkeys`` executable on PATH, caching the result.

    The lookup happens only once per process; the path (or None when not
    found) is stored in the module-level ``_rpmkeys_binary``.
    """
    global _rpmkeys_binary
    if _rpmkeys_binary is not None:
        return _rpmkeys_binary
    _rpmkeys_binary = which("rpmkeys")
    _logger.debug(_('Using rpmkeys executable at %s to verify signatures'),
                  _rpmkeys_binary)
    return _rpmkeys_binary
def _process_rpm_output(data):
# No signatures or digests = corrupt package.
# There is at least one line for -: and another (empty) entry after the
# last newline.
if len(data) < 3 or data[0] != b'-:' or data[-1]:
return 2
seen_sig, missing_key, not_trusted, not_signed = False, False, False, False
for i in data[1:-1]:
if b': BAD' in i:
return 2
elif i.endswith(b': NOKEY'):
missing_key = True
elif i.endswith(b': NOTTRUSTED'):
not_trusted = True
elif i.endswith(b': NOTFOUND'):
not_signed = True
elif not i.endswith(b': OK'):
return 2
if not_trusted:
return 3
elif missing_key:
return 1
elif not_signed:
return 4
# we still check return code, so this is safe
return 0
def _verifyPackageUsingRpmkeys(package, installroot):
    """Verify *package* signatures by piping it through the rpmkeys binary.

    :param package: open file descriptor of the rpm package (fed to stdin)
    :param installroot: root passed to ``rpmkeys --root``
    :return: same codes as checkSig(): 0 ok, 1 missing key, 2 damaged or
        unverifiable, 3 untrusted key, 4 unsigned package
    """
    rpmkeys_binary = _find_rpmkeys_binary()
    if rpmkeys_binary is None or not os.path.isfile(rpmkeys_binary):
        _logger.critical(_('Cannot find rpmkeys executable to verify signatures.'))
        return 2
    # "--define=_pkgverify_level signature" enforces signature checking;
    # "--define=_pkgverify_flags 0x0" ensures that all signatures are checked.
    args = ('rpmkeys', '--checksig', '--root', installroot, '--verbose',
            '--define=_pkgverify_level signature', '--define=_pkgverify_flags 0x0',
            '-')
    # Force the C locale so the status markers parsed by
    # _process_rpm_output() are not localized.
    env = dict(os.environ)
    env['LC_ALL'] = 'C'
    with subprocess.Popen(
            args=args,
            executable=rpmkeys_binary,
            env=env,
            stdout=subprocess.PIPE,
            cwd='/',
            stdin=package) as p:
        data = p.communicate()[0]
    returncode = p.returncode
    if type(returncode) is not int:
        raise AssertionError('Popen set return code to non-int')
    # rpmkeys can return something other than 0 or 1 in the case of a
    # fatal error (OOM, abort() called, SIGSEGV, etc)
    if returncode >= 2 or returncode < 0:
        return 2
    ret = _process_rpm_output(data.split(b'\n'))
    if ret:
        return ret
    # Output parsed clean: trust the exit status for the final verdict.
    return 2 if returncode else 0
def checkSig(ts, package):
    """Verify the signatures of a package file via rpmkeys.

    :param ts: transaction set; its root directory is used as installroot
    :param package: path to the rpm package file
    :return: 0 all signatures fine, 1 gpg key not found, 2 header damaged,
        3 key not trusted, 4 package not gpg/pgp signed
    """
    open_flags = os.O_RDONLY | os.O_NOCTTY | os.O_CLOEXEC
    fdno = os.open(package, open_flags)
    try:
        return _verifyPackageUsingRpmkeys(fdno, ts.ts.rootDir)
    finally:
        os.close(fdno)
| 3,906
|
Python
|
.py
| 99
| 33.40404
| 83
| 0.667632
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,792
|
transaction.py
|
rpm-software-management_dnf/dnf/rpm/transaction.py
|
#
# Client code for Update Agent
# Copyright (c) 1999-2002 Red Hat, Inc. Distributed under GPL.
#
# Adrian Likins <alikins@redhat.com>
# Some Edits by Seth Vidal <skvidal@phy.duke.edu>
#
# a couple of classes wrapping up transactions so that we
# can share transactions instead of creating new ones all over
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
import logging
import rpm
_logger = logging.getLogger('dnf')
read_ts = None
ts = None
# wrapper/proxy class for rpm.Transaction so we can
# instrument it, etc easily
class TransactionWrapper(object):
    """Proxy around :class:`rpm.TransactionSet`.

    Transparently forwards a whitelisted set of methods (``_methods``) to
    the underlying transaction set and adds helpers for flag handling, so
    transactions can be shared and instrumented easily.
    """

    def __init__(self, root='/'):
        # root: installroot the rpm database is opened under.
        self.ts = rpm.TransactionSet(root)
        # Names that __getattr__ will forward straight to self.ts.
        self._methods = ['check',
                         'order',
                         'addErase',
                         'addInstall',
                         'addReinstall',
                         'run',
                         'pgpImportPubkey',
                         'pgpPrtPkts',
                         'problems',
                         'setFlags',
                         'setVSFlags',
                         'setProbFilter',
                         'hdrFromFdno',
                         'next',
                         'clean']
        # History of values handed to pushVSFlags() (no pop defined here).
        self.tsflags = []
        self.open = True

    def __del__(self):
        # Automatically close the rpm transaction when the reference is lost
        self.close()

    def close(self):
        # Close the rpm database and drop the transaction set; idempotent.
        if self.open:
            self.ts.closeDB()
            self.ts = None
            self.open = False

    def dbMatch(self, *args, **kwds):
        """Run ts.dbMatch, optionally narrowing the iterator.

        An optional 'patterns' keyword is a list of (tag, type, pattern)
        triples applied to the match iterator before it is returned.
        """
        if 'patterns' in kwds:
            patterns = kwds.pop('patterns')
        else:
            patterns = []
        mi = self.ts.dbMatch(*args, **kwds)
        for (tag, tp, pat) in patterns:
            mi.pattern(tag, tp, pat)
        return mi

    def dbCookie(self):
        """Return the rpm database cookie, or '' when it cannot be read."""
        # dbCookie() does not support lazy opening of rpm database.
        # The following line opens the database if it is not already open.
        if self.ts.openDB() != 0:
            _logger.error(_('The openDB() function cannot open rpm database.'))
            return ''
        cookie = self.ts.dbCookie()
        if not cookie:
            _logger.error(_('The dbCookie() function did not return cookie of rpm database.'))
            return ''
        return cookie

    def __getattr__(self, attr):
        # Forward only the whitelisted methods; anything else is an error.
        if attr in self._methods:
            return self.getMethod(attr)
        else:
            raise AttributeError(attr)

    def __iter__(self):
        return self.ts

    def getMethod(self, method):
        # in theory, we can override this with
        # profile/etc info
        return getattr(self.ts, method)

    # push/pop methods so we don't lose the previous
    # set value, and we can potentially debug a bit
    # easier
    def pushVSFlags(self, flags):
        # Remember the new VS flags and apply them to the transaction set.
        self.tsflags.append(flags)
        self.ts.setVSFlags(self.tsflags[-1])

    def addTsFlag(self, flag):
        # setFlags(0) returns the previous value (see getTsFlags), so OR the
        # new flag into it and set it back.
        curflags = self.ts.setFlags(0)
        self.ts.setFlags(curflags | flag)

    def getTsFlags(self):
        # Read the current flags without net change: set 0, capture the old
        # value from the return, then restore it.
        curflags = self.ts.setFlags(0)
        self.ts.setFlags(curflags)
        return curflags

    def isTsFlagSet(self, flag):
        """Return True if *flag* is currently set on the transaction."""
        val = self.getTsFlags()
        return bool(flag & val)

    def setScriptFd(self, fd):
        # Redirect scriptlet output to the given file object's descriptor.
        self.ts.scriptFd = fd.fileno()

    def test(self, cb, conf={}):
        """tests the ts we've setup, takes a callback function and a conf dict
        for flags and what not"""
        # NOTE(review): mutable default 'conf' is only read here, never
        # mutated, so the shared-default pitfall does not bite.
        origflags = self.getTsFlags()
        self.addTsFlag(rpm.RPMTRANS_FLAG_TEST)
        # FIXME GARBAGE - remove once this is reimplemented elsewhere
        # KEEPING FOR API COMPLIANCE ONLY
        if conf.get('diskspacecheck') == 0:
            self.ts.setProbFilter(rpm.RPMPROB_FILTER_DISKSPACE)
        tserrors = self.ts.run(cb.callback, '')
        # Restore the flags that were in effect before the test run.
        self.ts.setFlags(origflags)
        reserrors = []
        if tserrors is not None:
            for (descr, (etype, mount, need)) in tserrors:
                reserrors.append(descr)
        if not reserrors:
            reserrors.append(_('Errors occurred during test transaction.'))
        return reserrors
def initReadOnlyTransaction(root='/'):
    """Return a TransactionWrapper with signature and digest checks off.

    :param root: installroot for the rpm database
    """
    wrapper = TransactionWrapper(root=root)
    wrapper.pushVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
    return wrapper
| 4,375
|
Python
|
.py
| 119
| 27.058824
| 94
| 0.580676
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,793
|
__init__.py
|
rpm-software-management_dnf/dnf/rpm/__init__.py
|
# __init__.py
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from . import transaction
from dnf.pycomp import is_py3bytes
import dnf.const
import dnf.exceptions
import rpm # used by ansible (dnf.rpm.rpm.labelCompare in lib/ansible/modules/packaging/os/dnf.py)
def detect_releasever(installroot):
    # :api
    """Calculate the release version for the system.

    Probes the rpmdb under *installroot* for the first package providing
    one of dnf.const.DISTROVERPKG and returns its version (or the provide's
    version when given with '=') as a str, or None when nothing matches.
    """
    ts = transaction.initReadOnlyTransaction(root=installroot)
    # Re-enable the signature/digest verification bits that
    # initReadOnlyTransaction switched off (complement of the NO* mask).
    ts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))
    for distroverpkg in dnf.const.DISTROVERPKG:
        if dnf.pycomp.PY3:
            # Query with bytes on py3; the index() fallback below copes with
            # rpm returning either bytes or str.
            distroverpkg = bytes(distroverpkg, 'utf-8')
        try:
            idx = ts.dbMatch('provides', distroverpkg)
        except (TypeError, rpm.error) as e:
            raise dnf.exceptions.Error('Error: %s' % str(e))
        if not len(idx):
            # Nothing installed provides this name; try the next candidate.
            continue
        try:
            hdr = next(idx)
        except StopIteration:
            msg = 'Error: rpmdb failed to list provides. Try: rpm --rebuilddb'
            raise dnf.exceptions.Error(msg)
        # Default to the providing package's own version.
        releasever = hdr['version']
        try:
            try:
                # header returns bytes -> look for bytes
                # it may fail because rpm returns a decoded string since 10 Apr 2019
                off = hdr[rpm.RPMTAG_PROVIDENAME].index(distroverpkg)
            except ValueError:
                # header returns a string -> look for a string
                off = hdr[rpm.RPMTAG_PROVIDENAME].index(distroverpkg.decode("utf8"))
            flag = hdr[rpm.RPMTAG_PROVIDEFLAGS][off]
            ver = hdr[rpm.RPMTAG_PROVIDEVERSION][off]
            # Prefer an explicit "provides NAME = VER" version unless the
            # provider *is* the distrover package itself.
            if flag == rpm.RPMSENSE_EQUAL and ver:
                if hdr['name'] not in (distroverpkg, distroverpkg.decode("utf8")):
                    # override the package version
                    releasever = ver
        except (ValueError, KeyError, IndexError):
            # Best effort: fall back to the package version computed above.
            pass
        if is_py3bytes(releasever):
            releasever = str(releasever, "utf-8")
        return releasever
    return None
def _header(path):
    """Return RPM header of the file.

    :param path: path to an rpm package file
    :raises dnf.exceptions.Error: when rpm cannot read the header
    """
    # Signature/digest checks are disabled by initReadOnlyTransaction.
    ts = transaction.initReadOnlyTransaction()
    with open(path) as package:
        # Only the descriptor is used; rpm itself reads the package data.
        fdno = package.fileno()
        try:
            hdr = ts.hdrFromFdno(fdno)
        except rpm.error as e:
            # Presumably an unreadable or invalid package -- wrap with path.
            raise dnf.exceptions.Error("{0}: '{1}'".format(e, path))
        return hdr
def _invert(dct):
return {v: k for k in dct for v in dct[k]}
_BASEARCH_MAP = _invert({
'aarch64': ('aarch64',),
'alpha': ('alpha', 'alphaev4', 'alphaev45', 'alphaev5', 'alphaev56',
'alphaev6', 'alphaev67', 'alphaev68', 'alphaev7', 'alphapca56'),
'arm': ('armv5tejl', 'armv5tel', 'armv5tl', 'armv6l', 'armv7l', 'armv8l'),
'armhfp': ('armv6hl', 'armv7hl', 'armv7hnl', 'armv8hl'),
'i386': ('i386', 'athlon', 'geode', 'i386', 'i486', 'i586', 'i686'),
'ia64': ('ia64',),
'mips': ('mips',),
'mipsel': ('mipsel',),
'mips64': ('mips64',),
'mips64el': ('mips64el',),
'loongarch64': ('loongarch64',),
'noarch': ('noarch',),
'ppc': ('ppc',),
'ppc64': ('ppc64', 'ppc64iseries', 'ppc64p7', 'ppc64pseries'),
'ppc64le': ('ppc64le',),
'riscv32' : ('riscv32',),
'riscv64' : ('riscv64',),
'riscv128' : ('riscv128',),
's390': ('s390',),
's390x': ('s390x',),
'sh3': ('sh3',),
'sh4': ('sh4', 'sh4a'),
'sparc': ('sparc', 'sparc64', 'sparc64v', 'sparcv8', 'sparcv9',
'sparcv9v'),
'x86_64': ('x86_64', 'amd64', 'ia32e'),
})
def basearch(arch):
    # :api
    """Return the base architecture for *arch* (e.g. 'amd64' -> 'x86_64').

    Raises KeyError for an architecture not in _BASEARCH_MAP.
    """
    return _BASEARCH_MAP[arch]
def getheader(rpm_hdr, key):
    """Return the value of ``rpm_hdr[key]`` as a string.

    Rpm has switched from bytes to str over time; bytes values are decoded
    as UTF-8 so callers always receive a string.
    """
    raw = rpm_hdr[key]
    return str(raw, "utf-8") if is_py3bytes(raw) else raw
| 4,837
|
Python
|
.py
| 117
| 34.615385
| 99
| 0.630047
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,794
|
connection.py
|
rpm-software-management_dnf/dnf/rpm/connection.py
|
# connection.py
# Maintain RPMDB connections.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from .transaction import initReadOnlyTransaction
import dnf.util
class RpmConnection(object):
    """Holds a lazily created read-only rpm transaction for one root."""

    def __init__(self, root):
        # Installroot the rpm database will be opened under.
        self.root = root

    @property
    @dnf.util.lazyattr("_readonly_ts")
    def readonly_ts(self):
        # Created on first access; dnf.util.lazyattr caches the result
        # (under '_readonly_ts') so subsequent reads reuse the same object.
        return initReadOnlyTransaction(self.root)
| 1,369
|
Python
|
.py
| 30
| 43.433333
| 77
| 0.773783
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,795
|
test_plugin.py
|
rpm-software-management_dnf/tests/test_plugin.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf.logging
import dnf.plugin
import dnf.pycomp
import tests.support
# Directory holding the plugins used by these tests.
PLUGINS = "%s/tests/plugins" % tests.support.dnf_toplevel()


def testconf():
    """Return a FakeConf with plugin search and config paths set to PLUGINS."""
    fake = tests.support.FakeConf()
    fake.pluginpath = [PLUGINS]
    fake.pluginconfpath = [PLUGINS]
    return fake
class PluginTest(tests.support.TestCase):
    """Load the bundled test plugin directory and exercise the plugin API."""

    def setUp(self):
        self.plugins = dnf.plugin.Plugins()
        # Load with no disable/enable globs.
        self.plugins._load(testconf(), (), ())

    def tearDown(self):
        self.plugins._unload()

    def test_load(self):
        """Loading discovers exactly one plugin class, named 'lucky'."""
        self.assertLength(self.plugins.plugin_cls, 1)
        cls = self.plugins.plugin_cls[0]
        assert(issubclass(cls, dnf.plugin.Plugin))
        self.assertEqual(cls.name, 'lucky')

    def test_runs(self):
        """_run_init instantiates plugins and _run_config marks them configured."""
        self.assertLength(self.plugins.plugins, 0)
        self.plugins._run_init(None, None)
        self.assertLength(self.plugins.plugins, 1)
        self.plugins._run_config()
        lucky = self.plugins.plugins[0]
        self.assertTrue(lucky._config)

    def test_config(self):
        """read_config finds the plugin's config despite a bogus first path."""
        base = tests.support.MockBase()
        base.conf.pluginconfpath = ['/wrong', PLUGINS]
        self.plugins._run_init(base, None)
        lucky = self.plugins.plugins[0]
        conf = lucky.read_config(base.conf)
        self.assertTrue(conf.getboolean('main', 'enabled'))
        self.assertEqual(conf.get('main', 'wanted'), '/to/be/haunted')
        base.close()

    def test_disabled(self):
        """A plugin disabled via its config file is not loaded."""
        base = tests.support.MockBase()
        base.conf.pluginconfpath = [PLUGINS]
        self.plugins._run_init(base, None)
        self.assertFalse(any([p.name == 'disabled-plugin'
                              for p in self.plugins.plugins]))
        self.assertLength(self.plugins.plugin_cls, 1)
        self.assertEqual(self.plugins.plugin_cls[0].name, 'lucky')
        base.close()
class PluginSkipsTest(tests.support.TestCase):
    """Verify that a disable glob keeps matching plugins from loading."""

    def tearDown(self):
        self.plugins._unload()

    def test_skip(self):
        """A 'luck*' disable pattern filters out the 'lucky' plugin class."""
        self.plugins = dnf.plugin.Plugins()
        self.plugins._load(testconf(), ('luck*',), ())
        self.assertLength(self.plugins.plugin_cls, 0)
class PluginNonExistentTest(tests.support.TestCase):
    """Tests with a non-existent plugin."""

    def test_logs_traceback(self):
        """Test whether the traceback is logged if a plugin cannot be imported."""
        # Fake package with an empty search path, so the submodule import fails.
        package = dnf.pycomp.ModuleType('testpkg')
        package.__path__ = []
        stream = dnf.pycomp.StringIO()
        # Capture dnf's SUBDEBUG-level logging while the import is attempted.
        with tests.support.wiretap_logs('dnf', dnf.logging.SUBDEBUG, stream):
            dnf.plugin._import_modules(package, ('nonexistent.py',))
        # Py2 and py3 word the ImportError differently.
        end = ('Error: No module named \'testpkg\'\n' if dnf.pycomp.PY3
               else 'Error: No module named testpkg.nonexistent\n')
        self.assertTracebackIn(end, stream.getvalue())
| 3,844
|
Python
|
.py
| 84
| 39.416667
| 82
| 0.686563
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,796
|
test_goal.py
|
rpm-software-management_dnf/tests/test_goal.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import libdnf.transaction
import dnf.goal
import dnf.selector
import tests.support
class GoalTest(tests.support.DnfBaseTestCase):
    """Reason bookkeeping on goal resolutions against the 'main' test repo."""

    REPOS = ['main']
    INIT_SACK = True

    def test_get_reason(self):
        """get_reason distinguishes USER, GROUP and DEPENDENCY installs."""
        sltr = dnf.selector.Selector(self.sack)
        sltr.set(name='mrkite')
        grp_sltr = dnf.selector.Selector(self.sack)
        grp_sltr.set(name='lotus')
        self.goal.install(select=sltr)
        self.goal.install(select=grp_sltr)
        # Marking 'lotus' as a group member should give it the GROUP reason.
        self.goal.group_members.add('lotus')
        self.goal.run()
        installs = self.goal.list_installs()
        mrkite = [pkg for pkg in installs if pkg.name == 'mrkite'][0]
        lotus = [pkg for pkg in installs if pkg.name == 'lotus'][0]
        # 'trampoline' is pulled in only as a dependency of the above.
        trampoline = [pkg for pkg in installs if pkg.name == 'trampoline'][0]
        self.assertEqual(self.goal.get_reason(lotus), libdnf.transaction.TransactionItemReason_GROUP)
        self.assertEqual(self.goal.get_reason(mrkite), libdnf.transaction.TransactionItemReason_USER)
        self.assertEqual(self.goal.get_reason(trampoline), libdnf.transaction.TransactionItemReason_DEPENDENCY)

    def test_group_reason(self):
        """group_reason returns the expected reason for a group member."""
        hole = self.sack.query().filter(name='hole')[0]
        self.goal.group_members.add('hole')
        self.assertEqual(libdnf.transaction.TransactionItemReason_GROUP, self.goal.group_reason(hole, libdnf.transaction.TransactionItemReason_GROUP))
        self.assertEqual(libdnf.transaction.TransactionItemReason_DEPENDENCY, self.goal.group_reason(hole, libdnf.transaction.TransactionItemReason_DEPENDENCY))
| 2,616
|
Python
|
.py
| 47
| 50.851064
| 160
| 0.743941
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,797
|
test_provides.py
|
rpm-software-management_dnf/tests/test_provides.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import tests.support
class ProvidesTest(tests.support.DnfBaseTestCase):
    """Base.provides() lookups against the 'main' test repository."""

    REPOS = ['main']

    def test_file(self):
        # File provides match both by glob and by exact absolute path.
        self.assertLength(self.base.provides("*ais*smile")[0], 1)
        self.assertLength(self.base.provides("/raised/smile")[0], 1)

    def test_name(self):
        # Provide names, including parenthesised ones, are matched.
        self.assertLength(self.base.provides("henry(the_horse)")[0], 1)
        self.assertLength(self.base.provides("lotus")[0], 2)

    def test_glob(self):
        # Glob and exact queries agree; a one-element list argument behaves
        # like a bare string pattern.
        self.assertLength(self.base.provides("henry(*)")[0], 1)
        self.assertEqual(set(self.base.provides("dup*")[0]), set(self.base.provides('dup')[0]))
        self.assertEqual(set(self.base.provides(["dup*"])[0]), set(self.base.provides('dup')[0]))
| 1,765
|
Python
|
.py
| 32
| 51.625
| 97
| 0.732019
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,798
|
test_history.py
|
rpm-software-management_dnf/tests/test_history.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import libdnf.transaction
import dnf.history
import tests.support
from tests.support import mock
'''
class NEVRAOperationsTest(tests.support.TestCase):
"""Unit tests of dnf.history.NEVRAOperations."""
def test_add_erase_installed(self):
"""Test add with an erasure of NEVRA which was installed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Install', 'tour-0:4.6-1.noarch', obsoleted_nevras=('lotus-0:3-16.x86_64',))
ops.add('Erase', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Erase', 'lotus-0:3-16.x86_64', None, set()),))
def test_add_erase_removed(self):
"""Test add with an erasure of NEVRA which was removed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Erase', 'tour-0:4.6-1.noarch')
self.assertRaises(
ValueError,
ops.add, 'Erase', 'tour-0:4.6-1.noarch')
def test_add_install_installed(self):
"""Test add with two installs of the same NEVRA."""
ops = dnf.history.NEVRAOperations()
ops.add('Install', 'tour-0:4.6-1.noarch')
self.assertRaises(
ValueError,
ops.add, 'Install', 'tour-0:4.6-1.noarch')
def test_add_install_removed(self):
"""Test add with an install of NEVRA which was removed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Erase', 'tour-0:4.6-1.noarch')
ops.add('Install', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Reinstall', 'tour-0:4.6-1.noarch', 'tour-0:4.6-1.noarch', set()),))
def test_add_obsoleted_installed(self):
"""Test add with an obsoleted NEVRA which was installed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Install', 'lotus-0:3-16.x86_64')
ops.add('Install', 'tour-0:4.6-1.noarch', obsoleted_nevras=('lotus-0:3-16.x86_64',))
self.assertCountEqual(
ops,
(('Install', 'tour-0:4.6-1.noarch', None, set()),))
def test_add_obsoleted_obsoleted(self):
"""Test add with an obsoleted NEVRA which was obsoleted before."""
ops = dnf.history.NEVRAOperations()
ops.add(
'Install',
'tour-0:4.6-1.noarch',
obsoleted_nevras=('lotus-0:3-16.x86_64', 'mrkite-0:2-0.x86_64')
)
ops.add(
'Install',
'pepper-0:20-0.x86_64',
obsoleted_nevras=('lotus-0:3-16.x86_64', 'librita-0:1-1.x86_64')
)
self.assertCountEqual(
ops,
(
(
'Install',
'tour-0:4.6-1.noarch',
None,
{'lotus-0:3-16.x86_64', 'mrkite-0:2-0.x86_64'}
),
(
'Install',
'pepper-0:20-0.x86_64',
None,
{'lotus-0:3-16.x86_64', 'librita-0:1-1.x86_64'}
)
)
)
def test_add_obsoleted_removed(self):
"""Test add with an obsoleted NEVRA which was removed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Erase', 'lotus-0:3-16.x86_64')
self.assertRaises(
ValueError,
ops.add, 'Install', 'tour-0:4.6-1.noarch', obsoleted_nevras=('lotus-0:3-16.x86_64',))
def test_add_reinstall_installed(self):
"""Test add with a reinstall of NEVRA which was installed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Install', 'tour-0:4.6-1.noarch')
ops.add('Reinstall', 'tour-0:4.6-1.noarch', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Install', 'tour-0:4.6-1.noarch', None, set()),))
def test_add_replace_installed(self):
"""Test add with a replacing NEVRA which was installed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Install', 'tour-0:4.8-1.noarch')
self.assertRaises(
ValueError,
ops.add, 'Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
def test_add_replace_opposite(self):
"""Test add with a replacement which was done before, but swapped."""
ops = dnf.history.NEVRAOperations()
ops.add('Downgrade', 'tour-0:4.6-1.noarch', 'tour-0:4.8-1.noarch')
ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Reinstall', 'tour-0:4.8-1.noarch', 'tour-0:4.8-1.noarch', set()),))
def test_add_replace_opposite_manual(self):
"""Test add with a manual replacement which was done before, but swapped."""
ops = dnf.history.NEVRAOperations()
ops.add('Erase', 'tour-0:4.8-1.noarch')
ops.add('Install', 'tour-0:4.6-1.noarch')
ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Reinstall', 'tour-0:4.8-1.noarch', 'tour-0:4.8-1.noarch', set()),))
def test_add_replace_removed(self):
"""Test add with a replacing NEVRA which was removed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Erase', 'tour-0:4.8-1.noarch')
ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Reinstall', 'tour-0:4.8-1.noarch', 'tour-0:4.8-1.noarch', set()),
('Erase', 'tour-0:4.6-1.noarch', None, set())))
def test_add_replaced_opposite(self):
"""Test add with a replaced NEVRA which replaced a NEVRA before in the opposite direction."""
ops = dnf.history.NEVRAOperations()
ops.add('Downgrade', 'tour-0:4.6-1.noarch', 'tour-0:4.9-1.noarch')
ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Erase', 'tour-0:4.9-1.noarch', None, set()),
('Install', 'tour-0:4.8-1.noarch', None, set())))
def test_add_replaced_removed(self):
"""Test add with a replaced NEVRA which was removed before."""
ops = dnf.history.NEVRAOperations()
ops.add('Erase', 'tour-0:4.6-1.noarch')
self.assertRaises(
ValueError,
ops.add, 'Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
def test_add_replaced_reinstall(self):
"""Test add with a replaced NEVRA which was reinstalled before."""
ops = dnf.history.NEVRAOperations()
ops.add('Reinstall', 'tour-0:4.6-1.noarch', 'tour-0:4.6-1.noarch')
ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
self.assertCountEqual(
ops,
(('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch', set()),))
def test_add_replaced_replacement(self):
"""Test add with a replaced NEVRA which replaced a NEVRA before."""
ops = dnf.history.NEVRAOperations()
ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
self.assertCountEqual(
ops,
(('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.6-1.noarch', set()),))
def test_addition(self):
"""Test addition of two instances."""
left_ops = dnf.history.NEVRAOperations()
left_ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
right_ops = dnf.history.NEVRAOperations()
right_ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
expected_ops = dnf.history.NEVRAOperations()
expected_ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.6-1.noarch')
result_ops = left_ops + right_ops
self.assertEqual(result_ops, expected_ops)
def test_addition_inplace(self):
"""Test in-place addition of two instances."""
left_ops = dnf.history.NEVRAOperations()
left_ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
right_ops = dnf.history.NEVRAOperations()
right_ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
expected_ops = dnf.history.NEVRAOperations()
expected_ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.6-1.noarch')
left_ops += right_ops
self.assertEqual(left_ops, expected_ops)
def test_equality(self):
"""Test equality of two equal instances."""
left_ops = dnf.history.NEVRAOperations()
left_ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
right_ops = dnf.history.NEVRAOperations()
right_ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
is_equal = left_ops == right_ops
self.assertTrue(is_equal)
def test_equality_differentcontent(self):
"""Test equality of two instances with different contents."""
left_ops = dnf.history.NEVRAOperations()
left_ops.add('Downgrade', 'tour-0:4.6-1.noarch', 'tour-0:4.8-1.noarch')
right_ops = dnf.history.NEVRAOperations()
right_ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
is_equal = left_ops == right_ops
self.assertFalse(is_equal)
def test_equality_differentlength(self):
    """An empty instance does not compare equal to a non-empty one."""
    empty = dnf.history.NEVRAOperations()
    nonempty = dnf.history.NEVRAOperations()
    nonempty.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    self.assertFalse(empty == nonempty)
def test_equality_differenttype(self):
    """Comparing against an object of an unrelated type yields False."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    self.assertFalse(ops == 'tour-0:4.8-1.noarch')
def test_equality_identity(self):
    """An instance always compares equal to itself."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    self.assertTrue(ops == ops)
def test_inequality(self):
    """Instances holding different operations are unequal via ``!=``."""
    downgrade = dnf.history.NEVRAOperations()
    downgrade.add('Downgrade', 'tour-0:4.6-1.noarch', 'tour-0:4.8-1.noarch')
    update = dnf.history.NEVRAOperations()
    update.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    self.assertTrue(downgrade != update)
def test_inequality_equal(self):
    """``!=`` is False for two instances holding the same operation."""
    first = dnf.history.NEVRAOperations()
    first.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    second = dnf.history.NEVRAOperations()
    second.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    self.assertFalse(first != second)
def test_iterator(self):
    """Iteration yields the single stored operation, then stops."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    # Exhausting the iterator must produce exactly one 4-tuple.
    self.assertEqual(
        list(iter(ops)),
        [('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch', set())])
def test_length(self):
    """``len()`` reports the number of stored operations."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.8-1.noarch', 'tour-0:4.6-1.noarch')
    self.assertEqual(len(ops), 1)
def test_membership(self):
    """A stored operation is reported as contained."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
    op = ('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch', ())
    self.assertTrue(op in ops)
def test_membership_differentnevra(self):
    """An operation with a different replacing NEVRA is not contained."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
    op = ('Update', 'pepper-0:20-0.x86_64', 'tour-0:4.8-1.noarch', ())
    self.assertFalse(op in ops)
def test_membership_differentobsoleted(self):
    """An operation with different obsoleted NEVRAs is not contained."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
    # Same state and NEVRAs, but with an extra obsoleted package.
    op = ('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch',
          ('pepper-0:20-0.x86_64',))
    self.assertFalse(op in ops)
def test_membership_differentreplaced(self):
    """An operation with a different replaced NEVRA is not contained."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
    op = ('Update', 'tour-0:4.9-1.noarch', 'pepper-0:20-0.x86_64', ())
    self.assertFalse(op in ops)
def test_membership_differentstate(self):
    """An operation with a different state is not contained."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
    op = ('Downgrade', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch', ())
    self.assertFalse(op in ops)
def test_membership_differenttype(self):
    """An object of an unrelated type is never contained."""
    ops = dnf.history.NEVRAOperations()
    ops.add('Update', 'tour-0:4.9-1.noarch', 'tour-0:4.8-1.noarch')
    self.assertFalse('tour-0:4.9-1.noarch' in ops)
class TransactionConverterTest(tests.support.TestCase):
    """Unit tests of dnf.history.TransactionConverter."""

    @staticmethod
    def _available_pkg(sack, nevra):
        """Return the first available package in *sack* matching *nevra*."""
        return next(iter(sack.query().available()._nevra(nevra)))

    @staticmethod
    def _installed_pkg(sack, nevra):
        """Return the first installed package in *sack* matching *nevra*."""
        return next(iter(sack.query().installed()._nevra(nevra)))

    def assert_transaction_equal(self, actual, expected):
        """Assert that two transactions contain the same items (any order)."""
        self.assertCountEqual(self.transaction2tuples(actual),
                              self.transaction2tuples(expected))

    def test_find_available_na(self):
        """Test finding with an unavailable NEVRA."""
        sack = tests.support.mock_sack('main')
        converter = dnf.history.TransactionConverter(sack)
        with self.assertRaises(dnf.exceptions.PackagesNotAvailableError) as ctx:
            converter._find_available('none-1-0.noarch')
        # The offending spec must be carried on the exception.
        self.assertEqual(ctx.exception.pkg_spec, 'none-1-0.noarch')

    def test_find_installed_ni(self):
        """Test finding with an uninstalled NEVRA."""
        sack = tests.support.mock_sack('main')
        converter = dnf.history.TransactionConverter(sack)
        with self.assertRaises(dnf.exceptions.PackagesNotInstalledError) as ctx:
            converter._find_installed('none-1-0.noarch')
        self.assertEqual(ctx.exception.pkg_spec, 'none-1-0.noarch')

    def test_convert_downgrade(self):
        """Test conversion with a downgrade."""
        operations = dnf.history.NEVRAOperations()
        operations.add('Downgrade', 'tour-4.6-1.noarch', 'tour-5-0.noarch',
                       ['hole-1-1.x86_64'])
        sack = tests.support.mock_sack('main')
        converter = dnf.history.TransactionConverter(sack)
        actual = converter.convert(operations)
        expected = dnf.transaction.Transaction()
        expected.add_downgrade(
            self._available_pkg(sack, 'tour-4.6-1.noarch'),
            self._installed_pkg(sack, 'tour-5-0.noarch'),
            [self._installed_pkg(sack, 'hole-1-1.x86_64')])
        self.assert_transaction_equal(actual, expected)

    def test_convert_erase(self):
        """Test conversion with an erasure."""
        operations = dnf.history.NEVRAOperations()
        operations.add('Erase', 'pepper-20-0.x86_64')
        sack = tests.support.mock_sack()
        converter = dnf.history.TransactionConverter(sack)
        actual = converter.convert(operations)
        expected = dnf.transaction.Transaction()
        expected.add_erase(self._installed_pkg(sack, 'pepper-20-0.x86_64'))
        self.assert_transaction_equal(actual, expected)

    def test_convert_install(self):
        """Test conversion with an installation."""
        operations = dnf.history.NEVRAOperations()
        operations.add('Install', 'lotus-3-16.x86_64',
                       obsoleted_nevras=['hole-1-1.x86_64'])
        sack = tests.support.mock_sack('main')
        converter = dnf.history.TransactionConverter(sack)
        actual = converter.convert(operations, libdnf.transaction.TransactionItemReason_USER)
        expected = dnf.transaction.Transaction()
        expected.add_install(
            self._available_pkg(sack, 'lotus-3-16.x86_64'),
            [self._installed_pkg(sack, 'hole-1-1.x86_64')],
            libdnf.transaction.TransactionItemReason_USER)
        self.assert_transaction_equal(actual, expected)

    def test_convert_reinstall(self):
        """Test conversion with a reinstallation."""
        operations = dnf.history.NEVRAOperations()
        operations.add('Reinstall', 'pepper-20-0.x86_64', 'pepper-20-0.x86_64',
                       ['hole-1-1.x86_64'])
        sack = tests.support.mock_sack('main')
        converter = dnf.history.TransactionConverter(sack)
        actual = converter.convert(operations)
        expected = dnf.transaction.Transaction()
        expected.add_reinstall(
            self._available_pkg(sack, 'pepper-20-0.x86_64'),
            self._installed_pkg(sack, 'pepper-20-0.x86_64'),
            [self._installed_pkg(sack, 'hole-1-1.x86_64')])
        self.assert_transaction_equal(actual, expected)

    def test_upgrade(self):
        """Test conversion with an upgrade."""
        operations = dnf.history.NEVRAOperations()
        operations.add('Update', 'pepper-20-1.x86_64', 'pepper-20-0.x86_64',
                       ['hole-1-1.x86_64'])
        sack = tests.support.mock_sack('updates')
        converter = dnf.history.TransactionConverter(sack)
        actual = converter.convert(operations)
        expected = dnf.transaction.Transaction()
        expected.add_upgrade(
            self._available_pkg(sack, 'pepper-20-1.x86_64'),
            self._installed_pkg(sack, 'pepper-20-0.x86_64'),
            [self._installed_pkg(sack, 'hole-1-1.x86_64')])
        self.assert_transaction_equal(actual, expected)

    @staticmethod
    def transaction2tuples(transaction):
        """Convert a transaction to an iterable of comparable tuples."""
        for item in transaction:
            yield (item.op_type, item.installed, item.erased, item.obsoleted,
                   item.reason)
'''
| 20,057
|
Python
|
.py
| 395
| 41.298734
| 101
| 0.615231
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
15,799
|
test_package.py
|
rpm-software-management_dnf/tests/test_package.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import binascii
import hawkey
import rpm
from dnf.pycomp import long
import tests.support
from tests.support import mock
# Reference checksums and size for the bundled test RPM at
# tests.support.TOUR_44_PKG_PATH; used by the verify/checksum tests below.
TOUR_MD5 = binascii.unhexlify("68e9ded8ea25137c964a638f12e9987c")
TOUR_SHA256 = binascii.unhexlify("ce77c1e5694b037b6687cf0ab812ca60431ec0b65116abbb7b82684f0b092d62")
# Deliberately corrupted MD5 (first byte flipped) for the negative case.
TOUR_WRONG_MD5 = binascii.unhexlify("ffe9ded8ea25137c964a638f12e9987c")
TOUR_SIZE = 2317
class PackageTest(tests.support.DnfBaseTestCase):
    """Unit tests of dnf.package.Package against a mock 'main' repository."""

    REPOS = ['main']

    def setUp(self):
        super(PackageTest, self).setUp()
        # Use the second available "pepper" package as the common test subject.
        self.pkg = self.sack.query().available().filter(name="pepper")[1]

    def test_from_cmdline(self):
        """_from_cmdline is True only for packages added via the cmdline repo."""
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        self.assertTrue(local_pkg._from_cmdline)
        self.assertFalse(self.pkg._from_cmdline)

    def test_from_system(self):
        """_from_system is True only for installed packages."""
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertTrue(pkg._from_system)
        self.assertFalse(self.pkg._from_system)

    def test_header(self):
        """_header returns an rpm.hdr; raises IOError for a missing file."""
        self.sack.create_cmdline_repo()
        pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        header = pkg._header
        self.assertIsInstance(header, rpm.hdr)

        def fn_getter():
            return tests.support.NONEXISTENT_FILE

        # Point localPkg at a nonexistent path so reading the header fails.
        with mock.patch.object(pkg, 'localPkg', fn_getter):
            with self.assertRaises(IOError):
                pkg._header

    # rpm.hdr() is not easy to construct with custom data, we just return a string
    # instead, as we don't actually need an instance of rpm.hdr for the test
    @mock.patch("rpm.TransactionSet.dbMatch", lambda self, a, b: iter(["package_header_test_data"]))
    def test_get_header(self):
        """get_header reads from the rpmdb for installed pkgs, None otherwise."""
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        header = pkg.get_header()
        self.assertEqual(header, "package_header_test_data")
        # An available (not installed) package has no rpmdb header.
        pkg = self.sack.query().available().filter(name="pepper")[0]
        header = pkg.get_header()
        self.assertEqual(header, None)

    @mock.patch("dnf.package.Package.rpmdbid", long(3))
    def test_idx(self):
        """ pkg.idx is an int. """
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertEqual(type(pkg.idx), int)

    def test_pkgtup(self):
        """pkgtup is the (name, arch, epoch, version, release) 5-tuple."""
        self.assertEqual(self.pkg.pkgtup, ('pepper', 'x86_64', '0', '20', '0'))

    @mock.patch("dnf.package.Package.location", 'f/foo.rpm')
    def test_localPkg(self):
        """localPkg resolves to the baseurl path or the cache dir."""
        self.pkg.repo.basecachedir = '/cachedir'
        # A file:// baseurl makes the package local — no caching needed.
        self.pkg.repo.baseurl = ['file:///mnt/cd']
        self.assertTrue(self.pkg._is_local_pkg())
        self.assertEqual(self.pkg.localPkg(), '/mnt/cd/f/foo.rpm')
        # A remote baseurl means the package must live in the repo cache.
        self.pkg.repo.baseurl = ['http://remote']
        self.assertFalse(self.pkg._is_local_pkg())
        self.assertEqual(self.pkg.localPkg(),
                         self.pkg.repo._repo.getCachedir() + '/packages/foo.rpm')

    def test_verify(self):
        """verifyLocalPkg checks size and checksum against the local file."""
        with mock.patch.object(self.pkg, 'localPkg',
                               return_value=tests.support.TOUR_44_PKG_PATH):
            self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_MD5)
            self.pkg._size = TOUR_SIZE
            self.assertTrue(self.pkg.verifyLocalPkg())
            # A corrupted checksum must fail verification.
            self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_WRONG_MD5)
            self.assertFalse(self.pkg.verifyLocalPkg())

    def test_return_id_sum(self):
        """returnIdSum yields (checksum-name, hex-digest)."""
        self.pkg._chksum = (hawkey.CHKSUM_MD5, TOUR_MD5)
        self.assertEqual(self.pkg.returnIdSum(),
                         ('md5', '68e9ded8ea25137c964a638f12e9987c'))

    def test_verify_local(self):
        """A cmdline package verifies against its own on-disk file."""
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        self.assertEqual(local_pkg.reponame, hawkey.CMDLINE_REPO_NAME)
        self.assertTrue(local_pkg.verifyLocalPkg())

    def test_chksum_local(self):
        """A cmdline package's checksum is the file's SHA-256 digest."""
        self.sack.create_cmdline_repo()
        local_pkg = self.sack.add_cmdline_package(tests.support.TOUR_44_PKG_PATH)
        chksum = local_pkg._chksum
        self.assertEqual(chksum[0], hawkey.CHKSUM_SHA256)
        self.assertEqual(chksum[1], TOUR_SHA256)

    def test_verify_installed(self):
        """verifyLocalPkg raises ValueError for an installed package."""
        pkg = self.sack.query().installed().filter(name="pepper")[0]
        self.assertRaises(ValueError, pkg.verifyLocalPkg)
| 5,397
|
Python
|
.py
| 106
| 43.773585
| 100
| 0.68509
|
rpm-software-management/dnf
| 1,227
| 411
| 56
|
GPL-2.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|