code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# dr14_t.meter: compute the DR14 value of the given audiofiles
# Copyright (C) 2011 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
from dr14tmeter.audio_math import *
import wave
def wav_write(filename, Fs, Y):
    """Write the float signal Y (expected range [-1, 1]) to a 16-bit PCM wav file.

    filename -- output file path.
    Fs       -- sample rate in Hz (converted to int).
    Y        -- numpy array, shape (nframes,) for mono or (nframes, nchannels).
    """
    amplitude = 2.0 ** 16 - 1.0
    wav_file = wave.open(filename, "w")

    s = Y.shape
    # A 2-D array carries one column per channel; a 1-D array is mono.
    if len(s) > 1:
        nchannels = s[1]
    else:
        nchannels = 1

    sampwidth = 2  # 16-bit samples
    framerate = int(Fs)
    nframes = s[0]
    comptype = "NONE"
    compname = "no comp"
    wav_file.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))

    # Scale [-1, 1] floats into the signed 16-bit range before serializing.
    Y_s = numpy.int16((amplitude / 2.0) * Y)
    # tobytes() replaces the tostring() alias that was removed in numpy >= 1.20.
    wav_file.writeframes(Y_s.tobytes())
    wav_file.close()
| magicgoose/dr14_t.meter | dr14tmeter/wav_write.py | Python | gpl-3.0 | 1,401 |
import tensorflow as tf
import numpy as np
'''
tf.estimator is a high-level TensorFlow library that simplifies the
mechanics of machine learning, including the following:
1. running training loops
2. running evaluation loops
3. managing data sets
tf.estimator defines many common models.
'''
# NOTE(review): this script uses the TensorFlow 1.x API
# (tf.estimator.inputs was removed in TensorFlow 2.x).

# Declare a single numeric feature named "x".
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)

# Toy data: y is approximately -x + 1.
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])

# Training input: shuffle and repeat indefinitely (num_epochs=None).
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
# Deterministic input fns used only for metric evaluation.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)

estimator.train(input_fn=input_fn, steps=1000)

# evaluate how well our model did
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics)
| AppleFairy/machinelearning | framework_study/tensorflow/tutorial_r1.4/getting_started_with_tensorflow/estimator.py | Python | mit | 1,249 |
def connect(url, **kwargs):
    """Open a client connection to a remote vaex server (tornado by default).

    The implementation lives in the tornado client module; the import is
    deferred so the dependency is only loaded when a connection is made.
    """
    from .tornado_client import connect as tornado_connect
    return tornado_connect(url, **kwargs)
| maartenbreddels/vaex | packages/vaex-server/vaex/server/__init__.py | Python | mit | 204 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# python import
import base64
# Djando import
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
class Cer(models.Model):
    # Waste-catalogue (CER) entry: a code plus its Italian description.
    id = models.BigAutoField(primary_key=True)
    codice = models.CharField(max_length=10, null=False)
    descrizione = models.CharField(max_length=800, null=False)

    class Meta:
        verbose_name = 'CER'
        verbose_name_plural = 'CER'
class User(AbstractUser):
    # Custom user model extending Django's AbstractUser with producer
    # (company) details and Italian fiscal identifiers.
    id = models.BigAutoField(primary_key=True)
    producer = models.CharField(max_length=250, null=False)
    p_iva = models.CharField(max_length=11, null=False)      # VAT number
    c_fiscale = models.CharField(max_length=16, null=False)  # fiscal code
    cell = models.CharField(max_length=10, null=True)
    fax = models.CharField(max_length=10, null=True)
    reference = models.CharField(max_length=50, null=True)

    def __unicode__(self):
        # NOTE(review): this renders the (hashed) password as the object's
        # text representation -- looks unintentional; confirm and consider
        # returning username or producer instead.
        return unicode(self.password) or u''

    class Meta:
        verbose_name = 'Utenti'
        verbose_name_plural = 'Utenti'
        unique_together = (("producer", "email"), )
        # pass
# Relax NOT NULL constraints on fields inherited from AbstractUser by
# mutating the model's _meta at import time.
# NOTE(review): this relies on the positional order of _meta.fields and is
# fragile across Django versions -- verify the indices still match.
User._meta.fields[0].null = True
# last log
User._meta.fields[1].null = True
#username
User._meta.fields[3].null = True
#first_name
User._meta.fields[4].null = True
#last_name
User._meta.fields[5].null = True
class Module(models.Model):
    # Base record for a generated document ("module"), linked to the
    # requesting user.
    id = models.BigAutoField(primary_key=True)
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument.
    user = models.ForeignKey(User)
    date = models.DateField(auto_now_add=True, null=False)
    # Presumably base64-encoded PDF data (the module imports base64) -- confirm.
    pdf = models.TextField(db_column="pdf", blank=False, null=False)
    typeModele = models.CharField(max_length=20, blank=False, null=False)
class ModuleRitiro(Module):
    # Pickup-request module; inherits all fields from Module.
    class Meta:
        verbose_name = 'module_ritiro'
        verbose_name_plural = 'Moduli ritiro'
class ModulePreventivo(Module):
    # Quote-request module; inherits all fields from Module.
    class Meta:
        verbose_name = 'module_preventivo'
        verbose_name_plural = 'Moduli preventivo'
| Vittorio92/IrpiniaRecuperiWeb | service/models.py | Python | apache-2.0 | 1,943 |
# DDE support for Pythonwin
#
# Seems to work fine (in the context that IE4 seems to have broken
# DDE on _all_ NT4 machines I have tried, but only when a "Command Prompt" window
# is open. Strange, but true. If you have problems with this, close all Command Prompts!
import win32ui
import win32api, win32con
from pywin.mfc import object
from dde import *
import sys, traceback
class DDESystemTopic(object.Object):
    """DDE 'System' topic: forwards Execute requests to the application."""
    def __init__(self, app):
        self.app = app
        object.Object.__init__(self, CreateServerSystemTopic())
    def Exec(self, data):
        # Called by the DDE runtime with the command string to execute.
        # Errors are reported but swallowed so the DDE conversation survives.
        try:
            # print "Executing", cmd
            self.app.OnDDECommand(data)
        except:
            t,v,tb = sys.exc_info()
            # The DDE Execution failed.
            print "Error executing DDE command."
            traceback.print_exception(t,v,tb)
        return 0
class DDEServer(object.Object):
    """DDE server wrapper owning the system topic; shuts down cleanly."""
    def __init__(self, app):
        self.app = app
        object.Object.__init__(self, CreateServer())
        self.topic = self.item = None
    def CreateSystemTopic(self):
        # Factory used by the underlying server to create the System topic.
        return DDESystemTopic(self.app)
    def Shutdown(self):
        # Tear down the native server first, then release topic/item objects.
        self._obj_.Shutdown()
        self._obj_.Destroy()
        if self.topic is not None:
            self.topic.Destroy()
            self.topic = None
        if self.item is not None:
            self.item.Destroy()
            self.item = None
    def OnCreate(self):
        return 1
    def Status(self, msg):
        # Best effort: the status bar may not exist during startup/shutdown.
        try:
            win32ui.SetStatusText(msg)
        except win32ui.error:
            pass
| zhanqxun/cv_fish | pythonwin/pywin/framework/intpydde.py | Python | apache-2.0 | 1,390 |
'''
Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
This will just hold some common functions used for testing.
Created on Aug 15, 2014
@author: dfleck
'''
from twisted.internet import defer,reactor
from twisted.python import log
from gmu.chord.ChordNode import ChordNode
from gmu.chord.GmuServerFactory import GmuServerFactory
from gmu.chord.MetricsMessageObserver import MetricsMessageObserver
from gmu.chord.CopyEnvelope import CopyEnvelope
from gmu.chord.FingerEntry import FingerEntry
from gmu.chord import Config, Utils, ClassIDFactory
from SampleClient import SampleClient
from gmu.netclient.classChordNetworkChord import classChordNetworkChord
import datetime, random, hashlib
from TestMessageObserver import TestMessageObserver
import os, psutil
import socket
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
# Placeholder shown for missing addresses and a socket-family/type lookup
# table, both used by showOpenConnections() below.
AD = "-"
# AF_INET6 may be missing on IPv4-only builds; fall back to a dummy sentinel.
AF_INET6 = getattr(socket, 'AF_INET6', object())
proto_map = {
    (AF_INET, SOCK_STREAM): 'tcp',
    (AF_INET6, SOCK_STREAM): 'tcp6',
    (AF_INET, SOCK_DGRAM): 'udp',
    (AF_INET6, SOCK_DGRAM): 'udp6',
}
loggingOn = False
import sys
def printLogger(aDict):
    """Write *aDict* to stdout and flush immediately (simple log sink)."""
    out = sys.stdout
    out.write("%s\n" % (aDict,))
    out.flush()
@defer.inlineCallbacks
def waitForConnectionCache(_=None):
    """Give the connection cache time to close out (no-op when caching is off).

    The ignored first argument lets this be chained as a deferred callback.
    """
    if Config.USE_CONNECTION_CACHE:
        log.msg("Waiting for ConnectionCache to close out...")
        yield wait(Config.CONNECTION_CACHE_DELAY + 2)
def generateID(ipAddr, port, theEnclave, classID):
    '''Generates the node's ID from it's IP addr/port.
    The enclave is added on as the high order bits.
    The final ID looks like enclaveBits | uniq ID bits
    So, if the enclave is 0xFF11 and the uniq ID is 0x12345 theId=0xFF1112345
    '''
    if classID == None:
        # Build a random class spec
        classID = ClassIDFactory.generateID()

    # Hash the address/port pair into the unique (low-order) part of the ID.
    # NOTE(review): hashlib update() requires bytes on Python 3; this code
    # assumes Python 2 str arguments.
    h = hashlib.new('sha1')
    h.update(ipAddr)
    h.update(str(port))

    # Grab the bits from the ip/port hash
    ipPortBits = bin(int(h.hexdigest(), 16))
    ipPortInt = int(ipPortBits, 2)

    # Combine with enclave and class bits via the project helper.
    theId = Utils.generateNodeID(ipPortInt, theEnclave, classChars=classID)
    return theId
@defer.inlineCallbacks
def startupBootstrapNode(ip, port=12345, enclave='localhost', classID=None):
    '''Start a Bootstrap node.

    Returns (via deferred) the tuple (status, bsNode, testObserver).
    '''
    # Generate an ID
    nodeID = generateID(ip, port, enclave, classID)
    print("DEBUG: BS nodeID is %s" % nodeID)

    bsNode = ChordNode(ip, port, nodeID)
    serverFactory = GmuServerFactory(bsNode, unsafeTracebacks=True)

    # Observers attach themselves to the node; the metrics reference is
    # intentionally not kept.
    MetricsMessageObserver(bsNode)
    testObserver = TestMessageObserver(bsNode)

    enableAutoDiscovery = False
    # Join with no existing bootstrap list.
    status = yield bsNode.join(None, enableAutoDiscovery, enclave, None, True, serverFactory)
    defer.returnValue( (status, bsNode, testObserver) )
@defer.inlineCallbacks
def startupClientNode(ip, port, enclave, bootstrapNodeLocation, allowFloatingIP=True, classID=None):
    '''Start a client node and connect it to the network.

    NOTE(review): the original docstring said (status, node), but this
    actually returns the 3-tuple (status, normalNode, testObserver).
    '''
    # Generate an ID
    nodeID = generateID(ip, port, enclave, classID)
    print("DEBUG: Client nodeID is %s" % nodeID)

    normalNode = ChordNode(ip, port, nodeID, allowFloatingIP)
    # Observers register themselves with the node.
    MetricsMessageObserver(normalNode)
    testObserver = TestMessageObserver(normalNode)

    enableAutoDiscovery = False
    #import TestUtils
    #yield TestUtils.wait(4)
    authenticationPayload = "open-sesame"
    status = yield normalNode.join([ bootstrapNodeLocation ] , enableAutoDiscovery, enclave, authenticationPayload, False)
    defer.returnValue( (status, normalNode, testObserver) )
@defer.inlineCallbacks
def startNodeUsingAPI(ip, port, bootstrapNodeLocation, enclaveStr, useAutoDiscover, isBootstrapNode):
    '''Starts up a client node but uses the API.
    Once complete returns a tuple (joinStatus, clientAPI, networkAPI)
    Return a deferred which should fire after join succeeds.
    '''
    d = defer.Deferred()

    clientAPI = SampleClient(ip, port, None)
    networkAPI = classChordNetworkChord(clientAPI, port, ip)
    nodeID = networkAPI.generateNodeID(str(port), enclaveStr) # Get the ID with the bits on it we need. Use "port" because it'll be uniq for tests

    # Join the network
    if bootstrapNodeLocation is None:
        bootstrapNodeList = None
    else:
        bootstrapNodeList = [ bootstrapNodeLocation ]

    # Capture the local deferred in the status callback via a closure.
    callFunc = lambda result, payload: shouldSucceedCallback(result, payload, d)
    networkAPI.start(callFunc, nodeID, enclaveStr, "authenticate:succeed", bootstrapNodeList, isBootstrapNode, useAutoDiscover)

    # Wait for the join to finish
    joinStatus = yield d # This is the value returned from shoudlSucceedCallback

    # Now return everything
    defer.returnValue( (joinStatus, clientAPI, networkAPI) )
def shouldSucceedCallback(result, payload, deferToFire):
    '''Status callback from the networkAPI.start call. Should be true.

    Fires *deferToFire* with the result: callback on success, errback
    otherwise. The payload argument is ignored.
    '''
    if not result:
        deferToFire.errback(result)
    else:
        deferToFire.callback(result)
def defWait(dummy, seconds=5):
    # Deferred-chain friendly wrapper around wait(): ignores the prior result.
    return wait(seconds)
def wait(seconds=5):
    """Return a deferred that fires True after *seconds* (non-blocking sleep)."""
    d = defer.Deferred()
    print("Waiting for %d seconds..." % seconds)
    # simulate a delayed result by asking the reactor to schedule
    # gotResults in 2 seconds time
    reactor.callLater(seconds, d.callback, True)
    return d
def sendFlood(chordNode,messageNum,enclave, data=""):
    '''Send a flooding message to the enclave specified. Content is "messageNum".
    Returns a deferred status
    '''
    # Send out a flooding message
    msgText = { "type" : "STORE", "loc" : chordNode.nodeLocation, "msgNum" : messageNum, "data" : data }

    # Build the envelope: 10 minute TTL, flood routing within the enclave.
    env = CopyEnvelope()
    env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
    env['source'] = chordNode.nodeLocation
    env['type'] = 'flood'
    env['enclave'] = enclave # Flooding
    env['msgID'] = random.getrandbits(128) # TODO: Something better here!

    # Send the message
    d = chordNode.sendFloodingMessage(msgText, env)
    return d
def sendP2P(src, dst, messageNum, data=""):
    '''Send a P2P message to the dst node specified. Content is "messageNum".
    Returns a defered status
    '''
    # Message body mirrors sendFlood's; only the routing differs.
    msgText = { "type" : "STORE", "loc" : src.nodeLocation, "msgNum" : messageNum, "data": data }

    # Build the envelope: 10 minute TTL, addressed to the destination node id.
    env = CopyEnvelope()
    env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
    env['source'] = src.nodeLocation
    env['type'] = 'p2p'
    env['destination'] = dst.nodeLocation.id
    env['msgID'] = random.getrandbits(128) # TODO: Something better here!

    # Send the message
    d = src.sendSyncMessage(msgText, env)
    return d
def sendClassQuery(src, classSpec, messageNum, data=""):
    '''Send a Class query message to the class spec specified. Content is "messageNum".
    Returns a deferred status
    '''
    # Send out a class message
    msgText = { "type" : "STORE", "loc" : src.nodeLocation, "msgNum" : messageNum, "data": data }

    # Build the envelope: 10 minute TTL, routed by class specification.
    env = CopyEnvelope()
    env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
    env['source'] = src.nodeLocation
    env['type'] = 'classType'
    env['destination'] = classSpec
    env['msgID'] = random.getrandbits(128) # TODO: Something better here!

    # Send the message
    d = src.sendClassMessage(msgText, env)
    return d
def didNodeReceive(observers, node, messageNum):
    '''Return True if the given node's observer stored the message number,
    False if it did not (or if no observer is registered for the node).'''
    observer = next((o for o in observers if o.chordNode == node), None)
    if observer is None:
        log.err("didNodeReceive: could not find observer.")
        return False
    return observer.messageNumStored(messageNum)
def didReceive(observers, enclave, messageNum, expectedCount):
    '''Return True if exactly expectedCount nodes (in the given enclave, or
    all nodes when enclave == "ALL") stored the message, False otherwise.'''
    actualCount = sum(
        1
        for obs in observers
        if (enclave == 'ALL' or enclave in obs.chordNode.remote_getEnclaveNames())
        and obs.messageNumStored(messageNum)
    )
    log.err("didReceive: actualCount:%d expectedCount:%d" % (actualCount, expectedCount))
    return actualCount == expectedCount
def didNotReceive(observers, enclave, messageNum, expectedCount):
    '''Return True if exactly expectedCount nodes (in the given enclave, or
    all nodes when enclave == "ALL") did NOT store the message.'''
    actualCount = sum(
        1
        for obs in observers
        if (enclave == 'ALL' or enclave in obs.chordNode.remote_getEnclaveNames())
        and not obs.messageNumStored(messageNum)
    )
    if actualCount != expectedCount:
        log.err("didNotReceive: actualCount:%d expectedCount:%d" % (actualCount, expectedCount))
    return actualCount == expectedCount
def showOpenConnections():
    '''Print a netstat-like table of this process's open sockets (via psutil).'''
    # Get my PID
    pid = os.getpid()
    # Query this process's connections through psutil.
    p = psutil.Process(pid)
    conns = p.connections()
    templ = "%-5s %-30s %-30s %-13s %-6s "
    print(templ % (
        "Proto", "Local address", "Remote address", "Status", "PID" ))
    # Print output
    for c in conns:
        laddr = "%s:%s" % (c.laddr)
        raddr = ""
        if c.raddr:
            raddr = "%s:%s" % (c.raddr)
        print(templ % (
            proto_map[(c.family, c.type)],
            laddr,
            raddr or AD,
            c.status,
            pid or AD
        ))
    print("Total net connections: %d" % len(conns))
| danfleck/Class-Chord | network-client/src/tests/TestUtils.py | Python | apache-2.0 | 9,854 |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from __future__ import division, absolute_import
import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.exceptions import WorkflowException
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class AntiLoopTaskTest(BpmnWorkflowTestCase):
    """The example bpmn is actually a MultiInstance. It should not report that it is a looping task and
    it should fail when we try to terminate the loop"""

    def setUp(self):
        self.spec = self.load_workflow1_spec()

    def load_workflow1_spec(self):
        # Fixture: a MultiInstance task that is *not* a loop task.
        return self.load_workflow_spec('bpmnAntiLoopTask.bpmn','LoopTaskTest')

    def testRunThroughHappy(self):
        self.workflow = BpmnWorkflow(self.spec)
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_ready_user_tasks()
        self.assertTrue(len(ready_tasks) ==1)
        self.assertFalse(ready_tasks[0].task_spec.is_loop_task())
        # terminate_loop() must be rejected on a non-loop MultiInstance with
        # a specific, user-facing error message.
        try:
            ready_tasks[0].terminate_loop()
            self.fail("Terminate Loop should throw and error when called on a non-loop MultiInstance")
        except WorkflowException as ex:
            self.assertTrue(
                'The method terminate_loop should only be called in the case of a BPMN Loop Task' in (
                '%r' % ex),
                '\'The method terminate_loop should only be called in the case of a BPMN Loop Task\' should be a substring of error message: \'%r\'' % ex)
def suite():
    """Assemble every AntiLoopTaskTest case into a unittest suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(AntiLoopTaskTest)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.TextTestRunner(verbosity=2).run(suite())
| knipknap/SpiffWorkflow | tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py | Python | lgpl-3.0 | 1,753 |
# smartmirror.py
# requirements
# requests, feedparser, traceback, Pillow
from Tkinter import *
import locale
import threading
import time
import locale
import requests
import json
import traceback
import feedparser
import serial
import subprocess #checks if camera connected
from time import sleep
#from serial import SerialException
from PIL import Image, ImageTk
from contextlib import contextmanager
#classes stored in utilites below
from utilities.userData.data import Data
from utilities.facematch.FaceMatch import FaceMatch
from utilities.septa import Septa
from contextlib import contextmanager #check http://preshing.com/20110920/the-python-with-statement-by-example/
from serial import SerialException
# Module-wide configuration and hardware probes, executed at import time.
LOCALE_LOCK = threading.Lock()

ui_locale = "en_US.utf8" # e.g. 'fr_FR' fro French, '' as default
time_format = 12 # 12 or 24
date_format = "%b %d, %Y" # check python doc for strftime() for options
news_country_code = 'us'
weather_api_token = 'fbbdfae3c5f26c016c543398fc7f8cbf' # create account at https://darksky.net/dev/
weather_lang = 'en' # see https://darksky.net/dev/docs/forecast for full list of language parameters values
weather_unit = 'us' # see https://darksky.net/dev/docs/forecast for full list of unit parameters values
latitude = None # Set this if IP location lookup does not work for you (must be a string)
longitude = None # Set this if IP location lookup does not work for you (must be a string)
# Font sizes used by the Tk labels below.
xlarge_text_size = 94
large_text_size = 48
medium_text_size = 28
small_text_size = 18

#Serial port parameters
serial_speed = 9600
serial_port = '/dev/rfcomm0'
camera_folder = '/home/pi/Smart-Mirror/test/pictures_faces'
#setpa
############
#Septa API website
#http://www3.septa.org/hackathon/
septa_API = "http://www3.septa.org/hackathon/Arrivals"
stationID = 90815
numTrains = 5 #number of scheduled results
parameters = {"req1": stationID, "req2": numTrains} #req1 = Septa train station code / req2 number of results
direction = 1 # 0 for north / 1 for south
traintime = 1 # temp, used to parse out train schedules from numTrains (can only go up to numTrains)

#Bluetooth-Serial connection check#
#############
# send flags whether the Bluetooth serial link is available (1) or not (0).
send = 1
try:
    ser = serial.Serial(serial_port, serial_speed, timeout=1)
    print "Bluetooth Connected"
except serial.SerialException:
    print "No connection to the bluetooth device could be established"
    send=0

##############
#CAMERA CHECK#
##############
# Ask the Raspberry Pi firmware whether a camera is attached; the output
# looks like "supported=1 detected=1", so parse out the detected flag.
camera = subprocess.check_output(["vcgencmd","get_camera"])
#int(camera.strip()[-1]) #gets only 0 or 1 from detected status
section = camera.split(" ")[1]
detector = section.split("=")[-1]
print detector
if (int(detector)==1):
    print "Camera Detected"
    print camera
else:
    print "Camera not detected"
    print camera
##############
#@contextmanager
def setlocale(name): #thread proof function to work with locale
print "locale " + name
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
try:
yield locale.setlocale(locale.LC_ALL, name)
finally:
locale.setlocale(locale.LC_ALL, saved)
# maps open weather icons to
# icon reading is not impacted by the 'lang' parameter
icon_lookup = {
'clear-day': "assets/Sun.png", # clear sky day
'wind': "assets/Wind.png", #wind
'cloudy': "assets/Cloud.png", # cloudy day
'partly-cloudy-day': "assets/PartlySunny.png", # partly cloudy day
'rain': "assets/Rain.png", # rain day
'snow': "assets/Snow.png", # snow day
'snow-thin': "assets/Snow.png", # sleet day
'fog': "assets/Haze.png", # fog day
'clear-night': "assets/Moon.png", # clear sky night
'partly-cloudy-night': "assets/PartlyMoon.png", # scattered clouds night
'thunderstorm': "assets/Storm.png", # thunderstorm
'tornado': "assests/Tornado.png", # tornado
'hail': "assests/Hail.png" # hail
}
class FaceRec:
    """Thin wrapper around the face-matching utility; falls back to an
    error string when no camera was detected at startup."""
    def find(self):
        # detector is the module-level camera-detected flag ("0"/"1").
        print detector
        if (int(detector)==1):
            fm=FaceMatch(camera_folder)
            name=fm.getName()
        else:
            name=("camera not connected")
        return name
class TempTest(Frame):
    """Frame that reads temperature over the Bluetooth serial link,
    identifies the user via face recognition and shows a greeting."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.temp_data = StringVar()
        self.name = StringVar()
        self.weight = IntVar()
        if(int(send)==1): #if bluetooth connected
            self.measure()
            print "reading data"
        self.Person()
        self.StoreData()
        self.createWidgets()
    def measure(self):
        # Request data and read the answer over the serial link.
        ser.write("t")
        print("this is t")
        data = ser.readline()
        print str(data)
        # If the answer is not empty, process & display data
        if (data != ""):
            processed_data = data.split(",")
            self.temp_data.set("Temperature: " + str(data))
            # self.temperature.pack(side=LEFT, anchor=W)
    def Person(self):
        # Ask the face recognizer for a name; fall back to a placeholder.
        try:
            pt=FaceRec()
            name = pt.find() #calls fuction from FaceRec
            print ("name is " + name)
        except:
            name = "Person not found"
        self.name.set("Hello, " + name)
    def StoreData(self):
        # Best-effort persistence of the (name, weight) pair.
        try:
            store=Data()
            store.storeData(self.name, self.weight)
        except:
            print("data not stored")
    def createWidgets(self):
        # NOTE(review): self.name is rebound here from a StringVar to the
        # Label widget; textvariable still points at the original StringVar.
        self.name = Label(self, textvariable=self.name, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.name.pack(side=TOP, anchor=W)
        # self.temperature = Label(self, textvariable=self.temp_data, font=('Helvetica', small_text_size), fg="white", bg="black")
        # self.temperature.pack(side=TOP, anchor=E)
class Trains(Frame):
    """Frame showing upcoming SEPTA regional rail departures."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg="black")
        self.septaFrm = Frame(self, bg="black")
        self.septaFrm.pack(side=TOP, anchor = W)
        self.train1Lbl = Label(self, font=('Helvetica', small_text_size), justify = LEFT, fg="white", bg="black")
        self.train1Lbl.pack(side=TOP, anchor=E)
        self.train2Lbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.train2Lbl.pack(side=TOP, anchor=W)
        self.getTimes()
    def getTimes(self):
        # Translate the module-level direction flag into a heading label.
        if (direction ==1):
            direc = "Southbound"
        else:
            direc = "Northbound"
        x = Septa()
        # y = first scheduled train, z = second scheduled train.
        y = x.traintimes(septa_API, parameters, direction, traintime)
        z = x.traintimes(septa_API, parameters, direction, 2)
        # Indices into the result: presumably 1=departure timestamp,
        # 3=destination, 7=line, 21=status -- verify against Septa.traintimes.
        trainline = ("Line: " +y[7])
        destination = ("Destination: " +y[3])
        depart = ("Departure: " + y[1][11:16])
        status = ("Status: " + z[21])
        filtered = [direc, trainline, destination, depart, status]
        filtered = "\n".join(filtered)
        print(filtered)
        self.train1Lbl.config(text=(filtered))
        # self.train1Lbl.config(text=destination1)
        #self.train1Lbl.config(text=)
        #self.train1Lbl.config(text=filtered[3])
        # Second train's fields are computed but not displayed yet.
        trainline2 = z[7]
        destination2 = z[3]
        depart2 = z[1][11:16]
        status2 = z[21]
        filtered2 = (trainline2, destination2, depart2, status2)
        #self.train2Lbl.config(text=filtered[0])
        #self.train2Lbl.config(text=filtered[1])
        #self.train2Lbl.config(text=filtered[2])
        #self.train2Lbl.config(text=filtered[3])
class Clock(Frame):
    """Frame showing the current time, weekday and date; self-refreshing."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        # initialize time label
        self.time1 = ''
        self.timeLbl = Label(self, font=('Helvetica', large_text_size), fg="white", bg="black")
        self.timeLbl.pack(side=TOP, anchor=E)
        # initialize day of week
        self.day_of_week1 = ''
        self.dayOWLbl = Label(self, text=self.day_of_week1, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.dayOWLbl.pack(side=TOP, anchor=E)
        # initialize date label
        self.date1 = ''
        self.dateLbl = Label(self, text=self.date1, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.dateLbl.pack(side=TOP, anchor=E)
        self.tick()
    def tick(self):
        # print "test -->: " + ui_locale
        # with setlocale('en_US.utf8'):
        if time_format == 12:
            time2 = time.strftime('%I:%M %p') #hour in 12h format
        else:
            time2 = time.strftime('%H:%M') #hour in 24h format
        # "if 1:" is a leftover of the commented-out setlocale() context above.
        if 1:
            day_of_week2 = time.strftime('%A')
            date2 = time.strftime(date_format)
            # if time string has changed, update it
            if time2 != self.time1:
                self.time1 = time2
                self.timeLbl.config(text=time2)
            if day_of_week2 != self.day_of_week1:
                self.day_of_week1 = day_of_week2
                self.dayOWLbl.config(text=day_of_week2)
            if date2 != self.date1:
                self.date1 = date2
                self.dateLbl.config(text=date2)
        # calls itself every 200 milliseconds
        # to update the time display as needed
        # could use >200 ms, but display gets jerky
        self.timeLbl.after(200, self.tick)
class Weather(Frame):
    """Frame showing temperature, conditions icon, forecast summary and
    location, refreshed from the Dark Sky API every 10 minutes."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        # Cached last-rendered values, used to avoid redundant widget updates.
        self.temperature = ''
        self.forecast = ''
        self.location = ''
        self.currently = ''
        self.icon = ''
        self.degreeFrm = Frame(self, bg="black")
        self.degreeFrm.pack(side=TOP, anchor=W)
        self.temperatureLbl = Label(self.degreeFrm, font=('Helvetica', xlarge_text_size), fg="white", bg="black")
        self.temperatureLbl.pack(side=LEFT, anchor=N)
        self.iconLbl = Label(self.degreeFrm, bg="black")
        self.iconLbl.pack(side=LEFT, anchor=N, padx=20)
        self.currentlyLbl = Label(self, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.currentlyLbl.pack(side=TOP, anchor=W)
        self.forecastLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.forecastLbl.pack(side=TOP, anchor=W)
        self.locationLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.locationLbl.pack(side=TOP, anchor=W)
        self.get_weather()
    def get_ip(self):
        # Look up this machine's public IP (used for geolocation).
        try:
            ip_url = "http://jsonip.com/"
            req = requests.get(ip_url)
            ip_json = json.loads(req.text)
            return ip_json['ip']
        except Exception as e:
            traceback.print_exc()
            return "Error: %s. Cannot get ip." % e
    def get_weather(self):
        # Fetch current conditions (geolocating first if no fixed lat/lon
        # was configured) and update only the widgets whose value changed.
        try:
            if latitude is None and longitude is None:
                # get location
                location_req_url = "http://freegeoip.net/json/%s" % self.get_ip()
                r = requests.get(location_req_url)
                location_obj = json.loads(r.text)
                lat = location_obj['latitude']
                lon = location_obj['longitude']
                location2 = "%s, %s" % (location_obj['city'], location_obj['region_code'])
                # get weather
                weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, lat,lon,weather_lang,weather_unit)
            else:
                location2 = ""
                # get weather
                weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, latitude, longitude, weather_lang, weather_unit)
            r = requests.get(weather_req_url)
            weather_obj = json.loads(r.text)
            degree_sign= u'\N{DEGREE SIGN}'
            temperature2 = "%s%s" % (str(int(weather_obj['currently']['temperature'])), degree_sign)
            currently2 = weather_obj['currently']['summary']
            forecast2 = weather_obj["hourly"]["summary"]
            icon_id = weather_obj['currently']['icon']
            icon2 = None
            if icon_id in icon_lookup:
                icon2 = icon_lookup[icon_id]
            if icon2 is not None:
                if self.icon != icon2:
                    self.icon = icon2
                    image = Image.open(icon2)
                    image = image.resize((100, 100), Image.ANTIALIAS)
                    image = image.convert('RGB')
                    photo = ImageTk.PhotoImage(image)
                    self.iconLbl.config(image=photo)
                    # keep a reference so Tk does not garbage-collect the image
                    self.iconLbl.image = photo
            else:
                # remove image
                self.iconLbl.config(image='')
            if self.currently != currently2:
                self.currently = currently2
                self.currentlyLbl.config(text=currently2)
            if self.forecast != forecast2:
                self.forecast = forecast2
                self.forecastLbl.config(text=forecast2)
            if self.temperature != temperature2:
                self.temperature = temperature2
                self.temperatureLbl.config(text=temperature2)
            if self.location != location2:
                if location2 == ", ":
                    self.location = "Cannot Pinpoint Location"
                    self.locationLbl.config(text="Cannot Pinpoint Location")
                else:
                    self.location = location2
                    self.locationLbl.config(text=location2)
        except Exception as e:
            traceback.print_exc()
            print "Error: %s. Cannot get weather." % e
        # schedule the next refresh in 10 minutes
        self.after(600000, self.get_weather)
    @staticmethod
    def convert_kelvin_to_fahrenheit(kelvin_temp):
        # Helper not referenced elsewhere in this file.
        return 1.8 * (kelvin_temp - 273) + 32
class News(Frame):
    """Frame listing the top five Google News RSS headlines."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.config(bg='black')
        self.title = 'News!' # 'News' is more internationally generic
        self.newsLbl = Label(self, text=self.title, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.newsLbl.pack(side=TOP, anchor=W)
        self.headlinesContainer = Frame(self, bg="black")
        self.headlinesContainer.pack(side=TOP, anchor=S)
        self.get_headlines()
    def get_headlines(self):
        # Rebuild the headline list from the RSS feed, then reschedule.
        try:
            # remove all children
            for widget in self.headlinesContainer.winfo_children():
                widget.destroy()
            if news_country_code == None:
                headlines_url = "https://news.google.com/news?ned=us&output=rss"
            else:
                headlines_url = "https://news.google.com/news?ned=%s&output=rss" % news_country_code
            feed = feedparser.parse(headlines_url)
            # show only the first five entries
            for post in feed.entries[0:5]:
                headline = NewsHeadline(self.headlinesContainer, post.title)
                headline.pack(side=TOP, anchor=W)
        except Exception as e:
            traceback.print_exc()
            print "Error: %s. Cannot get news." % e
        # schedule the next refresh in 10 minutes
        self.after(600000, self.get_headlines)
class NewsHeadline(Frame):
    """Single headline row: newspaper icon plus the headline text."""
    def __init__(self, parent, event_name=""):
        Frame.__init__(self, parent, bg='black')
        image = Image.open("assets/Newspaper.png")
        image = image.resize((25, 25), Image.ANTIALIAS)
        image = image.convert('RGB')
        photo = ImageTk.PhotoImage(image)
        self.iconLbl = Label(self, bg='black', image=photo)
        # keep a reference so Tk does not garbage-collect the image
        self.iconLbl.image = photo
        self.iconLbl.pack(side=LEFT, anchor=W)
        self.eventName = event_name
        self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.eventNameLbl.pack(side=LEFT, anchor=N)
class FullscreenWindow:
    """Top-level mirror window: lays out the clock, weather, trains and
    news frames and handles fullscreen toggling (Return/Escape keys)."""
    def __init__(self):
        self.tk = Tk()
        self.tk.configure(background='black')
        self.topFrame = Frame(self.tk, background = 'black')
        self.centerFrame = Frame(self.tk, background = 'black')
        self.bottomFrame = Frame(self.tk, background = 'black')
        self.topFrame.pack(side = TOP, fill=BOTH, expand = YES)
        self.centerFrame.pack(side = TOP, fill=BOTH, expand = YES)
        self.bottomFrame.pack(side = BOTTOM, fill=BOTH, expand = YES)
        self.state = False
        self.tk.bind("<Return>", self.toggle_fullscreen)
        self.tk.bind("<Escape>", self.end_fullscreen)
        # Name
        # clock
        self.clock = Clock(self.topFrame)
        self.clock.pack(side=RIGHT, anchor=N, padx=10, pady=10)
        # weather
        self.weather = Weather(self.topFrame)
        self.weather.pack(side=LEFT, anchor=N, padx=10, pady=10)
        #temp
        # self.temp = TempTest(self.centerFrame)
        # self.temp.pack(side=LEFT, anchor=W, padx=10)
        #setpa
        self.septa = Trains(self.centerFrame)
        self.septa.pack(side=LEFT, anchor=N, padx=10)
        # news
        self.news = News(self.bottomFrame)
        self.news.pack(side=LEFT, anchor=S, padx=10, pady=10)
    def toggle_fullscreen(self, event=None):
        self.state = not self.state # Just toggling the boolean
        self.tk.attributes("-fullscreen", self.state)
        return "break"
    def end_fullscreen(self, event=None):
        self.state = False
        self.tk.attributes("-fullscreen", False)
        return "break"
if __name__ == '__main__':
    # Build the mirror UI and enter the Tk event loop.
    w = FullscreenWindow()
    w.tk.mainloop()
| bennergarrett/TempleSmartMirror | smartmirror.py | Python | mit | 17,287 |
def hsd_inc_beh(rxd, txd):
'''|
| Specify the behavior, describe data processing; there is no notion
| of clock. Access the in/out interfaces via get() and append()
| methods. The "hsd_inc_beh" function does not return values.
|________'''
if rxd.hasPacket():
data = rxd.get() + 1
txd.append(data)
| hnikolov/pihdf | examples/hsd_inc/src/hsd_inc_beh.py | Python | mit | 340 |
import sys
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import dataset
from chainer import testing
from chainer.testing import attr
import chainer.testing.backend # NOQA
import chainerx
# Decorator that parametrizes test classes over NumPy, two CUDA devices
# and native/CUDA ChainerX backends.
_inject_backend_tests = testing.backend.inject_backend_tests(
    None,
    [
        # NumPy
        {},
        # CuPy
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class ConverterTestBase(object):
    """Shared test cases for example-concatenating converters.

    Subclasses assign the converter under test to ``self.converter`` in
    ``setUp``. Covers lists of arrays, tuples of arrays and dicts of
    arrays, each transferred to CPU, GPU and ChainerX targets.
    """
    def get_arrays_to_concat(self, backend_config):
        # Five 2x3 random arrays allocated on the backend under test.
        return [
            backend_config.get_array(numpy.random.rand(2, 3))
            for _ in range(5)]
    def check_concat_arrays(self, arrays, device, expected_device):
        # The result must stack along a new leading axis, land on the
        # expected device, and preserve every element value.
        array = self.converter(arrays, device)
        self.assertEqual(array.shape, (len(arrays),) + arrays[0].shape)
        assert backend.get_device_from_array(array) == expected_device
        np_array = backend.CpuDevice().send(array)
        for x, y in zip(np_array, arrays):
            numpy.testing.assert_array_equal(x, backend.CpuDevice().send(y))
    def test_concat_arrays(self, backend_config):
        # device=None: result stays on the source backend's device.
        arrays = self.get_arrays_to_concat(backend_config)
        self.check_concat_arrays(arrays, None, backend_config.device)
    @attr.gpu
    def test_concat_arrays_to_gpu(self, backend_config):
        arrays = self.get_arrays_to_concat(backend_config)
        self.check_concat_arrays(
            arrays, 0, backend.GpuDevice.from_device_id(0))
    @attr.chainerx
    def test_concat_arrays_to_chainerx(self, backend_config):
        device = chainerx.get_device('native:0')
        arrays = self.get_arrays_to_concat(backend_config)
        self.check_concat_arrays(
            arrays, device, backend.ChainerxDevice(device))
    def get_tuple_arrays_to_concat(self, backend_config):
        # Five (2x3, 3x4) pairs allocated on the backend under test.
        return [
            (backend_config.get_array(numpy.random.rand(2, 3)),
             backend_config.get_array(numpy.random.rand(3, 4)))
            for _ in range(5)]
    def check_concat_tuples(self, tuples, device, expected_device):
        # Tuples concatenate elementwise: one stacked array per position.
        arrays = self.converter(tuples, device)
        self.assertEqual(len(arrays), len(tuples[0]))
        for i in range(len(arrays)):
            shape = (len(tuples),) + tuples[0][i].shape
            self.assertEqual(arrays[i].shape, shape)
            assert backend.get_device_from_array(arrays[i]) == expected_device
            arr = backend.CpuDevice().send(arrays[i])
            for x, y in zip(arr, tuples):
                numpy.testing.assert_array_equal(
                    x, backend.CpuDevice().send(y[i]))
    def test_concat_tuples(self, backend_config):
        tuples = self.get_tuple_arrays_to_concat(backend_config)
        self.check_concat_tuples(tuples, None, backend_config.device)
    @attr.gpu
    def test_concat_tuples_to_gpu(self, backend_config):
        tuples = self.get_tuple_arrays_to_concat(backend_config)
        self.check_concat_tuples(
            tuples, 0, backend.GpuDevice.from_device_id(0))
    @attr.chainerx
    def test_concat_tuples_to_chainerx(self, backend_config):
        device = chainerx.get_device('native:0')
        arrays = self.get_tuple_arrays_to_concat(backend_config)
        self.check_concat_tuples(
            arrays, device, backend.ChainerxDevice(device))
    def get_dict_arrays_to_concat(self, backend_config):
        # Five {'x': 2x3, 'y': 3x4} dicts allocated on the backend under test.
        return [
            {'x': backend_config.get_array(numpy.random.rand(2, 3)),
             'y': backend_config.get_array(numpy.random.rand(3, 4))}
            for _ in range(5)]
    def check_concat_dicts(self, dicts, device, expected_device):
        # Dicts concatenate per key: one stacked array per key.
        arrays = self.converter(dicts, device)
        self.assertEqual(frozenset(arrays.keys()), frozenset(dicts[0].keys()))
        for key in arrays:
            shape = (len(dicts),) + dicts[0][key].shape
            self.assertEqual(arrays[key].shape, shape)
            self.assertEqual(
                backend.get_device_from_array(arrays[key]), expected_device)
            arr = backend.CpuDevice().send(arrays[key])
            for x, y in zip(arr, dicts):
                numpy.testing.assert_array_equal(
                    x, backend.CpuDevice().send(y[key]))
    def test_concat_dicts(self, backend_config):
        dicts = self.get_dict_arrays_to_concat(backend_config)
        self.check_concat_dicts(dicts, None, backend_config.device)
    @attr.gpu
    def test_concat_dicts_to_gpu(self, backend_config):
        dicts = self.get_dict_arrays_to_concat(backend_config)
        self.check_concat_dicts(
            dicts, 0, backend.GpuDevice.from_device_id(0))
    @attr.chainerx
    def test_concat_dicts_to_chainerx(self, backend_config):
        device = chainerx.get_device('native:0')
        arrays = self.get_dict_arrays_to_concat(backend_config)
        self.check_concat_dicts(
            arrays, device, backend.ChainerxDevice(device))
@_inject_backend_tests
class TestConcatExamples(ConverterTestBase, unittest.TestCase):
    """Runs the shared converter tests against ``dataset.concat_examples``."""
    def setUp(self):
        self.converter = dataset.concat_examples
class _XFailConcatWithAsyncTransfer(object):
    """Mixin that re-marks the inherited ChainerX converter tests as
    expected failures (they do not pass with ConcatWithAsyncTransfer)."""
    @attr.chainerx
    @unittest.expectedFailure
    def test_concat_arrays_to_chainerx(self, *args, **kwargs):
        super(_XFailConcatWithAsyncTransfer, self) \
            .test_concat_arrays_to_chainerx(*args, **kwargs)
    @attr.chainerx
    @unittest.expectedFailure
    def test_concat_tuples_to_chainerx(self, *args, **kwargs):
        super(_XFailConcatWithAsyncTransfer, self) \
            .test_concat_tuples_to_chainerx(*args, **kwargs)
    @attr.chainerx
    @unittest.expectedFailure
    def test_concat_dicts_to_chainerx(self, *args, **kwargs):
        super(_XFailConcatWithAsyncTransfer, self) \
            .test_concat_dicts_to_chainerx(*args, **kwargs)
# NumPy backend only: the ChainerX cases are xfail'd via the mixin below.
@testing.backend.inject_backend_tests(
    None,
    [
        # NumPy
        {},
    ])
class TestConcatWithAsyncTransfer(
        _XFailConcatWithAsyncTransfer,
        ConverterTestBase, unittest.TestCase):
    """Runs the shared converter tests against ``ConcatWithAsyncTransfer``."""
    def setUp(self):
        self.converter = chainer.dataset.ConcatWithAsyncTransfer()
@_inject_backend_tests
class TestConcatExamplesWithPadding(unittest.TestCase):
    """``concat_examples`` with ``padding=0`` on ragged-shaped examples.

    Each output slot must be padded up to the elementwise maximum shape,
    with the source data in the top-left corner and zeros elsewhere.
    """
    def test_concat_arrays_padding(self, backend_config):
        arrays = backend_config.get_array(
            [numpy.random.rand(3, 4),
             numpy.random.rand(2, 5),
             numpy.random.rand(4, 3)])
        array = dataset.concat_examples(arrays, padding=0)
        # Padded to max dims: (batch=3, rows=max(3,2,4)=4, cols=max(4,5,3)=5).
        self.assertEqual(array.shape, (3, 4, 5))
        self.assertEqual(type(array), type(arrays[0]))
        arrays = [backend.CpuDevice().send(a) for a in arrays]
        array = backend.CpuDevice().send(array)
        # Each slice holds its source array at [:h, :w]; the rest is zero.
        numpy.testing.assert_array_equal(array[0, :3, :4], arrays[0])
        numpy.testing.assert_array_equal(array[0, 3:, :], 0)
        numpy.testing.assert_array_equal(array[0, :, 4:], 0)
        numpy.testing.assert_array_equal(array[1, :2, :5], arrays[1])
        numpy.testing.assert_array_equal(array[1, 2:, :], 0)
        numpy.testing.assert_array_equal(array[2, :4, :3], arrays[2])
        numpy.testing.assert_array_equal(array[2, :, 3:], 0)
    def test_concat_tuples_padding(self, backend_config):
        tuples = [
            backend_config.get_array(
                (numpy.random.rand(3, 4), numpy.random.rand(2, 5))),
            backend_config.get_array(
                (numpy.random.rand(4, 4), numpy.random.rand(3, 4))),
            backend_config.get_array(
                (numpy.random.rand(2, 5), numpy.random.rand(2, 6))),
        ]
        arrays = dataset.concat_examples(tuples, padding=0)
        # Padding is computed independently per tuple position.
        self.assertEqual(len(arrays), 2)
        self.assertEqual(arrays[0].shape, (3, 4, 5))
        self.assertEqual(arrays[1].shape, (3, 3, 6))
        self.assertEqual(type(arrays[0]), type(tuples[0][0]))
        self.assertEqual(type(arrays[1]), type(tuples[0][1]))
        for i in range(len(tuples)):
            tuples[i] = (
                backend.CpuDevice().send(tuples[i][0]),
                backend.CpuDevice().send(tuples[i][1]))
        arrays = tuple(backend.CpuDevice().send(array) for array in arrays)
        numpy.testing.assert_array_equal(arrays[0][0, :3, :4], tuples[0][0])
        numpy.testing.assert_array_equal(arrays[0][0, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays[0][0, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays[0][1, :4, :4], tuples[1][0])
        numpy.testing.assert_array_equal(arrays[0][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays[0][2, :2, :5], tuples[2][0])
        numpy.testing.assert_array_equal(arrays[0][2, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays[1][0, :2, :5], tuples[0][1])
        numpy.testing.assert_array_equal(arrays[1][0, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays[1][0, :, 5:], 0)
        numpy.testing.assert_array_equal(arrays[1][1, :3, :4], tuples[1][1])
        numpy.testing.assert_array_equal(arrays[1][1, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays[1][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays[1][2, :2, :6], tuples[2][1])
        numpy.testing.assert_array_equal(arrays[1][2, 2:, :], 0)
    def test_concat_dicts_padding(self, backend_config):
        dicts = [
            {'x': numpy.random.rand(3, 4), 'y': numpy.random.rand(2, 5)},
            {'x': numpy.random.rand(4, 4), 'y': numpy.random.rand(3, 4)},
            {'x': numpy.random.rand(2, 5), 'y': numpy.random.rand(2, 6)},
        ]
        dicts = [
            {key: backend_config.get_array(arr) for key, arr in d.items()}
            for d in dicts]
        arrays = dataset.concat_examples(dicts, padding=0)
        # Padding is computed independently per dict key.
        self.assertIn('x', arrays)
        self.assertIn('y', arrays)
        self.assertEqual(arrays['x'].shape, (3, 4, 5))
        self.assertEqual(arrays['y'].shape, (3, 3, 6))
        self.assertEqual(type(arrays['x']), type(dicts[0]['x']))
        self.assertEqual(type(arrays['y']), type(dicts[0]['y']))
        for d in dicts:
            d['x'] = backend.CpuDevice().send(d['x'])
            d['y'] = backend.CpuDevice().send(d['y'])
        arrays = {
            'x': backend.CpuDevice().send(arrays['x']),
            'y': backend.CpuDevice().send(arrays['y'])}
        numpy.testing.assert_array_equal(arrays['x'][0, :3, :4], dicts[0]['x'])
        numpy.testing.assert_array_equal(arrays['x'][0, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays['x'][0, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays['x'][1, :4, :4], dicts[1]['x'])
        numpy.testing.assert_array_equal(arrays['x'][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays['x'][2, :2, :5], dicts[2]['x'])
        numpy.testing.assert_array_equal(arrays['x'][2, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays['y'][0, :2, :5], dicts[0]['y'])
        numpy.testing.assert_array_equal(arrays['y'][0, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays['y'][0, :, 5:], 0)
        numpy.testing.assert_array_equal(arrays['y'][1, :3, :4], dicts[1]['y'])
        numpy.testing.assert_array_equal(arrays['y'][1, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays['y'][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays['y'][2, :2, :6], dicts[2]['y'])
        numpy.testing.assert_array_equal(arrays['y'][2, 2:, :], 0)
@testing.parameterize(
    {'padding': None},
    {'padding': 0},
)
class TestConcatExamplesWithBuiltInTypes(unittest.TestCase):
    """``concat_examples`` on plain Python ints/floats (no ndarray inputs)."""
    int_arrays = [1, 2, 3]
    float_arrays = [1.0, 2.0, 3.0]
    def check_device(self, array, device, expected_device):
        self.assertIsInstance(array, expected_device.xp.ndarray)
        self.assertEqual(
            backend.get_device_from_array(array), expected_device)
    def check_concat_arrays(
            self, arrays, device, expected_device, expected_dtype):
        array = dataset.concat_examples(arrays, device, self.padding)
        self.assertEqual(array.shape, (len(arrays),))
        self.check_device(array, device, expected_device)
        np_array = backend.CpuDevice().send(array)
        for x, y in zip(np_array, arrays):
            assert x.dtype == expected_dtype
            numpy.testing.assert_array_equal(
                x, numpy.array(y, dtype=expected_dtype))
    def test_concat_arrays_to_cpu(self):
        # Python ints map to the platform default int dtype
        # (int32 on Windows, int64 elsewhere).
        if sys.platform == 'win32':
            expected_int_dtype = numpy.int32
        else:
            expected_int_dtype = numpy.int64
        for device in (-1, None):
            self.check_concat_arrays(
                self.int_arrays,
                device,
                backend.CpuDevice(),
                expected_int_dtype)
            self.check_concat_arrays(
                self.float_arrays,
                device,
                backend.CpuDevice(),
                numpy.float64)
    @attr.gpu
    def test_concat_arrays_to_gpu(self):
        device = 0
        if sys.platform == 'win32':
            expected_int_dtype = numpy.int32
        else:
            expected_int_dtype = numpy.int64
        self.check_concat_arrays(
            self.int_arrays,
            device,
            backend.GpuDevice.from_device_id(0),
            expected_int_dtype)
        self.check_concat_arrays(
            self.float_arrays,
            device,
            backend.GpuDevice.from_device_id(0),
            numpy.float64)
    @attr.chainerx
    def test_concat_arrays_to_chainerx(self):
        # ChainerX uses int64 for Python ints on all platforms.
        device = 'native:0'
        self.check_concat_arrays(
            self.int_arrays,
            device,
            backend.ChainerxDevice(chainerx.get_device(device)),
            numpy.int64)
        self.check_concat_arrays(
            self.float_arrays,
            device,
            backend.ChainerxDevice(chainerx.get_device(device)),
            numpy.float64)
def get_xp(gpu):
    """Return the array module for the requested device: ``cuda.cupy``
    when *gpu* is truthy, otherwise ``numpy``."""
    return cuda.cupy if gpu else numpy
@testing.parameterize(
    {'device': None, 'src_gpu': False, 'dst_gpu': False},
    {'device': -1, 'src_gpu': False, 'dst_gpu': False},
)
class TestToDeviceCPU(unittest.TestCase):
    """``to_device`` with CPU-only source/destination combinations."""
    def test_to_device(self):
        src_xp = get_xp(self.src_gpu)
        dst_xp = get_xp(self.dst_gpu)
        x = src_xp.array([1], 'i')
        y = dataset.to_device(self.device, x)
        self.assertIsInstance(y, dst_xp.ndarray)
@testing.parameterize(
    {'device': None, 'src_gpu': True, 'dst_gpu': True},
    {'device': -1, 'src_gpu': True, 'dst_gpu': False},
    {'device': 0, 'src_gpu': False, 'dst_gpu': True},
    {'device': 0, 'src_gpu': True, 'dst_gpu': True},
)
class TestToDeviceGPU(unittest.TestCase):
    """``to_device`` transfers between CPU and a single GPU."""
    @attr.gpu
    def test_to_device(self):
        src_xp = get_xp(self.src_gpu)
        dst_xp = get_xp(self.dst_gpu)
        x = src_xp.array([1], 'i')
        y = dataset.to_device(self.device, x)
        self.assertIsInstance(y, dst_xp.ndarray)
        # An explicit non-negative device id must be honoured.
        if self.device is not None and self.device >= 0:
            self.assertEqual(int(y.device), self.device)
        # device=None keeps a GPU array on its current device.
        if self.device is None and self.src_gpu:
            self.assertEqual(int(x.device), int(y.device))
@testing.parameterize(
    {'device': 1, 'src_gpu': False, 'dst_gpu': True},
    {'device': 1, 'src_gpu': True, 'dst_gpu': True},
)
class TestToDeviceMultiGPU(unittest.TestCase):
    """``to_device`` targeting a non-default (second) GPU."""
    @attr.multi_gpu(2)
    def test_to_device(self):
        src_xp = get_xp(self.src_gpu)
        dst_xp = get_xp(self.dst_gpu)
        x = src_xp.array([1], 'i')
        y = dataset.to_device(self.device, x)
        self.assertIsInstance(y, dst_xp.ndarray)
        self.assertEqual(int(y.device), self.device)
testing.run_module(__name__, __file__)
| tkerola/chainer | tests/chainer_tests/dataset_tests/test_convert.py | Python | mit | 15,856 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class Website(models.Model):
    """Extend the core ``website`` model with a per-website logo field."""
    _inherit = 'website'
    # Binary image data; rendered in the site header per the help text.
    logo = fields.Binary(
        string="Website logo",
        help="This field holds the logo for this website, showed in header. "
             "Recommended size is 180x50")
| Tecnativa/website | website_logo/models/website.py | Python | agpl-3.0 | 1,221 |
# coding: utf-8
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.db.models import Q
from django.contrib.auth.models import User
from django.forms import ModelForm
from .models import Image, Album, Tag
def main(request):
    """Main listing.

    Renders a paginated list of albums (4 per page). Anonymous visitors
    only see public albums. Each album on the page is annotated with up
    to four preview images.
    """
    context = RequestContext(request)
    albums = Album.objects.all()
    # Anonymous users may only browse public albums.
    if not request.user.is_authenticated():
        albums = albums.filter(public=True)
    paginator = Paginator(albums, 4)
    # Fall back to page 1 on a missing or non-numeric ?page= value.
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    # Out-of-range pages show the last page instead of erroring.
    try:
        albums = paginator.page(page)
    except (InvalidPage, EmptyPage):
        albums = paginator.page(paginator.num_pages)
    # Attach up to four preview images to each album on this page.
    for album in albums.object_list:
        album.images = album.image_set.all()[:4]
        #album.images = album.image_set.all()
    context_dict = {'albums':albums}
    return render_to_response("photo/list.html", context_dict, context)
def album(request, pk, view="thumbnails"):
    """Album listing.

    Shows the images of one album, paginated: 30 per page in the default
    thumbnail view, 10 per page in the "full" (slideshow) view. Each image
    is annotated with its tag names (comma-joined string) and the names of
    the albums it belongs to, for use by the template.
    """
    # Removed: an 11-line dead triple-quoted string holding the pre-slideshow
    # implementation (it was a no-op expression statement). Note that the old
    # code also contained a public/authenticated check which the live code
    # never had.
    num_images = 30
    if view == "full":
        num_images = 10
    album = Album.objects.get(pk=pk)
    images = album.image_set.all()
    paginator = Paginator(images, num_images)
    # Fall back to page 1 on a missing or non-numeric ?page= value.
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    # Out-of-range pages show the last page instead of erroring.
    try:
        images = paginator.page(page)
    except (InvalidPage, EmptyPage):
        images = paginator.page(paginator.num_pages)
    # add list of tags as string and list of album objects to each image object
    for img in images.object_list:
        tags = [x[1] for x in img.tags.values_list()]
        img.tag_lst = ", ".join(tags)
        img.album_lst = [x[1] for x in img.albums.values_list()]
    context = RequestContext(request)
    context_dict = dict(album=album, images=images, view=view, albums=Album.objects.all())
    return render_to_response("photo/album.html", context_dict, context)
def image(request, pk):
    """Image page.

    Renders a single image together with a back-link to the page the
    visitor came from.
    """
    img = Image.objects.get(pk=pk)
    context = RequestContext(request)
    # HTTP_REFERER is not guaranteed to be present (direct visits,
    # privacy-stripping browsers); fall back to the site root instead
    # of raising KeyError.
    context_dict = dict(image=img, backurl=request.META.get("HTTP_REFERER", "/"))
    return render_to_response("photo/image.html", context_dict, context)
def update(request):
    """Update image title, rating, tags, albums.

    POST keys are of the form ``<field>-<pk>`` (e.g. ``title-3``); they
    are regrouped per image pk, applied to the matching Image objects
    and saved. Finally redirects back to the referring page.
    """
    p = request.POST
    images = defaultdict(dict)
    # create dictionary of properties for each image
    for k, v in p.items():
        if k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
            k, pk = k.split('-')
            images[pk][k] = v
        elif k.startswith("album"):
            pk = k.split('-')[1]
            images[pk]["albums"] = p.getlist(k)
    # process properties, assign to image objects and save
    for k, d in images.items():
        image = Image.objects.get(pk=k)
        image.title = d["title"]
        image.rating = int(d["rating"])
        # tags - assign or create if a new tag!
        tags = d["tags"].split(',')
        lst = []
        for t in tags:
            if t:
                t = t.strip()
                lst.append(Tag.objects.get_or_create(tag=t)[0])
        image.tags = lst
        if "albums" in d:
            image.albums = d["albums"]
        image.save()
    # NOTE(review): assumes HTTP_REFERER is always present; a POST without
    # a Referer header would raise KeyError here — confirm callers.
    return HttpResponseRedirect(request.META["HTTP_REFERER"])
#@login_required
def search(request):
    """Search, filter, sort images.

    First GET visit (no ?page=) just renders the empty search form.
    Otherwise, POST data is split into per-image edits (applied via
    update_and_filter) and search parameters; parameters are cached in
    the session so that paging through results keeps the same query.
    """
    context = RequestContext(request)
    context_dict = dict( albums=Album.objects.all(), authors=User.objects.all())
    # First visit via the Search link: just render the page without any processing
    if request.method == 'GET' and not request.GET.get("page"):
        return render_to_response("photo/search.html", context_dict, context)
    # From here on we handle POST or GET(?page) requests
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    p = request.POST
    images = defaultdict(dict)
    # init parameters
    parameters = {}
    keys = ['title', 'filename', 'rating_from', 'rating_to', 'width_from',
            'width_to', 'height_from', 'height_to', 'tags', 'view', 'user', 'sort', 'asc_desc']
    for k in keys:
        parameters[k] = ''
    parameters["album"] = []
    # create dictionary of properties for each image and a dict of search/filter parameters
    for k, v in p.items():
        if k == "album":
            parameters[k] = [int(x) for x in p.getlist(k)]
        elif k in parameters:
            parameters[k] = v
        elif k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
            k, pk = k.split('-')
            images[pk][k] = v
        elif k.startswith("album"):
            pk = k.split('-')[1]
            images[pk]["albums"] = p.getlist(k)
    # save or restore parameters from session
    if page != 1 and "parameters" in request.session:
        parameters = request.session["parameters"]
    else:
        request.session["parameters"] = parameters
    results = update_and_filter(images, parameters)
    # make paginator
    paginator = Paginator(results, 20)
    try:
        results = paginator.page(page)
    except (InvalidPage, EmptyPage):
        results = paginator.page(paginator.num_pages)
    # add list of tags as string and list of album names to each image object
    for img in results.object_list:
        tags = [x[1] for x in img.tags.values_list()]
        img.tag_lst = ", ".join(tags)
        img.album_lst = [x[1] for x in img.albums.values_list()]
    context_dict['results'] = results
    context_dict['prm'] = parameters
    return render_to_response("photo/search.html", context_dict, context)
def update_and_filter(images, p):
    """Update image data if changed, then filter images by the search
    parameters and return the resulting queryset.

    ``images`` maps an image pk to a dict of edited properties (title,
    rating, tags, albums); ``p`` is the search/filter parameter dict
    built by ``search()``.
    """
    # process properties, assign to image objects and save
    for k, d in images.items():
        image = Image.objects.get(pk=k)
        image.title = d["title"]
        image.rating = int(d["rating"])
        # tags - assign or create if a new tag!
        tags = d["tags"].split(',')
        lst = []
        for t in tags:
            if t:
                t = t.strip()
                lst.append(Tag.objects.get_or_create(tag=t)[0])
        image.tags = lst
        if "albums" in d:
            image.albums = d["albums"]
        image.save()
    # filter results by parameters
    results = Image.objects.all()
    if p["title"]:
        results = results.filter(title__icontains=p["title"])
    if p["filename"]:
        results = results.filter(image__icontains=p["filename"])
    if p["rating_from"]:
        results = results.filter(rating__gte=int(p["rating_from"]))
    if p["rating_to"]:
        results = results.filter(rating__lte=int(p["rating_to"]))
    if p["width_from"]:
        results = results.filter(width__gte=int(p["width_from"]))
    if p["width_to"]:
        results = results.filter(width__lte=int(p["width_to"]))
    if p["height_from"]:
        results = results.filter(height__gte=int(p["height_from"]))
    if p["height_to"]:
        results = results.filter(height__lte=int(p["height_to"]))
    if p["tags"]:
        # NOTE(review): unlike the update loop above, Tag.objects.get
        # raises Tag.DoesNotExist for an unknown tag name — confirm that
        # the search form only submits existing tags.
        # (Removed an unused `lst = []` local that was never read here.)
        for t in p["tags"].split(','):
            if t:
                results = results.filter(tags=Tag.objects.get(tag=t.strip()))
    if p["album"]:
        # OR together all selected albums, then deduplicate.
        lst = p["album"]
        or_query = Q(albums=lst[0])
        for album in lst[1:]:
            or_query = or_query | Q(albums=album)
        results = results.filter(or_query).distinct()
    return results
#
# Copyright 2010-2012 Fabric Engine Inc. All rights reserved.
#
import fabric
client = fabric.createClient()
# Array generator: produce 100 Float64 values 1.0 .. 100.0.
cv = client.MR.createConstValue("Size", 100)
ago = client.KLC.createArrayGeneratorOperator("foo.kl", "operator foo(io Float64 output, Size index) { output = Float64(index+1); }", "foo")
ag = client.MR.createArrayGenerator(cv, ago)
# Map: scale each element by a constant Scalar multiplier.
mo = client.KLC.createArrayMapOperator("map.kl", "operator map(Float64 input, io Float64 output, Size index, Size count, Scalar multiplier) { output = input * multiplier; }", "map")
sv = client.MR.createConstValue("Scalar", 3.14)
m = client.MR.createArrayMap(ag, mo, sv)
# Reduce: sum the (again multiplied) elements into a single value.
ro = client.KLC.createReduceOperator("reduce.kl", "operator reduce(Float64 input, io Float64 result, Size index, Size count, Scalar multiplier) { result += input * multiplier; }", "reduce")
sv = client.MR.createConstValue("Scalar", 3.14)
r = client.MR.createReduce(m, ro, sv)
def callback( result ):
    # Invoked once the asynchronous reduce has produced its value;
    # prints the sum and shuts the client down.
    print( result )
    client.close()
r.produceAsync( callback )
| ghostx2013/FabricEngine_Backup | Native/Test/Python/mr-sum-async.py | Python | agpl-3.0 | 986 |
# -*- coding: utf-8 -*-
import pygame #@UnresolvedImport
from vec2d import vec2d
from shots import BaseShot
import os
class PlayerShip(pygame.sprite.Sprite):
    """Player ship."""
    def __init__(self, screen):
        """Constructor."""
        pygame.sprite.Sprite.__init__(self)
        self.screen = screen
        # The original sprite image
        self.image = pygame.image.load(os.path.join('images','mort.png')).convert_alpha()
        # The rect is needed for collision handling
        self.rect = self.image.get_rect()
        self.rect.center = (100, 220)
        self.x = 0
        self.y = 0
        self.power = 1 # 1,3,5
        # Needed to collect the shots
        self.playershots_grp = pygame.sprite.Group()
    def update(self):
        """The update method is called every time we iterate the game loop.
        Moves the player's ship and checks the boundaries.
        """
        self.rect.move_ip(self.x,self.y)
        # Keep the ship from moving off the screen edges
        if self.rect.left < 0:
            self.rect.left = 0
        elif self.rect.right > self.screen.get_width():
            self.rect.right = self.screen.get_width()
        if self.rect.top < 0:
            self.rect.top = 0
        elif self.rect.bottom >= self.screen.get_height():
            self.rect.bottom = self.screen.get_height()
    def show_boundary(self):
        """Where is the boundary? Draws a red box at the player rect.
        For testing purposes.
        """
        pygame.draw.rect(self.screen, pygame.Color('Red'), self.rect, 1)
    def fire_weapon(self):
        """Fires the weapon.
        Starts with one bubble. The next level is three bubbles.
        And the next, five bubbles... But after that?
        """
        ydir = [0, 1.5, -1.5, 3, -3]
        shot_xpos = self.rect.x + 35
        shot_ypos = self.rect.y + 7
        shot_xdir = 7
        for i in range(self.power):
            self.playershots_grp.add(BaseShot(self.screen,
                                              shot_xpos,
                                              shot_ypos,
                                              shot_xdir,
                                              ydir[i]))
| saintdragon2/python-3-lecture-2015 | civil-final/1st_presentation/6조/aa/player.py | Python | mit | 2,424 |
class APIError(Exception):
    """Error returned in a response to a fleet API call.

    Raised any time a response code >= 400 comes back from the API.

    Attributes:
        code (int): The response code
        message (str): The message included with the error response
        http_error (googleapiclient.errors.HttpError): The underlying
            exception that caused this exception to be raised. If you need
            access to the raw response, this is where you'll find it.
    """

    def __init__(self, code, message, http_error):
        """Store the response code, message and underlying HTTP error."""
        self.code = code
        self.message = message
        self.http_error = http_error

    def __str__(self):
        # e.g. 'Some bad thing happened (400)'
        return '{0} ({1})'.format(self.message, self.code)

    def __repr__(self):
        # e.g. '<APIError; Code: 400; Message: Some bad thing happened>'
        return '<{0}; Code: {1}; Message: {2}>'.format(
            type(self).__name__,
            self.code,
            self.message
        )
| cnelson/python-fleet | fleet/v1/errors.py | Python | apache-2.0 | 1,594 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mptt.fields
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Rule, Condition and Action
    models and makes User.customer a nullable tree foreign key."""
    dependencies = [
        ('servo', '0015_auto_20150208_1629'),
    ]
    operations = [
        migrations.CreateModel(
            name='Action',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.CharField(default=b'SEND_EMAIL', max_length=32, choices=[(b'SEND_SMS', 'Send SMS'), (b'SEND_EMAIL', 'Send email'), (b'ADD_TAG', 'Add Tag'), (b'SET_PRIO', 'Set Priority'), (b'SET_QUEUE', 'Set Queue'), (b'SET_USER', 'Assign to')])),
                ('value', models.TextField(default=b'')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Condition',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.CharField(max_length=16, choices=[(b'QUEUE', 'Queue'), (b'STATUS', 'Status'), (b'CUSTOMER_NAME', 'Customer name'), (b'DEVICE', 'Device name')])),
                ('operator', models.CharField(max_length=4, choices=[(b'^%s$', 'Equals'), (b'%s', 'Contains'), (b'%d < %d', 'Less than'), (b'%d > %d', 'Greater than')])),
                ('value', models.TextField(default=b'')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Rule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('description', models.CharField(default='New Rule', max_length=128)),
                ('match', models.CharField(default=b'ANY', max_length=3, choices=[(b'ANY', 'Any'), (b'ALL', 'All')])),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='condition',
            name='rule',
            field=models.ForeignKey(to='servo.Rule'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='action',
            name='rule',
            field=models.ForeignKey(to='servo.Rule'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='customer',
            field=mptt.fields.TreeForeignKey(blank=True, to='servo.Customer', null=True),
            preserve_default=True,
        ),
    ]
| filipp/Servo | servo/migrations/0016_auto_20150316_1152.py | Python | bsd-2-clause | 2,787 |
# -*- coding: utf-8 -*-
# Copyright(C) 2015 Matthieu Weber
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser import PagesBrowser, URL
from .pages import SearchPage
class DPDBrowser(PagesBrowser):
    """Scraping browser for the DPD parcel-tracking site."""
    BASEURL = 'https://tracking.dpd.de/'
    # Regex URL: the parcel number is captured as the <id> group.
    search_page = URL('/cgi-bin/simpleTracking.cgi\?parcelNr=(?P<id>.+)&locale=en_D2&type=1', SearchPage)
    def get_tracking_info(self, _id):
        """Fetch the tracking page for parcel *_id* and return its parsed info."""
        return self.search_page.go(id=_id).get_info(_id)
| vicnet/weboob | modules/dpd/browser.py | Python | lgpl-3.0 | 1,136 |
import levelFetcher
# Smoke test: fetch level 1 and print it.
a = levelFetcher.getLevel(1)
print(a)
print(a.overworld) | hnawner/The-Upside-Down | Levels/importTest.py | Python | mit | 77 |
import unittest
import pyxb_114.binding.datatypes as xsd
class Test_string (unittest.TestCase):
    """Placeholder test case for the xsd:string binding datatype."""
    def testRange (self):
        # Not really anything to test here.
        pass
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
| msherry/PyXB-1.1.4 | tests/datatypes/test-string.py | Python | apache-2.0 | 228 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Copyright: (c) 2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
- Installs, upgrade, downgrades, removes, and lists packages and groups with the I(yum) package manager.
- This module only works on Python 2. If you require Python 3 support see the M(ansible.builtin.dnf) module.
options:
use_backend:
description:
- This module supports C(yum) (as it always has), this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
"new yum" and it has an C(dnf) backend.
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
default: "auto"
choices: [ auto, yum, yum4, dnf ]
type: str
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
- You can also pass a url or a local path to a rpm file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
type: list
elements: str
exclude:
description:
- Package name(s) to exclude when state=present, or latest
type: list
elements: str
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list --show-duplicates <package> against. In addition to listing packages,
use can also list the following: C(installed), C(updates), C(available) and C(repos)."
- This parameter is mutually exclusive with C(name).
type: str
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
type: str
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
type: list
elements: str
version_added: "0.9"
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
type: list
elements: str
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
type: str
version_added: "0.6"
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
version_added: "1.2"
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
without raising an error. Equivalent to passing the --skip-broken option.
type: bool
default: "no"
version_added: "2.3"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
type: bool
default: "yes"
version_added: "2.1"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
default: "no"
type: bool
version_added: "2.5"
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
type: str
version_added: "2.3"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.4"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
default: "no"
type: bool
version_added: "2.6"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.4"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
type: list
elements: str
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
type: list
elements: str
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
type: str
version_added: "2.7"
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
- "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
type: bool
default: "no"
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in YUM config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
type: str
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the yum lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
- "NOTE: This feature requires yum >= 4 (RHEL/CentOS 8+)"
type: bool
default: "yes"
version_added: "2.8"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if I(download_only) is specified.
type: str
version_added: "2.8"
install_repoquery:
description:
- If repoquery is not available, install yum-utils. If the system is
registered to RHN or an RHN Satellite, repoquery allows for querying
all channels assigned to the system. It is also required to use the
'list' parameter.
- "NOTE: This will run and be logged as a separate yum transation which
takes place before any other installation or removal."
- "NOTE: This will use the system's default enabled repositories without
regard for disablerepo/enablerepo given to the module."
required: false
version_added: "1.5"
default: "yes"
type: bool
cacheonly:
description:
- Tells yum to run entirely from system cache; does not download or update metadata.
default: "no"
type: bool
version_added: "2.12"
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
attributes:
action:
details: In the case of yum, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
support: partial
async:
support: none
bypass_host_loop:
support: none
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: rhel
notes:
- When used with a `loop:` each package will be processed individually,
it is much more efficient to pass the list directly to the `name` option.
- In versions prior to 1.9.2 this module installed and removed each package
given to the yum module separately. This caused problems when packages
specified by filename or url had to be installed or removed together. In
1.9.2 this was fixed so that packages are installed in one yum
transaction. However, if one of the packages adds a new yum repository
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
rpm itself while "environment groups" are specified in a separate file
(usually by the distribution). Unfortunately, this division becomes
apparent to ansible users because ansible needs to operate on the group
of packages in a single transaction and yum requires groups to be specified
in different ways when used in that way. Package groups are specified as
"@development-tools" and environment groups are "@^gnome-desktop-environment".
Use the "yum group list hidden ids" command to see which category of group the group
you want to install falls into.'
- 'The yum module does not support clearing yum cache in an idempotent way, so it
was decided not to implement it, the only method is to use command and call the yum
command directly, namely "command: yum clean all"
https://github.com/ansible/ansible/pull/31450#issuecomment-352889579'
# informational: requirements for nodes
requirements:
- yum
author:
- Ansible Core Team
- Seth Vidal (@skvidal)
- Eduard Snesarev (@verm666)
- Berend De Schouwer (@berenddeschouwer)
- Abhijeet Kasurde (@Akasurde)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: Install the latest version of Apache
yum:
name: httpd
state: latest
- name: Install Apache >= 2.4
yum:
name: httpd>=2.4
state: present
- name: Install a list of packages (suitable replacement for 2.11 loop deprecation warning)
yum:
name:
- nginx
- postgresql
- postgresql-server
state: present
- name: Install a list of packages with a list variable
yum:
name: "{{ packages }}"
vars:
packages:
- httpd
- httpd-tools
- name: Remove the Apache package
yum:
name: httpd
state: absent
- name: Install the latest version of Apache from the testing repo
yum:
name: httpd
enablerepo: testing
state: present
- name: Install one specific version of Apache
yum:
name: httpd-2.2.29-1.4.amzn1
state: present
- name: Upgrade all packages
yum:
name: '*'
state: latest
- name: Upgrade all packages, excluding kernel & foo related packages
yum:
name: '*'
state: latest
exclude: kernel*,foo*
- name: Install the nginx rpm from a remote repo
yum:
name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install nginx rpm from a local file
yum:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: Install the 'Development tools' package group
yum:
name: "@Development tools"
state: present
- name: Install the 'Gnome desktop' environment group
yum:
name: "@^gnome-desktop-environment"
state: present
- name: List ansible packages and register result to print with debug later
yum:
list: ansible
register: result
- name: Install package with multiple repos enabled
yum:
name: sos
enablerepo: "epel,ol7_latest"
- name: Install package with multiple repos disabled
yum:
name: sos
disablerepo: "epel,ol7_latest"
- name: Download the nginx package but do not install it
yum:
name:
- nginx
state: latest
download_only: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, respawn_module
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
import errno
import os
import re
import sys
import tempfile
try:
import rpm
HAS_RPM_PYTHON = True
except ImportError:
HAS_RPM_PYTHON = False
try:
import yum
HAS_YUM_PYTHON = True
except ImportError:
HAS_YUM_PYTHON = False
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename, compareEVR
transaction_helpers = True
except ImportError:
transaction_helpers = False
from contextlib import contextmanager
from ansible.module_utils.urls import fetch_file
# Default rpm/repoquery query format: "epoch:name-version-release.arch".
def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"

# Lazily-resolved absolute path to the rpm binary; filled in on first use
# (see YumModule.is_installed) so we only pay for the lookup when needed.
rpmbin = None
class YumModule(YumDnf):
"""
Yum Ansible module back-end implementation
"""
    def __init__(self, module):
        """Initialize the yum back-end from the shared YumDnf argument spec.

        :param module: AnsibleModule instance carrying the parsed parameters.
        """
        # state=installed name=pkgspec
        # state=removed name=pkgspec
        # state=latest name=pkgspec
        #
        # informational commands:
        # list=installed
        # list=updates
        # list=available
        # list=repos
        # list=pkgspec

        # This populates instance vars for all argument spec params
        super(YumModule, self).__init__(module)

        self.pkg_mgr_name = "yum"
        self.lockfile = '/var/run/yum.pid'
        # Memoized yum.YumBase instance; built on demand by the yum_base property.
        self._yum_base = None
def _enablerepos_with_error_checking(self):
# NOTE: This seems unintuitive, but it mirrors yum's CLI behavior
if len(self.enablerepo) == 1:
try:
self.yum_base.repos.enableRepo(self.enablerepo[0])
except yum.Errors.YumBaseError as e:
if u'repository not found' in to_text(e):
self.module.fail_json(msg="Repository %s not found." % self.enablerepo[0])
else:
raise e
else:
for rid in self.enablerepo:
try:
self.yum_base.repos.enableRepo(rid)
except yum.Errors.YumBaseError as e:
if u'repository not found' in to_text(e):
self.module.warn("Repository %s not found." % rid)
else:
raise e
    def is_lockfile_pid_valid(self):
        """Return True when the PID stored in the yum lockfile belongs to a
        live, non-zombie process other than ourselves.

        Side effect: a stale lockfile (unparsable contents, our own PID, or a
        dead/zombie owner) is removed via os.unlink and False is returned.
        A vanished lockfile (IOError/OSError while reading) also returns False.
        """
        try:
            try:
                with open(self.lockfile, 'r') as f:
                    oldpid = int(f.readline())
            except ValueError:
                # invalid data
                os.unlink(self.lockfile)
                return False

            if oldpid == os.getpid():
                # that's us?
                os.unlink(self.lockfile)
                return False

            try:
                with open("/proc/%d/stat" % oldpid, 'r') as f:
                    stat = f.readline()

                # third field of /proc/<pid>/stat is the process state
                if stat.split()[2] == 'Z':
                    # Zombie
                    os.unlink(self.lockfile)
                    return False
            except IOError:
                # either /proc is not mounted or the process is already dead
                try:
                    # check the state of the process (signal 0 = existence probe)
                    os.kill(oldpid, 0)
                except OSError as e:
                    if e.errno == errno.ESRCH:
                        # No such process
                        os.unlink(self.lockfile)
                        return False

                    self.module.fail_json(msg="Unable to check PID %s in %s: %s" % (oldpid, self.lockfile, to_native(e)))
        except (IOError, OSError) as e:
            # lockfile disappeared?
            return False

        # another copy seems to be running
        return True
    @property
    def yum_base(self):
        """Lazily build and memoize a configured yum.YumBase instance.

        Applies, in order: plugin enable/disable lists, optional releasever,
        installroot, conf file, a per-user cache dir when not running as root,
        and exclude overrides; then disables/enables the requested repos.
        Any failure while talking to yum aborts the module via fail_json.
        """
        if self._yum_base:
            return self._yum_base
        else:
            # Only init once
            self._yum_base = yum.YumBase()
            self._yum_base.preconf.debuglevel = 0
            self._yum_base.preconf.errorlevel = 0
            self._yum_base.preconf.plugins = True
            self._yum_base.preconf.enabled_plugins = self.enable_plugin
            self._yum_base.preconf.disabled_plugins = self.disable_plugin
            if self.releasever:
                self._yum_base.preconf.releasever = self.releasever
            if self.installroot != '/':
                # do not setup installroot by default, because of error
                # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
                # in old yum version (like in CentOS 6.6)
                self._yum_base.preconf.root = self.installroot
                self._yum_base.conf.installroot = self.installroot
            if self.conf_file and os.path.exists(self.conf_file):
                self._yum_base.preconf.fn = self.conf_file
            if os.geteuid() != 0:
                # non-root users cannot write the system cache dir
                if hasattr(self._yum_base, 'setCacheDir'):
                    self._yum_base.setCacheDir()
                else:
                    cachedir = yum.misc.getCacheDir()
                    self._yum_base.repos.setCacheDir(cachedir)
                    self._yum_base.conf.cache = 0
            if self.disable_excludes:
                self._yum_base.conf.disable_excludes = self.disable_excludes

            # A sideeffect of accessing conf is that the configuration is
            # loaded and plugins are discovered
            self.yum_base.conf

            try:
                for rid in self.disablerepo:
                    self.yum_base.repos.disableRepo(rid)

                self._enablerepos_with_error_checking()

            except Exception as e:
                self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

        return self._yum_base
def po_to_envra(self, po):
if hasattr(po, 'ui_envra'):
return po.ui_envra
return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
def is_group_env_installed(self, name):
name_lower = name.lower()
if yum.__version_info__ >= (3, 4):
groups_list = self.yum_base.doGroupLists(return_evgrps=True)
else:
groups_list = self.yum_base.doGroupLists()
# list of the installed groups on the first index
groups = groups_list[0]
for group in groups:
if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
return True
if yum.__version_info__ >= (3, 4):
# list of the installed env_groups on the third index
envs = groups_list[2]
for env in envs:
if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
return True
return False
    def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
        """Return a list of installed ENVRA strings matching pkgspec.

        Uses the yum python API when ``repoq`` is falsy, otherwise queries the
        rpm database via the rpm binary directly, screen-scraping its output
        (hence the forced parsable locale below).

        :param repoq: repoquery base command list, or falsy to use the yum API
        :param pkgspec: package name/spec to look up
        :param qf: rpm query format; defaults to an ENVRA-per-line format
        :param is_pkg: when True, skip the provides-based fallback lookup
        """
        if qf is None:
            qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"

        if not repoq:
            pkgs = []
            try:
                e, m, _ = self.yum_base.rpmdb.matchPackageNames([pkgspec])
                pkgs = e + m
                if not pkgs and not is_pkg:
                    # fall back to provides-based lookup (file/virtual provides)
                    pkgs.extend(self.yum_base.returnInstalledPackagesByDep(pkgspec))
            except Exception as e:
                self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

            return [self.po_to_envra(p) for p in pkgs]

        else:
            global rpmbin
            if not rpmbin:
                rpmbin = self.module.get_bin_path('rpm', required=True)

            cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
            if '*' in pkgspec:
                # wildcard specs need the full package list to match against
                cmd.append('-a')
            if self.installroot != '/':
                cmd.extend(['--root', self.installroot])
            # rpm localizes messages and we're screen scraping so make sure we use
            # an appropriate locale
            locale = get_best_parsable_locale(self.module)
            lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
            rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
            if rc != 0 and 'is not installed' not in out:
                self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
            if 'is not installed' in out:
                out = ''

            # rpm prints '(none)' for a missing epoch; normalize it to '0'
            pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
            if not pkgs and not is_pkg:
                # nothing matched by name; retry as a provides query
                cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
                if self.installroot != '/':
                    cmd.extend(['--root', self.installroot])
                rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
            else:
                rc2, out2, err2 = (0, '', '')

            if rc2 != 0 and 'no package provides' not in out2:
                self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
            if 'no package provides' in out2:
                out2 = ''
            pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
            return pkgs

        # unreachable: both branches above return
        return []
def is_available(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
try:
e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(self.yum_base.returnPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
if self.releasever:
myrepoq.extend('--releasever=%s' % self.releasever)
cmd = myrepoq + ["--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return [p for p in out.split('\n') if p.strip()]
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return []
def is_update(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
updates = []
try:
pkgs = self.yum_base.returnPackagesByDep(pkgspec) + \
self.yum_base.returnInstalledPackagesByDep(pkgspec)
if not pkgs:
e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
updates = self.yum_base.doPackageLists(pkgnarrow='updates').updates
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
retpkgs = (pkg for pkg in pkgs if pkg in updates)
return set(self.po_to_envra(p) for p in retpkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
if self.releasever:
myrepoq.extend('--releasever=%s' % self.releasever)
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return set()
def what_provides(self, repoq, req_spec, qf=def_qf):
if not repoq:
pkgs = []
try:
try:
pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
self.yum_base.returnInstalledPackagesByDep(req_spec)
except Exception as e:
# If a repo with `repo_gpgcheck=1` is added and the repo GPG
# key was never accepted, querying this repo will throw an
# error: 'repomd.xml signature could not be verified'. In that
# situation we need to run `yum -y makecache` which will accept
# the key and try again.
if 'repomd.xml signature could not be verified' in to_native(e):
if self.releasever:
self.module.run_command(self.yum_basecmd + ['makecache'] + ['--releasever=%s' % self.releasever])
else:
self.module.run_command(self.yum_basecmd + ['makecache'])
pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
self.yum_base.returnInstalledPackagesByDep(req_spec)
else:
raise
if not pkgs:
e, m, _ = self.yum_base.pkgSack.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
e, m, _ = self.yum_base.rpmdb.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return set(self.po_to_envra(p) for p in pkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
if self.releasever:
myrepoq.extend('--releasever=%s' % self.releasever)
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
rc, out, err = self.module.run_command(cmd)
cmd = myrepoq + ["--qf", qf, req_spec]
rc2, out2, err2 = self.module.run_command(cmd)
if rc == 0 and rc2 == 0:
out += out2
pkgs = {p for p in out.split('\n') if p.strip()}
if not pkgs:
pkgs = self.is_installed(repoq, req_spec, qf=qf)
return pkgs
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
return set()
def transaction_exists(self, pkglist):
"""
checks the package list to see if any packages are
involved in an incomplete transaction
"""
conflicts = []
if not transaction_helpers:
return conflicts
# first, we create a list of the package 'nvreas'
# so we can compare the pieces later more easily
pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
# next, we build the list of packages that are
# contained within an unfinished transaction
unfinished_transactions = find_unfinished_transactions()
for trans in unfinished_transactions:
steps = find_ts_remaining(trans)
for step in steps:
# the action is install/erase/etc., but we only
# care about the package spec contained in the step
(action, step_spec) = step
(n, v, r, e, a) = splitFilename(step_spec)
# and see if that spec is in the list of packages
# requested for installation/updating
for pkg in pkglist_nvreas:
# if the name and arch match, we're going to assume
# this package is part of a pending transaction
# the label is just for display purposes
label = "%s-%s" % (n, a)
if n == pkg[0] and a == pkg[4]:
if label not in conflicts:
conflicts.append("%s-%s" % (n, a))
break
return conflicts
    def local_envra(self, path):
        """return envra of a local rpm passed in

        Reads the rpm header directly (signature verification disabled, since
        only metadata is needed) and returns 'epoch:name-version-release.arch',
        or None when the header cannot be read.
        """
        ts = rpm.TransactionSet()
        # skip signature checks; we only need header metadata
        ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
        fd = os.open(path, os.O_RDONLY)
        try:
            header = ts.hdrFromFdno(fd)
        except rpm.error as e:
            return None
        finally:
            os.close(fd)

        return '%s:%s-%s-%s.%s' % (
            # missing epoch is normalized to '0'
            header[rpm.RPMTAG_EPOCH] or '0',
            header[rpm.RPMTAG_NAME],
            header[rpm.RPMTAG_VERSION],
            header[rpm.RPMTAG_RELEASE],
            header[rpm.RPMTAG_ARCH]
        )
    @contextmanager
    def set_env_proxy(self):
        """Context manager exporting yum's proxy configuration (including any
        credentials) as http_proxy/https_proxy environment variables for the
        duration of the block, restoring the previous environment afterwards.
        """
        # setting system proxy environment and saving old, if exists
        namepass = ""
        scheme = ["http", "https"]
        old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
        try:
            # "_none_" is a special value to disable proxy in yum.conf/*.repo
            if self.yum_base.conf.proxy and self.yum_base.conf.proxy not in ("_none_",):
                if self.yum_base.conf.proxy_username:
                    namepass = namepass + self.yum_base.conf.proxy_username
                    proxy_url = self.yum_base.conf.proxy
                    if self.yum_base.conf.proxy_password:
                        namepass = namepass + ":" + self.yum_base.conf.proxy_password
                elif '@' in self.yum_base.conf.proxy:
                    # credentials are already embedded in the proxy URL;
                    # split them out so they can be re-injected below
                    namepass = self.yum_base.conf.proxy.split('@')[0].split('//')[-1]
                    proxy_url = self.yum_base.conf.proxy.replace("{0}@".format(namepass), "")

                if namepass:
                    namepass = namepass + '@'
                    for item in scheme:
                        # NOTE(review): the substitution only matches an
                        # 'http://' prefix, so credentials are not injected
                        # into an 'https://' proxy_url -- confirm intent.
                        os.environ[item + "_proxy"] = re.sub(
                            r"(http://)",
                            r"\g<1>" + namepass, proxy_url
                        )
                else:
                    for item in scheme:
                        os.environ[item + "_proxy"] = self.yum_base.conf.proxy
            yield
        except yum.Errors.YumBaseError:
            raise
        finally:
            # revert back to previously system configuration
            for item in scheme:
                if os.getenv("{0}_proxy".format(item)):
                    del os.environ["{0}_proxy".format(item)]
            if old_proxy_env[0]:
                os.environ["http_proxy"] = old_proxy_env[0]
            if old_proxy_env[1]:
                os.environ["https_proxy"] = old_proxy_env[1]
def pkg_to_dict(self, pkgstr):
if pkgstr.strip() and pkgstr.count('|') == 5:
n, e, v, r, a, repo = pkgstr.split('|')
else:
return {'error_parsing': pkgstr}
d = {
'name': n,
'arch': a,
'epoch': e,
'release': r,
'version': v,
'repo': repo,
'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
}
if repo == 'installed':
d['yumstate'] = 'installed'
else:
d['yumstate'] = 'available'
return d
def repolist(self, repoq, qf="%{repoid}"):
cmd = repoq + ["--qf", qf, "-a"]
if self.releasever:
cmd.extend(['--releasever=%s' % self.releasever])
rc, out, _ = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
return []
def list_stuff(self, repoquerybin, stuff):
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
# is_installed goes through rpm instead of repoquery so it needs a slightly different format
is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if self.disablerepo:
repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
if self.enablerepo:
repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
if self.installroot != '/':
repoq.extend(['--installroot', self.installroot])
if self.conf_file and os.path.exists(self.conf_file):
repoq += ['-c', self.conf_file]
if stuff == 'installed':
return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
if stuff == 'updates':
return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'available':
return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'repos':
return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
return [
self.pkg_to_dict(p) for p in
sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
if p.strip()
]
    def exec_install(self, items, action, pkgs, res):
        """Run one yum install/downgrade transaction for pkgs, updating res.

        :param items: original user-supplied specs (used for URL error checks)
        :param action: yum subcommand to run, 'install' or 'downgrade'
        :param pkgs: resolved package arguments to pass to yum
        :param res: result dict (mutated in place and returned)
        """
        cmd = self.yum_basecmd + [action] + pkgs
        if self.releasever:
            cmd.extend(['--releasever=%s' % self.releasever])

        # In check mode report the would-be change without running yum.
        if self.module.check_mode:
            self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
        else:
            res['changes'] = dict(installed=pkgs)

        # yum output is screen-scraped below, so force a parsable locale
        locale = get_best_parsable_locale(self.module)
        lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
        rc, out, err = self.module.run_command(cmd, environ_update=lang_env)

        if rc == 1:
            for spec in items:
                # Fail on invalid urls:
                if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
                    err = 'Package at %s could not be installed' % spec
                    self.module.fail_json(changed=False, msg=err, rc=rc)

        res['rc'] = rc
        res['results'].append(out)
        res['msg'] += err
        res['changed'] = True

        if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
            res['changed'] = False

        if rc != 0:
            res['changed'] = False
            self.module.fail_json(**res)

        # Fail if yum prints 'No space left on device' because that means some
        # packages failed executing their post install scripts because of lack of
        # free space (e.g. kernel package couldn't generate initramfs). Note that
        # yum can still exit with rc=0 even if some post scripts didn't execute
        # correctly.
        # NOTE(review): `out or err` only inspects err when out is empty -- a
        # message present only in err while out is non-empty would be missed;
        # confirm this is intentional.
        if 'No space left on device' in (out or err):
            res['changed'] = False
            res['msg'] = 'No space left on device'
            self.module.fail_json(**res)

        # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
        # look for each pkg in rpmdb
        # look for each pkg via obsoletes

        return res
    def install(self, items, repoq):
        """Ensure every spec in items is installed (state=present/installed).

        Handles local/remote .rpm files, @group/@^environment specs and plain
        package specs. Specs needing a downgrade (when allow_downgrade is on)
        are collected separately so they can go through 'yum downgrade'
        instead of 'yum install'; both batches are delegated to exec_install.

        :param items: list of user-supplied package specs
        :param repoq: repoquery base command list (or falsy for the yum API)
        :returns: result dict suitable for module exit
        """
        pkgs = []
        downgrade_pkgs = []
        res = {}
        res['results'] = []
        res['msg'] = ''
        res['rc'] = 0
        res['changed'] = False

        for spec in items:
            pkg = None
            downgrade_candidate = False

            # check if pkgspec is installed (if possible for idempotence)
            if spec.endswith('.rpm') or '://' in spec:
                if '://' not in spec and not os.path.exists(spec):
                    res['msg'] += "No RPM file matching '%s' found on system" % spec
                    res['results'].append("No RPM file matching '%s' found on system" % spec)
                    res['rc'] = 127  # Ensure the task fails in with-loop
                    self.module.fail_json(**res)

                if '://' in spec:
                    with self.set_env_proxy():
                        package = fetch_file(self.module, spec)
                    if not package.endswith('.rpm'):
                        # yum requires a local file to have the extension of .rpm and we
                        # can not guarantee that from an URL (redirects, proxies, etc)
                        new_package_path = '%s.rpm' % package
                        os.rename(package, new_package_path)
                        package = new_package_path
                else:
                    package = spec

                # most common case is the pkg is already installed
                envra = self.local_envra(package)
                if envra is None:
                    self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
                installed_pkgs = self.is_installed(repoq, envra)
                if installed_pkgs:
                    res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
                    continue

                (name, ver, rel, epoch, arch) = splitFilename(envra)
                installed_pkgs = self.is_installed(repoq, name)

                # case for two same envr but different archs like x86_64 and i686
                if len(installed_pkgs) == 2:
                    (cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
                    (cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])

                    # missing epoch is normalized to '0' before comparing
                    cur_epoch0 = cur_epoch0 or '0'
                    cur_epoch1 = cur_epoch1 or '0'
                    compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
                    if compare == 0 and cur_arch0 != cur_arch1:
                        for installed_pkg in installed_pkgs:
                            if installed_pkg.endswith(arch):
                                installed_pkgs = [installed_pkg]

                if len(installed_pkgs) == 1:
                    installed_pkg = installed_pkgs[0]
                    (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
                    cur_epoch = cur_epoch or '0'
                    compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))

                    # compare > 0 -> higher version is installed
                    # compare == 0 -> exact version is installed
                    # compare < 0 -> lower version is installed
                    if compare > 0 and self.allow_downgrade:
                        downgrade_candidate = True
                    elif compare >= 0:
                        continue

                # else: if there are more installed packages with the same name, that would mean
                # kernel, gpg-pubkey or like, so just let yum deal with it and try to install it

                pkg = package

            # groups
            elif spec.startswith('@'):
                if self.is_group_env_installed(spec):
                    continue

                pkg = spec

            # range requires or file-requires or pkgname :(
            else:
                # most common case is the pkg is already installed and done
                # short circuit all the bs - and search for it as a pkg in is_installed
                # if you find it then we're done
                if not set(['*', '?']).intersection(set(spec)):
                    installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
                    if installed_pkgs:
                        res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
                        continue

                # look up what pkgs provide this
                pkglist = self.what_provides(repoq, spec)
                if not pkglist:
                    res['msg'] += "No package matching '%s' found available, installed or updated" % spec
                    res['results'].append("No package matching '%s' found available, installed or updated" % spec)
                    res['rc'] = 126  # Ensure the task fails in with-loop
                    self.module.fail_json(**res)

                # if any of the packages are involved in a transaction, fail now
                # so that we don't hang on the yum operation later
                conflicts = self.transaction_exists(pkglist)
                if conflicts:
                    res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                    res['rc'] = 125  # Ensure the task fails in with-loop
                    self.module.fail_json(**res)

                # if any of them are installed
                # then nothing to do

                found = False
                for this in pkglist:
                    if self.is_installed(repoq, this, is_pkg=True):
                        found = True
                        res['results'].append('%s providing %s is already installed' % (this, spec))
                        break

                # if the version of the pkg you have installed is not in ANY repo, but there are
                # other versions in the repos (both higher and lower) then the previous checks won't work.
                # so we check one more time. This really only works for pkgname - not for file provides or virt provides
                # but virt provides should be all caught in what_provides on its own.
                # highly irritating
                if not found:
                    if self.is_installed(repoq, spec):
                        found = True
                        res['results'].append('package providing %s is already installed' % (spec))

                if found:
                    continue

                # Downgrade - The yum install command will only install or upgrade to a spec version, it will
                # not install an older version of an RPM even if specified by the install spec. So we need to
                # determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
                if self.allow_downgrade:
                    for package in pkglist:
                        # Get the NEVRA of the requested package using pkglist instead of spec because pkglist
                        # contains consistently-formatted package names returned by yum, rather than user input
                        # that is often not parsed correctly by splitFilename().
                        (name, ver, rel, epoch, arch) = splitFilename(package)

                        # Check if any version of the requested package is installed
                        inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
                        if inst_pkgs:
                            (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
                            compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
                            if compare > 0:
                                downgrade_candidate = True
                            else:
                                downgrade_candidate = False
                                break

                # If package needs to be installed/upgraded/downgraded, then pass in the spec
                # we could get here if nothing provides it but that's not
                # the error we're catching here
                pkg = spec

            if downgrade_candidate and self.allow_downgrade:
                downgrade_pkgs.append(pkg)
            else:
                pkgs.append(pkg)

        if downgrade_pkgs:
            res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)

        if pkgs:
            res = self.exec_install(items, 'install', pkgs, res)

        return res
    def remove(self, items, repoq):
        """Uninstall the given package specs ('@'-prefixed entries are yum
        groups/environments).
        Only specs that are currently installed are passed to yum; the rest
        are reported as already absent. May exit the module directly in
        check mode or on failure. Returns the result dict
        (results/msg/changed/rc).
        """
        pkgs = []
        res = {}
        res['results'] = []
        res['msg'] = ''
        res['changed'] = False
        res['rc'] = 0
        # split specs into those actually present (to remove) and the rest
        for pkg in items:
            if pkg.startswith('@'):
                installed = self.is_group_env_installed(pkg)
            else:
                installed = self.is_installed(repoq, pkg)
            if installed:
                pkgs.append(pkg)
            else:
                res['results'].append('%s is not installed' % pkg)
        if pkgs:
            if self.module.check_mode:
                self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
            else:
                res['changes'] = dict(removed=pkgs)
            # run an actual yum transaction
            if self.autoremove:
                cmd = self.yum_basecmd + ["autoremove"] + pkgs
            else:
                cmd = self.yum_basecmd + ["remove"] + pkgs
            rc, out, err = self.module.run_command(cmd)
            res['rc'] = rc
            res['results'].append(out)
            res['msg'] = err
            if rc != 0:
                if self.autoremove and 'No such command' in out:
                    self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
                else:
                    self.module.fail_json(**res)
            # compile the results into one batch. If anything is changed
            # then mark changed
            # at the end - if we've end up failed then fail out of the rest
            # of the process
            # at this point we check to see if the pkg is no longer present
            self._yum_base = None  # previous YumBase package index is now invalid
            for pkg in pkgs:
                if pkg.startswith('@'):
                    installed = self.is_group_env_installed(pkg)
                else:
                    installed = self.is_installed(repoq, pkg, is_pkg=True)
                if installed:
                    # Return a message so it's obvious to the user why yum failed
                    # and which package couldn't be removed. More details:
                    # https://github.com/ansible/ansible/issues/35672
                    res['msg'] = "Package '%s' couldn't be removed!" % pkg
                    self.module.fail_json(**res)
                res['changed'] = True
        return res
def run_check_update(self):
# run check-update to see if we have packages pending
if self.releasever:
rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'] + ['--releasever=%s' % self.releasever])
else:
rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'])
return rc, out, err
@staticmethod
def parse_check_update(check_update_output):
# preprocess string and filter out empty lines so the regex below works
out = '\n'.join((l for l in check_update_output.splitlines() if l))
# Remove incorrect new lines in longer columns in output from yum check-update
# yum line wrapping can move the repo to the next line:
# some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
# some-repo-label
out = re.sub(r'\n\W+(.*)', r' \1', out)
updates = {}
obsoletes = {}
for line in out.split('\n'):
line = line.split()
"""
Ignore irrelevant lines:
- '*' in line matches lines like mirror lists: "* base: mirror.corbina.net"
- len(line) != 3 or 6 could be strings like:
"This system is not registered with an entitlement server..."
- len(line) = 6 is package obsoletes
- checking for '.' in line[0] (package name) likely ensures that it is of format:
"package_name.arch" (coreutils.x86_64)
"""
if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
continue
pkg, version, repo = line[0], line[1], line[2]
name, dist = pkg.rsplit('.', 1)
if name not in updates:
updates[name] = []
updates[name].append({'version': version, 'dist': dist, 'repo': repo})
if len(line) == 6:
obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
if obsolete_name not in obsoletes:
obsoletes[obsolete_name] = []
obsoletes[obsolete_name].append({'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo})
return updates, obsoletes
def latest(self, items, repoq):
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {}
pkgs['update'] = []
pkgs['install'] = []
updates = {}
obsoletes = {}
update_all = False
cmd = None
# determine if we're doing an update all
if '*' in items:
update_all = True
rc, out, err = self.run_check_update()
if rc == 0 and update_all:
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif rc == 100:
updates, obsoletes = self.parse_check_update(out)
elif rc == 1:
res['msg'] = err
res['rc'] = rc
self.module.fail_json(**res)
if update_all:
cmd = self.yum_basecmd + ['update']
will_update = set(updates.keys())
will_update_from_other_package = dict()
else:
will_update = set()
will_update_from_other_package = dict()
for spec in items:
# some guess work involved with groups. update @<group> will install the group if missing
if spec.startswith('@'):
pkgs['update'].append(spec)
will_update.add(spec)
continue
# check if pkgspec is installed (if possible for idempotence)
# localpkg
if spec.endswith('.rpm') and '://' not in spec:
if not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# get the pkg e:name-v-r.arch
envra = self.local_envra(spec)
if envra is None:
self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
# local rpm files can't be updated
if self.is_installed(repoq, envra):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
continue
# URL
if '://' in spec:
# download package so that we can check if it's already installed
with self.set_env_proxy():
package = fetch_file(self.module, spec)
envra = self.local_envra(package)
if envra is None:
self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
# local rpm files can't be updated
if self.is_installed(repoq, envra):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
continue
# dep/pkgname - find it
if self.is_installed(repoq, spec):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = self.what_provides(repoq, spec)
# FIXME..? may not be desirable to throw an exception here if a single package is missing
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
self.module.fail_json(**res)
nothing_to_do = True
for pkg in pkglist:
if spec in pkgs['install'] and self.is_available(repoq, pkg):
nothing_to_do = False
break
# this contains the full NVR and spec could contain wildcards
# or virtual provides (like "python-*" or "smtp-daemon") while
# updates contains name only.
pkgname, _, _, _, _ = splitFilename(pkg)
if spec in pkgs['update'] and pkgname in updates:
nothing_to_do = False
will_update.add(spec)
# Massage the updates list
if spec != pkgname:
# For reporting what packages would be updated more
# succinctly
will_update_from_other_package[spec] = pkgname
break
if not self.is_installed(repoq, spec) and self.update_only:
res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
continue
if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
continue
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = self.transaction_exists(pkglist)
if conflicts:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
res['rc'] = 128 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# check_mode output
to_update = []
for w in will_update:
if w.startswith('@'):
# yum groups
to_update.append((w, None))
elif w not in updates:
# There are (at least, probably more) 2 ways we can get here:
#
# * A virtual provides (our user specifies "webserver", but
# "httpd" is the key in 'updates').
#
# * A wildcard. emac* will get us here if there's a package
# called 'emacs' in the pending updates list. 'updates' will
# of course key on 'emacs' in that case.
other_pkg = will_update_from_other_package[w]
# We are guaranteed that: other_pkg in updates
# ...based on the logic above. But we only want to show one
# update in this case (given the wording of "at least") below.
# As an example, consider a package installed twice:
# foobar.x86_64, foobar.i686
# We want to avoid having both:
# ('foo*', 'because of (at least) foobar-1.x86_64 from repo')
# ('foo*', 'because of (at least) foobar-1.i686 from repo')
# We just pick the first one.
#
# TODO: This is something that might be nice to change, but it
# would be a module UI change. But without it, we're
# dropping potentially important information about what
# was updated. Instead of (given_spec, random_matching_package)
# it'd be nice if we appended (given_spec, [all_matching_packages])
#
# ... But then, we also drop information if multiple
# different (distinct) packages match the given spec and
# we should probably fix that too.
pkg = updates[other_pkg][0]
to_update.append(
(
w,
'because of (at least) %s-%s.%s from %s' % (
other_pkg,
pkg['version'],
pkg['dist'],
pkg['repo']
)
)
)
else:
# Otherwise the spec is an exact match
for pkg in updates[w]:
to_update.append(
(
w,
'%s.%s from %s' % (
pkg['version'],
pkg['dist'],
pkg['repo']
)
)
)
if self.update_only:
res['changes'] = dict(installed=[], updated=to_update)
else:
res['changes'] = dict(installed=pkgs['install'], updated=to_update)
if obsoletes:
res['obsoletes'] = obsoletes
# return results before we actually execute stuff
if self.module.check_mode:
if will_update or pkgs['install']:
res['changed'] = True
return res
if self.releasever:
cmd.extend(['--releasever=%s' % self.releasever])
# run commands
if cmd: # update all
rc, out, err = self.module.run_command(cmd)
res['changed'] = True
elif self.update_only:
if pkgs['update']:
cmd = self.yum_basecmd + ['update'] + pkgs['update']
locale = get_best_parsable_locale(self.module)
lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if not out_lower.endswith("no packages marked for update") and \
not out_lower.endswith("nothing to do"):
res['changed'] = True
else:
rc, out, err = [0, '', '']
elif pkgs['install'] or will_update and not self.update_only:
cmd = self.yum_basecmd + ['install'] + pkgs['install'] + pkgs['update']
locale = get_best_parsable_locale(self.module)
lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if not out_lower.endswith("no packages marked for update") and \
not out_lower.endswith("nothing to do"):
res['changed'] = True
else:
rc, out, err = [0, '', '']
res['rc'] = rc
res['msg'] += err
res['results'].append(out)
if rc:
res['failed'] = True
return res
    def ensure(self, repoq):
        """Assemble the global yum command line from the module options and
        dispatch to install()/remove()/latest() based on self.state.

        repoq -- base repoquery argv list or None; '-c <conf_file>' is
                 appended to it here when a custom conf_file is in use.
        Returns the result dict produced by the dispatched action.
        """
        pkgs = self.names
        # autoremove was provided without `name`
        if not self.names and self.autoremove:
            pkgs = []
            self.state = 'absent'
        if self.conf_file and os.path.exists(self.conf_file):
            self.yum_basecmd += ['-c', self.conf_file]
            if repoq:
                repoq += ['-c', self.conf_file]
        # translate each module option into its yum command-line flag
        if self.skip_broken:
            self.yum_basecmd.extend(['--skip-broken'])
        if self.disablerepo:
            self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])
        if self.enablerepo:
            self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])
        if self.enable_plugin:
            self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])
        if self.disable_plugin:
            self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])
        if self.exclude:
            e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
            self.yum_basecmd.extend(e_cmd)
        if self.disable_excludes:
            self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])
        if self.cacheonly:
            self.yum_basecmd.extend(['--cacheonly'])
        if self.download_only:
            self.yum_basecmd.extend(['--downloadonly'])
            if self.download_dir:
                self.yum_basecmd.extend(['--downloaddir=%s' % self.download_dir])
        if self.releasever:
            self.yum_basecmd.extend(['--releasever=%s' % self.releasever])
        if self.installroot != '/':
            # do not setup installroot by default, because of error
            # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
            # in old yum version (like in CentOS 6.6)
            e_cmd = ['--installroot=%s' % self.installroot]
            self.yum_basecmd.extend(e_cmd)
        if self.state in ('installed', 'present', 'latest'):
            """ The need of this entire if conditional has to be changed
                this function is the ensure function that is called
                in the main section.
                This conditional tends to disable/enable repo for
                install present latest action, same actually
                can be done for remove and absent action
                As solution I would advice to cal
                try: self.yum_base.repos.disableRepo(disablerepo)
                and
                try: self.yum_base.repos.enableRepo(enablerepo)
                right before any yum_cmd is actually called regardless
                of yum action.
                Please note that enable/disablerepo options are general
                options, this means that we can call those with any action
                option. https://linux.die.net/man/8/yum
                This docstring will be removed together when issue: #21619
                will be solved.
                This has been triggered by: #19587
            """
            if self.update_cache:
                self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
            try:
                current_repos = self.yum_base.repos.repos.keys()
                if self.enablerepo:
                    try:
                        new_repos = self.yum_base.repos.repos.keys()
                        for i in new_repos:
                            if i not in current_repos:
                                rid = self.yum_base.repos.getRepo(i)
                                # NOTE(review): accessing repoXML presumably forces the repo
                                # metadata to load so broken repos fail here — confirm
                                a = rid.repoXML.repoid  # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
                        current_repos = new_repos
                    except yum.Errors.YumBaseError as e:
                        self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
            except yum.Errors.YumBaseError as e:
                self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
        if self.state == 'latest' or self.update_only:
            if self.disable_gpg_check:
                self.yum_basecmd.append('--nogpgcheck')
            if self.security:
                self.yum_basecmd.append('--security')
            if self.bugfix:
                self.yum_basecmd.append('--bugfix')
            res = self.latest(pkgs, repoq)
        elif self.state in ('installed', 'present'):
            if self.disable_gpg_check:
                self.yum_basecmd.append('--nogpgcheck')
            res = self.install(pkgs, repoq)
        elif self.state in ('removed', 'absent'):
            res = self.remove(pkgs, repoq)
        else:
            # should be caught by AnsibleModule argument_spec
            self.module.fail_json(
                msg="we should never get here unless this all failed",
                changed=False,
                results='',
                errors='unexpected state'
            )
        return res
@staticmethod
def has_yum():
return HAS_YUM_PYTHON
    def run(self):
        """
        actually execute the module code backend

        May respawn under /usr/bin/python to reach the Python 2 yum/rpm
        bindings, handles the cache-update-only and list= modes, and
        otherwise defers to ensure(). Always terminates via
        module.exit_json()/fail_json().
        """
        if (not HAS_RPM_PYTHON or not HAS_YUM_PYTHON) and sys.executable != '/usr/bin/python' and not has_respawned():
            respawn_module('/usr/bin/python')
            # end of the line for this process; we'll exit here once the respawned module has completed
        # collect messages for any bindings that are still missing after respawn
        error_msgs = []
        if not HAS_RPM_PYTHON:
            error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
        if not HAS_YUM_PYTHON:
            error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
        self.wait_for_lock()
        if error_msgs:
            self.module.fail_json(msg='. '.join(error_msgs))
        # fedora will redirect yum to dnf, which has incompatibilities
        # with how this module expects yum to operate. If yum-deprecated
        # is available, use that instead to emulate the old behaviors.
        if self.module.get_bin_path('yum-deprecated'):
            yumbin = self.module.get_bin_path('yum-deprecated')
        else:
            yumbin = self.module.get_bin_path('yum')
        # need debug level 2 to get 'Nothing to do' for groupinstall.
        self.yum_basecmd = [yumbin, '-d', '2', '-y']
        # `update_cache` with no names and no list= means: refresh cache and exit
        if self.update_cache and not self.names and not self.list:
            rc, stdout, stderr = self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
            if rc == 0:
                self.module.exit_json(
                    changed=False,
                    msg="Cache updated",
                    rc=rc,
                    results=[]
                )
            else:
                self.module.exit_json(
                    changed=False,
                    msg="Failed to update cache",
                    rc=rc,
                    results=[stderr],
                )
        repoquerybin = self.module.get_bin_path('repoquery', required=False)
        if self.install_repoquery and not repoquerybin and not self.module.check_mode:
            yum_path = self.module.get_bin_path('yum')
            if yum_path:
                if self.releasever:
                    self.module.run_command('%s -y install yum-utils --releasever %s' % (yum_path, self.releasever))
                else:
                    self.module.run_command('%s -y install yum-utils' % yum_path)
            repoquerybin = self.module.get_bin_path('repoquery', required=False)
        if self.list:
            if not repoquerybin:
                self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
            results = {'results': self.list_stuff(repoquerybin, self.list)}
        else:
            # If rhn-plugin is installed and no rhn-certificate is available on
            # the system then users will see an error message using the yum API.
            # Use repoquery in those cases.
            repoquery = None
            try:
                # private attribute of YumBase; AttributeError (handled below)
                # means the plugin list is unavailable
                yum_plugins = self.yum_base.plugins._plugins
            except AttributeError:
                pass
            else:
                if 'rhnplugin' in yum_plugins:
                    if repoquerybin:
                        repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
                        if self.installroot != '/':
                            repoquery.extend(['--installroot', self.installroot])
                        if self.disable_excludes:
                            # repoquery does not support --disableexcludes,
                            # so make a temp copy of yum.conf and get rid of the 'exclude=' line there
                            try:
                                with open('/etc/yum.conf', 'r') as f:
                                    content = f.readlines()
                                tmp_conf_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, delete=False)
                                self.module.add_cleanup_file(tmp_conf_file.name)
                                tmp_conf_file.writelines([c for c in content if not c.startswith("exclude=")])
                                tmp_conf_file.close()
                            except Exception as e:
                                self.module.fail_json(msg="Failure setting up repoquery: %s" % to_native(e))
                            repoquery.extend(['-c', tmp_conf_file.name])
            results = self.ensure(repoquery)
            if repoquery:
                results['msg'] = '%s %s' % (
                    results.get('msg', ''),
                    'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
                )
        self.module.exit_json(**results)
def main():
    # state=installed|removed|latest with name=pkgspec, plus informational
    # queries via list= (installed, updates, available, repos, or a pkgspec).
    yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])
    module = AnsibleModule(**yumdnf_argument_spec)
    YumModule(module).run()
if __name__ == '__main__':
    main()
| nitzmahone/ansible | lib/ansible/modules/yum.py | Python | gpl-3.0 | 72,676 |
"""
Created on Oct 31, 2011
@author: bolme and sohara
Original version by David Bolme.
Modified 2014 by Stephen O'Hara to support additional capabilities
and to add an interface for capturing polygons.
Modified 2017 by Stephen O'Hara to forward-port to Pyvision3 and for PEP 8 compliance
"""
import pyvision3 as pv3
import cv2
def null_callback(*args, **kwargs):
    """Do-nothing callback placeholder; accepts and ignores any arguments."""
    return None
class CaptureClicks:
    """
    This object handles the data management and display of the capture clicks window.
    It shows ``im`` in an OpenCV window, collects left-click points, and
    display() returns default_points + the newly clicked points.
    """
    def __init__(
        self,
        im,
        default_points=None,
        keep_window_open=False,
        window="PyVision Capture Points",
        pos=None,
    ):
        """
        Initialize the data.

        im -- pyvision image to annotate (must support .copy(); the copy
            must support annotate_* and .show()).
        default_points -- optional pre-existing points, drawn in yellow and
            always included in the list returned by display().
        keep_window_open -- when True, the cv2 window is left open after
            display() returns.
        window -- cv2 window title (also its identifier).
        pos -- optional (x, y) screen position for the window.
        """
        self.window = window
        self.im = im
        self.keep_window_open = keep_window_open
        self._user_quit = False
        self.pos = pos  # position of window
        self.current_points = []
        self.default_points = [] if default_points is None else default_points
        self._show_help = True
    def _clear_last_point(self):
        """Discard the most recently captured point, if any."""
        if self.current_points:
            _ = (
                self.current_points.pop()
            )  # remove most recent element from list and discard
        return
    @staticmethod
    def _draw_instructions(canvas):
        """Draw the help overlay (blue box plus usage text) onto canvas."""
        canvas.annotate_rect((2, 2), (320, 70), color=pv3.RGB_BLUE, thickness=-1)
        text_messages = [
            "Click anywhere in the image to select a point.",
            "Press 'r' to reset.",
            "Press 'x' to delete the recent point.",
            "Press the space bar when finished.",
            "Press 'h' to toggle display of this help text.",
        ]
        for idx, txt in enumerate(text_messages):
            canvas.annotate_text(
                txt,
                (10, 10 * (idx + 1)),
                color=pv3.RGB_WHITE,
                font_face=cv2.FONT_HERSHEY_COMPLEX_SMALL,
                font_scale=0.5,
            )
    @staticmethod
    def _draw_points(canvas, points, color=pv3.RGB_YELLOW):
        """Mark each point and its 1-based index on canvas."""
        for idx, pt in enumerate(points):
            canvas.annotate_point(pt, color=color)
            canvas.annotate_text(str(idx + 1), pt, color=color)
    def _update_image(self):
        """
        Renders the annotations on top of the current image
        (help overlay, default points in yellow, new points in red)
        and stores the result in self.canvas.
        """
        canvas = self.im.copy()
        if self._show_help:
            self._draw_instructions(canvas)
        if self.default_points:
            self._draw_points(canvas, self.default_points, color=pv3.RGB_YELLOW)
        if self.current_points:
            self._draw_points(canvas, self.current_points, color=pv3.RGB_RED)
        self.canvas = canvas
    def display(self):
        """
        Display the window and run the main event loop.
        Returns default_points + captured points. Also returns on 'q'
        (check self._user_quit to distinguish quit from normal finish).
        """
        # Setup the mouse callback to handle mouse events (optional)
        cv2.namedWindow(self.window)
        if self.pos:
            cv2.moveWindow(self.window, *self.pos)
        cv2.setMouseCallback(self.window, self.mouse_callback)
        while True:
            self._update_image()
            key_press = self.canvas.show(
                self.window, delay=100, annotations_opacity=1.0
            )
            # keep only the low byte so ord() comparisons work across platforms
            key_press = key_press % 256
            # Handle key press events.
            if key_press == ord(" "):
                break
            if key_press == ord("h"):
                self._show_help = not self._show_help
            if key_press == ord("q"):
                self._user_quit = True
                break
            if key_press == ord("x"):
                self._clear_last_point()
            if key_press == ord("r"):
                self.reset()
        if not self.keep_window_open:
            cv2.destroyWindow(self.window)
        all_points = self.default_points + self.current_points
        return all_points
    def reset(self):
        """
        Clear the points and start over.
        """
        self.current_points = []
    def mouse_callback(self, event, x, y, flags, param):
        """
        Call back function for mouse events.
        Appends (x, y) to current_points on a left-button press.
        """
        if event in [cv2.EVENT_LBUTTONDOWN]:
            point = (x, y)
            self.current_points.append(point)
| svohara/pyvision3 | pyvision3/dataset_tools/capture_clicks.py | Python | mit | 4,216 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class cashFlowdistribution(orm.Model):
_name = "cash.flow.distribution"
_inherit = "account.distribution.line"
_description = "Cash Flow Distribution"
#======== Check distribution percentage. Use distribution_percentage_sum in account.move.line to check
def _check_distribution_percentage_cash(self, cr, uid, ids, context=None):
for distribution in self.browse(cr, uid, ids, context=context):
#distribution_percentage_sum compute all the percentages for a specific move line.
line_percentage = distribution.account_move_line_id.distribution_percentage_sum_cash or 0.0
line_percentage_remaining = 100 - line_percentage
if distribution.distribution_percentage > line_percentage_remaining:
return False
return True
#========= Check distribution percentage. Use distribution_amount_sum in account.move.line to check
def _check_distribution_amount_cash(self, cr, uid, ids, context=None):
amount = 0.0
for distribution in self.browse(cr, uid, ids, context=context):
#==== distribution_amount_sum compute all the percentages for a specific move line.
line_amount_dis = distribution.account_move_line_id.distribution_amount_sum_cash or 0.0
#=====Find amount for the move_line
if distribution.account_move_line_id.credit > 0:
amount = distribution.account_move_line_id.credit
if distribution.account_move_line_id.debit > 0:
amount = distribution.account_move_line_id.debit
#====Check which is the remaining between the amount line and sum of amount in distributions.
amount_remaining = amount - line_amount_dis
x = distribution.distribution_amount
if distribution.distribution_amount > amount_remaining:
return False
return True
_columns = {
'reconcile_ids': fields.many2many('account.move.reconcile', 'cash_reconcile_distribution_ids', string='Cash Reconcile Distributions',),
'type': fields.selection([('type_cash_flow', 'Type Cash Flow'),('move_cash_flow', 'Moves Cash Flow')], 'Distribution Cash Flow Type', select=True),
}
_constraints = [
(_check_distribution_percentage_cash, 'The cash flow distribution percentage can not be greater than sum of all percentage for the account move line selected', ['account_move_line_id']),
(_check_distribution_amount_cash, 'The cash flow distribution amount can not be greater than maximum amount of remaining amount for account move line selected', ['distribution_amount']),
]
_defaults = {
'type': 'move_cash_flow',
'distribution_amount': 0.0,
'distribution_percentage': 0.0,
}
#Line is an object
def get_amounts_distribution(self, cr, uid, line, lines_distribution_list):
amount = 0.0
amount_line = 0.0
for distribution in lines_distribution_list:
amount += distribution.distribution_amount
#Amount line
if line.debit > 0.0:
amount_line = line.debit
else:
amount_line = line.credit
if amount == amount_line:
return 0.0
else:
return abs (amount_line - amount) | sysadminmatmoz/odoo-clearcorp | TODO-8.0/cash_flow_report/cash_flow_distribution.py | Python | agpl-3.0 | 4,621 |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The iosxr_lacp class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.iosxr.facts.facts import Facts
from ansible.module_utils.network.common.utils import dict_diff
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import remove_empties
from ansible.module_utils.network.iosxr. \
utils.utils import flatten_dict
class Lacp(ConfigBase):
    """
    Config class for global LACP settings on IOS-XR.
    Compares the desired configuration (module params) with the current
    one (facts) and produces the CLI command set bridging the two.
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lacp',
    ]

    def __init__(self, module):
        super(Lacp, self).__init__(module)

    def get_lacp_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current LACP configuration, or {} when unset
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        current = facts['ansible_network_resources'].get('lacp')
        return current if current else {}

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()

        before = self.get_lacp_facts()
        commands = []
        commands.extend(self.set_config(before))
        if commands:
            if not self._module.check_mode:
                self._connection.edit_config(commands)
            result['changed'] = True
        result['commands'] = commands

        after = self.get_lacp_facts()
        result['before'] = before
        if result['changed']:
            result['after'] = after
        result['warnings'] = warnings
        return result

    def set_config(self, existing_lacp_facts):
        """ Build the command list from desired params vs current facts

        :rtype: A list
        :returns: the commands necessary to migrate the current
                  configuration to the desired one
        """
        desired = self._module.params.get('config') or {}
        return to_list(self.set_state(desired, existing_lacp_facts))

    def set_state(self, want, have):
        """ Select the appropriate handler for the requested state

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands produced by the selected state handler
        """
        state = self._module.params['state']
        if state == 'deleted':
            commands = self._state_deleted(want, have)
        elif state == 'merged':
            commands = self._state_merged(want, have)
        elif state == 'replaced':
            commands = self._state_replaced(want, have)
        return commands

    @staticmethod
    def _state_replaced(want, have):
        """ Replaced == remove everything unwanted, then merge the wanted

        :rtype: A list
        """
        commands = []
        commands.extend(Lacp._state_deleted(want, have))
        commands.extend(Lacp._state_merged(want, have))
        return commands

    @staticmethod
    def _state_merged(want, have):
        """ Emit 'lacp system ...' for every system value that differs

        :rtype: A list
        """
        commands = []
        delta = dict_diff(have, want)
        if delta:
            flattened = flatten_dict(remove_empties(delta['system']))
            for attr, value in iteritems(flattened):
                # the device CLI keyword is 'mac', not 'address'
                keyword = attr.replace('address', 'mac')
                commands.append('lacp system %s %s' % (keyword, value))
        return commands

    @staticmethod
    def _state_deleted(want, have):
        """ Emit 'no lacp system ...' for configured keys absent from want

        :rtype: A list
        """
        commands = []
        wanted = remove_empties(want.get('system', {}))
        for key in have.get('system', {}):
            if key not in wanted:
                commands.append('no lacp system %s' % key)
        return commands
| resmo/ansible | lib/ansible/module_utils/network/iosxr/config/lacp/lacp.py | Python | gpl-3.0 | 5,221 |
#-------------------------------------------------------------------------------
# Name: AttributeExploration.py
# Purpose: class for attribute exploration
#
# Author: Jakob Kogler
#-------------------------------------------------------------------------------
class AttributeExploration:
    """Interactive attribute exploration (next-closure enumeration).

    Attribute sets are bitmasks: attribute i of ``attributes`` maps to bit
    ``2 ** (len(attributes) - 1 - i)``. ``objects`` holds one such bitmask
    per known object.
    """
    def __init__(self, attributes, objects):
        self.attributes = attributes
        self.attributeCount = len(attributes)
        self.objects = objects
        # last closed premise handled (lectic enumeration state)
        self.B = 0
        # accepted implications as (premise, conclusion) bitmask pairs
        self.implicationsBasis = []
        # True while an implication awaits accept/reject
        self.waitForResponse = False
    def getNextImplication(self):
        """Return the next open implication as a (premise, conclusion)
        bitmask pair, or None when exploration is finished or while a
        previous implication still awaits a response."""
        if self.waitForResponse:
            return None
        def apply_basis_once(attr_set):
            # one L* round: add conclusions of implications whose premise
            # is a proper subset of attr_set
            for premise, conclusion in self.implicationsBasis:
                if premise & attr_set == premise and premise != attr_set:
                    attr_set |= conclusion
            return attr_set
        full_set = 2 ** self.attributeCount - 1
        rescan = True
        while rescan:
            rescan = False
            for i in reversed(range(self.attributeCount)):
                j = self.attributeCount - 1 - i
                bit = 2 ** j
                # mask covering attributes m_1 .. m_{i-1}
                smaller = 2 ** self.attributeCount - 2 * bit
                candidate = (self.B & smaller) | bit
                # iterate L* to its fixpoint (syntactic closure)
                closed = apply_basis_once(candidate)
                while closed != candidate:
                    candidate = closed
                    closed = apply_basis_once(closed)
                # keep only the lectically next closure after B
                if (closed & ~self.B & bit == 0) or (self.B & smaller != closed & smaller):
                    continue
                # semantic closure P'' over the known objects
                extent = [obj for obj in self.objects if obj & closed == closed]
                intent = full_set
                for obj in extent:
                    intent &= obj
                if closed == intent:
                    # closed premise => trivial implication: advance and rescan
                    self.B = closed
                    rescan = True
                    break
                # open implication found; wait for the expert's verdict
                self.implication = (closed, intent)
                self.waitForResponse = True
                return self.implication
        return None
    def acceptImplication(self):
        """Confirm the pending implication, adding it to the basis."""
        if not self.waitForResponse:
            return
        self.waitForResponse = False
        self.implicationsBasis.append(self.implication)
        self.B = self.implication[0]
    def rejectImplication(self, counterExample):
        """Reject the pending implication; ``counterExample`` is the
        attribute bitmask of an object violating it."""
        if self.waitForResponse:
            self.waitForResponse = False
self.objects.append(counterExample) | jakobkogler/AttributeExploration | AttributeExploration.py | Python | mit | 2,401 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Runs on buildbot slave, creating a release package using the build
# system and zipping it into buildbot_upload.zip. This is then uploaded
# to the master in the next buildbot step.
import os
import subprocess
import sys
import zipfile
# Top-level flow: read the builder name (and optional branch) from argv,
# then package a finished build either via SCons (which does its own
# packaging, possibly inside a schroot) or via CMake/CPack, and finally
# produce buildbot_upload.zip for the buildbot master to fetch.
# get builder name
if len(sys.argv) < 2:
    sys.stderr.write("Not enough arguments, expecting builder name\n")
    sys.exit(1)

builder = sys.argv[1]
# Never write branch if it is master.
branch = sys.argv[2] if (len(sys.argv) >= 3 and sys.argv[2] != 'master') else ''

# scons does own packaging
if builder.find('scons') != -1:
    python_bin = 'python'
    if builder.find('linux') != -1:
        python_bin = '/opt/lib/python-2.7/bin/python2.7'

    os.chdir('../blender.git')
    scons_options = ['BF_QUICK=slnt', 'BUILDBOT_BRANCH=' + branch, 'buildslave', 'BF_FANCY=False']

    buildbot_dir = os.path.dirname(os.path.realpath(__file__))
    config_dir = os.path.join(buildbot_dir, 'config')
    build_dir = os.path.join('..', 'build', builder)
    install_dir = os.path.join('..', 'install', builder)

    # Linux builds run inside a Debian squeeze schroot so the produced
    # binaries stay glibc-2.11 compatible; Mesa software-GL libs and a
    # launcher script are bundled into the install directory afterwards.
    if builder.find('linux') != -1:
        scons_options += ['WITH_BF_NOBLENDER=True', 'WITH_BF_PLAYER=False',
                          'BF_BUILDDIR=' + build_dir,
                          'BF_INSTALLDIR=' + install_dir,
                          'WITHOUT_BF_INSTALL=True']

        config = None
        bits = None

        if builder.endswith('linux_glibc211_x86_64_scons'):
            config = 'user-config-glibc211-x86_64.py'
            chroot_name = 'buildbot_squeeze_x86_64'
            bits = 64
        elif builder.endswith('linux_glibc211_i386_scons'):
            config = 'user-config-glibc211-i686.py'
            chroot_name = 'buildbot_squeeze_i686'
            bits = 32

        if config is not None:
            config_fpath = os.path.join(config_dir, config)
            scons_options.append('BF_CONFIG=' + config_fpath)

        blender = os.path.join(install_dir, 'blender')
        blenderplayer = os.path.join(install_dir, 'blenderplayer')
        subprocess.call(['schroot', '-c', chroot_name, '--', 'strip', '--strip-all', blender, blenderplayer])

        extra = '/' + os.path.join('home', 'sources', 'release-builder', 'extra')
        mesalibs = os.path.join(extra, 'mesalibs' + str(bits) + '.tar.bz2')
        software_gl = os.path.join(extra, 'blender-softwaregl')

        os.system('tar -xpf %s -C %s' % (mesalibs, install_dir))
        os.system('cp %s %s' % (software_gl, install_dir))
        os.system('chmod 755 %s' % (os.path.join(install_dir, 'blender-softwaregl')))

        retcode = subprocess.call(['schroot', '-c', chroot_name, '--', python_bin, 'scons/scons.py'] + scons_options)
        sys.exit(retcode)
    else:
        if builder.find('win') != -1:
            bitness = '32'

            if builder.find('win64') != -1:
                bitness = '64'

            scons_options.append('BF_INSTALLDIR=' + install_dir)
            scons_options.append('BF_BUILDDIR=' + build_dir)
            scons_options.append('BF_BITNESS=' + bitness)
            scons_options.append('WITH_BF_CYCLES_CUDA_BINARIES=True')
            scons_options.append('BF_CYCLES_CUDA_NVCC=nvcc.exe')
            if builder.find('mingw') != -1:
                scons_options.append('BF_TOOLSET=mingw')
            if builder.endswith('vc2013'):
                scons_options.append('MSVS_VERSION=12.0')
                scons_options.append('MSVC_VERSION=12.0')
        elif builder.find('mac') != -1:
            if builder.find('x86_64') != -1:
                config = 'user-config-mac-x86_64.py'
            else:
                config = 'user-config-mac-i386.py'

            scons_options.append('BF_CONFIG=' + os.path.join(config_dir, config))

        retcode = subprocess.call([python_bin, 'scons/scons.py'] + scons_options)
        sys.exit(retcode)
else:
    # CMake
    if 'win' in builder:
        # Remove any stale zips so the freshly produced CPack zip is the
        # only one left to pick up below.
        files = [f for f in os.listdir('.') if os.path.isfile(f) and f.endswith('.zip')]
        for f in files:
            os.remove(f)
        retcode = subprocess.call(['cpack', '-G', 'ZIP'])
        result_file = [f for f in os.listdir('.') if os.path.isfile(f) and f.endswith('.zip')][0]

        # TODO(sergey): Such magic usually happens in SCon's packaging but we don't have it
        #               in the CMake yet. For until then we do some magic here.
        tokens = result_file.split('-')
        blender_version = tokens[1].split('.')
        blender_full_version = '.'.join(blender_version[0:2])
        git_hash = tokens[2].split('.')[1]
        platform = builder.split('_')[0]
        builderified_name = 'blender-{}-{}-{}'.format(blender_full_version, git_hash, platform)
        if branch != '':
            builderified_name = branch + "-" + builderified_name

        os.rename(result_file, "{}.zip".format(builderified_name))
        # create zip file
        try:
            upload_zip = "buildbot_upload.zip"
            if os.path.exists(upload_zip):
                os.remove(upload_zip)
            z = zipfile.ZipFile(upload_zip, "w", compression=zipfile.ZIP_STORED)
            z.write("{}.zip".format(builderified_name))
            z.close()
            sys.exit(retcode)
        except Exception as ex:
            sys.stderr.write('Create buildbot_upload.zip failed' + str(ex) + '\n')
            sys.exit(1)

    # clean release directory if it already exists
    release_dir = 'release'

    if os.path.exists(release_dir):
        for f in os.listdir(release_dir):
            if os.path.isfile(os.path.join(release_dir, f)):
                os.remove(os.path.join(release_dir, f))

    # create release package
    try:
        subprocess.call(['make', 'package_archive'])
    except Exception as ex:
        sys.stderr.write('Make package release failed' + str(ex) + '\n')
        sys.exit(1)

    # find release directory, must exist this time
    if not os.path.exists(release_dir):
        sys.stderr.write("Failed to find release directory %r.\n" % release_dir)
        sys.exit(1)

    # find release package
    file = None
    filepath = None

    for f in os.listdir(release_dir):
        rf = os.path.join(release_dir, f)
        if os.path.isfile(rf) and f.startswith('blender'):
            file = f
            filepath = rf

    if not file:
        sys.stderr.write("Failed to find release package.\n")
        sys.exit(1)

    # create zip file
    try:
        upload_zip = "buildbot_upload.zip"
        if os.path.exists(upload_zip):
            os.remove(upload_zip)
        z = zipfile.ZipFile(upload_zip, "w", compression=zipfile.ZIP_STORED)
        z.write(filepath, arcname=file)
        z.close()
    except Exception as ex:
        sys.stderr.write('Create buildbot_upload.zip failed' + str(ex) + '\n')
        sys.exit(1)
| pawkoz/dyplom | blender/build_files/buildbot/slave_pack.py | Python | gpl-2.0 | 7,424 |
from django.urls import include, re_path
from rest_framework.routers import SimpleRouter
from . import views
# Router exposing the Block viewset under the "block" prefix; the
# module-level name ``block`` is preserved deliberately.
block = SimpleRouter()
block.register('block', views.BlockViewSet, basename='blocklist-block')

# Mount every route the router generated at the root of this URLconf.
urlpatterns = [re_path(r'', include(block.urls))]
| eviljeff/olympia | src/olympia/blocklist/urls.py | Python | bsd-3-clause | 281 |
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
# Band-stop filter orchestra.wav in the STFT domain, plot the input/output
# spectrograms plus the filter shape, and save the filtered audio.
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band stop filter using a hanning window
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
# Bug fix: use floor division — under Python 3, N/2 is a float and is not a
# valid size for np.zeros (TypeError); N//2 behaves identically on Python 2.
filt = np.zeros(N//2)-60
filt[startBin:startBin+nBins] = bandpass
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, fs, w, N, H)
mY,pY = STFT.stftAnal(y, fs, w, N, H)

plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N//2)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)

plt.subplot(312)
plt.plot(fs*np.arange(N//2)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')

plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N//2)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)

plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| acmaheri/sms-tools | lectures/8-Sound-transformations/plots-code/stftFiltering-orchestra.py | Python | agpl-3.0 | 1,648 |
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> import bienstman2_ext
'''
def run(args=None):
    """Execute this module's doctests, optionally replacing sys.argv first.

    Returns doctest's (failure_count, attempted_count) result tuple.
    """
    import doctest
    import sys

    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
if __name__ == '__main__':
    # Script entry point: run the doctests and exit with the failure count,
    # so a zero exit status means every test passed.
    print("running...")
    import sys
    failures = run()[0]
    if failures == 0:
        print("Done.")
    sys.exit(failures)
| satya-das/common | third_party/boost_tp/libs/python/test/bienstman2.py | Python | mit | 529 |
"""
test_api_api
~~~~~~~~~~~~
Tests for the :mod:`~ulid.api.api` module.
"""
import pytest
from ulid import providers
from ulid.api.api import ALL, Api
@pytest.fixture(scope='function')
def mock_provider(mocker):
    """
    Fixture that yields a mock provider whose methods delegate to the
    default provider implementations (so calls can be asserted on while
    still producing real values).
    """
    provider = mocker.Mock(spec=providers.Provider)
    for method_name in ('new', 'timestamp', 'randomness'):
        real_method = getattr(providers.DEFAULT, method_name)
        setattr(provider, method_name, mocker.Mock(side_effect=real_method))
    return provider
@pytest.fixture(scope='function')
def mock_api(mocker, mock_provider):
    """
    Fixture that yields a :class:`~ulid.api.api.Api` instance wired to the
    mock provider fixture.
    """
    api = Api(mock_provider)
    return api
def test_all_defined_expected_methods():
    """
    Assert that :attr:`~ulid.api.api.ALL` exposes the expected public
    interface, grouped here as factories, bound constants, and types.
    """
    factories = [
        'new',
        'parse',
        'create',
        'from_bytes',
        'from_int',
        'from_str',
        'from_uuid',
        'from_timestamp',
        'from_randomness',
    ]
    constants = [
        'MIN_TIMESTAMP',
        'MAX_TIMESTAMP',
        'MIN_RANDOMNESS',
        'MAX_RANDOMNESS',
        'MIN_ULID',
        'MAX_ULID',
    ]
    types = ['Timestamp', 'Randomness', 'ULID']
    assert ALL == factories + constants + types
def test_api_new_calls_provider_new(mock_api):
    """
    Assert :meth:`~ulid.api.api.Api.new` calls :meth:`~ulid.providers.base.Provider.new` for timestamp
    and randomness values.
    """
    mock_api.new()
    # new() takes no arguments, so the provider must be invoked exactly
    # once with an empty call signature.
    mock_api.provider.new.assert_called_once_with()
def test_api_from_timestamp_calls_provider_randomness(mocker, mock_api, valid_bytes_48):
    """
    Assert :meth:`~ulid.api.api.Api.from_timestamp` calls :meth:`~ulid.providers.base.Provider.randomness` for a value.
    """
    mock_api.from_timestamp(valid_bytes_48)
    # The caller supplied the timestamp, so only the randomness half should
    # be generated by the provider.
    mock_api.provider.timestamp.assert_not_called()
    mock_api.provider.randomness.assert_called_once_with(mocker.ANY)
def test_api_from_randomness_calls_provider_timestamp(mock_api, valid_bytes_80):
    """
    Assert :meth:`~ulid.api.api.Api.from_randomness` calls :meth:`~ulid.providers.base.Provider.timestamp` for a value.
    """
    mock_api.from_randomness(valid_bytes_80)
    # The caller supplied the randomness, so only the timestamp half should
    # be generated by the provider.
    mock_api.provider.timestamp.assert_called_once_with()
    mock_api.provider.randomness.assert_not_called()
| ahawker/ulid | tests/test_api_api.py | Python | apache-2.0 | 2,354 |
import os
import tempfile
import shutil
from doublex import Spy
from expects import expect, have_key, raise_error, be
from doublex_expects import have_been_called_with
from pysellus import loader
from pysellus import integrations
from pysellus import integration_config
# Executable specification in mamba/expects style: the nested
# description/context/it blocks are collected and run by the mamba test
# runner, which also injects `self` into before/after/it bodies.  Several
# examples monkey-patch module attributes and restore them afterwards, so
# statement order inside each `it` block is significant.
with description('the integration_config module'):
    with description('loads integrations from a config file'):
        with context('raises FileNotFoundError if the config file is not present at the given path'):
            with before.each:
                self.path_to_directory_without_config_file = tempfile.mkdtemp()

            with it('when the path is to a directory'):
                def attempt_to_read_config_file():
                    integration_config._get_path_to_configuration_file(
                        self.path_to_directory_without_config_file
                    )

                expect(attempt_to_read_config_file).to(raise_error(FileNotFoundError))

            with it('when the path is to a file (in which case its parent directory is considered)'):
                path_to_file_whose_parent_directory_doesnt_contain_config_file = os.path.join(
                    self.path_to_directory_without_config_file,
                    'a_file.py'
                )
                open(path_to_file_whose_parent_directory_doesnt_contain_config_file, 'w').close()

                def attempt_to_read_config_file():
                    integration_config._get_path_to_configuration_file(
                        path_to_file_whose_parent_directory_doesnt_contain_config_file
                    )

                expect(attempt_to_read_config_file).to(raise_error(FileNotFoundError))

            with after.each:
                shutil.rmtree(self.path_to_directory_without_config_file)

        with it('raises an exception if the config file is empty'):
            expect(lambda: integration_config._load_configuration_from_config_file('')).to(
                raise_error(integration_config.EmptyConfigurationFileError)
            )

    with description('loads integrations from a dict'):
        with context('which has a definition section'):
            with it('returns None if it is missing'):
                expect(integration_config._load_custom_integrations({})).to(be(None))

            with context('each definition contains aliases and configurations'):
                with it('aborts the program if any alias is duplicated'):
                    # Pre-register the alias so loading it again collides.
                    config_dict = {'custom_integrations': {'duplicated_name': {}}}
                    integrations.integration_classes['duplicated_name'] = {}

                    expect(lambda: integration_config._load_custom_integrations(config_dict)).to(
                        raise_error(SystemExit)
                    )

                    del integrations.integration_classes['duplicated_name']

            with context('each configuration contains a module path and a class name'):
                with it('aborts the program if either key is missing'):
                    for malformed_config_dict in [{'some_alias': {}},
                                                  {'some_alias': {'name': 'foo'}},
                                                  {'some_alias': {'path': 'foo'}}]:
                        expect(lambda: integration_config._load_custom_integrations_classes(malformed_config_dict)).to(
                            raise_error(SystemExit)
                        )

                with it('finds the class name inside the module'):
                    # Stub out module loading and class discovery; restore
                    # the real functions at the end of the example.
                    load_modules = loader.load_modules
                    original_class_finder = integration_config._get_classes_in_module

                    an_integration_class_name = 'IntegrationClassName'
                    a_path_to_an_integration_module = '/some/filesystem/path'
                    config_dict = {'some_alias': {
                        'name': an_integration_class_name,
                        'path': a_path_to_an_integration_module
                    }}

                    an_integration_class_object = Spy()
                    loader.load_modules = lambda path: ['sample_returned_module']
                    integration_config._get_classes_in_module = \
                        lambda module: [(an_integration_class_name, an_integration_class_object)]

                    expect(integration_config._get_matching_classobject_from_path(
                        an_integration_class_name,
                        a_path_to_an_integration_module)
                    ).to(be(an_integration_class_object))

                    loader.load_modules = load_modules
                    integration_config._get_classes_in_module = original_class_finder

                with it('saves the class to the pysellus.integrations.integration_classes dict under the specified alias'):
                    load_modules = loader.load_modules
                    original_class_finder = integration_config._get_classes_in_module

                    an_integration_class_name = 'IntegrationClassName'
                    a_path_to_an_integration_module = '/some/filesystem/path'
                    config_dict = {'some_alias': {
                        'name': an_integration_class_name,
                        'path': a_path_to_an_integration_module
                    }}

                    an_integration_class_object = Spy()
                    loader.load_modules = lambda path: ['sample_returned_module']
                    integration_config._get_classes_in_module = \
                        lambda module: [(an_integration_class_name, an_integration_class_object)]

                    integration_config._load_custom_integrations_classes(config_dict)

                    expect(integrations.integration_classes).to(have_key('some_alias'))
                    expect(integration_config.integration_classes['some_alias']).to(be(an_integration_class_object))

                    del integration_config.integration_classes['some_alias']
                    loader.load_modules = load_modules
                    integration_config._get_classes_in_module = original_class_finder

                with it('aborts the program if the given class name is not in the module at the specified path'):
                    original_integration_class_finder = integration_config._get_matching_classobject_from_path
                    integration_config._get_matching_classobject_from_path = lambda a, b: None

                    config_dict = {'some_alias': {'path': '/some/path', 'name': 'some_name'}}

                    expect(lambda: integration_config._load_custom_integrations_classes(config_dict)).to(
                        raise_error(SystemExit)
                    )

                    integration_config._get_matching_classobject_from_path = original_integration_class_finder

        with context('and a notify section'):
            with before.each:
                # Replace the instance factory with a spy so the examples can
                # assert on how it was called; restored in after.each.
                self.original_integration_instance_creator = integration_config._get_integration_instance
                self.integration_config_spy = Spy()
                integration_config._get_integration_instance = self.integration_config_spy._get_integration_instance

            with it('aborts the program if it is missing'):
                expect(lambda: integration_config._load_defined_integrations({})).to(
                    raise_error(SystemExit)
                )

            with context('when an integration alias is specified'):
                with context('and the integration is configured with one or more parameters'):
                    with it('requests an integration instance and registers that alias'):
                        kwargs_for_integration_constructor = {
                            'some_arg': 'some_value',
                            'another_arg': 35
                        }
                        integrations_configuration = {'notify': {
                            'my-alias': {
                                'an_integration': kwargs_for_integration_constructor
                            }
                        }}

                        integration_config._load_defined_integrations(integrations_configuration)

                        expect(self.integration_config_spy._get_integration_instance).to(
                            have_been_called_with('an_integration', kwargs_for_integration_constructor).once
                        )
                        expect(integrations.loaded_integrations).to(have_key('my-alias'))

                with context('and the integration is configured with no parameters'):
                    with it('requests an integration instance and registers that alias'):
                        kwargs_for_integration_constructor = None
                        integrations_configuration = {'notify': {
                            'my-alias': {
                                'an_integration': kwargs_for_integration_constructor
                            }
                        }}

                        integration_config._load_defined_integrations(integrations_configuration)

                        expect(self.integration_config_spy._get_integration_instance).to(
                            have_been_called_with('an_integration', kwargs_for_integration_constructor).once
                        )
                        expect(integrations.loaded_integrations).to(have_key('my-alias'))

            with context('when an integration alias is not specified'):
                with context('and the integration is configured with one or more parameters'):
                    with it('requests an integration instance and registers the stock name'):
                        kwargs_for_integration_constructor = {
                            'some_arg': 'some_value'
                        }
                        integrations_configuration = {'notify': {
                            'an_integration': kwargs_for_integration_constructor
                        }}

                        integration_config._load_defined_integrations(integrations_configuration)

                        expect(self.integration_config_spy._get_integration_instance).to(
                            have_been_called_with('an_integration', kwargs_for_integration_constructor).once
                        )
                        expect(integrations.loaded_integrations).to(have_key('an_integration'))

                with context('and the integration is configured with no parameters'):
                    with it('requests an integration instance and registers the stock name'):
                        kwargs_for_integration_constructor = None
                        integrations_configuration = {'notify': {
                            'an_integration': kwargs_for_integration_constructor
                        }}

                        integration_config._load_defined_integrations(integrations_configuration)

                        expect(self.integration_config_spy._get_integration_instance).to(
                            have_been_called_with('an_integration', kwargs_for_integration_constructor).once
                        )
                        expect(integrations.loaded_integrations).to(have_key('an_integration'))

            with after.each:
                integration_config._get_integration_instance = self.original_integration_instance_creator
| ergl/pysellus | spec/integration_config_spec.py | Python | mit | 11,715 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from argparse import ArgumentParser
from dataclasses import dataclass, field
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
@dataclass
class A(FairseqDataclass):
    # Leaf config used by these tests; each field's metadata["help"] is the
    # text gen_parser_from_dataclass surfaces as the argparse help string.
    data: str = field(default="test", metadata={"help": "the data input"})
    num_layers: int = field(default=200, metadata={"help": "more layers is better?"})
@dataclass
class B(FairseqDataclass):
    # Config with one level of nesting (prefix "bar" in generated flags).
    # NOTE(review): `default=A()` is a single A instance shared by every B
    # that uses the default; `field(default_factory=A)` would avoid the
    # sharing — confirm gen_parser_from_dataclass handles default_factory
    # before changing it.
    bar: A = field(default=A())
    foo: int = field(default=0, metadata={"help": "not a bar"})
@dataclass
class D(FairseqDataclass):
    # Same shape as B but nests under the field name "arch".
    # NOTE(review): shared `default=A()` instance — see the note on B.
    arch: A = field(default=A())
    foo: int = field(default=0, metadata={"help": "not a bar"})
@dataclass
class C(FairseqDataclass):
    # Two levels of nesting (encoder.arch) for the prefixing test below.
    # NOTE(review): shared `default=D()` / `default=A()` instances — see B.
    data: str = field(default="test", metadata={"help": "root level data input"})
    encoder: D = field(default=D())
    decoder: A = field(default=A())
    lr: int = field(default=0, metadata={"help": "learning rate"})
class TestDataclassUtils(unittest.TestCase):
    """Tests for gen_parser_from_dataclass, which converts a FairseqDataclass
    into argparse arguments (field names become --kebab-case flags; nested
    dataclass fields are flattened with their field name as a prefix)."""

    def test_argparse_convert_basic(self):
        # Flat dataclass: num_layers -> --num-layers; the asserted behavior
        # shows `data` is consumed from the positional argument.
        parser = ArgumentParser()
        gen_parser_from_dataclass(parser, A(), True)
        args = parser.parse_args(["--num-layers", "10", "the/data/path"])
        self.assertEqual(args.num_layers, 10)
        self.assertEqual(args.data, "the/data/path")

    def test_argparse_recursive(self):
        # One level of nesting: B's inner A contributes its flags alongside
        # B's own --foo.
        parser = ArgumentParser()
        gen_parser_from_dataclass(parser, B(), True)
        args = parser.parse_args(["--num-layers", "10", "--foo", "10", "the/data/path"])
        self.assertEqual(args.num_layers, 10)
        self.assertEqual(args.foo, 10)
        self.assertEqual(args.data, "the/data/path")

    def test_argparse_recursive_prefixing(self):
        # With an explicit prefix argument, nested fields are namespaced as
        # <outer>_<inner>... (e.g. encoder.arch.data -> --encoder-arch-data).
        self.maxDiff = None
        parser = ArgumentParser()
        gen_parser_from_dataclass(parser, C(), True, "")
        args = parser.parse_args(
            [
                "--encoder-arch-data",
                "ENCODER_ARCH_DATA",
                "--encoder-arch-num-layers",
                "10",
                "--encoder-foo",
                "10",
                "--decoder-data",
                "DECODER_DATA",
                "--decoder-num-layers",
                "10",
                "--lr",
                "10",
                "the/data/path",
            ]
        )
        self.assertEqual(args.encoder_arch_data, "ENCODER_ARCH_DATA")
        self.assertEqual(args.encoder_arch_num_layers, 10)
        self.assertEqual(args.encoder_foo, 10)
        self.assertEqual(args.decoder_data, "DECODER_DATA")
        self.assertEqual(args.decoder_num_layers, 10)
        self.assertEqual(args.lr, 10)
        self.assertEqual(args.data, "the/data/path")
# Allow running this test module directly: `python test_dataclass_utils.py`.
if __name__ == "__main__":
    unittest.main()
| pytorch/fairseq | tests/test_dataclass_utils.py | Python | mit | 2,896 |
"""
This module contains the methods for the ``openxc-control`` command line
program.
`main` is executed when ``openxc-control`` is run, and all other callables in
this module are internal only.
"""
import argparse
import sys
import time
from openxc.formats.json import JsonFormatter
from .common import device_options, configure_logging, select_device
def version(interface):
    """Query the attached vehicle interface and print its firmware version."""
    firmware_version = interface.version()
    print("Device is running version %s" % firmware_version)
def platform(interface):
    """Query the attached vehicle interface and print its platform name."""
    platform_name = interface.platform()
    print("Device is a %s" % platform_name)
def sd_mount_status(interface):
    """Print whether the vehicle interface reports its SD card as mounted."""
    # The VI answers 1 for mounted; any other value is reported as unmounted.
    mounted = interface.sd_mount_status() == 1
    print("SD card mount status: %s" % ("true" if mounted else "false"))
def device_id(interface):
    """Query the attached vehicle interface and print its unique device ID."""
    identifier = interface.device_id()
    print("Device ID is %s" % identifier)
def get_vin(interface):
    """Query the attached vehicle interface and print the vehicle's VIN."""
    vin = interface.get_vin()
    print("Vehicle VIN is %s" % vin)
def passthrough(interface, bus, passthrough_enabled):
    """Enable/disable CAN passthrough on *bus*, reporting only on success."""
    if not interface.set_passthrough(bus, passthrough_enabled):
        return
    print("Bus %u passthrough set to %s" % (bus, passthrough_enabled))
def af_bypass(interface, bus, bypass):
    """Toggle acceptance-filter bypass on *bus*, reporting the new state."""
    if interface.set_acceptance_filter_bypass(bus, bypass):
        state = "bypassed" if bypass else "enabled"
        print("Bus %u AF is now %s" % (bus, state))
def set_payload_format(interface, payload_format):
    """Switch the VI's payload format (e.g. json/protobuf), reporting on success."""
    accepted = interface.set_payload_format(payload_format)
    if accepted:
        print("Changed payload format to %s" % payload_format)
def set_rtc_time(interface, unix_time):
    """Set the VI's real-time clock to *unix_time* seconds, reporting on success."""
    accepted = interface.rtc_configuration(unix_time)
    if accepted:
        print("Time set to %d" % unix_time)
def modem_configuration(interface, host, port):
    """Point the VI's modem at host:port, reporting on success."""
    accepted = interface.modem_configuration(host, port)
    if accepted:
        print("host set to %s:%s" % (host, port))
def write_file(interface, filename):
    """Replay a JSON trace file to the vehicle interface.

    Each line of *filename* is parsed as one JSON message; lines that are
    not valid JSON objects are counted and skipped.  Messages carrying a
    'timestamp' field are paced so inter-message delays approximate the
    original recording.  Prints a summary when done.
    """
    first_timestamp = None
    with open(filename, "r") as output_file:
        corrupt_entries = 0
        message_count = 0
        start_time = time.time()
        for line in output_file:
            try:
                parsed_message = JsonFormatter.deserialize(line.encode("utf-8"))
                if not isinstance(parsed_message, dict):
                    raise ValueError()
            except ValueError:
                corrupt_entries += 1
            else:
                # TODO at the moment it's taking longer to write all of
                # individual CAN messages than the time that actually
                # elapsed in receiving the trace - need to implement
                # batching to speed this up. right now this will never sleep
                # because it's always behind.
                timestamp = parsed_message.get('timestamp', None)
                # TODO this duplicates some code from sources/trace.py
                if timestamp is not None:
                    # Sleep until the message's offset from the first
                    # timestamp has elapsed in wall-clock time (min .2ms).
                    first_timestamp = first_timestamp or timestamp
                    target_time = start_time + (timestamp - first_timestamp)
                    time.sleep(max(.0002, target_time - time.time()))

                message_count += 1
                interface.write(**parsed_message)

    print(("%d lines sent" % message_count))
    if corrupt_entries > 0:
        print(("%d invalid lines in the data file were not sent" %
                corrupt_entries))
def parse_options():
    """Build and run the argument parser for the ``openxc-control`` command.

    Returns the parsed argparse namespace.  ``--name``, ``--id`` and
    ``-f/--write-input-file`` are mutually exclusive ways to specify what a
    'write' command should send; the ``set``-style options (passthrough, AF
    bypass, payload format, time, network host) all default to None so the
    caller can tell "not given" apart from an explicit value.
    """
    parser = argparse.ArgumentParser(description="Send control messages to an "
            "attached OpenXC vehicle interface", parents=[device_options()])
    parser.add_argument("command", type=str,
            choices=['version', 'platform', 'write', 'id', 'set', 'sd_mount_status', 'get_vin'])
    write_group = parser.add_mutually_exclusive_group()
    write_group.add_argument("--name", action="store", dest="write_name",
            help="name for message write request")
    write_group.add_argument("--id", action="store", dest="write_id",
            help="ID for raw message write request")
    parser.add_argument("--bus", action="store", dest="bus",
            default=1,
            help="CAN bus number for the control request")
    parser.add_argument("--value", action="store", dest="write_value",
            help="optional value for message write request")
    parser.add_argument("--event", action="store", dest="write_event",
            help="optional event for message write request")
    parser.add_argument("--data", action="store", dest="write_data",
            help="data for raw message write request")
    parser.add_argument("--frame-format", action="store",
            dest="write_frame_format", choices=['standard', 'extended'],
            help="explicit frame format for raw message write request")
    write_group.add_argument("-f", "--write-input-file", action="store",
            dest="write_input_file",
            help="the path to a file with a list of raw or translated "
                "messages to write to the selected vehicle interface")
    parser.add_argument("--passthrough", action="store_true", default=None,
            dest="passthrough_enabled")
    parser.add_argument("--no-passthrough", action="store_false", default=None,
            dest="passthrough_enabled")
    parser.add_argument("--af-bypass", action="store_true", default=None,
            dest="af_bypass")
    parser.add_argument("--no-af-bypass", action="store_false", default=None,
            dest="af_bypass")
    parser.add_argument("--new-payload-format", action="store", default=None,
            choices=['json', 'protobuf'], dest="new_payload_format")
    parser.add_argument("--time", action="store",default=None,
            dest="unix_time")
    parser.add_argument("--host", action="store", default=None,
            dest="host")
    parser.add_argument("--network-host", action="store", default=None,
            dest="network_host")
    parser.add_argument("--port", action="store", default=80,
            dest="port")
    parser.set_defaults(format="json")
    return parser.parse_args()
def handle_set_command(arguments, interface):
    """Apply every 'set'-style option present on the command line.

    Each option defaults to None in the parser, so a non-None value means
    the user supplied it; the corresponding helper is invoked in the same
    order as before (passthrough, AF bypass, payload format, RTC, modem).
    """
    dispatch = (
        (arguments.passthrough_enabled,
         lambda value: passthrough(interface, int(arguments.bus), value)),
        (arguments.af_bypass,
         lambda value: af_bypass(interface, int(arguments.bus), value)),
        (arguments.new_payload_format,
         lambda value: set_payload_format(interface, value)),
        (arguments.unix_time,
         lambda value: set_rtc_time(interface, int(value))),
        (arguments.network_host,
         lambda value: modem_configuration(interface, value, arguments.port)),
    )
    for supplied_value, action in dispatch:
        if supplied_value is not None:
            action(supplied_value)
def handle_write_command(arguments, interface):
    """Send a translated message, a raw CAN frame, or an entire trace file.

    Exactly one of --name, --id or -f must have been given (the parser puts
    them in a mutually exclusive group); exits the program with an error
    message otherwise, or when --id was given without --data.
    """
    if arguments.write_name:
        # Translated (named signal) write.
        interface.write(name=arguments.write_name,
                value=arguments.write_value,
                event=arguments.write_event)
        return
    if arguments.write_id:
        if not arguments.write_data:
            sys.exit("%s requires an id and data" % arguments.command)
        # TODO we should use unhexlify as with the diagnostic command
        # payloads so we can standardize the API and not deal with hex
        # strings in code
        interface.write(bus=int(arguments.bus),
                id=arguments.write_id,
                data=arguments.write_data,
                frame_format=arguments.write_frame_format)
        return
    if arguments.write_input_file:
        write_file(interface, arguments.write_input_file)
        return
    sys.exit("%s requires a signal name, message ID or filename" % arguments.command)
def main():
    """Entry point for ``openxc-control``: connect to the selected vehicle
    interface, apply any 'set'-style options, then run the requested command.
    """
    configure_logging()

    arguments = parse_options()

    interface_class, interface_kwargs = select_device(arguments)
    interface = interface_class(**interface_kwargs)
    interface.start()

    # 'set'-style options (passthrough, AF bypass, payload format, RTC,
    # modem host) may accompany any command, so apply them up front.
    handle_set_command(arguments, interface)

    if arguments.command == "version":
        version(interface)
    elif arguments.command == "platform":
        platform(interface)
    elif arguments.command == "sd_mount_status":
        sd_mount_status(interface)
    elif arguments.command == "id":
        device_id(interface)
    elif arguments.command == "get_vin":
        get_vin(interface)
    elif arguments.command == "set":
        # Bug fix: the set options were already applied unconditionally
        # above; calling handle_set_command() again here applied every
        # option twice (e.g. reconfiguring the modem a second time).
        pass
    elif arguments.command == "write":
        handle_write_command(arguments, interface)
    else:
        print(("Unrecognized command \"%s\"" % arguments.command))
from io import BytesIO
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from PIL import Image, ImageDraw, ImageFont
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from editormd.settings import WATERMARK, WATERMARK_TEXT, UPLOAD_SUFFIX
from .forms import ImageUploadForm
from .models import Image as EditorImage
def add_text_watermark(filename, text,
                       font_path="/System/Library/Fonts/Palatino.ttc"):
    """Draw *text* as a faded grey watermark near the bottom-left corner of
    the image in *filename* and return the result as a Django ContentFile.

    :param filename: path or file object accepted by PIL's ``Image.open``
    :param text: the watermark text to render
    :param font_path: TrueType font used for rendering; the default is
        macOS-specific, so override it on other platforms.
    """
    # Open the original image
    img = Image.open(filename)
    # Create a new image for the watermark with an alpha layer (RGBA)
    # the same size as the original image
    watermark = Image.new("RGBA", img.size)
    # Get an ImageDraw object so we can draw on the image
    waterdraw = ImageDraw.ImageDraw(watermark, "RGBA")
    font = ImageFont.truetype(font_path, 30)
    # Place the text 35px above the bottom-left corner.  (Bug fix: the
    # original reopened the file a second time just to read its size —
    # leaking that handle and leaving `width` unused — and carried a stale
    # comment claiming the text went in the upper-left corner.)
    height = img.size[1]
    waterdraw.text((10, height - 35), text,
                   fill=(128, 128, 128, 128), font=font)
    # Get the watermark image as grayscale and fade the image
    # See <http://www.pythonware.com/library/pil/handbook/image.htm#Image.point>
    # for information on the point() function
    # Note that the second parameter we give to the min function determines
    # how faded the image will be. That number is in the range [0, 256],
    # where 0 is black and 256 is white. A good value for fading our white
    # text is in the range [100, 200].
    watermask = watermark.convert("L").point(lambda x: min(x, 100))
    # Apply this mask to the watermark image, using the alpha filter to
    # make it transparent
    watermark.putalpha(watermask)
    # Paste the watermark (with alpha layer) onto the original image and
    # serialize it back out in its original format.
    img.paste(watermark, None, watermark)
    buffer = BytesIO()
    img.save(buffer, format=img.format)
    buffer_val = buffer.getvalue()
    return ContentFile(buffer_val)
def is_url(url):
    """Return True when *url* is an absolute http or https URL."""
    scheme = urlparse(url).scheme
    return scheme == 'http' or scheme == 'https'
@csrf_exempt
def upload_image(request):
    """Handle an image uploaded by the editor.md widget via POST.

    Returns JSON with ``success`` (0/1), ``message`` and, on success,
    ``url`` — presumably the response shape the editor.md client consumes
    (TODO confirm against the front-end integration).
    """
    result = {
        'success': 0,
        'message': 'Method not allowed'
    }
    if request.method == 'POST':
        # editor.md posts the file under 'editormd-image-file'; re-key it so
        # the form's 'image_file' field can validate it.
        request.FILES['image_file'] = request.FILES['editormd-image-file']
        form = ImageUploadForm(request.POST, request.FILES)
        if form.is_valid():
            image_file = form.cleaned_data['image_file']
            # GIFs skip watermarking — NOTE(review): likely because the
            # Pillow pipeline would drop animation frames; confirm intent.
            if WATERMARK and image_file.content_type != 'image/gif':
                pillow_image = add_text_watermark(
                    image_file,
                    WATERMARK_TEXT)
                image_file = InMemoryUploadedFile(
                    pillow_image, None, image_file.name,
                    image_file.content_type,
                    pillow_image.tell, None)
            instance = EditorImage(image_file=image_file)
            instance.author = request.user
            instance.save()
            result['success'] = 1
            result['message'] = 'Upload image success'
            # Storage backends may return either an absolute URL or a
            # site-relative path; prefix the scheme/host only when needed.
            if is_url(instance.image_file.url):
                result['url'] = instance.image_file.url
            else:
                result['url'] = '{}://{}'.format(request.scheme,
                                                 request.get_host()) \
                                + instance.image_file.url
            if UPLOAD_SUFFIX:
                result['url'] += UPLOAD_SUFFIX
            return JsonResponse(result)
        result['message'] = 'Upload image failed'
        return JsonResponse(result)
    return JsonResponse(result)
| chen2aaron/django-editormd | editormd/views.py | Python | mit | 3,749 |
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Legacy URLconf style: 'patterns()' was deprecated in Django 1.8 and removed
# in 1.10. No routes are enabled yet; the examples below are kept commented.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djarduino_mac_maker.views.home', name='home'),
    # url(r'^djarduino_mac_maker/', include('djarduino_mac_maker.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
| ajfisher/arduino-mac-maker | djarduino_mac_maker/urls.py | Python | bsd-3-clause | 604 |
#-------------------------------------------------------------------------------
# htmlize-ast-dump.py: Turn a Clang AST dump (-ast-dump) into cross-linked HTML.
#
# Run with --help for usage information.
#
# Note: this script requires Python 3.4; earlier versions of Python 3 should
# work if you install the enum34 module.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
import argparse
import enum
import html
import io
import json
import pprint
import re
import sys
# Skeleton of the generated page: a scrollable dump pane on top and a
# navigation pane below. Literal CSS braces are doubled ('{{'/'}}') because
# the template is filled with str.format(lines=..., nav_data=..., js_code=...).
HTML_OUTPUT_TEMPLATE = r'''
<html>
<head>
<style>
.main_area, .nav_area {{
position: absolute;
left: 0;
right: 0;
}}
.main_area {{
top: 0;
height: 75%;
overflow: scroll;
background-color: black;
white-space: nowrap;
padding-left: 10px;
}}
.nav_area {{
bottom: 0;
height: 25%;
overflow: scroll;
background-color: #131313;
}}
#nav_title {{
margin-left: auto;
margin-right: auto;
width: 200px;
font-weight: bold;
color: white;
font-size: 140%;
}}
#nav_contents {{
font-family: Consolas,monospace;
font-size: 80%;
color: #AAAAAA;
padding: 10px;
}}
.my-pre {{
line-height: 0.8;
padding: 0px 0px;
font-family: Consolas,monospace;
font-size: 80%;
}}
a:link {{
text-decoration: underline;
color: inherit;
}}
a:visited {{
text-decoration: underline;
color: inherit;
}}
a:hover {{
text-decoration: underline;
color: #FFFFFF;
}}
a:active {{
text-decoration: underline;
color: #FFFFFF;
}}
.ansi-bold {{
font-weight: bold;
white-space: pre;
}}
.ansi-black {{
color: #000000;
white-space: pre;
}}
.ansi-red {{
color: #d23737;
white-space: pre;
}}
.ansi-green {{
color: #17b217;
white-space: pre;
}}
.ansi-yellow {{
color: #b26717;
white-space: pre;
}}
.ansi-blue {{
color: #2727c2;
white-space: pre;
}}
.ansi-magenta {{
color: #b217b2;
white-space: pre;
}}
.ansi-cyan {{
color: #17b2b2;
white-space: pre;
}}
.ansi-white {{
color: #f2f2f2;
white-space: pre;
}}
</style>
</head>
<body>
<div class="main_area">
<pre class="my-pre">{lines}
</pre>
</div>
<div class="nav_area">
<div id="nav_contents">[Click on node address for cross-reference]</div>
</div>
<!-- Javascript -->
<script type="text/javascript">
var nav_data = {nav_data};
{js_code}
</script>
</body>
</html>
'''
# Client-side navigation: OnAnchorClick(addr) renders the clicked node's
# parent/children/users (from the embedded nav_data object) into #nav_contents.
JS_CODE = r'''
MakeAnchorLink = function(addr) {
anchorname = 'anchor_' + addr
return '<a href="#' + anchorname + '">' + addr + '</a>'
}
OnAnchorClick = function(elem_id) {
var nav_entry = nav_data[elem_id];
var contents = '';
contents += nav_entry['name'] + ' ' + nav_entry['id'];
contents += '<ul>\n';
parent_id = nav_entry['parent'];
if (parent_id === null) {
contents += '<li>Parent: none</li>\n';
} else {
parent_name = nav_data[parent_id]['name']
contents += '<li>Parent: ' + parent_name + ' ' +
MakeAnchorLink(parent_id) + '</li>\n';
}
contents += '<li>Children:'
if (nav_entry['children'].length == 0) {
contents += 'none</li>'
} else {
contents += '\n<ul>\n'
for (var i = 0; i < nav_entry['children'].length; i++) {
child_id = nav_entry['children'][i];
child_name = nav_data[child_id]['name'];
contents += '<li>' + child_name + ' ' +
MakeAnchorLink(child_id) + '</li>\n';
}
contents += '</ul>\n'
}
contents += '<li>Users:'
if (nav_entry['users'].length == 0) {
contents += 'none</li>'
} else {
contents += '\n<ul>\n'
for (var i = 0; i < nav_entry['users'].length; i++) {
user_id = nav_entry['users'][i];
user_name = nav_data[user_id]['name'];
contents += '<li>' + user_name + ' ' +
MakeAnchorLink(user_id) + '</li>\n';
}
contents += '</ul>\n'
}
document.getElementById('nav_contents').innerHTML = contents;
}
'''
# One styled run of text in the output; 'klass' holds the ansi-* CSS classes.
SPAN_TEMPLATE = r'<span class="{klass}">{text}</span>'
class Color(enum.Enum):
    """ANSI foreground colors; each member's value is its SGR code (30-37)."""
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
# Input is broken to tokens. A token is a piece of text with the style that
# applies to it. The text is decoded from binary to a string.
class Token:
    """A run of text together with the Style that applies to it.

    *text* arrives as bytes and is stored ASCII-decoded.
    """

    def __init__(self, text, style):
        self.text = str(text, 'ascii')
        self.style = style

    def __repr__(self):
        return 'Token<text=%s, style=%s>' % (self.text, self.style)
class Style:
    """Mutable rendering state: a foreground Color plus a bold flag."""

    def __init__(self, color=Color.WHITE, bold=False):
        self.color, self.bold = color, bold

    def __repr__(self):
        return 'Style<color=%s, bold=%s>' % (self.color, self.bold)
ANSI_PATTERN = re.compile(rb'\x1b\[([^m]+)m')
def tokenize_line(line):
    """Produce (yield) a stream of tokens from an input line.

    *line* is bytes. Each yielded Token carries the text up to the next ANSI
    escape together with the style in effect *before* that escape; the final
    Token carries the text after the last escape.
    """
    # The end pos of the last pattern match.
    last_end = 0
    # Current style
    cur_style = Style()
    for match in ANSI_PATTERN.finditer(line):
        preceding_text = line[last_end:match.start()]
        yield Token(preceding_text, cur_style)
        last_end = match.end()
        # Set the current style according to the ANSI code in the match.
        # Bug fix: the original mutated cur_style in place, and already-
        # yielded Tokens hold a reference to that same Style object -- any
        # SGR sequence not starting with a '0' reset retroactively restyled
        # earlier tokens on the line. Work on a copy instead.
        new_style = Style(cur_style.color, cur_style.bold)
        for ansi_code in (int(c) for c in match.group(1).split(b';')):
            if ansi_code == 0:
                # Reset
                new_style = Style()
            elif ansi_code == 1:
                new_style.bold = True
            else:
                # Color code. Obtain from Color enum. This will bomb if the
                # color code is invalid.
                new_style.color = Color(ansi_code)
        cur_style = new_style
    leftover_text = line[last_end:]
    yield Token(leftover_text, cur_style)
# Link injection happens at the HTML level - everything is a string by now.
ADDR_PATTERN = re.compile(r'0x[0-9a-fA-F]+')
def make_anchor_link(addr, link_text):
    """Return an HTML link to the anchor belonging to node address *addr*."""
    return '<a href="#anchor_{0}">{1}</a>'.format(addr, link_text)
def make_anchor_target(addr):
    """Return an empty HTML anchor marking the location of *addr*."""
    return '<a id="anchor_{0}"></a>'.format(addr)
def inject_links(html_line_chunks):
    """Turn hex node addresses in *html_line_chunks* into HTML links, in place.

    The first address found on the line is the address of the node the line
    describes: it becomes an anchor target plus a clickable link wired to the
    OnAnchorClick JS handler. Every later address becomes a plain link to that
    address's anchor.
    """
    first_addr = True
    for i, chunk in enumerate(html_line_chunks):
        # Only the first address within each chunk is considered.
        match = ADDR_PATTERN.search(chunk)
        if match:
            addr = match.group()
            if first_addr:
                # The first address encountered in the line is the address of
                # the node the line describes. This becomes a link anchor.
                #print(tok.text[match.start():match.end()], file=sys.stderr)
                html_line_chunks[i] = (
                    chunk[:match.start()] +
                    make_anchor_target(addr) +
                    '<a onclick="OnAnchorClick(\'' + addr +
                    '\');" href="#javascript:void(0)">' +
                    chunk[match.start():] + '</a>')
                first_addr = False
            else:
                # All other addresses refer to other nodes. These become links
                # to anchors.
                html_line_chunks[i] = (
                    chunk[:match.start()] +
                    make_anchor_link(addr, chunk[match.start():match.end()]) +
                    chunk[match.end():])
def analyze_line(tokens):
    """Analyzes the given line (a list of tokens).
    Returns the tuple: <id>, <name>, <nesting level>, [<used id>...]
    """
    assert(len(tokens) > 2)
    # The top-level TranslationUnitDecl node has no nesting
    if tokens[1].text.startswith('Translation'):
        nesting = ''
        itok = 1
    else:
        nesting = tokens[1].text
        itok = 2
    # The name is a concat of the following non-empty tokens, until something
    # that looks like the ID is encountered, or the line ends.
    name_parts = []
    while itok < len(tokens):
        t = tokens[itok].text.strip()
        if len(t) > 0:
            if ADDR_PATTERN.match(t):
                # Found an ID; bail out
                break
            else:
                # Part of the name
                name_parts.append(t)
        itok += 1
    name = ' '.join(name_parts)
    # Here itok is either past the end of the list, or it points to the ID.
    id = tokens[itok].text.strip() if itok < len(tokens) else ''
    itok += 1
    # Gather all uses: every remaining token that looks like an address is
    # taken to be a reference to another node.
    uses = []
    while itok < len(tokens):
        t = tokens[itok].text.strip()
        if ADDR_PATTERN.match(t):
            uses.append(t)
        itok += 1
    # The dump indents with one character per level, so the width of the
    # nesting prefix is the nesting depth.
    nesting_level = len(nesting)
    return id, name, nesting_level, uses
def prepare_nav_data(line_info):
    """Given a list of tuples from analyze_line, prepares navigation data.
    Navigation data is a dictionary mapping an id to its children ids, paren id
    and user ids.
    It's important for line_info to be in the order gathered from the input. The
    order is essential for determining parent/child relationships.
    """
    # ZZZ: in the end, add 'users' fields...
    nav_data = {}

    def new_data_entry(line_entry):
        """Create a new entry with empty parent and child info."""
        nonlocal nav_data
        id, name, nesting_level, uselist = line_entry
        nav_data[id] = {'id': id, 'name': name,
                        'uses': uselist, 'users': [],
                        'nesting_level': nesting_level,
                        'parent': None, 'children': []}
        return nav_data[id]

    # Keep a stack of parents. The topmost parent on the stack is the one
    # collecting the current children, and their parent ID is mapped to it. The
    # stack is popped when the nesting level decreases (popped until the topmost
    # parent has a lower nesting level). Every entry is eventually pushed onto
    # the stack because it may have children.
    assert len(line_info) > 0
    assert line_info[0][2] == 0, "Expect top-level entry at nesting level 0"
    # Initialize the parent stack to the first entry
    parent_stack = [new_data_entry(line_info[0])]
    for line_entry in line_info[1:]:
        data_entry = new_data_entry(line_entry)
        # Pop the stack until the topmost entry is a suitable parent for this
        # one.
        while parent_stack[-1]['nesting_level'] >= data_entry['nesting_level']:
            # Note: no entry except the toplevel has nesting 0, so this will
            # always terminate with at most 1 entry remaining on the stack.
            parent_stack.pop()
        # Now parent_stack[-1] is the parent of this entry. Update the entries
        # accordingly.
        data_entry['parent'] = parent_stack[-1]['id']
        parent_stack[-1]['children'].append(data_entry['id'])
        # At this point, we push the current entry onto the stack.
        parent_stack.append(data_entry)
    # Finally, add 'users' fields to all entries. This is an inversion of 'uses'
    for id, entry in nav_data.items():
        for used_id in entry['uses']:
            # Dangling references (addresses never defined on a line of their
            # own) are silently skipped.
            if used_id in nav_data:
                nav_data[used_id]['users'].append(id)
    return nav_data
def htmlize(input):
    """HTML-ize the input text, producing output.
    input: stream / file-like object with textual AST dump.
    Returns a string with HTML-ized dump.
    """
    html_lines = []
    # collected list of line analysis info
    line_info = []
    for text_line in input:
        html_line_chunks = []
        tokens = list(tokenize_line(text_line))
        line_info.append(analyze_line(tokens))
        # Render each token as a <span> carrying the CSS classes that match
        # its ANSI style.
        for tok in tokens:
            style = tok.style
            klass = 'ansi-{}'.format(style.color.name.lower())
            if style.bold:
                klass += ' ansi-bold'
            html_line_chunks.append(SPAN_TEMPLATE.format(
                klass=klass,
                text=html.escape(tok.text)))
        html_line_chunks.append('<br/>')
        inject_links(html_line_chunks)
        html_lines.append(''.join(html_line_chunks))
    # Cross-reference data consumed by the embedded JS_CODE.
    nav_data = prepare_nav_data(line_info)
    return HTML_OUTPUT_TEMPLATE.format(lines='\n'.join(html_lines),
                                       nav_data=json.dumps(nav_data),
                                       js_code=JS_CODE)
def main():
    """CLI entry point: read an AST dump (file or stdin), emit HTML to stdout."""
    argparser = argparse.ArgumentParser(
        description='HTML output is emitted to stdout')
    argparser.add_argument('dump_file',
                           help='AST dump file, "-" for reading from stdin')
    args = argparser.parse_args()
    # Argh: it would be nice to use argparse's FileType to do this
    # automatically, but unfortunately it's broken for binary mode
    # (http://bugs.python.org/issue14156)
    # Bug fixes vs. the original: open args.dump_file rather than sys.argv[1]
    # (they differ as soon as any option precedes the positional), and open
    # the stream *before* entering try/finally so a failed open() no longer
    # triggers a NameError on input_stream.close() in the finally clause.
    if args.dump_file != '-':
        input_stream = open(args.dump_file, 'rb')
    else:
        input_stream = io.BufferedReader(sys.stdin.buffer)
    try:
        print(htmlize(input_stream))
    finally:
        input_stream.close()
# Allow running as a script.
if __name__ == '__main__':
    main()
| eliben/llvm-clang-samples | tools/htmlize-ast-dump.py | Python | unlicense | 13,724 |
import AZOrangeConfig as AZOC
from AZutilities import miscUtilities
import os
class config():
def __init__(self):
self.report = ""
def ConfigNFSScratchDir(self):
# Create the AZOrange scratch dir on the nfs system is if does not exist
if not os.path.exists(AZOC.NFS_SCRATCHDIR):
os.system("mkdir "+AZOC.NFS_SCRATCHDIR)
if not os.path.exists(AZOC.NFS_SCRATCHDIR):
return False
return True
def ConfigInterfacelessSSH(self,machine,user=None):
#Test SSH connection to localhost
if not miscUtilities.testInterfacelessSSH(machine,user,5):
print "SSH is not configured for local machine. Trying to fix problem now..."
if miscUtilities.autoValidateRSAKey(machine,user):
print "RSA fingerprint added to known_hosts with success"
else:
print "Not able to add fingerprint to known_hosts."
if miscUtilities.testInterfacelessSSH(machine,user,5):
print "SSH correctly configured for "+machine
else:
print "Unable to configure properly interfaceless SSH"
return False
return True
def __call__(self):
self.report = ""
undefinedError = False
if not self.ConfigNFSScratchDir():
self.report += "Unable to create the NFS scratch dir: "+AZOC.NFS_SCRATCHDIR+"\n"
if not self.ConfigInterfacelessSSH("localhost") or not self.ConfigInterfacelessSSH("127.0.0.1"):
self.report += "Unable to configure interfaceless ssh.You should take the following actions:\n"
if os.path.isfile(os.path.join(os.environ["HOME"],".ssh","id_dsa")) and \
not os.path.isfile(os.path.join(os.environ["HOME"],".ssh","id_dsa.pub")):
self.report += "1)In a terminal window create a public SSH key by running the commands:\n"+\
" On ANY question, just hit ENTER!\n"+\
" ssh-keygen -y -t dsa > ~/.ssh/id_dsa.pub\n"+\
" cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys\n"
elif not os.path.isfile(os.path.join(os.environ["HOME"],".ssh","id_dsa")):
self.report += "2)In a terminal window create private and public SSH keys by running the commands:\n"+\
" On ANY question, just hit ENTER!\n"+\
" ssh-keygen -b 1024 -t dsa\n"+\
" cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys\n"
elif not os.path.isfile(os.path.join(os.environ["HOME"],".ssh","authorized_keys")):
self.report += "1)In a terminal window run the command:\n"+\
" cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys\n"
else:
undefinedError = True
self.report += "The problem appears to be on invalid known_hosts or authorized_keys.\n"+\
"1)Try fix it by running in a terminal window run the commands:\n"+\
" You will probably be asked to take further procedures at next start of AZOrange\n"+\
" rm ~/.ssh/known_hosts\n"+\
" rm ~/.ssh/authorized_keys\n"
if not os.path.isfile(os.path.join(os.environ["HOME"],".ssh","config")):
if undefinedError:
self.report = ""
self.report += "It was detected that there is no '~/.ssh/config' file. \n"+\
"1)It is advised to create the SSH config file running for example the commands:\n"+\
" echo \"Host *\" >> ~/.ssh/config\n"+\
" echo \" StrictHostKeyChecking no\" >> ~/.ssh/config\n"+\
" echo \" ServerAliveInterval 45\" >> ~/.ssh/config\n"
else:
# Read the .ssh/config file
sshcfg = open(os.path.join(os.environ["HOME"],".ssh","config"),"r")
sshcfgText = sshcfg.readlines()
sshcfg.close()
#Get the line with the keyword StrictHostKeyChecking
lineWithKeyCh = None
for idx,line in enumerate(sshcfgText):
if "StrictHostKeyChecking" in line:
lineWithKeyCh = idx
#Get the line with the keyword Host *
lineWithHost = None
for idx,line in enumerate(sshcfgText):
if "Host *" in line:
lineWithHost = idx
#Get the line with the keyword KeepAlive
lineWithKeepAlive = None
for idx,line in enumerate(sshcfgText):
if "ServerAliveInterval" in line:
lineWithKeepAlive = idx
#Check ~/.ssh/config is not properly configured
if (lineWithHost == None or "#") in (sshcfgText[lineWithHost]) or \
(lineWithKeyCh == None) or ("#" in sshcfgText[lineWithKeyCh]) or ("no" not in sshcfgText[lineWithKeyCh].lower()) or \
(lineWithKeepAlive == None) or ("#" in sshcfgText[lineWithKeepAlive]) or (" 0" in sshcfgText[lineWithKeepAlive].lower()):
if undefinedError:
self.report = ""
self.report += "It was detected that the file '~/.ssh/config' may not be properly configured. \n"+\
"1)It is advised to edit the file '~/.ssh/config' and make sure that \n"+\
" you have the the the following configuration it is not commented:\n"+\
" \n"+\
" Host *\n"+\
" StrictHostKeyChecking no\n"+\
" ServerAliveInterval 45\n"
if lineWithHost != None:
self.report += "\nTIP: Look in '~/.ssh/config' file arround line number "+str(lineWithHost+1)
return self.report
# Script entry point: run the checks and print either the advice report or a
# success message.
if __name__ == "__main__":
    AC = config()
    report = AC()
    if report != "":
        print "AutoConfig report:\n"+report
    else:
        print "AutoConfiguration was done without any errors reported!"
| JonnaStalring/AZOrange | azorange/AutoConfig.py | Python | lgpl-3.0 | 6,443 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Sascha Silbe <sascha-pgp@silbe.org> (PGP signed emails only)
# Modified by: Daniel Francis <santiago.danielfrancis@gmail.com> for usage in the Sugar File Manager
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""datastore-fuse: access the Sugar data store using FUSE
Mounting this "file system" allows "legacy" applications to access the Sugar
data store.
"""
import errno
import fuse
import logging
import operator
import os
import os.path
import shutil
import stat
import sys
import tempfile
import time
import dbus
from sugar.logger import trace
import sugar.logger
import sugar.mime
# Select the python-fuse 0.2 API flavor.
fuse.fuse_python_api = (0, 2)
# D-Bus coordinates of the Sugar data store service.
DS_DBUS_SERVICE = "org.laptop.sugar.DataStore"
DS_DBUS_INTERFACE = "org.laptop.sugar.DataStore"
DS_DBUS_PATH = "/org/laptop/sugar/DataStore"
# setxattr() flag values (mirror the Linux XATTR_CREATE/XATTR_REPLACE ABI).
XATTR_CREATE = 1
XATTR_REPLACE = 2
# DBus still has no way to indicate an infinite timeout :-/
DBUS_TIMEOUT_MAX = 2**31 / 1000
class DataStoreObjectStat(fuse.Stat):
    """Stat record for a data store entry, carrying its metadata along."""
    # pylint: disable-msg=R0902,R0903
    def __init__(self, filesystem, metadata, size, inode):
        fuse.Stat.__init__(self, st_mode=stat.S_IFREG | 0750, st_ino=inode,
                           st_uid=os.getuid(), st_gid=os.getgid(), st_size=size,
                           st_mtime=self._parse_time(metadata.get('timestamp', '')))
        self.st_ctime = self.st_mtime
        self.st_atime = self.st_mtime
        # One hard link per slash-free tag, plus the by-title name itself.
        tags = [tag for tag in metadata.get('tags', '').split()
                if tag and '/' not in tag]
        self.st_nlink = len(tags) + 1
        self.metadata = metadata
        self._filesystem = filesystem
        self.object_id = metadata['uid']

    def _parse_time(self, timestamp):
        # Missing / malformed timestamps map to the epoch.
        try:
            return int(timestamp, 10)
        except ValueError:
            return 0

    def should_truncate(self):
        # True when a pending truncate() was recorded for this entry.
        return self._filesystem.should_truncate(self.object_id)

    def reset_truncate(self):
        # Clear the pending-truncate mark after it has been honored.
        return self._filesystem.reset_truncate(self.object_id)
class Symlink(fuse.Stat):
def __init__(self, filesystem, target, inode_nr):
self._filesystem = filesystem
self.target = target
fuse.Stat.__init__(self, st_mode=stat.S_IFLNK | 0777, st_nlink=1,
st_uid=os.getuid(), st_gid=os.getgid(), st_ino=inode_nr,
st_mtime=time.time())
self.st_ctime = self.st_mtime
self.st_atime = self.st_mtime
class Directory(fuse.Stat):
    """Base class for the virtual directories exposed by this file system.

    Supplies the stat metadata plus default operation implementations:
    write-type operations are refused with EACCES, extended attributes are
    absent, and lookups fail with ENOENT unless a subclass overrides them.
    """

    def __init__(self, path, parent_path, filesystem, mode):
        self._path = path
        # Bug fix: parent_path was accepted but never stored, so readdir()
        # crashed with AttributeError when emitting the '..' entry.
        self._parent_path = parent_path
        self._filesystem = filesystem
        fuse.Stat.__init__(self, st_mode=stat.S_IFDIR | mode, st_nlink=2,
                           st_uid=os.getuid(), st_gid=os.getgid(),
                           st_mtime=time.time())
        self.st_ctime = self.st_mtime
        self.st_atime = self.st_mtime
        self.st_ino = filesystem.get_inode_number(path)

    def getxattr(self, name_, attribute_):
        """Directories expose no extended attributes by default."""
        # on Linux ENOATTR=ENODATA (Python errno doesn't contain ENOATTR)
        raise IOError(errno.ENODATA, os.strerror(errno.ENODATA))

    def listxattr(self, name_):
        return []

    def lookup(self, name_):
        """Resolve *name* to a stat-like entry; base class knows no entries."""
        raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))

    def mkdir(self, name_):
        raise IOError(errno.EACCES, os.strerror(errno.EACCES))

    def mknod(self, name_):
        raise IOError(errno.EACCES, os.strerror(errno.EACCES))

    def readdir(self, offset_):
        """Yield the '.' and '..' entries; subclasses add their contents."""
        yield fuse.Direntry('.',
                            self._filesystem.get_inode_number(self._path))
        yield fuse.Direntry('..',
                            self._filesystem.get_inode_number(self._parent_path))

    def readlink(self, name):
        """Return the target of the symlink *name* (EINVAL for non-links)."""
        entry = self.lookup(name)
        if not isinstance(entry, Symlink):
            raise IOError(errno.EINVAL, os.strerror(errno.EINVAL))
        return entry.target

    def remove(self, name_):
        raise IOError(errno.EACCES, os.strerror(errno.EACCES))

    def setxattr(self, name_, attribute_, value_, flags_):
        # On Linux ENOTSUP = EOPNOTSUPP
        raise IOError(errno.EOPNOTSUPP, os.strerror(errno.EOPNOTSUPP))
class ByTitleDirectory(Directory):
def __init__(self, path, parent_path, filesystem):
Directory.__init__(self, path, parent_path, filesystem, 0750)
def readdir(self, offset):
Directory.readdir(self, offset)
for entry in self._find_entries():
if 'uid' not in entry:
# corrupted entry
continue
name = self._filesystem.lookup_title_name(entry['uid'])
yield fuse.Direntry(name,
ino=self._filesystem.get_inode_number(entry['uid']))
@trace()
def _find_entries(self):
return self._filesystem.find({},
{'metadata': ['title', 'uid', 'timestamp']})
def getxattr(self, name, attribute):
object_id = self._filesystem.resolve_title_name(name)
metadata = self._filesystem.get_metadata(object_id)
if attribute in metadata:
return metadata[attribute]
Directory.getxattr(self, object_id, attribute)
def listxattr(self, name):
object_id = self._filesystem.resolve_title_name(name)
metadata = self._filesystem.get_metadata(object_id)
return [str(name) for name in metadata.keys()]
def lookup(self, name):
object_id = self._filesystem.resolve_title_name(name)
metadata = self._filesystem.get_metadata(object_id)
size = self._filesystem.get_data_size(object_id)
return DataStoreObjectStat(self._filesystem, metadata, size,
self._filesystem.get_inode_number(object_id))
def mknod(self, name):
object_id = self._filesystem.create_new(name, '')
def remove(self, name):
object_id = self._filesystem.resolve_title_name(name)
self._filesystem.remove_entry(object_id)
def setxattr(self, name, attribute, value, flags):
object_id = self._filesystem.resolve_title_name(name)
metadata = self._filesystem.get_metadata(object_id)
if flags & XATTR_CREATE and attribute in metadata:
raise IOError(errno.EEXIST, os.strerror(errno.EEXIST))
if flags & XATTR_REPLACE and attribute not in metadata:
# on Linux ENOATTR=ENODATA (Python errno doesn't contain ENOATTR)
raise IOError(errno.ENODATA, os.strerror(errno.ENODATA))
metadata[attribute] = value
self._filesystem.write_metadata(object_id, metadata)
class RootDirectory(ByTitleDirectory):
    """The mount point: lists all entries by title directly at '/'."""
    def __init__(self, filesystem):
        ByTitleDirectory.__init__(self, '/', '/', filesystem)
        self.by_title_directory = self

    def readdir(self, offset_):
        for entry in ByTitleDirectory.readdir(self, offset_):
            yield entry

    def lookup(self, name):
        # NOTE(review): self.by_id_directory / self.by_tags_directory are
        # never assigned anywhere in this file, so looking up 'by-id' or
        # 'by-tags' raises AttributeError -- presumably leftovers from the
        # original datastore-fuse these classes were adapted from; confirm
        # whether these branches should be removed.
        if name == 'by-id':
            return self.by_id_directory
        elif name == 'by-tags':
            return self.by_tags_directory
        return ByTitleDirectory.lookup(self, name)

    def remove(self, name):
        # The reserved sub-directory names must not be deletable.
        if name in ['by-id', 'by-tags']:
            raise IOError(errno.EACCES, os.strerror(errno.EACCES))
        return ByTitleDirectory.remove(self, name)
class DataStoreFile(object):
    """FUSE file object: edits happen on a local copy of the entry's data.

    Reads/writes go to a temporary (or checked-out) file; on fsync/release
    dirty contents are written back to the data store.
    """
    # Mask isolating the access mode from the open() flags.
    _ACCESS_MASK = os.O_RDONLY | os.O_RDWR | os.O_WRONLY
    direct_io = False
    keep_cache = False

    @trace()
    def __init__(self, filesystem, path, flags, mode=None):
        self._filesystem = filesystem
        self._flags = flags
        self._read_only = False
        self._is_temporary = False
        self._dirty = False
        self._path = path
        # Contrary to what's documented in the wiki, we'll get passed O_CREAT
        # and mknod() won't get called automatically, so we'll have to take
        # care of all possible cases ourselves.
        if flags & os.O_EXCL:
            filesystem.mknod(path)
            entry = filesystem.getattr(path)
        else:
            try:
                entry = filesystem.getattr(path)
            except IOError, exception:
                if exception.errno != errno.ENOENT:
                    raise
                if not flags & os.O_CREAT:
                    raise
                filesystem.mknod(path, flags, mode)
                entry = filesystem.getattr(path)
        # mknod() might have created a symlink at our path...
        if isinstance(entry, Symlink):
            entry = filesystem.getattr(entry.target)
        self._object_id = entry.object_id
        self._read_only = flags & self._ACCESS_MASK == os.O_RDONLY
        # Honor both an O_TRUNC flag and a truncate() recorded earlier.
        if entry.should_truncate() or flags & os.O_TRUNC:
            self._file = self._create()
            entry.reset_truncate()
        else:
            self._file = self._checkout()

    def _create(self):
        # Fresh empty scratch file; deleted automatically on close.
        self._is_temporary = True
        return tempfile.NamedTemporaryFile(prefix='datastore-fuse')

    def _checkout(self):
        """Fetch the entry's current data into a local working file."""
        name = self._filesystem.get_data(self._object_id)
        if not name:
            # existing, but empty entry
            return self._create()
        if self._read_only:
            return file(name)
        try:
            # Work on a private copy so aborted writes don't corrupt the
            # data-store-provided file.
            copy = self._create()
            shutil.copyfileobj(file(name), copy)
            copy.seek(0)
            return copy
        finally:
            os.remove(name)

    @trace()
    def read(self, length, offset):
        self._file.seek(offset)
        return self._file.read(length)

    @trace()
    def write(self, buf, offset):
        # O_APPEND writes always go to the end, ignoring the offset.
        if self._flags & os.O_APPEND:
            self._file.seek(0, os.SEEK_END)
        else:
            self._file.seek(offset)
        self._file.write(buf)
        self._dirty = True
        return len(buf)

    @trace()
    def release(self, flags_):
        """Flush pending changes and dispose of the working file."""
        self.fsync()
        self._file.close()
        if not self._is_temporary:
            os.remove(self._file.name)

    @trace()
    def fsync(self, isfsyncfile_=None):
        # Write the local copy back to the data store if it was modified.
        self.flush()
        if self._read_only:
            return
        if self._dirty:
            self._filesystem.write_data(self._object_id, self._file.name)

    @trace()
    def flush(self):
        self._file.flush()

    @trace()
    def fgetattr(self):
        return self._filesystem.getattr(self._path)

    @trace()
    def ftruncate(self, length):
        self._file.truncate(length)

    def lock(self, cmd_, owner_, **kwargs_):
        # File locking is not supported.
        raise IOError(errno.EACCES, os.strerror(errno.EACCES))
class DataStoreFS(fuse.Fuse):
def __init__(self_fs, *args, **kw):
# pylint: disable-msg=E0213
class WrappedDataStoreFile(DataStoreFile):
def __init__(self_file, *args, **kwargs):
# pylint: disable-msg=E0213
DataStoreFile.__init__(self_file, self_fs, *args, **kwargs)
self_fs.file_class = WrappedDataStoreFile
self_fs._truncate_object_ids = set()
self_fs._object_id_to_title_name = {}
self_fs._title_name_to_object_id = {}
self_fs._max_inode_number = 1
self_fs._object_id_to_inode_number = {}
fuse.Fuse.__init__(self_fs, *args, **kw)
bus = dbus.SessionBus()
self_fs._data_store = dbus.Interface(bus.get_object(DS_DBUS_SERVICE,
DS_DBUS_PATH), DS_DBUS_INTERFACE)
self_fs._root = RootDirectory(self_fs)
# TODO: listen to DS signals to update name mapping
@trace()
def getattr(self, path):
components = [name for name in path.lstrip('/').split('/') if name]
entry = self._root
while components:
entry = entry.lookup(components.pop(0))
return entry
@trace()
def _delegate(self, path, action, *args):
directory_name, file_name = os.path.split(path.strip('/'))
directory = self.getattr(directory_name)
return getattr(directory, action)(file_name, *args)
def readdir(self, path, offset=None):
return self.getattr(path).readdir(offset)
def readlink(self, path):
return self._delegate(path, 'readlink')
def mknod(self, path, mode_=None, dev_=None):
# called by FUSE for open(O_CREAT) before instantiating the file
return self._delegate(path, 'mknod')
def truncate(self, path, mode_=None, dev_=None):
# Documented to be called by FUSE when opening files with O_TRUNC,
# unless -o o_trunc_atomic is passed as a CLI option
entry = self.getattr(path)
if isinstance(entry, Directory):
raise IOError(errno.EISDIR, os.strerror(errno.EISDIR))
self._truncate_object_ids.add(entry.object_id)
def unlink(self, path):
self._delegate(path, 'remove')
@trace()
def utime(self, path_, times_):
# TODO: update timestamp property
return
def mkdir(self, path, mode_):
self._delegate(path, 'mkdir')
@trace()
def rmdir(self, path_):
raise IOError(errno.EACCES, os.strerror(errno.EACCES))
def rename(self, pathfrom, pathto):
self._delegate(pathfrom, 'rename', pathto)
@trace()
def symlink(self, destination_, path_):
# TODO for tags?
raise IOError(errno.EACCES, os.strerror(errno.EACCES))
@trace()
def link(self, destination_, path_):
raise IOError(errno.EACCES, os.strerror(errno.EACCES))
@trace()
def chmod(self, path_, mode_):
raise IOError(errno.EACCES, os.strerror(errno.EACCES))
@trace()
def chown(self, path_, user_, group_):
raise IOError(errno.EACCES, os.strerror(errno.EACCES))
def getxattr(self, path, name, size):
if not name.startswith('user.'):
raise IOError(errno.ENODATA, os.strerror(errno.ENODATA))
name = name[5:]
value = self._delegate(path, 'getxattr', name)
if not size:
# We are asked for size of the value.
return len(value)
return str(value)
def listxattr(self, path, size):
attribute_names = ['user.' + name
for name in self._delegate(path, 'listxattr')]
if not size:
# We are asked for the size of the \0-separated list.
return reduce(operator.add,
[len(name) + 1 for name in attribute_names], 0)
return attribute_names
def setxattr(self, path, name, value, flags):
if not name.startswith('user.'):
raise IOError(errno.EACCES, os.strerror(errno.EACCES))
name = name[5:]
return self._delegate(path, 'setxattr', name, value, flags)
def should_truncate(self, object_id):
return object_id in self._truncate_object_ids
def reset_truncate(self, object_id):
self._truncate_object_ids.discard(object_id)
@trace()
def find(self, metadata, options):
mess = metadata.copy()
mess.update(options)
properties = mess.pop('metadata', [])
logging.debug('mess=%r, properties=%r', mess, properties)
return self._data_store.find(mess, properties,
timeout=DBUS_TIMEOUT_MAX, byte_arrays=True)[0]
def get_metadata(self, object_id):
try:
return self._data_store.get_properties(object_id,
timeout=DBUS_TIMEOUT_MAX, byte_arrays=True)
except Exception, exception:
raise IOError(errno.ENOENT, str(exception))
def create_new(self, name, path, tags=None):
base_name = os.path.splitext(name)[0]
metadata = {'title': base_name}
mime_type = sugar.mime.get_from_file_name(name)
if mime_type:
metadata['mime_type'] = mime_type
if tags:
metadata['tags'] = ' '.join(tags)
object_id = self._data_store.create(metadata, path, False,
timeout=DBUS_TIMEOUT_MAX, byte_arrays=True)
self._add_title_name(name, object_id)
def remove_entry(self, object_id):
try:
self._data_store.delete(object_id)
except Exception, exception:
raise IOError(errno.ENOENT, str(exception))
self._remove_title_name_by_object_id(object_id)
self._truncate_object_ids.discard(object_id)
def get_data(self, object_id):
try:
return self._data_store.get_filename(object_id,
timeout=DBUS_TIMEOUT_MAX, byte_arrays=True)
except Exception, exception:
raise IOError(errno.ENOENT, str(exception))
def get_data_size(self, object_id):
try:
file_name = self.get_data(object_id)
except Exception, exception:
raise IOError(errno.ENOENT, str(exception))
if not file_name:
return 0
try:
return os.stat(file_name).st_size
finally:
os.remove(file_name)
@trace()
def write_data(self, object_id, file_name):
metadata = self.get_metadata(object_id)
return self._data_store.update(object_id, metadata, file_name, False,
timeout=DBUS_TIMEOUT_MAX, byte_arrays=True)
def write_metadata(self, object_id, metadata):
# Current data store doesn't support metadata-only updates
file_name = self.get_data(object_id)
return self._data_store.update(object_id, metadata, file_name,
True, timeout=DBUS_TIMEOUT_MAX, byte_arrays=True)
def resolve_title_name(self, name):
if name not in self._title_name_to_object_id:
# FIXME: Hack to fill self._title_name_to_object_id. To be
# replaced by parsing the name and doing a specific search.
list(self.readdir('/'))
try:
return self._title_name_to_object_id[name]
except KeyError:
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))
def try_resolve_title_name(self, name):
return self._title_name_to_object_id.get(name)
def lookup_title_name(self, object_id):
name = self._object_id_to_title_name.get(object_id)
if name:
return name
metadata = self.get_metadata(object_id)
name = self._generate_title_name(metadata, object_id)
self._add_title_name(name, object_id)
return name
def _add_title_name(self, name, object_id):
self._object_id_to_title_name[object_id] = name
self._title_name_to_object_id[name] = object_id
return name
@trace()
def _generate_title_name(self, metadata, object_id):
title = metadata.get('title')
name = title
name = safe_name(name)
extension = self._guess_extension(metadata.get('mime_type'), object_id)
if extension:
current_name = '%s.%s' % (name, extension)
else:
current_name = name
counter = 1
while current_name in self._title_name_to_object_id:
counter += 1
if extension:
current_name = '%s %d.%s' % (name, counter, extension)
else:
current_name = '%s %d' % (name, counter)
return current_name
def _remove_title_name_by_object_id(self, object_id):
name = self._object_id_to_title_name.pop(object_id, None)
if name:
del self._title_name_to_object_id[name]
def _remove_title_name_by_name(self, name):
object_id = self._title_name_to_object_id.pop(name, None)
if object_id:
del self._object_id_to_title_name[object_id]
def get_inode_number(self, key):
if key not in self._object_id_to_inode_number:
inode_number = self._max_inode_number
self._max_inode_number += 1
self._object_id_to_inode_number[key] = inode_number
return self._object_id_to_inode_number[key]
    def _guess_extension(self, mime_type, object_id):
        """Best-effort file extension for an entry.

        If no mime_type is supplied, sniff it from the entry's data; the
        temporary copy fetched via get_data() is always removed.
        """
        extension = None
        if not mime_type:
            file_name = self.get_data(object_id)
            if file_name:
                try:
                    mime_type = sugar.mime.get_for_file(file_name)
                finally:
                    os.remove(file_name)
        if mime_type:
            extension = sugar.mime.get_primary_extension(mime_type)
        return extension
def safe_name(name):
    """Return *name* with '/' characters replaced by '_' so it can be
    used as a single path component."""
    return '_'.join(name.split('/'))
def main():
    """Mount the Sugar data store as a FUSE filesystem and serve it."""
    usage = "datastore-fuse: access the Sugar data store using FUSE\n"
    usage += fuse.Fuse.fusage
    # FIXME: figure out how to force options to on, properly.
    sys.argv += ['-o', 'use_ino']
    server = DataStoreFS(version="%prog " + fuse.__version__, usage=usage,
                         dash_s_do='setsingle')
    server.parse(errex=1)
    sugar.logger.start()
    server.main()
    # Keep the process alive after server.main() returns.  The original
    # busy-waited with `while True: pass`, pinning a CPU core at 100%;
    # sleeping is equivalent for callers but idles properly.
    import time
    while True:
        time.sleep(60)


if __name__ == '__main__':
    main()
| Daksh/file-manager | mount_journal.py | Python | gpl-3.0 | 20,979 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class SubscriptionsOperations(object):
    """SubscriptionsOperations operations.

    AutoRest-generated client for the Service Bus topic-subscription REST
    resource (list / create-or-update / delete / get).

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-04-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Service API version pinned for every request issued by this class.
        self.api_version = "2017-04-01"

        self.config = config

    def list_by_topic(
            self, resource_group_name, namespace_name, topic_name, custom_headers=None, raw=False, **operation_config):
        """List all the subscriptions under a specified topic.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SBSubscription
        :rtype:
         ~azure.mgmt.servicebus.models.SBSubscriptionPaged[~azure.mgmt.servicebus.models.SBSubscription]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages follow the
            # server-supplied next_link verbatim (no extra query params).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
                    'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response; the paged collection calls internal_paging
        # lazily as the caller iterates.
        deserialized = models.SBSubscriptionPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SBSubscriptionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, resource_group_name, namespace_name, topic_name, subscription_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates a topic subscription.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param parameters: Parameters supplied to create a subscription
         resource.
        :type parameters: ~azure.mgmt.servicebus.models.SBSubscription
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SBSubscription or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.servicebus.models.SBSubscription or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'SBSubscription')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SBSubscription', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_group_name, namespace_name, topic_name, subscription_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a subscription from the specified topic.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        # 204 means the subscription was already absent; both are success.
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 204]:
            raise models.ErrorResponseException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def get(
            self, resource_group_name, namespace_name, topic_name, subscription_name, custom_headers=None, raw=False, **operation_config):
        """Returns a subscription description for the specified topic.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SBSubscription or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.servicebus.models.SBSubscription or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SBSubscription', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
| lmazuel/azure-sdk-for-python | azure-mgmt-servicebus/azure/mgmt/servicebus/operations/subscriptions_operations.py | Python | mit | 15,254 |
# coding: utf-8
from werkzeug import urls
from .authorize_request import AuthorizeAPI
from datetime import datetime
import hashlib
import hmac
import logging
import time
from odoo import _, api, fields, models
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment_authorize.controllers.main import AuthorizeController
from odoo.tools.float_utils import float_compare
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class PaymentAcquirerAuthorize(models.Model):
    """Authorize.Net payment acquirer (form, s2s and tokenization flows)."""
    _inherit = 'payment.acquirer'

    provider = fields.Selection(selection_add=[('authorize', 'Authorize.Net')])
    authorize_login = fields.Char(string='API Login Id', required_if_provider='authorize', groups='base.group_user')
    authorize_transaction_key = fields.Char(string='API Transaction Key', required_if_provider='authorize', groups='base.group_user')

    def _get_feature_support(self):
        """Get advanced feature support by provider.

        Each provider should add its technical in the corresponding
        key for the following features:
            * fees: support payment fees computations
            * authorize: support authorizing payment (separates
              authorization and capture)
            * tokenize: support saving payment data in a payment.tokenize
              object
        """
        res = super(PaymentAcquirerAuthorize, self)._get_feature_support()
        res['authorize'].append('authorize')
        res['tokenize'].append('authorize')
        return res

    def _get_authorize_urls(self, environment):
        """Authorize URLs (production vs. test gateway)."""
        if environment == 'prod':
            return {'authorize_form_url': 'https://secure2.authorize.net/gateway/transact.dll'}
        else:
            return {'authorize_form_url': 'https://test.authorize.net/gateway/transact.dll'}

    def _authorize_generate_hashing(self, values):
        """Compute the HMAC-MD5 fingerprint Authorize.Net expects for
        form transactions (fields joined with '^', keyed on the
        transaction key)."""
        data = '^'.join([
            values['x_login'],
            values['x_fp_sequence'],
            values['x_fp_timestamp'],
            values['x_amount'],
            values['x_currency_code']])
        return hmac.new(values['x_trans_key'].encode('utf-8'), data.encode('utf-8'), hashlib.md5).hexdigest()

    @api.multi
    def authorize_form_generate_values(self, values):
        """Build the hidden form values (x_* fields + billing info) for
        the Authorize.Net hosted payment form, including the signed
        fingerprint."""
        self.ensure_one()
        base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        authorize_tx_values = dict(values)
        temp_authorize_tx_values = {
            'x_login': self.authorize_login,
            'x_trans_key': self.authorize_transaction_key,
            'x_amount': str(values['amount']),
            'x_show_form': 'PAYMENT_FORM',
            'x_type': 'AUTH_CAPTURE' if not self.capture_manually else 'AUTH_ONLY',
            'x_method': 'CC',
            'x_fp_sequence': '%s%s' % (self.id, int(time.time())),
            'x_version': '3.1',
            'x_relay_response': 'TRUE',
            'x_fp_timestamp': str(int(time.time())),
            'x_relay_url': urls.url_join(base_url, AuthorizeController._return_url),
            'x_cancel_url': urls.url_join(base_url, AuthorizeController._cancel_url),
            'x_currency_code': values['currency'] and values['currency'].name or '',
            'address': values.get('partner_address'),
            'city': values.get('partner_city'),
            'country': values.get('partner_country') and values.get('partner_country').name or '',
            'email': values.get('partner_email'),
            'zip_code': values.get('partner_zip'),
            'first_name': values.get('partner_first_name'),
            'last_name': values.get('partner_last_name'),
            'phone': values.get('partner_phone'),
            'state': values.get('partner_state') and values['partner_state'].code or '',
            'billing_address': values.get('billing_partner_address'),
            'billing_city': values.get('billing_partner_city'),
            'billing_country': values.get('billing_partner_country') and values.get('billing_partner_country').name or '',
            'billing_email': values.get('billing_partner_email'),
            'billing_zip_code': values.get('billing_partner_zip'),
            'billing_first_name': values.get('billing_partner_first_name'),
            'billing_last_name': values.get('billing_partner_last_name'),
            'billing_phone': values.get('billing_partner_phone'),
            'billing_state': values.get('billing_partner_state') and values['billing_partner_state'].code or '',
        }
        temp_authorize_tx_values['returndata'] = authorize_tx_values.pop('return_url', '')
        temp_authorize_tx_values['x_fp_hash'] = self._authorize_generate_hashing(temp_authorize_tx_values)
        authorize_tx_values.update(temp_authorize_tx_values)
        return authorize_tx_values

    @api.multi
    def authorize_get_form_action_url(self):
        """URL the payment form posts to, depending on the environment."""
        self.ensure_one()
        return self._get_authorize_urls(self.environment)['authorize_form_url']

    @api.model
    def authorize_s2s_form_process(self, data):
        """Create a payment.token record from the raw s2s form data."""
        values = {
            'cc_number': data.get('cc_number'),
            'cc_holder_name': data.get('cc_holder_name'),
            'cc_expiry': data.get('cc_expiry'),
            'cc_cvc': data.get('cc_cvc'),
            'cc_brand': data.get('cc_brand'),
            'acquirer_id': int(data.get('acquirer_id')),
            'partner_id': int(data.get('partner_id'))
        }
        PaymentMethod = self.env['payment.token'].sudo().create(values)
        return PaymentMethod

    @api.multi
    def authorize_s2s_form_validate(self, data):
        """Validate the s2s card form: all fields present and the card
        not expired.  Returns True when valid, False otherwise."""
        error = dict()
        mandatory_fields = ["cc_number", "cc_cvc", "cc_holder_name", "cc_expiry", "cc_brand"]
        # Validation
        for field_name in mandatory_fields:
            if not data.get(field_name):
                error[field_name] = 'missing'
        # BUG FIX: the original used '%M' (minutes) instead of '%m' (month)
        # in both format strings, so expiry comparison was meaningless.
        if data['cc_expiry'] and datetime.now().strftime('%y%m') > datetime.strptime(data['cc_expiry'], '%m / %y').strftime('%y%m'):
            return False
        return False if error else True

    @api.multi
    def authorize_test_credentials(self):
        """Check the configured API credentials against Authorize.Net."""
        self.ensure_one()
        # BUG FIX: this method runs on payment.acquirer itself, which has no
        # 'acquirer_id' field — pass the acquirer record directly.
        transaction = AuthorizeAPI(self)
        return transaction.test_authenticate()
class TxAuthorize(models.Model):
    """payment.transaction extension implementing the Authorize.Net
    form-feedback and server-to-server state machine."""
    _inherit = 'payment.transaction'

    # Authorize.Net x_response_code values.
    _authorize_valid_tx_status = 1
    _authorize_pending_tx_status = 4
    _authorize_cancel_tx_status = 2

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    @api.model
    def create(self, vals):
        # The reference is used in the Authorize form to fill a field (invoiceNumber) which is
        # limited to 20 characters. We truncate the reference now, since it will be reused at
        # payment validation to find back the transaction.
        if 'reference' in vals and 'acquirer_id' in vals:
            acquier = self.env['payment.acquirer'].browse(vals['acquirer_id'])
            if acquier.provider == 'authorize':
                vals['reference'] = vals.get('reference', '')[:20]
        return super(TxAuthorize, self).create(vals)

    @api.model
    def _authorize_form_get_tx_from_data(self, data):
        """ Given a data dict coming from authorize, verify it and find the related
        transaction record. """
        reference, trans_id, fingerprint = data.get('x_invoice_num'), data.get('x_trans_id'), data.get('x_MD5_Hash')
        if not reference or not trans_id or not fingerprint:
            error_msg = _('Authorize: received data with missing reference (%s) or trans_id (%s) or fingerprint (%s)') % (reference, trans_id, fingerprint)
            _logger.info(error_msg)
            raise ValidationError(error_msg)
        # The reference must identify exactly one transaction.
        tx = self.search([('reference', '=', reference)])
        if not tx or len(tx) > 1:
            error_msg = 'Authorize: received data for reference %s' % (reference)
            if not tx:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.info(error_msg)
            raise ValidationError(error_msg)
        return tx[0]

    @api.multi
    def _authorize_form_get_invalid_parameters(self, data):
        """Return (name, received, expected) tuples for any mismatched
        form-feedback values."""
        invalid_parameters = []

        if self.acquirer_reference and data.get('x_trans_id') != self.acquirer_reference:
            invalid_parameters.append(('Transaction Id', data.get('x_trans_id'), self.acquirer_reference))
        # check the amount actually charged
        if float_compare(float(data.get('x_amount', '0.0')), self.amount, 2) != 0:
            invalid_parameters.append(('Amount', data.get('x_amount'), '%.2f' % self.amount))
        return invalid_parameters

    @api.multi
    def _authorize_form_validate(self, data):
        """Apply the Authorize form feedback to the transaction state;
        optionally creates a payment token when the customer asked to
        save the card."""
        if self.state in ['done', 'refunded']:
            _logger.warning('Authorize: trying to validate an already validated tx (ref %s)' % self.reference)
            return True
        status_code = int(data.get('x_response_code', '0'))
        if status_code == self._authorize_valid_tx_status:
            if data.get('x_type').lower() in ['auth_capture', 'prior_auth_capture']:
                self.write({
                    'state': 'done',
                    'acquirer_reference': data.get('x_trans_id'),
                    'date_validate': fields.Datetime.now(),
                })
            elif data.get('x_type').lower() in ['auth_only']:
                self.write({
                    'state': 'authorized',
                    'acquirer_reference': data.get('x_trans_id'),
                })
            # Tokenize the card on the acquirer side when requested.
            if self.partner_id and not self.payment_token_id and \
               (self.type == 'form_save' or self.acquirer_id.save_token == 'always'):
                transaction = AuthorizeAPI(self.acquirer_id)
                res = transaction.create_customer_profile_from_tx(self.partner_id, self.acquirer_reference)
                token_id = self.env['payment.token'].create({
                    'authorize_profile': res.get('profile_id'),
                    'name': res.get('name'),
                    'acquirer_ref': res.get('payment_profile_id'),
                    'acquirer_id': self.acquirer_id.id,
                    'partner_id': self.partner_id.id,
                })
                self.payment_token_id = token_id
            if self.payment_token_id:
                self.payment_token_id.verified = True
            return True
        elif status_code == self._authorize_pending_tx_status:
            self.write({
                'state': 'pending',
                'acquirer_reference': data.get('x_trans_id'),
            })
            return True
        elif status_code == self._authorize_cancel_tx_status:
            self.write({
                'state': 'cancel',
                'acquirer_reference': data.get('x_trans_id'),
                'state_message': data.get('x_response_reason_text'),
            })
            return True
        else:
            error = data.get('x_response_reason_text')
            _logger.info(error)
            self.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': data.get('x_trans_id'),
            })
            return False

    @api.multi
    def authorize_s2s_do_transaction(self, **data):
        """Charge (or only authorize, in manual-capture mode) the stored
        payment token for this transaction."""
        self.ensure_one()
        transaction = AuthorizeAPI(self.acquirer_id)
        if not self.acquirer_id.capture_manually:
            res = transaction.auth_and_capture(self.payment_token_id, self.amount, self.reference)
        else:
            res = transaction.authorize(self.payment_token_id, self.amount, self.reference)
        return self._authorize_s2s_validate_tree(res)

    @api.multi
    def authorize_s2s_do_refund(self):
        """Refund the transaction (void it for validation transactions)."""
        self.ensure_one()
        transaction = AuthorizeAPI(self.acquirer_id)
        self.state = 'refunding'
        if self.type == 'validation':
            res = transaction.void(self.acquirer_reference)
        else:
            res = transaction.credit(self.payment_token_id, self.amount, self.acquirer_reference)
        return self._authorize_s2s_validate_tree(res)

    @api.multi
    def authorize_s2s_capture_transaction(self):
        """Capture a previously authorized amount."""
        self.ensure_one()
        transaction = AuthorizeAPI(self.acquirer_id)
        tree = transaction.capture(self.acquirer_reference or '', self.amount)
        return self._authorize_s2s_validate_tree(tree)

    @api.multi
    def authorize_s2s_void_transaction(self):
        """Void a previously authorized (not yet captured) transaction."""
        self.ensure_one()
        transaction = AuthorizeAPI(self.acquirer_id)
        tree = transaction.void(self.acquirer_reference or '')
        return self._authorize_s2s_validate_tree(tree)

    @api.multi
    def _authorize_s2s_validate_tree(self, tree):
        # Thin alias kept for API stability.
        return self._authorize_s2s_validate(tree)

    @api.multi
    def _authorize_s2s_validate(self, tree):
        """Apply an Authorize.Net s2s response to the transaction state
        (mirror of _authorize_form_validate for the server API)."""
        if self.state in ['done', 'refunded']:
            _logger.warning('Authorize: trying to validate an already validated tx (ref %s)' % self.reference)
            return True
        status_code = int(tree.get('x_response_code', '0'))
        if status_code == self._authorize_valid_tx_status:
            if tree.get('x_type').lower() in ['auth_capture', 'prior_auth_capture']:
                init_state = self.state
                self.write({
                    'state': 'done',
                    'acquirer_reference': tree.get('x_trans_id'),
                    'date_validate': fields.Datetime.now(),
                })
                # Avoid firing the callback twice (once at authorization,
                # once at capture).
                if init_state != 'authorized':
                    self.execute_callback()
                if self.payment_token_id:
                    self.payment_token_id.verified = True
            if tree.get('x_type').lower() == 'auth_only':
                self.write({
                    'state': 'authorized',
                    'acquirer_reference': tree.get('x_trans_id'),
                })
                self.execute_callback()
            if tree.get('x_type').lower() == 'void':
                # A voided validation transaction counts as refunded.
                if self.type == 'validation' and self.state == 'refunding':
                    self.write({
                        'state': 'refunded',
                    })
                else:
                    self.write({
                        'state': 'cancel',
                    })
            return True
        elif status_code == self._authorize_pending_tx_status:
            new_state = 'refunding' if self.state == 'refunding' else 'pending'
            self.write({
                'state': new_state,
                'acquirer_reference': tree.get('x_trans_id'),
            })
            return True
        elif status_code == self._authorize_cancel_tx_status:
            self.write({
                'state': 'cancel',
                'acquirer_reference': tree.get('x_trans_id'),
            })
            return True
        else:
            error = tree.get('x_response_reason_text')
            _logger.info(error)
            self.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': tree.get('x_trans_id'),
            })
            return False
class PaymentToken(models.Model):
    """payment.token extension storing the Authorize.Net customer
    profile references."""
    _inherit = 'payment.token'

    authorize_profile = fields.Char(string='Authorize.net Profile ID', help='This contains the unique reference '
                                    'for this partner/payment token combination in the Authorize.net backend')
    provider = fields.Selection(string='Provider', related='acquirer_id.provider')
    save_token = fields.Selection(string='Save Cards', related='acquirer_id.save_token')

    @api.model
    def authorize_create(self, values):
        """Create the customer/payment profile on Authorize.Net from raw
        card data and return the token field values; the raw card data
        itself is never stored."""
        if values.get('cc_number'):
            values['cc_number'] = values['cc_number'].replace(' ', '')
            acquirer = self.env['payment.acquirer'].browse(values['acquirer_id'])
            # 'MM / YY' -> 'MMYY' as expected by the Authorize API.
            expiry = str(values['cc_expiry'][:2]) + str(values['cc_expiry'][-2:])
            partner = self.env['res.partner'].browse(values['partner_id'])
            transaction = AuthorizeAPI(acquirer)
            res = transaction.create_customer_profile(partner, values['cc_number'], expiry, values['cc_cvc'])
            if res.get('profile_id') and res.get('payment_profile_id'):
                return {
                    'authorize_profile': res.get('profile_id'),
                    'name': 'XXXXXXXXXXXX%s - %s' % (values['cc_number'][-4:], values['cc_holder_name']),
                    'acquirer_ref': res.get('payment_profile_id'),
                }
            else:
                raise ValidationError(_('The Customer Profile creation in Authorize.NET failed.'))
        else:
            return values
| Aravinthu/odoo | addons/payment_authorize/models/payment.py | Python | agpl-3.0 | 16,745 |
"""Парсинг параметров из командной строки и конфигурационного файла."""
from os import linesep
from pathlib import Path
from typing import Tuple
from configargparse import ArgumentParser, Namespace, DefaultConfigFileParser
from codestyle import (__name__ as application_name,
__description__ as application_description)
from codestyle.parameters import PARAMETERS
class ParametersStorage(Namespace):
    """Parameter storage, populated from the config file and the CLI."""

    @property
    def line_separator(self) -> str:
        """Line separator, honouring the ``compact`` flag."""
        if getattr(self, 'compact', False):
            return ''
        return linesep

    @property
    def logging_level(self) -> str:
        """Logging level derived from the ``debug`` and ``quiet`` flags.

        DEBUG always wins when debug mode is enabled; otherwise WARNING
        in quiet mode, and INFO by default.
        """
        if getattr(self, 'debug', False):
            return 'DEBUG'
        return 'WARNING' if getattr(self, 'quiet', False) else 'INFO'
class ArgumentationTool:
    """Defines and parses the application parameters.

    The parameter set itself is declared in the ``parameters`` module;
    values come from ``.<package>.cfg`` in the home directory, then the
    current working directory, then the command line.
    """

    PARAMETERS_FILE_NAME = f'.{__package__}.cfg'
    # BUG FIX: Path('.codestyle.cfg').expanduser() expands nothing (the name
    # has no leading '~'), so the "user" path silently resolved relative to
    # the CWD and duplicated IN_CWD_PARAMETERS_PATH.  Anchor it at $HOME.
    USER_PARAMETERS_PATH = (Path.home() / PARAMETERS_FILE_NAME).absolute()
    IN_CWD_PARAMETERS_PATH = Path.cwd().absolute() / PARAMETERS_FILE_NAME
    DEFAULT_CONFIG_FILES: Tuple[str, ...] = (str(USER_PARAMETERS_PATH),
                                             str(IN_CWD_PARAMETERS_PATH))

    def __init__(self):
        """Build the parser, register all parameters and parse them."""
        self.__argument_parser = ArgumentParser(
            add_env_var_help=False,
            config_file_parser_class=DefaultConfigFileParser,
            default_config_files=self.DEFAULT_CONFIG_FILES,
            prog=application_name,
            description=application_description)
        self.__define_parameters()
        # parse_known_args: unknown options are passed through untouched.
        self.parameters_storage, _ = self.__argument_parser.parse_known_args(
            namespace=ParametersStorage())

    def __define_parameters(self):
        """Register every parameter declared in ``PARAMETERS``."""
        for arguments, options in PARAMETERS:
            self.__argument_parser.add_argument(*arguments, **options)
| webpp-studio/codestyle | codestyle/parameters_parse.py | Python | gpl-3.0 | 2,850 |
"""
Django settings for form_sab project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^^n3(lyliqi9q$^w_87jg%u6d=li@$_+5gm7=wtxu^i6u#c77_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'form_app.apps.FormAppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'form_sab.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'form_sab.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
ALLOWED_HOSTS = ["*"] | Pravandan/sab_lang | form_sab/settings.py | Python | mit | 3,160 |
__version__= '3.0.8' | kumar-physics/PyBMRB | pybmrb/__init__.py | Python | mit | 20 |
#!/usr/bin/python
#-*- coding:utf-8 -*-
def transformation(chaine):
    """Apply the Burrows-Wheeler transform to *chaine*.

    A '^' sentinel is prepended so the transform can later be inverted
    (by decodage()).  The result is the last column of the
    lexicographically sorted matrix of all rotations of the marked string.
    """
    marked = '^' + chaine  # sentinel marks where the original string starts
    longueur = len(marked)
    # Every rotation of the marked string, sorted lexicographically.
    rotations = sorted(marked[i:] + marked[:i] for i in range(longueur))
    # The transform is the last character of each sorted rotation.
    return ''.join(rotation[-1] for rotation in rotations)
def decodage(chaine):
    """Invert the Burrows-Wheeler transform produced by transformation().

    Rebuilds the sorted rotation matrix one column at a time, then returns
    the row that starts with the '^' sentinel (sentinel stripped).  Returns
    None when no sentinel is present, i.e. the input is not a valid
    transform.
    """
    longueur = len(chaine)
    table = [""] * longueur
    # Each pass prepends the transform as a fresh leading column and
    # re-sorts, recovering one more character of every rotation.
    for _ in range(longueur):
        table = sorted(chaine[i] + table[i] for i in range(longueur))
    for ligne in table:
        if ligne.startswith('^'):
            return ligne[1:]
    return None
# Demo: transform a sample string, then invert a known transform
# ("BNN^AAA" decodes back to "BANANA").
print(transformation("MISSISSIPI RIVER"))
print(decodage("BNN^AAA"))
| LeProjetDeLaSemaine/Projet_ISN | BurrowsWheeler.py | Python | gpl-3.0 | 2,003 |
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
# Make the shared RHN client libraries importable when this runs as an
# rhn action script outside the normal package layout.
rhnpath="/usr/share/rhn"
if rhnpath not in sys.path:
    sys.path.append(rhnpath)
from spacewalkkoan import spacewalkkoan
from up2date_client import up2dateLog
# Names exported to the RHN action framework dispatcher.
__rhnexport__ = [
    'initiate',
]
def initiate(kickstart_host, base, extra_append, static_device="", system_record="", preserve_files=None, cache_only=False):
    """Kick off a spacewalk-koan kickstart on this client.

    :param kickstart_host: host serving the kickstart tree
    :param base: kickstart base path/profile
    :param extra_append: extra kernel command-line arguments
    :param static_device: optional static network device configuration
    :param system_record: optional cobbler system record name
    :param preserve_files: files to carry over across the reinstall
    :param cache_only: accepted for the rhn action API but unused here
    :return: whatever spacewalkkoan.initiate returns
    """
    # BUG FIX: avoid a mutable default argument -- a module-level list default
    # is shared across every call and can be mutated by callees.
    if preserve_files is None:
        preserve_files = []
    log = up2dateLog.initLog()
    log.log_me("initiating spacewalkkoan kickstart")
    # NOTE(review): cache_only is ignored; presumably required by the action
    # framework's calling convention -- confirm before removing.
    return spacewalkkoan.initiate(kickstart_host, base, extra_append=extra_append,
           static_device=static_device, system_record=system_record, preserve_files=preserve_files)
| colloquium/spacewalk | client/tools/spacewalk-koan/actions/kickstart.py | Python | gpl-2.0 | 1,215 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import common.models
class Migration(migrations.Migration):
    """Add created/updated timestamps to FanUser and relax its slug field."""
    dependencies = [
        ('fan', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='fanuser',
            name='created',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fanuser',
            name='updated',
            # Project-specific field that presumably refreshes on save --
            # defined in common.models.
            field=common.models.AutoDateTimeField(default=django.utils.timezone.now),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='fanuser',
            name='slug',
            field=models.SlugField(),
        ),
    ]
| vivyly/fancastic_17 | fancastic_17/fan/migrations/0002_auto_20141013_2232.py | Python | bsd-3-clause | 858 |
from mbio.entities.entity import Entity
from mbio.utils import get_json
import datetime
import json
class Release(Entity):
    """A MusicBrainz release (a specific issue of an album).

    Can be constructed either from an mbid string (the JSON is then fetched
    from the MusicBrainz web service) or from an already-parsed JSON dict.
    Note: this module uses Python 2 semantics (str/bytes, .decode on the
    result of json.dumps).
    """
    def __init__(self, data, *args, **kwargs):
        super(Release, self).__init__(*args, **kwargs)
        if isinstance(data, str):
            # NOTE(review): self.mbid is only set on this branch; instances
            # built from a dict never get an mbid attribute -- confirm intended.
            self.mbid = data
            json_struct = get_json("https://musicbrainz.org/ws/2/release/"+self.mbid+"?fmt=json")
        else:
            json_struct = data
        self.parse_json(json_struct)
    def json(self):
        """Serialize this release back to a JSON string (Python 2 unicode)."""
        json_struct = {}
        json_struct['country'] = self.country
        json_struct['status'] = self.status
        # date is stored as a datetime; serialized as YYYY-MM-DD.
        json_struct['date'] = datetime.datetime.strftime(self.date,"%Y-%m-%d")
        json_struct['barcode'] = self.barcode
        json_struct['disambiguation'] = self.disambiguation
        json_struct['title'] = self.title
        json_struct['asin'] = self.asin
        json_struct['quality'] = self.quality
        json_struct['packaging'] = self.packaging
        # NOTE(review): .decode('utf-8') on json.dumps output is Python 2
        # only; this line raises AttributeError on Python 3.
        return json.dumps(json_struct, ensure_ascii=False).decode('utf-8')
    def parse_json(self, json_struct):
        """Populate attributes from a MusicBrainz release JSON structure.

        Raises KeyError if any of the expected fields is absent.
        """
        self.country = json_struct['country']
        self.status = json_struct['status']
        self.date = datetime.datetime.strptime(json_struct['date'], "%Y-%m-%d")
        self.barcode = json_struct['barcode']
        self.disambiguation = json_struct['disambiguation']
        self.title = json_struct['title']
        self.asin = json_struct['asin']
        self.quality = json_struct['quality']
        self.packaging = json_struct['packaging']
        if "media" in json_struct:
            for m in json_struct['media']:
                # NOTE(review): looks like track/recording parsing was never
                # finished; 'track' may need to be 'tracks' -- verify against
                # an actual inc=recordings API response.
                for t in m['track']:
                    print("Doing recordings!")
| LordSputnik/python-mbio | mbio/entities/release.py | Python | mit | 1,703 |
import glob
import re
import csv
import os
import sys
import argparse
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPTS_DIR = os.path.normpath(os.path.join(FILE_DIR, "../../../Scripts"))
# Make the shared VistA helper scripts importable from this location.
if SCRIPTS_DIR not in sys.path:
    sys.path.append(SCRIPTS_DIR)
from CrossReference import CrossReference, Routine, Package, Global, PlatformDependentGenericRoutine
from LogManager import logger, initConsoleLogging
# Pattern for routine names of the form A<digit>...; these are flagged as
# exempt in findPackagesAndRoutinesBySource below.
ARoutineEx = re.compile("^A[0-9][^ ]+$")
# File numbers whose globals are miscategorized in the source tree, mapped
# to the package they actually belong to (applied in findGlobalsBySourceV2).
fileNoPackageMappingDict = {"18.02":"Web Services Client",
                            "18.12":"Web Services Client",
                            "18.13":"Web Services Client",
                            "52.87":"Outpatient Pharmacy",
                            "59.73":"Pharmacy Data Management",
                            "59.74":"Pharmacy Data Management"
                           }
def getVDLHttpLinkByID(vdlId):
    """Return the VA Documentation Library (VDL) application URL for *vdlId*."""
    vdl_url_template = "https://www.va.gov/vdl/application.asp?appid=%s"
    return vdl_url_template % vdlId
class InitCrossReferenceGenerator(object):
    """Builds an initial CrossReference from VistA repository metadata.

    Parses the CSV metadata files (percent-routine map, Packages.csv,
    platform-dependent routines) and scans the M repository for globals
    (.zwr files) and routines (.m files).  Python 2 code (iteritems,
    old-style except syntax).
    """
    def __init__(self):
        # The CrossReference instance being populated by the parse methods.
        self.crossRef = CrossReference()
    @property
    def crossReference(self):
        """The populated CrossReference object."""
        return self.crossRef
    def parsePercentRoutineMappingFile(self, mappingFile):
        """Load the %-routine -> (source, package) mapping from a CSV file."""
        result = csv.DictReader(open(mappingFile, "rb"))
        for row in result:
            self.crossRef.addPercentRoutineMapping(row['Name'],
                                                   row['Source'],
                                                   row['Package'])
    def parsePackagesFile(self, packageFilename):
        """Load package names, namespaces and VDL links from Packages.csv.

        Rows with an empty 'Directory Name' are continuation rows that add
        more prefixes/globals to the most recently seen package.
        """
        result = csv.DictReader(open(packageFilename, 'rb'))
        crossRef = self.crossRef
        currentPackage = None
        index = 0
        for row in result:
            packageName = row['Directory Name']
            if len(packageName) > 0:
                currentPackage = crossRef.getPackageByName(packageName)
                if not currentPackage:
                    logger.debug ("Package [%s] not found" % packageName)
                    crossRef.addPackageByName(packageName)
                # Re-fetch so currentPackage is set even when just added.
                currentPackage = crossRef.getPackageByName(packageName)
                currentPackage.setOriginalName(row['Package Name'])
                vdlId = row['VDL ID']
                if vdlId and len(vdlId):
                    currentPackage.setDocLink(getVDLHttpLinkByID(vdlId))
            else:
                if not currentPackage:
                    logger.warn ("row is not under any package: %s" % row)
                    continue
            if len(row['Prefixes']):
                currentPackage.addNamespace(row['Prefixes'])
            if len(row['Globals']):
                currentPackage.addGlobalNamespace(row['Globals'])
        logger.info ("Total # of Packages is %d" % (len(crossRef.getAllPackages())))
    def parsePlatformDependentRoutineFile(self, routineCSVFile):
        """Load platform-dependent routine mappings from a CSV file.

        The CSV dialect and header presence are sniffed; rows with a name in
        the first column start a new routine, empty-name rows continue it.
        """
        routineFile = open(routineCSVFile, "rb")
        sniffer = csv.Sniffer()
        dialect = sniffer.sniff(routineFile.read(1024))
        routineFile.seek(0)
        hasHeader = sniffer.has_header(routineFile.read(1024))
        routineFile.seek(0)
        result = csv.reader(routineFile, dialect)
        currentName = ""
        routineDict = dict()
        crossRef = self.crossRef
        index = 0
        for line in result:
            if hasHeader and index == 0:
                index += 1
                continue
            if len(line[0]) > 0:
                currentName = line[0]
                if line[0] not in routineDict:
                    routineDict[currentName] = []
                    # First entry of the list is the routine's package (last column).
                    routineDict[currentName].append(line[-1])
            routineDict[currentName].append([line[1], line[2]])
        for (routineName, mappingList) in routineDict.iteritems():
            crossRef.addPlatformDependentRoutineMapping(routineName,
                                                        mappingList[0],
                                                        mappingList[1:])
    #===============================================================================
    # Find all globals by source zwr and package.csv files version v2
    #===============================================================================
    def findGlobalsBySourceV2(self, dirName, pattern):
        """Scan Packages/*/Globals/*.zwr files and register every global.

        The file number and description come from the file name when it
        matches "<fileNo>+<description>.zwr"; otherwise the file header is
        parsed.  Globals listed in fileNoPackageMappingDict are re-homed to
        their correct package at the end.
        """
        searchFiles = glob.glob(os.path.join(dirName, pattern))
        logger.info("Total Search Files are %d " % len(searchFiles))
        crossReference = self.crossRef
        allGlobals = crossReference.getAllGlobals()
        allPackages = crossReference.getAllPackages()
        skipFile = []
        fileNoSet = set()
        for file in searchFiles:
            # The package name is the path segment between "Packages/" and "/Globals".
            packageName = os.path.dirname(file)
            packageName = packageName[packageName.index("Packages") + 9:packageName.index("Globals") - 1]
            if not crossReference.hasPackage(packageName):
                logger.info ("Package: %s is new" % packageName)
                crossReference.addPackageByName(packageName)
            package = allPackages.get(packageName)
            zwrFile = open(file, 'r')
            lineNo = 0
            fileName = os.path.basename(file)
            result = re.search("(?P<fileNo>^[0-9.]+)(-1)?\+(?P<des>.*)\.zwr$", fileName)
            if result:
                fileNo = result.group('fileNo')
                if fileNo.startswith('0'): fileNo = fileNo[1:]
                globalDes = result.group('des')
            else:
                # Namespace-only files (no file number) are skipped entirely.
                result = re.search("(?P<namespace>^[^.]+)\.zwr$", fileName)
                if result:
                    namespace = result.group('namespace')
                    # package.addGlobalNamespace(namespace)
                    continue
                else:
                    continue
            globalName = "" # find out the global name by parsing the global file
            logger.debug ("Parsing file: %s" % file)
            for line in zwrFile:
                if lineNo == 0:
                    globalDes = line.strip()
                    # Removing the extra text in the header of the ZWR file
                    # to tell if it needs to be added or skipped
                    globalDes = globalDes.replace("OSEHRA ZGO Export: ",'')
                    if globalDes.startswith("^"):
                        logger.info ("No Description: Skip this file: %s" % file)
                        skipFile.append(file)
                        namespace = globalDes[1:]
                        package.addGlobalNamespace(namespace)
                        break
                if lineNo == 1:
                    assert re.search('ZWR', line.strip())
                if lineNo >= 2:
                    # Lines look like ^GLOBAL(...,0)="..."; the name is left of '='.
                    info = line.strip().split('=')
                    globalName = info[0]
                    detail = info[1].strip("\"")
                    if globalName.find(',') > 0:
                        result = globalName.split(',')
                        if len(result) == 2 and result[1] == "0)":
                            globalName = result[0]
                            break
                    elif globalName.endswith("(0)"):
                        globalName = globalName.split('(')[0]
                        break
                    else:
                        continue
                lineNo = lineNo + 1
            logger.debug ("globalName: %s, Des: %s, fileNo: %s, package: %s" %
                          (globalName, globalDes, fileNo, packageName))
            if len(fileNo) == 0:
                if file not in skipFile:
                    logger.warn ("Warning: No FileNo found for file %s" % file)
                continue
            globalVar = Global(globalName, fileNo, globalDes,
                               allPackages.get(packageName))
            try:
                # Validate that the file number is numeric before registering.
                fileNum = float(globalVar.getFileNo())
            except ValueError, es:
                logger.error ("error: %s, globalVar:%s file %s" % (es, globalVar, file))
                continue
            # crossReference.addGlobalToPackage(globalVar, packageName)
            # only add to allGlobals dict as we have to change the package later on
            if globalVar.getName() not in allGlobals:
                allGlobals[globalVar.getName()] = globalVar
            if fileNo not in fileNoSet:
                fileNoSet.add(fileNo)
            else:
                logger.error ("Error, duplicated file No [%s,%s,%s,%s] file:%s " %
                              (fileNo, globalName, globalDes, packageName, file))
            zwrFile.close()
        logger.info ("Total # of Packages is %d and Total # of Globals is %d, Total Skip File %d, total FileNo is %d" %
                     (len(allPackages), len(allGlobals), len(skipFile), len(fileNoSet)))
        sortedKeyList = sorted(allGlobals.keys(),
                               key=lambda item: float(allGlobals[item].getFileNo()))
        for key in sortedKeyList:
            globalVar = allGlobals[key]
            # fix the uncategoried item
            if globalVar.getFileNo() in fileNoPackageMappingDict:
                globalVar.setPackage(allPackages[fileNoPackageMappingDict[globalVar.getFileNo()]])
            crossReference.addGlobalToPackage(globalVar,
                                              globalVar.getPackage().getName())
    #===========================================================================
    # find all the package name and routines by reading the repository directory
    #===========================================================================
    def findPackagesAndRoutinesBySource(self, dirName, pattern):
        """Scan Packages/*/Routines/*.m files and register every routine."""
        searchFiles = glob.glob(os.path.join(dirName, pattern))
        logger.info("Total Search Files are %d " % len(searchFiles))
        allRoutines = self.crossRef.getAllRoutines()
        allPackages = self.crossRef.getAllPackages()
        crossReference = self.crossRef
        for file in searchFiles:
            routineName = os.path.basename(file).split(".")[0]
            needRename = crossReference.routineNeedRename(routineName)
            if needRename:
                origName = routineName
                routineName = crossReference.getRenamedRoutineName(routineName)
            # Platform-dependent routines were registered separately; skip them.
            if crossReference.isPlatformDependentRoutineByName(routineName):
                continue
            # The package name is the path segment between "Packages/" and "/Routines".
            packageName = os.path.dirname(file)
            packageName = packageName[packageName.index("Packages") + 9:packageName.index("Routines") - 1]
            crossReference.addRoutineToPackageByName(routineName, packageName)
            if needRename:
                routine = crossReference.getRoutineByName(routineName)
                assert(routine)
                routine.setOriginalName(origName)
            if ARoutineEx.search(routineName):
                logger.debug("A Routines %s should be exempted" % routineName)
                pass
        logger.info("Total package is %d and Total Routines are %d" %
                    (len(allPackages), len(allRoutines)))
def parseCrossRefGeneratorWithArgs(args):
    """Convenience wrapper: unpack an argparse Namespace and delegate."""
    return parseCrossReferenceGeneratorArgs(args.MRepositDir,
                                            args.patchRepositDir)
def parseCrossReferenceGeneratorArgs(MRepositDir,
                                     patchRepositDir):
    """Build a fully-populated CrossReference from the two repositories.

    Parses the CSV metadata (percent-routine map, Packages.csv,
    platform-dependent routines) from the patch repository, then scans the
    M repository for globals (.zwr) and routines (.m).
    """
    DoxDir = 'Utilities/Dox'
    crossRefGen = InitCrossReferenceGenerator()
    percentMapFile = os.path.join(patchRepositDir, DoxDir,
                                  "PercentRoutineMapping.csv")
    crossRefGen.parsePercentRoutineMappingFile(percentMapFile)
    crossRefGen.parsePackagesFile(os.path.join(patchRepositDir,
                                               "Packages.csv"))
    platformDepRtnFile = os.path.join(patchRepositDir, DoxDir,
                                      "PlatformDependentRoutine.csv")
    crossRefGen.parsePlatformDependentRoutineFile(platformDepRtnFile)
    crossRefGen.findGlobalsBySourceV2(os.path.join(MRepositDir,
                                                   "Packages"),
                                      "*/Globals/*.zwr")
    crossRefGen.findPackagesAndRoutinesBySource(os.path.join(MRepositDir,
                                                             "Packages"),
                                                "*/Routines/*.m")
    return crossRefGen.crossReference
def createInitialCrossRefGenArgParser():
    """Build the shared argparse parser for cross-reference generation.

    Created with add_help=False so it can be passed as a parent parser to
    other tools without duplicating the -h/--help option.
    """
    parser = argparse.ArgumentParser(add_help=False)  # no help page
    repo_args = parser.add_argument_group(
        'Initial CrossReference Generator Arguments',
        "Argument for generating initial CrossReference")
    repo_args.add_argument('-mr', '--MRepositDir', required=True,
                           help='VistA M Component Git Repository Directory')
    repo_args.add_argument('-pr', '--patchRepositDir', required=True,
                           help="VistA Git Repository Directory")
    return parser
def main():
    """CLI entry point: parse arguments and build the cross reference."""
    initParser = createInitialCrossRefGenArgParser()
    parser = argparse.ArgumentParser(
        description='VistA Cross-Reference Call Graph Log Files Parser',
        parents=[initParser])
    initConsoleLogging()
    result = parser.parse_args();
    # Built for its side effects (logging/population); return value unused here.
    crossRefGen = parseCrossReferenceGeneratorArgs(result.MRepositDir,
                                                   result.patchRepositDir)
if __name__ == '__main__':
    main()
| OSEHRA-Sandbox/VistA | Utilities/Dox/PythonScripts/InitCrossReferenceGenerator.py | Python | apache-2.0 | 11,935 |
# p4 (phylogenetics toolkit) example script: names like var, read() and
# Data() are injected into the namespace by the p4 interpreter.
var.doCheckForDuplicateSequences=False
read('d.nex')
a=var.alignments[0]
a.setCharPartition('cp1')
d = Data()
d.alignments[0].writePhylip()
# Generate one bootstrap replicate of the partitioned data and write it out.
oneBoot = d.bootstrap()
oneBoot.alignments[0].writePhylip()
| blaiseli/p4-phylogenetics | share/Examples/D_nexusSets/D_bootstrapPartitionedData/s.py | Python | gpl-2.0 | 203 |
#!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import vocoder
def build_graph():
    """Build an audio loopback flow graph: mic -> G.723 24kbps encode -> decode -> speakers.

    Audio floats in [-1, 1] are scaled to 16-bit short range for the
    vocoder and scaled back down after decoding.
    """
    tb = gr.top_block()
    src = audio.source(8000)
    src_scale = gr.multiply_const_ff(32767)
    f2s = gr.float_to_short ()
    enc = vocoder.g723_24_encode_sb()
    dec = vocoder.g723_24_decode_bs()
    s2f = gr.short_to_float ()
    sink_scale = gr.multiply_const_ff(1.0/32767.)
    sink = audio.sink(8000)
    # Connect the blocks in a single chain from source to sink.
    tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
    return tb
if __name__ == '__main__':
    # Run the loopback until the user presses Enter, then shut down cleanly.
    tb = build_graph()
    tb.start()
    raw_input ('Press Enter to exit: ')
    tb.stop()
    tb.wait()
| gnychis/grforwarder | gr-vocoder/examples/g723_24_audio_loopback.py | Python | gpl-3.0 | 1,440 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013, Michael Komitee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Kerberos authentication module"""
import logging
import os
from functools import wraps
from socket import getfqdn
from typing import Any, Callable, Optional, Tuple, TypeVar, Union, cast
import kerberos
from flask import Response, _request_ctx_stack as stack, g, make_response, request # type: ignore
from requests_kerberos import HTTPKerberosAuth
from airflow.configuration import conf
log = logging.getLogger(__name__)
# Kerberos auth object used by API clients when calling this server.
CLIENT_AUTH: Optional[Union[Tuple[str, str], Any]] = HTTPKerberosAuth(service='airflow')
class KerberosService:
    """Class to keep information about the Kerberos Service initialized"""
    def __init__(self):
        # GSSAPI service principal ("service@fqdn"); filled in by init_app().
        self.service_name = None
# Module-level singleton holding the currently initialized Kerberos service.
_KERBEROS_SERVICE = KerberosService()
def init_app(app):
    """Initializes application with kerberos"""
    # Prefer Flask's configured SERVER_NAME; fall back to this host's FQDN.
    hostname = app.config.get('SERVER_NAME')
    if not hostname:
        hostname = getfqdn()
    log.info("Kerberos: hostname %s", hostname)
    service = 'airflow'
    # GSSAPI service principal of the form service@hostname.
    _KERBEROS_SERVICE.service_name = f"{service}@{hostname}"
    # Point libkrb5 at the configured keytab unless the environment already set one.
    if 'KRB5_KTNAME' not in os.environ:
        os.environ['KRB5_KTNAME'] = conf.get('kerberos', 'keytab')
    try:
        log.info("Kerberos init: %s %s", service, hostname)
        principal = kerberos.getServerPrincipalDetails(service, hostname)
    except kerberos.KrbError as err:
        # Non-fatal here: log and continue; requests will fail later if misconfigured.
        log.warning("Kerberos: %s", err)
    else:
        log.info("Kerberos API: server is %s", principal)
def _unauthorized():
    """Return a 401 response challenging the client to start SPNEGO negotiation."""
    return Response("Unauthorized", 401, {"WWW-Authenticate": "Negotiate"})
def _forbidden():
    """Return a 403 response for a failed GSSAPI negotiation."""
    return Response("Forbidden", 403)
def _gssapi_authenticate(token):
    """Run one server-side GSSAPI negotiation step for *token*.

    Returns kerberos.AUTH_GSS_COMPLETE on success (storing the mutual-auth
    response token and the authenticated principal on the request context),
    AUTH_GSS_CONTINUE if the handshake needs another round trip, or None on
    any failure.
    """
    state = None
    ctx = stack.top
    try:
        return_code, state = kerberos.authGSSServerInit(_KERBEROS_SERVICE.service_name)
        if return_code != kerberos.AUTH_GSS_COMPLETE:
            return None
        return_code = kerberos.authGSSServerStep(state, token)
        if return_code == kerberos.AUTH_GSS_COMPLETE:
            # Stash the response token and user for the decorator to use.
            ctx.kerberos_token = kerberos.authGSSServerResponse(state)
            ctx.kerberos_user = kerberos.authGSSServerUserName(state)
            return return_code
        if return_code == kerberos.AUTH_GSS_CONTINUE:
            return kerberos.AUTH_GSS_CONTINUE
        return None
    except kerberos.GSSError:
        return None
    finally:
        # Always release the GSSAPI security context.
        if state:
            kerberos.authGSSServerClean(state)
# Generic callable type so the decorator preserves the wrapped signature.
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T):
    """Decorator for functions that require authentication with Kerberos"""
    @wraps(function)
    def decorated(*args, **kwargs):
        header = request.headers.get("Authorization")
        if header:
            ctx = stack.top
            # Header looks like "Negotiate <base64 token>"; keep only the token part.
            token = ''.join(header.split()[1:])
            return_code = _gssapi_authenticate(token)
            if return_code == kerberos.AUTH_GSS_COMPLETE:
                g.user = ctx.kerberos_user
                response = function(*args, **kwargs)
                response = make_response(response)
                if ctx.kerberos_token is not None:
                    # Echo the mutual-authentication token back to the client.
                    response.headers['WWW-Authenticate'] = ' '.join(['negotiate', ctx.kerberos_token])
                return response
            if return_code != kerberos.AUTH_GSS_CONTINUE:
                return _forbidden()
        # No header, or the handshake needs another round trip: challenge again.
        return _unauthorized()
    return cast(T, decorated)
| apache/incubator-airflow | airflow/api/auth/backend/kerberos_auth.py | Python | apache-2.0 | 5,563 |
# Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.tests.functional import functional_helpers
class GroupsTest(functional_helpers._FunctionalTestBase):
    """Functional tests for the groups API (requested microversion 3.13)."""
    _vol_type_name = 'functional_test_type'
    _grp_type_name = 'functional_grp_test_type'
    osapi_version_major = '3'
    osapi_version_minor = '13'
    def setUp(self):
        """Create the volume type and group type used by every test."""
        super(GroupsTest, self).setUp()
        self.volume_type = self.api.create_type(self._vol_type_name)
        self.group_type = self.api.create_group_type(self._grp_type_name)
    def _get_flags(self):
        """Configure a fake volume driver and default type names."""
        f = super(GroupsTest, self)._get_flags()
        f['volume_driver'] = (
            'cinder.tests.fake_driver.FakeLoggingVolumeDriver')
        f['default_volume_type'] = self._vol_type_name
        f['default_group_type'] = self._grp_type_name
        return f
    def test_get_groups_summary(self):
        """Simple check that listing groups works."""
        grps = self.api.get_groups(False)
        self.assertIsNotNone(grps)
    def test_get_groups(self):
        """Simple check that listing groups works."""
        grps = self.api.get_groups()
        self.assertIsNotNone(grps)
    def test_create_and_delete_group(self):
        """Creates and deletes a group."""
        # Create group
        created_group = self.api.post_group(
            {'group': {'group_type': self.group_type['id'],
                       'volume_types': [self.volume_type['id']]}})
        self.assertTrue(created_group['id'])
        created_group_id = created_group['id']
        # Check it's there
        found_group = self._poll_group_while(created_group_id,
                                             ['creating'])
        self.assertEqual(created_group_id, found_group['id'])
        self.assertEqual(self.group_type['id'], found_group['group_type'])
        self.assertEqual('available', found_group['status'])
        # Create volume
        created_volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'group_id': created_group_id,
                        'volume_type': self.volume_type['id']}})
        self.assertTrue(created_volume['id'])
        created_volume_id = created_volume['id']
        # Check it's there
        found_volume = self.api.get_volume(created_volume_id)
        self.assertEqual(created_volume_id, found_volume['id'])
        self.assertEqual(self._vol_type_name, found_volume['volume_type'])
        self.assertEqual(created_group_id, found_volume['group_id'])
        # Wait (briefly) for creation. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['creating'])
        # It should be available...
        self.assertEqual('available', found_volume['status'])
        # Delete the original group
        self.api.delete_group(created_group_id,
                              {'delete': {'delete-volumes': True}})
        # Wait (briefly) for deletion. Delay is due to the 'message queue'
        found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
        found_group = self._poll_group_while(created_group_id, ['deleting'])
        # Should be gone
        self.assertFalse(found_volume)
        self.assertFalse(found_group)
| Hybrid-Cloud/cinder | cinder/tests/functional/test_groups.py | Python | apache-2.0 | 3,817 |
from decimal import Decimal
from mock import patch, Mock
from bs4 import BeautifulSoup
from unittest import TestCase
from amazon_payments import AmazonPaymentsAPI
from api_responses import RESPONSES
class APITestCase(TestCase):
    """Shared fixture: an API client plus a helper for canned HTTP responses."""
    def setUp(self):
        self.api = AmazonPaymentsAPI("access_key", "secret_key", "seller_id")
    def create_mock_response(self, body, status_code=200):
        """Return a Mock shaped like a requests response with the given payload."""
        fake_response = Mock()
        fake_response.status_code = status_code
        fake_response.content = body
        return fake_response
class SaveToDBTestCase(APITestCase):
    """Tests for the do_request callback that persists raw request/response pairs.

    BUG FIX: this was declared with ``def`` instead of ``class``, so the
    nested test methods were never discovered or executed by the test runner.
    """
    def setUp(self):
        super(SaveToDBTestCase, self).setUp()
        # Collects the (raw_request, raw_response) pairs passed to the callback.
        self.db_callback_list = []
    def save_to_db_callback(self, raw_request, raw_response):
        """Fake persistence hook: record the pair and return a marker value."""
        self.db_callback_list.append((raw_request, raw_response))
        return "saved"
    def test_callback_called(self):
        """do_request must invoke the callback exactly once and return its value."""
        with patch('requests.post') as post:
            post.return_value = self.create_mock_response("xml response")
            self.db_callback_list = []
            response, tx = self.api.do_request(
                "GetOrderReferenceDetails", {}, False,
                callback=self.save_to_db_callback)
            self.assertEqual(tx, "saved")
            self.assertEqual(len(self.db_callback_list), 1)
class GetAgreementDetailsTestCase(APITestCase):
    """ Tests for the get_amazon_order_details method. """
    def test_missing_payment_method_and_address(self):
        """
        Check that error messages are returned if the user has not
        selected a payment method and shipping address and both
        validate_shipping_address and validate_payment_method are set.
        """
        response_xml = RESPONSES["no_payment_method_and_shipping_address"]
        response = self.create_mock_response(response_xml)
        # requests.post is patched so no real HTTP call is made.
        with patch('requests.post') as post:
            post.return_value = response
            result = self.api.get_amazon_order_details(
                "billing_agreement_id", "access_token")
        self.assertFalse(result[0])
        self.assertIn("Please select a payment method.", result[1])
        self.assertIn("Please select a shipping address.", result[1])
    def test_automatic_payments_consent_not_needed(self):
        """
        Check that the method returns True and the BeautifulSoup object
        for the "BillingAgreementDetails" tag when the user's consent is
        not needed for automatic payments / subscriptions.
        """
        response_xml = RESPONSES["subscriptions_consent_not_given"]
        response = self.create_mock_response(response_xml)
        with patch('requests.post') as post:
            post.return_value = response
            result = self.api.get_amazon_order_details(
                "billing_agreement_id", "access_token")
        # The expected value is the parsed tag from the same canned XML.
        amazon_order_details = BeautifulSoup(response_xml, "xml").find(
            "BillingAgreementDetails")
        self.assertEqual(result, (True, amazon_order_details))
    def test_automatic_payments_consent_needed(self):
        """
        Check that the method returns False and an error message if
        has_subscriptions = True and the user has not given consent.
        """
        response_xml = RESPONSES["subscriptions_consent_not_given"]
        response = self.create_mock_response(response_xml)
        with patch('requests.post') as post:
            post.return_value = response
            result = self.api.get_amazon_order_details(
                "billing_agreement_id", "access_token", has_subscriptions=True)
        self.assertFalse(result[0])
        self.assertIsInstance(result[1], list)
        error = ("Please authorize us to charge future payments to your "
                 "Amazon account. This is required as your order contains "
                 "subscription items.")
        self.assertIn(error, result[1])
class PaymentAuthorizationTestCase(APITestCase):
    """Tests for order-reference creation, authorization, and status lookup."""
    def test_create_order_reference_id(self):
        """create_order_reference_id should return the id from the XML response."""
        response_xml = RESPONSES["create_order_reference"]
        response = self.create_mock_response(response_xml)
        with patch('requests.post') as post:
            post.return_value = response
            result = self.api.create_order_reference_id(
                "billing_agreement_id", "9.99", "USD")
        self.assertEqual(result, "S01-6576755-3809974")
    def test_payment_authorization(self):
        """authorize should return (authorization_id, None) on success."""
        response_xml = RESPONSES["authorize"]
        response = self.create_mock_response(response_xml)
        with patch('requests.post') as post:
            post.return_value = response
            # NOTE(review): db_callback_list is never read in this test and
            # looks copied from SaveToDBTestCase -- confirm it can be removed.
            self.db_callback_list = []
            result = self.api.authorize(
                "order_reference_id", "auth_ref", "9.99", "USD")
        self.assertEqual(result, ("S01-6576755-3809974-A067494", None))
    def test_get_authorization_details(self):
        """get_authorization_status should return the status tag and the amount."""
        response_xml = RESPONSES["authorization_details"]
        response = self.create_mock_response(response_xml)
        with patch('requests.post') as post:
            post.return_value = response
            result = self.api.get_authorization_status("authorization_id")
        auth_status = BeautifulSoup(response_xml, "xml").find(
            "AuthorizationStatus")
        self.assertEqual(result, (auth_status, Decimal("9.99")))
| britco/django-oscar-amazon-payments | tests/tests.py | Python | mit | 5,299 |
#!/usr/bin/python3
"""Punctuation rules around colons and semi-colons."""
import wlint.punctuation
def _colon_rule(name, colon):
    """Build the lint rules for a single colon-like punctuation mark.

    Returns a list of (rule-name, check) pairs built from the
    wlint.punctuation regex helpers.
    """
    rules = []
    # a colon might be followed by a digit if it's time
    if name == "colon":
        rules.append(("{}.missing-space".format(name),
                      wlint.punctuation.make_pair_regex_rule(colon,
                                                             "[^\\s\\d]")))
    elif name == "semicolon":
        rules.append(("{}.missing-space".format(name),
                      wlint.punctuation.make_pair_regex_rule(colon,
                                                             "\\S")))
    # Whitespace immediately before the mark is always flagged.
    rules.append(("{}.preceeding-space".format(name),
                  wlint.punctuation.make_pair_regex_rule("\\s", colon)))
    return rules
def _get_colon_rules():
    """Assemble the punctuation rules for both colon-like marks."""
    return _colon_rule("colon", ":") + _colon_rule("semicolon", ";")
# Rule list consumed by the wlint punctuation checker.
_COLON_RULES = _get_colon_rules()
| snewell/wlint | lib/wlint/punctuation/colon.py | Python | bsd-2-clause | 973 |
# -*- coding: utf-8 -*-
import random
import pygame
from math import fsum
import classes.board
import classes.drw.ratio_hq
import classes.game_driver as gd
import classes.level_controller as lc
import classes.extras as ex
class Board(gd.BoardGame):
    def __init__(self, mainloop, speaker, config, screen_w, screen_h):
        # Level controller configured for 2 levels with 2 sublevels each.
        self.level = lc.Level(self, mainloop, 2, 2)
        # Base-class init sets up an 18 x 10 board grid scaled to the screen.
        gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 18, 10)
def create_game_objects(self, level=1):
self.max_size = 99
self.board.draw_grid = False
if self.mainloop.scheme is not None:
white = self.mainloop.scheme.u_color
h1 = 170
h2 = 40
h3 = 0
color1 = ex.hsv_to_rgb(h1, 255, 255)
color2 = ex.hsv_to_rgb(h2, 157, 255)
color3 = ex.hsv_to_rgb(h2, 57, 255)
self.bd_color1 = ex.hsv_to_rgb(h1, 127, 155)
self.bd_color2 = ex.hsv_to_rgb(h2, 127, 155)
self.bd_color3 = ex.hsv_to_rgb(h3, 57, 155)
else:
white = (255, 255, 255)
step = 255 //3
h1 = random.randrange(0, 255)
h2 = (h1 + step) % 255
h3 = (h1 + step * 2) % 255
color1 = ex.hsv_to_rgb(h1, 127, 255)
color2 = ex.hsv_to_rgb(h2, 127, 255)
color3 = ex.hsv_to_rgb(h3, 127, 255)
self.bd_color1 = ex.hsv_to_rgb(h1, 187, 200)
self.bd_color2 = ex.hsv_to_rgb(h2, 187, 200)
self.bd_color3 = ex.hsv_to_rgb(h3, 187, 200)
transp = (0, 0, 0, 0)
self.disabled_font_color = (200, 200, 200)
data = [18, 10]
self.data = data
self.vis_buttons = [0, 0, 0, 0, 1, 1, 1, 0, 0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0], data[1])
scale = self.layout.scale
self.board.level_start(data[0], data[1], scale)
self.board.board_bg.update_me = True
self.board.board_bg.line_color = (20, 20, 20)
self.max_total = 20
num1 = num2 = num3 = 11
while num1 + num2 + num3 > self.max_total:
num1 = random.randint(1, 3)
num2 = random.randint(1, 3)
num3 = random.randint(1, 3)
self.numbers = [num1, num2, num3]
self.board.add_unit(0, 0, data[1], data[1], classes.board.Label, "", white, "", 0)
self.fraction_canvas = self.board.units[-1]
self.fraction = classes.drw.ratio_hq.Ratio(1, self.board.scale * data[1], color1, color2, color3, self.bd_color1, self.bd_color2, self.bd_color3, self.numbers)
self.fraction_canvas.painting = self.fraction.get_canvas().copy()
self.board.add_unit(data[1], 2, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_u_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.bd_color1)
self.board.add_unit(data[1], 4, 2, 2, classes.board.Label, str(num1), white, "", 31)
self.nm1 = self.board.units[-1]
self.board.units[-1].font_color = self.bd_color1
self.board.add_unit(data[1], 6, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_d_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.bd_color1)
self.board.add_unit(data[1] + 2, 4, 1, 2, classes.board.Label, ":", white, "", 31)
self.board.add_unit(data[1] + 3, 2, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_u_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.bd_color2)
self.board.add_unit(data[1] + 3, 4, 2, 2, classes.board.Label, str(num2), white, "", 31)
self.nm2 = self.board.units[-1]
self.board.units[-1].font_color = self.bd_color2
self.board.add_unit(data[1] + 3, 6, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_d_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.bd_color2)
self.board.add_unit(data[1] + 5, 4, 1, 2, classes.board.Label, ":", white, "", 31)
self.board.add_unit(data[1] + 6, 2, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_u_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.bd_color3)
self.board.add_unit(data[1] + 6, 4, 2, 2, classes.board.Label, str(num3), white, "", 31)
self.nm3 = self.board.units[-1]
self.board.units[-1].font_color = self.bd_color3
self.board.add_unit(data[1] + 6, 6, 2, 2, classes.board.ImgCenteredShip, "", transp,
img_src='nav_d_mts.png', alpha=True)
self.board.ships[-1].set_tint_color(self.bd_color3)
for each in self.board.ships:
each.readable = False
each.immobilize()
def handle(self, event):
gd.BoardGame.handle(self, event)
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
active = self.board.active_ship
if active == 0:
self.change_fract_btn(1, 0, 0)
elif active == 1:
self.change_fract_btn(-1, 0, 0)
elif active == 2:
self.change_fract_btn(0, 1, 0)
elif active == 3:
self.change_fract_btn(0, -1, 0)
elif active == 4:
self.change_fract_btn(0, 0, 1)
elif active == 5:
self.change_fract_btn(0, 0, -1)
def change_fract_btn(self, n1, n2, n3):
if n1 == 1:
if fsum(self.numbers) < self.max_total:
self.numbers[0] += 1
if self.numbers[0] > 1:
self.board.ships[1].change_image("nav_d_mts.png")
if n1 == -1:
if self.numbers[0] > 1:
self.numbers[0] -= 1
if self.numbers[0] == 1:
self.board.ships[1].change_image("nav_d_mtsd.png")
if n2 == 1:
if fsum(self.numbers) < self.max_total:
self.numbers[1] += 1
if self.numbers[1] > 1:
self.board.ships[3].change_image("nav_d_mts.png")
if n2 == -1:
if self.numbers[1] > 1:
self.numbers[1] -= 1
if self.numbers[1] == 1:
self.board.ships[3].change_image("nav_d_mtsd.png")
if n3 == 1:
if fsum(self.numbers) < self.max_total:
self.numbers[2] += 1
if self.numbers[2] > 1:
self.board.ships[5].change_image("nav_d_mts.png")
if n3 == -1:
if self.numbers[2] > 1:
self.numbers[2] -= 1
if self.numbers[2] == 1:
self.board.ships[5].change_image("nav_d_mtsd.png")
if fsum(self.numbers) == self.max_total:
self.board.ships[0].change_image("nav_u_mtsd.png")
self.board.ships[2].change_image("nav_u_mtsd.png")
self.board.ships[4].change_image("nav_u_mtsd.png")
else:
self.board.ships[0].change_image("nav_u_mts.png")
self.board.ships[2].change_image("nav_u_mts.png")
self.board.ships[4].change_image("nav_u_mts.png")
for each in self.board.ships:
each.update_me = True
self.nm1.set_value(str(self.numbers[0]))
self.nm2.set_value(str(self.numbers[1]))
self.nm3.set_value(str(self.numbers[2]))
self.fraction.update_values(self.numbers)
self.fraction_canvas.painting = self.fraction.get_canvas().copy()
self.fraction_canvas.update_me = True
self.mainloop.redraw_needed[0] = True
def update(self, game):
game.fill((255, 255, 255))
gd.BoardGame.update(self, game)
def check_result(self):
pass
| imiolek-ireneusz/eduActiv8 | game_boards/game033.py | Python | gpl-3.0 | 7,870 |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import webob
from nova.api.openstack.compute.contrib import extended_ips_mac
from nova import compute
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
# Fake instance network-info cache: two interfaces, each carrying a single
# fixed IP with one associated floating IP.
NW_CACHE = [
    {
        'address': 'aa:aa:aa:aa:aa:aa',
        'id': 1,
        'network': {
            'bridge': 'br0',
            'id': 1,
            'label': 'private',
            'subnets': [
                {
                    'cidr': '192.168.1.0/24',
                    'ips': [
                        {
                            'address': '192.168.1.100',
                            'type': 'fixed',
                            'floating_ips': [
                                {'address': '5.0.0.1', 'type': 'floating'},
                            ],
                        },
                    ],
                },
            ]
        }
    },
    {
        'address': 'bb:bb:bb:bb:bb:bb',
        'id': 2,
        'network': {
            'bridge': 'br1',
            'id': 2,
            'label': 'public',
            'subnets': [
                {
                    'cidr': '10.0.0.0/24',
                    'ips': [
                        {
                            'address': '10.0.0.100',
                            'type': 'fixed',
                            'floating_ips': [
                                {'address': '5.0.0.2', 'type': 'floating'},
                            ],
                        }
                    ],
                },
            ]
        }
    }
]


def _flat_ip(ip_entry, mac, drop_keys):
    """Copy an IP dict, tag it with its NIC's MAC and drop bookkeeping keys."""
    flat = {key: value for key, value in ip_entry.items()
            if key not in drop_keys}
    flat['mac_address'] = mac
    return flat


# Flattened expectation list: every fixed and floating address paired with the
# MAC of the interface it belongs to, sorted by "<address>-<mac>".
ALL_IPS = []
for _vif in NW_CACHE:
    _mac = _vif['address']
    for _subnet in _vif['network']['subnets']:
        for _fixed in _subnet['ips']:
            ALL_IPS.append(_flat_ip(_fixed, _mac, ('floating_ips', 'type')))
            ALL_IPS.extend(_flat_ip(_float, _mac, ('type',))
                           for _float in _fixed['floating_ips'])
ALL_IPS.sort(key=lambda entry: '%s-%s' % (entry['address'],
                                          entry['mac_address']))
def fake_compute_get(*args, **kwargs):
    # Stub for compute.api.API.get: one instance wired with the fake NIC cache.
    inst = fakes.stub_instance_obj(None, 1, uuid=UUID3, nw_cache=NW_CACHE)
    return inst
def fake_compute_get_all(*args, **kwargs):
    # Stub for compute.api.API.get_all: two instances sharing the same cache.
    inst_list = [
        fakes.stub_instance_obj(None, 1, uuid=UUID1, nw_cache=NW_CACHE),
        fakes.stub_instance_obj(None, 2, uuid=UUID2, nw_cache=NW_CACHE),
    ]
    return objects.InstanceList(objects=inst_list)
class ExtendedIpsMacTestV21(test.TestCase):
    """Checks that server show/detail responses expose each address's MAC
    via the extended_ips_mac extension (v2.1 API)."""
    content_type = 'application/json'
    # Attribute prefix used by the extension, e.g. "<alias>:mac_addr".
    prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
    def setUp(self):
        super(ExtendedIpsMacTestV21, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
    def _make_request(self, url):
        # Run the request through the stubbed v2.1 WSGI app (servers only).
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
        return res
    def _get_server(self, body):
        return jsonutils.loads(body).get('server')
    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')
    def _get_ips(self, server):
        # Yield every address entry of every network on the server.
        for network in six.itervalues(server['addresses']):
            for ip in network:
                yield ip
    def assertServerStates(self, server):
        # Every reported address must carry the MAC of its interface.
        results = []
        for ip in self._get_ips(server):
            results.append({'address': ip.get('addr'),
                            'mac_address': ip.get('%smac_addr' % self.prefix)})
        self.assertEqual(ALL_IPS, sorted(results))
    def test_show(self):
        url = '/v2/fake/servers/%s' % UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        self.assertServerStates(self._get_server(res.body))
    def test_detail(self):
        url = '/v2/fake/servers/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for _i, server in enumerate(self._get_servers(res.body)):
            self.assertServerStates(server)
class ExtendedIpsMacTestV2(ExtendedIpsMacTestV21):
    """Same checks against the legacy v2 API with the extension selected
    explicitly via flags."""
    content_type = 'application/json'
    prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
    def setUp(self):
        super(ExtendedIpsMacTestV2, self).setUp()
        # Restrict the loaded extensions to Extended_ips_mac only.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Extended_ips_mac'])
    def _make_request(self, url):
        # v2 app instead of v2.1 (see the parent class).
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
        return res
| akash1808/nova_test_latest | nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py | Python | apache-2.0 | 5,712 |
import logging
from oscar.core.loading import get_model, get_class
from premailer import transform
from ecommerce.extensions.analytics.utils import parse_tracking_context
log = logging.getLogger(__name__)
CommunicationEventType = get_model('customer', 'CommunicationEventType')
Dispatcher = get_class('customer.utils', 'Dispatcher')
def send_notification(user, commtype_code, context, site):
    """Send a notification email to ``user`` for the given event type.

    Args:
        user (User): user to whom the email is sent.
        commtype_code (str): communication event type code identifying the
            message templates to render.
        context (dict): template context; it is extended in place with the
            user's full name, site details, and a Google Analytics
            tracking pixel.
        site (Site): site whose domain and name appear in the message.
    """
    tracking_id, client_id, ip = parse_tracking_context(user)
    # 1x1 GA pixel that records an "email open" event when the mail is viewed.
    tracking_pixel = 'https://www.google-analytics.com/collect?v=1&t=event&ec=email&ea=open&tid={tracking_id}' \
                     '&cid={client_id}&uip={ip}'.format(tracking_id=tracking_id, client_id=client_id, ip=ip)
    full_name = user.get_full_name()
    context.update({
        'full_name': full_name,
        'site_domain': site.domain,
        'platform_name': site.name,
        'tracking_pixel': tracking_pixel,
    })
    try:
        event_type = CommunicationEventType.objects.get(code=commtype_code)
    except CommunicationEventType.DoesNotExist:
        # No DB-configured event type: fall back to file-based templates.
        try:
            messages = CommunicationEventType.objects.get_and_render(commtype_code, context)
        except Exception:  # pylint: disable=broad-except
            log.error('Unable to locate a DB entry or templates for communication type [%s]. '
                      'No notification has been sent.', commtype_code)
            return
    else:
        messages = event_type.get_messages(context)

    if messages and (messages['body'] or messages['html']):
        # Only inline CSS when an HTML part actually exists: premailer's
        # transform() cannot handle a None/empty document, which would
        # previously crash plain-text-only notifications.
        if messages['html']:
            messages['html'] = transform(messages['html'])
        Dispatcher().dispatch_user_messages(user, messages, site)
| mferenca/HMS-ecommerce | ecommerce/notifications/notifications.py | Python | agpl-3.0 | 1,877 |
# raspistillWeb - web interface for raspistill
# Copyright (C) 2013 Tim Jungnickel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Test case that installs (and removes) the Pyramid testing registry."""
    def setUp(self):
        # testing.setUp() returns a Configurator with a test registry active.
        self.config = testing.setUp()
    def tearDown(self):
        # Remove the registry installed by setUp().
        testing.tearDown()
| bighead85/LivCam | raspistillweb/tests.py | Python | gpl-3.0 | 913 |
#
# Copyright 2014 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from enum import Enum, IntEnum, unique
log = logging.getLogger('bundleplacer')
class InstallState(IntEnum):
    """Progress states of the install task."""
    RUNNING = 0
    NODE_WAIT = 1
@unique  # screen states must never alias one another
class ControllerState(IntEnum):
    """Names for current screen state"""
    INSTALL_WAIT = 0
    PLACEMENT = 1
    SERVICES = 2
    ADD_SERVICES = 3
class ServiceState(Enum):
    """ Service interaction states """
    # NOTE(review): member semantics inferred from their names; confirm
    # against the placement logic that consumes this enum.
    REQUIRED = 0
    OPTIONAL = 1
    CONFLICTED = 2
| battlemidget/conjure-up | bundleplacer/state.py | Python | mit | 1,135 |
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import unittest
from qingcloud.iaas.lb_listener import LoadBalancerListener
class LoadBalancerListenerTestCase(unittest.TestCase):
    """Unit tests for LoadBalancerListener construction, the forwardfor
    bitmask helper, and JSON-string parsing."""
    def test_init_instance(self):
        port = 80
        protocol = 'http'
        listener = LoadBalancerListener(port, listener_protocol=protocol,
                                        backend_protocol=protocol)
        json = listener.to_json()
        self.assertEqual(json['listener_port'], port)
        self.assertEqual(json['listener_protocol'], protocol)
    def test_init_forwardfor(self):
        port = 80
        protocol = 'http'
        # An explicit forwardfor value wins over one derived from headers.
        listener = LoadBalancerListener(port, listener_protocol=protocol,
                                        backend_protocol=protocol, forwardfor=1)
        json = listener.to_json()
        self.assertEqual(json['forwardfor'], 1)
        listener = LoadBalancerListener(port, listener_protocol=protocol,
                                        backend_protocol=protocol, headers=['QC-LBIP'])
        json = listener.to_json()
        self.assertEqual(json['forwardfor'], 4)
        listener = LoadBalancerListener(port, listener_protocol=protocol,
                                        backend_protocol=protocol, forwardfor=1, headers=['QC-LBIP'])
        json = listener.to_json()
        self.assertEqual(json['forwardfor'], 1)
    def test_get_forwardfor(self):
        # forwardfor is a bitmask: X-FORWARD-FOR=1, QC-LBID=2, QC-LBIP=4;
        # unknown header names contribute nothing.
        headers = []
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 0)
        headers = ['wrong_header']
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 0)
        headers = ['X-FORWARD-FOR']
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 1)
        headers = ['QC-LBID']
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 2)
        headers = ['QC-LBIP']
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 4)
        headers = ['X-FORWARD-FOR', 'QC-LBID']
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 3)
        headers = ['X-FORWARD-FOR', 'QC-LBIP', 'QC-LBID']
        self.assertEqual(LoadBalancerListener.get_forwardfor(headers), 7)
    def test_create_multiple_listeners_from_string(self):
        # A JSON array yields a list of listeners.
        string = '''
        [{"forwardfor":0,"loadbalancer_listener_id":"lbl-1234abcd",
        "balance_mode":"roundrobin","listener_protocol":"tcp",
        "backend_protocol":"tcp","healthy_check_method":"tcp",
        "session_sticky":"","loadbalancer_listener_name":"demo",
        "controller":"self","backends":[],"create_time":"2014-02-02T16:51:25Z",
        "healthy_check_option":"10|5|2|5","owner":"usr-1234abcd",
        "console_id":"qingcloud","loadbalancer_id":"lb-1234abcd",
        "listener_port":443},
        {"forwardfor":0,
        "loadbalancer_listener_id":"lbl-1234abcd","balance_mode":"roundrobin",
        "listener_protocol":"http","backend_protocol":"http",
        "healthy_check_method":"tcp","session_sticky":"",
        "loadbalancer_listener_name":"demo","controller":"self",
        "backends":[],"create_time":"2014-02-02T16:51:19Z",
        "healthy_check_option":"10|5|2|5","owner":"usr-1234abcd",
        "console_id":"qingcloud","loadbalancer_id":"lb-1234abcd",
        "listener_port":80}]
        '''
        listeners = LoadBalancerListener.create_from_string(string)
        self.assertEqual(len(listeners), 2)
    def test_create_single_listener_from_string(self):
        # A single JSON object yields a single listener, not a list.
        string = '''
        {"forwardfor":0,"loadbalancer_listener_id":"lbl-1234abcd",
        "balance_mode":"roundrobin","listener_protocol":"tcp",
        "backend_protocol":"tcp","healthy_check_method":"tcp",
        "session_sticky":"","loadbalancer_listener_name":"demo",
        "controller":"self","backends":[],"create_time":"2014-02-02T16:51:25Z",
        "healthy_check_option":"10|5|2|5","owner":"usr-1234abcd",
        "console_id":"qingcloud","loadbalancer_id":"lb-1234abcd",
        "listener_port":443}
        '''
        listener = LoadBalancerListener.create_from_string(string)
        self.assertTrue(isinstance(listener, LoadBalancerListener))
| daodewang/qingcloud-sdk-python | tests/test_lb_listener.py | Python | apache-2.0 | 4,811 |
from rest_framework import serializers
from constants import SLEEP_MINUTES_COLUMN, VERY_PRODUCTIVE_TIME_LABEL, PRODUCTIVITY_DRIVERS_KEYS
class ProductivityRequestParamsSerializer(serializers.Serializer):
    """Validates query parameters for productivity correlation endpoints."""
    # Lookback window sizes, bounded to 1-365 (presumably days — TODO confirm).
    correlation_lookback = serializers.IntegerField(default=60, min_value=1, max_value=365)
    cumulative_lookback = serializers.IntegerField(default=1, min_value=1, max_value=365)
    # Productivity metric the correlations are computed against.
    correlation_driver = serializers.ChoiceField(choices=PRODUCTIVITY_DRIVERS_KEYS,
                                                 default=VERY_PRODUCTIVE_TIME_LABEL)
class SleepRequestParamsSerializer(serializers.Serializer):
    """Validates query parameters for sleep correlation endpoints."""
    # Lookback window sizes, bounded to 1-365 (presumably days — TODO confirm).
    correlation_lookback = serializers.IntegerField(default=60, min_value=1, max_value=365)
    cumulative_lookback = serializers.IntegerField(default=1, min_value=1, max_value=365)
    # Kind of odd, but the only correlation_driver should be SLEEP_MINUTES_COLUMN unlike productivity
    # in the future this might change because FitBit has tiered levels of sleep, but you ain't there yet
    correlation_driver = serializers.ChoiceField(choices=[SLEEP_MINUTES_COLUMN], default=SLEEP_MINUTES_COLUMN)
# TODO
# desired names are
# rollingWindow
# lookbackHistory
| jeffshek/betterself | apis/betterself/v1/correlations/serializers.py | Python | mit | 1,183 |
# -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .helpers import _PackageBoundObject, _endpoint_from_view_func
def _register_module(module, static_path):
"""Internal helper function that returns a function for recording
that registers the `send_static_file` function for the module on
the application if necessary. It also registers the module on
the application.
"""
def _register(state):
state.app.modules[module.name] = module
# do not register the rule if the static folder of the
# module is the same as the one from the application.
if state.app.root_path == module.root_path:
return
path = static_path
if path is None:
path = state.app.static_path
if state.url_prefix:
path = state.url_prefix + path
state.app.add_url_rule(path + '/<path:filename>',
endpoint='%s.static' % module.name,
view_func=module.send_static_file,
subdomain=module.subdomain)
return _register
class _ModuleSetupState(object):
    """Carries the registration options (app, URL prefix, subdomain) while a
    module's deferred registration callbacks run."""
    def __init__(self, app, url_prefix=None, subdomain=None):
        self.app = app
        self.url_prefix = url_prefix
        self.subdomain = subdomain
class Module(_PackageBoundObject):
    """Container object that enables pluggable applications.  A module can
    be used to organize larger applications.  They represent blueprints that,
    in combination with a :class:`Flask` object are used to create a large
    application.

    A module is like an application bound to an `import_name`.  Multiple
    modules can share the same import names, but in that case a `name` has
    to be provided to keep them apart.  If different import names are used,
    the rightmost part of the import name is used as name.

    Here an example structure for a larger application::

        /myapplication
            /__init__.py
            /views
                /__init__.py
                /admin.py
                /frontend.py

    The `myapplication/__init__.py` can look like this::

        from flask import Flask
        from myapplication.views.admin import admin
        from myapplication.views.frontend import frontend

        app = Flask(__name__)
        app.register_module(admin, url_prefix='/admin')
        app.register_module(frontend)

    And here an example view module (`myapplication/views/admin.py`)::

        from flask import Module

        admin = Module(__name__)

        @admin.route('/')
        def index():
            pass

        @admin.route('/login')
        def login():
            pass

    For a gentle introduction into modules, checkout the
    :ref:`working-with-modules` section.

    .. versionadded:: 0.5
       The `static_path` parameter was added and it's now possible for
       modules to refer to their own templates and static files.  See
       :ref:`modules-and-resources` for more information.

    .. versionadded:: 0.6
       The `subdomain` parameter was added.

    :param import_name: the name of the Python package or module
                        implementing this :class:`Module`.
    :param name: the internal short name for the module.  Unless specified
                 the rightmost part of the import name
    :param url_prefix: an optional string that is used to prefix all the
                       URL rules of this module.  This can also be specified
                       when registering the module with the application.
    :param subdomain: used to set the subdomain setting for URL rules that
                      do not have a subdomain setting set.
    :param static_path: can be used to specify a different path for the
                        static files on the web.  Defaults to ``/static``.
                        This does not affect the folder the files are served
                        *from*.
    """
    def __init__(self, import_name, name=None, url_prefix=None,
                 static_path=None, subdomain=None):
        if name is None:
            assert '.' in import_name, 'name required if package name ' \
                'does not point to a submodule'
            name = import_name.rsplit('.', 1)[1]
        _PackageBoundObject.__init__(self, import_name)
        self.name = name
        self.url_prefix = url_prefix
        self.subdomain = subdomain
        # Deferred callbacks, replayed with a _ModuleSetupState when the
        # module is registered on an application.
        self._register_events = [_register_module(self, static_path)]
    def route(self, rule, **options):
        """Like :meth:`Flask.route` but for a module.  The endpoint for the
        :func:`url_for` function is prefixed with the name of the module.
        """
        def decorator(f):
            self.add_url_rule(rule, f.__name__, f, **options)
            return f
        return decorator
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Like :meth:`Flask.add_url_rule` but for a module.  The endpoint for
        the :func:`url_for` function is prefixed with the name of the module.

        .. versionchanged:: 0.6
           The `endpoint` argument is now optional and will default to the
           function name to consistent with the function of the same name
           on the application object.
        """
        def register_rule(state):
            the_rule = rule
            if state.url_prefix:
                the_rule = state.url_prefix + rule
            options.setdefault('subdomain', state.subdomain)
            the_endpoint = endpoint
            if the_endpoint is None:
                the_endpoint = _endpoint_from_view_func(view_func)
            state.app.add_url_rule(the_rule, '%s.%s' % (self.name,
                                                        the_endpoint),
                                   view_func, **options)
        self._record(register_rule)
    def before_request(self, f):
        """Like :meth:`Flask.before_request` but for a module.  This function
        is only executed before each request that is handled by a function of
        that module.
        """
        self._record(lambda s: s.app.before_request_funcs
            .setdefault(self.name, []).append(f))
        return f
    def before_app_request(self, f):
        """Like :meth:`Flask.before_request`.  Such a function is executed
        before each request, even if outside of a module.
        """
        self._record(lambda s: s.app.before_request_funcs
            .setdefault(None, []).append(f))
        return f
    def after_request(self, f):
        """Like :meth:`Flask.after_request` but for a module.  This function
        is only executed after each request that is handled by a function of
        that module.
        """
        self._record(lambda s: s.app.after_request_funcs
            .setdefault(self.name, []).append(f))
        return f
    def after_app_request(self, f):
        """Like :meth:`Flask.after_request` but for a module.  Such a function
        is executed after each request, even if outside of the module.
        """
        self._record(lambda s: s.app.after_request_funcs
            .setdefault(None, []).append(f))
        return f
    def context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a module.  This
        function is only executed for requests handled by a module.
        """
        self._record(lambda s: s.app.template_context_processors
            .setdefault(self.name, []).append(f))
        return f
    def app_context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a module.  Such a
        function is executed each request, even if outside of the module.
        """
        self._record(lambda s: s.app.template_context_processors
            .setdefault(None, []).append(f))
        return f
    def app_errorhandler(self, code):
        """Like :meth:`Flask.errorhandler` but for a module.  This
        handler is used for all requests, even if outside of the module.

        .. versionadded:: 0.4
        """
        def decorator(f):
            self._record(lambda s: s.app.errorhandler(code)(f))
            return f
        return decorator
    def _record(self, func):
        # Queue a callback to run at registration time (see _register_events).
        self._register_events.append(func)
| wxue/xiakelite | libs/flask/module.py | Python | mit | 8,422 |
#! /usr/bin/python2
# vim: set fileencoding=utf-8
"""Try to find low EMD distance regions fast."""
from collections import defaultdict
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.spatial import ConvexHull, cKDTree
from sklearn.cluster import DBSCAN
from warnings import warn
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import neighborhood as nb
import numpy as np
import persistent as p
import prettyplotlib as ppl
import report_metrics_results as rmr
import ujson
from shapely.geometry import Polygon
from timeit import default_timer as clock
# load data
# Ground truth: per-district gold regions, keyed by city name.
with open('static/ground_truth.json') as infile:
    gold_list = ujson.load(infile)
districts = sorted(gold_list.iterkeys())
cities = sorted(gold_list[districts[0]]['gold'].keys())
# Per-city venue descriptions: raw feature matrix plus the venue id index.
cities_desc = {name: nb.cn.gather_info(name, raw_features=True,
                                       hide_category=True)
               for name in cities}
# Debug accumulator (only used by commented-out code in test_all_queries).
WHICH_GEO = []
def profile(f):
    # No-op stand-in so @profile decorations (used by line_profiler/kernprof)
    # do not break normal runs.
    return f
@profile
def test_all_queries(queries, query_city='paris', n_steps=5, k=50):
    """Run all `queries` from `query_city`, expanding each recover region by
    `n_steps`-1. Return the list of all distances, corresponding computation
    time and a dictionary with the best results that can be feed to a DCG
    computer."""
    all_res = []
    timing = []
    # target_city -> district -> list of up to 5 best candidate areas.
    raw_result = defaultdict(lambda: defaultdict(list))
    biased_raw_result = defaultdict(lambda: defaultdict(list))
    for query in queries:
        target_city, district = query
        possible_regions = gold_list[district]['gold'].get(query_city)
        gold = [set(_['properties']['venues'])
                for _ in gold_list[district]['gold'].get(target_city, [])]
        region = nb.choose_query_region(possible_regions)
        if not region:
            # No usable query region for this district/city: record blanks.
            all_res.append([])
            timing.append([])
            continue
        start = clock()
        infos = nb.interpret_query(query_city, target_city, region, 'emd')
        _, right, _, regions_distance, _, _ = infos
        vindex = np.array(right['index'])
        # print(query)
        vloc = cities_venues[target_city]
        infos = retrieve_closest_venues(district, query_city, target_city, k)
        candidates, gvi, _ = infos
        # xbounds = np.array([vloc[:, 0].min(), vloc[:, 0].max()])
        # ybounds = np.array([vloc[:, 1].min(), vloc[:, 1].max()])
        # hulls = [vloc[tg, :][ConvexHull(vloc[tg, :]).vertices, :]
        #          for tg in gvi]
        # DBSCAN parameters; min_samples grows with city size.
        eps, mpts = 210, 18 if len(vloc) < 5000 else 50
        clusters = good_clustering(vloc, list(sorted(candidates)), eps, mpts)
        # plot_clusters(clusters, candidates, (xbounds, ybounds), vloc, hulls,
        #               0.65)
        res = []
        areas = []
        for cluster in clusters:
            venues_areas = cluster_to_venues(cluster, vloc,
                                             cities_kdtree[target_city],
                                             n_steps)
            if len(venues_areas) == 0:
                continue
            for venues in venues_areas:
                vids = vindex[venues]
                venues = right['features'][venues, :]
                dst = regions_distance(venues.tolist(),
                                       nb.weighting_venues(venues[:, 1]))
                res.append(dst)
                areas.append({'venues': set(vids),
                              'metric': 'femd', 'dst': dst})
                # TODO if after a few steps, we are not getting closer to the
                # current minimum distance, we may want to break the loop to
                # avoid further EMD calls (although it could hurt relevance
                # later as they are not well correlated).
        timing.append(clock() - start)
        venues_so_far = set()
        gold_venues = sum(map(len, gold))
        rels = [-1 if gold_venues == 0 else rmr.relevance(a['venues'], gold)
                for a in areas]
        # print(np.sort(rels)[::-1])
        # Obviously the line below is cheating, we should order by
        # distance and not by how good we know the result is.
        # Keep up to 5 pairwise-disjoint areas, ordered by EMD distance.
        for idx in np.argsort(res):
            cand = set(areas[idx]['venues'])
            if not venues_so_far.intersection(cand):
                venues_so_far.update(cand)
            else:
                continue
            raw_result[target_city][district].append(areas[idx])
            if len(raw_result[target_city][district]) >= 5:
                break
        # NOTE(review): outfile is computed but never used in this function.
        outfile = 'static/{}_{}_{}_femd.json'.format(query_city, district,
                                                     target_city)
        # Same selection, but ordered by known relevance (the "biased" list).
        venues_so_far = set()
        for idx in np.argsort(rels)[::-1]:
            cand = set(areas[idx]['venues'])
            if not venues_so_far.intersection(cand):
                venues_so_far.update(cand)
            else:
                continue
            biased_raw_result[target_city][district].append(areas[idx])
            if len(biased_raw_result[target_city][district]) >= 5:
                break
        # WHICH_GEO.append(np.argmin(res) % len(venues_areas))
        all_res.append(res)
    return all_res, timing, raw_result, biased_raw_result
@profile
def cluster_to_venues(indices, vloc, kdtree, n_steps=5):
    """Grow a venue cluster into a sequence of candidate neighborhoods.

    Starting from the convex hull of the clustered venues, the hull is
    dilated `n_steps` times (up to one hull radius in total) and, for each
    dilation, the venue indices falling inside are collected.

    Args:
        indices: venue indices (into `vloc`) forming the cluster.
        vloc: (n, 2) array of venue locations.
        kdtree: cKDTree over `vloc`, used to pre-select nearby venues.
        n_steps: number of dilation steps.

    Returns:
        A list of index lists, one per region, each a superset of the
        previous one; an empty list if no hull could be computed.
    """
    # Get initial polygon
    points = vloc[indices, :]
    try:
        hull = points[ConvexHull(points).vertices, :]
    except Exception:
        # Degenerate clusters (e.g. collinear or too few points) have no
        # hull; report and skip them.  `except Exception` (instead of the
        # previous bare except) lets KeyboardInterrupt/SystemExit propagate
        # without an explicit re-raise.
        print(indices)
        return []
    poly = Polygon(hull)
    center = np.array(poly.centroid.coords)
    # Query neighboring venues within twice the hull "radius" of its centroid.
    radius = np.max(cdist(np.array(poly.exterior.coords), center))
    cd_idx = kdtree.query_ball_point(center, 2.0*radius)[0]
    # Build increasingly dilated (re-convexified and simplified) regions.
    inc = 1.0*radius/n_steps
    extensions = [poly]
    extensions += [poly.buffer(i*inc,
                               resolution=2).convex_hull.simplify(30, False)
                   for i in range(1, n_steps+1)]
    # Get venues inside them; each result extends the previous one.
    remaining = set(cd_idx)
    inside = set([])
    res_cluster = []
    for region in extensions:
        if region.exterior is None:
            continue
        cpoly = np.array(region.exterior.coords)
        inside_this = set([idx for idx in remaining
                           if point_inside_poly(cpoly, vloc[idx, :])])
        remaining.difference_update(inside_this)
        inside.update(inside_this)
        res_cluster.append(list(inside))
    return res_cluster
def get_candidates_venues(query_features, target_features, k=50):
    """Return, as a single set, the indices of the `k` nearest target venues
    of every query venue (nearness measured in feature space)."""
    # One row of pairwise distances per query venue.
    pairwise = cdist(query_features, target_features)
    # For each row, target indices sorted by increasing distance.
    nearest_first = np.argsort(pairwise, axis=1)
    top_k = nearest_first[:, :k]
    return set(top_k.ravel())
def retrieve_closest_venues(district, query_city, target_city, k=50):
    """For the given query, return a list of venues indices for knn level of
    `k`, as well as a list of indices for each gold area and the threshold
    number of venues."""
    # `gold_list` and `cities_desc` are module-level globals, presumably
    # defined in an earlier part of the file.
    gold = gold_list[district]['gold']
    query = gold[query_city][0]
    query_venues = query['properties']['venues']
    # Rows of the feature matrix that belong to the query's venues.
    mask = np.where(np.in1d(cities_desc[query_city]['index'], query_venues))[0]
    query_features = cities_desc[query_city]['features'][mask, :]
    all_target_features = cities_desc[target_city]['features']
    tindex = cities_desc[target_city]['index']
    if target_city in gold:
        # Ground truth in the target city: only areas with >= 20 venues.
        gold_venue_indices = [np.where(np.in1d(tindex,
                                               reg['properties']['venues']))[0]
                              for reg in gold[target_city]
                              if len(reg['properties']['venues']) >= 20]
    else:
        gold_venue_indices = []
    if not gold_venue_indices:
        # Warn but still return candidates (early return deliberately off).
        msg = '{} in {} has no area with at least 20 venues'
        warn(msg.format(district, target_city.title()))
        # return None, None, None
    candidates = get_candidates_venues(query_features, all_target_features, k)
    # Scale the query's venue count to the size of the target city.
    threshold = int(len(tindex)*1.0*len(query_venues) /
                    len(cities_desc[query_city]['index']))
    return candidates, gold_venue_indices, threshold
def f_score(recall, precision, beta=2.0):
    """Return the F-beta score: the weighted harmonic mean of `recall` and
    `precision` (beta > 1 weights recall higher)."""
    b2 = beta * beta
    numerator = (1.0 + b2) * recall * precision
    denominator = b2 * precision + recall
    return numerator / denominator
def point_inside_poly(poly, point):
"""Tell whether `point` is inside convex `poly` based on dot product with
every edges:
demonstrations.wolfram.com/AnEfficientTestForAPointToBeInAConvexPolygon/
"""
tpoly = poly - point
size = tpoly.shape[0] - 1
angles = tpoly[1:, 0]*tpoly[:size, 1] - tpoly[:size, 0]*tpoly[1:, 1]
return int(np.abs(np.sign(angles).sum())) == size
# load venues location for all cities
# (`p`, `cities` and `cities_desc` are presumably defined earlier in the
# module — they are not visible in this chunk)
cities_venues_raw = {name: p.load_var(name+'_svenues.my') for name in cities}
cities_venues = {}
cities_index = {}
cities_kdtree = {}
for city in cities:
    vids, _, locs = cities_venues_raw[city].all()
    vindex = cities_desc[city]['index']
    # One (x, y) row per indexed venue, aligned with cities_desc ordering.
    cities_venues[city] = np.zeros((len(vindex), 2))
    # Reverse map: venue id -> row position in `vindex`.
    # NOTE: itertools.imap/izip make this file Python 2 only.
    cities_index[city] = dict(itertools.imap(lambda x: (x[1], x[0]),
                                             enumerate(vindex)))
    for vid, loc in itertools.izip(vids, locs):
        pos = cities_index[city].get(vid)
        # Venues without a known position keep their (0, 0) placeholder.
        if pos is not None:
            cities_venues[city][pos, :] = loc
    # Spatial index over the venue coordinates for fast radius queries.
    cities_kdtree[city] = cKDTree(cities_venues[city])
# Hex plot colors; gray is reserved for noise points in plot_clusters.
gray = '#bdbdbd'
red = '#e51c23'
green = '#64dd17'
blue = '#03a9f4'
orange = '#f57c00'
def evaluate_clustering(labels, candidates_indices, gold_indices_list):
    """Score a clustering against the gold areas.

    For every cluster (DBSCAN labels 0..n-1, -1 being noise), keep the best
    F1-score obtained against any gold area, and return
    [mean of those best scores (NaN counted as 0), number of clusters].
    `candidates_indices` maps each labelled point back to a venue index.
    """
    fscores = []
    # -1 is DBSCAN's noise label, not a cluster of its own.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    for k in range(n_clusters):
        best_score = np.nan
        for idx, tg in enumerate(gold_indices_list):
            relevant = np.sum(np.in1d(candidates_indices[labels == k], tg))
            precision = relevant*1.0 / candidates_indices[labels == k].size
            recall = relevant*1.0 / len(tg)
            # f_score is NaN (0/0 in numpy floats) when the cluster shares
            # no venue with this gold area; such areas are simply skipped.
            fscore = f_score(recall, precision, beta=1.0)
            if not np.isnan(fscore):
                if np.isnan(best_score):
                    best_score = fscore
                else:
                    best_score = max(fscore, best_score)
            # fscores.append(fscore)
        fscores.append(best_score)
    assert len(fscores) == n_clusters
    # mean of F1-score of best gold, 0 if nan (ie precision = 0)
    return [np.mean(np.nan_to_num(fscores)), n_clusters]
# Every (city, district) combination...
QUERIES = itertools.product(cities, districts)
# ...filtered down to pairs where the city has gold data for the district
# with at least one area of 20+ venues; paris and berlin are excluded.
# (The inner list comprehension is used as a truthiness test: non-empty
# means at least one qualifying area exists.)
ALL_Q = [(city, district) for city, district in QUERIES
         if city not in ['paris', 'berlin'] and
         city in gold_list[district]['gold'] and
         [1 for reg in gold_list[district]['gold'][city]
          if len(reg['properties']['venues']) >= 20]]
def cluster_is_small_enough(max_length, max_venues, vloc):
    """Check that the venues in `vloc` respect both the count budget
    (at most `max_venues` rows) and the spatial budget (bounding box no
    wider than `max_length` along either axis)."""
    if len(vloc) > max_venues:
        return False
    spans = (vloc[:, axis].max() - vloc[:, axis].min() for axis in (0, 1))
    return all(span <= max_length for span in spans)
def good_clustering(locs, cands, eps, mpts):
    """Return a list of list of indices making up clusters of acceptable
    size."""
    clocs = locs[cands, :]
    # Dense pairwise distance matrix of the candidate venues.
    pwd = squareform(pdist(clocs))
    clusters_indices = recurse_dbscan(pwd, np.arange(len(cands)), clocs,
                                      eps, mpts)
    depth = 0
    # If nothing survived, progressively loosen the DBSCAN parameters
    # (larger radius, lower density requirement), at most 5 times.
    while not clusters_indices and depth < 5:
        eps, mpts = eps*1.3, mpts/1.4
        clusters_indices = recurse_dbscan(pwd, np.arange(len(cands)), clocs,
                                          eps, mpts)
        depth += 1
    # Translate positions within `cands` back into global venue indices.
    cands = np.array(cands)
    return [cands[c] for c in clusters_indices]
def recurse_dbscan(distances, indices, locs, eps, mpts, depth=0):
    """Do a first DBSCAN with given parameters and if some clusters are too
    big, recluster them using stricter parameters."""
    # msg = '{}Cluster {} points with ({}, {})'
    # instead http://stackoverflow.com/a/24308860
    # print(msg.format(depth*'\t', len(indices), eps, mpts))
    pwd = distances
    mpts = int(mpts)
    labels = DBSCAN(eps=eps, min_samples=int(mpts),
                    metric='precomputed').fit(pwd).labels_
    cl_list = []
    for k in np.unique(labels):
        if k == -1:
            # -1 is DBSCAN's noise label: those points are dropped.
            continue
        k_indices = np.argwhere(labels == k).ravel()
        # 1.5e3 / 250: maximal bounding-box edge and venue count per cluster
        # (length unit follows whatever `locs` uses — presumably meters).
        if cluster_is_small_enough(1.5e3, 250, locs[k_indices, :]):
            # msg = '{}add one cluster of size {}'
            # print(msg.format(depth*'\t'+'  ', len(k_indices)))
            cl_list.append(indices[k_indices])
        else:
            if depth < 3:
                # Re-cluster the oversized cluster with a tighter radius and
                # a higher density requirement, at most 3 levels deep.
                sub_pwd = pwd[np.ix_(k_indices, k_indices)]
                sub_locs = locs[k_indices, :]
                sub_indices = recurse_dbscan(sub_pwd, k_indices, sub_locs,
                                             eps/1.4, mpts*1.3, depth+1)
                cl_list.extend([indices[c] for c in sub_indices])
            else:
                warn('Cannot break one cluster at level {}'.format(depth))
    return cl_list
def plot_clusters(clusters, candidates, bounds, vloc, hulls, shrink=0.9):
    """Plot all `clusters` among `candidates` with the `bounds` of the city
    (or at least `shrink` of them). Also plot convex `hulls` of gold areas if
    provided."""
    xbounds, ybounds = bounds
    unique_labels = len(clusters)
    # Whatever is a candidate but belongs to no cluster counts as noise.
    clustered = set().union(*map(list, clusters))
    noise = list(candidates.difference(clustered))
    if unique_labels > 5:
        # Too many clusters for named colors: sample a continuous colormap.
        colors = mpl.cm.Spectral(np.linspace(0, 1, unique_labels+1))
    else:
        colors = [gray, red, green, blue, orange]
    plt.figure(figsize=(20, 15))
    # k is shifted so that the noise group, plotted first, gets k == -1.
    for k, indices, col in zip(range(unique_labels+1), [noise]+clusters,
                               colors):
        k -= 1
        if k == -1:
            col = 'gray'
        ppl.scatter(vloc[indices, 0], vloc[indices, 1],
                    s=35 if k != -1 else 16, color=col,
                    alpha=0.8 if k != -1 else 0.6,
                    label='noise' if k == -1 else 'cluster {}'.format(k+1))
    hulls = hulls or []
    for idx, hull in enumerate(hulls):
        # Close each hull outline by appending the first vertex again
        # (list concatenation: Python 2 range() returns a list).
        first_again = range(len(hull))+[0]
        ppl.plot(hull[first_again, 0], hull[first_again, 1], '--',
                 c=ppl.colors.almost_black, lw=1.0, alpha=0.9,
                 label='gold region' if idx == 0 else None)
    plt.xlim(shrink*xbounds)
    plt.ylim(shrink*ybounds)
    ppl.legend()
if __name__ == '__main__':
    import sys
    sys.exit()
    # NOTE(review): everything below is dead code — the sys.exit() above
    # always returns first; presumably kept as a scratch driver for manual
    # experiments (note Python 2 map() returning a list in the last line).
    query_city, target_city, district = 'paris', 'barcelona', 'triangle'
    vloc = cities_venues[target_city]
    xbounds = np.array([vloc[:, 0].min(), vloc[:, 0].max()])
    ybounds = np.array([vloc[:, 1].min(), vloc[:, 1].max()])
    infos = retrieve_closest_venues(district, query_city, target_city)
    top_venues, gold_venues_indices, threshold = infos
    gold_venues = set().union(*map(list, gold_venues_indices))
    candidates = top_venues
    hulls = [vloc[tg, :][ConvexHull(vloc[tg, :]).vertices, :]
             for tg in gold_venues_indices]
    eps, mpts = 210, 18
    sclidx = good_clustering(vloc, list(sorted(candidates)), eps, mpts)
    print(np.array(map(len, sclidx)))
| daureg/illalla | approx_emd.py | Python | mit | 15,892 |
# coding: utf-8
import u3
ljh = u3.U3()
ljh.getCalibrationData()
get_ipython().run_line_magic('pinfo', 'ljh.getDIOState')
get_ipython().run_line_magic('pinfo', 'ljh.setDIOState')
get_ipython().run_line_magic('pinfo2', 'ljh.setDIOState')
ljh.setDIOState(8,1)
ljh.setDIOState(15,1)
ljh.setDIOState(8,0)
ljh.setDIOState(15,0)
ljh.setDIOState(8,1)
ljh.setDIOState(9,1)
ljh.setDIOState(10,1)
ljh.setDIOState(11,1)
ljh.setDIOState(12,1)
ljh.setDIOState(12,0)
ljh.setDIOState(14,1)
ljh.setDIOState(15,1)
ljh.setDIOState(16,1)
ljh.setDIOState(17,1)
ljh.setDIOState(18,1)
ljh.setDIOState(19,1)
DIOlist = [8,9,10,11,14,15,16,17,18,19]
def allOn():
    # Drive every DIO channel listed in the module-level DIOlist high (1)
    # on the LabJack U3 handle `ljh`.
    for i in DIOlist:
        ljh.setDIOState(i,1)
def allOff():
    # Drive every DIO channel listed in the module-level DIOlist low (0)
    # on the LabJack U3 handle `ljh`.
    for i in DIOlist:
        ljh.setDIOState(i,0)
allOff()
allOn()
allOff()
def listOn(inList):
    # Drive each DIO channel number in `inList` high (1).
    for i in inList:
        ljh.setDIOState(i,1)
def listOff(inList):
    # Drive each DIO channel number in `inList` low (0).
    for i in inList:
        ljh.setDIOState(i,0)
get_ipython().run_line_magic('pinfo', 'ljh.write')
get_ipython().run_line_magic('cd', '../')
get_ipython().run_line_magic('ls', '')
get_ipython().run_line_magic('cd', 'DAQ_1/')
get_ipython().run_line_magic('cd', 'include/')
get_ipython().run_line_magic('ls', '')
get_ipython().run_line_magic('less', 'labjackusb.h')
def listOff(inList):
    # Re-entered definition — identical to the earlier listOff
    # (this file is a recorded IPython session).
    for i in inList:
        ljh.setDIOState(i,0)
get_ipython().run_line_magic('less', 'labjackusb.h')
get_ipython().run_line_magic('pinfo', 'ljh._writeToExodriver')
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3])
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3],[])
ljh.write([76,248,4,0,77,2,0,27,240,255,15,0,49,3])
allOn()
ljh = u3.U3()
ljh.getCalibrationData()
allOn()
allOff()
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3],[])
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,0],[])
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3],[])
allOff()
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,0],[])
ljh._writeToExodriver([76,248,4,0,77,0,0,27,240,255,15,0,49,0],[])
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3],[])
allOff()
ljh._writeToExodriver([76,248,4,0,209,2,0,27,240,255,15,0,178,6],[])
ljh = u3.U3()
ljh = u3.U3()
ljh.getCalibrationData()
ljh._writeToExodriver([76,248,4,0,209,2,0,27,240,255,15,0,178,6],[])
ljh._writeToExodriver([76,248,4,0,209,2,0,27,240,255,15,0,178,6],[])
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3],[])
allOff()
ljh._writeToExodriver([76,248,4,0,77,2,0,27,240,255,15,0,49,3],[])
allOff()
arrList = [[ 76, 248,4,0,77,2,0,27,240,255,15,0,49,3 ],
[ 208, 248,4,0,209,2,0,27,240,255,15,0,178,6 ],
[ 223, 248,4,0,224,2,0,27,240,255,15,0,192,7 ],
[ 253, 248,4,0,254,2,0,27,240,255,15,0,225,4 ],
[ 227, 248,4,0,228,2,0,27,240,255,15,0,134,69 ],
[ 6, 248,4,0,6,3,0,27,240,255,15,0,167,70 ],
[ 79, 248,4,0,79,3,0,27,240,255,15,0,242,68 ],
[ 83, 248,4,0,83,3,0,27,240,255,15,0,243,71 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,163,5 ],
[ 189, 248,4,0,190,2,0,27,240,255,15,0,164,1 ],
[ 159, 248,4,0,160,2,0,27,240,255,15,0,133,2 ],
[ 69, 248,4,0,70,2,0,27,240,255,15,0,38,7 ],
[ 69, 248,4,0,70,2,0,27,240,255,15,0,43,2 ],
[ 74, 248,4,0,75,2,0,27,240,255,15,0,45,5 ],
[ 107, 248,4,0,108,2,0,27,240,255,15,0,79,4 ],
[ 42, 248,4,0,43,2,0,27,240,255,15,0,12,6 ],
[ 72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[ 72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 160, 248,4,0,161,2,0,27,240,255,15,0,68,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ]]
arrList = [[ 76, 248,4,0,77,2,0,27,240,255,15,0,49,3 ],
[ 208, 248,4,0,209,2,0,27,240,255,15,0,178,6 ],
[ 223, 248,4,0,224,2,0,27,240,255,15,0,192,7 ],
[ 253, 248,4,0,254,2,0,27,240,255,15,0,225,4 ],
[ 227, 248,4,0,228,2,0,27,240,255,15,0,134,69 ],
[ 6, 248,4,0,6,3,0,27,240,255,15,0,167,70 ],
[ 79, 248,4,0,79,3,0,27,240,255,15,0,242,68 ],
[ 83, 248,4,0,83,3,0,27,240,255,15,0,243,71 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,163,5 ],
[ 189, 248,4,0,190,2,0,27,240,255,15,0,164,1 ],
[ 159, 248,4,0,160,2,0,27,240,255,15,0,133,2 ],
[ 69, 248,4,0,70,2,0,27,240,255,15,0,38,7 ],
[ 69, 248,4,0,70,2,0,27,240,255,15,0,43,2 ],
[ 74, 248,4,0,75,2,0,27,240,255,15,0,45,5 ],
[ 107, 248,4,0,108,2,0,27,240,255,15,0,79,4 ],
[ 42, 248,4,0,43,2,0,27,240,255,15,0,12,6 ],
[ 72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[ 72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 160, 248,4,0,161,2,0,27,240,255,15,0,68,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ]]
arrList[0]
ljh = u3.U3()
ljh = u3.U3()
ljh.getCalibrationData()
ljh._writeToExodriver(arrList[0],[])
ljh._writeToExodriver(arrList[1],[])
ljh._writeToExodriver(arrList[2],[])
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
input('ok?')
len(arrList)
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
input('ok?')
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
input('ok?')
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
allOff()
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
allOff()
allOn()
allOff()
get_ipython().run_line_magic('save', "'../kick-u3/manualSession.py' '0-88'")
get_ipython().run_line_magic('less', '../kick-u3/manualSession.py')
npa = np.array(arrList))
npa = np.array(arrList)
npa[:,0]
plt.plot(npa[:,0],npa[:,-2])
plt.plot(npa[:,0],npa[:,-1])
plt.plot(npa[:,0],npa[:,-1]+npa[:,-1])
plt.plot(npa[:,0],npa[:,-1]+npa[:,-2])
plt.cla()
plt.plot(npa[:,0],npa[:,-1]+npa[:,-2])
plt.plot(npa[:,0],npa[:,-1]*npa[:,-2])
plt.plot(npa[:,0],npa[:,-1]+npa[:,-2])
plt.plot(npa[:,0],npa[:,-1]+npa[:,-2])
plt.cla()
plt.plot(npa[:,0],npa[:,-1]+npa[:,-2])
49+3
49+3-22
76/(49+3-22)
2.5*(49+3-22)
2.5*(49+3-21)
2.533*(49+3-22)
2.5333*(49+3-22)
2.5333*(178+6-22)
2.5333*(192+7-22)
res = []
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
a = input('ok?')
res.append(a)
npa[res]
res
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,21,22,23]
res2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,21,22,23]
npa[res2]
npa[res2]
for i in range(0,len(npa2)):
ljh._writeToExodriver(arrList[npa2[i]],[])
a = input('ok?')
for i in range(0,len(res2)):
ljh._writeToExodriver(arrList[res2[i]],[])
a = input('ok?')
allOff()
res2
ljh._writeToExodriver(arrList[16],[])
ljh._writeToExodriver(arrList[17],[])
ljh._writeToExodriver(arrList[18],[])
ljh._writeToExodriver(arrList[19],[])
ljh._writeToExodriver(arrList[20],[])
ljh._writeToExodriver(arrList[0],[])
ljh._writeToExodriver(arrList[18],[])
allOff()
res2
allList2 = [[76, 248,4,0,77,2,0,27,240,255,15,0,49,3 ],
[208, 248,4,0,209,2,0,27,240,255,15,0,178,6 ],
[223, 248,4,0,224,2,0,27,240,255,15,0,192,7 ],
[253, 248,4,0,254,2,0,27,240,255,15,0,225,4 ],
[91, 248,4,0,92,2,0,27,240,255,15,0,66,1 ],
[125, 248,4,0,126,2,0,27,240,255,15,0,99,2 ],
[198, 248,4,0,199,2,0,27,240,255,15,0,174,0 ],
[202, 248,4,0,203,2,0,27,240,255,15,0,175,3 ],
[192, 248,4,0,193,2,0,27,240,255,15,0,163,5 ],
[189, 248,4,0,190,2,0,27,240,255,15,0,164,1 ],
[159, 248,4,0,160,2,0,27,240,255,15,0,133,2 ],
[69, 248,4,0,70,2,0,27,240,255,15,0,38,7 ],
[69, 248,4,0,70,2,0,27,240,255,15,0,43,2 ],
[74, 248,4,0,75,2,0,27,240,255,15,0,45,5 ],
[107, 248,4,0,108,2,0,27,240,255,15,0,79,4 ],
[42, 248,4,0,43,2,0,27,240,255,15,0,12,6 ],
[72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ]]
for i in range(0,len(arrList2)):
ljh._writeToExodriver(arrList2[i],[])
arrList2 = [[76, 248,4,0,77,2,0,27,240,255,15,0,49,3 ],
[208, 248,4,0,209,2,0,27,240,255,15,0,178,6 ],
[223, 248,4,0,224,2,0,27,240,255,15,0,192,7 ],
[253, 248,4,0,254,2,0,27,240,255,15,0,225,4 ],
[91, 248,4,0,92,2,0,27,240,255,15,0,66,1 ],
[125, 248,4,0,126,2,0,27,240,255,15,0,99,2 ],
[198, 248,4,0,199,2,0,27,240,255,15,0,174,0 ],
[202, 248,4,0,203,2,0,27,240,255,15,0,175,3 ],
[192, 248,4,0,193,2,0,27,240,255,15,0,163,5 ],
[189, 248,4,0,190,2,0,27,240,255,15,0,164,1 ],
[159, 248,4,0,160,2,0,27,240,255,15,0,133,2 ],
[69, 248,4,0,70,2,0,27,240,255,15,0,38,7 ],
[69, 248,4,0,70,2,0,27,240,255,15,0,43,2 ],
[74, 248,4,0,75,2,0,27,240,255,15,0,45,5 ],
[107, 248,4,0,108,2,0,27,240,255,15,0,79,4 ],
[42, 248,4,0,43,2,0,27,240,255,15,0,12,6 ],
[72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ]]
for i in range(0,len(arrList2)):
ljh._writeToExodriver(arrList2[i],[])
ljh = u3.U3()
ljh.getCalibrationData()
for i in range(0,len(arrList2)):
ljh._writeToExodriver(arrList2[i],[])
allOff()
allOff()
for i in range(0,len(arrList2)):
ljh._writeToExodriver(arrList2[i],[])
input('ok?')
for i in range(0,len(arrList2)):
ljh._writeToExodriver(arrList2[i],[])
input('ok?')
get_ipython().run_line_magic('cd', 'Programs/DAQ_1/kick-u3/')
get_ipython().run_line_magic('ls', '')
import u3
ljh = u3.U3()
ljh.getCalibrationData()
arrList3 = [[166, 248,5,0,166,2,0,27,240,255,15,0,7,4,5,125 ],
[75, 248,5,0,74,3,0,27,240,255,15,0,168,7,5,125 ],
[73, 248,5,0,72,3,0,27,240,255,15,0,169,4,5,125 ],
[166, 248,5,0,166,2,0,27,240,255,15,0,10,1,5,125 ],
[155, 248,5,0,155,2,0,27,240,255,15,0,0,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ]]
ljh._writeToExodriver(arrList3[0],[])
def allOff():
    # Re-entered after reconnecting to the U3: drive every DIO channel
    # in DIOlist low (0).
    for i in DIOlist:
        ljh.setDIOState(i,0)
allOff()
def listOff(inList):
    # Re-entered definition: drive each DIO channel in `inList` low (0).
    for i in inList:
        ljh.setDIOState(i,0)
DIOlist = [8,9,10,11,14,15,16,17,18,19]
def allOff():
    # Redefinition sweeping channels 0..31 low; the range is narrowed in
    # later re-entries (24, then 20), presumably after device errors.
    for i in range(0,32):
        ljh.setDIOState(i,0)
allOff()
allOff()
def allOff():
    # Redefinition sweeping channels 0..23 low (narrowed from 0..31).
    for i in range(0,24):
        ljh.setDIOState(i,0)
allOff()
def allOff():
    # Final redefinition in the session: sweep channels 0..19 low.
    for i in range(0,20):
        ljh.setDIOState(i,0)
allOff()
ljh._writeToExodriver(arrList3[1],[])
ljh._writeToExodriver(arrList3[0],[])
ljh._writeToExodriver(arrList3[1],[])
ljh._writeToExodriver(arrList3[2],[])
ljh._writeToExodriver(arrList3[3],[])
ljh._writeToExodriver(arrList3[4],[])
ljh._writeToExodriver(arrList3[5],[])
ljh._writeToExodriver(arrList3[6],[])
ljh._writeToExodriver(arrList3[7],[])
allOff()
ljh.close()
ljh = u3.U3()
ljh.getCalibrationData()
ljh._writeToExodriver(arrList3[0],[])
ljh._writeToExodriver(arrList3[1],[])
arrList = [[ 76, 248,4,0,77,2,0,27,240,255,15,0,49,3 ],
[ 208, 248,4,0,209,2,0,27,240,255,15,0,178,6 ],
[ 223, 248,4,0,224,2,0,27,240,255,15,0,192,7 ],
[ 253, 248,4,0,254,2,0,27,240,255,15,0,225,4 ],
[ 227, 248,4,0,228,2,0,27,240,255,15,0,134,69 ],
[ 6, 248,4,0,6,3,0,27,240,255,15,0,167,70 ],
[ 79, 248,4,0,79,3,0,27,240,255,15,0,242,68 ],
[ 83, 248,4,0,83,3,0,27,240,255,15,0,243,71 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,163,5 ],
[ 189, 248,4,0,190,2,0,27,240,255,15,0,164,1 ],
[ 159, 248,4,0,160,2,0,27,240,255,15,0,133,2 ],
[ 69, 248,4,0,70,2,0,27,240,255,15,0,38,7 ],
[ 69, 248,4,0,70,2,0,27,240,255,15,0,43,2 ],
[ 74, 248,4,0,75,2,0,27,240,255,15,0,45,5 ],
[ 107, 248,4,0,108,2,0,27,240,255,15,0,79,4 ],
[ 42, 248,4,0,43,2,0,27,240,255,15,0,12,6 ],
[ 72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[ 72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 160, 248,4,0,161,2,0,27,240,255,15,0,68,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 192, 248,4,0,193,2,0,27,240,255,15,0,100,68 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[ 24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ]]
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
input('ok?')
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
input('ok?')
ljh = u3.U3()
ljh.getCalibrationData()
allOff()
ljh = u3.U3()
ljh.getCalibrationData()
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
sleep(10)
allOff()
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
time.sleep(10)
allOff()
import time
time.sleep(10)
time.sleep(1)
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
time.sleep(10)
time.sleep(.1)
time.sleep(.01)
time.sleep(.001)
allOff()
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
time.sleep(.1)
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
time.sleep(.1)
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
time.sleep(.01)
for i in range(0,len(arrList)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList[i+len(arrList)//2],[])
time.sleep(2)
for i in range(0,len(arrList)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList[i+len(arrList)//2],[])
time.sleep(2)
for i in range(0,len(arrList)):
ljh._writeToExodriver(arrList[i],[])
time.sleep(.5)
ljh._writeToExodriver(arrList[15],[])
ljh._writeToExodriver(arrList[16],[])
ljh._writeToExodriver(arrList[14],[])
ljh._writeToExodriver(arrList[17],[])
ljh._writeToExodriver(arrList[18],[])
ljh._writeToExodriver(arrList[19],[])
arrList2 = [[76, 248,4,0,77,2,0,27,240,255,15,0,49,3 ],
[208, 248,4,0,209,2,0,27,240,255,15,0,178,6 ],
[223, 248,4,0,224,2,0,27,240,255,15,0,192,7 ],
[253, 248,4,0,254,2,0,27,240,255,15,0,225,4 ],
[91, 248,4,0,92,2,0,27,240,255,15,0,66,1 ],
[125, 248,4,0,126,2,0,27,240,255,15,0,99,2 ],
[198, 248,4,0,199,2,0,27,240,255,15,0,174,0 ],
[202, 248,4,0,203,2,0,27,240,255,15,0,175,3 ],
[192, 248,4,0,193,2,0,27,240,255,15,0,163,5 ],
[189, 248,4,0,190,2,0,27,240,255,15,0,164,1 ],
[159, 248,4,0,160,2,0,27,240,255,15,0,133,2 ],
[69, 248,4,0,70,2,0,27,240,255,15,0,38,7 ],
[69, 248,4,0,70,2,0,27,240,255,15,0,43,2 ],
[74, 248,4,0,75,2,0,27,240,255,15,0,45,5 ],
[107, 248,4,0,108,2,0,27,240,255,15,0,79,4 ],
[42, 248,4,0,43,2,0,27,240,255,15,0,12,6 ],
[72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[72, 248,4,0,73,2,0,27,240,255,15,0,48,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[56, 248,4,0,57,2,0,27,240,255,15,0,32,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ],
[24, 248,4,0,25,2,0,27,240,255,15,0,0,0 ]]
for i in range(0,len(arrList2)):
ljh._writeToExodriver(arrList2[i],[])
time.sleep(.5)
for i in range(0,len(arrList2)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList2[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList2[i+len(arrList2)//2],[])
time.sleep(2)
allOff()
for i in range(0,len(arrList2)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList2[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList2[i+len(arrList2)//2],[])
time.sleep(2)
for i in range(0,len(arrLis3)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList3[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList3[i+len(arrList3)//2],[])
time.sleep(2)
for i in range(0,len(arrList3)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList3[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList3[i+len(arrList3)//2],[])
time.sleep(2)
for i in range(0,len(arrList3)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList3[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList3[i+len(arrList3)//2],[])
time.sleep(2)
for i in range(0,len(arrList3)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList3[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList3[i+len(arrList3)//2],[])
time.sleep(2)
allOff()
allOff()
arrList3 = [[166, 248,5,0,166,2,0,27,240,255,15,0,7,4,5,125 ],
[75, 248,5,0,74,3,0,27,240,255,15,0,168,7,5,125 ],
[73, 248,5,0,72,3,0,27,240,255,15,0,169,4,5,125 ],
[166, 248,5,0,166,2,0,27,240,255,15,0,10,1,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ]]
arrList3 = [[166, 248,5,0,166,2,0,27,240,255,15,0,7,4,5,125 ],
[75, 248,5,0,74,3,0,27,240,255,15,0,168,7,5,125 ],
[73, 248,5,0,72,3,0,27,240,255,15,0,169,4,5,125 ],
[166, 248,5,0,166,2,0,27,240,255,15,0,10,1,5,125 ],
[155, 248,5,0,155,2,0,27,240,255,15,0,0,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ],
[187, 248,5,0,187,2,0,27,240,255,15,0,32,0,5,125 ]]
for i in range(0,len(arrList3)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList3[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList3[i+len(arrList3)//2],[])
time.sleep(2)
allOff()
for i in range(0,len(arrList3)//2):
if i%2 == 0:
ljh._writeToExodriver(arrList3[i],[])
time.sleep(.01)
else:
ljh._writeToExodriver(arrList3[i+len(arrList3)//2],[])
time.sleep(2)
allOff()
allOff()
| ntbrewer/DAQ_1 | kick-u3/manualSession.py | Python | gpl-3.0 | 20,264 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Jerome Rapinat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Filters/Rules/Person/_MatchesSourceConfidence.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._matchessourceconfidencebase import MatchesSourceConfidenceBase
#-------------------------------------------------------------------------
# "Confidence level"
#-------------------------------------------------------------------------
class MatchesSourceConfidence(MatchesSourceConfidenceBase):
    """Places matching a specific confidence level on their 'direct' source
    references (the docstring previously said "Media" — a copy-paste slip:
    `name` and `description` below both refer to places)."""
    labels = [_('Confidence level:')]
    name = _('Place with a direct source >= <confidence level>')
    description = _("Matches places with at least one direct source with confidence level(s)")
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/place/_matchessourceconfidence.py | Python | gpl-2.0 | 1,880 |
#!/usr/bin/python
"""Command-line tool to add a client record to the OUI devices database.

Collects company/contact details from the options below and inserts a row
into the `clients` table; with --list it dumps the existing rows instead.
"""
import os
import sys
import argparse

dirSelf = os.path.dirname(os.path.realpath(__file__))
libDir = dirSelf.rstrip(os.sep).rstrip("toolsDevice").rstrip(os.sep).rstrip("bin").rstrip(os.sep) + os.sep + "lib"
sys.path.append(libDir)
# print("lib : "+ libDir)
import dbOuiDevices

# add_help=False frees the short option -h for --homeaddress.  With the
# default help action enabled, registering "-h" below raised
# "argparse.ArgumentError: conflicting option string: -h" at startup, so
# this script could never run.  Help stays reachable through --help.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--help", action="help",
                    help="show this help message and exit")
parser.add_argument("-n", "--name", dest='name', help='company name')
parser.add_argument("-f", "--firstname", dest='firstname', help='first name')
parser.add_argument("-l", "--lastname", dest='lastname', help='last name')
parser.add_argument("-t", "--tin", dest='tin', help='tin number of the company')
parser.add_argument("-p", "--pan", dest='pan', help='pan number of the company')
parser.add_argument("-o", "--officeaddress", dest='officeaddress', help='office address')
parser.add_argument("-h", "--homeaddress", dest='homeaddress', help='home address')
parser.add_argument("-m", "--homephone", dest='homephone', help='home phone number')
parser.add_argument("-e", "--officephone", dest='officephone', help='office phone number')
parser.add_argument("-c", "--country", dest='country', help='country')
parser.add_argument("-s", "--state", dest='state', help='state')
parser.add_argument("-y", "--city", dest='city', help='city')
# -l was already taken by --lastname (the original registered it twice,
# another fatal argparse conflict), so --list is long-form only.
parser.add_argument("--list", dest='islist', action='store_true',
                    help='list all the clients')
args = parser.parse_args()

dbconn = dbOuiDevices.db()

if args.islist:
    raw = dbconn.execute("select * from clients", dictionary=True)
    # execute() returns an int error code on failure and rows on success.
    if not isinstance(raw, int):
        for x in raw:
            print(x['id'] +":"+ x['name'] +":"+ x['firstName'] +":"+ x['lastName'] +":"+ x['country'] +":"+ x['state'] +":"+ x['city'] +":"+ x['officeAddress'] +":"+ x['officePhone'] +":"+ x['homeAddress'] +":"+ x['homePhone'] +":"+ x['tinNo'] +":"+ x['panNo'])
    sys.exit(0)

# A company name or a full person name is mandatory for an insert.
if not (args.name or (args.firstname and args.lastname)):
    print('please give a name or first and last name')
    sys.exit(1)

# Map each command-line option onto its database column; only options that
# were supplied become part of the INSERT statement.
option_to_column = [
    (args.name, 'name'),
    (args.firstname, 'firstName'),
    (args.lastname, 'lastName'),
    (args.country, 'country'),
    (args.state, 'state'),
    (args.city, 'city'),
    (args.officeaddress, 'officeAddress'),
    (args.homeaddress, 'homeAddress'),
    (args.officephone, 'officePhone'),
    (args.homephone, 'homePhone'),
    (args.tin, 'tinNo'),
    (args.pan, 'panNo'),
]
fields = []
values = []
for value, column in option_to_column:
    if value:
        fields.append(column)
        values.append("'" + value + "'")

try:
    # FIXME(security): values are spliced straight into the SQL string (only
    # wrapped in single quotes), which is injection-prone.  Switch to the
    # driver's parameterized queries if dbOuiDevices.db exposes them.
    dbconn.execute("insert into clients (" + ",".join(fields) + ") values (" + ",".join(values) + ")")
except Exception:
    # Report the failure instead of dying with a raw traceback (the original
    # bare `except:` also swallowed SystemExit/KeyboardInterrupt).
    print(str(sys.exc_info()))
    sys.exit(1)
| shrinidhi666/wtfBox | bin/toolsDevice/client_add.py | Python | gpl-3.0 | 3,276 |
# League-standings builder: seeds per-category ratings from zacetek.txt,
# replays every processed round found under ./Rezultati, and writes the
# cumulative standings to CSV via helpers from the Liga module.
from Liga import *
from os import path
import csv
# Per-category standings keyed by (normalized first name, normalized surname);
# sumniki() (from Liga) normalizes Slovenian diacritics.
stanjeLigeA={}
stanjeLigeB={}
stanjeLigeC={}
with open('zacetek.txt',encoding='utf-8') as f:
    k=0
    for line in f:
        # k == 0 only on the first iteration: skips the header row.
        if k!=0:
            a=line.split(';')
            ime=a[1]
            priimek=a[0]
            klub=a[2]
            kat=a[3]
            # Seed rating: raw points plus a quarter of the surplus above 900
            # (a[4] carries a trailing newline, hence [:-1]).
            tocke=round(int(a[4][:-1]) + (int(a[4][:-1])-900)/4)
            # Hard-coded rating overrides for three specific players.
            if priimek=='Piltaver' and ime=='Jaka':
                tocke=1250
            elif priimek=='Hribar' and ime =='Andraž':
                tocke=1400
            elif ime=='Peter' and priimek=='Tušar':
                tocke=1150
            # Entry layout: round 0 -> [rank, rating, played-flag].
            if kat=='A':
                stanjeLigeA[(sumniki(ime),sumniki(priimek))]={'ime':ime,'priimek':priimek,'klub':klub,0:[0,tocke,False]}
            elif kat=='B':
                stanjeLigeB[(sumniki(ime),sumniki(priimek))]={'ime':ime,'priimek':priimek,'klub':klub,0:[0,tocke,False]}
            elif kat=='C':
                stanjeLigeC[(sumniki(ime),sumniki(priimek))]={'ime':ime,'priimek':priimek,'klub':klub,0:[0,tocke,False]}
        k=1
#print(stanjeLigeA)
# Replay every round whose results file exists; IP is an inflation factor
# applied from round 17 onwards.
st_tekem=0
IP = 1
for st_lige in range(1,20):
    if st_lige == 17:
        IP = 1.15
    if path.isfile('./Rezultati/olp'+str(st_lige)+'.csv'):
        c=rezultati(st_lige,{'A':stanjeLigeA,'B':stanjeLigeB,'C':stanjeLigeC})
        stanjeLigeA=izracunLigeA(c['A'],st_lige,stanjeLigeA, IP)
        stanjeLigeB=izracunLigeA(c['B'],st_lige,stanjeLigeB, IP)
        stanjeLigeC=izracunLigeA(c['C'],st_lige,stanjeLigeC, IP)
        st_tekem+=1
#mankajociKlubi(stanjeLigeA)
#mankajociKlubi(stanjeLigeB)
#mankajociKlubi(stanjeLigeC)
# Write the combined standings, then copy the "serious" standings file for
# the latest round (if present) to OLP_2016.csv.
stanjeLige={'A':stanjeLigeA,'B':stanjeLigeB,'C':stanjeLigeC}
vCsv(stanjeLige,st_tekem)
if path.isfile('./ResnaStanja/StanjeLige'+str(st_tekem)+'.csv'):
    g=open('OLP_2016.csv','w+',encoding='utf-8')
    with open('./ResnaStanja/StanjeLige'+str(st_tekem)+'.csv','r+',encoding='utf-8') as f:
        for i in f.readlines():
            g.write(i)
    g.close()
| andrejborstnik/OLP | StanjeLige.py | Python | gpl-2.0 | 2,016 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# IncompleteNames
#
#-------------------------------------------------------------------------
class IncompleteNames(Rule):
    """Rule matching people whose first name or surname is missing."""

    name = _('People with incomplete names')
    description = _("Matches people with firstname or lastname missing")
    category = _('General filters')

    def apply(self, db, person):
        """Return True when any of the person's names lacks a first name
        or a (non-blank) surname."""
        all_names = [person.get_primary_name()] + person.get_alternate_names()
        for candidate in all_names:
            if not candidate.get_first_name().strip():
                return True
            surnames = candidate.get_surname_list()
            if not surnames:
                # No surname entries at all counts as incomplete.
                return True
            if any(not entry.get_surname().strip() for entry in surnames):
                return True
        return False
| sam-m888/gprime | gprime/filters/rules/person/_incompletenames.py | Python | gpl-2.0 | 2,096 |
# https://oj.leetcode.com/problems/search-for-a-range/
# First use bisection to find the target, and then search for the upper and lower bound.
# # However, the second search is linear and the complexity can be potentially O(N)
class Solution:
    # @param A, a list of integers (sorted in non-decreasing order)
    # @param target, an integer to be searched
    # @return a list of length 2, [index1, index2]
    def searchRange(self, A, target):
        """Return [first, last] indices of target in sorted list A, or [-1, -1].

        Fixes over the original:
        - ``(left+right)/2`` produced a float index under Python 3; bisection
          now uses the stdlib, which is correct on both Python 2 and 3.
        - the bounds were expanded linearly (O(n) when target fills the list,
          as the original header comment itself noted); both bounds are now
          found in O(log n).
        """
        from bisect import bisect_left, bisect_right  # stdlib binary search
        lo = bisect_left(A, target)
        # Target absent: insertion point is past the end or points elsewhere.
        if lo == len(A) or A[lo] != target:
            return [-1, -1]
        # bisect_right yields one past the last occurrence.
        hi = bisect_right(A, target) - 1
        return [lo, hi]
# s = Solution()
# print s.searchRange([2], 2)
| lijunxyz/leetcode_practice | search_for_a_range_medium/Solution1.py | Python | mit | 1,397 |
import os
import osiris
class Page(osiris.IMainPage):
	"""Portal page that lists, creates, edits and removes ISIS endpoints.

	The action is selected by the "act" URL parameter: "home" (overview of
	the endpoints of every subscribed portal) or "add"/"edit" (endpoint form).
	"""
	def __init__(self, session):
		osiris.IMainPage.__init__(self, session)
		# "dialog" mode renders only the template, without the page chrome.
		if session.request.getUrlParam("mode") == "dialog":
			self.ajax = True
	def isMcpModeRequired(self):
		# Page is reachable outside of MCP (management) mode.
		return False
	def getPageName(self):
		return "main.pages.isis"
	def onInit(self):
		"""Create the XSL template and the controls for the requested action."""
		osiris.IMainPage.onInit(self)
		document = osiris.XMLDocument()
		self.root = document.create("isis")
		template = osiris.HtmlXSLControl()
		template.stylesheet = self.loadStylesheet(os.path.join(os.path.dirname(__file__), "isis.xsl"))
		template.document = document
		# In ajax/dialog mode the template is the entire response body.
		if(self.ajax):
			self.controls.add(template)
		else:
			self.getArea(osiris.pageAreaContent).controls.add(template)
		self.act = self.session.request.getUrlParam("act")
		if(self.act == ""):
			self.act = "home"
		self.root.setAttributeString("action", self.act)
		if(self.act == "home"):
			#osiris.events.connect(self.events.get("onAdd"), self.onAdd)
			#osiris.events.connect(self.events.get("onEdit"), self.onEdit)
			osiris.events.connect(self.events.get("onRemove"), self.onRemove)
		elif( (self.act == "add") or (self.act == "edit") ):
			# Form controls for creating/editing a single endpoint.
			self.saveCommand = osiris.IdeButton(self.getText("common.actions.save"))
			self.saveCommand.id = "save"
			self.saveCommand.iconHref = self.skin.getImageUrl("icons/16x16/save.png")
			osiris.events.connect(self.saveCommand.eventClick, self.onSave)
			template.addChildParam(self.saveCommand)
			self.cboPortal = osiris.HtmlComboBox()
			self.cboPortal.id = "portal"
			self.cboPortal.size = 40
			template.addChildParam(self.cboPortal)
			self.txtName = osiris.HtmlTextBox()
			self.txtName.id = "name"
			self.txtName.size = 40
			template.addChildParam(self.txtName)
			self.txtUrl = osiris.HtmlTextBox()
			self.txtUrl.id = "url"
			self.txtUrl.size = 40
			template.addChildParam(self.txtUrl)
			self.txtPassword = osiris.HtmlTextBox()
			self.txtPassword.id = "password"
			self.txtPassword.size = 40
			template.addChildParam(self.txtPassword)
			self.chkEnabled = osiris.IdePickerBool()
			self.chkEnabled.id = "enabled"
			template.addChildParam(self.chkEnabled)
	def onPreRender(self):
		"""Fill the XML document with the endpoints of every subscribed portal."""
		osiris.IMainPage.onPreRender(self)
		if(self.act == "home"):
			subscribedPortals = osiris.PortalsSystem.instance().portals
			if len(subscribedPortals) == 0:
				return
			nodePortals = self.root.nodes.add("portals")
			for portal in subscribedPortals:
				nodePortal = nodePortals.nodes.add("portal")
				nodePortal.attributes.set("id", portal.portalID.string)
				nodePortal.attributes.set("pov", portal.povID.string)
				nodePortal.attributes.set("name", portal.name)
				if portal.optionsShared.portalDescription != "":
					nodePortal.attributes.set("description", portal.optionsShared.portalDescription)
				# One <isis> node per configured endpoint of this portal.
				isisEndpoints = portal.options.isisEndpoints;
				for isisEndpointID in isisEndpoints.keys():
					isisEndpoint = isisEndpoints[isisEndpointID];
					nodeIsis = nodePortal.nodes.add("isis")
					nodeIsis.attributes.set("id", portal.portalID.string)
					nodeIsis.attributes.set("pov", portal.povID.string)
					nodeIsis.attributes.set("portal_name",portal.name);
					nodeIsis.attributes.set("portal_description",portal.optionsShared.portalDescription);
					nodeIsis.attributes.set("name",isisEndpoint.getName());
					nodeIsis.attributes.set("url",isisEndpoint.url.toString());
					nodeIsis.attributes.set("enabled",isisEndpoint.enabled);
					nodeIsis.attributes.set("last_event",isisEndpoint.getLastEvent());
					nodeIsis.attributes.set("edit_href", "/main/isis?act=edit&id=" + str(isisEndpointID))
					nodeIsis.attributes.set("remove_href", self.getEventCommand("onRemove", str(isisEndpointID)))
			self.root.attributes.set("add_href", "/main/isis?act=add")
	def onPathway(self):
		"""Build the breadcrumb trail for the current action."""
		self.getPathway().add(self.getText("main.pages.isis.title"), osiris.PortalsSystem.instance().getMainLink("isis"));
		if(self.act != "home"):
			self.getPathway().add(self.getText("main.pages.isis." + self.act + ".title"),"")
	def onSave(self, args):
		# NOTE(review): save handler is a stub — the values entered in the
		# form controls are never persisted.
		isisID = args[0]
	def onRemove(self, args):
		isisID = args[0]
		# NOTE(review): removal is delegated to IdeAccountsManager — confirm
		# this is the intended registry for ISIS endpoints.
		osiris.IdeAccountsManager.instance().remove(args[0])
| OsirisSPS/osiris-sps | client/data/extensions/148B613D055759C619D5F4EFD9FDB978387E97CB/scripts/main/isis.py | Python | gpl-3.0 | 4,201 |
import os
import sys
import sqlite3
import datetime
import requests
import json
import logging
import argparse
import pandas as pd
os.environ['TZ'] = 'US/Central'
insert = """
INSERT OR IGNORE INTO vehicles (
vid, tmstmp, lat, lon, hdg, pid, rt, des, pdist, dly, tatripid, tablockid, zone
) VALUES (
:vid, :tmstmp, :lat, :lon, :hdg, :pid, :rt, :des, :pdist, :dly, :tatripid, :tablockid, :zone
)
"""
database_name = "bustracker.db"
URL = "http://www.ctabustracker.com/bustime/api/v2/getvehicles"
schedule_path = "bus_schedule.csv"
def configure_logger(log_path):
    """Route ERROR-level log records to *log_path* with a timestamped format."""
    log_format = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(filename=log_path, level=logging.ERROR, format=log_format)
def load_bus_schedule(filepath):
    """Load the bus-schedule CSV, parsing the departure/arrival columns
    into datetime.time values; every other column stays a string."""
    timetable = pd.read_csv(filepath, dtype=str)
    for column in ('first_departure', 'last_arrival'):
        timetable[column] = pd.to_datetime(timetable[column]).dt.time
    return timetable
def get_active_routes():
    """Return the route numbers scheduled to be in service right now.

    Service days roll over at 3am: times before 03:00 are attributed to the
    previous calendar day's schedule (W = weekday, S = Saturday, U = Sunday).
    """
    now = datetime.datetime.now()
    day_index = now.weekday()
    clock = now.time()
    schedule = load_bus_schedule(schedule_path)
    if clock < datetime.time(3, 0):
        day_index = (day_index - 1) % 7
    if day_index < 5:
        day_code = "W"
    elif day_index == 5:
        day_code = "S"
    else:
        day_code = "U"
    in_service = (
        (schedule.type == day_code)
        & (schedule.first_departure <= clock)
        & (schedule.last_arrival >= clock)
    )
    return list(schedule[in_service].route)
def get_vehicles(api_key):
    """Query the BusTracker API for every active route; return vehicle dicts.

    The API accepts at most 10 routes per request, so the active routes are
    queried in chunks of 10. A network failure is fatal (logged, exit 1);
    an API-level error for a chunk is logged and that chunk is skipped.
    """
    vehicles = []
    routes = get_active_routes()
    # `range` replaces the Python-2-only `xrange`: identical behaviour here,
    # and the module now also runs under Python 3.
    rt_chunks = [routes[i:i + 10] for i in range(0, len(routes), 10)]
    for chunk in rt_chunks:
        rts_str = ",".join(chunk)
        payload = {'key': api_key, 'rt': rts_str, 'tmres': 's', 'format': 'json'}
        try:
            r = requests.get(URL, params=payload)
        except requests.exceptions.RequestException as e:
            logging.error(e)
            sys.exit(1)
        response = r.json().get('bustime-response')
        try:
            vehicles += response.get('vehicle')
        except TypeError:
            # No 'vehicle' list: the API reported an error for this chunk.
            e = response.get('error')
            logging.error(e)
    return vehicles
def main(api_key, log_path):
    """Scrape current vehicle positions and store them in the SQLite database."""
    configure_logger(log_path)
    # `with` on a sqlite3 connection wraps the block in a transaction
    # (commit on success, rollback on error); it does not close the connection.
    with sqlite3.connect(database_name) as conn:
        rows = get_vehicles(api_key)
        conn.cursor().executemany(insert, rows)
# Command-line entry point: the BusTracker API key is required; the error-log
# path is optional.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('key', help='bustracker api key')
    parser.add_argument('--log', default='bustracker_errors.log', help='specify log filepath')
    args = parser.parse_args()
    main(args.key, args.log)
| spencerchan/ctabus | src/remote/scraper.py | Python | gpl-3.0 | 2,538 |
from superdesk import Resource, Service
from superdesk.utils import ListCursor
from superdesk.errors import AlreadyExistsError
from flask_babel import _
# Registry of providers by name, and the list of names allowed in schemas.
registered_search_providers = {}
allowed_search_providers = []


def register_search_provider(name, fetch_endpoint=None, provider_class=None, label=None):
    """Register a Search Provider with the given name and fetch_endpoint.

    Both have to be unique and if not raises AlreadyExistsError.
    The fetch_endpoint is used by clients to fetch the article from the Search Provider.

    :param name: Search Provider Name
    :type name: str
    :param fetch_endpoint: relative url to /api
    :type fetch_endpoint: str
    :param provider_class: provider implementation
    :type provider: superdesk.SearchProvider
    :param label: label to use (None to use provider_class.label or name in this order)
    :type label: str
    :raises: AlreadyExistsError - if a search has been registered with either name or fetch_endpoint.
    :raises: ValueError - if fetch_endpoint is not a string, or if neither/both
        of fetch_endpoint and provider_class are given.
    """
    if fetch_endpoint is not None and not isinstance(fetch_endpoint, str):
        raise ValueError(_("fetch_endpoint must be a string"))
    if name in registered_search_providers:
        raise AlreadyExistsError("A Search Provider with name: {} already exists".format(name))
    if not ((fetch_endpoint and not provider_class) or (not fetch_endpoint and provider_class)):
        raise ValueError(_('You have to specify either fetch_endpoint or provider_class.'))

    provider_data = {}
    if fetch_endpoint:
        # BUGFIX: the duplicate-endpoint error used to look up the *new* name
        # in the registry (which always raised KeyError); report the provider
        # that actually owns the endpoint instead.
        for existing_name, existing_data in registered_search_providers.items():
            if existing_data.get('endpoint') == fetch_endpoint:
                raise AlreadyExistsError(
                    _("A Search Provider for the fetch endpoint: {endpoint} exists with name: {name}").format(
                        endpoint=fetch_endpoint, name=existing_name))
        provider_data['endpoint'] = fetch_endpoint
    else:
        provider_data['class'] = provider_class

    # Resolve the display label: explicit argument, then the class label,
    # then the provider name.
    if label is not None:
        provider_data['label'] = label
    elif provider_class is not None and hasattr(provider_class, 'label') and provider_class.label:
        provider_data['label'] = provider_class.label
    else:
        provider_data['label'] = name

    registered_search_providers[name] = provider_data
    allowed_search_providers.append(name)
class SearchProviderAllowedResource(Resource):
    """Read-only collection resource listing the registered search providers."""
    # Collection GET only; individual items cannot be fetched or modified.
    resource_methods = ['GET']
    item_methods = []
class SearchProviderAllowedService(Service):
    """Service returning an id + label summary of each registered provider."""

    def get(self, req, lookup):
        summaries = []
        for provider_id, data in registered_search_providers.items():
            summaries.append({
                'search_provider': provider_id,
                'label': data['label'],
            })
        return ListCursor(summaries)
| mdhaman/superdesk-core | apps/search_providers/registry.py | Python | agpl-3.0 | 2,908 |
# Tile coder over a 2D input space: numTilings overlapping 11x11 grids of
# tileWidth-sized tiles; each successive tiling is shifted diagonally by
# 1/numTilings of a tile.
numTilings = 8
tileWidth = 0.6
offset = tileWidth / numTilings  # same value as the original 0.6/8


def tilecode(x, y, tileIndices):
    """Fill tileIndices with one active tile index per tiling for (x, y).

    Tiling i owns its own block of 121 indices (an 11x11 grid), and sees the
    point shifted by i * offset along both axes.  Previously the tile width
    was hard-coded as 0.6 here despite the tileWidth constant existing for
    exactly that purpose; the constants are now used consistently.
    """
    tilesPerRow = 11
    tilesPerTiling = tilesPerRow * tilesPerRow  # 121
    for i in range(numTilings):
        col = int(x / tileWidth)
        row = int(y / tileWidth)
        tileIndices[i] = (row * tilesPerRow + col) + tilesPerTiling * i
        # Shift the point for the next tiling.
        x = x + offset
        y = y + offset
def printTileCoderIndices(x, y):
    """Print the tile indices produced by tilecode() for the point (x, y)."""
    tileIndices = [-1] * numTilings
    tilecode(x, y, tileIndices)
    # Single-argument print works identically as a Python 2 print statement
    # and a Python 3 function call (the original Python-2-only `print a, b`
    # statement was a SyntaxError under Python 3); the format string mirrors
    # the original comma-separated output byte for byte.
    print('Tile indices for input ( %s , %s ) are :  %s' % (x, y, tileIndices))
# Demo: print the active tile indices for a few sample points (the last two
# differ only once the per-tiling offset moves the point across a boundary).
printTileCoderIndices(0.1,0.1)
printTileCoderIndices(4.0,2.0)
printTileCoderIndices(5.99,5.99)
printTileCoderIndices(4.0,2.1)
| smithe0/366Ass2 | Tilecoder.py | Python | artistic-2.0 | 661 |
#!/usr/bin/python3
#system includes
import os
import sys
import getopt
import time
import threading
import matplotlib.pyplot as plt
# Helper includes
import gbl
import socketHelper
import displacement as disp
import plotHelper as pltHelper
def usage():
    """Print the command-line help for this wrapper script."""
    print("Wrapper for plotting the displacement")
    print("options:")
    print("\t-t <target> Define target: <native>, rpi")
    print("\t-m <mode> Define mode: <debug>, release")
    print("\t-p Only plot result")
    print("\t-h Print this help")
    print("\t-o <file> Define output file")
    print("\t-s <sequence> Define path to sequence to use")
    print("\t-c <alg> Define algorithm to use: <0>:SURF+BFM, 1:SURF+FLANN, 2:SIFT+BFM, 3:SIFT+FLANN")
    print("\t-l <logLevel> Define log level: <debug>, warning or error")
    print("\t-r Clean and rebuild")
    print("\t-i Try to interpolate result")
    print("\t-a Enable profiling")
    print("\t-d <mechanism> Multi-threading mechanism: <none>, openmp")
    print("\t-b Show stuff")
def main():
    """ Entry function: parse options, build/run the pipeline remotely in a
    worker thread, receive displacement points over a socket and plot them.
    Returns 0 on success, 1 on bad options. """
    scriptDir = os.path.dirname(os.path.realpath(__file__))
    try:
        opts, args = getopt.getopt(sys.argv[1:], "t:m:pho:s:c:l:riad:b", ["help", "output="])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err))
        usage()
        return 1
    # Option defaults (see usage() for the meaning of each flag).
    target = 'rpi'
    mode = 'debug'
    output = "displacements.result"
    skipProcessing = False
    sequence = os.path.abspath('{scriptDir}/../testSequences/tennisball_video2/tennisball_video2_DVD.dvd'.format(scriptDir=scriptDir))
    alg = 0
    logLevel = 'debug'
    rebuild = False
    interpolate = False
    profile = 'no'
    ip = '10.20.5.103'
    user = 'root'
    threadMode = 'none'
    showstuff = 'no'
    for o, a in opts:
        if o == "-t":
            target = a
        elif o == "-m":
            mode = a
        elif o in ("-h", "--help"):
            usage()
            return 0
        elif o in ("-o", "--output"):
            output = a
        elif o == "-p":
            # NOTE(review): skipProcessing is set here but never consulted
            # below — the -p (plot only) flag appears to be unimplemented.
            skipProcessing = True
        elif o == "-s":
            sequence = a
        elif o == "-c":
            alg = a
        elif o == "-l":
            logLevel = a
        elif o == "-r":
            rebuild = True
        elif o == "-i":
            interpolate = True
        elif o == "-a":
            profile = 'yes'
        elif o == "-d":
            threadMode = a
        elif o == "-b":
            showstuff = 'yes'
        else:
            assert False, "unhandled option"
            usage()
            return 1
    # -r: wipe the build directory so scons rebuilds from scratch.
    if(rebuild):
        fname = '{scriptDir}/../build/{target}/{mode}/bin/pipeline'.format(scriptDir=scriptDir,target=target, mode=mode)
        if(os.path.isfile(fname)):
            cmd = 'rm -rf {scriptDir}/../build/{target}/{mode}'.format(scriptDir=scriptDir,target=target,mode=mode)
            os.system(cmd)
    # Socket on which the remote pipeline will stream displacement points.
    serverHost,serverPort,serversocket = socketHelper.getSocket()
    displacements = disp.DisplacementCollection()
    color = 'b'
    threadSyncer = gbl.threadSyncVariables()
    # Execute
    # Worker thread builds + deploys + runs the pipeline; note `sequence` is
    # passed both as the sequence and as the remote command-line args.
    t = threading.Thread(target=executeApplication, args=(target, mode, sequence, alg, output, logLevel, profile, threadMode, showstuff, user, ip, sequence, serverHost, serverPort, threadSyncer))
    t.start()
    # Busy-wait until the build finishes, then release the worker to execute.
    # NOTE(review): this spin loop burns CPU; a threading.Event would be kinder.
    while(threadSyncer.doneBuilding == False):
        continue
    threadSyncer.startExecuting = True
    # Second thread receives the points while the main thread plots live.
    t2 = threading.Thread(target=socketHelper.runSocketServer, args=(serversocket, displacements, threadSyncer))
    t2.start()
    pltHelper.plotReceivedPoints(displacements, 'b', threadSyncer)
    # Live plot done; render the final processed/interpolated result.
    plt.close()
    plt.figure()
    pltHelper.processResults(displacements, color, interpolate)
    return 0
def executeApplication(target, mode, sequence, alg, output, logLevel, profile, threadMode, showstuff, user, ip, args, serverHost, serverPort, threadSyncer):
    """Build the pipeline binary, deploy it to the target over SSH, run it and
    fetch the result file.  Runs in a worker thread and synchronizes with
    main() via threadSyncer (doneBuilding / startExecuting flags).

    Returns gbl.RetCodes.RESULT_OK on success, RESULT_FAILURE otherwise.
    NOTE(review): the `sequence` parameter is unused — the caller passes the
    sequence path through `args` as well.
    """
    scriptDir = os.path.dirname(os.path.realpath(__file__))
    fname = '{scriptDir}/../build/{target}/{mode}/bin/pipeline'.format(scriptDir=scriptDir,target=target, mode=mode)
    # build application
    cmd = 'scons --directory {scriptDir}/.. --jobs 10 target={target} mode={mode} profile={profile} logLevel={logLevel} multithreading={threadMode} showstuff={showstuff} {buildTarget}'.format(scriptDir=scriptDir, target=target, mode=mode, logLevel=logLevel, threadMode=threadMode, buildTarget='pipeline', profile=profile, showstuff = showstuff)
    print(cmd)
    ret = os.system(cmd)
    if(ret != 0):
        print('Building returned error (code: {errorcode}). Exiting'.format(errorcode=ret))
        # NOTE(review): doneBuilding is never set on this path, so main()'s
        # busy-wait never terminates — confirm the intended shutdown behaviour.
        return gbl.RetCodes.RESULT_FAILURE
    # Copy the freshly built binary to the target's home directory.
    cmd = 'scp {local_file} {user}@{ip}:{remote_file}'.format(local_file=fname, user=user, ip=ip, remote_file='')
    ret = os.system(cmd)
    threadSyncer.doneBuilding = True
    while(threadSyncer.startExecuting == False):
        continue
    # Execute application remotely; it streams results back to serverHost:serverPort.
    cmd = "ssh -l {user} {ip} 'bash -c \"nice -20 ./pipeline {args} {alg} {address} {port}\"'".format(user=user, ip=ip, args=args, alg=alg, address=serverHost, port=serverPort)
    ret = os.system(cmd)
    if(ret != 0):
        print('Processing returned error (code: {errorcode}). Exiting'.format(errorcode=ret))
        return gbl.RetCodes.RESULT_FAILURE
    # Retrieve the result file produced on the target.
    cmd = "scp {user}@{ip}:{remote_file} .".format(remote_file=output, user=user, ip=ip)
    ret = os.system(cmd)
    if(ret != 0):
        print('Processing returned error (code: {errorcode}). Exiting'.format(errorcode=ret))
        # BUGFIX: previously fell through and returned RESULT_OK even though
        # retrieving the result file failed.
        return gbl.RetCodes.RESULT_FAILURE
    return gbl.RetCodes.RESULT_OK
if __name__ == "__main__":
    # Propagate main()'s status code (0 on success, 1 on bad options) as the
    # process exit code; previously the return value was silently discarded.
    sys.exit(main())
| tass-belgium/EmbeddedMT | wrappers/remoteStreamDisplacement.py | Python | gpl-2.0 | 5,748 |
import json
from datetime import datetime
import logging
from tornado import web, gen
from lib.blinx.core.router.routestar import AddRoute
from lib.blinx.core.handlers import BaseHandler
app_log = logging.getLogger("tornado.application")
@AddRoute(r'/test/whoami')
class TestValidateHandler(BaseHandler):
    """
    Echoes the authenticated user for /test/whoami, or a placeholder
    body ('lol') when no user is logged in.
    """
    @web.asynchronous
    @gen.coroutine
    def get(self):
        if self.get_current_user():
            self.write(self.get_current_user())
        else:
            self.write('lol')
@AddRoute(r'/userssearch')
class SearchUsers(BaseHandler):
    """
    Searches users via the backing API and renders a summary page of
    message counts per user.
    """
    @web.asynchronous
    @gen.coroutine
    def get(self):
        # Optional ?username= filter forwarded to the API.
        username = self.get_argument('username', default=None)
        users = yield self.api.search_users(username=username)
        app_log.debug(users.body)
        users = json.loads(users.body)
        # pull out only summary information
        # NOTE(review): dict.iteritems() is Python-2-only; use items() if this
        # ever moves to Python 3.
        users_to_render = {}
        for user, values in users.iteritems():
            users_to_render[user] = values['num_messages']
        self.render('summary.html', summary_items=users_to_render,
                    fa_summary_type="fa-user", uri_base='user',
                    title='User Blinx Summary',
                    user=self.current_user)
# NOTE(review): this class re-uses the name SearchUsers, shadowing the
# /userssearch handler above at module level (both routes still work because
# AddRoute registered each class at definition time); a distinct name such as
# SearchMessages would be clearer.
@AddRoute(r'/messagesearch')
class SearchUsers(BaseHandler):
    """
    Searches messages by topic via the backing API and writes each raw
    message to the response.
    """
    @web.asynchronous
    @gen.coroutine
    def get(self):
        # Optional ?topic= filter forwarded to the API.
        username = self.get_argument('topic', default=None)
        messages = yield self.api.search_messages(topic=username)
        app_log.debug(messages.body)
        messages = json.loads(messages.body)
        for l in messages:
            self.write(l)
        #self.write(messages.body)
        # # pull out only summary information
        # messages_to_render = {}
        # for message, values in messagess.iteritems():
        #     messages_to_render[message] = values['num_messages']
        #
        # self.render('message_display.html', summary_items=users_to_render,
        #             fa_summary_type="fa-user", uri_base='user',
        #             title='User Blinx Summary',
        #             user=self.current_user)
# Copyright (C) 2002-2007 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email import charset as _charset
Charset = _charset.Charset
NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
MAXLINELEN = 78
FWS = ' \t'
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (string, charset) pairs containing each of the decoded
    parts of the header.  Charset is None for non-encoded parts of the header,
    otherwise a lower-case string containing the name of the character set
    specified in the encoded string.

    header may be a string that may or may not contain RFC2047 encoded words,
    or it may be a Header object.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If it is a Header object, we can just return the encoded chunks.
    # (Duck-typed on the _chunks attribute rather than isinstance.)
    if hasattr(header, '_chunks'):
        return [(_charset._encode(string, str(charset)), str(charset))
                    for string, charset in header._chunks]
    # If no encoding, just return the header with no charset.
    if not ecre.search(header):
        return [(header, None)]
    # First step is to parse all the encoded parts into triplets of the form
    # (encoded_string, encoding, charset).  For unencoded strings, the last
    # two parts will be None.
    words = []
    for line in header.splitlines():
        parts = ecre.split(line)
        while parts:
            unencoded = parts.pop(0).strip()
            if unencoded:
                words.append((unencoded, None, None))
            if parts:
                charset = parts.pop(0).lower()
                encoding = parts.pop(0).lower()
                encoded = parts.pop(0)
                words.append((encoded, encoding, charset))
    # The next step is to decode each encoded word by applying the reverse
    # base64 or quopri transformation.  decoded_words is now a list of the
    # form (decoded_word, charset).
    decoded_words = []
    for encoded_string, encoding, charset in words:
        if encoding is None:
            # This is an unencoded word.
            decoded_words.append((encoded_string, charset))
        elif encoding == 'q':
            word = email.quoprimime.header_decode(encoded_string)
            decoded_words.append((word, charset))
        elif encoding == 'b':
            paderr = len(encoded_string) % 4   # Postel's law: add missing padding
            if paderr:
                encoded_string += '==='[:4 - paderr]
            try:
                word = email.base64mime.decode(encoded_string)
            except binascii.Error:
                raise HeaderParseError('Base64 decoding error')
            else:
                decoded_words.append((word, charset))
        else:
            # The regex only admits 'q' or 'b', so this is unreachable.
            raise AssertionError('Unexpected encoding: ' + encoding)
    # Now convert all words to bytes and collapse consecutive runs of
    # similarly encoded words.
    collapsed = []
    last_word = last_charset = None
    for word, charset in decoded_words:
        if isinstance(word, str):
            word = bytes(word, 'raw-unicode-escape')
        if last_word is None:
            last_word = word
            last_charset = charset
        elif charset != last_charset:
            collapsed.append((last_word, last_charset))
            last_word = word
            last_charset = charset
        elif last_charset is None:
            # Unencoded runs keep the whitespace that separated them.
            last_word += BSPACE + word
        else:
            last_word += word
    collapsed.append((last_word, last_charset))
    return collapsed
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Build a Header instance from a sequence of (string, charset) pairs.

    The pairs have the shape produced by decode_header(): charset is either
    None (meaning us-ascii), a charset name, or a Charset instance.  The
    maxlinelen, header_name and continuation_ws arguments are forwarded to
    the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # None is understood by Header.append() as us-ascii; only wrap real
        # charset names in a Charset instance.
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        header.append(string, charset)
    return header
class Header:
    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicitly via maxlinelen. For
        splitting the first line to a shorter value (to account for the field
        header which isn't included in s, e.g. `Subject') pass in the name of
        the field in header_name.  The default maxlinelen is 78 as recommended
        by RFC 2822.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        # _chunks holds the (string, Charset) pairs appended so far.
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        self._maxlinelen = maxlinelen
        if header_name is None:
            # _headerlen is the length already consumed on the first line.
            self._headerlen = 0
        else:
            # Take the separating colon and space into account.
            self._headerlen = len(header_name) + 2
    def __str__(self):
        """Return the string value of the header."""
        self._normalize()
        uchunks = []
        lastcs = None
        for string, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            nextcs = charset
            if nextcs == _charset.UNKNOWN8BIT:
                # Surrogate-escaped unknown bytes become U+FFFD replacements.
                original_bytes = string.encode('ascii', 'surrogateescape')
                string = original_bytes.decode('ascii', 'replace')
            if uchunks:
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii'):
                        uchunks.append(SPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii'):
                    uchunks.append(SPACE)
            lastcs = nextcs
            uchunks.append(string)
        return EMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a unicode (of the unencoded header value), swap the
# args and do another comparison.
return other == str(self)
def __ne__(self, other):
return not self == other
    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is false), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In either case, when producing an RFC 2822 compliant
        header using RFC 2047 rules, the string will be encoded using the
        output codec of the charset.  If the string cannot be encoded to the
        output codec, a UnicodeError will be raised.

        Optional `errors' is passed as the errors argument to the decode
        call if s is a byte string.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        if not isinstance(s, str):
            # Decode byte input to str using the charset's input codec.
            input_charset = charset.input_codec or 'us-ascii'
            if input_charset == _charset.UNKNOWN8BIT:
                s = s.decode('us-ascii', 'surrogateescape')
            else:
                s = s.decode(input_charset, errors)
        # Ensure that the bytes we're storing can be decoded to the output
        # character set, otherwise an early error is raised.
        output_charset = charset.output_codec or 'us-ascii'
        if output_charset != _charset.UNKNOWN8BIT:
            try:
                s.encode(output_charset, errors)
            except UnicodeEncodeError:
                if output_charset!='us-ascii':
                    raise
                # us-ascii could not represent the text: fall back to UTF-8.
                charset = UTF8
        self._chunks.append((s, charset))
    def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
        r"""Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        Optional maxlinelen specifies the maximum length of each generated
        line, exclusive of the linesep string.  Individual lines may be longer
        than maxlinelen if a folding point cannot be found.  The first line
        will be shorter by the length of the header name plus ": " if a header
        name was specified at Header construction time.  The default value for
        maxlinelen is determined at header construction time.

        Optional splitchars is a string containing characters which should be
        given extra weight by the splitting algorithm during normal header
        wrapping.  This is in very rough support of RFC 2822's `higher level
        syntactic breaks':  split points preceded by a splitchar are preferred
        during line splitting, with the characters preferred in the order in
        which they appear in the string.  Space and tab may be included in the
        string to indicate whether preference should be given to one over the
        other as a split point when other split chars do not appear in the line
        being split.  Splitchars does not affect RFC 2047 encoded lines.

        Optional linesep is a string to be used to separate the lines of
        the value.  The default value is the most useful for typical
        Python applications, but it can be set to \r\n to produce RFC-compliant
        line separators when needed.
        """
        self._normalize()
        if maxlinelen is None:
            maxlinelen = self._maxlinelen
        # A maxlinelen of 0 means don't wrap.  For all practical purposes,
        # choosing a huge number here accomplishes that and makes the
        # _ValueFormatter algorithm much simpler.
        if maxlinelen == 0:
            maxlinelen = 1000000
        formatter = _ValueFormatter(self._headerlen, maxlinelen,
                                    self._continuation_ws, splitchars)
        for string, charset in self._chunks:
            # Feed the first physical line of the chunk, then each further
            # line preceded by an explicit fold.
            lines = string.splitlines()
            if lines:
                formatter.feed('', lines[0], charset)
            else:
                formatter.feed('', '', charset)
            for line in lines[1:]:
                formatter.newline()
                if charset.header_encoding is not None:
                    # RFC 2047-encoded charsets: re-insert a single space
                    # after the fold in place of the stripped leading ws.
                    formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
                                   charset)
                else:
                    # Preserve the line's own leading folding whitespace.
                    sline = line.lstrip()
                    fws = line[:len(line)-len(sline)]
                    formatter.feed(fws, sline, charset)
            if len(lines) > 1:
                formatter.newline()
            formatter.add_transition()
        value = formatter._str(linesep)
        # Guard against header-injection: a formatted value must not itself
        # contain something that parses as a new embedded header.
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                "an embedded header: {!r}".format(value))
        return value
def _normalize(self):
# Step 1: Normalize the chunks so that all runs of identical charsets
# get collapsed into a single unicode string.
chunks = []
last_charset = None
last_chunk = []
for string, charset in self._chunks:
if charset == last_charset:
last_chunk.append(string)
else:
if last_charset is not None:
chunks.append((SPACE.join(last_chunk), last_charset))
last_chunk = [string]
last_charset = charset
if last_chunk:
chunks.append((SPACE.join(last_chunk), last_charset))
self._chunks = chunks
class _ValueFormatter:
    # Helper used by Header.encode(): accumulates (fws, text) parts and
    # folds them into physical lines no longer than maxlen, preferring to
    # break at the caller-supplied split characters.
    def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
        # headerlen: length of "HeaderName: " already consumed on the first
        # output line.  maxlen: per-line character budget.
        self._maxlen = maxlen
        self._continuation_ws = continuation_ws
        self._continuation_ws_len = len(continuation_ws)
        self._splitchars = splitchars
        # Completed output lines, and the line currently being built.
        self._lines = []
        self._current_line = _Accumulator(headerlen)
    def _str(self, linesep):
        # Flush any partially built line, then join with the separator.
        self.newline()
        return linesep.join(self._lines)
    def __str__(self):
        return self._str(NL)
    def newline(self):
        # Terminate the current line and start an empty one.  A trailing
        # (' ', '') part is the add_transition() marker and is discarded.
        end_of_line = self._current_line.pop()
        if end_of_line != (' ', ''):
            self._current_line.push(*end_of_line)
        if len(self._current_line) > 0:
            if self._current_line.is_onlyws():
                # Whitespace-only content extends the previous line rather
                # than producing an empty continuation line.
                # NOTE(review): this indexes self._lines[-1] and so appears
                # to assume a completed line already exists -- confirm
                # against Header.encode's call pattern before reuse.
                self._lines[-1] += str(self._current_line)
            else:
                self._lines.append(str(self._current_line))
        self._current_line.reset()
    def add_transition(self):
        # Record a chunk boundary; newline() drops it when it would
        # otherwise trail at the end of a line.
        self._current_line.push(' ', '')
    def feed(self, fws, string, charset):
        # If the charset has no header encoding (i.e. it is an ASCII encoding)
        # then we must split the header at the "highest level syntactic break"
        # possible. Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.  Eventually, this should be pluggable.
        if charset.header_encoding is None:
            self._ascii_split(fws, string, self._splitchars)
            return
        # Otherwise, we're doing either a Base64 or a quoted-printable
        # encoding which means we don't need to split the line on syntactic
        # breaks.  We can basically just find enough characters to fit on the
        # current line, minus the RFC 2047 chrome.  What makes this trickier
        # though is that we have to split at octet boundaries, not character
        # boundaries but it's only safe to split at character boundaries so at
        # best we can only get close.
        encoded_lines = charset.header_encode_lines(string, self._maxlengths())
        # The first element extends the current line, but if it's None then
        # nothing more fit on the current line so start a new line.
        try:
            first_line = encoded_lines.pop(0)
        except IndexError:
            # There are no encoded lines, so we're done.
            return
        if first_line is not None:
            self._append_chunk(fws, first_line)
        try:
            last_line = encoded_lines.pop()
        except IndexError:
            # There was only one line.
            return
        # The last encoded line starts a new output line; any lines left
        # in between are emitted whole in the loop below.
        self.newline()
        self._current_line.push(self._continuation_ws, last_line)
        # Everything else are full lines in themselves.
        for line in encoded_lines:
            self._lines.append(self._continuation_ws + line)
    def _maxlengths(self):
        # Generator of per-line length budgets for header_encode_lines():
        # first the space left on the current line, then full continuation
        # lines forever.
        # The first line's length.
        yield self._maxlen - len(self._current_line)
        while True:
            yield self._maxlen - self._continuation_ws_len
    def _ascii_split(self, fws, string, splitchars):
        # The RFC 2822 header folding algorithm is simple in principle but
        # complex in practice.  Lines may be folded any place where "folding
        # white space" appears by inserting a linesep character in front of the
        # FWS.  The complication is that not all spaces or tabs qualify as FWS,
        # and we are also supposed to prefer to break at "higher level
        # syntactic breaks".  We can't do either of these without intimate
        # knowledge of the structure of structured headers, which we don't have
        # here.  So the best we can do here is prefer to break at the specified
        # splitchars, and hope that we don't choose any spaces or tabs that
        # aren't legal FWS.  (This is at least better than the old algorithm,
        # where we would sometimes *introduce* FWS after a splitchar, or the
        # algorithm before that, where we would turn all white space runs into
        # single spaces or tabs.)
        parts = re.split("(["+FWS+"]+)", fws+string)
        if parts[0]:
            # The string did not start with FWS; pad so parts alternates
            # [fws, text, fws, text, ...].
            parts[:0] = ['']
        else:
            parts.pop(0)
        # Pair up (fws, text) by consuming the same iterator twice per step.
        for fws, part in zip(*[iter(parts)]*2):
            self._append_chunk(fws, part)
    def _append_chunk(self, fws, string):
        self._current_line.push(fws, string)
        if len(self._current_line) > self._maxlen:
            # Find the best split point, working backward from the end.
            # There might be none, on a long first line.
            for ch in self._splitchars:
                for i in range(self._current_line.part_count()-1, 0, -1):
                    if ch.isspace():
                        # Whitespace splitchars match where a part's own
                        # FWS begins with that character.
                        fws = self._current_line[i][0]
                        if fws and fws[0]==ch:
                            break
                    prevpart = self._current_line[i-1][1]
                    if prevpart and prevpart[-1]==ch:
                        break
                else:
                    # No break point for this splitchar; try the next one.
                    continue
                break
            else:
                # No acceptable split point at all: the line stays long.
                fws, part = self._current_line.pop()
                if self._current_line._initial_size > 0:
                    # There will be a header, so leave it on a line by itself.
                    self.newline()
                    if not fws:
                        # We don't use continuation_ws here because the whitespace
                        # after a header should always be a space.
                        fws = ' '
                self._current_line.push(fws, part)
                return
            # Split found: flush everything before index i and continue a
            # new line with the remainder.
            remainder = self._current_line.pop_from(i)
            self._lines.append(str(self._current_line))
            self._current_line.reset(remainder)
class _Accumulator(list):
def __init__(self, initial_size=0):
self._initial_size = initial_size
super().__init__()
def push(self, fws, string):
self.append((fws, string))
def pop_from(self, i=0):
popped = self[i:]
self[i:] = []
return popped
def pop(self):
if self.part_count()==0:
return ('', '')
return super().pop()
def __len__(self):
return sum((len(fws)+len(part) for fws, part in self),
self._initial_size)
def __str__(self):
return EMPTYSTRING.join((EMPTYSTRING.join((fws, part))
for fws, part in self))
def reset(self, startval=None):
if startval is None:
startval = []
self[:] = startval
self._initial_size = 0
def is_onlyws(self):
return self._initial_size==0 and (not self or str(self).isspace())
def part_count(self):
return super().__len__()
| LaoZhongGu/kbengine | kbe/src/lib/python/Lib/email/header.py | Python | lgpl-3.0 | 22,711 |
#!/usr/bin/env python
# coding=utf-8
# Python 2-only preamble: `reload` as a builtin and sys.setdefaultencoding
# exist only on Python 2, so this module cannot run under Python 3.
# NOTE(review): setdefaultencoding("utf-8") is a process-wide hack that
# changes implicit str/unicode coercion globally -- confirm the tests
# actually depend on it before porting.
import random,os,sys,unittest,run_app,codecs
reload(sys)
sys.setdefaultencoding( "utf-8" )
class TestCaseUnit(unittest.TestCase):
def test_positive_manifest1(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest1-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest1-positive"))
def test_positive_manifest10(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest10-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest10-positive"))
def test_positive_manifest100(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest100-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest100-positive"))
def test_positive_manifest101(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest101-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest101-positive"))
def test_positive_manifest102(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest102-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest102-positive"))
def test_positive_manifest103(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest103-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest103-positive"))
def test_positive_manifest104(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest104-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest104-positive"))
def test_positive_manifest105(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest105-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest105-positive"))
def test_positive_manifest106(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest106-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest106-positive"))
def test_positive_manifest107(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest107-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest107-positive"))
def test_positive_manifest108(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest108-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest108-positive"))
def test_positive_manifest109(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest109-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest109-positive"))
def test_positive_manifest11(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest11-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest11-positive"))
def test_positive_manifest110(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest110-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest110-positive"))
def test_positive_manifest111(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest111-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest111-positive"))
def test_positive_manifest112(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest112-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest112-positive"))
def test_positive_manifest113(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest113-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest113-positive"))
def test_positive_manifest114(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest114-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest114-positive"))
def test_positive_manifest115(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest115-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest115-positive"))
def test_positive_manifest116(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest116-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest116-positive"))
def test_positive_manifest117(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest117-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest117-positive"))
def test_positive_manifest118(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest118-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest118-positive"))
def test_positive_manifest119(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest119-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest119-positive"))
def test_positive_manifest12(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest12-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest12-positive"))
def test_positive_manifest120(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest120-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest120-positive"))
def test_positive_manifest121(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest121-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest121-positive"))
def test_positive_manifest122(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest122-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest122-positive"))
def test_positive_manifest123(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest123-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest123-positive"))
def test_positive_manifest124(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest124-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest124-positive"))
def test_positive_manifest125(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest125-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest125-positive"))
def test_positive_manifest126(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest126-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest126-positive"))
def test_positive_manifest127(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest127-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest127-positive"))
def test_positive_manifest128(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest128-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest128-positive"))
def test_positive_manifest129(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest129-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest129-positive"))
def test_positive_manifest13(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest13-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest13-positive"))
def test_positive_manifest130(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest130-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest130-positive"))
def test_positive_manifest131(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest131-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest131-positive"))
def test_positive_manifest132(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest132-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest132-positive"))
def test_positive_manifest133(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest133-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest133-positive"))
def test_positive_manifest134(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest134-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest134-positive"))
def test_positive_manifest135(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest135-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest135-positive"))
def test_positive_manifest136(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest136-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest136-positive"))
def test_positive_manifest137(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest137-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest137-positive"))
def test_positive_manifest138(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest138-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest138-positive"))
def test_positive_manifest139(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest139-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest139-positive"))
def test_positive_manifest14(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest14-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest14-positive"))
def test_positive_manifest140(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest140-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest140-positive"))
def test_positive_manifest141(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest141-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest141-positive"))
def test_positive_manifest142(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest142-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest142-positive"))
def test_positive_manifest143(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest143-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest143-positive"))
def test_positive_manifest144(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest144-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest144-positive"))
def test_positive_manifest145(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest145-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest145-positive"))
def test_positive_manifest146(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest146-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest146-positive"))
def test_positive_manifest147(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest147-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest147-positive"))
def test_positive_manifest148(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest148-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest148-positive"))
def test_positive_manifest149(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest149-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest149-positive"))
def test_positive_manifest15(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest15-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest15-positive"))
def test_positive_manifest150(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest150-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest150-positive"))
def test_positive_manifest151(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest151-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest151-positive"))
def test_positive_manifest152(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest152-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest152-positive"))
def test_positive_manifest153(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest153-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest153-positive"))
def test_positive_manifest154(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest154-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest154-positive"))
def test_positive_manifest155(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest155-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest155-positive"))
def test_positive_manifest156(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest156-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest156-positive"))
def test_positive_manifest157(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest157-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest157-positive"))
def test_positive_manifest158(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest158-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest158-positive"))
def test_positive_manifest159(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest159-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest159-positive"))
def test_positive_manifest16(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest16-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest16-positive"))
def test_positive_manifest160(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest160-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest160-positive"))
def test_positive_manifest161(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest161-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest161-positive"))
def test_positive_manifest162(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest162-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest162-positive"))
def test_positive_manifest163(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest163-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest163-positive"))
def test_positive_manifest164(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest164-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest164-positive"))
def test_positive_manifest165(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest165-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest165-positive"))
def test_positive_manifest166(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest166-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest166-positive"))
def test_positive_manifest167(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest167-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest167-positive"))
def test_positive_manifest168(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest168-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest168-positive"))
def test_positive_manifest169(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest169-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest169-positive"))
def test_positive_manifest17(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest17-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest17-positive"))
def test_positive_manifest170(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest170-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest170-positive"))
def test_positive_manifest171(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest171-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest171-positive"))
def test_positive_manifest172(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest172-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest172-positive"))
def test_positive_manifest173(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest173-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest173-positive"))
def test_positive_manifest174(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest174-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest174-positive"))
def test_positive_manifest175(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest175-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest175-positive"))
def test_positive_manifest176(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest176-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest176-positive"))
def test_positive_manifest177(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest177-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest177-positive"))
def test_positive_manifest178(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest178-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest178-positive"))
def test_positive_manifest179(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest179-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest179-positive"))
def test_positive_manifest18(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest18-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest18-positive"))
def test_positive_manifest180(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest180-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest180-positive"))
def test_positive_manifest181(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest181-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest181-positive"))
def test_positive_manifest182(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest182-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest182-positive"))
def test_positive_manifest183(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest183-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest183-positive"))
def test_positive_manifest184(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest184-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest184-positive"))
def test_positive_manifest185(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest185-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest185-positive"))
def test_positive_manifest186(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest186-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest186-positive"))
def test_positive_manifest187(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest187-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest187-positive"))
def test_positive_manifest188(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest188-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest188-positive"))
def test_positive_manifest189(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest189-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest189-positive"))
def test_positive_manifest19(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest19-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest19-positive"))
def test_positive_manifest190(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest190-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest190-positive"))
def test_positive_manifest191(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest191-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest191-positive"))
def test_positive_manifest192(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest192-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest192-positive"))
def test_positive_manifest193(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest193-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest193-positive"))
def test_positive_manifest194(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest194-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest194-positive"))
def test_positive_manifest195(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest195-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest195-positive"))
def test_positive_manifest196(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest196-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest196-positive"))
def test_positive_manifest197(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest197-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest197-positive"))
def test_positive_manifest198(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest198-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest198-positive"))
def test_positive_manifest199(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest199-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest199-positive"))
def test_positive_manifest2(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest2-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest2-positive"))
def test_positive_manifest20(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest20-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest20-positive"))
def test_positive_manifest200(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest200-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest200-positive"))
def test_positive_manifest201(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest201-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest201-positive"))
def test_positive_manifest202(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest202-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest202-positive"))
def test_positive_manifest203(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest203-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest203-positive"))
def test_positive_manifest204(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest204-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest204-positive"))
def test_positive_manifest205(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest205-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest205-positive"))
def test_positive_manifest206(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest206-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest206-positive"))
def test_positive_manifest207(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest207-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest207-positive"))
def test_positive_manifest208(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest208-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest208-positive"))
def test_positive_manifest209(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest209-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest209-positive"))
def test_positive_manifest21(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest21-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest21-positive"))
def test_positive_manifest210(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest210-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest210-positive"))
def test_positive_manifest211(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest211-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest211-positive"))
def test_positive_manifest212(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest212-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest212-positive"))
def test_positive_manifest213(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest213-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest213-positive"))
def test_positive_manifest214(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest214-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest214-positive"))
def test_positive_manifest215(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest215-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest215-positive"))
def test_positive_manifest216(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest216-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest216-positive"))
def test_positive_manifest217(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest217-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest217-positive"))
def test_positive_manifest218(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest218-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest218-positive"))
def test_positive_manifest219(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest219-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest219-positive"))
def test_positive_manifest22(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest22-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest22-positive"))
def test_positive_manifest220(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest220-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest220-positive"))
def test_positive_manifest221(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest221-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest221-positive"))
def test_positive_manifest222(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest222-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest222-positive"))
def test_positive_manifest223(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest223-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest223-positive"))
def test_positive_manifest224(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest224-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest224-positive"))
def test_positive_manifest225(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest225-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest225-positive"))
def test_positive_manifest226(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest226-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest226-positive"))
def test_positive_manifest227(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest227-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest227-positive"))
def test_positive_manifest228(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest228-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest228-positive"))
def test_positive_manifest229(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest229-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest229-positive"))
def test_positive_manifest23(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest23-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest23-positive"))
def test_positive_manifest230(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest230-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest230-positive"))
def test_positive_manifest231(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest231-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest231-positive"))
def test_positive_manifest232(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest232-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest232-positive"))
def test_positive_manifest233(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest233-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest233-positive"))
def test_positive_manifest234(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest234-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest234-positive"))
def test_positive_manifest235(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest235-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest235-positive"))
def test_positive_manifest236(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest236-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest236-positive"))
def test_positive_manifest237(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest237-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest237-positive"))
def test_positive_manifest238(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest238-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest238-positive"))
def test_positive_manifest239(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest239-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest239-positive"))
def test_positive_manifest24(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest24-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest24-positive"))
def test_positive_manifest240(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest240-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest240-positive"))
def test_positive_manifest241(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest241-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest241-positive"))
def test_positive_manifest242(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest242-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest242-positive"))
def test_positive_manifest243(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest243-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest243-positive"))
def test_positive_manifest244(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest244-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest244-positive"))
def test_positive_manifest245(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest245-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest245-positive"))
def test_positive_manifest246(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest246-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest246-positive"))
def test_positive_manifest247(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest247-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest247-positive"))
def test_positive_manifest248(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest248-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest248-positive"))
def test_positive_manifest249(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest249-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest249-positive"))
def test_positive_manifest25(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest25-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest25-positive"))
def test_positive_manifest250(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest250-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest250-positive"))
def test_positive_manifest251(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest251-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest251-positive"))
def test_positive_manifest252(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest252-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest252-positive"))
def test_positive_manifest253(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest253-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest253-positive"))
def test_positive_manifest254(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest254-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest254-positive"))
def test_positive_manifest255(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest255-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest255-positive"))
def test_positive_manifest256(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest256-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest256-positive"))
def test_positive_manifest257(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest257-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest257-positive"))
def test_positive_manifest258(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest258-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest258-positive"))
def test_positive_manifest259(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest259-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest259-positive"))
def test_positive_manifest26(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest26-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest26-positive"))
def test_positive_manifest260(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest260-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest260-positive"))
def test_positive_manifest261(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest261-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest261-positive"))
def test_positive_manifest262(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest262-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest262-positive"))
def test_positive_manifest263(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest263-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest263-positive"))
def test_negative_manifest264(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest264-negative", "/opt/wrt-manifest-android-tests/apks/x86/manifest264-negative"))
def test_negative_manifest265(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest265-negative", "/opt/wrt-manifest-android-tests/apks/x86/manifest265-negative"))
def test_negative_manifest266(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest266-negative", "/opt/wrt-manifest-android-tests/apks/x86/manifest266-negative"))
def test_positive_manifest27(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest27-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest27-positive"))
def test_positive_manifest28(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest28-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest28-positive"))
def test_positive_manifest29(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest29-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest29-positive"))
def test_positive_manifest3(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest3-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest3-positive"))
def test_positive_manifest30(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest30-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest30-positive"))
def test_positive_manifest31(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest31-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest31-positive"))
def test_positive_manifest32(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest32-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest32-positive"))
def test_positive_manifest33(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest33-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest33-positive"))
def test_positive_manifest34(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest34-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest34-positive"))
def test_positive_manifest35(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest35-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest35-positive"))
def test_positive_manifest36(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest36-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest36-positive"))
def test_positive_manifest37(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest37-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest37-positive"))
def test_positive_manifest38(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest38-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest38-positive"))
def test_positive_manifest39(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest39-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest39-positive"))
def test_positive_manifest4(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest4-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest4-positive"))
def test_positive_manifest40(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest40-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest40-positive"))
def test_positive_manifest41(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest41-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest41-positive"))
def test_positive_manifest42(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest42-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest42-positive"))
def test_positive_manifest43(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest43-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest43-positive"))
def test_positive_manifest44(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest44-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest44-positive"))
def test_positive_manifest45(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest45-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest45-positive"))
def test_positive_manifest46(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest46-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest46-positive"))
def test_positive_manifest47(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest47-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest47-positive"))
def test_positive_manifest48(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest48-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest48-positive"))
def test_positive_manifest49(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest49-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest49-positive"))
def test_positive_manifest5(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest5-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest5-positive"))
def test_positive_manifest50(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest50-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest50-positive"))
def test_positive_manifest51(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest51-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest51-positive"))
def test_positive_manifest52(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest52-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest52-positive"))
def test_positive_manifest53(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest53-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest53-positive"))
def test_positive_manifest54(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest54-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest54-positive"))
def test_positive_manifest55(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest55-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest55-positive"))
def test_positive_manifest56(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest56-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest56-positive"))
def test_positive_manifest57(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest57-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest57-positive"))
def test_positive_manifest58(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest58-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest58-positive"))
def test_positive_manifest59(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest59-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest59-positive"))
def test_positive_manifest6(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest6-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest6-positive"))
def test_positive_manifest60(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest60-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest60-positive"))
def test_positive_manifest61(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest61-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest61-positive"))
def test_positive_manifest62(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest62-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest62-positive"))
def test_positive_manifest63(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest63-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest63-positive"))
def test_positive_manifest64(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest64-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest64-positive"))
def test_positive_manifest65(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest65-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest65-positive"))
def test_positive_manifest66(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest66-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest66-positive"))
def test_positive_manifest67(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest67-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest67-positive"))
def test_positive_manifest68(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest68-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest68-positive"))
def test_positive_manifest69(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest69-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest69-positive"))
def test_positive_manifest7(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest7-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest7-positive"))
def test_positive_manifest70(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest70-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest70-positive"))
def test_positive_manifest71(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest71-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest71-positive"))
def test_positive_manifest72(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest72-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest72-positive"))
def test_positive_manifest73(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest73-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest73-positive"))
def test_positive_manifest74(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest74-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest74-positive"))
def test_positive_manifest75(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest75-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest75-positive"))
def test_positive_manifest76(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest76-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest76-positive"))
def test_positive_manifest77(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest77-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest77-positive"))
def test_positive_manifest78(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest78-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest78-positive"))
def test_positive_manifest79(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest79-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest79-positive"))
def test_positive_manifest8(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest8-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest8-positive"))
def test_positive_manifest80(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest80-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest80-positive"))
def test_positive_manifest81(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest81-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest81-positive"))
def test_positive_manifest82(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest82-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest82-positive"))
def test_positive_manifest83(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest83-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest83-positive"))
def test_positive_manifest84(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest84-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest84-positive"))
def test_positive_manifest85(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest85-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest85-positive"))
def test_positive_manifest86(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest86-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest86-positive"))
def test_positive_manifest87(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest87-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest87-positive"))
def test_positive_manifest88(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest88-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest88-positive"))
def test_positive_manifest89(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest89-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest89-positive"))
def test_positive_manifest9(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest9-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest9-positive"))
def test_positive_manifest90(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest90-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest90-positive"))
def test_positive_manifest91(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest91-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest91-positive"))
def test_positive_manifest92(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest92-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest92-positive"))
def test_positive_manifest93(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest93-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest93-positive"))
def test_positive_manifest94(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest94-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest94-positive"))
def test_positive_manifest95(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest95-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest95-positive"))
def test_positive_manifest96(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest96-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest96-positive"))
def test_positive_manifest97(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest97-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest97-positive"))
def test_positive_manifest98(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest98-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest98-positive"))
def test_positive_manifest99(self):
self.assertEqual("PASS", run_app.tryRunApp("manifest99-positive", "/opt/wrt-manifest-android-tests/apks/x86/manifest99-positive"))
if __name__ == '__main__':
unittest.main() | pk-sam/crosswalk-test-suite | wrt/wrt-manifest-android-tests/tests.py | Python | bsd-3-clause | 47,245 |
from __future__ import absolute_import, division, print_function
import os
import fnmatch
import unittest
def get_modules():
    """Collect dotted module names for every ``*_test.py`` file directly in
    this package's directory (sub-directories are intentionally ignored).

    Returns:
        list[str]: names such as ``'qiniuManager.test.foo_test'``, suitable
        for ``unittest.defaultTestLoader.loadTestsFromNames``.
    """
    test_dir = os.path.dirname(__file__) or '.'
    try:
        filenames = os.listdir(test_dir)
    except OSError:
        # Directory unreadable/missing: behave like the old os.walk-based
        # implementation, which simply yielded nothing.
        return []
    # fnmatch.filter matches the whole listing in one call; [:-3] strips
    # the trailing '.py' to turn a filename into a module name.
    return ['qiniuManager.test.' + name[:-3]
            for name in fnmatch.filter(filenames, '*_test.py')]
def tester(modules):
suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromNames(modules))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
if __name__ == '__main__':
    # When executed as a script, discover every *_test.py module in this
    # package directory and run the whole collection.
    tester(get_modules())
| hellflame/qiniu_manager | qiniuManager/test/run.py | Python | mit | 723 |
from django.core.urlresolvers import resolve, reverse
import furl
from rest_framework import serializers as ser
import pytz
from modularodm import Q
from framework.auth.core import Auth, User
from website import settings
from website.files.models import FileNode
from website.project.model import Comment
from website.util import api_v2_url
from api.base.serializers import (
FileCommentRelationshipField,
format_relationship_links,
IDField,
JSONAPIListField,
JSONAPISerializer,
Link,
LinksField,
NodeFileHyperLinkField,
RelationshipField,
TypeField,
WaterbutlerLink,
)
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse
from api.base.utils import get_user_auth
class CheckoutField(ser.HyperlinkedRelatedField):
    """Relationship field for a file's ``checkout`` user.

    Renders as a JSON-API links object pointing at the user who has the
    file checked out.  On write it accepts either ``None`` (check the file
    back in) or a user id — and, per ``get_queryset``, only the id of the
    user making the request.
    """
    default_error_messages = {'invalid_data': 'Checkout must be either the current user or null'}
    # Tells the JSON-API serializer machinery this field serializes to a
    # links object rather than a plain attribute.
    json_api_link = True  # serializes to a links object
    def __init__(self, **kwargs):
        # Force the field to be writable and nullable, looked up by the
        # 'user_id' URL kwarg against the user's pk.
        kwargs['queryset'] = True
        kwargs['read_only'] = False
        kwargs['allow_null'] = True
        kwargs['lookup_field'] = 'pk'
        kwargs['lookup_url_kwarg'] = 'user_id'
        self.meta = {'id': 'user_id'}
        self.link_type = 'related'
        self.always_embed = kwargs.pop('always_embed', False)
        # Always targets the user detail view.
        super(CheckoutField, self).__init__('users:user-detail', **kwargs)
    def resolve(self, resource):
        """
        Resolves the view when embedding.
        """
        # NOTE(review): assumes resource.stored_object.checkout is set when
        # embedding is requested — a checked-in file would have no pk here.
        embed_value = resource.stored_object.checkout.pk
        kwargs = {self.lookup_url_kwarg: embed_value}
        return resolve(
            reverse(
                self.view_name,
                kwargs=kwargs
            )
        )
    def get_queryset(self):
        # Restrict the writable queryset to the requesting user only; this
        # is what makes "checkout must be the current user" enforceable.
        return User.find(Q('_id', 'eq', self.context['request'].user._id))
    def get_url(self, obj, view_name, request, format):
        # A checked-in file (checkout is None) serializes to an empty links
        # object instead of a URL.
        if obj is None:
            return {}
        return super(CheckoutField, self).get_url(obj, view_name, request, format)
    def to_internal_value(self, data):
        """Validate the incoming checkout value.

        ``None`` passes through; otherwise the id must match a user in
        ``get_queryset()`` (i.e. the requesting user) or validation fails
        with 'invalid_data'.
        """
        if data is None:
            return None
        try:
            return next(
                user for user in
                self.get_queryset()
                if user._id == data
            )
        except StopIteration:
            self.fail('invalid_data')
    def to_representation(self, value):
        """Render the related-link structure, attaching the user id as
        relationship meta when the file is checked out."""
        url = super(CheckoutField, self).to_representation(value)
        rel_meta = None
        if value:
            rel_meta = {'id': value._id}
        ret = format_relationship_links(related_link=url, rel_meta=rel_meta)
        return ret
class FileTagField(ser.Field):
    """Serializer field that renders a tag object as its ``_id`` string."""
    def to_representation(self, obj):
        # A missing tag renders as None; otherwise expose its primary id.
        return None if obj is None else obj._id
    def to_internal_value(self, data):
        # Incoming tag values are accepted verbatim.
        return data
class FileSerializer(JSONAPISerializer):
    """JSON-API serializer for OSF files and folders.

    Combines attributes taken directly from the file node with several
    computed attributes (size, dates, extra metadata) that come from the
    latest version for osfstorage files or from the provider history for
    add-on files.  ``update`` supports changing tags and checkout state.
    """
    # Fields clients may filter on via query parameters.
    filterable_fields = frozenset([
        'id',
        'name',
        'node',
        'kind',
        'path',
        'materialized_path',
        'size',
        'provider',
        'last_touched',
        'tags',
    ])
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    guid = ser.SerializerMethodField(read_only=True,
                                     method_name='get_file_guid',
                                     help_text='OSF GUID for this file (if one has been assigned)')
    # Writable: see CheckoutField for the "current user or null" constraint.
    checkout = CheckoutField()
    name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
    kind = ser.CharField(read_only=True, help_text='Either folder or file')
    path = ser.CharField(read_only=True, help_text='The unique path used to reference this object')
    size = ser.SerializerMethodField(read_only=True, help_text='The size of this file at this version')
    provider = ser.CharField(read_only=True, help_text='The Add-on service this file originates from')
    materialized_path = ser.CharField(
        read_only=True, help_text='The Unix-style path of this object relative to the provider root')
    last_touched = ser.DateTimeField(read_only=True, help_text='The last time this file had information fetched about it via the OSF')
    date_modified = ser.SerializerMethodField(read_only=True, help_text='Timestamp when the file was last modified')
    date_created = ser.SerializerMethodField(read_only=True, help_text='Timestamp when the file was created')
    extra = ser.SerializerMethodField(read_only=True, help_text='Additional metadata about this file')
    # Writable (osfstorage only — see update()).
    tags = JSONAPIListField(child=FileTagField(), required=False)
    current_user_can_comment = ser.SerializerMethodField(help_text='Whether the current user is allowed to post comments')
    # Relationship links: folders expose their children, files their versions.
    files = NodeFileHyperLinkField(
        related_view='nodes:node-files',
        related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder'
    )
    versions = NodeFileHyperLinkField(
        related_view='files:file-versions',
        related_view_kwargs={'file_id': '<_id>'},
        kind='file'
    )
    comments = FileCommentRelationshipField(related_view='nodes:node-comments',
                                            related_view_kwargs={'node_id': '<node._id>'},
                                            related_meta={'unread': 'get_unread_comments_count'},
                                            filter={'target': 'get_file_guid'}
                                            )
    node = RelationshipField(related_view='nodes:node-detail',
                             related_view_kwargs={'node_id': '<node._id>'},
                             help_text='The project that this file belongs to'
                             )
    # Action links; the Waterbutler links point at the file-service API.
    links = LinksField({
        'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
        'move': WaterbutlerLink(),
        'upload': WaterbutlerLink(),
        'delete': WaterbutlerLink(),
        'download': WaterbutlerLink(must_be_file=True),
        'new_folder': WaterbutlerLink(must_be_folder=True, kind='folder'),
    })
    class Meta:
        type_ = 'files'
    def get_size(self, obj):
        """Size of the latest version, or None if no versions exist."""
        if obj.versions:
            return obj.versions[-1].size
        return None
    def get_date_modified(self, obj):
        """Modified timestamp (UTC), from versions or provider history."""
        mod_dt = None
        if obj.provider == 'osfstorage' and obj.versions:
            # Each time an osfstorage file is added or uploaded, a new version object is created with its
            # date_created equal to the time of the update. The date_modified is the modified date
            # from the backend the file is stored on. This field refers to the modified date on osfstorage,
            # so prefer to use the date_created of the latest version.
            mod_dt = obj.versions[-1].date_created
        elif obj.provider != 'osfstorage' and obj.history:
            mod_dt = obj.history[-1].get('modified', None)
        return mod_dt and mod_dt.replace(tzinfo=pytz.utc)
    def get_date_created(self, obj):
        """Created timestamp (UTC), from the first version or history entry."""
        creat_dt = None
        if obj.provider == 'osfstorage' and obj.versions:
            creat_dt = obj.versions[0].date_created
        elif obj.provider != 'osfstorage' and obj.history:
            # Non-osfstorage files don't store a created date, so instead get the modified date of the
            # earliest entry in the file history.
            creat_dt = obj.history[0].get('modified', None)
        return creat_dt and creat_dt.replace(tzinfo=pytz.utc)
    def get_extra(self, obj):
        """Extra metadata: md5/sha256 hashes, shaped like a Waterbutler response."""
        metadata = {}
        if obj.provider == 'osfstorage' and obj.versions:
            metadata = obj.versions[-1].metadata
        elif obj.provider != 'osfstorage' and obj.history:
            metadata = obj.history[-1].get('extra', {})
        extras = {}
        extras['hashes'] = {  # mimic waterbutler response
            'md5': metadata.get('md5', None),
            'sha256': metadata.get('sha256', None),
        }
        return extras
    def get_current_user_can_comment(self, obj):
        """Whether the requesting user (anonymous allowed) may comment on the node."""
        user = self.context['request'].user
        auth = Auth(user if not user.is_anonymous() else None)
        return obj.node.can_comment(auth)
    def get_unread_comments_count(self, obj):
        """Unread comment count on this file for the requesting user (0 if anonymous)."""
        user = self.context['request'].user
        if user.is_anonymous():
            return 0
        return Comment.find_n_unread(user=user, node=obj.node, page='files', root_id=obj.get_guid()._id)
    def user_id(self, obj):
        # NOTE: obj is the user here, the meta field for
        # Hyperlinks is weird
        if obj:
            return obj._id
        return None
    def update(self, instance, validated_data):
        """Apply tag changes, checkout changes, and other writable attrs.

        Raises Conflict when tags are supplied for a non-osfstorage provider.
        """
        assert isinstance(instance, FileNode), 'Instance must be a FileNode'
        if instance.provider != 'osfstorage' and 'tags' in validated_data:
            raise Conflict('File service provider {} does not support tags on the OSF.'.format(instance.provider))
        auth = get_user_auth(self.context['request'])
        old_tags = set([tag._id for tag in instance.tags])
        if 'tags' in validated_data:
            current_tags = set(validated_data.pop('tags', []))
        else:
            # No tags supplied: keep the existing set unchanged.
            current_tags = set(old_tags)
        # Diff the tag sets: add what's new, remove what's gone.
        for new_tag in (current_tags - old_tags):
            instance.add_tag(new_tag, auth=auth)
        for deleted_tag in (old_tags - current_tags):
            instance.remove_tag(deleted_tag, auth=auth)
        for attr, value in validated_data.items():
            if attr == 'checkout':
                # Checkout has dedicated check-in/out logic on the model.
                user = self.context['request'].user
                instance.check_in_or_out(user, value)
            else:
                setattr(instance, attr, value)
        instance.save()
        return instance
    def is_valid(self, **kwargs):
        # Skip HTML cleaning: file metadata (e.g. names) may legitimately
        # contain characters the sanitizer would alter.
        return super(FileSerializer, self).is_valid(clean_html=False, **kwargs)
    def get_file_guid(self, obj):
        """The file's GUID string, or None if no GUID has been assigned."""
        if obj:
            guid = obj.get_guid()
            if guid:
                return guid._id
        return None
    def get_absolute_url(self, obj):
        """Canonical v2 API URL for this file."""
        return api_v2_url('files/{}/'.format(obj._id))
class FileDetailSerializer(FileSerializer):
    """
    Overrides FileSerializer to make id required.

    Used on detail endpoints, where write requests must include the id of
    the file being modified.
    """
    id = IDField(source='_id', required=True)
class FileVersionSerializer(JSONAPISerializer):
    """JSON-API serializer for a single stored version of a file."""
    # Fields clients may filter on via query parameters.
    filterable_fields = frozenset([
        'id',
        'size',
        'identifier',
        'content_type',
    ])
    id = ser.CharField(read_only=True, source='identifier')
    size = ser.IntegerField(read_only=True, help_text='The size of this file at this version')
    # Fixed typo in the user-facing help text: "verison" -> "version".
    content_type = ser.CharField(read_only=True, help_text='The mime type of this file at this version')
    links = LinksField({
        'self': 'self_url',
        'html': 'absolute_url'
    })
    class Meta:
        type_ = 'file_versions'
    def self_url(self, obj):
        """API URL for this specific version of the file."""
        return absolute_reverse('files:version-detail', kwargs={
            'version_id': obj.identifier,
            'file_id': self.context['view'].kwargs['file_id']
        })
    def absolute_url(self, obj):
        """Web (HTML) URL for the file, pinned to this version via the
        provider's version query parameter."""
        fobj = self.context['view'].get_file()
        return furl.furl(settings.DOMAIN).set(
            path=(fobj.node._id, 'files', fobj.provider, fobj.path.lstrip('/')),
            query={fobj.version_identifier: obj.identifier}  # TODO this can probably just be changed to revision or version
        ).url
    def get_absolute_url(self, obj):
        # The canonical URL for a version is its API self link.
        return self.self_url(obj)
| SSJohns/osf.io | api/files/serializers.py | Python | apache-2.0 | 11,382 |
import socket
import threading
from pluginHelper.IPlugin import ICryptoPlugin
class tcpRelay():
    # This Class is used directly with no modification on both proxy side
    def __init__(self, pluginStack: list, uplink: socket.socket, downlink: socket.socket):
        """Bidirectional relay between two connected sockets.

        Data read from ``downlink`` is passed through every plugin's
        encapsulate() (in stack order) and written to ``uplink``; data read
        from ``uplink`` is passed through decapsulate() in reverse order and
        written to ``downlink``. Traffic counters track raw byte totals.
        """
        self.pluginStack = pluginStack
        self.uplink = uplink
        self.downlink = downlink
        # Byte counters, updated on every successful recv/send.
        self.traffic_downlink_send = 0
        self.traffic_downlink_recv = 0
        self.traffic_uplink_send = 0
        self.traffic_uplink_recv = 0
        pass
    def registerEncryptionPlugin(self, plugin: ICryptoPlugin):
        # Plugins are applied in append order on send, reverse order on receive.
        self.pluginStack.append(plugin)
    def closeConnections(self):
        # Best-effort close of both ends; errors are deliberately swallowed
        # because either socket may already be closed by the peer thread.
        try:
            self.uplink.close()
        except:
            pass
        try:
            self.downlink.close()
        except:
            pass
    def receive_proc(self):
        # Uplink -> downlink pump: decapsulate through the plugin stack in
        # reverse registration order. Runs on its own daemon thread.
        # print("[RELAY_PROC_RECV START]")
        while True:
            try:
                dat = self.uplink.recv(2048)
                # print("[RELAY:RECV]", dat)
                if len(dat) == 0:
                    # when recv returns zero lengthed buffer, it means stream ends, or connection closed.
                    self.closeConnections()
                    break
                self.traffic_uplink_recv = self.traffic_uplink_recv + len(dat)
                for i in range(len(self.pluginStack)):
                    if len(dat) > 0:
                        dat = self.pluginStack[len(self.pluginStack) - 1 - i].decapsulate(dat)
                    else:
                        break
                if len(dat) > 0:
                    self.downlink.send(dat)
                    self.traffic_downlink_send = self.traffic_downlink_send + len(dat)
            except Exception as ex:
                # print("[RECV] Connection Closed because:", ex)
                self.closeConnections()
                break
        # print("[RELAY_PROC_RECV END]")
        pass
    def start(self):
        # this is a blocking mode method.
        # should start a new thread for receiving data
        # and use current thread for sending data.
        t_recv = threading.Thread(target=self.receive_proc, daemon=True)
        t_recv.start()
        # print("[RELAY_PROC_SEND START]")
        while True:
            try:
                dat = self.downlink.recv(2048)
                # print("[RELAY:SEND]", dat)
                if len(dat) == 0:
                    self.closeConnections()
                    break
                self.traffic_downlink_recv = self.traffic_downlink_recv + len(dat)
                # NOTE(review): unlike receive_proc, this loop does not stop
                # early when a plugin returns an empty payload -- confirm
                # encapsulate() never returns b''.
                for i in range(len(self.pluginStack)):
                    dat = self.pluginStack[i].encapsulate(dat)
                if len(dat)>0:
                    self.uplink.send(dat)
                    self.traffic_uplink_send = self.traffic_uplink_send + len(dat)
            except Exception as ex:
                # print("[SEND] Connection Closed because:", ex)
                self.closeConnections()
                break
        # print("[RELAY_PROC_SEND END]")
# This module initializes flags for optional dependencies.
# HAS_PANDAS is True when pandas can be imported, False otherwise.
HAS_PANDAS = True
try:
    import pandas
except ImportError:
    HAS_PANDAS = False
| sandeepkrjha/pgmpy | pgmpy/base/OptionalDependency.py | Python | mit | 145 |
# Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import collections
# The Iterable ABC lives in collections.abc on Python 3 and directly in
# collections on Python 2; resolve it once at import time.
Iterable = (collections.abc.Iterable
            if sys.version_info[0] == 3
            else collections.Iterable)
class DynamicMixin(object):
    """Dynamic assertions mixin."""

    def __getattr__(self, attr):
        """Asserts that val has attribute attr and that attribute's value is equal to other via a dynamic assertion of the form: has_<attr>()."""
        if not attr.startswith('has_'):
            raise AttributeError('assertpy has no assertion <%s()>' % attr)

        name = attr[4:]
        is_dict = isinstance(self.val, Iterable) and hasattr(self.val, '__getitem__')

        # Work out (but do not raise) the missing-attribute/key error here;
        # raising is deferred until the returned assertion is invoked.
        deferred_error = False
        if not hasattr(self.val, name):
            if is_dict:
                if name not in self.val:
                    deferred_error = 'Expected key <%s>, but val has no key <%s>.' % (name, name)
            else:
                deferred_error = 'Expected attribute <%s>, but val has no attribute <%s>.' % (name, name)

        def _assertion(*args, **kwargs):
            if deferred_error:
                self._err(deferred_error)  # ok to raise AssertionError now that we are inside wrapper
            else:
                if len(args) != 1:
                    raise TypeError('assertion <%s()> takes exactly 1 argument (%d given)' % (attr, len(args)))
                # Prefer a real attribute; fall back to item lookup for dicts.
                try:
                    target = getattr(self.val, name)
                except AttributeError:
                    target = self.val[name]
                if callable(target):
                    try:
                        actual = target()
                    except TypeError:
                        raise TypeError('val does not have zero-arg method <%s()>' % name)
                else:
                    actual = target
                expected = args[0]
                if actual != expected:
                    self._err('Expected <%s> to be equal to <%s> on %s <%s>, but was not.' % (actual, expected, 'key' if is_dict else 'attribute', name))
            return self

        return _assertion
"""
Utility classes and functions to handle connection to a libvirt host system
The entire contents of callables in this module (minus the names defined in
NOCLOSE below), will become methods of the Virsh and VirshPersistent classes.
A Closure class is used to wrap the module functions, lambda does not
properly store instance state in this implementation.
Because none of the methods have a 'self' parameter defined, the classes
are defined to be dict-like, and get passed in to the methods as a the
special ``**dargs`` parameter. All virsh module functions _MUST_ include a
special ``**dargs`` (variable keyword arguments) to accept non-default
keyword arguments.
The standard set of keyword arguments to all functions/modules is declared
in the VirshBase class. Only the 'virsh_exec' key is guaranteed to always
be present, the remainder may or may not be provided. Therefore, virsh
functions/methods should use the dict.get() method to retrieve with a default
for non-existent keys.
:copyright: 2012 Red Hat Inc.
"""
import signal
import logging
import re
import weakref
import time
import select
import locale
import base64
import aexpect
from avocado.utils import path
from avocado.utils import process
from six.moves import urllib
from virttest import propcan
from virttest import remote
from virttest import utils_misc
# list of symbol names NOT to wrap as Virsh class methods
# Everything else from globals() will become a method of Virsh class.
# Evaluated here so that every name imported above is excluded, while
# every function defined below is included.
NOCLOSE = list(globals().keys()) + [
    'NOCLOSE', 'SCREENSHOT_ERROR_COUNT', 'VIRSH_COMMAND_CACHE',
    'VIRSH_EXEC', 'VirshBase', 'VirshClosure', 'VirshSession', 'Virsh',
    'VirshPersistent', 'VirshConnectBack', 'VIRSH_COMMAND_GROUP_CACHE',
    'VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL',
]
# Needs to be in-scope for Virsh* class screenshot method and module function
SCREENSHOT_ERROR_COUNT = 0
# Cache of virsh commands, used by help_command_group() and help_command_only()
# TODO: Make the cache into a class attribute on VirshBase class.
VIRSH_COMMAND_CACHE = None
VIRSH_COMMAND_GROUP_CACHE = None
VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL = False
# This is used both inside and outside classes
# Fall back to /bin/true so the module still imports on hosts without virsh.
try:
    VIRSH_EXEC = path.find_command("virsh")
except path.CmdNotFoundError:
    logging.warning("Virsh executable not set or found on path, "
                    "virsh module will not function normally")
    VIRSH_EXEC = '/bin/true'
class VirshBase(propcan.PropCanBase):

    """
    Base Class storing libvirt Connection & state to a host
    """

    # Standard keyword arguments shared by all virsh module functions.
    __slots__ = ('uri', 'ignore_status', 'debug', 'virsh_exec', 'readonly')

    def __init__(self, *args, **dargs):
        """
        Initialize instance with virsh_exec always set to something
        """
        init_dict = dict(*args, **dargs)
        # Fill in defaults for any standard key the caller omitted.
        init_dict['virsh_exec'] = init_dict.get('virsh_exec', VIRSH_EXEC)
        init_dict['uri'] = init_dict.get('uri', None)
        init_dict['debug'] = init_dict.get('debug', False)
        init_dict['ignore_status'] = init_dict.get('ignore_status', False)
        init_dict['readonly'] = init_dict.get('readonly', False)
        super(VirshBase, self).__init__(init_dict)

    def get_uri(self):
        """
        Accessor method for 'uri' property that must exist
        """
        # self.get() would call get_uri() recursively
        try:
            return self.__dict_get__('uri')
        except KeyError:
            # 'uri' was never set; treat as default (local) connection.
            return None
class VirshSession(aexpect.ShellSession):

    """
    A virsh shell session, used with Virsh instances.
    """

    # No way to get virsh sub-command "exit" status
    # Check output against list of known error-status strings.
    # Raw strings avoid invalid escape-sequence warnings for \s on modern
    # Python; the pattern text itself is unchanged.
    ERROR_REGEX_LIST = [r'error:\s*.+$', r'.*failed.*']

    def __init__(self, virsh_exec=None, uri=None, a_id=None,
                 prompt=r"virsh\s*[\#\>]\s*", remote_ip=None,
                 remote_user=None, remote_pwd=None,
                 ssh_remote_auth=False, readonly=False,
                 unprivileged_user=None,
                 auto_close=False, check_libvirtd=True):
        """
        Initialize virsh session server, or client if id set.

        :param virsh_exec: path to virsh executable
        :param uri: uri of libvirt instance to connect to
        :param id: ID of an already running server, if accessing a running
                server, or None if starting a new one.
        :param prompt: Regular expression describing the shell's prompt line.
        :param remote_ip: Hostname/IP of remote system to ssh into (if any)
        :param remote_user: Username to ssh in as (if any)
        :param remote_pwd: Password to use, or None for host/pubkey
        :param auto_close: Param to init ShellSession.
        :param ssh_remote_auth: ssh to remote first.(VirshConnectBack).
                                Then execute virsh commands.

        Because the VirshSession is designed for class VirshPersistent, so
        the default value of auto_close is False, and we manage the reference
        to VirshSession in VirshPersistent manually with counter_increase and
        counter_decrease. If you really want to use it directly over VirshPe-
        rsistent, please init it with auto_close=True, then the session will
        be closed in __del__.

        * session = VirshSession(virsh.VIRSH_EXEC, auto_close=True)
        """
        self.uri = uri
        self.remote_ip = remote_ip
        self.remote_user = remote_user
        self.remote_pwd = remote_pwd

        # Special handling if setting up a remote session
        if ssh_remote_auth:  # remote to remote
            if remote_pwd:
                pref_auth = "-o PreferredAuthentications=password"
            else:
                pref_auth = "-o PreferredAuthentications=hostbased,publickey"
            # ssh_cmd is not None flags this as remote session
            ssh_cmd = ("ssh -o UserKnownHostsFile=/dev/null %s -p %s %s@%s"
                       % (pref_auth, 22, self.remote_user, self.remote_ip))
            if uri:
                self.virsh_exec = ("%s \"%s -c '%s'\""
                                   % (ssh_cmd, virsh_exec, self.uri))
            else:
                self.virsh_exec = ("%s \"%s\"" % (ssh_cmd, virsh_exec))
        else:  # setting up a local session or re-using a session
            self.virsh_exec = virsh_exec
            if self.uri:
                self.virsh_exec += " -c '%s'" % self.uri
            ssh_cmd = None  # flags not-remote session

        if readonly:
            self.virsh_exec += " -r"

        if unprivileged_user:
            self.virsh_exec = "su - %s -c '%s'" % (unprivileged_user,
                                                   self.virsh_exec)

        # aexpect tries to auto close session because no clients connected yet
        aexpect.ShellSession.__init__(self, self.virsh_exec, a_id,
                                      prompt=prompt, auto_close=auto_close)

        # Handle remote session prompts:
        # 1.remote to remote with ssh
        # 2.local to remote with "virsh -c uri"
        if ssh_remote_auth or self.uri:
            # Handle ssh / password prompts
            remote.handle_prompts(self, self.remote_user, self.remote_pwd,
                                  prompt, debug=True)

        # fail if libvirtd is not running
        if check_libvirtd:
            if self.cmd_status('list', timeout=60) != 0:
                logging.debug("Persistent virsh session is not responding, "
                              "libvirtd may be dead.")
                self.auto_close = True
                raise aexpect.ShellStatusError(virsh_exec, 'list')

    def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
                          print_func=None, safe=False):
        """
        Send a virsh command and return its exit status and output.

        :param cmd: virsh command to send (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :param safe: Whether using safe mode when execute cmd.
                In serial sessions, frequently the kernel might print debug or
                error messages that make read_up_to_prompt to timeout. Let's
                try to be a little more robust and send a carriage return, to
                see if we can get to the prompt when safe=True.

        :return: A tuple (status, output) where status is the exit status and
                output is the output of cmd
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ShellStatusError: Raised if the exit status cannot be obtained
        :raise ShellError: Raised if an unknown error occurs
        """
        out = self.cmd_output(cmd, timeout, internal_timeout, print_func, safe)
        # Exit status is unavailable in-session; infer failure from output.
        for line in out.splitlines():
            if self.match_patterns(line, self.ERROR_REGEX_LIST) is not None:
                return 1, out
        return 0, out

    def cmd_result(self, cmd, ignore_status=False, debug=False, timeout=60):
        """Mimic process.run()"""
        exit_status, stdout = self.cmd_status_output(cmd, timeout=timeout)
        stderr = ''  # no way to retrieve this separately
        result = process.CmdResult(cmd, stdout, stderr, exit_status)
        if not ignore_status and exit_status:
            raise process.CmdError(cmd, result,
                                   "Virsh Command returned non-zero exit status")
        if debug:
            logging.debug(result)
        return result

    def read_until_output_matches(self, patterns, filter_func=lambda x: x,
                                  timeout=60, internal_timeout=None,
                                  print_func=None, match_func=None):
        """
        Read from child using read_nonblocking until a pattern matches.

        Read using read_nonblocking until a match is found using match_patterns,
        or until timeout expires. Before attempting to search for a match, the
        data is filtered using the filter_func function provided.

        :param patterns: List of strings (regular expression patterns)
        :param filter_func: Function to apply to the data read from the child before
                attempting to match it against the patterns (should take and
                return a string)
        :param timeout: The duration (in seconds) to wait until a match is
                found
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :param match_func: Function to compare the output and patterns.
        :return: Tuple containing the match index and the data read so far
        :raise ExpectTimeoutError: Raised if timeout expires
        :raise ExpectProcessTerminatedError: Raised if the child process
                terminates while waiting for output
        :raise ExpectError: Raised if an unknown error occurs
        """
        if not match_func:
            match_func = self.match_patterns
        fd = self._get_fd("expect")
        o = ""
        end_time = time.time() + timeout
        while True:
            try:
                r, w, x = select.select([fd], [], [],
                                        max(0, end_time - time.time()))
            except (select.error, TypeError):
                break
            if not r:
                raise aexpect.ExpectTimeoutError(patterns, o)
            # Read data from child
            data = self.read_nonblocking(internal_timeout,
                                         end_time - time.time())
            if not data:
                break
            # Print it if necessary
            if print_func:
                for line in data.splitlines():
                    print_func(line)
            # Look for patterns
            o += data
            out = ''
            match = match_func(filter_func(o), patterns)
            if match is not None:
                output = o.splitlines()
                # Find the second match in output reverse list, only return
                # the content between the last match and the second last match.
                # read_nonblocking might include output of last command or help
                # info when session initiated,
                # e.g.
                # When use VirshPersistent initiate a virsh session, an list
                # command is send in to test libvirtd status, and the first
                # command output will be like:
                # Welcome to virsh, the virtualization interactive terminal.
                #
                # Type:  'help' for help with commands
                #        'quit' to quit
                #
                # virsh #  Id    Name                           State
                # ----------------------------------------------------
                #
                # virsh #
                # the session help info is included, and the exact output
                # should be the content start after first virsh # prompt.
                # The list command did no harm here with help info included,
                # but sometime other commands get list command output included,
                # e.g.
                # Running virsh command: net-list --all
                # Sending command: net-list --all
                #  Id    Name                           State
                # ----------------------------------------------------
                #
                # virsh #  Name            State      Autostart     Persistent
                # ----------------------------------------------------------
                #  default              active     yes           yes
                #
                # virsh #
                # The list command output is mixed in the net-list command
                # output, this will fail to extract network name if use set
                # number 2 in list of output splitlines like in function
                # virsh.net_state_dict.
                for i in reversed(list(range(len(output) - 1))):
                    if match_func(output[i].strip(), patterns) is not None:
                        if re.split(patterns[match], output[i])[-1]:
                            output[i] = re.split(patterns[match],
                                                 output[i])[-1]
                            output_slice = output[i:]
                        else:
                            output_slice = output[i + 1:]
                        for j in range(len(output_slice) - 1):
                            output_slice[j] = output_slice[j] + '\n'
                        for k in range(len(output_slice)):
                            out += output_slice[k]
                        return match, out
                return match, o

        # Check if the child has terminated
        if utils_misc.wait_for(lambda: not self.is_alive(), 5, 0, 0.1):
            raise aexpect.ExpectProcessTerminatedError(patterns,
                                                       self.get_status(), o)
        else:
            # This shouldn't happen
            raise aexpect.ExpectError(patterns, o)
# Work around for inconsistent builtin closure local reference problem
# across different versions of python
class VirshClosure(object):

    """
    Callable with weak ref. to override ``**dargs`` when calling reference_function
    """

    def __init__(self, reference_function, dict_like_instance):
        """
        Store reference_function plus a weak reference to dict_like_instance.

        :raise ValueError: if dict_like_instance is not a dict (sub)class.
        """
        if not issubclass(dict_like_instance.__class__, dict):
            raise ValueError("dict_like_instance %s must be dict or subclass"
                             % dict_like_instance.__class__.__name__)
        self.reference_function = reference_function
        # Weak ref: the closure must not keep the instance alive.
        self.dict_like_weakref = weakref.ref(dict_like_instance)

    def __call__(self, *args, **dargs):
        """
        Call reference_function with dict_like_instance augmented by **dargs

        :param args: Passthrough to reference_function
        :param dargs: Updates dict_like_instance copy before call
        """
        # Dead weakref (or empty dict) -> no defaults to merge in.
        defaults = self.dict_like_weakref() or {}
        for key, value in defaults.items():
            # Caller-supplied keyword arguments always win.
            dargs.setdefault(key, value)
        return self.reference_function(*args, **dargs)
class Virsh(VirshBase):

    """
    Execute libvirt operations, using a new virsh shell each time.
    """

    __slots__ = []

    def __init__(self, *args, **dargs):
        """
        Initialize Virsh instance with persistent options

        :param args: Initial property keys/values
        :param dargs: Initial property keys/values
        """
        super(Virsh, self).__init__(*args, **dargs)
        # Define the instance callables from the contents of this module
        # to avoid using class methods and hand-written aliases.
        # Each module-level function (not listed in NOCLOSE) is wrapped in a
        # VirshClosure so calls automatically receive this instance's
        # properties as default **dargs.
        for sym, ref in list(globals().items()):
            if sym not in NOCLOSE and callable(ref):
                # Adding methods, not properties, so avoid special __slots__
                # handling. __getattribute__ will still find these.
                self.__super_set__(sym, VirshClosure(ref, self))
class VirshPersistent(Virsh):

    """
    Execute libvirt operations using persistent virsh session.
    """

    __slots__ = ('session_id', 'remote_pwd', 'remote_user', 'uri',
                 'remote_ip', 'ssh_remote_auth', 'unprivileged_user',
                 'readonly')

    # B/c the auto_close of VirshSession is False, we
    # need to manage the ref-count of it manually.
    # Maps session_id -> number of VirshPersistent instances sharing it.
    COUNTERS = {}

    def __init__(self, *args, **dargs):
        super(VirshPersistent, self).__init__(*args, **dargs)
        if self.get('session_id') is None:
            # set_uri does not call when INITIALIZED = False
            # and no session_id passed to super __init__
            self.new_session()
        # increase the counter of session_id in COUNTERS.
        self.counter_increase()

    def __del__(self):
        """
        Clean up any leftover sessions
        """
        self.close_session()

    def counter_increase(self):
        """
        Method to increase the counter to self.a_id in COUNTERS.
        """
        session_id = self.__dict_get__("session_id")
        # Membership test replaces the previous EAFP probe that bound two
        # unused names; behavior is identical.
        if session_id in self.__class__.COUNTERS:
            VirshPersistent.COUNTERS[session_id] += 1
        else:
            VirshPersistent.COUNTERS[session_id] = 1

    def counter_decrease(self):
        """
        Method to decrease the counter to self.a_id in COUNTERS.
        If the counter is less than 1, it means there is no more
        VirshSession instance referring to the session. So close
        this session, and return True.
        Else, decrease the counter in COUNTERS and return False.
        """
        session_id = self.__dict_get__("session_id")
        self.__class__.COUNTERS[session_id] -= 1
        counter = self.__class__.COUNTERS[session_id]
        if counter <= 0:
            # The last reference to this session. Closing it.
            session = VirshSession(a_id=session_id)
            # try nicely first
            session.close()
            if session.is_alive():
                # Be mean, in case it's hung
                session.close(sig=signal.SIGTERM)
            del self.__class__.COUNTERS[session_id]
            return True
        else:
            return False

    def close_session(self):
        """
        If a persistent session exists, close it down.
        """
        try:
            session_id = self.__dict_get__('session_id')
            if session_id:
                try:
                    existing = VirshSession(a_id=session_id)
                    if existing.is_alive():
                        self.counter_decrease()
                except (aexpect.ShellStatusError,
                        aexpect.ShellProcessTerminatedError):
                    # session was already closed
                    pass  # don't check is_alive or update counter
                self.__dict_del__("session_id")
        except KeyError:
            # Allow other exceptions to be raised
            pass  # session was closed already

    def new_session(self):
        """
        Open new session, closing any existing
        """
        # Accessors may call this method, avoid recursion
        # Must exist, can't be None
        virsh_exec = self.__dict_get__('virsh_exec')
        uri = self.__dict_get__('uri')  # Must exist, can be None
        readonly = self.__dict_get__('readonly')
        # The remaining connection properties are all optional.
        try:
            remote_user = self.__dict_get__('remote_user')
        except KeyError:
            remote_user = "root"
        try:
            remote_pwd = self.__dict_get__('remote_pwd')
        except KeyError:
            remote_pwd = None
        try:
            remote_ip = self.__dict_get__('remote_ip')
        except KeyError:
            remote_ip = None
        try:
            ssh_remote_auth = self.__dict_get__('ssh_remote_auth')
        except KeyError:
            ssh_remote_auth = False
        try:
            unprivileged_user = self.__dict_get__('unprivileged_user')
        except KeyError:
            unprivileged_user = None

        self.close_session()
        # Always create new session
        new_session = VirshSession(virsh_exec, uri, a_id=None,
                                   remote_ip=remote_ip,
                                   remote_user=remote_user,
                                   remote_pwd=remote_pwd,
                                   ssh_remote_auth=ssh_remote_auth,
                                   unprivileged_user=unprivileged_user,
                                   readonly=readonly)
        session_id = new_session.get_id()
        self.__dict_set__('session_id', session_id)

    def set_uri(self, uri):
        """
        Accessor method for 'uri' property, create new session on change
        """
        if not self.INITIALIZED:
            # Allow __init__ to call new_session
            self.__dict_set__('uri', uri)
        else:
            # If the uri is changing
            if self.__dict_get__('uri') != uri:
                self.__dict_set__('uri', uri)
                self.new_session()
            # otherwise do nothing
class VirshConnectBack(VirshPersistent):

    """
    Persistent virsh session connected back from a remote host
    """

    __slots__ = ('remote_ip', )

    def new_session(self):
        """
        Open new remote session, closing any existing
        """
        # Accessors may call this method, avoid recursion
        # Must exist, can't be None
        virsh_exec = self.__dict_get__('virsh_exec')
        uri = self.__dict_get__('uri')  # Must exist, can be None
        remote_ip = self.__dict_get__('remote_ip')
        try:
            remote_user = self.__dict_get__('remote_user')
        except KeyError:
            remote_user = 'root'
        try:
            remote_pwd = self.__dict_get__('remote_pwd')
        except KeyError:
            remote_pwd = None
        super(VirshConnectBack, self).close_session()
        # ssh_remote_auth=True: always tunnel through ssh back to remote_ip.
        new_session = VirshSession(virsh_exec, uri, a_id=None,
                                   remote_ip=remote_ip,
                                   remote_user=remote_user,
                                   remote_pwd=remote_pwd,
                                   ssh_remote_auth=True)
        session_id = new_session.get_id()
        self.__dict_set__('session_id', session_id)

    @staticmethod
    def kosher_args(remote_ip, uri):
        """
        Convenience static method to help validate argument sanity before use

        :param remote_ip: ip/hostname of remote libvirt helper-system
        :param uri: fully qualified libvirt uri of local system, from remote.
        :return: True/False if checks pass or not
        """
        if remote_ip is None or uri is None:
            return False
        # NOTE(review): 'uri is None' below is unreachable after the early
        # return above; harmless, but redundant.
        all_false = [
            # remote_ip checks
            bool(remote_ip.count("EXAMPLE.COM")),
            bool(remote_ip.count("localhost")),
            bool(remote_ip.count("127.")),
            # uri checks
            uri is None,
            uri == "",
            bool(uri.count("default")),
            bool(uri.count(':///')),
            bool(uri.count("localhost")),
            bool(uri.count("127."))
        ]
        return True not in all_false
# virsh module functions follow (See module docstring for API) #####
def command(cmd, **dargs):
    """
    Interface to cmd function as 'cmd' symbol is polluted.

    :param cmd: Command line to append to virsh command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    :raise: CmdError if non-zero exit status and ignore_status=False
    """

    virsh_exec = dargs.get('virsh_exec', VIRSH_EXEC)
    uri = dargs.get('uri', None)
    virsh_opt = dargs.get('virsh_opt', '')
    debug = dargs.get('debug', False)
    # Caller deals with errors
    ignore_status = dargs.get('ignore_status', True)
    session_id = dargs.get('session_id', None)
    readonly = dargs.get('readonly', False)
    quiet = dargs.get('quiet', False)
    unprivileged_user = dargs.get('unprivileged_user', None)
    timeout = dargs.get('timeout', None)
    allow_output_check = dargs.get('allow_output_check', None)

    # Check if this is a VirshPersistent method call
    if session_id:
        # Retrieve existing session
        session = VirshSession(a_id=session_id)
    else:
        session = None

    if debug:
        logging.debug("Running virsh command: %s", cmd)

    if timeout:
        try:
            timeout = int(timeout)
        except ValueError:
            logging.error("Ignore the invalid timeout value: %s", timeout)
            timeout = None

    if session:
        # Utilize persistent virsh session, not suit for readonly mode
        if readonly:
            logging.debug("Ignore readonly flag for this virsh session")
        if timeout is None:
            timeout = 60
        ret = session.cmd_result(cmd, ignore_status=ignore_status,
                                 debug=debug, timeout=timeout)
        # Mark return value with session it came from
        ret.from_session_id = session_id
    else:
        # Normal call to run virsh command
        # Readonly mode
        if readonly:
            cmd = " -r " + cmd

        if quiet:
            cmd = " -q " + cmd

        if uri:
            # uri argument IS being used
            uri_arg = " -c '%s' " % uri
        else:
            uri_arg = " "  # No uri argument being used

        cmd = "%s%s%s%s" % (virsh_exec, virsh_opt, uri_arg, cmd)

        if unprivileged_user:
            # Run cmd as unprivileged user
            cmd = "su - %s -c '%s'" % (unprivileged_user, cmd)

        # Raise exception if ignore_status is False
        ret = process.run(cmd, timeout=timeout, verbose=debug,
                          ignore_status=ignore_status,
                          allow_output_check=allow_output_check,
                          shell=True)
        # Mark return as not coming from persistent virsh session
        ret.from_session_id = None

    # Always log debug info, if persistent session or not
    if debug:
        logging.debug("status: %s", ret.exit_status)
        logging.debug("stdout: %s", ret.stdout_text.strip())
        logging.debug("stderr: %s", ret.stderr_text.strip())

    # Return CmdResult instance when ignore_status is True
    return ret
def domname(dom_id_or_uuid, **dargs):
    """
    Convert a domain id or UUID to domain name

    :param dom_id_or_uuid: a domain id or UUID.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "domname --domain {}".format(dom_id_or_uuid)
    return command(cmd, **dargs)
def qemu_monitor_command(name, cmd, options="", **dargs):
    """
    Execute a qemu monitor command through virsh.

    :param name: Name of monitor domain
    :param cmd: monitor command to execute
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    """
    cmd_str = "qemu-monitor-command {} {} --cmd '{}'".format(name, options, cmd)
    return command(cmd_str, **dargs)
def qemu_agent_command(name, cmd, options="", **dargs):
    """
    Execute a qemu agent command through virsh.

    :param name: Name of monitor domain
    :param cmd: agent command to execute
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    """
    cmd_str = "qemu-agent-command {} {} --cmd '{}'".format(name, options, cmd)
    return command(cmd_str, **dargs)
def qemu_attach(pid, extra="", **dargs):
    """
    Execute the qemu-attach command through virsh.

    :param pid: pid of qemu process
    :param extra: extra options
    :param dargs: standardized virsh function API keywords
    """
    cmd_str = "qemu-attach --pid {} {}".format(pid, extra)
    return command(cmd_str, **dargs)
def setvcpus(name, count, extra="", **dargs):
    """
    Change the number of virtual CPUs in the guest domain.

    :param name: name of vm to affect
    :param count: value for vcpu parameter
    :param extra: any extra command options.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object from command
    """
    return command("setvcpus {} {} {}".format(name, count, extra), **dargs)
def setvcpu(name, cpulist, extra="", **dargs):
    """
    attach/detach vcpu or groups of threads

    :param name: name of vm to affect
    :param cpulist: group of vcpu numbers
    :param extra: any extra command options.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object from command
    """
    return command("setvcpu {} {} {}".format(name, cpulist, extra), **dargs)
def guestvcpus(name, cpu_list=None, options=None, **dargs):
    """
    Query or modify state of vcpu in the guest (via agent)

    :param name: name of domain
    :param cpu_list: list of cpus to enable or disable
    :param options: --enable, --disable
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["guestvcpus --domain %s" % name]
    if cpu_list:
        pieces.append("--cpulist %s" % cpu_list)
    if options:
        pieces.append("%s" % options)
    return command(" ".join(pieces), **dargs)
def vcpupin(name, vcpu=None, cpu_list=None, options=None, **dargs):
    """
    Changes the cpu affinity for respective vcpu.

    :param name: name of domain
    :param vcpu: virtual CPU to modify
    :param cpu_list: physical CPU specification (string)
    :param options: --live, --current or --config.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    # Note: these flags are appended on "is not None", not truthiness, so
    # explicit empty strings are still passed through.
    pieces = ["vcpupin --domain %s" % name]
    if vcpu is not None:
        pieces.append("--vcpu %s" % vcpu)
    if cpu_list is not None:
        pieces.append("--cpulist %s" % cpu_list)
    if options is not None:
        pieces.append("%s" % options)
    return command(" ".join(pieces), **dargs)
def vcpuinfo(name, **dargs):
    """
    Show detailed vcpu information for a domain.

    :param name: name of domain
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("vcpuinfo {}".format(name), **dargs)
def freecell(cellno=None, options="", **dargs):
    """
    Prints the available amount of memory on the machine or within a NUMA cell.

    :param cellno: number of cell to show.
    :param options: extra argument string to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    # Spacing deliberately matches the historical "%s"-built command string.
    cmd = "freecell "
    if cellno:
        cmd += " --cellno %s " % cellno
    cmd += " %s" % options
    return command(cmd, **dargs)
def nodeinfo(extra="", **dargs):
    """
    Returns basic information about the node,like number and type of CPU,
    and size of the physical memory.

    :param extra: extra argument string to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("nodeinfo {}".format(extra), **dargs)
def nodecpumap(extra="", **dargs):
    """
    Displays the node's total number of CPUs, the number of online
    CPUs and the list of online CPUs.

    :param extra: extra argument string to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("nodecpumap {}".format(extra), **dargs)
def nodesuspend(target, duration, extra='', **dargs):
    """
    Suspend the host node for a given time duration.

    :param target: Suspend target mem/disk/hybrid.
                   mem(Suspend-to-RAM)
                   disk(Suspend-to-Disk)
                   hybrid(Hybrid-Suspend)
    :param duration: Suspend duration in seconds, at least 60.
    :param extra: extra argument string to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nodesuspend {} {}".format(target, duration)
    if extra:
        cmd = "{} {}".format(cmd, extra)
    return command(cmd, **dargs)
def canonical_uri(option='', **dargs):
"""
Return the hypervisor canonical URI.
:param option: additional option string to pass
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
result = command("uri %s" % option, **dargs)
return result.stdout_text.strip()
def hostname(option='', **dargs):
"""
Return the hypervisor hostname.
:param option: additional option string to pass
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
result = command("hostname %s" % option, **dargs)
return result.stdout_text.strip()
def version(option='', **dargs):
"""
Return the major version info about what this built from.
:param option: additional option string to pass
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("version %s" % option, **dargs)
def maxvcpus(option='', **dargs):
"""
Return the connection vcpu maximum number.
:param: option: additional option string to pass
:param: dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "maxvcpus %s" % option
return command(cmd, **dargs)
def dom_list(options="", **dargs):
"""
Return the list of domains.
:param options: options to pass to list command
:return: CmdResult object
"""
return command("list %s" % options, **dargs)
def reboot(name, options="", **dargs):
    """
    Run a reboot command in the target domain.

    :param name: Name of domain.
    :param options: options to pass to reboot command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("reboot --domain %s %s" % (name, options), **dargs)
def managedsave(name, options="", **dargs):
    """
    Managed save of a domain state.

    :param name: Name of domain to save
    :param options: options to pass to list command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("managedsave --domain %s %s" % (name, options), **dargs)
def managedsave_remove(name, **dargs):
    """
    Remove managed save of a domain

    :param name: name of managed-saved domain to remove
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("managedsave-remove --domain %s" % name, **dargs)
def managedsave_dumpxml(name, options="", **dargs):
    """
    Dump XML of domain information for a managed save state file.

    :param name: Name of domain to dump
    :param options: options to pass to list command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("managedsave-dumpxml --domain %s %s" % (name, options), **dargs)
def managedsave_edit(name, options="", **dargs):
    """
    Edit the domain XML associated with the managed save state file.

    :param name: Name of domain to edit
    :param options: options to pass to list command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("managedsave-edit --domain %s %s" % (name, options), **dargs)
def managedsave_define(name, xml_path, options="", **dargs):
    """
    Replace the domain XML associated with a managed save state file.

    :param name: Name of domain to define
    :param xml_path: Path of xml file to be defined
    :param options: options to pass to list command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("managedsave-define --domain %s %s %s" % (name, xml_path, options), **dargs)
def driver(**dargs):
    """
    Return the driver by asking libvirt

    :param dargs: standardized virsh function API keywords
    :return: VM driver name
    """
    # A libvirt URI scheme is "driver" or "driver+transport"
    # (ref: http://libvirt.org/uri.html); keep only the driver part.
    uri_scheme = urllib.parse.urlsplit(canonical_uri(**dargs)).scheme
    driver_name, _, _ = uri_scheme.partition('+')
    return driver_name
def domstate(name, extra="", **dargs):
    """
    Return the state about a running domain.

    :param name: VM name
    :param extra: command options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("domstate %s %s" % (name, extra), **dargs)
def domid(name_or_uuid, **dargs):
    """
    Return VM's ID.

    :param name_or_uuid: VM name or uuid
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domid %s" % (name_or_uuid), **dargs)
def dominfo(name, **dargs):
    """
    Return the VM information.

    :param name: VM's name or id,uuid.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("dominfo %s" % (name), **dargs)
def domfsinfo(name, **dargs):
    """
    Return the info of domain mounted fssystems

    :param name: VM's name or uuid.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domfsinfo %s" % (name), **dargs)
def domuuid(name_or_id, **dargs):
    """
    Return the Converted domain name or id to the domain UUID.

    :param name_or_id: VM name or id
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domuuid %s" % name_or_id, **dargs)
def screenshot(name, filename, **dargs):
    """
    Capture a screenshot of VM's console and store it in file on host

    :param name: VM name
    :param filename: name of host file
    :param dargs: standardized virsh function API keywords
    :return: filename on success, None for a shut-off domain
    """
    # Don't take screenshots of shut-off domains
    if is_dead(name, **dargs):
        return None
    global SCREENSHOT_ERROR_COUNT
    # Force an exception on failure so the error can be logged below
    dargs['ignore_status'] = False
    try:
        command("screenshot %s %s" % (name, filename), **dargs)
    except process.CmdError as detail:
        # Only the first screenshot failure is logged to avoid flooding
        # the log when screendumps are taken periodically.
        if SCREENSHOT_ERROR_COUNT < 1:
            logging.error("Error taking VM %s screenshot. You might have to "
                          "set take_regular_screendumps=no on your "
                          "tests.cfg config file \n%s. This will be the "
                          "only logged error message.", name, detail)
        SCREENSHOT_ERROR_COUNT += 1
    return filename
def screenshot_test(name, filename="", options="", **dargs):
    """
    Capture a screenshot of VM's console and store it in file on host

    :param name: VM name or id
    :param filename: name of host file
    :param options: command options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("screenshot %s %s %s" % (name, filename, options), **dargs)
def domblkstat(name, device, option, **dargs):
    """
    Get device block stats for a running domain.

    :param name: VM's name.
    :param device: VM's device.
    :param option: command domblkstat option.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domblkstat %s %s %s" % (name, device, option), **dargs)
def domblkthreshold(name, device, threshold, option="", **dargs):
    """
    Set the threshold for block-threshold event for a given block device or it's backing chain element.

    :param name: VM's name.
    :param device: VM's device.
    :param threshold: threshold value with unit such as 100M.
    :param option: command domblkthreshold option.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domblkthreshold %s %s %s %s" % (name, device, threshold, option), **dargs)
def dumpxml(name, extra="", to_file="", **dargs):
    """
    Return the domain information as an XML dump.

    :param name: VM name
    :param extra: extra option string to pass to command
    :param to_file: optional file to write XML output to
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    cmd = "dumpxml %s %s" % (name, extra)
    result = command(cmd, **dargs)
    if to_file:
        # Context manager guarantees the file is closed even if the
        # write raises (the original leaked the handle in that case).
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def domifstat(name, interface, **dargs):
    """
    Get network interface stats for a running domain.

    :param name: Name of domain
    :param interface: interface device
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("domifstat %s %s" % (name, interface), **dargs)
def domjobinfo(name, **dargs):
    """
    Get domain job information.

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domjobinfo %s" % name, **dargs)
def edit(options, **dargs):
    """
    Edit the XML configuration for a domain.

    :param options: virsh edit options string (usually the domain name).
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("edit %s" % options, **dargs)
def dompmsuspend(name, target, duration=0, **dargs):
    """
    Suspends a running domain using guest OS's power management.

    :param name: VM name
    :param target: suspend target (mem/disk/hybrid)
    :param duration: duration in seconds to suspend (0 means unlimited)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "dompmsuspend %s %s --duration %s" % (name, target, duration)
    return command(cmd, **dargs)
def dompmwakeup(name, **dargs):
    """
    Wakeup a domain that was previously suspended by power management.

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("dompmwakeup %s" % name, **dargs)
def domjobabort(name, **dargs):
    """
    Aborts the currently running domain job.

    :param name: VM's name, id or uuid.
    :param dargs: standardized virsh function API keywords
    :return: result from command
    """
    return command("domjobabort %s" % name, **dargs)
def domxml_from_native(info_format, native_file, options=None, **dargs):
    """
    Convert native guest configuration format to domain XML format.

    :param info_format: The command's options. For example: qemu-argv.
    :param native_file: Native information file.
    :param options: extra param string, or None for no extra options.
    :param dargs: standardized virsh function API keywords.
    :return: result from command
    """
    cmd = "domxml-from-native %s %s" % (info_format, native_file)
    # Previously a None default was interpolated into the command line as
    # the literal string "None", producing an invalid virsh invocation.
    if options:
        cmd += " %s" % options
    return command(cmd, **dargs)
def domxml_to_native(info_format, name, options, **dargs):
    """
    Convert existing domain or its XML config to a native guest configuration format.

    :param info_format: The command's options. For example: `qemu-argv`.
    :param name: XML file or domain name/UUID.
    :param options: --xml or --domain
    :param dargs: standardized virsh function API keywords
    :return: result from command
    """
    # Note the argument order: options come before the name/file argument
    cmd = "domxml-to-native %s %s %s" % (info_format, options, name)
    return command(cmd, **dargs)
def vncdisplay(name, **dargs):
    """
    Output the IP address and port number for the VNC display.

    :param name: VM's name or id,uuid.
    :param dargs: standardized virsh function API keywords.
    :return: result from command
    """
    return command("vncdisplay %s" % name, **dargs)
def is_alive(name, **dargs):
    """
    Return True if the domain is started/alive.

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: True operation was successful
    """
    # Alive is simply the logical complement of dead
    dead = is_dead(name, **dargs)
    return not dead
def is_dead(name, **dargs):
    """
    Return True if the domain is undefined or not started/dead.

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: True operation was successful
    """
    # Force exception on command failure: an unreachable/undefined domain
    # is treated as dead.
    dargs['ignore_status'] = False
    try:
        state = domstate(name, **dargs).stdout_text.strip()
    except process.CmdError:
        return True
    # Unknown states are logged but treated as alive (fall through below)
    if state not in ('running', 'idle', 'paused', 'in shutdown', 'shut off',
                     'crashed', 'pmsuspended', 'no state'):
        logging.debug("State '%s' not known", state)
    if state in ('shut off', 'crashed', 'no state'):
        return True
    return False
def suspend(name, **dargs):
    """
    True on successful suspend of VM - kept in memory and not scheduled.

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("suspend %s" % (name), **dargs)
def resume(name, **dargs):
    """
    True on successful moving domain out of suspend

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("resume %s" % (name), **dargs)
def dommemstat(name, extra="", **dargs):
    """
    Get memory statistics for a domain.

    :param name: VM name
    :param extra: extra options to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("dommemstat %s %s" % (name, extra), **dargs)
def dump(name, path, option="", **dargs):
    """
    Dump the core of a domain to a file for analysis.

    :param name: VM name
    :param path: absolute path to state file
    :param option: command's option.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("dump %s %s %s" % (name, path, option), **dargs)
def save(name, path, options="", **dargs):
    """
    Store state of VM into named file.

    :param name: VM'name, id or uuid.
    :param path: absolute path to state file
    :param options: command's options.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("save %s %s %s" % (name, path, options), **dargs)
def restore(path, options="", **dargs):
    """
    Load state of VM from named file and remove file.

    :param path: absolute path to state file.
    :param options: options for virsh restore.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("restore %s %s" % (path, options), **dargs)
def start(name, options="", **dargs):
    """
    True on successful start of (previously defined) inactive domain.

    :param name: VM name
    :param options: options for virsh start.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    return command("start %s %s" % (name, options), **dargs)
def shutdown(name, options="", **dargs):
    """
    True on successful domain shutdown.

    :param name: VM name
    :param options: options for virsh shutdown.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("shutdown %s %s" % (name, options), **dargs)
def destroy(name, options="", **dargs):
    """
    True on successful domain destruction

    :param name: VM name
    :param options: options for virsh destroy
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("destroy %s %s" % (name, options), **dargs)
def define(xml_path, options=None, **dargs):
    """
    Return cmd result of domain define.

    :param xml_path: XML file path
    :param options: options for virsh define, or None for none
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "define --file %s" % xml_path
    if options is not None:
        cmd += " %s" % options
    logging.debug("Define VM from %s", xml_path)
    return command(cmd, **dargs)
def undefine(name, options=None, **dargs):
    """
    Return cmd result of domain undefine (after shutdown/destroy).

    :param name: VM name
    :param options: options for virsh undefine, or None for none
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "undefine %s" % name
    if options is not None:
        cmd += " %s" % options
    logging.debug("Undefine VM %s", name)
    return command(cmd, **dargs)
def remove_domain(name, options=None, **dargs):
    """
    Return True after forcefully removing a domain if it exists.

    :param name: VM name
    :param options: options passed through to undefine, or None
    :param dargs: standardized virsh function API keywords
    :return: True operation was successful
    """
    if domain_exists(name, **dargs):
        # A running domain must be destroyed before it can be undefined
        if is_alive(name, **dargs):
            destroy(name, **dargs)
        try:
            dargs['ignore_status'] = False
            undefine(name, options, **dargs)
        except process.CmdError as detail:
            logging.error("Undefine VM %s failed:\n%s", name, detail)
            return False
    # A nonexistent domain counts as successfully removed
    return True
def domain_exists(name, **dargs):
    """
    Return True if a domain exits.

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: True operation was successful
    """
    # domstate fails (raises) for an unknown domain, which is how
    # nonexistence is detected here.
    dargs['ignore_status'] = False
    try:
        command("domstate %s" % name, **dargs)
        return True
    except process.CmdError as detail:
        logging.warning("VM %s does not exist", name)
        if dargs.get('debug', False):
            logging.warning(str(detail))
        return False
def migrate_postcopy(name, **dargs):
    """
    Trigger postcopy migration

    :param name: VM name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "migrate-postcopy %s" % name
    return command(cmd, **dargs)
def migrate(name="", dest_uri="", option="", extra="", **dargs):
    """
    Migrate a guest to another host.

    :param name: name of guest on uri.
    :param dest_uri: libvirt uri to send guest to
    :param option: Free-form string of options to virsh migrate
    :param extra: Free-form string of options to follow <domain> <desturi>
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    # Build the command incrementally; every piece is optional
    cmd = "migrate"
    if option:
        cmd += " %s" % option
    if name:
        cmd += " --domain %s" % name
    if dest_uri:
        cmd += " --desturi %s" % dest_uri
    if extra:
        cmd += " %s" % extra
    return command(cmd, **dargs)
def migrate_setspeed(domain, bandwidth, extra=None, **dargs):
    """
    Set the maximum migration bandwidth (in MiB/s) for
    a domain which is being migrated to another host.

    :param domain: name/uuid/id of guest
    :param bandwidth: migration bandwidth limit in MiB/s
    :param extra: extra options string, or None for none
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "migrate-setspeed %s %s" % (domain, bandwidth)
    if extra is not None:
        cmd += " %s" % extra
    return command(cmd, **dargs)
def migrate_getspeed(domain, extra="", **dargs):
    """
    Get the maximum migration bandwidth (in MiB/s) for
    a domain.

    :param domain: name/uuid/id of guest
    :param extra: extra options to migrate-getspeed
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "migrate-getspeed %s" % domain
    if extra:
        cmd += " %s" % extra
    return command(cmd, **dargs)
def migrate_setmaxdowntime(domain, downtime, extra=None, **dargs):
    """
    Set maximum tolerable downtime of a domain (in ms)
    which is being live-migrated to another host.

    :param domain: name/uuid/id of guest
    :param downtime: downtime number of live migration
    :param extra: extra options string, or None for none
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "migrate-setmaxdowntime %s %s" % (domain, downtime)
    if extra is not None:
        cmd += " %s" % extra
    return command(cmd, **dargs)
def migrate_getmaxdowntime(domain, **dargs):
    """
    Get maximum tolerable downtime of a domain.

    :param domain: name/uuid/id of guest
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "migrate-getmaxdowntime %s" % domain
    return command(cmd, **dargs)
def migrate_compcache(domain, size=None, **dargs):
    """
    Get/set compression cache size for migration.

    :param domain: name/uuid/id of guest
    :param size: compression cache size to be set; None only queries.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = 'migrate-compcache %s' % domain
    if size is not None:
        cmd += ' --size %s' % size
    return command(cmd, **dargs)
def _adu_device(action, domainarg=None, filearg=None,
                domain_opt=None, file_opt=None,
                flagstr=None, **dargs):
    """
    Private helper for attach, detach, update device commands.

    Collects only the pieces whose value is not None, then joins
    them into a single virsh command line.
    """
    # N/B: Parameter order is significant: RH BZ 1018369
    pieces = [action]
    if domain_opt is not None:
        pieces.append("--domain %s" % domain_opt)
    if domainarg is not None:
        pieces.append("%s" % domainarg)
    if file_opt is not None:
        pieces.append("--file %s" % file_opt)
    if filearg is not None:
        pieces.append("%s" % filearg)
    if flagstr is not None:
        pieces.append("%s" % flagstr)
    return command(" ".join(pieces), **dargs)
def attach_device(domainarg=None, filearg=None,
                  domain_opt=None, file_opt=None,
                  flagstr=None, **dargs):
    """
    Attach a device using full parameter/argument set.

    :param domainarg: Domain name (first pos. parameter)
    :param filearg: File name (second pos. parameter)
    :param domain_opt: Option to --domain parameter
    :param file_opt: Option to --file parameter
    :param flagstr: string of "--force, --persistent, etc."
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return _adu_device("attach-device", domainarg=domainarg, filearg=filearg,
                       domain_opt=domain_opt, file_opt=file_opt,
                       flagstr=flagstr, **dargs)
def detach_device(domainarg=None, filearg=None,
                  domain_opt=None, file_opt=None,
                  flagstr=None, wait_remove_event=False, event_timeout=7, **dargs):
    """
    Detach a device using full parameter/argument set.

    :param domainarg: Domain name (first pos. parameter)
    :param filearg: File name (second pos. parameter)
    :param domain_opt: Option to --domain parameter
    :param file_opt: Option to --file parameter
    :param flagstr: string of "--force, --persistent, etc."
    :param wait_remove_event: wait until device_remove event comes
    :param event_timeout: timeout for virsh event command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    detach_cmd_rv = _adu_device("detach-device", domainarg=domainarg, filearg=filearg,
                                domain_opt=domain_opt, file_opt=file_opt,
                                flagstr=flagstr, **dargs)
    # Optionally block until the guest confirms removal via event
    if wait_remove_event:
        event(domain=domainarg, event='device-removed', event_timeout=event_timeout, **dargs)
    return detach_cmd_rv
def update_device(domainarg=None, filearg=None,
                  domain_opt=None, file_opt=None,
                  flagstr="", **dargs):
    """
    Update device from an XML <file>.

    :param domainarg: Domain name (first pos. parameter)
    :param filearg: File name (second pos. parameter)
    :param domain_opt: Option to --domain parameter
    :param file_opt: Option to --file parameter
    :param flagstr: string of "--force, --persistent, etc."
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return _adu_device("update-device", domainarg=domainarg, filearg=filearg,
                       domain_opt=domain_opt, file_opt=file_opt,
                       flagstr=flagstr, **dargs)
def attach_disk(name, source, target, extra="", **dargs):
    """
    Attach a disk to VM.

    :param name: name of guest
    :param source: source of disk device
    :param target: target of disk device
    :param extra: additional arguments to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "attach-disk --domain %s --source %s --target %s %s"\
        % (name, source, target, extra)
    return command(cmd, **dargs)
def detach_disk(name, target, extra="", wait_remove_event=False, event_timeout=7, **dargs):
    """
    Detach a disk from VM.

    :param name: name of guest
    :param target: target of disk device
    :param extra: additional arguments to command
    :param wait_remove_event: wait until device_remove event comes
    :param event_timeout: timeout for virsh event command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    detach_cmd = "detach-disk --domain %s --target %s %s" % (name, target, extra)
    detach_cmd_rv = command(detach_cmd, **dargs)
    # Optionally block until the guest confirms removal via event
    if wait_remove_event:
        event(domain=name, event='device-removed', event_timeout=event_timeout, **dargs)
    return detach_cmd_rv
def detach_device_alias(name, alias, extra="", wait_remove_event=False, event_timeout=7, **dargs):
    """
    Detach a device with alias

    :param name: name of guest
    :param alias: alias of device
    :param extra: additional arguments to command
    :param wait_remove_event: wait until device_remove event comes
    :param event_timeout: timeout for virsh event command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    detach_cmd = "detach-device-alias --domain %s --alias %s %s" % (name, alias, extra)
    detach_cmd_rv = command(detach_cmd, **dargs)
    if wait_remove_event:
        event(domain=name, event='device-removed', event_timeout=event_timeout, **dargs)
    return detach_cmd_rv
def attach_interface(name, option="", **dargs):
    """
    Attach a NIC to VM.

    :param name: name of guest
    :param option: options to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "attach-interface "
    if name:
        cmd += "--domain %s" % name
    if option:
        cmd += " %s" % option
    return command(cmd, **dargs)
def detach_interface(name, option="", wait_remove_event=False, event_timeout=7, **dargs):
    """
    Detach a NIC to VM.

    :param name: name of guest
    :param option: options to pass to command
    :param wait_remove_event: wait until device_remove event comes
    :param event_timeout: timeout for virsh event command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    detach_cmd = "detach-interface --domain %s %s" % (name, option)
    detach_cmd_rv = command(detach_cmd, **dargs)
    if wait_remove_event:
        event(domain=name, event='device-removed', event_timeout=event_timeout, **dargs)
    return detach_cmd_rv
def net_dumpxml(name, extra="", to_file="", **dargs):
    """
    Dump XML from network named param name.

    :param name: Name of a network
    :param extra: Extra parameters to pass to command
    :param to_file: Send result to a file
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "net-dumpxml %s %s" % (name, extra)
    result = command(cmd, **dargs)
    if to_file:
        # Use stdout_text (decoded str) like the sibling dumpxml();
        # raw .stdout may be bytes and cannot be written to a text file.
        # Context manager closes the file even if the write raises.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def net_create(xml_file, extra="", **dargs):
    """
    Create _transient_ network from a XML file.

    :param xml_file: xml defining network
    :param extra: extra parameters to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-create %s %s" % (xml_file, extra), **dargs)
def net_define(xml_file, extra="", **dargs):
    """
    Define network from a XML file, do not start

    :param xml_file: xml defining network
    :param extra: extra parameters to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-define %s %s" % (xml_file, extra), **dargs)
def net_list(options, extra="", **dargs):
    """
    List networks on host.

    :param options: options to pass to command
    :param extra: extra parameters to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-list %s %s" % (options, extra), **dargs)
def net_state_dict(only_names=False, virsh_instance=None, **dargs):
    """
    Return network name to state/autostart/persistent mapping

    :param only_names: When true, return network names as keys and None values
    :param virsh_instance: Call net_list() on this instance instead of module
    :param dargs: standardized virsh function API keywords
    :return: dictionary
    """
    # Using multiple virsh commands in different ways
    dargs['ignore_status'] = False  # force problem detection
    if virsh_instance is not None:
        net_list_result = virsh_instance.net_list("--all", **dargs)
    else:
        net_list_result = net_list("--all", **dargs)
    # If command failed, exception would be raised here
    netlist = net_list_result.stdout_text.strip().splitlines()
    # First two lines contain table header followed by entries
    # for each network on the host, such as:
    #
    #  Name                 State      Autostart     Persistent
    # ----------------------------------------------------------
    #  default              active     yes           yes
    #
    # TODO: Double-check first-two lines really are header
    netlist = netlist[2:]
    result = {}
    for line in netlist:
        # Split on whitespace, assume 3 columns
        linesplit = line.split(None, 3)
        name = linesplit[0]
        # Several callers in libvirt_xml only require defined names
        if only_names:
            result[name] = None
            continue
        # Keep search fast & avoid first-letter capital problems
        # ("inactive" contains "nactive", "active" does not)
        active = not bool(linesplit[1].count("nactive"))
        # ("yes" contains "es", "no" does not)
        autostart = bool(linesplit[2].count("es"))
        if len(linesplit) == 4:
            persistent = bool(linesplit[3].count("es"))
        else:
            # There is no representation of persistent status in output
            # in older libvirt. When libvirt older than 0.10.2 no longer
            # supported, this block can be safely removed.
            try:
                # Rely on net_autostart will raise() if not persistent state
                if autostart:  # Enabled, try enabling again
                    # dargs['ignore_status'] already False
                    if virsh_instance is not None:
                        virsh_instance.net_autostart(name, **dargs)
                    else:
                        net_autostart(name, **dargs)
                else:  # Disabled, try disabling again
                    if virsh_instance is not None:
                        virsh_instance.net_autostart(
                            name, "--disable", **dargs)
                    else:
                        net_autostart(name, "--disable", **dargs)
                # no exception raised, must be persistent
                persistent = True
            except process.CmdError as detail:
                # Exception thrown, could be transient or real problem
                # (match "transient"/"Transient" without caring about case)
                if bool(str(detail.result).count("ransient")):
                    persistent = False
                else:  # A unexpected problem happened, re-raise it.
                    raise
        # Warning: These key names are used by libvirt_xml and test modules!
        result[name] = {'active': active,
                        'autostart': autostart,
                        'persistent': persistent}
    return result
def net_start(network, extra="", **dargs):
    """
    Start network on host.

    :param network: name/parameter for network option/argument
    :param extra: extra parameters to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-start %s %s" % (network, extra), **dargs)
def net_destroy(network, extra="", **dargs):
    """
    Destroy (stop) an activated network on host.

    :param network: name/parameter for network option/argument
    :param extra: extra string to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-destroy %s %s" % (network, extra), **dargs)
def net_undefine(network, extra="", **dargs):
    """
    Undefine a defined network on host.

    :param network: name/parameter for network option/argument
    :param extra: extra string to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-undefine %s %s" % (network, extra), **dargs)
def net_name(uuid, extra="", **dargs):
    """
    Get network name on host.

    :param uuid: network UUID.
    :param extra: extra parameters to pass to command.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-name %s %s" % (uuid, extra), **dargs)
def net_uuid(network, extra="", **dargs):
    """
    Get network UUID on host.

    :param network: name/parameter for network option/argument
    :param extra: extra parameters to pass to command.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-uuid %s %s" % (network, extra), **dargs)
def net_autostart(network, extra="", **dargs):
    """
    Set/unset a network to autostart on host boot

    :param network: name/parameter for network option/argument
    :param extra: extra parameters to pass to command (e.g. --disable)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("net-autostart %s %s" % (network, extra), **dargs)
def net_info(network, extra="", **dargs):
    """
    Get network information

    :param network: name/parameter for network option/argument
    :param extra: extra parameters to pass to command.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("net-info %s %s" % (network, extra), **dargs)
def net_update(network, update_cmd, section, xml, extra="", **dargs):
    """
    Update parts of an existing network's configuration

    :param network: network name or uuid
    :param update_cmd: type of update (add-first, add-last, delete, or modify)
    :param section: which section of network configuration to update
    :param xml: name of file containing xml
    :param extra: extra parameters to pass to command.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "net-update %s %s %s %s %s" \
          % (network, update_cmd, section, xml, extra)
    return command(cmd, **dargs)
def _pool_type_check(pool_type):
"""
check if the pool_type is supported or not
:param pool_type: pool type
:return: valid pool type or None
"""
valid_types = ['dir', 'fs', 'netfs', 'disk', 'iscsi', 'logical',
'gluster', 'rbd', 'scsi', 'iscsi-direct']
if pool_type and pool_type not in valid_types:
logging.error("Specified pool type '%s' not in '%s'",
pool_type, valid_types)
pool_type = None
elif not pool_type:
# take the first element as default pool_type
pool_type = valid_types[0]
return pool_type
def pool_info(name, **dargs):
    """
    Returns basic information about the storage pool.

    :param name: name of pool
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-info %s" % name
    return command(cmd, **dargs)
def pool_destroy(name, **dargs):
    """
    Forcefully stop a given pool.

    :param name: name of pool
    :param dargs: standardized virsh function API keywords
    :return: True on success, False on command failure
    """
    cmd = "pool-destroy %s" % name
    # Force an exception on failure so it can be converted to False
    dargs['ignore_status'] = False
    try:
        command(cmd, **dargs)
        return True
    except process.CmdError as detail:
        logging.error("Failed to destroy pool: %s.", detail)
        return False
def pool_create(xml_file, extra="", **dargs):
    """
    Create a pool from an xml file.

    :param xml_file: file containing an XML pool description
    :param extra: extra parameters to pass to command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    # Note: extra options are intentionally placed before the file argument
    return command("pool-create %s %s" % (extra, xml_file), **dargs)
def pool_create_as(name, pool_type, target, extra="", **dargs):
    """
    Create a pool from a set of args.

    :param name: name of pool
    :param pool_type: storage pool type such as 'dir'
    :param target: libvirt uri to send guest to
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: True if pool creation command was successful
    """
    if not name:
        # Previously only logged and then ran the command with an empty
        # name anyway; fail fast instead.
        logging.error("Please give a pool name")
        return False
    pool_type = _pool_type_check(pool_type)
    if pool_type is None:
        return False
    logging.info("Create %s type pool %s", pool_type, name)
    cmd = "pool-create-as --name %s --type %s --target %s %s" \
        % (name, pool_type, target, extra)
    # Force an exception on failure so it can be converted to False
    dargs['ignore_status'] = False
    try:
        command(cmd, **dargs)
        return True
    except process.CmdError as detail:
        logging.error("Failed to create pool: %s.", detail)
        return False
def pool_list(option="", extra="", **dargs):
    """
    Print the pool information of the host.

    :param option: options given to command:
                   all      - include inactive pools,
                   inactive - only inactive pools,
                   details  - complete details about the pools
    :param extra: extra options (e.g. to exercise invalid input)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-list %s %s" % (option, extra)
    return command(cmd, **dargs)
def pool_uuid(name, **dargs):
    """
    Convert a pool name to pool UUID.

    :param name: Name of the pool
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-uuid %s" % name
    return command(cmd, **dargs)
def pool_name(uuid, **dargs):
    """
    Convert a pool UUID to pool name.

    :param uuid: UUID of the pool
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-name %s" % uuid
    return command(cmd, **dargs)
def pool_refresh(name, **dargs):
    """
    Refresh the contents of a pool.

    :param name: Name of the pool
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-refresh %s" % name
    return command(cmd, **dargs)
def pool_delete(name, **dargs):
    """
    Delete the resources used by a given pool object.

    :param name: Name of the pool
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-delete %s" % name
    return command(cmd, **dargs)
def pool_state_dict(only_names=False, **dargs):
    """
    Return a mapping of pool names to their state/autostart info.

    :param only_names: When True, map pool names to None (names only)
    :param dargs: standardized virsh function API keywords
    :return: dictionary {name: {'active': bool, 'autostart': bool}},
             or {name: None} entries when only_names is True
    """
    # Force problem detection: any command failure raises here
    dargs['ignore_status'] = False
    output = pool_list("--all", **dargs).stdout_text.strip()
    # Expected layout (first two lines are the table header):
    #
    #  Name                 State      Autostart
    # -------------------------------------------
    #  default              active     yes
    #  iscsi-net-pool       active     yes
    #
    # TODO: Double-check first-two lines really are header
    pools = {}
    for entry in output.splitlines()[2:]:
        # Split on whitespace, assume 3 columns
        columns = entry.split(None, 3)
        pool = columns[0]
        # Several callers in libvirt_xml only require defined names
        # TODO: Copied from net_state_dict where this is true, but
        #       as of writing only caller is virsh_pool_create test
        #       which doesn't use this 'feature'.
        if only_names:
            pools[pool] = None
            continue
        # Substring checks keep the search fast and dodge
        # first-letter capitalization differences
        is_active = columns[1].count("nactive") == 0
        is_autostart = columns[2].count("es") > 0
        # Warning: These key names are used by libvirt_xml and test modules!
        pools[pool] = {'active': is_active,
                       'autostart': is_autostart}
    return pools
def pool_define_as(name, pool_type, target="", extra="", **dargs):
    """
    Define a storage pool from command-line arguments.

    :param name: Name of the pool to be defined
    :param pool_type: Type of the pool to be defined; one of:
                      dir (file system directory),
                      disk (physical disk device),
                      fs (pre-formatted block device),
                      netfs (network exported directory),
                      iscsi (iSCSI target),
                      logical (LVM volume group),
                      mpath (multipath device enumerator),
                      scsi (SCSI host adapter),
                      rbd (rados block device)
    :param target: libvirt uri to send guest to
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult of pool-define-as, or False for an invalid type
    """
    pool_type = _pool_type_check(pool_type)
    if pool_type is None:
        return False
    logging.debug("Try to define %s type pool %s", pool_type, name)
    cmd = "pool-define-as --name %s --type %s %s" % (name, pool_type, extra)
    # Target is not a must
    if target:
        cmd += " --target %s" % target
    return command(cmd, **dargs)
def pool_start(name, extra="", **dargs):
    """
    Start a previously defined pool.

    :param name: Name of the pool to be started
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-start %s %s" % (name, extra)
    return command(cmd, **dargs)
def pool_autostart(name, extra="", **dargs):
    """
    Mark a pool for autostart.

    :param name: Name of the pool to be marked for autostart
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-autostart %s %s" % (name, extra)
    return command(cmd, **dargs)
def pool_edit(name, **dargs):
    """
    Edit the XML configuration for a storage pool.

    :param name: pool name or uuid
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("pool-edit %s" % name, **dargs)
def pool_undefine(name, extra="", **dargs):
    """
    Undefine the given pool.

    :param name: Name of the pool to be undefined
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-undefine %s %s" % (name, extra)
    return command(cmd, **dargs)
def pool_build(name, options="", **dargs):
    """
    Build the underlying storage for a pool.

    :param name: Name of the pool to be built
    :param options: options for pool-build
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "pool-build %s %s" % (name, options)
    return command(cmd, **dargs)
def find_storage_pool_sources_as(source_type, options="", **dargs):
    """
    Find potential storage pool sources from command-line arguments.

    :param source_type: type of storage pool sources to find
    :param options: cmd options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "find-storage-pool-sources-as %s %s" % (source_type, options)
    return command(cmd, **dargs)
def find_storage_pool_sources(source_type, srcSpec, **dargs):
    """
    Find potential storage pool sources described by an XML file.

    :param source_type: type of storage pool sources to find
    :param srcSpec: file of source xml to query for pools
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "find-storage-pool-sources %s %s" % (source_type, srcSpec)
    return command(cmd, **dargs)
def pool_dumpxml(name, extra="", to_file="", **dargs):
    """
    Return the pool information as an XML dump.

    :param name: pool name
    :param extra: extra options passed to the command
    :param to_file: optional file to write XML output to
    :param dargs: standardized virsh function API keywords
    :return: standard output from command (text)
    :raise: process.CmdError when the command exits non-zero
    """
    dargs['ignore_status'] = True
    cmd = "pool-dumpxml %s %s" % (name, extra)
    result = command(cmd, **dargs)
    if to_file:
        # Use stdout_text (not stdout, which may be bytes on python3) so
        # we always write text; 'with' guarantees the file is closed.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    if result.exit_status:
        raise process.CmdError(cmd, result,
                               "Virsh dumpxml returned non-zero exit status")
    return result.stdout_text.strip()
def pool_define(xml_path, **dargs):
    """
    Define (but do not start) a pool from an xml file.

    :param xml_path: XML file path
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("pool-define --file %s" % xml_path, **dargs)
def vol_create(pool_name, xml_file, extra="", **dargs):
    """
    Create a volume from an xml file.

    :param pool_name: Name of the pool to be used
    :param xml_file: file containing an XML vol description
    :param extra: string of extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-create --pool %s --file %s %s" % (pool_name, xml_file, extra)
    return command(cmd, **dargs)
def vol_create_as(volume_name, pool_name, capacity,
                  allocation, frmt, extra="", **dargs):
    """
    Create a volume inside an existing pool.

    :param volume_name: Name of the volume to be created
    :param pool_name: Name of the pool to be used
    :param capacity: Size of the volume
    :param allocation: Size of the volume to be pre-allocated
    :param frmt: volume format (e.g. raw, qed, qcow2)
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult of the vol-create-as command
    """
    pieces = ["vol-create-as --pool %s" % pool_name,
              "%s --capacity %s" % (volume_name, capacity)]
    # allocation and format are optional arguments
    if allocation:
        pieces.append("--allocation %s" % allocation)
    if frmt:
        pieces.append("--format %s" % frmt)
    if extra:
        pieces.append(extra)
    return command(" ".join(pieces), **dargs)
def vol_create_from(pool_name, vol_file, input_vol, input_pool, extra="",
                    **dargs):
    """
    Create a volume, using another volume as input.

    :param pool_name: Name of the pool to create the volume in
    :param vol_file: XML <file> with the volume definition
    :param input_vol: Name of the source volume
    :param input_pool: Name of the pool the source volume is in
    :param extra: Free-form string of options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult of the vol-create-from command
    """
    cmd = ("vol-create-from --pool %s --file %s --vol %s --inputpool %s" %
           (pool_name, vol_file, input_vol, input_pool))
    if extra:
        cmd += " %s" % (extra)
    return command(cmd, **dargs)
def vol_list(pool_name, extra="", **dargs):
    """
    List the volumes of a given pool.

    :param pool_name: Name of the pool
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-list %s %s" % (pool_name, extra)
    return command(cmd, **dargs)
def vol_delete(volume_name, pool_name, extra="", **dargs):
    """
    Delete a given volume.

    :param volume_name: Name of the volume
    :param pool_name: Name of the pool
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-delete %s %s %s" % (volume_name, pool_name, extra)
    return command(cmd, **dargs)
def vol_key(volume_name, pool_name, extra="", **dargs):
    """
    Print the key of the given volume.

    :param volume_name: Name of the volume
    :param pool_name: Name of the pool the volume belongs to
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-key --vol %s --pool %s %s" % (volume_name, pool_name, extra)
    return command(cmd, **dargs)
def vol_info(volume_name, pool_name, extra="", **dargs):
    """
    Print information about the given volume.

    :param volume_name: Name of the volume
    :param pool_name: Name of the pool (skipped when empty)
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["vol-info --vol %s" % volume_name]
    if pool_name:
        pieces.append("--pool %s" % pool_name)
    if extra:
        pieces.append(extra)
    return command(" ".join(pieces), **dargs)
def vol_name(volume_key, extra="", **dargs):
    """
    Print the volume name for a given volume key or path.

    :param volume_key: Key (or path) of the volume
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-name --vol %s %s" % (volume_key, extra)
    return command(cmd, **dargs)
def vol_path(volume_name, pool_name, extra="", **dargs):
    """
    Print the path of the given volume.

    :param volume_name: Name of the volume
    :param pool_name: Name of the pool
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-path --vol %s --pool %s %s" % (volume_name, pool_name, extra)
    return command(cmd, **dargs)
def vol_dumpxml(volume_name, pool_name, to_file=None, options="", **dargs):
    """
    Dump volume details as XML.

    :param volume_name: Name of the volume
    :param pool_name: Name of the pool
    :param to_file: path of the file to store the output
    :param options: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = ('vol-dumpxml --vol %s --pool %s %s' %
           (volume_name, pool_name, options))
    result = command(cmd, **dargs)
    if to_file is not None:
        # Use stdout_text (not stdout, which may be bytes on python3) so
        # we always write text; 'with' guarantees the file is closed.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def vol_pool(volume_name, extra="", **dargs):
    """
    Return the pool name for a given vol-key or volume path.

    :param volume_name: Name, key or path of the volume
    :param extra: Free-form string options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-pool %s %s" % (volume_name, extra)
    return command(cmd, **dargs)
def vol_clone(volume_name, new_name, pool_name="", extra="", **dargs):
    """
    Clone an existing volume.

    :param volume_name: Name of the original volume
    :param new_name: Clone name
    :param pool_name: Name of the pool (skipped when empty)
    :param extra: Free-form string options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-clone --vol %s --newname %s %s" % (volume_name, new_name, extra)
    if pool_name:
        cmd = "%s --pool %s" % (cmd, pool_name)
    return command(cmd, **dargs)
def vol_wipe(volume_name, pool_name="", alg="", **dargs):
    """
    Ensure data previously on a volume is not accessible to future reads.

    :param volume_name: Name of the volume
    :param pool_name: Name of the pool (skipped when empty)
    :param alg: Wiping algorithm to use (skipped when empty)
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["vol-wipe --vol %s" % volume_name]
    if pool_name:
        pieces.append("--pool %s" % pool_name)
    if alg:
        pieces.append("--algorithm %s" % alg)
    return command(" ".join(pieces), **dargs)
def vol_resize(volume_name, capacity, pool_name="", extra="", **dargs):
    """
    Resize a storage volume.

    :param volume_name: Name of the volume
    :param capacity: New capacity for the volume (default bytes)
    :param pool_name: Name of the pool (skipped when empty)
    :param extra: Free-form string options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    # NOTE: trailing spaces preserved from the original command layout
    cmd = "vol-resize --vol %s --capacity %s " % (volume_name, capacity)
    if pool_name:
        cmd = cmd + " --pool %s " % pool_name
    if extra:
        cmd = cmd + extra
    return command(cmd, **dargs)
def capabilities(option='', to_file=None, **dargs):
    """
    Return output from virsh capabilities command.

    :param option: additional options (takes none)
    :param to_file: optional file to write the XML output to
    :param dargs: standardized virsh function API keywords
    :return: standard output from command (text)
    """
    cmd_result = command('capabilities %s' % option, **dargs)
    if to_file is not None:
        # Use stdout_text (not stdout, which may be bytes on python3) so
        # we always write text; 'with' guarantees the file is closed.
        with open(to_file, 'w') as result_file:
            result_file.write(cmd_result.stdout_text.strip())
    return cmd_result.stdout_text.strip()
def nodecpustats(option='', **dargs):
    """
    Return basic information about the node CPU statistics.

    :param option: additional options (takes none)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("nodecpustats %s" % option, **dargs)
def nodememstats(option='', **dargs):
    """
    Return basic information about the node memory statistics.

    :param option: additional options (takes none)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = 'nodememstats %s' % option
    return command(cmd, **dargs)
def memtune_set(name, options, **dargs):
    """
    Set the memory controller parameters of a domain.

    :param name: VM Name
    :param options: contains the values limit, state and value
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "memtune %s %s" % (name, options)
    return command(cmd, **dargs)
def memtune_list(name, **dargs):
    """
    List the memory controller values of a given domain.

    :param name: VM Name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("memtune %s" % (name), **dargs)
def memtune_get(name, key):
    """
    Get a specific memory controller value.

    :param name: VM Name
    :param key: memory controller limit for which the value is needed
    :return: the memory value of the key in KiB, or -1 when the key is
             missing or its value is "unlimited"
    """
    # stdout_text guarantees a str; .stdout may be bytes on python3 and
    # the previous str() wrapper produced "b'...'" noise in the match.
    memtune_output = memtune_list(name).stdout_text.strip()
    # Use lazy %-style args so formatting only happens when logged
    logging.info("memtune output is %s", memtune_output)
    memtune_value = re.findall(r"%s\s*:\s+(\S+)" % key, memtune_output)
    if memtune_value:
        return int(memtune_value[0]) if memtune_value[0] != "unlimited" else -1
    return -1
def help_command(options='', cache=False, **dargs):
    """
    Return the combined list of commands and groups from help output.

    :param options: additional options to pass to help command
    :param cache: Return cached result if True, or refreshed cache if False
    :param dargs: standardized virsh function API keywords
    :return: List of command and group names
    """
    # Concatenate the virsh command list with the virsh group list
    return (help_command_only(options, cache, **dargs) +
            help_command_group(options, cache, **dargs))
def help_command_only(options='', cache=False, **dargs):
    """
    Return list of commands in help command output.

    :param options: additional options to pass to help command
    :param cache: Return cached result if True, or refreshed cache if False
    :param dargs: standardized virsh function API keywords
    :return: List of command names
    """
    # global needed to support this function's use in Virsh method closure
    global VIRSH_COMMAND_CACHE
    if cache is False or not VIRSH_COMMAND_CACHE:
        command_regex = re.compile(r"\s+([a-z0-9-]+)\s+")
        fresh = []
        for line in help(options, **dargs).stdout_text.strip().splitlines():
            # Group headers contain 'keyword'; skip those lines
            if line.find("keyword") != -1:
                continue
            match = command_regex.search(line)
            if match:
                fresh.append(match.group(1))
        VIRSH_COMMAND_CACHE = fresh
    # Return a copy to prevent accidental modification of cache itself
    return list(VIRSH_COMMAND_CACHE)
def help_command_group(options='', cache=False, **dargs):
    """
    Return list of groups in help command output.

    :param options: additional options to pass to help command
    :param cache: Return cached result if True, or refreshed cache if False
    :param dargs: standardized virsh function API keywords
    :return: List of group names (empty when virsh prints no group detail)
    """
    # global needed to support this function's use in Virsh method closure
    global VIRSH_COMMAND_GROUP_CACHE, VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL
    if VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL:
        return []
    if cache is False or not VIRSH_COMMAND_GROUP_CACHE:
        group_regex = re.compile(r"[\']([a-zA-Z0-9]+)[\']")
        fresh = []
        for line in help(options, **dargs).stdout_text.strip().splitlines():
            # Only group lines contain the word 'keyword'
            if line.find("keyword") != -1:
                match = group_regex.search(line)
                if match:
                    fresh.append(match.group(1))
        VIRSH_COMMAND_GROUP_CACHE = fresh
        if not fresh:
            # Remember this virsh build prints no group details at all
            VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL = True
    # Return a copy to prevent accidental modification of cache itself
    return list(VIRSH_COMMAND_GROUP_CACHE)
def has_help_command(virsh_cmd, options='', **dargs):
    """
    Check whether a virsh command appears in the help output command list.

    :param virsh_cmd: Name of virsh command or group to look for
    :param options: Additional options to send to help command
    :param dargs: standardized virsh function API keywords
    :return: True/False
    """
    return virsh_cmd in help_command_only(options, cache=True, **dargs)
def has_command_help_match(virsh_cmd, regex, **dargs):
    """
    Run a regex search over a subcommand's help output.

    :param virsh_cmd: Name of virsh command or group to match help output
    :param regex: regular expression string to match
    :param dargs: standardized virsh function API keywords
    :return: re match object, or None when no match
    """
    help_text = help(virsh_cmd, **dargs).stdout_text.strip()
    return re.search(regex, help_text)
def help(virsh_cmd='', **dargs):
    """
    Print global help, command-specific help, or help for a group
    of related commands.

    :param virsh_cmd: Name of virsh command or group
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "help %s" % virsh_cmd
    return command(cmd, **dargs)
def schedinfo(domain, options="", **dargs):
    """
    Show/Set scheduler parameters.

    :param domain: vm's name, id or uuid
    :param options: additional options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("schedinfo %s %s" % (domain, options), **dargs)
def setmem(domainarg=None, sizearg=None, domain=None,
           size=None, use_kilobytes=False, flagstr="", **dargs):
    """
    Change the current memory allocation in the guest domain.

    :param domainarg: Domain name (first pos. parameter)
    :param sizearg: Memory size in KiB (second. pos. parameter)
    :param domain: Option to --domain parameter
    :param size: Option to --size or --kilobytes parameter
    :param use_kilobytes: True for --kilobytes, False for --size
    :param dargs: standardized virsh function API keywords
    :param flagstr: string of "--config, --live, --current, etc."
    :return: CmdResult instance
    :raise: process.CmdError: if libvirtd is not running
    """
    # 'is not None' checks (rather than truthiness) allow testing "" and 0
    parts = ["setmem"]
    if domainarg is not None:
        parts.append("%s" % domainarg)
    if domain is not None:
        parts.append("--domain %s" % domain)
    if sizearg is not None:
        parts.append("%s" % sizearg)
    if size is not None:
        parts.append("--kilobytes %s" % size if use_kilobytes
                     else "--size %s" % size)
    if flagstr:
        parts.append(flagstr)
    return command(" ".join(parts), **dargs)
def setmaxmem(domainarg=None, sizearg=None, domain=None,
              size=None, use_kilobytes=False, flagstr="", **dargs):
    """
    Change the maximum memory allocation for the guest domain.

    :param domainarg: Domain name (first pos. parameter)
    :param sizearg: Memory size in KiB (second. pos. parameter)
    :param domain: Option to --domain parameter
    :param size: Option to --size or --kilobytes parameter
    :param use_kilobytes: True for --kilobytes, False for --size
    :param flagstr: string of "--config, --live, --current, etc."
    :return: CmdResult instance
    :raise: process.CmdError: if libvirtd is not running.
    """
    # 'is not None' checks (rather than truthiness) allow testing "" and 0
    parts = ["setmaxmem"]
    if domainarg is not None:
        parts.append("%s" % domainarg)
    if sizearg is not None:
        parts.append("%s" % sizearg)
    if domain is not None:
        parts.append("--domain %s" % domain)
    if size is not None:
        parts.append("--kilobytes %s" % size if use_kilobytes
                     else "--size %s" % size)
    if flagstr:
        parts.append(flagstr)
    return command(" ".join(parts), **dargs)
def set_user_password(domain=None, user=None, password=None,
                      encrypted=False, option=True, **dargs):
    """
    Set a user's password inside the domain.

    :param domain: Option to --domain parameter
    :param user: Option to --user parameter
    :param password: Option to --password parameter
    :param encrypted: True to append --encrypted
    :param option: True to pass values as --domain/--user/--password,
                   False to pass them positionally
    :return: CmdResult instance
    """
    cmd = "set-user-password"
    # Each value may be passed either as a named option or positionally
    for flag, value in (("--domain", domain), ("--user", user),
                        ("--password", password)):
        if value:
            cmd += " %s %s" % (flag, value) if option else " %s" % value
    if encrypted:
        cmd += " --encrypted"
    return command(cmd, **dargs)
def snapshot_create(name, options="", **dargs):
    """
    Create a snapshot of a domain.

    :param name: name of domain
    :param options: options passed to snapshot-create
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("snapshot-create %s %s" % (name, options), **dargs)
def snapshot_edit(name, options="", **dargs):
    """
    Edit a snapshot's xml.

    :param name: name of domain
    :param options: options of snapshot-edit command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("snapshot-edit %s %s" % (name, options), **dargs)
def snapshot_create_as(name, options="", **dargs):
    """
    Create a snapshot of a domain with options.

    :param name: name of domain
    :param options: options of snapshot-create-as (skipped when None)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "snapshot-create-as %s" % name
    if options is not None:
        cmd = "%s %s" % (cmd, options)
    return command(cmd, **dargs)
def snapshot_parent(name, options, **dargs):
    """
    Get the name of a snapshot's parent.

    :param name: name of domain
    :param options: options of snapshot-parent
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("snapshot-parent %s %s" % (name, options), **dargs)
def snapshot_current(name, options="--name", **dargs):
    """
    Get the name or xml of the current snapshot.

    :param name: name of domain
    :param options: options of snapshot-current; defaults to --name,
                    skipped entirely when None
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "snapshot-current %s" % name
    if options is not None:
        cmd = "%s %s" % (cmd, options)
    return command(cmd, **dargs)
def snapshot_list(name, options=None, **dargs):
    """
    Get list of snapshots of domain.

    :param name: name of domain
    :param options: options of snapshot_list
    :param dargs: standardized virsh function API keywords
    :return: list of snapshot names
    :raise: process.CmdError when the snapshot-list command fails
    """
    # CmdResult is handled here, force ignore_status
    dargs['ignore_status'] = True
    ret = []
    cmd = "snapshot-list %s" % name
    if options is not None:
        cmd += " %s" % options
    sc_output = command(cmd, **dargs)
    if sc_output.exit_status != 0:
        raise process.CmdError(
            cmd, sc_output, "Failed to get list of snapshots")
    # Raw strings avoid invalid "\S"/"\d"/"\w" escape-sequence warnings
    # (SyntaxWarning on python 3.12). Lines look like:
    #   "<name>  2020-01-01 12:00:00 +0000 running"
    data = re.findall(r"\S* *\d*-\d*-\d* \d*:\d*:\d* [+-]\d* \w*",
                      sc_output.stdout_text)
    for rec in data:
        if not rec:
            continue
        # The snapshot name is the leading non-whitespace token
        ret.append(re.match(r"\S*", rec).group())
    return ret
def snapshot_dumpxml(name, snapshot, options=None, to_file=None, **dargs):
    """
    Get dumpxml of snapshot.

    :param name: name of domain
    :param snapshot: name of snapshot
    :param options: options of snapshot-dumpxml (skipped when None)
    :param to_file: optional file to write XML output to
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "snapshot-dumpxml %s %s" % (name, snapshot)
    if options is not None:
        cmd += " %s" % options
    result = command(cmd, **dargs)
    if to_file is not None:
        # Use stdout_text (not stdout, which may be bytes on python3) so
        # we always write text; 'with' guarantees the file is closed.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def snapshot_info(name, snapshot, **dargs):
    """
    Check snapshot information.

    :param name: name of domain
    :param snapshot: name of snapshot to verify
    :param dargs: standardized virsh function API keywords
    :return: snapshot information dictionary; the "Parent" value is
             None when the snapshot has no parent
    :raise: process.CmdError when the snapshot-info command fails
    """
    # CmdResult is handled here, force ignore_status
    dargs['ignore_status'] = True
    ret = {}
    values = ["Name", "Domain", "Current", "State", "Parent",
              "Children", "Descendants", "Metadata"]
    cmd = "snapshot-info %s %s" % (name, snapshot)
    sc_output = command(cmd, **dargs)
    if sc_output.exit_status != 0:
        raise process.CmdError(cmd, sc_output, "Failed to get snapshot info")
    for val in values:
        # Raw string avoids the invalid "\w" escape-sequence warning
        # (SyntaxWarning on python 3.12)
        data = re.search(r"(?<=%s:) *(\w.*|\w*)" % val,
                         sc_output.stdout_text)
        if data is None:
            continue
        ret[val] = data.group(0).strip()
    if ret["Parent"] == "":
        ret["Parent"] = None
    return ret
def snapshot_revert(name, snapshot, options="", **dargs):
    """
    Revert domain state to a saved snapshot.

    :param name: name of domain
    :param snapshot: snapshot to revert to
    :param options: extra options for snapshot-revert
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("snapshot-revert %s %s %s" % (name, snapshot, options),
                   **dargs)
def snapshot_delete(name, snapshot, options='', **dargs):
    """
    Remove a domain snapshot.

    :param name: name of domain
    :param snapshot: snapshot to delete
    :param options: extra options for snapshot-delete
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("snapshot-delete %s %s %s" % (name, snapshot, options),
                   **dargs)
def blockcommit(name, path, options="", **dargs):
    """
    Start a block commit operation.

    :param name: name of domain
    :param path: fully-qualified path or target of the disk
    :param options: options of blockcommit (skipped when None)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "blockcommit %s %s" % (name, path)
    if options is not None:
        cmd = "%s %s" % (cmd, options)
    return command(cmd, **dargs)
def blockpull(name, path, options="", **dargs):
    """
    Start a block pull operation.

    :param name: name of domain
    :param path: fully-qualified path or target of the disk
    :param options: options of blockpull (skipped when None)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "blockpull %s %s" % (name, path)
    if options is not None:
        cmd = "%s %s" % (cmd, options)
    return command(cmd, **dargs)
def blockresize(name, path, size, **dargs):
    """
    Resize a block device of a domain.

    :param name: name of domain
    :param path: path of block device
    :param size: new size of the block device
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "blockresize %s %s %s" % (name, path, size)
    return command(cmd, **dargs)
def domblkinfo(name, device, **dargs):
    """
    Get block device size info for a domain.

    :param name: VM's name, id or uuid
    :param device: device of VM
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "domblkinfo %s %s" % (name, device)
    return command(cmd, **dargs)
def domblklist(name, options=None, **dargs):
    """
    Get domain block devices.

    :param name: name of domain
    :param options: options of domblklist (skipped when falsy)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "domblklist %s" % name
    if options:
        cmd = "%s %s" % (cmd, options)
    return command(cmd, **dargs)
def domiflist(name, options='', extra='', **dargs):
    """
    Get the domain network devices.

    :param name: name of domain
    :param options: options of domiflist
    :param extra: extra free-form options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = 'domiflist %s %s %s' % (name, options, extra)
    return command(cmd, **dargs)
def cpu_stats(name, options, **dargs):
    """
    Display per-CPU and total statistics about a domain's CPUs.

    :param name: name of domain
    :param options: options of cpu-stats (skipped when falsy)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "cpu-stats %s" % name
    if options:
        cmd = "%s %s" % (cmd, options)
    return command(cmd, **dargs)
def change_media(name, device, options, **dargs):
    """
    Change media of a CD or floppy drive.

    :param name: VM's name
    :param device: Fully-qualified path or target of disk device
    :param options: command change-media options (skipped when falsy)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    # NOTE: trailing spaces preserved from the original command layout
    cmd = "change-media %s %s " % (name, device)
    if options:
        cmd = cmd + " %s " % options
    return command(cmd, **dargs)
def cpu_compare(xml_file, **dargs):
    """
    Compare the host CPU with a CPU described by an XML file.

    :param xml_file: file containing an XML CPU description
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "cpu-compare %s" % xml_file
    return command(cmd, **dargs)
def hypervisor_cpu_compare(xml_file, options="", **dargs):
    """
    Compare the CPU provided by the hypervisor on the host with a CPU
    described by an XML file.

    :param xml_file: file containing an XML CPU description
    :param options: extra options passed to virsh command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "hypervisor-cpu-compare %s %s" % (xml_file, options)
    return command(cmd, **dargs)
def cpu_baseline(xml_file, **dargs):
    """
    Compute a baseline CPU for a set of given CPUs.

    :param xml_file: file containing an XML CPU description
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "cpu-baseline %s" % xml_file
    return command(cmd, **dargs)
def numatune(name, mode=None, nodeset=None, options=None, **dargs):
    """
    Set or get a domain's numa parameters.

    :param name: name of domain
    :param mode: value for --mode (skipped when falsy)
    :param nodeset: value for --nodeset (skipped when falsy)
    :param options: flag such as live, config or current (skipped when falsy)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["numatune %s" % name]
    if options:
        pieces.append("--%s" % options)
    if mode:
        pieces.append("--mode %s" % mode)
    if nodeset:
        pieces.append("--nodeset %s" % nodeset)
    return command(" ".join(pieces), **dargs)
def nodedev_reset(name, options="", **dargs):
    """
    Trigger a device reset for a device node.

    :param name: device node name to be reset
    :param options: additional options passed to virsh command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("nodedev-reset --device %s %s" % (name, options), **dargs)
def ttyconsole(name, **dargs):
    """
    Print the tty console device of a domain.

    :param name: name, uuid or id of domain
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "ttyconsole %s" % name
    return command(cmd, **dargs)
def nodedev_dumpxml(name, options="", to_file=None, **dargs):
    """
    Do dumpxml for a node device.

    :param name: the name of the device
    :param options: extra options to nodedev-dumpxml cmd
    :param to_file: optional file to write XML output to
    :param dargs: standardized virsh function API keywords
    :return: CmdResult of virsh nodedev-dumpxml
    """
    cmd = ('nodedev-dumpxml %s %s' % (name, options))
    result = command(cmd, **dargs)
    if to_file is not None:
        # Use stdout_text (not stdout, which may be bytes on python3) so
        # we always write text; 'with' guarantees the file is closed.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def connect(connect_uri="", options="", **dargs):
    """
    Run a connect command to the given uri.

    :param connect_uri: target uri to connect to.
    :param options: options to pass to connect command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    cmd = "connect {} {}".format(connect_uri, options)
    return command(cmd, **dargs)
def domif_setlink(name, interface, state, options=None, **dargs):
    """
    Set the link state of a domain's network interface.

    :param name: Name of domain
    :param interface: interface device
    :param state: new state of the device, up or down
    :param options: command options.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "domif-setlink {} {} {} ".format(name, interface, state)
    if options:
        cmd += " {}".format(options)
    return command(cmd, **dargs)
def domif_getlink(name, interface, options=None, **dargs):
    """
    Get the link state of a domain's network interface.

    :param name: Name of domain
    :param interface: interface device
    :param options: command options.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult with the interface state
    """
    cmd = "domif-getlink {} {} ".format(name, interface)
    if options:
        cmd += " {}".format(options)
    return command(cmd, **dargs)
def nodedev_list(tree=False, cap="", options="", **dargs):
    """
    List the node devices.

    :param tree: list devices in a tree
    :param cap: capability names, separated by comma
    :param options: extra command options.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    pieces = ["nodedev-list"]
    if tree:
        pieces.append("--tree")
    if cap:
        pieces.append("--cap {}".format(cap))
    if options:
        pieces.append(str(options))
    return command(" ".join(pieces), **dargs)
def nodedev_detach(name, options="", **dargs):
    """
    Detach a node device from the host.

    :param name: device node name
    :param options: extra options passed to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    cmd = "nodedev-detach --device {} {}".format(name, options)
    return command(cmd, **dargs)
def nodedev_dettach(name, options="", **dargs):
    """
    Detach a node device from the host (misspelled legacy alias).

    Kept for backward compatibility; delegates to :func:`nodedev_detach`.

    :param name: device node name
    :param options: extra options passed to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    return nodedev_detach(name, options=options, **dargs)
def nodedev_reattach(name, options="", **dargs):
    """
    Reattach a previously detached node device to its device driver.

    :param name: device node name
    :param options: extra options passed to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    cmd = "nodedev-reattach --device {} {}".format(name, options)
    return command(cmd, **dargs)
def vcpucount(name, options="", **dargs):
    """
    Get the vcpu count of a guest.

    :param name: name of domain.
    :param options: options for the vcpucount command.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    return command("vcpucount {} {}".format(name, options), **dargs)
def blockcopy(name, path, dest, options="", **dargs):
    """
    Start a block copy operation.

    :param name: name of domain.
    :param path: fully-qualified path or target of disk.
    :param dest: path of the copy to create.
    :param options: options of blockcopy.
    :param dargs: standardized virsh function API keywords.
    :return: CmdResult instance.
    """
    cmd = "blockcopy {} {} {} {}".format(name, path, dest, options)
    return command(cmd, **dargs)
def blockjob(name, path, options="", **dargs):
    """
    Manage active block operations.

    :param name: name of domain.
    :param path: fully-qualified path or target of disk.
    :param options: options of blockjob.
    :param dargs: standardized virsh function API keywords.
    :return: CmdResult instance.
    """
    cmd = "blockjob {} {} {}".format(name, path, options)
    return command(cmd, **dargs)
def domiftune(name, interface, options=None, inbound=None,
              outbound=None, **dargs):
    """
    Set/get parameters of a virtual interface.

    :param name: name of domain.
    :param interface: interface device (MAC Address).
    :param options: options may be live, config and current.
    :param inbound: control domain's incoming traffics.
    :param outbound: control domain's outgoing traffics.
    :param dargs: standardized virsh function API keywords.
    :return: CmdResult instance.
    """
    pieces = ["domiftune", str(name), str(interface)]
    if inbound:
        pieces.append("--inbound {}".format(inbound))
    if outbound:
        pieces.append("--outbound {}".format(outbound))
    if options:
        pieces.append("--{}".format(options))
    return command(" ".join(pieces), **dargs)
def desc(name, options, desc_str, **dargs):
    """
    Show or modify the description or title of a domain.

    :param name: name of domain.
    :param options: options for the desc command.
    :param desc_str: new description message; appended double-quoted.
    :param dargs: standardized virsh function API keywords.
    :return: CmdResult object.
    """
    full_options = options
    if desc_str:
        full_options = '{} "{}"'.format(options, desc_str)
    return command("desc {} {}".format(name, full_options), **dargs)
def autostart(name, options, **dargs):
    """
    Configure a domain to start automatically at boot.

    :param name: name of domain
    :param options: extra options for the autostart command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    return command("autostart {} {}".format(name, options), **dargs)
def node_memtune(shm_pages_to_scan=None, shm_sleep_millisecs=None,
                 shm_merge_across_nodes=None, options=None, **dargs):
    """
    Get or set node memory parameters.

    :param shm_pages_to_scan: Pages to scan.
    :param shm_sleep_millisecs: Sleep time (ms).
    :param shm_merge_across_nodes: Merge across nodes.
    :param options: Extra options appended as "--<options>".
    :param dargs: Standardized virsh function API keywords.
    :return: CmdResult instance
    """
    pieces = ["node-memory-tune"]
    if shm_pages_to_scan:
        pieces.append("--shm-pages-to-scan {}".format(shm_pages_to_scan))
    if shm_sleep_millisecs:
        pieces.append("--shm-sleep-millisecs {}".format(shm_sleep_millisecs))
    if shm_merge_across_nodes:
        pieces.append(
            "--shm-merge-across-nodes {}".format(shm_merge_across_nodes))
    if options:
        pieces.append("--{}".format(options))
    return command(" ".join(pieces), **dargs)
def iface_list(extra="", **dargs):
    """
    List physical host interfaces.

    :param extra: Free-form string of options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-list {}".format(extra), **dargs)
def iface_define(xml_path, **dargs):
    """
    Define (but don't start) a physical host interface from an XML file.

    :param xml_path: XML file path
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-define --file {}".format(xml_path), **dargs)
def iface_start(iface, **dargs):
    """
    Start a physical host interface.

    :param iface: Interface name or MAC address
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-start {}".format(iface), **dargs)
def iface_destroy(iface, **dargs):
    """
    Destroy a physical host interface.

    :param iface: Interface name or MAC address
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-destroy {}".format(iface), **dargs)
def iface_undefine(iface, **dargs):
    """
    Undefine a physical host interface (remove it from configuration).

    :param iface: Interface name or MAC address
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-undefine {}".format(iface), **dargs)
def iface_dumpxml(iface, extra="", to_file="", **dargs):
    """
    Return interface information as an XML dump.

    :param iface: Interface name or MAC address
    :param extra: Free-form string of options
    :param to_file: Optional file to write xml
    :param dargs: standardized virsh function API keywords
    :return: standard output from command
    :raise process.CmdError: if the command exits non-zero
    """
    # Status is checked explicitly below so a precise error can be
    # raised with the failed command attached.
    dargs['ignore_status'] = True
    cmd = "iface-dumpxml %s %s" % (iface, extra)
    result = command(cmd, **dargs)
    if to_file:
        # "with" guarantees the file is closed even if write() raises;
        # stdout_text is used consistently with the return value below
        # (CmdResult.stdout may be bytes).
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    if result.exit_status:
        raise process.CmdError(cmd, result,
                               "Dumpxml returned non-zero exit status")
    return result.stdout_text.strip()
def iface_name(mac, **dargs):
    """
    Convert an interface MAC address to an interface name.

    :param mac: Interface MAC address
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-name {}".format(mac), **dargs)
def iface_mac(name, **dargs):
    """
    Convert an interface name to an interface MAC address.

    :param name: Interface name
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-mac {}".format(name), **dargs)
def iface_edit(iface, **dargs):
    """
    Edit the XML configuration for a physical host interface.

    :param iface: Interface name or MAC address
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("iface-edit {}".format(iface), **dargs)
def iface_bridge(iface, bridge, extra="", **dargs):
    """
    Create a bridge device and attach an existing network device to it.

    :param iface: Interface name or MAC address
    :param bridge: New bridge device name
    :param extra: Free-form string of options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "iface-bridge {} {} {}".format(iface, bridge, extra)
    return command(cmd, **dargs)
def iface_unbridge(bridge, extra="", **dargs):
    """
    Undefine a bridge device after detaching its slave device.

    :param bridge: Current bridge device name
    :param extra: Free-form string of options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "iface-unbridge {} {}".format(bridge, extra)
    return command(cmd, **dargs)
def iface_begin(**dargs):
    """
    Create a snapshot of the current interface settings.

    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("iface-begin", **dargs)
def iface_commit(**dargs):
    """
    Commit changes made since iface-begin and free the restore point.

    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("iface-commit", **dargs)
def iface_rollback(**dargs):
    """
    Roll back to the previous configuration saved via iface-begin.

    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("iface-rollback", **dargs)
def emulatorpin(name, cpulist=None, options=None, **dargs):
    """
    Control or query domain emulator affinity.

    :param name: name of domain
    :param cpulist: a list of physical CPU numbers
    :param options: options may be live, config and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["emulatorpin", str(name)]
    if options:
        pieces.append("--{}".format(options))
    if cpulist:
        pieces.append("--cpulist {}".format(cpulist))
    return command(" ".join(pieces), **dargs)
def secret_list(options="", **dargs):
    """
    List the secrets known to libvirt.

    :param options: extra option, e.g. '--ephemeral'
    :param dargs: standardized virsh function API keywords
    :return: CmdResult with the list of secrets
    """
    return command("secret-list {}".format(options), **dargs)
def secret_define(xml_file, options=None, **dargs):
    """
    Define a secret from an XML file.

    :param xml_file: secret XML file
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "secret-define --file {}".format(xml_file)
    if options is not None:
        cmd += " {}".format(options)
    logging.debug("Define secret from %s", xml_file)
    return command(cmd, **dargs)
def secret_undefine(uuid, options=None, **dargs):
    """
    Undefine a secret.

    :param uuid: secret UUID
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "secret-undefine {}".format(uuid)
    if options is not None:
        cmd += " {}".format(options)
    logging.debug("Undefine secret %s", uuid)
    return command(cmd, **dargs)
def secret_dumpxml(uuid, to_file="", options=None, **dargs):
    """
    Return the secret information as an XML dump.

    :param uuid: secret UUID
    :param to_file: optional file to write XML output to
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    :raise process.CmdError: if the command exits non-zero
    """
    # Status is checked explicitly below so a precise error can be
    # raised with the failed command attached.
    dargs['ignore_status'] = True
    cmd = "secret-dumpxml %s" % uuid
    if options is not None:
        cmd += " %s" % options
    result = command(cmd, **dargs)
    if to_file:
        # "with" guarantees the file is closed even if write() raises;
        # stdout_text handles CmdResult.stdout being bytes.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    if result.exit_status:
        raise process.CmdError(cmd, result,
                               "Virsh secret-dumpxml returned \
                               non-zero exit status")
    return result
def secret_get_value(uuid, options=None, **dargs):
    """
    Get a secret value.

    :param uuid: secret UUID
    :param options: extra option appended as "--<options>"
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    cmd = "secret-get-value --secret {}".format(uuid)
    if options:
        cmd += " --{}".format(options)
    return command(cmd, **dargs)
def secret_set_value(uuid, password, options=None, encode=False, **dargs):
    """
    Set a secret value.

    :param uuid: secret UUID
    :param password: secret value
    :param options: extra option appended as "--<options>"
    :param encode: if False, *password* is assumed to be base64-encoded
                   already; if True, it is base64-encoded before use.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object.
    """
    cmd = "secret-set-value --secret {}".format(uuid)
    if password:
        if encode:
            # Encode/decode with the locale's preferred charset so the
            # base64 text round-trips cleanly on this host.
            enc = locale.getpreferredencoding()
            encoded = base64.b64encode(password.encode(enc)).decode(enc)
            cmd += " --base64 {}".format(encoded)
        else:
            cmd += " --base64 {}".format(password)
    if options:
        cmd += " --{}".format(options)
    return command(cmd, **dargs)
def nodedev_create(xml_file, options=None, **dargs):
    """
    Create a node device from an XML file.

    :param xml_file: device XML file
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nodedev-create {}".format(xml_file)
    if options is not None:
        cmd += " {}".format(options)
    logging.debug("Create the device from %s", xml_file)
    return command(cmd, **dargs)
def nodedev_destroy(dev_name, options=None, **dargs):
    """
    Destroy a node device.

    :param dev_name: name of the device
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nodedev-destroy {}".format(dev_name)
    if options is not None:
        cmd += " {}".format(options)
    logging.debug("Destroy the device %s on the node", dev_name)
    return command(cmd, **dargs)
def domfstrim(name, minimum=None, mountpoint=None, options="", **dargs):
    """
    Run fstrim on a domain's mounted filesystems.

    :param name: name of domain
    :param minimum: minimum number of bytes to trim
    :param mountpoint: specific mountpoint to trim
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["domfstrim", str(name)]
    if minimum is not None:
        pieces.append("--minimum {}".format(minimum))
    if mountpoint is not None:
        pieces.append("--mountpoint {}".format(mountpoint))
    pieces.append(str(options))
    return command(" ".join(pieces), **dargs)
def domfsfreeze(name, mountpoint=None, options="", **dargs):
    """
    Freeze a domain's mounted filesystems.

    :param name: name of domain
    :param mountpoint: specific mountpoint to be frozen
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["domfsfreeze", str(name)]
    if mountpoint is not None:
        pieces.append("--mountpoint {}".format(mountpoint))
    pieces.append(str(options))
    return command(" ".join(pieces), **dargs)
def domfsthaw(name, mountpoint=None, options="", **dargs):
    """
    Thaw a domain's mounted filesystems.

    :param name: name of domain
    :param mountpoint: specific mountpoint to be thawed
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["domfsthaw", str(name)]
    if mountpoint is not None:
        pieces.append("--mountpoint {}".format(mountpoint))
    pieces.append(str(options))
    return command(" ".join(pieces), **dargs)
def domtime(name, now=False, pretty=False, sync=False, time=None,
            options="", **dargs):
    """
    Get or set a domain's time.

    :param name: name of domain
    :param now: set to the time of the host running virsh
    :param pretty: print domain's time in human readable form
    :param sync: instead of setting given time, synchronize from
                 the domain's RTC
    :param time: integer time to set
    :param options: extra options appended to the command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["domtime", str(name)]
    if now:
        pieces.append("--now")
    if pretty:
        pieces.append("--pretty")
    if sync:
        pieces.append("--sync")
    if time is not None:
        pieces.append("--time {}".format(time))
    pieces.append(str(options))
    return command(" ".join(pieces), **dargs)
def nwfilter_dumpxml(name, options="", to_file=None, **dargs):
    """
    Do dumpxml for network filter.

    :param name: the name or uuid of filter.
    :param options: extra options to nwfilter-dumpxml cmd.
    :param to_file: optional file to write XML output to.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult of virsh nwfilter-dumpxml.
    """
    cmd = ('nwfilter-dumpxml %s %s' % (name, options))
    result = command(cmd, **dargs)
    if to_file is not None:
        # "with" guarantees the file is closed even if write() raises;
        # stdout_text handles CmdResult.stdout being bytes.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def nwfilter_define(xml_file, options="", **dargs):
    """
    Define a network filter from an XML file.

    :param xml_file: network filter XML file
    :param options: extra options to nwfilter-define cmd.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nwfilter-define --file {} {}".format(xml_file, options)
    return command(cmd, **dargs)
def nwfilter_undefine(name, options="", **dargs):
    """
    Undefine a network filter.

    :param name: network filter name or uuid
    :param options: extra options to nwfilter-undefine cmd.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nwfilter-undefine {} {}".format(name, options)
    return command(cmd, **dargs)
def nwfilter_list(options="", **dargs):
    """
    Get the list of network filters.

    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult with the list of network filters
    """
    return command("nwfilter-list {}".format(options), **dargs)
def nwfilter_edit(name, options="", **dargs):
    """
    Edit the XML configuration for a network filter.

    :param name: network filter name or uuid.
    :param options: extra options to nwfilter-edit cmd.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nwfilter-edit {} {}".format(name, options)
    return command(cmd, **dargs)
def nwfilter_binding_create(name, options="", **dargs):
    """
    Associate a network port with a network filter.

    The network filter backend will immediately attempt to instantiate
    the filter rules on the port.

    :param name: binding xml file name
    :param options: extra options to the nwfilter-binding-create cmd.
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nwfilter-binding-create {} {}".format(name, options)
    return command(cmd, **dargs)
def nwfilter_binding_list(options="", **dargs):
    """
    List all network ports which have filters associated with them.

    :param options: extra options for nwfilter-binding-list
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("nwfilter-binding-list {}".format(options), **dargs)
def nwfilter_binding_dumpxml(portdev_name, options="", to_file="", **dargs):
    """
    Output the network filter binding XML for a network device.

    :param portdev_name: port device name for nwfilter_binding_dumpxml
    :param options: extra options for nwfilter_binding_dumpxml
    :param to_file: optional file to write the XML output to
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nwfilter-binding-dumpxml %s %s" % (portdev_name, options)
    result = command(cmd, **dargs)
    if to_file:
        # "with" guarantees the file is closed even if write() raises;
        # stdout_text handles CmdResult.stdout being bytes.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def nwfilter_binding_delete(portdev_name, option="", **dargs):
    """
    Disassociate a network port from a network filter.

    The network filter backend will immediately tear down the filter
    rules that exist on the port.

    :param portdev_name: port device name for nwfilter_binding_delete
    :param option: extra option for nwfilter_binding_delete
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "nwfilter-binding-delete {} {}".format(portdev_name, option)
    return command(cmd, **dargs)
def cd(dir_path, options="", **dargs):
    """
    Run the cd command in a virsh interactive session.

    :param dir_path: dir path string
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("cd --dir {} {}".format(dir_path, options), **dargs)
def pwd(options="", **dargs):
    """
    Run the pwd command in a virsh session.

    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("pwd {}".format(options), **dargs)
def echo(echo_str, options="", **dargs):
    """
    Run the echo command in a virsh session.

    :param echo_str: the echo string
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("echo {} {}".format(echo_str, options), **dargs)
def exit(**dargs):
    """
    Run the exit command in a virsh session.

    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("exit", **dargs)
def quit(**dargs):
    """
    Run the quit command in a virsh session.

    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("quit", **dargs)
def sendkey(name, keycode, codeset="", holdtime="", **dargs):
    """
    Send keycodes to the guest.

    :param name: name of domain
    :param keycode: the key code
    :param codeset: the codeset of keycodes
    :param holdtime: milliseconds for each keystroke to be held
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    pieces = ["send-key", str(name)]
    if codeset:
        pieces.append("--codeset {}".format(codeset))
    if holdtime:
        pieces.append("--holdtime {}".format(holdtime))
    pieces.append(str(keycode))
    return command(" ".join(pieces), **dargs)
def create(xmlfile, options="", **dargs):
    """
    Create (and start) a transient guest from an XML file.

    :param xmlfile: domain xml file
    :param options: extra options, e.g. --paused
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("create {} {}".format(xmlfile, options), **dargs)
def sysinfo(options="", **dargs):
    """
    Return the hypervisor sysinfo xml.

    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("sysinfo {}".format(options), **dargs)
def reset(name, **dargs):
    """
    Reset a domain.

    :param name: name of domain
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("reset {}".format(name), **dargs)
def domdisplay(name, options="", **dargs):
    """
    Get a domain's display connection URI.

    :param name: name of domain
    :param options: options of domdisplay
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("domdisplay {} {}".format(name, options), **dargs)
def domblkerror(name, **dargs):
    """
    Show errors on block devices.

    :param name: name of domain
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("domblkerror {}".format(name), **dargs)
def domcontrol(name, options="", **dargs):
    """
    Return the domain control interface state.

    :param name: name of domain
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("domcontrol {} {}".format(name, options), **dargs)
def save_image_dumpxml(state_file, options="", to_file="", **dargs):
    """
    Dump xml from a saved state file.

    :param state_file: saved state file to read
    :param options: extra options
    :param to_file: optional file to write the XML output to
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "save-image-dumpxml %s %s" % (state_file, options)
    result = command(cmd, **dargs)
    if to_file:
        # "with" guarantees the file is closed even if write() raises;
        # stdout_text handles CmdResult.stdout being bytes.
        with open(to_file, 'w') as result_file:
            result_file.write(result.stdout_text.strip())
    return result
def save_image_define(state_file, xmlfile, options="", **dargs):
    """
    Redefine the XML for a domain's saved state file.

    :param state_file: saved state file to modify
    :param xmlfile: filename containing updated XML for the target
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "save-image-define {} {} {}".format(state_file, xmlfile, options)
    return command(cmd, **dargs)
def inject_nmi(name, options="", **dargs):
    """
    Inject an NMI into the guest.

    :param name: domain name
    :param options: extra options
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("inject-nmi {} {}".format(name, options), **dargs)
def vol_download(name, dfile, options="", **dargs):
    """
    Download volume contents to a file.

    :param name: name of volume
    :param dfile: file path that will be downloaded to
    :param options: pool name, offset and length
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-download {} {} {}".format(name, dfile, options)
    return command(cmd, **dargs)
def vol_upload(name, dfile, options="", **dargs):
    """
    Upload file contents to a volume.

    :param name: name of volume
    :param dfile: file path that will be uploaded from
    :param options: pool name, offset and length
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    cmd = "vol-upload {} {} {}".format(name, dfile, options)
    return command(cmd, **dargs)
def blkiotune(name, weight=None, device_weights=None, options=None, **dargs):
    """
    Set or get a domain's blkio parameters.

    :param name: name of domain
    :param weight: IO weight to set
    :param device_weights: per-device IO weights
    :param options: options may be live, config and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["blkiotune", str(name)]
    if options:
        pieces.append("--{}".format(options))
    if weight:
        pieces.append("--weight {}".format(weight))
    if device_weights:
        pieces.append("--device-weights {}".format(device_weights))
    return command(" ".join(pieces), **dargs)
def blkdeviotune(name, device=None, options=None, params=None, **dargs):
    """
    Set or get a domain's block device I/O tuning parameters.

    :param name: name of domain
    :param device: device name, may be vda, vdb and so on
    :param options: options may be live, config and current
    :param params: dict of tuning parameters; keys use underscores
                   (e.g. "total_iops_sec") and map to the matching
                   dashed virsh flags (e.g. --total-iops-sec)
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    # Supported keys, in the order the flags are emitted.  Each virsh
    # flag is exactly the key with underscores replaced by dashes.
    # (This also fixes a bug where total_bytes_sec_max_length emitted
    # --total-bytes-sec-max instead of --total-bytes-sec-max-length.)
    _BLKDEV_KEYS = ("total_iops_sec", "read_iops_sec", "write_iops_sec",
                    "total_iops_sec_max", "read_iops_sec_max",
                    "write_iops_sec_max", "total_iops_sec_max_length",
                    "read_iops_sec_max_length", "write_iops_sec_max_length",
                    "total_bytes_sec", "read_bytes_sec", "write_bytes_sec",
                    "total_bytes_sec_max", "read_bytes_sec_max",
                    "write_bytes_sec_max", "total_bytes_sec_max_length",
                    "read_bytes_sec_max_length", "write_bytes_sec_max_length",
                    "size_iops_sec", "group_name")
    cmd = "blkdeviotune %s" % name
    if options:
        cmd += " %s" % options
    if device:
        cmd += " --device %s" % device
    if params:
        for key in _BLKDEV_KEYS:
            value = params.get(key)
            if value:
                cmd += " --%s %s" % (key.replace("_", "-"), value)
    return command(cmd, **dargs)
def perf(domain, options="", events="", other_opt="", **dargs):
    """
    Enable or disable perf events.

    :param domain: Domain name, id
    :param options: --enable | --disable
    :param events: perf event names separated by comma
    :param other_opt: --config | --live | --current
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "perf {} {} {} {}".format(domain, options, events, other_opt)
    return command(cmd, **dargs)
def domstats(domains="", options="", **dargs):
    """
    Get statistics about one or multiple domains.

    :param domains: List of domains
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domstats {} {}".format(domains, options), **dargs)
def freepages(cellno=None, pagesize=None, options="", **dargs):
    """
    Display available free pages for the NUMA cell.

    :param cellno: NUMA cell number
    :param pagesize: Page size (in kibibytes)
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["freepages", str(options)]
    if cellno is not None:
        pieces.append("--cellno {}".format(cellno))
    if pagesize is not None:
        pieces.append("--pagesize {}".format(pagesize))
    return command(" ".join(pieces), **dargs)
def domcapabilities(virttype=None, emulatorbin=None, arch=None, machine=None,
                    options="", **dargs):
    """
    Capabilities of the emulator with respect to host and libvirt.

    :param virttype: Virtualization type (/domain/@type)
    :param emulatorbin: Path to emulator binary (/domain/devices/emulator)
    :param arch: Domain architecture (/domain/os/type/@arch)
    :param machine: machine type (/domain/os/type/@machine)
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["domcapabilities", str(options)]
    if virttype:
        pieces.append("--virttype {}".format(virttype))
    if emulatorbin:
        pieces.append("--emulatorbin {}".format(emulatorbin))
    if arch:
        pieces.append("--arch {}".format(arch))
    if machine:
        pieces.append("--machine {}".format(machine))
    return command(" ".join(pieces), **dargs)
def metadata(name, uri, options="", key=None, new_metadata=None, **dargs):
    """
    Show or set a domain's custom XML Metadata.

    :param name: Domain name, id or uuid
    :param uri: URI of the namespace
    :param options: options may be live, config and current
    :param key: Key to be used as a namespace identifier
    :param new_metadata: new metadata to set; single quotes are replaced
                         with double quotes before quoting for the shell
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["metadata", "--domain", str(name), "--uri", str(uri),
              str(options)]
    if key:
        pieces.append("--key {}".format(key))
    if new_metadata:
        pieces.append("--set '{}'".format(new_metadata.replace("'", '"')))
    return command(" ".join(pieces), **dargs)
def cpu_models(arch, options="", **dargs):
    """
    Get the CPU models for an arch.

    :param arch: Architecture
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("cpu-models {} {}".format(arch, options), **dargs)
def net_dhcp_leases(network, mac=None, options="", **dargs):
    """
    Print lease info for a given network.

    :param network: Network name or uuid
    :param mac: Mac address
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    cmd = "net-dhcp-leases {} {}".format(network, options)
    if mac:
        cmd += " --mac {}".format(mac)
    return command(cmd, **dargs)
def qemu_monitor_event(domain=None, event=None, event_timeout=None,
                       options="", **dargs):
    """
    Listen for QEMU Monitor Events.

    :param domain: Domain name, id or UUID
    :param event: Event type name
    :param event_timeout: Timeout seconds
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["qemu-monitor-event", str(options)]
    if domain:
        pieces.append("--domain {}".format(domain))
    if event:
        pieces.append("--event {}".format(event))
    if event_timeout:
        pieces.append("--timeout {}".format(event_timeout))
    return command(" ".join(pieces), **dargs)
def net_event(network=None, event=None, event_timeout=None, options="",
              **dargs):
    """
    List event types, or wait for network events to occur.

    :param network: Network name or uuid
    :param event: Event type to wait for
    :param event_timeout: Timeout seconds
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["net-event", str(options)]
    if network:
        pieces.append("--network {}".format(network))
    if event:
        pieces.append("--event {}".format(event))
    if event_timeout:
        pieces.append("--timeout {}".format(event_timeout))
    return command(" ".join(pieces), **dargs)
def event(domain=None, event=None, event_timeout=None, options="", **dargs):
    """
    List event types, or wait for domain events to occur.

    :param domain: Domain name, id or UUID
    :param event: Event type name
    :param event_timeout: Timeout seconds
    :param options: Extra options
    :param dargs: Standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["event", str(options)]
    if domain:
        pieces.append("--domain {}".format(domain))
    if event:
        pieces.append("--event {}".format(event))
    if event_timeout:
        pieces.append("--timeout {}".format(event_timeout))
    return command(" ".join(pieces), **dargs)
def move_mouse(name, coordinate, **dargs):
    """
    Move VM mouse.

    :param name: domain name
    :param coordinate: Mouse coordinate; must be a 2-tuple (x, y) —
                       it is interpolated into two "%s" placeholders
                       below, so any other shape raises TypeError
    :param dargs: standardized virsh function API keywords
    """
    # "%s %s" % coordinate relies on *coordinate* being exactly a
    # 2-tuple; a list or differently-sized sequence would fail here.
    cmd = "mouse_move %s %s" % coordinate
    qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs)
    # Sleep 1 sec to make sure VM received mouse move event
    time.sleep(1)
def click_button(name, left_button=True, **dargs):
    """Click the left or right button of the VM mouse.

    :param name: domain name
    :param left_button: click the left button if True, otherwise the right
    """
    # QEMU HMP button states: 1 == left button down, 4 == right button down.
    pressed = 1 if left_button else 4
    qemu_monitor_command(name=name, cmd="mouse_button %s" % pressed,
                         options='--hmp', **dargs)
    # Wait for the guest to register the press, then release (state 0).
    time.sleep(1)
    qemu_monitor_command(name=name, cmd="mouse_button 0", options='--hmp', **dargs)
    time.sleep(1)
def iothreadadd(name, thread_id, options=None, **dargs):
    """Add an IOThread to the guest domain.

    :param name: domain name
    :param thread_id: domain iothread ID
    :param options: may be live, config and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    base = "iothreadadd %s %s" % (name, thread_id)
    if options:
        base = "%s %s" % (base, options)
    return command(base, **dargs)
def iothreaddel(name, thread_id, options=None, **dargs):
    """Delete an IOThread from the guest domain.

    :param name: domain name
    :param thread_id: domain iothread ID
    :param options: may be live, config and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    base = "iothreaddel %s %s" % (name, thread_id)
    if options:
        base = "%s %s" % (base, options)
    return command(base, **dargs)
def iothreadinfo(name, options=None, **dargs):
    """View domain IOThreads.

    :param name: domain name
    :param options: may be live, config and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    base = "iothreadinfo %s" % name
    if options:
        base = "%s %s" % (base, options)
    return command(base, **dargs)
def iothreadpin(name, thread_id, cpuset, options=None, **dargs):
    """Control domain IOThread affinity.

    :param name: domain name
    :param thread_id: domain iothread ID
    :param cpuset: host cpu number(s) to pin the iothread to
    :param options: may be live, config and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    base = "iothreadpin %s %s %s" % (name, thread_id, cpuset)
    if options:
        base = "%s %s" % (base, options)
    return command(base, **dargs)
def iothreadset(name, thread_id, values, options="", **dargs):
    """Modify an existing iothread of the domain identified by thread_id.

    :param name: domain name
    :param thread_id: domain iothread ID
    :param values: the values to be set
    :param options: may be live and current
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("iothreadset %s %s %s %s" % (name, thread_id, values, options),
                   **dargs)
def domrename(domain, new_name, options="", **dargs):
    """Rename an inactive domain.

    :param domain: domain name, id or uuid
    :param new_name: new domain name
    :param options: extra options for the domrename command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    return command("domrename %s %s %s" % (domain, new_name, options), **dargs)
def nodedev_event(event=None, event_timeout=None, options="", **dargs):
    """List event types, or wait for nodedevice events to occur.

    :param event: event type to wait for
    :param event_timeout: timeout in seconds
    :param options: extra options appended after the command name
    :param dargs: standardized virsh function API keywords
    :return: CmdResult instance
    """
    pieces = ["nodedev-event %s" % options]
    if event:
        pieces.append("--event %s" % event)
    if event_timeout:
        pieces.append("--timeout %s" % event_timeout)
    return command(" ".join(pieces), **dargs)
def backup_begin(name, options="", **dargs):
    """Begin a domain backup job.

    :param name: name of domain
    :param options: extra options for the backup-begin command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("backup-begin %s %s" % (name, options), **dargs)
def backup_dumpxml(name, **dargs):
    """Dump the XML of the domain's running backup job.

    :param name: name of domain
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("backup-dumpxml %s" % name, **dargs)
def checkpoint_create(name, options="", **dargs):
    """Create a domain checkpoint from an XML description.

    :param name: name of domain
    :param options: extra options for the checkpoint-create command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-create %s %s" % (name, options), **dargs)
def checkpoint_create_as(name, options="", **dargs):
    """Create a domain checkpoint from command-line arguments.

    :param name: name of domain
    :param options: extra options for the checkpoint-create-as command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-create-as %s %s" % (name, options), **dargs)
def checkpoint_edit(name, checkpoint, **dargs):
    """Edit the XML of a domain checkpoint.

    :param name: name of domain
    :param checkpoint: name of checkpoint
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-edit %s %s" % (name, checkpoint), **dargs)
def checkpoint_info(name, checkpoint, **dargs):
    """Output basic information about a checkpoint.

    :param name: name of domain
    :param checkpoint: name of checkpoint
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-info %s %s" % (name, checkpoint), **dargs)
def checkpoint_list(name, options="", **dargs):
    """List the domain's checkpoint(s).

    :param name: name of domain
    :param options: extra options for the checkpoint-list command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-list %s %s" % (name, options), **dargs)
def checkpoint_dumpxml(name, checkpoint, options="", **dargs):
    """Dump a domain checkpoint's XML.

    :param name: name of domain
    :param checkpoint: name of checkpoint
    :param options: extra options for the checkpoint-dumpxml command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-dumpxml %s %s %s" % (name, checkpoint, options),
                   **dargs)
def checkpoint_parent(name, checkpoint, **dargs):
    """Output the name of a checkpoint's parent checkpoint.

    :param name: name of domain
    :param checkpoint: name of checkpoint
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-parent %s %s" % (name, checkpoint), **dargs)
def checkpoint_delete(name, checkpoint, options="", **dargs):
    """Delete a domain checkpoint.

    :param name: name of domain
    :param checkpoint: name of checkpoint
    :param options: extra options for the checkpoint-delete command
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
    """
    return command("checkpoint-delete %s %s %s" % (name, checkpoint, options),
                   **dargs)
| clebergnu/avocado-vt | virttest/virsh.py | Python | gpl-2.0 | 152,004 |
# -*- coding: utf-8 -*-
import os,re
import xbmc,xbmcgui,xbmcplugin
import addon, requests
# Helper that fetches the content sub-groups (sub-folders) for a section
def getGroups(section,USER):
    """Fetch the content sub-group ids and names of a section.

    Returns a dict mapping group id (int) to group name (str).
    """
    url = section + "/get_data.php"
    payload = {"user_id": USER['ID'],
               "soho_user": USER['SOHO_USER']
               }
    html = requests.html_req(url, payload)
    prefix = section + '-groups-'
    result = {}
    # Each group lives in a <div id="<section>-groups-<N>">name</div> node.
    for node in html.findAll('div', {'id': re.compile("^(%s)-groups-\d" % section)}):
        result[int(node['id'].replace(prefix, ''))] = node.text
    return result
def listing(fanart, thumbpath, params,USER):
    """Build and publish the Kodi directory listing for a section's groups.

    The first (lowest id) group is mapped to the special 'collections'
    group and the last (highest id) group to 'search'.
    """
    listing = []
    is_folder = True # isFolder=True means the entry is a (virtual) folder
    section = params['section']
    groups = getGroups(section,USER) # parse the html containing the groups
    xbmc.log("groups: %s" % groups)
    for group in groups:
        thumb= os.path.join(thumbpath, addon.sections[section]['thumbnail'])
        # URL passed as the parameter of the recursive plugin invocation.
        if group == sorted(groups)[-1]:
            url = addon.addon_url + '?section=' + section + "&group=search" + "&page=1"
        elif group == sorted(groups)[0]:
            url = addon.addon_url + '?section=' + section + "&group=collections" + "&page=1"
        else:
            url = addon.addon_url + '?section=' + section + "&group=" + str(group) + "&page=1"
        # Create the list item.
        list_item = xbmcgui.ListItem(label=groups[group], thumbnailImage=thumb)
        # Set additional properties.
        list_item.setProperty('fanart_image', fanart)
        # is_folder = True
        listing.append((url, list_item, is_folder))
        # Append the element to the list; isFolder=True marks a (virtual) folder.
    xbmcplugin.addDirectoryItems(addon.addon_id,listing,len(listing))
    # End of the directory listing.
    xbmcplugin.endOfDirectory(addon.addon_id)
# coding: utf-8
"""zmq Socket class"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import errno as errno_mod
from ._cffi import (C, ffi, new_uint64_pointer, new_int64_pointer,
new_int_pointer, new_binary_data, value_uint64_pointer,
value_int64_pointer, value_int_pointer, value_binary_data,
IPC_PATH_MAX_LEN)
from .message import Frame
from .constants import RCVMORE
from .utils import _retry_sys_call
import zmq
from zmq.error import ZMQError, _check_rc, _check_version
from zmq.utils.strtypes import unicode
def new_pointer_from_opt(option, length=0):
    """Allocate a fresh C pointer sized for the given sockopt's value type."""
    from zmq.sugar.constants import (
        int64_sockopts, bytes_sockopts,
    )

    if option in int64_sockopts:
        return new_int64_pointer()
    if option in bytes_sockopts:
        return new_binary_data(length)
    # everything else is treated as a plain C int
    return new_int_pointer()
def value_from_opt_pointer(option, opt_pointer, length=0):
    """Convert a filled C pointer back into the Python sockopt value."""
    from zmq.sugar.constants import (
        int64_sockopts, bytes_sockopts,
    )

    if option in int64_sockopts:
        return int(opt_pointer[0])
    if option in bytes_sockopts:
        # binary options come back as raw bytes of the reported length
        return ffi.buffer(opt_pointer, length)[:]
    return int(opt_pointer[0])
def initialize_opt_pointer(option, value, length=0):
    """Build an initialized C pointer holding `value` for the given sockopt."""
    from zmq.sugar.constants import (
        int64_sockopts, bytes_sockopts,
    )

    if option in int64_sockopts:
        return value_int64_pointer(value)
    if option in bytes_sockopts:
        return value_binary_data(value, length)
    # default: plain C int option
    return value_int_pointer(value)
class Socket(object):
    """CFFI backend implementation of the 0MQ Socket.

    Wraps a raw ``void*`` libzmq socket handle.  Unless created as a
    "shadow" of an existing handle, the socket is registered with its
    owning Context so the context can clean it up on termination.
    """
    # class-level defaults; real values are assigned in __init__
    context = None
    socket_type = None
    _zmq_socket = None
    _closed = None
    _ref = None
    _shadow = False
    def __init__(self, context=None, socket_type=None, shadow=None):
        """Create a new socket of `socket_type` in `context`, or wrap
        (shadow) an existing libzmq socket whose address is `shadow`."""
        self.context = context
        if shadow is not None:
            # shadow: reuse an externally owned handle, do not create one
            self._zmq_socket = ffi.cast("void *", shadow)
            self._shadow = True
        else:
            self._shadow = False
            self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type)
            if self._zmq_socket == ffi.NULL:
                raise ZMQError()
        self._closed = False
        if context:
            # register so the context can close us when it terminates
            self._ref = context._add_socket(self)
    @property
    def underlying(self):
        """The address of the underlying libzmq socket"""
        return int(ffi.cast('size_t', self._zmq_socket))
    @property
    def closed(self):
        # True once close() has run (or the socket was never usable)
        return self._closed
    def close(self, linger=None):
        """Close the socket, optionally setting LINGER first.

        Idempotent; returns the libzmq return code (0 if already closed).
        """
        rc = 0
        if not self._closed and hasattr(self, '_zmq_socket'):
            if self._zmq_socket is not None:
                if linger is not None:
                    self.set(zmq.LINGER, linger)
                rc = C.zmq_close(self._zmq_socket)
            self._closed = True
            if self.context:
                self.context._rm_socket(self._ref)
        return rc
    def bind(self, address):
        """Bind the socket to `address` (str or bytes)."""
        if isinstance(address, unicode):
            address = address.encode('utf8')
        rc = C.zmq_bind(self._zmq_socket, address)
        if rc < 0:
            if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG:
                # provide a friendlier error for over-long ipc:// paths
                # py3compat: address is bytes, but msg wants str
                if str is unicode:
                    address = address.decode('utf-8', 'replace')
                path = address.split('://', 1)[-1]
                msg = ('ipc path "{0}" is longer than {1} '
                       'characters (sizeof(sockaddr_un.sun_path)).'
                       .format(path, IPC_PATH_MAX_LEN))
                raise ZMQError(C.zmq_errno(), msg=msg)
            else:
                _check_rc(rc)
    def unbind(self, address):
        """Unbind the socket from `address` (requires libzmq >= 3.2)."""
        _check_version((3,2), "unbind")
        if isinstance(address, unicode):
            address = address.encode('utf8')
        rc = C.zmq_unbind(self._zmq_socket, address)
        _check_rc(rc)
    def connect(self, address):
        """Connect the socket to `address` (str or bytes)."""
        if isinstance(address, unicode):
            address = address.encode('utf8')
        rc = C.zmq_connect(self._zmq_socket, address)
        _check_rc(rc)
    def disconnect(self, address):
        """Disconnect the socket from `address` (requires libzmq >= 3.2)."""
        _check_version((3,2), "disconnect")
        if isinstance(address, unicode):
            address = address.encode('utf8')
        rc = C.zmq_disconnect(self._zmq_socket, address)
        _check_rc(rc)
    def set(self, option, value):
        """Set socket option `option` to `value` (int or bytes)."""
        length = None
        if isinstance(value, unicode):
            raise TypeError("unicode not allowed, use bytes")
        if isinstance(value, bytes):
            if option not in zmq.constants.bytes_sockopts:
                raise TypeError("not a bytes sockopt: %s" % option)
            length = len(value)
        c_data = initialize_opt_pointer(option, value, length)
        c_value_pointer = c_data[0]
        c_sizet = c_data[1]
        # retry on EINTR so signals don't abort the syscall
        _retry_sys_call(C.zmq_setsockopt,
                        self._zmq_socket,
                        option,
                        ffi.cast('void*', c_value_pointer),
                        c_sizet)
    def get(self, option):
        """Return the current value of socket option `option`."""
        c_data = new_pointer_from_opt(option, length=255)
        c_value_pointer = c_data[0]
        c_sizet_pointer = c_data[1]
        _retry_sys_call(C.zmq_getsockopt,
                        self._zmq_socket,
                        option,
                        c_value_pointer,
                        c_sizet_pointer)
        sz = c_sizet_pointer[0]
        v = value_from_opt_pointer(option, c_value_pointer, sz)
        # strip trailing NUL from bytes options, except IDENTITY where a
        # trailing NUL may be meaningful
        if option != zmq.IDENTITY and option in zmq.constants.bytes_sockopts and v.endswith(b'\0'):
            v = v[:-1]
        return v
    def send(self, message, flags=0, copy=False, track=False):
        """Send `message` (bytes or Frame) on the socket.

        NOTE(review): default copy=False differs from pyzmq's usual
        copy=True default -- confirm this is intended for this backend.
        """
        if isinstance(message, unicode):
            raise TypeError("Message must be in bytes, not an unicode Object")
        if isinstance(message, Frame):
            message = message.bytes
        zmq_msg = ffi.new('zmq_msg_t*')
        c_message = ffi.new('char[]', message)
        rc = C.zmq_msg_init_size(zmq_msg, len(message))
        _check_rc(rc)
        # copy the payload into the freshly allocated zmq message
        C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
        _retry_sys_call(C.zmq_msg_send, zmq_msg, self._zmq_socket, flags)
        rc2 = C.zmq_msg_close(zmq_msg)
        _check_rc(rc2)
        if track:
            return zmq.MessageTracker()
    def recv(self, flags=0, copy=True, track=False):
        """Receive one message part; return bytes (copy=True) or a Frame."""
        zmq_msg = ffi.new('zmq_msg_t*')
        C.zmq_msg_init(zmq_msg)
        try:
            _retry_sys_call(C.zmq_msg_recv, zmq_msg, self._zmq_socket, flags)
        except Exception:
            # make sure the C message is released even on error/EAGAIN
            C.zmq_msg_close(zmq_msg)
            raise
        _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
        value = _buffer[:]
        rc = C.zmq_msg_close(zmq_msg)
        _check_rc(rc)
        frame = Frame(value, track=track)
        # record whether more parts of a multipart message follow
        frame.more = self.getsockopt(RCVMORE)
        if copy:
            return frame.bytes
        else:
            return frame
    def monitor(self, addr, events=-1):
        """s.monitor(addr, flags)

        Start publishing socket events on inproc.
        See libzmq docs for zmq_monitor for details.

        Note: requires libzmq >= 3.2

        Parameters
        ----------
        addr : str
            The inproc url used for monitoring. Passing None as
            the addr will cause an existing socket monitor to be
            deregistered.
        events : int [default: zmq.EVENT_ALL]
            The zmq event bitmask for which events will be sent to the monitor.
        """
        _check_version((3,2), "monitor")
        if events < 0:
            events = zmq.EVENT_ALL
        if addr is None:
            addr = ffi.NULL
        if isinstance(addr, unicode):
            addr = addr.encode('utf8')
        # NOTE(review): rc is not passed through _check_rc here, so
        # failures from zmq_socket_monitor are silently ignored -- confirm.
        rc = C.zmq_socket_monitor(self._zmq_socket, addr, events)
__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
| swn1/pyzmq | zmq/backend/cffi/socket.py | Python | bsd-3-clause | 8,000 |
# Prefer setuptools; fall back to distutils on minimal installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Trove classifiers describing the supported environments.
CLASSIFIERS=[
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: BSD License',
    'Framework :: Django',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Internet :: WWW/HTTP',
]

# Package metadata; the long description is read from README.rst at build time.
setup(
    name='django-pyodbc-azure',
    version='2.1.0.0',
    description='Django backend for Microsoft SQL Server and Azure SQL Database using pyodbc',
    long_description=open('README.rst').read(),
    author='Michiya Takahashi',
    author_email='michiya.takahashi@gmail.com',
    url='https://github.com/michiya/django-pyodbc-azure',
    license='BSD',
    packages=['sql_server', 'sql_server.pyodbc'],
    install_requires=[
        'Django>=2.1.0,<2.2',
        'pyodbc>=3.0',
    ],
    classifiers=CLASSIFIERS,
    keywords='azure django',
)
| michiya/django-pyodbc-azure | setup.py | Python | bsd-3-clause | 1,032 |
from __future__ import division
from sympy import (Abs, I, Dummy, Rational, Float, S, Symbol, cos, oo, pi,
simplify, sin, sqrt, symbols, Derivative, asin, acos)
from sympy.geometry import (Circle, Curve, Ellipse, GeometryError, Line, Point,
Polygon, Ray, RegularPolygon, Segment, Triangle,
are_similar, convex_hull, intersection,
Point3D, Line3D, Ray3D, Segment3D, Plane, centroid)
from sympy.matrices import Matrix
from sympy.utilities.pytest import raises
# Shared real-valued symbols reused as generic coordinates across the tests.
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
t = Symbol('t', real=True)
k = Symbol('k', real=True)
x1 = Symbol('x1', real=True)
x2 = Symbol('x2', real=True)
x3 = Symbol('x3', real=True)
y1 = Symbol('y1', real=True)
y2 = Symbol('y2', real=True)
y3 = Symbol('y3', real=True)
z1 = Symbol('z1', real=True)
z2 = Symbol('z2', real=True)
z3 = Symbol('z3', real=True)
# Convenient rational constant used by midpoint checks.
half = Rational(1, 2)
def feq(a, b):
    """Test if two floating point values are 'equal' within 1e-10."""
    tol = Float("1.0E-10")
    diff = a - b
    return -tol < diff < tol
def test_point():
    """Exercise 2D Point arithmetic, predicates and transformations."""
    p1 = Point(x1, x2)
    p2 = Point(y1, y2)
    p3 = Point(0, 0)
    p4 = Point(1, 1)
    p5 = Point(0, 1)
    # basic arithmetic and containment
    assert p1 in p1
    assert p1 not in p2
    assert p2.y == y2
    assert (p3 + p4) == p4
    assert (p2 - p1) == Point(y1 - x1, y2 - x2)
    assert p4*5 == Point(5, 5)
    assert -p2 == Point(-y1, -y2)
    # imaginary coordinates are rejected
    raises(ValueError, lambda: Point(3, I))
    raises(ValueError, lambda: Point(2*I, I))
    raises(ValueError, lambda: Point(3 + I, I))
    assert Point(34.05, sqrt(3)) == Point(Rational(681, 20), sqrt(3))
    # midpoints and distances
    assert Point.midpoint(p3, p4) == Point(half, half)
    assert Point.midpoint(p1, p4) == Point(half + half*x1, half + half*x2)
    assert Point.midpoint(p2, p2) == p2
    assert p2.midpoint(p2) == p2
    assert Point.distance(p3, p4) == sqrt(2)
    assert Point.distance(p1, p1) == 0
    assert Point.distance(p3, p2) == sqrt(p2.x**2 + p2.y**2)
    # collinearity checks, including non-Point input
    p1_1 = Point(x1, x1)
    p1_2 = Point(y2, y2)
    p1_3 = Point(x1 + 1, x1)
    assert Point.is_collinear(p3)
    assert Point.is_collinear(p3, p4)
    assert Point.is_collinear(p3, p4, p1_1, p1_2)
    assert Point.is_collinear(p3, p4, p1_1, p1_3) is False
    assert Point.is_collinear(p3, p3, p4, p5) is False
    line = Line(Point(1,0), slope = 1)
    raises(TypeError, lambda: Point.is_collinear(line))
    raises(TypeError, lambda: p1_1.is_collinear(line))
    assert p3.intersection(Point(0, 0)) == [p3]
    assert p3.intersection(p4) == []
    # concyclicity on a symbolic-radius circle
    x_pos = Symbol('x', real=True, positive=True)
    p2_1 = Point(x_pos, 0)
    p2_2 = Point(0, x_pos)
    p2_3 = Point(-x_pos, 0)
    p2_4 = Point(0, -x_pos)
    p2_5 = Point(x_pos, 5)
    assert Point.is_concyclic(p2_1)
    assert Point.is_concyclic(p2_1, p2_2)
    assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_4)
    assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_5) is False
    assert Point.is_concyclic(p4, p4 * 2, p4 * 3) is False
    # scaling, rotation and operator protocol
    assert p4.scale(2, 3) == Point(2, 3)
    assert p3.scale(2, 3) == p3
    assert p4.rotate(pi, Point(0.5, 0.5)) == p3
    assert p1.__radd__(p2) == p1.midpoint(p2).scale(2, 2)
    assert (-p3).__rsub__(p4) == p3.midpoint(p4).scale(2, 2)
    assert p4 * 5 == Point(5, 5)
    assert p4 / 5 == Point(0.2, 0.2)
    raises(ValueError, lambda: Point(0, 0) + 10)
    # Point differences should be simplified
    assert Point(x*(x - 1), y) - Point(x**2 - x, y + 1) == Point(0, -1)
    a, b = Rational(1, 2), Rational(1, 3)
    assert Point(a, b).evalf(2) == \
        Point(a.n(2), b.n(2))
    raises(ValueError, lambda: Point(1, 2) + 1)
    # test transformations
    p = Point(1, 0)
    assert p.rotate(pi/2) == Point(0, 1)
    assert p.rotate(pi/2, p) == p
    p = Point(1, 1)
    assert p.scale(2, 3) == Point(2, 3)
    assert p.translate(1, 2) == Point(2, 3)
    assert p.translate(1) == Point(2, 1)
    assert p.translate(y=1) == Point(1, 2)
    assert p.translate(*p.args) == Point(2, 2)
    # Check invalid input for transform
    raises(ValueError, lambda: p3.transform(p3))
    raises(ValueError, lambda: p.transform(Matrix([[1, 0], [0, 1]])))
def test_point3D():
    """Exercise 3D Point arithmetic, predicates and transformations."""
    p1 = Point3D(x1, x2, x3)
    p2 = Point3D(y1, y2, y3)
    p3 = Point3D(0, 0, 0)
    p4 = Point3D(1, 1, 1)
    p5 = Point3D(0, 1, 2)
    # basic arithmetic and containment
    assert p1 in p1
    assert p1 not in p2
    assert p2.y == y2
    assert (p3 + p4) == p4
    assert (p2 - p1) == Point3D(y1 - x1, y2 - x2, y3 - x3)
    assert p4*5 == Point3D(5, 5, 5)
    assert -p2 == Point3D(-y1, -y2, -y3)
    assert Point(34.05, sqrt(3)) == Point(Rational(681, 20), sqrt(3))
    # midpoints and distances
    assert Point3D.midpoint(p3, p4) == Point3D(half, half, half)
    assert Point3D.midpoint(p1, p4) == Point3D(half + half*x1, half + half*x2,
                                         half + half*x3)
    assert Point3D.midpoint(p2, p2) == p2
    assert p2.midpoint(p2) == p2
    assert Point3D.distance(p3, p4) == sqrt(3)
    assert Point3D.distance(p1, p1) == 0
    assert Point3D.distance(p3, p2) == sqrt(p2.x**2 + p2.y**2 + p2.z**2)
    p1_1 = Point3D(x1, x1, x1)
    p1_2 = Point3D(y2, y2, y2)
    p1_3 = Point3D(x1 + 1, x1, x1)
    # according to the description in the docs, points are collinear
    # if they lie on a single line. Thus a single point should always
    # be collinear
    assert Point3D.are_collinear(p3)
    assert Point3D.are_collinear(p3, p4)
    assert Point3D.are_collinear(p3, p4, p1_1, p1_2)
    assert Point3D.are_collinear(p3, p4, p1_1, p1_3) is False
    assert Point3D.are_collinear(p3, p3, p4, p5) is False
    assert p3.intersection(Point3D(0, 0, 0)) == [p3]
    assert p3.intersection(p4) == []
    assert p4 * 5 == Point3D(5, 5, 5)
    assert p4 / 5 == Point3D(0.2, 0.2, 0.2)
    raises(ValueError, lambda: Point3D(0, 0, 0) + 10)
    # Point differences should be simplified
    assert Point3D(x*(x - 1), y, 2) - Point3D(x**2 - x, y + 1, 1) == \
        Point3D(0, -1, 1)
    a, b = Rational(1, 2), Rational(1, 3)
    assert Point(a, b).evalf(2) == \
        Point(a.n(2), b.n(2))
    raises(ValueError, lambda: Point(1, 2) + 1)
    # test transformations
    p = Point3D(1, 1, 1)
    assert p.scale(2, 3) == Point3D(2, 3, 1)
    assert p.translate(1, 2) == Point3D(2, 3, 1)
    assert p.translate(1) == Point3D(2, 1, 1)
    assert p.translate(z=1) == Point3D(1, 1, 2)
    assert p.translate(*p.args) == Point3D(2, 2, 2)
    # Test __new__
    assert Point3D(Point3D(1, 2, 3), 4, 5, evaluate=False) == Point3D(1, 2, 3)
    # Test length property returns correctly
    assert p.length == 0
    assert p1_1.length == 0
    assert p1_2.length == 0
    # Test are_colinear type error
    raises(TypeError, lambda: Point3D.are_collinear(p, x))
    # Test are_coplanar
    planar2 = Point3D(1, -1, 1)
    planar3 = Point3D(-1, 1, 1)
    assert Point3D.are_coplanar(p, planar2, planar3) == True
    assert Point3D.are_coplanar(p, planar2, planar3, p3) == False
    raises(ValueError, lambda: Point3D.are_coplanar(p, planar2))
    planar2 = Point3D(1, 1, 2)
    planar3 = Point3D(1, 1, 3)
    raises(ValueError, lambda: Point3D.are_coplanar(p, planar2, planar3))
    # Test Intersection
    assert planar2.intersection(Line3D(p, planar3)) == [Point3D(1, 1, 2)]
    # Test Scale
    assert planar2.scale(1, 1, 1) == planar2
    assert planar2.scale(2, 2, 2, planar3) == Point3D(1, 1, 1)
    assert planar2.scale(1, 1, 1, p3) == planar2
    # Test Transform
    identity = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
    assert p.transform(identity) == p
    trans = Matrix([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]])
    assert p.transform(trans) == Point3D(2, 2, 2)
    raises(ValueError, lambda: p.transform(p))
    raises(ValueError, lambda: p.transform(Matrix([[1, 0], [0, 1]])))
    # Test Equals
    assert p.equals(x1) == False
    # Test __sub__
    p_2d = Point(0, 0)
    raises(ValueError, lambda: (p - p_2d))
def test_issue_9214():
    """Regression test: these three 3D points must not be reported collinear."""
    pt_a = Point3D(4, -2, 6)
    pt_b = Point3D(1, 2, 3)
    pt_c = Point3D(7, 2, 3)
    assert Point3D.are_collinear(pt_a, pt_b, pt_c) is False
| grevutiu-gabriel/sympy | sympy/geometry/tests/test_point.py | Python | bsd-3-clause | 8,076 |
# Minimal Django settings used only for running the test suite.
LANGUAGE_CODE = 'en'
SECRET_KEY = 'ji2r2iGkZqJVbWDhXrgDKDR2qG#mmtvBZXPXDugA4H)KFLwLHy'
SITE_ID = 1
# Run tests through django-nose; keep log capture off and enable test ids.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--nologcapture', '--with-id']
# Throwaway media/static locations for test artifacts.
MEDIA_ROOT = '/tmp/cmsplugin-polls/media/'
STATIC_ROOT = '/tmp/cmsplugin-polls/static/'
ROOT_URLCONF = 'urls'
# In-memory SQLite keeps the test database fast and disposable.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_nose',
    'cms',
    'menus',
    'mptt',
    'cmsplugin_polls',
]
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = [
    'django.core.context_processors.request',
]
| satyrius/cmsplugin-polls | tests/settings.py | Python | mit | 905 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class lsof(test.test):

    """
    Autotest module for testing basic functionality
    of lsof

    @author
    """
    version = 1
    nfail = 0      # overall failure counter, checked in postprocess()
    path = ''

    def initialize(self, test_path=''):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        # make sure a compiler is available before building the test binaries
        if not sm.check_installed('gcc'):
            logging.debug("gcc missing - trying to install")
            sm.install('gcc')
        ret_val = subprocess.Popen(['make', 'all'], cwd="%s/lsof" %(test_path))
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
        # NOTE(review): success is logged even when the build above failed
        # (nfail incremented) -- confirm this is intended.
        logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            # the LTP-style scripts expect shared helpers via $LTPBIN
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            # patch the lsof sources first, then run the test script
            ret_val = subprocess.Popen(['./patch_lsof.sh'], cwd="%s/lsof" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1

            ret_val = subprocess.Popen(['./lsof.sh'], cwd="%s/lsof" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1

        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        # fail the autotest job if any step above recorded a failure
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
| rajashreer7/autotest-client-tests | linux-tools/lsof/lsof.py | Python | gpl-2.0 | 1,739 |
# webpageclassifier.py
import math
import re
import requests
import collections
import itertools
from bs4 import BeautifulSoup, SoupStrainer
from time import sleep
"""Categorizes urls as blog|wiki|news|forum|classified|shopping|undecided.
THE BIG IDEA: It is inherently confusing to classify pages as classifieds, blogs,
or forums because there is no single, clear definition. Even if there is a definition
the structure of the webpage can be anything and still comply with that definition.
The flow is very important for the categorization.
URL CHECK: The code checks the urls for WIKI, BLOGS, FORUMS and NEWS before anything
else. In case we have multiple clues in a single url such as www.**newsforum**.com,
it gives utmost precedence to the wiki. Then treats the others as equal and keeps
the result undecided hoping it will be decided by one of the successive processes.
WIKI: The easiest and most certain way of identifying a wiki is looking into its url.
BLOG: these mostly have a blog provider: And in most cases the name gets appended in the blog url itself.
FORUM: Although they come in different structure and flavors, one of the most
common and exact way of recognizing them is thru their:
1. url: It may contain the word forum (not always true)
2. html tags: the <table>, <tr>, <td> tags contains the "class" attribute that
has some of the commonly repeting names like: views, posts, thread etc.
The code not only looks for these exact words but also looks if these words
are a part of the name of any class in these tags.
NEWS: Checking the <nav>, <header> and <footer> tags' data (attributes, text, sub tags
etc.) for common words we find in a news website like 'world', 'political', 'arts' etc
... 'news' as well and calculates the similary and uses it with a threshhold.
CLASSIFIED and SHOPPING: Here the code uses a two-stage approach to first classify the
page into one of these using a list of words for each. The main difference assumed was
that a 'classified' page had many "touting" words, because it's people selling stuff,
whereas a 'shopping' page had different kinds of selling words (of course there is some
overlap; the code takes care of that). Then it checks to see if the predicted type is
independently relevant as a classified or shopping web page (using a threshold).
The flow of how the sites are checked here is very important because of the hierarchy
on the internet (say a forum can be a shopping forum - the code will correctly classify
it as a forum)
The code uses some necessary conditions (if you may say) to find the accurate classification.
Checking the url, header and footer is also a very good idea, but it may lead you astray
if used even before using the above mentioned accurate techniques. Especially the
words in the header and footer may lead you astray (say a footer may contain both 'blog'
and 'forum')
If indecisive this code will call the Hyperion Gray team categorizer
(That code is commented -- please also import their code first)
"""
LICENSE = """
Copyright [2015] [jpl]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'Asitang Mishra jpl memex'
def read_golden(filepath):
    """Read a golden word-list file and return its canonical words.

    Each line becomes one entry, lower-cased and stripped of surrounding
    whitespace.

    :param filepath: path to the golden word list (cp1252-encoded text)
    :return: list of canonical (lowercase) words
    """
    # iterate the file lazily instead of readlines(); the initial dummy
    # `goldenlist = []` assignment in the old version was dead code
    with open(filepath, 'r', encoding='cp1252', errors='ignore') as f:
        return [line.lower().strip() for line in f]
def ngrams(input, n):
    """Split *input* on single spaces and return its word n-grams.

    Each n-gram is a list of n consecutive words; returns [] when the
    text has fewer than n words.
    """
    words = input.split(' ')
    return [words[i:i + n] for i in range(len(words) - n + 1)]
def word_in_url(url, wordlist):
    """Return True if any word from *wordlist* occurs as a substring of *url*."""
    return any(word in url for word in wordlist)
def flatten(l):
    """Recursively flatten an arbitrarily nested iterable, yielding leaves.

    Strings and bytes are treated as atomic leaves, not iterables.

    From Christian @ http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
    """
    # collections.Iterable was removed in Python 3.10; the abc module is
    # the supported home for the Iterable ABC.
    from collections.abc import Iterable
    for el in l:
        if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
            yield from flatten(el)
        else:
            yield el
def extract_all_classnames(taglist, html_doc):
    """Extracts all `class` values from `html_doc`, but only for tags in `taglist`.
    Ignores tags w/o class attribute - they don't affect cosine_sim anyway.

    Returns: flattened generator of class names appearing in tags.
    Note: returned generator may have "" entries, e.g. for <a class="" href=...>
    """
    # Note '_' in next line - soup trick to avoid the Python 'class' keyword.
    strainer = SoupStrainer(taglist, class_=True)
    parsed = BeautifulSoup(html_doc, 'lxml', parse_only=strainer)
    class_values = (node.attrs['class'] for node in parsed.find_all()
                    if 'class' in node.attrs)
    return flatten(class_values)
def extract_all_fromtag(taglist, html_doc):
    """Extract all tags in taglist from html_doc. Return as list of Tag.

    Note some items will be long portions of the document!!
    """
    parsed = BeautifulSoup(html_doc, 'lxml', parse_only=SoupStrainer(taglist))
    return parsed.find_all()
# def numberoftags(taglist,html_doc):
# soup = BeautifulSoup(html_doc, 'lxml')
# count=0
# for tag in taglist:
# for classtags in soup.findall(tag):
# count+=1"""
def cosine_sim(words, goldwords):
"""Finds the normalized cosine overlap between two texts given as lists.
"""
# TODO: Speed up the loops? If profile suggests this is taking any time.
wordfreq = dict()
goldwordfreq = dict()
commonwords = []
cosinesum = 0
sumgoldwords = 0
sumwords = 0
for goldword in goldwords:
if goldword in goldwordfreq.keys():
goldwordfreq[goldword] = goldwordfreq[goldword] + 1
else:
goldwordfreq[goldword] = 1
for word in words:
if word in wordfreq.keys():
wordfreq[word] = wordfreq[word] + 1
else:
wordfreq[word] = 1
for word in goldwords:
if word in wordfreq.keys():
if word in goldwordfreq.keys():
commonwords.append(word)
cosinesum += goldwordfreq[word] * wordfreq[word]
for word in goldwords:
sumgoldwords += goldwordfreq[word] * goldwordfreq[word]
for word in commonwords:
sumwords += wordfreq[word] * wordfreq[word]
# print(commonwords)
sumwords = math.sqrt(sumwords)
sumgoldwords = math.sqrt(sumgoldwords)
if sumgoldwords != 0 and sumwords != 0:
return cosinesum / (sumwords * sumgoldwords)
return 0
def name_in_url(url):
    """Check for 'wiki', 'forum', 'news' or 'blog' in the url.

    'wiki' always wins; any of the others counts only when it is the
    single match, otherwise the answer is 'undecided'.
    """
    if 'wiki' in url:
        return 'wiki'
    matches = [word for word in ['forum', 'blog', 'news'] if word in url]
    if len(matches) == 1:
        return matches[0]
    return 'undecided'
def printlist(name, mylist, N=10, prefix='\t'):
    """Print first N items of a list or generator, with prefix & name.

    Sliceable sequences are sliced; iterators/generators are partially
    consumed (up to N items).
    """
    try:
        head = mylist[:N]
    except TypeError:
        # Not sliceable (generator/iterator): materialize the first N items.
        # BUG FIX: previously the islice *object* was printed (its repr),
        # not the items it yields.
        head = list(itertools.islice(mylist, N))
    print('{}{}: {}...'.format(prefix, name, head))
def forum_score(html, forum_classnames):
    """Cosine similarity between `forum_classnames` and the 'class'
    attributes of common layout tags in `html`.
    """
    layout_tags = ['tr', 'td', 'table', 'div', 'p', 'article']
    raw_classes = extract_all_classnames(layout_tags, html)
    #printlist('forum classlist:', raw_classes)
    # Keep only matches, reduced to canonical form ('forums' -> 'forum').
    # TODO: doesn't this artificially inflate cosine_sim? By dropping non-matches?
    canonical = [gold for cls in raw_classes
                 for gold in forum_classnames if gold in cls]
    #printlist('canonical form :', canonical)
    return cosine_sim(canonical, forum_classnames)
def news_score(html, news_list):
    """Cosine similarity between `news_list` and the text found inside
    nav/header/footer tags -- page chrome typical of news sites.
    """
    chrome_tags = ['nav', 'header', 'footer']
    fragments = extract_all_fromtag(chrome_tags, html)
    # Strip punctuation from each fragment's text, then flatten to words.
    cleaned = (re.sub('[^A-Za-z0-9]+', ' ', frag.text).strip() for frag in fragments)
    words = ' '.join(cleaned).split(' ')
    #printlist('news contents:', words)
    return cosine_sim(words, news_list)
def get_html(url):
    """Fetch HTML and convert to lowercase. If error, prepend with '_HTTP_ERROR_'.

    Tries each User-Agent in turn (some pages dislike custom agents),
    sleeping between attempts to reduce the chance of 429 responses.
    """
    # Some pages dislike custom agents. Define alternatives.
    # BUG FIX: a missing trailing comma used to concatenate the MSIE and
    # Chrome strings into a single bogus agent.
    alt_agents = [
        'MEMEX_PageClass_bot/0.5',
        'Mozilla/5.0',
        'Gecko/1.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
        'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
    ]
    r = None
    for agent in alt_agents:
        # BUG FIX: the agent string must be sent as an HTTP header;
        # `params=` put it in the query string instead.
        r = requests.get(url, headers={'User-Agent': agent})
        if r.status_code == requests.codes['ok']:
            return r.text.lower()
        wait = 1
        if r.status_code == requests.codes['too_many']:
            # Retry-After may be absent or non-numeric; fall back to 1s.
            try:
                wait = int(r.headers.get('Retry-After', 1))
            except ValueError:
                wait = 1
        print('*** Agent "%s" failed. Retrying...' % agent)
        sleep(wait)  # Reduce chance of 429 error (Too many requests)
    print("\tERROR :", r.status_code)
    print("\tCOOKIES:", [x for x in r.cookies])
    print("\tHISTORY:", r.history)
    print("\tHEADERS:", r.headers)
    print("\tRESPONSE:", r.text[:100], '...')
    return "_HTTP_ERROR_" + r.text.lower()
def categorize_url(url, goldwords):
    """Categorizes urls as blog | wiki | news | forum | classified | shopping | undecided.

    Returns best guess and a dictionary of scores, which may be empty
    (it stays empty when the decision is made from the URL alone, or on
    an HTTP error).
    """
    scores = {}
    # 1. Check for blog goldwords in URL
    # NOTE(review): word_in_url is defined elsewhere in this module --
    # presumably a substring test of each gold word against the URL; verify.
    if word_in_url(url, goldwords['blog']):
        return 'blog', scores
    # 2. Check for category name in URL ('wiki' trumps; others must be unique).
    name_type = name_in_url(url)
    if name_type != 'undecided':
        return name_type, scores
    # OK, we actually have to look at the page.
    html = get_html(url)
    if html.startswith('_HTTP_ERROR_'):
        return 'ERROR', scores
    # Calculate all cosine similarity scores
    # It used to stop at the first acceptable, but I want to compare.
    fs = forum_score(html, goldwords['forum'])
    ns = news_score(html, goldwords['news'])
    # Tokenize the whole page (words plus bigrams) for the classified and
    # shopping comparisons.
    text = re.sub(u'[^A-Za-z0-9]+', ' ', html)
    text_list = text.split(' ') + [' '.join(x) for x in ngrams(text, 2)]
    cs = cosine_sim(text_list, goldwords['classified'])
    ss = cosine_sim(text_list, goldwords['shopping'])
    scores = {'forum': fs,
              'news': ns,
              'classified': cs,
              'shopping': ss}
    THRESH = 0.4
    # 3. Forum, then news, checked in that fixed priority order.
    if fs >= THRESH:
        return 'forum', scores
    if ns >= THRESH:
        return 'news', scores
    # Chained comparison: THRESH < cs AND cs > ss (strictly greater on both).
    if THRESH < cs > ss:
        return 'classified', scores
    if THRESH < ss > cs:
        return 'shopping', scores
    # 6. If still undecided, call hyperion grey classifier
    # if url_type=='undecided':
    # fs = DumbCategorize(url)
    # category=fs.categorize()
    # url_type=category
    # return url_type
    return 'undecided', scores
def expand_url(url):
    """Prefix `url` with 'http://' unless it already starts with 'http'."""
    return url if url.startswith('http') else 'http://' + url
def get_goldwords():
    """Load the gold-word list for each category from '<category>.txt'."""
    return {name: read_golden(name + '.txt')
            for name in ['blog', 'forum', 'news', 'shopping', 'classified']}
def print_weights(weights, prefix='\t[', suffix=']'):
    """Print the four similarity scores on one line, e.g. '\t[fo: 0.10, ...]'.

    Raises KeyError if any of the four category keys is missing.
    """
    parts = ['%s: %4.2f' % (cat[:2], weights[cat])
             for cat in ['forum', 'news', 'classified', 'shopping']]
    print('{}{}{}'.format(prefix, ', '.join(parts), suffix))
if __name__ == "__main__":
import pandas as pd
gold_words = get_goldwords()
for key, val in gold_words.items():
printlist(key, val)
with open('urls.csv') as f:
df = pd.read_csv(f, header=0, skipinitialspace=True)
#df = df.iloc[:5] # Subset for testing
answers, scores = [], []
for url in df['URL']:
eu = expand_url(url)
print('\n' + eu)
cat, weights = categorize_url(eu, gold_words)
try:
print_weights(weights)
except KeyError:
pass
print('\t---> %s <--- ' % cat)
answers.append(cat)
scores.append(weights)
df['Test'] = answers
df['Correct?'] = df['Test'] == df['Category']
df = pd.concat([df, pd.DataFrame(scores)], axis=1)
print()
print(df)
df.describe()
n_right = df['Correct?'].sum()
score = n_right / len(df)
print()
print('*ACCURACY*: {}/{} = {:4.2f}'.format(n_right, len(df), score))
| asitang/webpageclassifier | webpageclassifier.py | Python | apache-2.0 | 13,357 |
#!/usr/bin/env python
# coding=UTF-8
# Author: Dennis Lutter <lad1337@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
if __name__ == "__main__":
import glob
import unittest
test_file_strings = [ x for x in glob.glob('*_tests.py') if not x in __file__]
module_strings = [file_string[0:len(file_string) - 3] for file_string in test_file_strings]
suites = [unittest.defaultTestLoader.loadTestsFromName(file_string) for file_string in module_strings]
testSuite = unittest.TestSuite(suites)
print "=================="
print "STARTING - ALL TESTS"
print "=================="
print "this will include"
for includedfiles in test_file_strings:
print "- " + includedfiles
text_runner = unittest.TextTestRunner().run(testSuite)
| AlexBoogaard/Sick-Beard-Torrent-Edition | tests/all_tests.py | Python | gpl-3.0 | 1,450 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all images and videos.
To upload an image, run upload_image.py. To upload video, see:
http://adwords.google.com/support/aw/bin/answer.py?hl=en&answer=39454.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
PAGE_SIZE = 500
def main(client):
  # Page through all IMAGE and VIDEO media for the account and print a
  # one-line summary of each. (Python 2 / googleads AdWords API v201809.)
  # Initialize appropriate service.
  media_service = client.GetService('MediaService', version='v201809')

  # Construct selector and get all images.
  offset = 0
  selector = {
      'fields': ['MediaId', 'Type', 'Width', 'Height', 'MimeType'],
      'predicates': [{
          'field': 'Type',
          'operator': 'IN',
          'values': ['IMAGE', 'VIDEO']
      }],
      'paging': {
          'startIndex': str(offset),
          'numberResults': str(PAGE_SIZE)
      }
  }
  more_pages = True
  while more_pages:
    page = media_service.get(selector)

    # Display results.
    if 'entries' in page:
      for image in page['entries']:
        # Images carry a list of {key, value} dimension entries; videos may
        # not, in which case a zero-sized FULL entry is substituted.
        try:
          dimensions = dict([(entry['key'], entry['value'])
                             for entry in image['dimensions']])
        except AttributeError:
          dimensions = {'FULL': {'height': 0, 'width': 0}}
        if image['type'] == 'IMAGE':
          print ('%s with id "%s", dimensions \'%sx%s\', and MimeType "%s"'
                 ' was found.' % (image['type'], image['mediaId'],
                                  dimensions['FULL']['height'],
                                  dimensions['FULL']['width'],
                                  image['mimeType']))
        elif image['type'] == 'VIDEO':
          print ('%s with id "%s" was found.' % (image['type'],
                                                 image['mediaId']))
    else:
      # NOTE: printed once per empty page, not once per run.
      print 'No images/videos were found.'
    # Advance to the next page until the reported total is exhausted.
    offset += PAGE_SIZE
    selector['paging']['startIndex'] = str(offset)
    more_pages = offset < int(page['totalNumEntries'])
if __name__ == '__main__':
  # Initialize client object.
  # Credentials come from ~/googleads.yaml (see module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client)
import collections as c, numpy as n
class AgentStatistics:
    """Summary statistics over per-author message counts.

    Built from a datastructure exposing ``author_messages`` (a mapping of
    author -> list of messages) and ``n_messages`` (total message count).
    """
    def __init__(self,list_datastructures=None):
        if not list_datastructures:
            print("input datastructures, please")
        self.list_datastructures=list_datastructures
        # Authors ordered by ascending message count.
        self.author_messages_= author_messages_= c.OrderedDict(sorted(list_datastructures.author_messages.items(), key=lambda x: len(x[1])))
        self.n_authors=len(author_messages_)
        self.authors=list(author_messages_.keys())
        self.msgs=list(author_messages_.values())
        # Message count per author, sorted ascending (matches self.authors).
        self.authors_n_msgs=n.array([len(i) for i in self.msgs])
        self.basicMeasures()
        self.messageAuthorsCorrespondence()
    def basicMeasures(self):
        """Mean, std and percentiles of the per-author message counts."""
        self.mean_msgs=n.mean(self.authors_n_msgs)
        self.std_msgs=n.std(self.authors_n_msgs)
        plist=[5,10,25,50,75,90,95]
        self.percentiles=percentiles={}
        for percentile in plist:
            # NOTE(review): BUG? each key is assigned the *whole* array of
            # percentiles (n.percentile(..., plist)), recomputed every
            # iteration -- presumably n.percentile(..., percentile) was meant.
            percentiles[percentile]=n.percentile(self.authors_n_msgs,plist)
    def oneMore(self,bool_array):
        """Flip the first False entry of bool_array to True (in place)."""
        args=n.nonzero(bool_array-1)[0]
        bool_array[args[0]]=True
        return bool_array
    def oneLess(self,bool_array):
        """Flip the last False entry of bool_array to True (in place).

        NOTE(review): despite the name, this *sets* a flag just like
        oneMore (last False -> True), it does not clear one -- confirm intent.
        """
        args=n.nonzero(bool_array-1)[0]
        bool_array[args[-1]]=True
        return bool_array
    def messageAuthorsCorrespondence(self):
        """Cumulative author/message shares (most-active author, deciles,
        quartile boundaries), as counts and as fractions (trailing '_')."""
        # Most active author's message count and its share of all messages.
        self.n_msgs_h=self.authors_n_msgs[-1]
        self.n_msgs_h_=100*self.n_msgs_h/self.list_datastructures.n_messages
        # Cumulative message counts over authors (ascending activity).
        self.cumulative=n.array([n.sum(self.authors_n_msgs[:i+1]) for i in range(self.n_authors)])
        self.cumulative_=self.cumulative/self.list_datastructures.n_messages
        # Number of least-active authors holding the bottom 10% of messages.
        self.last_d10=1+(self.cumulative_<.10).sum()
        self.last_d10_=self.last_d10/self.n_authors
        # Author counts at the 75% / 25% cumulative-share boundaries.
        self.q1=1+(self.cumulative_>0.75).sum()
        self.q1_=self.q1/self.n_authors
        self.q3=1+(self.cumulative_>0.25).sum()
        self.q3_=self.q3/self.n_authors
        # Message shares of those same author groups (masks widened by one).
        self.Mlast_d10 =self.authors_n_msgs[self.oneMore(self.cumulative_<0.10)].sum()/self.list_datastructures.n_messages
        self.Mq1=self.authors_n_msgs[self.oneLess(self.cumulative_>0.75)].sum()/self.list_datastructures.n_messages
        self.Mq3=self.authors_n_msgs[self.oneLess(self.cumulative_>0.25)].sum()/self.list_datastructures.n_messages
| ttm/gmaneLegacy | gmaneLegacy/agentStatistics.py | Python | unlicense | 2,241 |
import RPi.GPIO as GPIO
import time
BEEP = 11
def setup():
    # Use physical (BOARD) pin numbering and drive the buzzer pin as output.
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(BEEP, GPIO.OUT)
def loop():
    # Toggle the buzzer once per second, forever (until KeyboardInterrupt).
    while True:
        GPIO.output(BEEP, not(GPIO.input(BEEP)))
        time.sleep(1)
def destroy():
    # Silence the (active-low) buzzer, then release all GPIO resources.
    GPIO.output(BEEP, 1)
    GPIO.cleanup()
if __name__ == "__main__":
try:
setup()
loop()
except KeyboardInterrupt:
destroy()
| stevenvo/rpi-gpio-scripts | 05_buzzer.py | Python | gpl-2.0 | 346 |
'''
Created on Jun 6, 2012
@author: vr274
'''
import numpy as np
from generic import TakestepSlice, TakestepInterface
from pele.utils import rotations
__all__ = ["RandomDisplacement", "UniformDisplacement",
"RotationalDisplacement", "RandomCluster"]
class RandomDisplacement(TakestepSlice):
    '''Random displacement on each individual coordinate

    RandomDisplacement is the most basic step taking routine. It simply
    displaces each coordinate my a random value.

    Parameters
    ----------
    stepsize : float
        magnitue of random displacement
    '''

    def __init__(self, stepsize=1.0):
        TakestepSlice.__init__(self, stepsize=stepsize)

    def takeStep(self, coords, **kwargs):
        """Add i.i.d. uniform noise in [-stepsize, stepsize] to the
        coordinates selected by self.srange (set by TakestepSlice)."""
        coords[self.srange] += np.random.uniform(low=-self.stepsize, high=self.stepsize, size=coords[self.srange].shape)
class UniformDisplacement(TakestepSlice):
    '''Displace each atom be a uniform random vector

    The routine generates a proper uniform random unitvector to displace
    atoms.
    '''

    def takeStep(self, coords, **kwargs):
        c = coords[self.srange]
        # Treat the slice as (natoms, 3) and displace each atom by a random
        # vector drawn uniformly from the radius-stepsize sphere.
        # NOTE(review): c.size/3 is float division under Python 3 and would
        # break reshape -- this file targets Python 2.
        for x in c.reshape(c.size/3,3):
            x += self.stepsize * rotations.vector_random_uniform_hypersphere(3)
class RotationalDisplacement(TakestepSlice):
    '''Random rotation for angle axis vector

    RotationalDisplacement performs a proper random rotation. If the coordinate array contains
    positions and orientations, make sure to specify the correct slice for the angle axis
    coordinates.
    '''

    def takeStep(self, coords, **kwargs):
        """
        take a random orientational step
        """
        c = coords[self.srange]
        # Each consecutive triple is one angle-axis vector, rotated in place.
        # NOTE(review): c.size/3 assumes Python 2 integer division.
        for x in c.reshape(c.size/3,3):
            rotations.takestep_aa(x, self.stepsize)
class RandomCluster(TakestepInterface):
    '''Replace the whole configuration with a uniformly random one drawn
    inside a cubic box of the given volume.
    '''

    def __init__(self, volume=1.0):
        self.volume = volume

    def takeStep(self, coords, **kwargs):
        box_length = self.volume ** (1. / 3.)
        coords[:] = np.random.random(coords.shape) * box_length
| js850/pele | pele/takestep/displace.py | Python | gpl-3.0 | 2,126 |
# -*- coding: utf-8 -*-
"""
pydot example 1
@author: Federico Cáceres
@url: http://pythonhaven.wordpress.com/2009/12/09/generating_graphs_with_pydot
"""
import pydot # import pydot or you're not going to get anywhere my friend :D
# first you create a new graph, you do that with pydot.Dot()
graph = pydot.Dot(graph_type='graph')

# the idea here is not to cover how to represent the hierarchical data
# but rather how to graph it, so I'm not going to work on some fancy
# recursive function to traverse a multidimensional array...
# I'm going to hardcode stuff... sorry if that offends you

# let's add the relationship between the king and lords
for i in range(3):
    # we can get right into action by "drawing" edges between the nodes in our graph
    # we do not need to CREATE nodes, but if you want to give them some custom style
    # then I would recomend you to do so... let's cover that later
    # the pydot.Edge() constructor receives two parameters, a source node and a destination
    # node, they are just strings like you can see
    edge = pydot.Edge("king", "lord%d" % i)
    # and we obviosuly need to add the edge to our graph
    graph.add_edge(edge)

# now let us add some vassals
vassal_num = 0
for i in range(3):
    # we create new edges, now between our previous lords and the new vassals
    # let us create two vassals for each lord
    for j in range(2):
        edge = pydot.Edge("lord%d" % i, "vassal%d" % vassal_num)
        graph.add_edge(edge)
        vassal_num += 1

# ok, we are set, let's save our graph into a file
# (note: rendering requires the Graphviz binaries to be installed)
graph.write_png('example1_graph.png')
# and we are done!
| metaperl/karatbars-utils | k0de/upgraded/try.py | Python | mit | 1,616 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cloudferry.actions.image import get_info_images
from cloudferry.lib.utils import utils
from tests import test
class GetInfoImagesTestCase(test.TestCase):
    """Unit tests for the GetInfoImages action."""

    def setUp(self):
        super(GetInfoImagesTestCase, self).setUp()
        image_body = {'image': 'image_body', 'meta': {}}
        self.fake_info = {'images': {'fake_image_id': image_body}}

        self.fake_image = mock.Mock()
        self.fake_image.read_info.return_value = self.fake_info

        self.fake_src_cloud = mock.Mock()
        self.fake_src_cloud.resources = {'image': self.fake_image}
        self.fake_dst_cloud = mock.Mock()

        self.fake_config = utils.ext_dict(migrate=utils.ext_dict(
            {'ignore_empty_images': False}))
        self.fake_init = {'src_cloud': self.fake_src_cloud,
                          'dst_cloud': self.fake_dst_cloud,
                          'cfg': self.fake_config}

    def test_run(self):
        action = get_info_images.GetInfoImages(self.fake_init, 'src_cloud')
        result = action.run()
        self.assertEqual({'images_info': self.fake_info}, result)
        self.fake_image.read_info.assert_called_once_with()
| SVilgelm/CloudFerry | tests/lib/os/actions/test_get_info_images.py | Python | apache-2.0 | 1,926 |
#!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2012 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <joseph@artefactual.com>
# @version svn: $Id$
from linkTaskManager import linkTaskManager
from taskStandard import taskStandard
from passClasses import choicesDic
from passClasses import replacementDic
import os
import uuid
import sys
import threading
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
import databaseInterface
import databaseFunctions
class linkTaskManagerGetMicroserviceGeneratedListInStdOut:
def __init__(self, jobChainLink, pk, unit):
self.tasks = []
self.pk = pk
self.jobChainLink = jobChainLink
sql = """SELECT * FROM StandardTasksConfigs where pk = """ + pk.__str__()
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
print row
#pk = row[0]
filterFileEnd = row[1]
filterFileStart = row[2]
filterSubDir = row[3]
self.requiresOutputLock = row[4]
standardOutputFile = row[5]
standardErrorFile = row[6]
execute = row[7]
self.execute = execute
arguments = row[8]
row = c.fetchone()
sqlLock.release()
#if reloadFileList:
# unit.reloadFileList()
# "%taskUUID%": task.UUID.__str__(), \
if filterSubDir:
directory = os.path.join(unit.currentPath, filterSubDir)
else:
directory = unit.currentPath
if self.jobChainLink.passVar != None:
if isinstance(self.jobChainLink.passVar, list):
for passVar in self.jobChainLink.passVar:
if isinstance(passVar, replacementDic):
execute, arguments, standardOutputFile, standardErrorFile = passVar.replace(execute, arguments, standardOutputFile, standardErrorFile)
elif isinstance(self.jobChainLink.passVar, replacementDic):
execute, arguments, standardOutputFile, standardErrorFile = self.jobChainLink.passVar.replace(execute, arguments, standardOutputFile, standardErrorFile)
commandReplacementDic = unit.getReplacementDic(directory)
#for each key replace all instances of the key in the command string
for key in commandReplacementDic.iterkeys():
value = commandReplacementDic[key].replace("\"", ("\\\""))
if execute:
execute = execute.replace(key, value)
if arguments:
arguments = arguments.replace(key, value)
if standardOutputFile:
standardOutputFile = standardOutputFile.replace(key, value)
if standardErrorFile:
standardErrorFile = standardErrorFile.replace(key, value)
UUID = uuid.uuid4().__str__()
self.task = taskStandard(self, execute, arguments, standardOutputFile, standardErrorFile, UUID=UUID)
databaseFunctions.logTaskCreatedSQL(self, commandReplacementDic, UUID, arguments)
t = threading.Thread(target=self.task.performTask)
t.daemon = True
t.start()
def taskCompletedCallBackFunction(self, task):
print task
databaseFunctions.logTaskCompletedSQL(task)
try:
choices = choicesDic(eval(task.results["stdOut"]))
except:
print >>sys.stderr, "Error creating dic from output"
choices = choicesDic({})
if self.jobChainLink.passVar != None:
if isinstance(self.jobChainLink.passVar, list):
found = False
for passVarIndex in range(len(self.jobChainLink.passVar)):
if isinstance(self.jobChainLink.passVar[passVarIndex], choicesDic):
self.jobChainLink.passVar[passVarIndex] = choices
if not found:
self.jobChainLink.passVar.append(choices)
else:
self.jobChainLink.passVar = [choices, self.jobChainLink.passVar]
else:
self.jobChainLink.passVar = [choices]
if True:
self.jobChainLink.linkProcessingComplete(task.results["exitCode"], self.jobChainLink.passVar)
| artefactual/archivematica-history | src/MCPServer/lib/linkTaskManagerGetMicroserviceGeneratedListInStdOut.py | Python | agpl-3.0 | 5,022 |
###############################################################################
# Copyright 2017-2021 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "fstd2nc" package.
#
# "fstd2nc" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "fstd2nc" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "fstd2nc". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
Optional helper functions.
These functions provide information about the FST files using alternative
approaches (not the standard librmn/rpnpy functions).
This is solely for performance considerations.
"""
from rpnpy.librmn import librmn
import ctypes as ct
import numpy as np
import numpy.ctypeslib as npc
# Declare ctypes signatures for the librmn packing/unpacking routines used
# by decode() below, so the calls marshal numpy buffers and ints correctly.
librmn.compact_float.argtypes = (npc.ndpointer(dtype='int32'), npc.ndpointer(dtype='int32'), npc.ndpointer(dtype='int32'), ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.POINTER(ct.c_double))
librmn.compact_double.argtypes = (npc.ndpointer(dtype='int32'), npc.ndpointer(dtype='int32'), npc.ndpointer(dtype='int32'), ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.POINTER(ct.c_double))
librmn.compact_integer.argtypes = (npc.ndpointer(dtype='int32'), ct.c_void_p, npc.ndpointer(dtype='int32'), ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int)
librmn.ieeepak_.argtypes = (npc.ndpointer(dtype='int32'), npc.ndpointer(dtype='int32'), ct.POINTER(ct.c_int), ct.POINTER(ct.c_int), ct.POINTER(ct.c_int), ct.POINTER(ct.c_int), ct.POINTER(ct.c_int))
librmn.compact_char.argtypes = (npc.ndpointer(dtype='int32'), ct.c_void_p, npc.ndpointer(dtype='int32'), ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int)
librmn.c_armn_uncompress32.argtypes = (npc.ndpointer(dtype='int32'), npc.ndpointer(dtype='int32'), ct.c_int, ct.c_int, ct.c_int, ct.c_int)
librmn.c_armn_compress_setswap.argtypes = (ct.c_int,)
librmn.armn_compress.argtypes = (npc.ndpointer(dtype='int32'),ct.c_int,ct.c_int,ct.c_int,ct.c_int,ct.c_int)
librmn.c_float_unpacker.argtypes = (npc.ndpointer(dtype='int32'),npc.ndpointer(dtype='int32'),npc.ndpointer(dtype='int32'),ct.c_int,ct.POINTER(ct.c_int))
def decode (data):
  '''
  Decodes the raw FSTD data into the final 2D array of values.
  Similar to fstluk, but here the data is already loaded in memory.
  The data should also include the header information at the
  beginning of the array.

  Parameters
  ----------
  data : array
    The encoded data

  Returns
  -------
  2D numpy array of shape (ni, nj), dtype determined by datyp/nbits.
  '''
  import rpnpy.librmn.all as rmn
  import numpy as np
  from fstd2nc.mixins import dtype_fst2numpy
  # Interpret the buffer as big-endian 32-bit words (native byte order copy).
  data = data.view('>i4').astype('i4')
  # Dimensions and packing info are packed into the header words
  # (same layout decode_headers() documents).
  ni, nj, nk = data[3]>>8, data[4]>>8, data[5]>>12
  nelm = ni*nj*nk
  datyp = int(data[4]%256) & 191 # Ignore +64 mask.
  nbits = int(data[2]%256)
  dtype = dtype_fst2numpy (datyp, nbits)
  # Work buffer: 32-bit words, doubled for >32-bit element sizes.
  if nbits <= 32:
    work = np.empty(nelm,'int32')
  else:
    work = np.empty(nelm,'int64').view('int32')
  # Strip header
  data = data[20:]
  # Extend data buffer for in-place decompression.
  # (datyp values >= 128 are the compressed variants.)
  if datyp in (129,130,134):
    d = np.empty(nelm + 100, dtype='int32')
    d[:len(data)] = data
    data = d
  shape = (nj,ni)
  # Wrap scalars for the ctypes calls below.
  ni = ct.c_int(ni)
  nj = ct.c_int(nj)
  nk = ct.c_int(nk)
  nelm = ct.c_int(nelm)
  npak = ct.c_int(-nbits)
  nbits = ct.c_int(nbits)
  zero = ct.c_int(0)
  one = ct.c_int(1)
  two = ct.c_int(2)
  tempfloat = ct.c_double(99999.0)
  #print (ni, nj, nk, nbits, datyp, dtype)
  # Dispatch on datyp to the matching librmn unpacking routine.
  if datyp == 0:
    # Raw binary: no unpacking needed.
    work = data
  elif datyp == 1:
    # Packed floating point.
    if nbits.value <= 32:
      librmn.compact_float(work, data, data[3:], nelm, nbits, 24, 1, 2, 0, ct.byref(tempfloat))
    else:
      raise Exception
      # NOTE(review): unreachable after the raise above -- dead code.
      librmn.compact_double(work, data, data[3:], nelm, nbits, 24, 1, 2, 0, ct.byref(tempfloat))
  elif datyp == 2:
    # Unsigned integer.
    librmn.compact_integer(work, None, data, nelm, nbits, 0, 1, 2)
  elif datyp == 3:
    raise Exception
  elif datyp == 4:
    # Signed integer.
    librmn.compact_integer(work, None, data, nelm, nbits, 0, 1, 4)
  elif datyp == 5:
    # IEEE floating point.
    librmn.ieeepak_(work, data, ct.byref(nelm), ct.byref(one), ct.byref(npak), ct.byref(zero), ct.byref(two))
  elif datyp == 6:
    librmn.c_float_unpacker(work, data, data[3:], nelm, ct.byref(nbits));
  elif datyp == 7:
    # Character data (returned as raw bytes).
    ier = librmn.compact_char(work, None, data, nelm, 8, 0, 1, 10)
    work = work.view('B')[:len(work)] #& 127
  elif datyp == 8:
    raise Exception
  elif datyp == 129:
    # Compressed packed float.
    librmn.armn_compress(data[5:],ni,nj,nk,nbits,2)
    librmn.compact_float(work,data[1:],data[5:],nelm,nbits.value+64*max(16,nbits.value),0,1,2,0,ct.byref(tempfloat))
  elif datyp == 130:
    # Compressed short integers.
    #librmn.c_armn_compress_setswap(0)
    librmn.armn_compress(data[1:],ni,nj,nk,nbits,2)
    #librmn.c_armn_compress_setswap(1)
    work[:] = data[1:].astype('>i4').view('>H')[:nelm.value]
  elif datyp == 133:
    # Compressed IEEE 32-bit.
    librmn.c_armn_uncompress32(work, data[1:], ni, nj, nk, nbits)
  elif datyp == 134:
    librmn.armn_compress(data[4:],ni,nj,nk,nbits,2);
    librmn.c_float_unpacker(work,data[1:],data[4:],nelm,ct.byref(nbits))
  else:
    raise Exception(datyp)
  # Records are stored column-major (ni fastest); transpose to (ni, nj).
  return work.view(dtype)[:nelm.value].reshape(shape).T
def decode_headers (raw):
  '''
  Decode record headers from a raw byte array.
  Returns a dictionary similar to fstprm, only the entries are
  vectorized over all records instead of 1 record at a time.
  NOTE: This includes deleted records as well.  You can filter them out using
  the 'dltf' flag.

  Parameters
  ----------
  raw : numpy array (dtype='B')
      The raw array of headers to decode.
  '''
  import numpy as np
  # Each record header is 9 big-endian 64-bit words, handled as 9x2 uint32.
  raw = raw.view('>i4').astype('uint32').reshape(-1,9,2)
  # Start unpacking the pieces.
  # Reference structure (from qstdir.h):
  # 0      word deleted:1, select:7, lng:24, addr:32;
  # 1      word deet:24, nbits: 8, ni:   24, gtyp:  8;
  # 2      word nj:24,  datyp: 8, nk:   20, ubc:  12;
  # 3      word npas: 26, pad7: 6, ig4: 24, ig2a:  8;
  # 4      word ig1:  24, ig2b:  8, ig3:  24, ig2c:  8;
  # 5      word etik15:30, pad1:2, etik6a:30, pad2:2;
  # 6      word etikbc:12, typvar:12, pad3:8, nomvar:24, pad4:8;
  # 7      word ip1:28, levtyp:4, ip2:28, pad5:4;
  # 8      word ip3:28, pad6:4, date_stamp:32;
  nrecs = raw.shape[0]
  # Pre-allocate one output array per field (vectorized over records).
  out = {}
  out['lng'] = np.empty(nrecs, dtype='int32')
  out['dltf'] = np.empty(nrecs, dtype='ubyte')
  out['swa'] =  np.empty(nrecs, dtype='uint32')
  out['deet'] = np.empty(nrecs, dtype='int32')
  out['nbits'] = np.empty(nrecs, dtype='byte')
  out['grtyp'] = np.empty(nrecs, dtype='|S1')
  out['ni'] = np.empty(nrecs, dtype='int32')
  out['nj'] = np.empty(nrecs, dtype='int32')
  out['datyp'] = np.empty(nrecs, dtype='ubyte')
  out['nk'] = np.empty(nrecs, dtype='int32')
  out['ubc'] = np.empty(nrecs, dtype='uint16')
  out['npas'] = np.empty(nrecs, dtype='int32')
  out['ig1'] = np.empty(nrecs, dtype='int32')
  out['ig2'] = np.empty(nrecs, dtype='int32')
  out['ig3'] = np.empty(nrecs, dtype='int32')
  out['ig4'] = np.empty(nrecs, dtype='int32')
  out['etiket'] = np.empty(nrecs,dtype='|S12')
  out['typvar'] = np.empty(nrecs,dtype='|S2')
  out['nomvar'] = np.empty(nrecs,dtype='|S4')
  out['ip1'] = np.empty(nrecs, dtype='int32')
  out['ip2'] = np.empty(nrecs, dtype='int32')
  out['ip3'] = np.empty(nrecs, dtype='int32')
  out['datev'] = np.empty(nrecs, dtype='int32')
  out['dateo'] = np.empty(nrecs, dtype='int32')
  out['xtra1'] = np.empty(nrecs, dtype='uint32')
  out['xtra2'] = np.empty(nrecs, dtype='uint32')
  out['xtra3'] = np.empty(nrecs, dtype='uint32')
  # Scratch buffers reused by the divmod splits below.
  temp8 = np.empty(nrecs, dtype='ubyte')
  temp32 = np.empty(nrecs, dtype='int32')
  # Split each packed word into its bit fields (divmod by powers of two,
  # writing quotient/remainder straight into the output arrays).
  np.divmod(raw[:,0,0],2**24, temp8, out['lng'])
  out['lng'] *= 2 # Convert from 8-byte to 4-byte units.
  np.divmod(temp8,128, out['dltf'], temp8)
  out['swa'][:] = raw[:,0,1]
  np.divmod(raw[:,1,0],256, out['deet'], out['nbits'])
  np.divmod(raw[:,1,1],256, out['ni'], out['grtyp'].view('ubyte'))
  np.divmod(raw[:,2,0],256, out['nj'], out['datyp'])
  np.divmod(raw[:,2,1],4096, out['nk'], out['ubc'])
  out['npas'][:] = raw[:,3,0]//64
  # ig2 is scattered across three bytes (ig2a/ig2b/ig2c); reassemble it.
  np.divmod(raw[:,3,1],256, out['ig4'], temp32)
  out['ig2'][:] = (temp32 << 16) # ig2a
  np.divmod(raw[:,4,0],256, out['ig1'], temp32)
  out['ig2'] |= (temp32 << 8) # ig2b
  np.divmod(raw[:,4,1],256, out['ig3'], temp32)
  out['ig2'] |= temp32 # ig2c
  etik15 = raw[:,5,0]//4
  etik6a = raw[:,5,1]//4
  et = raw[:,6,0]//256
  etikbc, _typvar = divmod(et, 4096)
  _nomvar = raw[:,6,1]//256
  np.divmod(raw[:,7,0],16, out['ip1'], temp8)
  out['ip2'][:] = raw[:,7,1]//16
  out['ip3'][:] = raw[:,8,0]//16
  date_stamp = raw[:,8,1]
  # Reassemble and decode.
  # (Based on fstd98.c)
  # String fields are stored as 6-bit characters, offset by 32 from ASCII.
  etiket_bytes = np.empty((nrecs,12),dtype='ubyte')
  for i in range(5):
    etiket_bytes[:,i] = ((etik15 >> ((4-i)*6)) & 0x3f) + 32
  for i in range(5,10):
    etiket_bytes[:,i] = ((etik6a >> ((9-i)*6)) & 0x3f) + 32
  etiket_bytes[:,10] = ((etikbc >> 6) & 0x3f) + 32
  etiket_bytes[:,11] = (etikbc & 0x3f) + 32
  out['etiket'][:] = etiket_bytes.flatten().view('|S12')
  nomvar_bytes = np.empty((nrecs,4),dtype='ubyte')
  for i in range(4):
    nomvar_bytes[:,i] = ((_nomvar >> ((3-i)*6)) & 0x3f) + 32
  out['nomvar'][:] = nomvar_bytes.flatten().view('|S4')
  typvar_bytes = np.empty((nrecs,2),dtype='ubyte')
  typvar_bytes[:,0] = ((_typvar >> 6) & 0x3f) + 32
  typvar_bytes[:,1] = ((_typvar & 0x3f)) + 32
  out['typvar'][:] = typvar_bytes.flatten().view('|S2')
  # CMC date stamps pack units of 10 and 5 seconds into the low 3 bits.
  out['datev'][:] = (date_stamp >> 3) * 10 + (date_stamp & 0x7)
  # Note: this dateo calculation is based on my assumption that
  # the raw stamps increase in 5-second intervals.
  # Doing it this way to avoid a gazillion calls to incdat.
  date_stamp = date_stamp - (out['deet']*out['npas'])//5
  out['dateo'][:] = (date_stamp >> 3) * 10 + (date_stamp & 0x7)
  out['xtra1'][:] = out['datev']
  out['xtra2'][:] = 0
  out['xtra3'][:] = 0
  return out
def raw_headers (filename):
  '''
  Extract record headers from the specified file.
  Returns a dictionary similar to fstprm, only the entries are
  vectorized over all records instead of 1 record at a time.
  NOTE: This includes deleted records as well.  You can filter them out using
  the 'dltf' flag.

  Parameters
  ----------
  filename : string
      The file to scan for headers.

  Returns
  -------
  Flat numpy byte array of concatenated header entries (suitable for
  decode_headers), or None if the file is missing or not an FST file.
  '''
  import numpy as np
  import os
  if not os.path.exists(filename):
    return None
  f = open(filename,'rb')
  # Use same check as maybeFST
  magic = f.read(16)
  if len(magic) < 16 or magic[12:] != b'STDR':
    f.close()
    return None
  # Get the raw (packed) parameters.
  # Walk the linked list of directory pages; the first page starts at
  # 8-byte word 27, and page[4] links to the next page (0 terminates).
  pageaddr = 27; pageno = 0
  raw = []
  pageno_list = []
  recno_list = []
  while pageaddr > 0:
    f.seek(pageaddr*8-8, 0)
    # A page is an 8-word header plus up to 256 entries of 18 words each.
    page = np.fromfile(f, '>i4', 8+256*18)
    params = page[8:].reshape(256,9,2)
    nent = page[5]  # number of entries actually used on this page
    raw.append(params[:nent].view('B').flatten())
    # NOTE(review): pageno_list/recno_list are collected but never
    # returned -- dead bookkeeping, possibly left from debugging.
    recno_list.extend(list(range(nent)))
    pageno_list.extend([pageno]*nent)
    pageaddr = page[4]; pageno += 1
  raw = np.concatenate(raw)
  f.close()
  return raw
# Get the block size for the filesystem where the given file resides.
# May be useful for determining the best way to read chunks of data from
# FSTD files.
def blocksize (filename):
  """Return the filesystem block size for `filename`, or 4096 on failure."""
  import subprocess
  try:
    # Use the command-line "stat" program instead of os.stat, because the
    # latter is giving incorrect results for some filesystems.
    # E.g., on a GPFS filesystem with 16MB block size, os.stat is giving a size
    # of 256KB.
    return int(subprocess.check_output(['stat', '-c', '%s', '-f', filename]))
  # BUG FIX: also catch CalledProcessError (stat present but the GNU-style
  # invocation fails, e.g. BSD/macOS stat) and ValueError (unparsable output);
  # previously only OSError (stat missing) was handled.
  except (OSError, subprocess.CalledProcessError, ValueError):
    return 4096 # Give some default value if 'stat' not available.
def structured_array (data):
  '''
  Pack a dictionary of equal-length arrays into one structured (record)
  array.  Makes certain operations easier, such as finding unique
  combinations of fields.
  '''
  import numpy as np
  # One (name, dtype) pair per input array, preserving insertion order.
  dtype = [(name, arr.dtype) for name, arr in data.items()]
  nrec = len(next(iter(data.values())))
  result = np.ma.empty(nrec, dtype=dtype)
  for name, arr in data.items():
    result[name] = arr
  return result
def maybeFST(filename):
  '''
  Lightweight test for FST files.
  Uses the same test for fstd98 random files from wkoffit.c (librmn 16.2).

  The 'isFST' test from rpnpy calls c_wkoffit, which has a bug when testing
  many small (non-FST) files.  Under certain conditions the file handles are
  not closed properly, which causes the application to run out of file
  handles after testing ~1020 small non-FST files.
  '''
  with open(filename, 'rb') as f:
    header = f.read(16)
  # Same check as c_wkoffit in librmn: bytes 12-15 spell out 'STDR'.
  if len(header) < 16:
    return False
  return header[12:] == b'STDR'
| neishm/fstd2nc | fstd2nc/extra.py | Python | lgpl-3.0 | 12,951 |
# -*- coding:utf-8 -*-
"""
Description:
Contract Parameter Type in AntShares.Wallets
Usage:
from AntShares.Wallets.ContractParameterType import ContractParameterType
"""
class ContractParameterType(object):
    """Enumeration of the parameter types a contract can declare."""
    Signature = 0x00 # a signature
    Integer = 0x01 # an integer
    Hash160 = 0x02 # a 160-bit hash value
    Hash256 = 0x03 # a 256-bit hash value
    ByteArray = 0x04 # a byte array
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2014 by Łukasz Mierzwa
:contact: l.mierzwa@gmail.com
"""
from __future__ import unicode_literals
from mongoforms import MongoForm
from mongoforms.fields import MongoFormFieldGenerator
from IPy import IP
from django.forms import Form, GenericIPAddressField, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import validators
from crispy_forms.helper import FormHelper, Layout
from crispy_forms.bootstrap import StrictButton, Div
class IPField(GenericIPAddressField):
    """Form field that parses its input into an ``IPy.IP`` object."""

    def to_python(self, value):
        """Return an IP instance for *value*; empty input maps to None."""
        if value not in validators.EMPTY_VALUES:
            try:
                return IP(value)
            except ValueError as err:
                # Surface the parsing problem as a form validation error.
                raise ValidationError(err)
        return None
class ContribFormFieldGenerator(MongoFormFieldGenerator):
    """Mongo form-field generator extended with IPv4 address support."""

    @staticmethod
    def generate_ipv4field(field_name, field, label):
        """Map a mongoengine IPv4 document field onto an :class:`IPField`."""
        return IPField(initial=field.default, label=label,
                       required=field.required)
class CirspyIconButton(StrictButton):
    """A crispy-forms StrictButton rendered with an additional icon.

    The icon CSS class (e.g. a Font Awesome class) is stored on the
    instance and read by the custom template at render time.
    """
    # Template that knows how to render the icon next to the button label.
    template = 'crispy/button_with_icon.html'

    def __init__(self, content, icon_class=None, **kwargs):
        # Stash the icon class before delegating to StrictButton; the
        # remaining kwargs (css_class, type, onclick, ...) are handled there.
        self.icon_class = icon_class
        super(CirspyIconButton, self).__init__(content, **kwargs)
class CrispyForm(Form):
    """Base Django form rendered horizontally via django-crispy-forms.

    Subclasses customize the class attributes below and/or extend
    ``layout``; a Cancel / Submit button toolbar is appended automatically.
    """
    submit_label = 'Submit'
    submit_css_class = 'btn-primary'
    submit_icon_class = 'fa fa-floppy-o'
    form_action = None
    form_class = 'form-horizontal'
    label_class = 'col-md-2'
    field_class = 'col-md-8'
    layout = []

    def __init__(self, *args, **kwargs):
        super(CrispyForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_action = self.form_action
        helper.form_class = self.form_class
        helper.label_class = self.label_class
        helper.field_class = self.field_class
        # Cancel goes back in browser history; Submit posts the form.
        cancel = CirspyIconButton(_("Cancel"), css_class='btn-default',
                                  icon_class='fa fa-reply',
                                  onclick='javascript:history.go(-1);')
        submit = CirspyIconButton(_(self.submit_label), type='submit',
                                  css_class=self.submit_css_class,
                                  icon_class=self.submit_icon_class)
        toolbar = Div(cancel, submit, css_class="btn-toolbar")
        rows = self.clean_layout(self.layout + [toolbar])
        helper.layout = Layout(*rows)
        self.helper = helper

    def clean_layout(self, layout):
        """Hook for subclasses to post-process the layout; identity here."""
        return layout
class InlineCrispyForm(Form):
    """Base Django form rendered inline via django-crispy-forms.

    A plain submit button (no icon) is appended after any fields the
    subclass declares in ``layout``.
    """
    submit_label = 'Submit'
    submit_css_class = 'btn-primary'
    form_action = None
    form_class = 'form-inline'
    layout = []

    def __init__(self, *args, **kwargs):
        super(InlineCrispyForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_action = self.form_action
        helper.form_class = self.form_class
        helper.field_template = 'bootstrap3/layout/inline_field.html'
        submit = StrictButton(_(self.submit_label),
                              css_class=self.submit_css_class,
                              type='submit')
        helper.layout = Layout(*self.clean_layout(self.layout + [submit]))
        self.helper = helper

    def clean_layout(self, layout):
        """Hook for subclasses to post-process the layout; identity here."""
        return layout
class CrispyMongoForm(MongoForm):
    """Horizontal crispy-forms rendering for MongoEngine-backed forms.

    Mirrors :class:`CrispyForm` but derives from ``MongoForm`` so the
    fields come from a mongoengine document.
    """
    submit_label = 'Submit'
    submit_css_class = 'btn-primary'
    submit_icon_class = 'fa fa-floppy-o'
    form_action = None
    form_class = 'form-horizontal'
    label_class = 'col-md-2'
    field_class = 'col-md-8'
    layout = []

    def __init__(self, *args, **kwargs):
        super(CrispyMongoForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_action = self.form_action
        helper.form_class = self.form_class
        helper.label_class = self.label_class
        helper.field_class = self.field_class
        # Cancel goes back in browser history; Submit posts the form.
        cancel = CirspyIconButton(_("Cancel"), css_class='btn-default',
                                  icon_class='fa fa-reply',
                                  onclick='javascript:history.go(-1);')
        submit = CirspyIconButton(_(self.submit_label), type='submit',
                                  css_class=self.submit_css_class,
                                  icon_class=self.submit_icon_class)
        toolbar = Div(cancel, submit, css_class="btn-toolbar")
        rows = self.clean_layout(self.layout + [toolbar])
        helper.layout = Layout(*rows)
        self.helper = helper

    def clean_layout(self, layout):
        """Hook for subclasses to post-process the layout; identity here."""
        return layout
class InlineCrispyMongoForm(MongoForm):
    """Inline crispy-forms rendering for MongoEngine-backed forms.

    An icon submit button is appended after any fields the subclass
    declares in ``layout``.
    """
    submit_label = 'Submit'
    submit_css_class = 'btn-primary'
    submit_icon_class = 'fa fa-floppy-o'
    form_action = None
    form_class = 'form-inline'
    layout = []

    def __init__(self, *args, **kwargs):
        super(InlineCrispyMongoForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_action = self.form_action
        helper.form_class = self.form_class
        helper.field_template = 'bootstrap3/layout/inline_field.html'
        submit = CirspyIconButton(_(self.submit_label), type='submit',
                                  css_class=self.submit_css_class,
                                  icon_class=self.submit_icon_class)
        helper.layout = Layout(*self.clean_layout(self.layout + [submit]))
        self.helper = helper

    def clean_layout(self, layout):
        """Hook for subclasses to post-process the layout; identity here."""
        return layout
| prymitive/upaas-admin | upaas_admin/common/forms.py | Python | gpl-3.0 | 5,471 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.