# code | repo_name | path | language | license | size
# --- | --- | --- | --- | --- | ---
from django.conf import settings
from django import forms
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
from captcha_registration.recaptcha.widgets import ReCaptcha
from captcha_registration.recaptcha.client import captcha
class ReCaptchaField(forms.CharField):
    """Form field that validates a reCAPTCHA challenge/response pair.

    ``clean`` expects a two-item sequence as produced by the ``ReCaptcha``
    widget: the challenge token at index 0 and the user's response at
    index 1.  On success the challenge value is returned.
    """
    default_error_messages = {
        'captcha_invalid': _(u'Invalid captcha')
    }

    def __init__(self, *args, **kwargs):
        # Widget and required flag are fixed for this field; callers
        # cannot override them via kwargs.
        self.widget = ReCaptcha
        self.required = True
        super(ReCaptchaField, self).__init__(*args, **kwargs)

    def clean(self, values):
        # Run the standard CharField validation on the response value first
        # (e.g. the "required" check).
        super(ReCaptchaField, self).clean(values[1])
        recaptcha_challenge_value = smart_unicode(values[0])
        recaptcha_response_value = smart_unicode(values[1])
        check_captcha = captcha.submit(recaptcha_challenge_value,
            recaptcha_response_value, settings.RECAPTCHA_PRIVATE_KEY, {})
        if not check_captcha.is_valid:
            # BUGFIX: use the public forms.ValidationError; forms.util was an
            # internal module alias that later Django releases removed.
            raise forms.ValidationError(self.error_messages['captcha_invalid'])
        return values[0]
# Source: lambdamusic/testproject - konproj/libs/captcha_registration/recaptcha/fields.py (gpl-2.0)
from enigma import ePicLoad, eTimer, getDesktop, gMainDC, eSize
from Screens.Screen import Screen
from Tools.Directories import resolveFilename, pathExists, SCOPE_MEDIA, SCOPE_CURRENT_SKIN
from Components.Pixmap import Pixmap, MovingPixmap
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.FileList import FileList
from Components.AVSwitch import AVSwitch
from Components.Sources.List import List
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSelection, ConfigText, ConfigYesNo, KEY_LEFT, KEY_RIGHT, KEY_0, getConfigListEntry
import skin
def getScale():
    """Return the framebuffer scale factors reported by the A/V switch."""
    switch = AVSwitch()
    return switch.getFramebufferScale()
# Persistent picture-player settings, stored under config.pic.
config.pic = ConfigSubsection()
config.pic.framesize = ConfigInteger(default=30, limits=(5, 99))  # border (px) around the picture in full view
config.pic.slidetime = ConfigInteger(default=10, limits=(1, 60))  # slide show interval in seconds
config.pic.resize = ConfigSelection(default="1", choices = [("0", _("simple")), ("1", _("better"))])  # scaling quality
config.pic.cache = ConfigYesNo(default=True)  # cache decoded thumbnails
config.pic.lastDir = ConfigText(default=resolveFilename(SCOPE_MEDIA))  # directory restored on next start
config.pic.infoline = ConfigYesNo(default=True)  # show the file info line in full view
config.pic.loop = ConfigYesNo(default=True)  # restart the slide show when the end is reached
config.pic.bgcolor = ConfigSelection(default="#00000000", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
config.pic.autoOrientation = ConfigYesNo(default=False)  # rotate/flip according to EXIF orientation
config.pic.textcolor = ConfigSelection(default="#0038FF48", choices = [("#00000000", _("black")),("#009eb9ff", _("blue")),("#00ff5a51", _("red")), ("#00ffe875", _("yellow")), ("#0038FF48", _("green"))])
class picshow(Screen):
    """Picture player main screen: a file browser with a thumbnail preview.

    Entry points: green opens the thumbnail grid, OK on a file opens the
    full-screen viewer, yellow shows EXIF info, menu opens the setup dialog."""
    # Embedded default skin; widget names must match the ones created in __init__.
    skin = """
<screen name="picshow" position="center,center" size="560,440" title="Picture player" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="label" render="Label" position="5,55" size="350,140" font="Regular;19" backgroundColor="#25062748" transparent="1" />
<widget name="thn" position="360,40" size="180,160" alphatest="on" />
<widget name="filelist" position="5,205" zPosition="2" size="550,230" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MenuActions"],
        {
            "cancel": self.KeyExit,
            "red": self.KeyExit,
            "green": self.KeyGreen,
            "yellow": self.KeyYellow,
            "menu": self.KeyMenu,
            "ok": self.KeyOk
        }, -1)
        self["key_red"] = StaticText(_("Close"))
        self["key_green"] = StaticText(_("Thumbnails"))
        self["key_yellow"] = StaticText("")
        self["label"] = StaticText("")
        self["thn"] = Pixmap()
        currDir = config.pic.lastDir.value
        if not pathExists(currDir):
            currDir = "/"
        # Stop live TV while the picture player is active; restored in KeyExit().
        self.oldService = self.session.nav.getCurrentlyPlayingServiceReference()
        self.session.nav.stopService()
        # Show Background MVI
        import os
        try:
            os.system("/usr/bin/showiframe /usr/share/enigma2/black.mvi &")
        except:
            pass
        self.filelist = FileList(currDir, matchingPattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp|gif)")
        self["filelist"] = self.filelist
        self["filelist"].onSelectionChanged.append(self.selectionChanged)
        self.ThumbTimer = eTimer()
        self.ThumbTimer.callback.append(self.showThumb)
        self.picload = ePicLoad()
        self.picload.PictureData.get().append(self.showPic)
        self.onLayoutFinish.append(self.setConf)

    def showPic(self, picInfo=""):
        # ePicLoad callback: paint the preview thumbnail and the info text.
        ptr = self.picload.getData()
        if ptr != None:
            self["thn"].instance.setPixmap(ptr.__deref__())
            self["thn"].show()
            text = picInfo.split('\n',1)
            self["label"].setText(text[1])
            self["key_yellow"].setText(_("Exif"))

    def showThumb(self):
        # Request a thumbnail for the current selection (files only).
        if not self.filelist.canDescent():
            if self.filelist.getCurrentDirectory() and self.filelist.getFilename():
                if self.picload.getThumbnail(self.filelist.getCurrentDirectory() + self.filelist.getFilename()) == 1:
                    # getThumbnail() == 1 presumably means the decoder is busy -- retry after 500 ms.
                    self.ThumbTimer.start(500, True)

    def selectionChanged(self):
        if not self.filelist.canDescent():
            self.ThumbTimer.start(500, True)
        else:
            # A directory is selected: no preview or EXIF available.
            self["label"].setText("")
            self["thn"].hide()
            self["key_yellow"].setText("")

    def KeyGreen(self):
        #if not self.filelist.canDescent():
        self.session.openWithCallback(self.callbackView, Pic_Thumb, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())

    def KeyYellow(self):
        if not self.filelist.canDescent():
            self.session.open(Pic_Exif, self.picload.getInfo(self.filelist.getCurrentDirectory() + self.filelist.getFilename()))

    def KeyMenu(self):
        self.session.openWithCallback(self.setConf, Pic_Setup)

    def KeyOk(self):
        if self.filelist.canDescent():
            self.filelist.descent()
        else:
            self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist.getFileList(), self.filelist.getSelectionIndex(), self.filelist.getCurrentDirectory())

    def setConf(self, retval=None):
        # (Re)apply decoder parameters; also used as callback after Pic_Setup closes.
        self.setTitle(_("Picture player"))
        sc = getScale()
        #0=Width 1=Height 2=Aspect 3=use_cache 4=resize_type 5=Background(#AARRGGBB)
        self.picload.setPara((self["thn"].instance.size().width(), self["thn"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), "#00000000", config.pic.autoOrientation.value))

    def callbackView(self, val=0):
        # Child screens return the index of the picture viewed last.
        if val > 0:
            self.filelist.moveToIndex(val)

    def KeyExit(self):
        del self.picload
        if self.filelist.getCurrentDirectory() is None:
            config.pic.lastDir.value = "/"
        else:
            config.pic.lastDir.value = self.filelist.getCurrentDirectory()
        config.pic.save()
        # Resume the TV service that was stopped in __init__.
        self.session.nav.playService(self.oldService)
        self.close()
#------------------------------------------------------------------------------------------
class Pic_Setup(Screen, ConfigListScreen):
    """Settings dialog for the picture player (slide time, scaling, colors, ...)."""
    def __init__(self, session):
        Screen.__init__(self, session)
        # for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
        self.skinName = ["PicturePlayerSetup", "Setup"]
        self.setup_title = _("Settings")
        self.onChangedEntry = []
        self.session = session
        ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
        self["actions"] = ActionMap(["SetupActions", "MenuActions"],
        {
            "cancel": self.keyCancel,
            "save": self.keySave,
            "ok": self.keySave,
            "menu": self.closeRecursive,
        }, -2)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self.createSetup()
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setTitle(self.setup_title)

    def createSetup(self):
        # NOTE(review): "Fulview resulution" is misspelled, but it is a
        # translatable msgid -- check translation catalogs before fixing.
        # config.usage.pic_resolution is defined outside this file.
        setup_list = [
            getConfigListEntry(_("Slide show interval (sec.)"), config.pic.slidetime),
            getConfigListEntry(_("Scaling mode"), config.pic.resize),
            getConfigListEntry(_("Cache thumbnails"), config.pic.cache),
            getConfigListEntry(_("Show info line"), config.pic.infoline),
            getConfigListEntry(_("Frame size in full view"), config.pic.framesize),
            getConfigListEntry(_("Slide picture in loop"), config.pic.loop),
            getConfigListEntry(_("Background color"), config.pic.bgcolor),
            getConfigListEntry(_("Text color"), config.pic.textcolor),
            getConfigListEntry(_("Fulview resulution"), config.usage.pic_resolution),
            getConfigListEntry(_("Auto EXIF Orientation rotation/flipping"), config.pic.autoOrientation),
        ]
        self["config"].list = setup_list
        self["config"].l.setList(setup_list)

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)

    def keyRight(self):
        ConfigListScreen.keyRight(self)

    # for summary:
    def changedEntry(self):
        for x in self.onChangedEntry:
            x()

    def getCurrentEntry(self):
        return self["config"].getCurrent()[0]

    def getCurrentValue(self):
        return str(self["config"].getCurrent()[1].getText())

    def createSummary(self):
        from Screens.Setup import SetupSummary
        return SetupSummary
#---------------------------------------------------------------------------
class Pic_Exif(Screen):
    """Read-only two-column list screen showing the EXIF data of a picture.

    ``exiflist`` is the sequence returned by ePicLoad.getInfo(); its first
    entry is the full file path, followed by the EXIF values in the order
    of ``exifdesc`` below."""
    skin = """
<screen name="Pic_Exif" position="center,center" size="560,360" title="Info" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="menu" render="Listbox" position="5,50" size="550,310" scrollbarMode="showOnDemand" selectionDisabled="1" >
<convert type="TemplatedMultiContent">
{
"template": [ MultiContentEntryText(pos = (5, 5), size = (250, 30), flags = RT_HALIGN_LEFT, text = 0), MultiContentEntryText(pos = (260, 5), size = (290, 30), flags = RT_HALIGN_LEFT, text = 1)],
"fonts": [gFont("Regular", 20)],
"itemHeight": 30
}
</convert>
</widget>
</screen>"""

    def __init__(self, session, exiflist):
        Screen.__init__(self, session)
        self["actions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "cancel": self.close
        }, -1)
        self["key_red"] = StaticText(_("Close"))
        # Captions matching the order of values delivered in exiflist.
        exifdesc = [_("filename")+':', "EXIF-Version:", "Make:", "Camera:", "Date/Time:", "Width / Height:", "Flash used:", "Orientation:", "User Comments:", "Metering Mode:", "Exposure Program:", "Light Source:", "CompressedBitsPerPixel:", "ISO Speed Rating:", "X-Resolution:", "Y-Resolution:", "Resolution Unit:", "Brightness:", "Exposure Time:", "Exposure Bias:", "Distance:", "CCD-Width:", "ApertureFNumber:"]
        list = []  # NOTE: shadows the builtin 'list'; kept unchanged
        for x in range(len(exiflist)):
            if x>0:
                list.append((exifdesc[x], exiflist[x]))
            else:
                # First entry is the full path; display only the base name.
                name = exiflist[x].split('/')[-1]
                list.append((exifdesc[x], name))
        self["menu"] = List(list)
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setTitle(_("Info"))
#----------------------------------------------------------------------------------------
# Indices into the 5-tuples stored in Pic_Thumb.filelist:
T_INDEX = 0      # running picture number
T_FRAME_POS = 1  # cell position of the selection frame on the page
T_PAGE = 2       # page the thumbnail is shown on
T_NAME = 3       # file name without path
T_FULL = 4       # full path of the picture
class Pic_Thumb(Screen):
    """Full-screen thumbnail overview, paged to fit the desktop resolution.

    The skin is generated at runtime: as many thumbnail cells as fit on the
    screen are created, and a MovingPixmap frame marks the selection.
    Closes with the picture's index mapped back into the original piclist."""
    def __init__(self, session, piclist, lastindex, path):
        self.textcolor = config.pic.textcolor.value
        self.color = config.pic.bgcolor.value
        self.spaceX, self.picX, self.spaceY, self.picY, textsize, thumtxt = skin.parameters.get("PicturePlayerThumb",(35, 190, 30, 200, 20, 14))
        pic_frame = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/pic_frame.png")
        size_w = getDesktop(0).size().width()
        size_h = getDesktop(0).size().height()
        # Python 2 integer division: whole cells per row / column.
        self.thumbsX = size_w / (self.spaceX + self.picX) # thumbnails in X
        self.thumbsY = size_h / (self.spaceY + self.picY) # thumbnails in Y
        self.thumbsC = self.thumbsX * self.thumbsY # all thumbnails
        self.positionlist = []
        skincontent = ""
        posX = -1
        # Generate one label + one thumbnail widget per grid cell and remember
        # each cell's absolute position for the selection frame.
        for x in range(self.thumbsC):
            posY = x / self.thumbsX
            posX += 1
            if posX >= self.thumbsX:
                posX = 0
            absX = self.spaceX + (posX*(self.spaceX + self.picX))
            absY = self.spaceY + (posY*(self.spaceY + self.picY))
            self.positionlist.append((absX, absY))
            skincontent += "<widget source=\"label" + str(x) + "\" render=\"Label\" position=\"" + str(absX+5) + "," + str(absY+self.picY-textsize) + "\" size=\"" + str(self.picX - 10) + "," + str(textsize) \
                + "\" font=\"Regular;" + str(thumtxt) + "\" zPosition=\"2\" transparent=\"1\" noWrap=\"1\" foregroundColor=\"" + self.textcolor + "\" />"
            skincontent += "<widget name=\"thumb" + str(x) + "\" position=\"" + str(absX+5)+ "," + str(absY+5) + "\" size=\"" + str(self.picX -10) + "," + str(self.picY - (textsize*2)) + "\" zPosition=\"2\" transparent=\"1\" alphatest=\"on\" />"
        # Screen, backgroundlabel and MovingPixmap
        self.skin = "<screen position=\"0,0\" size=\"" + str(size_w) + "," + str(size_h) + "\" flags=\"wfNoBorder\" > \
<eLabel position=\"0,0\" zPosition=\"0\" size=\""+ str(size_w) + "," + str(size_h) + "\" backgroundColor=\"" + self.color + "\" />" \
            + "<widget name=\"frame\" position=\"" + str(self.spaceX)+ "," + str(self.spaceY)+ "\" size=\"" + str(self.picX) + "," + str(self.picY) + "\" pixmap=\"" + pic_frame + "\" zPosition=\"1\" alphatest=\"on\" />" \
            + skincontent + "</screen>"
        Screen.__init__(self, session)
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MovieSelectionActions"],
        {
            "cancel": self.Exit,
            "ok": self.KeyOk,
            "left": self.key_left,
            "right": self.key_right,
            "up": self.key_up,
            "down": self.key_down,
            "showEventInfo": self.StartExif,
        }, -1)
        self["frame"] = MovingPixmap()
        for x in range(self.thumbsC):
            self["label"+str(x)] = StaticText()
            self["thumb"+str(x)] = Pixmap()
        self.Thumbnaillist = []
        self.filelist = []
        self.currPage = -1
        self.dirlistcount = 0
        self.path = path
        # Flatten piclist into (index, frame position, page, name, full path)
        # tuples; directories are only counted so indices can be mapped back.
        index = 0
        framePos = 0
        Page = 0
        for x in piclist:
            if x[0][1] == False:
                self.filelist.append((index, framePos, Page, x[0][0], path + x[0][0]))
                index += 1
                framePos += 1
                if framePos > (self.thumbsC -1):
                    framePos = 0
                    Page += 1
            else:
                self.dirlistcount += 1
        self.maxentry = len(self.filelist)-1
        self.index = lastindex - self.dirlistcount
        if self.index < 0:
            self.index = 0
        self.picload = ePicLoad()
        self.picload.PictureData.get().append(self.showPic)
        self.onLayoutFinish.append(self.setPicloadConf)
        self.ThumbTimer = eTimer()
        self.ThumbTimer.callback.append(self.showPic)

    def setPicloadConf(self):
        sc = getScale()
        self.picload.setPara([self["thumb0"].instance.size().width(), self["thumb0"].instance.size().height(), sc[0], sc[1], config.pic.cache.value, int(config.pic.resize.value), self.color, config.pic.autoOrientation.value])
        self.paintFrame()

    def paintFrame(self):
        #print "index=" + str(self.index)
        if self.maxentry < self.index or self.index < 0:
            return
        pos = self.positionlist[self.filelist[self.index][T_FRAME_POS]]
        self["frame"].moveTo( pos[0], pos[1], 1)
        self["frame"].startMoving()
        if self.currPage != self.filelist[self.index][T_PAGE]:
            self.currPage = self.filelist[self.index][T_PAGE]
            self.newPage()

    def newPage(self):
        self.Thumbnaillist = []
        #clear Labels and Thumbnail
        for x in range(self.thumbsC):
            self["label"+str(x)].setText("")
            self["thumb"+str(x)].hide()
        #paint Labels and fill Thumbnail-List
        for x in self.filelist:
            if x[T_PAGE] == self.currPage:
                self["label"+str(x[T_FRAME_POS])].setText("(" + str(x[T_INDEX]+1) + ") " + x[T_NAME])
                self.Thumbnaillist.append([0, x[T_FRAME_POS], x[T_FULL]])
        #paint Thumbnail start
        self.showPic()

    def showPic(self, picInfo=""):
        # Per-entry state machine: 0 = not requested, 1 = decode running,
        # 2 = painted.  Called directly, by the timer and by ePicLoad.
        for x in range(len(self.Thumbnaillist)):
            if self.Thumbnaillist[x][0] == 0:
                if self.picload.getThumbnail(self.Thumbnaillist[x][2]) == 1: # busy -- try again later
                    self.ThumbTimer.start(500, True)
                else:
                    self.Thumbnaillist[x][0] = 1
                break
            elif self.Thumbnaillist[x][0] == 1:
                self.Thumbnaillist[x][0] = 2
                ptr = self.picload.getData()
                if ptr != None:
                    self["thumb" + str(self.Thumbnaillist[x][1])].instance.setPixmap(ptr.__deref__())
                    self["thumb" + str(self.Thumbnaillist[x][1])].show()

    def key_left(self):
        self.index -= 1
        if self.index < 0:
            self.index = self.maxentry
        self.paintFrame()

    def key_right(self):
        self.index += 1
        if self.index > self.maxentry:
            self.index = 0
        self.paintFrame()

    def key_up(self):
        self.index -= self.thumbsX
        if self.index < 0:
            self.index = self.maxentry
        self.paintFrame()

    def key_down(self):
        self.index += self.thumbsX
        if self.index > self.maxentry:
            self.index = 0
        self.paintFrame()

    def StartExif(self):
        if self.maxentry < 0:
            return
        self.session.open(Pic_Exif, self.picload.getInfo(self.filelist[self.index][T_FULL]))

    def KeyOk(self):
        if self.maxentry < 0:
            return
        self.old_index = self.index
        self.session.openWithCallback(self.callbackView, Pic_Full_View, self.filelist, self.index, self.path)

    def callbackView(self, val=0):
        # Full view returns the index of the picture shown last.
        self.index = val
        if self.old_index != self.index:
            self.paintFrame()

    def Exit(self):
        del self.picload
        # Report the selection back as an index into the original piclist.
        self.close(self.index + self.dirlistcount)
#---------------------------------------------------------------------------
class Pic_Full_View(Screen):
    """Full-screen picture viewer with optional slide show.

    Accepts three filelist shapes: the browser's FileList entries
    (3-tuples), a 2-tuple scan list, or Pic_Thumb's internal 5-tuples.
    Decodes one picture ahead for fast stepping and closes with the
    index of the picture shown last."""
    def __init__(self, session, filelist, index, path):
        self.textcolor = config.pic.textcolor.value
        self.bgcolor = config.pic.bgcolor.value
        space = config.pic.framesize.value
        self.size_w = size_w = getDesktop(0).size().width()
        self.size_h = size_h = getDesktop(0).size().height()
        # Optionally switch the framebuffer to a dedicated viewing resolution;
        # restored in Exit().  config.usage.pic_resolution is defined elsewhere.
        if config.usage.pic_resolution.value and (size_w, size_h) != eval(config.usage.pic_resolution.value):
            (size_w, size_h) = eval(config.usage.pic_resolution.value)
            gMainDC.getInstance().setResolution(size_w, size_h)
            getDesktop(0).resize(eSize(size_w, size_h))
        self.skin = "<screen position=\"0,0\" size=\"" + str(size_w) + "," + str(size_h) + "\" flags=\"wfNoBorder\" > \
<eLabel position=\"0,0\" zPosition=\"0\" size=\""+ str(size_w) + "," + str(size_h) + "\" backgroundColor=\""+ self.bgcolor +"\" /><widget name=\"pic\" position=\"" + str(space) + "," + str(space) + "\" size=\"" + str(size_w-(space*2)) + "," + str(size_h-(space*2)) + "\" zPosition=\"1\" alphatest=\"on\" /> \
<widget name=\"point\" position=\""+ str(space+5) + "," + str(space+2) + "\" size=\"20,20\" zPosition=\"2\" pixmap=\"skin_default/icons/record.png\" alphatest=\"on\" /> \
<widget name=\"play_icon\" position=\""+ str(space+25) + "," + str(space+2) + "\" size=\"20,20\" zPosition=\"2\" pixmap=\"skin_default/icons/ico_mp_play.png\" alphatest=\"on\" /> \
<widget source=\"file\" render=\"Label\" position=\""+ str(space+45) + "," + str(space) + "\" size=\""+ str(size_w-(space*2)-50) + ",25\" font=\"Regular;20\" borderWidth=\"1\" borderColor=\"#000000\" halign=\"left\" foregroundColor=\"" + self.textcolor + "\" zPosition=\"2\" noWrap=\"1\" transparent=\"1\" /></screen>"
        Screen.__init__(self, session)
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "MovieSelectionActions"],
        {
            "cancel": self.Exit,
            "green": self.PlayPause,
            "yellow": self.PlayPause,
            "blue": self.nextPic,
            "red": self.prevPic,
            "left": self.prevPic,
            "right": self.nextPic,
            "showEventInfo": self.StartExif,
            "contextMenu": self.KeyMenu,
        }, -1)
        self["point"] = Pixmap()
        self["pic"] = Pixmap()
        self["play_icon"] = Pixmap()
        self["file"] = StaticText(_("please wait, loading picture..."))
        self.old_index = 0
        self.filelist = []
        self.lastindex = index
        self.currPic = []
        self.shownow = True
        self.dirlistcount = 0
        # Normalize the incoming list to plain full paths; directories are
        # only counted so the returned index maps back to the caller's list.
        for x in filelist:
            if len(filelist[0]) == 3: #orig. filelist
                if x[0][1] == False:
                    self.filelist.append(path + x[0][0])
                else:
                    self.dirlistcount += 1
            elif len(filelist[0]) == 2: #scanlist
                if x[0][1] == False:
                    self.filelist.append(x[0][0])
                else:
                    self.dirlistcount += 1
            else: # thumbnaillist
                self.filelist.append(x[T_FULL])
        self.maxentry = len(self.filelist)-1
        self.index = index - self.dirlistcount
        if self.index < 0:
            self.index = 0
        self.picload = ePicLoad()
        self.picload.PictureData.get().append(self.finish_decode)
        self.slideTimer = eTimer()
        self.slideTimer.callback.append(self.slidePic)
        if self.maxentry >= 0:
            self.onLayoutFinish.append(self.setPicloadConf)

    def setPicloadConf(self):
        self.setConf()
        self["play_icon"].hide()
        if config.pic.infoline.value == False:
            self["file"].setText("")
        self.start_decode()

    def setConf(self, retval=None):
        # (Re)apply decoder parameters; also the callback after Pic_Setup closes.
        sc = getScale()
        #0=Width 1=Height 2=Aspect 3=use_cache 4=resize_type 5=Background(#AARRGGBB)
        self.picload.setPara([self["pic"].instance.size().width(), self["pic"].instance.size().height(), sc[0], sc[1], 0, int(config.pic.resize.value), self.bgcolor, config.pic.autoOrientation.value])

    def ShowPicture(self):
        # Paint the picture decoded last, then immediately start decoding the
        # next one so stepping forward feels instant.
        if self.shownow and len(self.currPic):
            self.shownow = False
            if config.pic.infoline.value:
                self["file"].setText(self.currPic[0])
            else:
                self["file"].setText("")
            self.lastindex = self.currPic[1]
            self["pic"].instance.setPixmap(self.currPic[2].__deref__())
            self.currPic = []
            self.next()
            self.start_decode()

    def finish_decode(self, picInfo=""):
        # ePicLoad callback: cache (info text, index, pixmap) for ShowPicture().
        self["point"].hide()
        ptr = self.picload.getData()
        if ptr != None:
            text = ""
            try:
                text = picInfo.split('\n',1)
                text = "(" + str(self.index+1) + "/" + str(self.maxentry+1) + ") " + text[0].split('/')[-1]
            except:
                pass
            self.currPic = []
            self.currPic.append(text)
            self.currPic.append(self.index)
            self.currPic.append(ptr)
            self.ShowPicture()

    def start_decode(self):
        self.picload.startDecode(self.filelist[self.index])
        # The "point" icon signals that decoding is in progress.
        self["point"].show()

    def next(self):
        self.index += 1
        if self.index > self.maxentry:
            self.index = 0

    def prev(self):
        self.index -= 1
        if self.index < 0:
            self.index = self.maxentry

    def slidePic(self):
        # Timer callback: advance the slide show; stop at the end unless looping.
        print "slide to next Picture index=" + str(self.lastindex)
        if config.pic.loop.value==False and self.lastindex == self.maxentry:
            self.PlayPause()
        self.shownow = True
        self.ShowPicture()

    def PlayPause(self):
        # Toggle the slide show timer (and its on-screen play icon).
        if self.slideTimer.isActive():
            self.slideTimer.stop()
            self["play_icon"].hide()
        else:
            self.slideTimer.start(config.pic.slidetime.value*1000)
            self["play_icon"].show()
            self.nextPic()

    def prevPic(self):
        # Discard the pre-decoded "next" picture and decode the previous one.
        self.currPic = []
        self.index = self.lastindex
        self.prev()
        self.start_decode()
        self.shownow = True

    def nextPic(self):
        # The next picture is already decoded -- just show it.
        self.shownow = True
        self.ShowPicture()

    def StartExif(self):
        if self.maxentry < 0:
            return
        self.session.open(Pic_Exif, self.picload.getInfo(self.filelist[self.lastindex]))

    def KeyMenu(self):
        self.session.openWithCallback(self.setConf, Pic_Setup)

    def Exit(self):
        del self.picload
        # Restore the original framebuffer resolution if it was changed.
        if config.usage.pic_resolution.value and (self.size_w, self.size_h) != eval(config.usage.pic_resolution.value):
            gMainDC.getInstance().setResolution(self.size_w, self.size_h)
            getDesktop(0).resize(eSize(self.size_w, self.size_h))
        self.close(self.lastindex + self.dirlistcount)
# Source: kingvuplus/ops - lib/python/Plugins/Extensions/PicturePlayer/ui.py (gpl-2.0)
#!/usr/bin/env python
import sys
from pfsense_api import PfSenseAPI
from datetime import datetime
from pfsense_cmdline import PfSenseOptionParser
from ConfigParser import ConfigParser
from pfsense_logger import PfSenseLogger as logging
import os.path
# Refresh one or more imported CRLs on a pfSense box.  Connection and CRL
# details come from the config file (one section per CRL), optionally
# overridden by command-line options.
parser = PfSenseOptionParser()
parser.add_option("--id", dest="crl_id", help="ID of the CRL to update")
parser.add_option("--name", dest="name", help="Descriptive name of the CRL", default="Imported CRL")
parser.add_option("--crl", dest="crl", help="File containing CRL in PEM format", metavar="CRL_FILE")
parser.add_option("--ssl_verification", dest="ssl_verification", help="Whether SSL should be verified or not, valid values are yes/no, true/false, 1/0", default=True, metavar="yes/no")
parser.add_option("--overwrite", dest="overwrite", default=False, help="Command line options will overwrite same settings in config file", action="store_true")
(options, args) = parser.parse_args()

logger = logging.setupLogger(options.logging)
parser.check_cmd_options(options)

# Settings every config-file section must end up with after merging.
required_items = ['crl_id', 'crl', 'host', 'username', 'password']

# Command-line values that may be merged into each section; 'config' and
# 'overwrite' only steer the merge itself, so drop them.
options_cmdline = vars(options).copy()
del options_cmdline['config']
del options_cmdline['overwrite']

configFile = ConfigParser()
configFile.read(options.config)

api = PfSenseAPI()
for section in configFile.sections():
    logger.info("Working on %s" % section)
    parsed_options = parser.parse_individual_options(configFile.items(section), options_cmdline, overwrite = options.overwrite, bool_keys = ['ssl_verification'])

    # Skip (do not abort) sections with missing settings so the remaining
    # CRLs still get refreshed.
    required_items_missed = False
    missed_items = parser.check_required_options(parsed_options, required_items)
    for item in missed_items:
        # BUGFIX: message previously read "reqired".
        logger.error('%s is required for entry %s' % (item, section))
        required_items_missed = True
    if required_items_missed:
        continue

    if not os.path.isfile(parsed_options['crl']):
        logger.error('CRL file %s does not exist?' % parsed_options['crl'])
        continue

    try:
        crlFile = open(parsed_options['crl'], 'r')
        crlData = crlFile.read()
        crlFile.close()
    except:
        # BUGFIX: message previously read "Error while read CRL data".
        logger.error("Error while reading CRL data from file %s" % parsed_options['crl'])
        continue

    api['options'] = parsed_options
    api.login()
    # NOTE(review): the description uses options.name (command-line/default
    # value), not the per-section parsed options -- confirm whether a
    # section-level "name" setting should win here.
    (rc, data, contentType) = api.call('/system_crlmanager.php', 'POST',
        apiData = {
            'method': 'existing',
            'descr': '%s (last refresh: %s)' % (options.name, datetime.now().isoformat()),
            'crltext': crlData,
            'submit': 'Save'
        },
        itemData = {
            'id': parsed_options['crl_id'],
            'act': 'editimported'
        })
    api.logout()
    # pfSense answers a successful form save with a 302 redirect.
    if rc == 302:
        logger.info('CRL Update successful for %s' % (section))
    else:
        logger.info('CRL Update failed for %s' % (section))
# Source: stanleyz/pfsense-2.x-tools - pfsense-updateCRL.py (gpl-2.0)
import ujson
from unittest import mock
from unittest.mock import MagicMock
from flask import url_for
from flask_login import login_required, AnonymousUserMixin
from requests.exceptions import HTTPError
from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
from pika.exceptions import ConnectionClosed, ChannelClosed
import listenbrainz.db.user as db_user
import listenbrainz.webserver.login
from listenbrainz.db.testing import DatabaseTestCase
from listenbrainz.webserver import create_app
from listenbrainz.webserver.testing import ServerTestCase
class IndexViewsTestCase(ServerTestCase, DatabaseTestCase):
def setUp(self):
    # Both bases need explicit setup: test web server + fresh database.
    ServerTestCase.setUp(self)
    DatabaseTestCase.setUp(self)
def test_index(self):
    """The home page renders successfully."""
    resp = self.client.get(url_for('index.index'))
    self.assert200(resp)
def test_downloads(self):
    """The legacy downloads URL redirects to the data page."""
    resp = self.client.get(url_for('index.downloads'))
    self.assert_redirects(resp, url_for('index.data'))
def test_data(self):
    """The data page renders successfully."""
    resp = self.client.get(url_for('index.data'))
    self.assert200(resp)
def test_contribute(self):
    """The contribute page renders successfully."""
    resp = self.client.get(url_for('index.contribute'))
    self.assert200(resp)
def test_goals(self):
    """The goals page renders successfully."""
    resp = self.client.get(url_for('index.goals'))
    self.assert200(resp)
def test_faq(self):
    """The FAQ page renders successfully."""
    resp = self.client.get(url_for('index.faq'))
    self.assert200(resp)
def test_roadmap(self):
    """The roadmap page renders successfully."""
    resp = self.client.get(url_for('index.roadmap'))
    self.assert200(resp)
def test_add_data_info(self):
    """The add-data info page renders successfully."""
    resp = self.client.get(url_for('index.add_data_info'))
    self.assert200(resp)
def test_import_data_info(self):
    """The import-data info page renders successfully."""
    resp = self.client.get(url_for('index.import_data_info'))
    self.assert200(resp)
def test_404(self):
    """Unknown URLs return the custom 404 page."""
    resp = self.client.get('/canyoufindthis')
    self.assert404(resp)
    self.assertIn('Not Found', resp.data.decode('utf-8'))
def test_lastfm_proxy(self):
    """The Last.fm proxy page renders successfully."""
    resp = self.client.get(url_for('index.proxy'))
    self.assert200(resp)
def test_flask_debugtoolbar(self):
    """ Test if flask debugtoolbar is loaded correctly

    Creating an app with default config so that debug is True
    and SECRET_KEY is defined.
    """
    app = create_app(debug=True)
    client = app.test_client()
    resp = client.get('/data')
    self.assert200(resp)
    # 'flDebug' is markup injected by flask-debugtoolbar.
    self.assertIn('flDebug', str(resp.data))
def test_current_status(self):
    """The current-status page renders successfully."""
    resp = self.client.get(url_for('index.current_status'))
    self.assert200(resp)
@mock.patch('listenbrainz.db.user.get')
def test_menu_not_logged_in(self, mock_user_get):
    """Anonymous visitors get the sign-in entry and no user-menu DB query."""
    resp = self.client.get(url_for('index.index'))
    data = resp.data.decode('utf-8')
    self.assertIn('Sign in', data)
    self.assertIn('Import', data)
    # item in user menu doesn't exist
    self.assertNotIn('My Listens', data)
    mock_user_get.assert_not_called()
@mock.patch('listenbrainz.db.user.get_by_login_id')
def test_menu_logged_in(self, mock_user_get):
    """ If the user is logged in, check that we perform a database query to get user data """
    user = db_user.get_or_create(1, 'iliekcomputers')
    db_user.agree_to_gdpr(user['musicbrainz_id'])
    # Re-fetch so the user dict reflects the GDPR agreement.
    user = db_user.get_or_create(1, 'iliekcomputers')
    mock_user_get.return_value = user
    self.temporary_login(user['login_id'])
    resp = self.client.get(url_for('index.index'))
    data = resp.data.decode('utf-8')
    # username (menu header)
    self.assertIn('iliekcomputers', data)
    self.assertIn('Import', data)
    # item in user menu
    self.assertIn('My Listens', data)
    mock_user_get.assert_called_with(user['login_id'])
@mock.patch('listenbrainz.db.user.get_by_login_id')
def test_menu_logged_in_error_show(self, mock_user_get):
    """ If the user is logged in, if we show a 400 or 404 error, show the user menu"""
    # Ad-hoc views raising the errors under test.
    @self.app.route('/page_that_returns_400')
    def view400():
        raise BadRequest('bad request')

    @self.app.route('/page_that_returns_404')
    def view404():
        raise NotFound('not found')

    user = db_user.get_or_create(1, 'iliekcomputers')
    db_user.agree_to_gdpr(user['musicbrainz_id'])
    user = db_user.get_or_create(1, 'iliekcomputers')
    mock_user_get.return_value = user
    self.temporary_login(user['login_id'])

    resp = self.client.get('/page_that_returns_400')
    data = resp.data.decode('utf-8')
    self.assert400(resp)
    # username (menu header)
    self.assertIn('iliekcomputers', data)
    self.assertIn('Import', data)
    # item in user menu
    self.assertIn('My Listens', data)
    mock_user_get.assert_called_with(user['login_id'])

    resp = self.client.get('/page_that_returns_404')
    data = resp.data.decode('utf-8')
    self.assert404(resp)
    # username (menu header)
    self.assertIn('iliekcomputers', data)
    self.assertIn('Import', data)
    # item in user menu
    self.assertIn('My Listens', data)
    mock_user_get.assert_called_with(user['login_id'])
@mock.patch('listenbrainz.db.user.get')
def test_menu_logged_in_error_dont_show_no_user(self, mock_user_get):
    """ If the user is logged in, if we show a 500 error, do not show the user menu
    Don't query the database to get a current_user for the template context"""
    @self.app.route('/page_that_returns_500')
    def view500():
        raise InternalServerError('error')

    user = db_user.get_or_create(1, 'iliekcomputers')
    db_user.agree_to_gdpr(user['musicbrainz_id'])
    user = db_user.get_or_create(1, 'iliekcomputers')
    mock_user_get.return_value = user
    self.temporary_login(user['login_id'])
    resp = self.client.get('/page_that_returns_500')
    data = resp.data.decode('utf-8')
    # item not in user menu
    self.assertNotIn('My Listens', data)
    self.assertNotIn('Sign in', data)
    self.assertIn('Import', data)
@mock.patch('listenbrainz.db.user.get_by_login_id')
def test_menu_logged_in_error_dont_show_user_loaded(self, mock_user_get):
    """ If the user is logged in, if we show a 500 error, do not show the user menu
    If the user has previously been loaded in the view, check that it's not
    loaded while rendering the template"""
    user = db_user.get_or_create(1, 'iliekcomputers')
    db_user.agree_to_gdpr(user['musicbrainz_id'])
    user = db_user.get_or_create(1, 'iliekcomputers')
    mock_user_get.return_value = user

    @self.app.route('/page_that_returns_500')
    @login_required
    def view500():
        # flask-login user is loaded during @login_required, so check that the db has been queried
        mock_user_get.assert_called_with(user['login_id'])
        raise InternalServerError('error')

    self.temporary_login(user['login_id'])
    resp = self.client.get('/page_that_returns_500')
    data = resp.data.decode('utf-8')
    self.assertIn('Import', data)
    # item not in user menu
    self.assertNotIn('My Listens', data)
    self.assertNotIn('Sign in', data)
    # Even after rendering the template, the database has only been queried once (before the exception)
    mock_user_get.assert_called_once()
    self.assertIsInstance(self.get_context_variable('current_user'), listenbrainz.webserver.login.User)
@mock.patch('listenbrainz.webserver.views.index._authorize_mb_user_deleter')
@mock.patch('listenbrainz.webserver.views.index.delete_user')
def test_mb_user_deleter_valid_account(self, mock_delete_user, mock_authorize_mb_user_deleter):
    """An existing MusicBrainz row id triggers authorization and deletion."""
    user1 = db_user.create(1, 'iliekcomputers')
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assert200(r)
    mock_authorize_mb_user_deleter.assert_called_once_with('132')
    mock_delete_user.assert_called_once_with('iliekcomputers')
@mock.patch('listenbrainz.webserver.views.index._authorize_mb_user_deleter')
@mock.patch('listenbrainz.webserver.views.index.delete_user')
def test_mb_user_deleter_not_found(self, mock_delete_user, mock_authorize_mb_user_deleter):
    """An unknown MusicBrainz row id yields 404 and no deletion, even though
    the token itself is authorized."""
    # no user in the db with musicbrainz_row_id = 2
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=2, access_token='312421'))
    self.assert404(r)
    mock_authorize_mb_user_deleter.assert_called_with('312421')
    mock_delete_user.assert_not_called()
@mock.patch('listenbrainz.webserver.views.index.requests.get')
@mock.patch('listenbrainz.webserver.views.index.delete_user')
def test_mb_user_deleter_valid_access_token(self, mock_delete_user, mock_requests_get):
    """The access token is validated against the MusicBrainz userinfo
    endpoint; the expected 'UserDeleter' account authorizes deletion."""
    mock_requests_get.return_value = MagicMock()
    mock_requests_get.return_value.json.return_value = {
        'sub': 'UserDeleter',
        'metabrainz_user_id': 2007538,
    }
    user1 = db_user.create(1, 'iliekcomputers')  # must exist in the db; return value unused
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assert200(r)
    # Token must be forwarded as a Bearer header to the userinfo endpoint.
    mock_requests_get.assert_called_with(
        'https://musicbrainz.org/oauth2/userinfo',
        headers={'Authorization': 'Bearer 132'},
    )
    mock_delete_user.assert_called_with('iliekcomputers')
@mock.patch('listenbrainz.webserver.views.index.requests.get')
@mock.patch('listenbrainz.webserver.views.index.delete_user')
def test_mb_user_deleter_invalid_access_tokens(self, mock_delete_user, mock_requests_get):
    """Every userinfo response that does not exactly identify the official
    'UserDeleter' account (or that errors out) must be rejected with 401
    and must never trigger a deletion."""
    mock_requests_get.return_value = MagicMock()
    mock_requests_get.return_value.json.return_value = {
        'sub': 'UserDeleter',
        'metabrainz_user_id': 2007531,  # incorrect musicbrainz row id for UserDeleter
    }
    user1 = db_user.create(1, 'iliekcomputers')  # must exist in the db; return value unused
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assertStatus(r, 401)
    mock_delete_user.assert_not_called()
    # no sub value
    mock_requests_get.return_value.json.return_value = {
        'metabrainz_user_id': 2007538,
    }
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assertStatus(r, 401)
    mock_delete_user.assert_not_called()
    # no row id
    mock_requests_get.return_value.json.return_value = {
        'sub': 'UserDeleter',
    }
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assertStatus(r, 401)
    mock_delete_user.assert_not_called()
    # incorrect username
    mock_requests_get.return_value.json.return_value = {
        'sub': 'iliekcomputers',
        'metabrainz_user_id': 2007538
    }
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assertStatus(r, 401)
    mock_delete_user.assert_not_called()
    # everything incorrect
    mock_requests_get.return_value.json.return_value = {
        'sub': 'iliekcomputers',
        'metabrainz_user_id': 1,
    }
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assertStatus(r, 401)
    mock_delete_user.assert_not_called()
    # HTTPError while getting userinfo from MusicBrainz
    mock_requests_get.return_value.raise_for_status.side_effect = HTTPError
    r = self.client.get(url_for('index.mb_user_deleter', musicbrainz_row_id=1, access_token='132'))
    self.assertStatus(r, 401)
    mock_delete_user.assert_not_called()
def test_recent_listens_page(self):
    """The recent-listens page renders its template with the expected
    react props ('recent' mode, no spotify credentials)."""
    response = self.client.get(url_for('index.recent_listens'))
    self.assert200(response)
    self.assertTemplateUsed('index/recent.html')
    props = ujson.loads(self.get_context_variable('props'))
    self.assertEqual(props['mode'], 'recent')
    self.assertDictEqual(props['spotify'], {})
def test_feed_page(self):
    """A logged-in user who agreed to the GDPR terms can fetch /feed."""
    user = db_user.get_or_create(1, 'iliekcomputers')  # dev
    db_user.agree_to_gdpr(user['musicbrainz_id'])
    self.temporary_login(user['login_id'])
    r = self.client.get('/feed')
    self.assert200(r)
def test_similar_users(self):
    """The similar-users page is reachable anonymously."""
    resp = self.client.get(url_for('index.similar_users'))
    self.assert200(resp)
| Freso/listenbrainz-server | listenbrainz/webserver/views/test/test_index.py | Python | gpl-2.0 | 12,732 |
# -*- coding: utf-8 -*-
# daemon/runner.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2009 Ben Finney <ben+python@benfinney.id.au>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Daemon runner library.
"""
import sys
import os
import signal
import errno
import pidlockfile
from daemon import DaemonContext
class DaemonRunnerError(Exception):
    """ Abstract base class for errors from DaemonRunner. """

class DaemonRunnerInvalidActionError(ValueError, DaemonRunnerError):
    """ Raised when specified action for DaemonRunner is invalid.

        Also subclasses ValueError so callers may catch it as either type.
        """

class DaemonRunnerStartFailureError(RuntimeError, DaemonRunnerError):
    """ Raised when failure starting DaemonRunner. """

class DaemonRunnerStopFailureError(RuntimeError, DaemonRunnerError):
    """ Raised when failure stopping DaemonRunner. """
class DaemonRunner(object):
    """ Controller for a callable running in a separate background process.

        The first command-line argument is the action to take:

        * 'start': Become a daemon and call `app.run()`.
        * 'stop': Exit the daemon process specified in the PID file.
        * 'restart': Stop, then start.

        """

    # Template for the message emitted by `_start`; %(pid)d is filled with
    # the daemon's own process ID.
    start_message = "started with pid %(pid)d"

    def __init__(self, app):
        """ Set up the parameters of a new runner.

            The `app` argument must have the following attributes:

            * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem
              paths to open and replace the existing `sys.stdin`,
              `sys.stdout`, `sys.stderr`.

            * `pidfile_path`: Absolute filesystem path to a file that
              will be used as the PID file for the daemon. If
              ``None``, no PID file will be used.

            * `pidfile_timeout`: Used as the default acquisition
              timeout value supplied to the runner's PID lock file.

            * `run`: Callable that will be invoked when the daemon is
              started.

            """
        # Parse the command line first, so a usage error exits before any
        # of the app's streams are opened.
        self.parse_args()
        self.app = app
        self.daemon_context = DaemonContext()
        self.daemon_context.stdin = open(app.stdin_path, 'r')
        self.daemon_context.stdout = open(app.stdout_path, 'w+')
        # stderr is opened unbuffered (buffering=0) so diagnostics are not
        # lost if the daemon dies abruptly.
        self.daemon_context.stderr = open(
            app.stderr_path, 'w+', buffering=0)
        self.pidfile = None
        if app.pidfile_path is not None:
            self.pidfile = make_pidlockfile(
                app.pidfile_path, app.pidfile_timeout)
        self.daemon_context.pidfile = self.pidfile

    def _usage_exit(self, argv):
        """ Emit a usage message, then exit with status 2.
            """
        progname = os.path.basename(argv[0])
        usage_exit_code = 2
        action_usage = "|".join(self.action_funcs.keys())
        # vars() supplies the local names referenced by the format string.
        message = "usage: %(progname)s %(action_usage)s" % vars()
        emit_message(message)
        sys.exit(usage_exit_code)

    def parse_args(self, argv=None):
        """ Parse command-line arguments.

            Sets `self.action` from the first argument; exits via
            `_usage_exit` when the argument is missing or unknown.
            """
        if argv is None:
            argv = sys.argv
        min_args = 2
        if len(argv) < min_args:
            self._usage_exit(argv)
        self.action = argv[1]
        if self.action not in self.action_funcs:
            self._usage_exit(argv)

    def _start(self):
        """ Open the daemon context and run the application.

            Raises DaemonRunnerStartFailureError when the PID file is
            already locked by a live process.
            """
        # A stale lock (left by a dead process) is broken, not treated as
        # a conflict.
        if is_pidfile_stale(self.pidfile):
            self.pidfile.break_lock()
        try:
            self.daemon_context.open()
        except pidlockfile.AlreadyLocked:
            pidfile_path = self.pidfile.path
            raise DaemonRunnerStartFailureError(
                "PID file %(pidfile_path)r already locked" % vars())
        pid = os.getpid()
        message = self.start_message % vars()
        emit_message(message)
        self.app.run()

    def _terminate_daemon_process(self):
        """ Terminate the daemon process specified in the current PID file.

            Sends SIGTERM; raises DaemonRunnerStopFailureError when the
            signal cannot be delivered.
            """
        pid = self.pidfile.read_pid()
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError, exc:
            raise DaemonRunnerStopFailureError(
                "Failed to terminate %(pid)d: %(exc)s" % vars())

    def _stop(self):
        """ Exit the daemon process specified in the current PID file.

            A stale lock is simply broken; a live one causes SIGTERM to be
            sent to the recorded PID.
            """
        if not self.pidfile.is_locked():
            pidfile_path = self.pidfile.path
            raise DaemonRunnerStopFailureError(
                "PID file %(pidfile_path)r not locked" % vars())
        if is_pidfile_stale(self.pidfile):
            self.pidfile.break_lock()
        else:
            self._terminate_daemon_process()

    def _restart(self):
        """ Stop, then start.
            """
        self._stop()
        self._start()

    # Maps the command-line action to the (unbound) handler; `do_action`
    # calls the entry as func(self).
    action_funcs = {
        'start': _start,
        'stop': _stop,
        'restart': _restart,
    }

    def _get_action_func(self):
        """ Return the function for the specified action.

            Raises ``DaemonRunnerInvalidActionError`` if the action is
            unknown.
            """
        try:
            func = self.action_funcs[self.action]
        except KeyError:
            raise DaemonRunnerInvalidActionError(
                "Unknown action: %(action)r" % vars(self))
        return func

    def do_action(self):
        """ Perform the requested action.
            """
        func = self._get_action_func()
        func(self)
def emit_message(message, stream=None):
    """ Write `message` plus a trailing newline to `stream` and flush.

        When `stream` is None the message goes to `sys.stderr`.
        """
    target = sys.stderr if stream is None else stream
    target.write("%s\n" % (message,))
    target.flush()
def make_pidlockfile(path, acquire_timeout):
    """ Make a PIDLockFile instance with the given filesystem path.

        `path` must be a string (Python 2 `basestring`) and an absolute
        path; otherwise ValueError is raised.
        """
    if not isinstance(path, basestring):
        error = ValueError("Not a filesystem path: %(path)r" % vars())
        raise error
    if not os.path.isabs(path):
        error = ValueError("Not an absolute path: %(path)r" % vars())
        raise error
    lockfile = pidlockfile.TimeoutPIDLockFile(path, acquire_timeout)
    return lockfile
def is_pidfile_stale(pidfile):
    """ Determine whether a PID file is stale.

        Return ``True`` (“stale”) if the contents of the PID file are
        valid but do not match the PID of a currently-running process;
        otherwise return ``False``.
        """
    result = False
    pidfile_pid = pidfile.read_pid()
    if pidfile_pid is not None:
        try:
            # os.kill with SIG_DFL only probes for the process's
            # existence; no signal is delivered.
            os.kill(pidfile_pid, signal.SIG_DFL)
        except OSError, exc:
            if exc.errno == errno.ESRCH:
                # The specified PID does not exist
                result = True
    return result
| elephantum/python-daemon | daemon/runner.py | Python | gpl-2.0 | 7,087 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 24 11:17:25 2014
@author: kshmirko
"""
import pandas as pds
import numpy as np
def seasons(x):
    """Return the meteorological season name for a date-like object.

    `x` only needs a ``month`` attribute (datetime.date, Timestamp, ...).
    """
    month = x.month
    if month in (12, 1, 2):
        return "Winter"
    if month in (3, 4, 5):
        return "Spring"
    if month in (6, 7, 8):
        return "Summer"
    return "Fall"
# Target coordinates and search radius (degrees) used to name the dataset.
Lon0 = 131.9
Lat0 = 43.1
Radius = 4.0

# Input HDF5 dataset and output figure/statistics file names.
DB = 'DS-%5.1f-%4.1f-%3.1f.h5'%(Lon0, Lat0, Radius)
O3StatFileName_fig = 'O3-%5.1f-%4.1f-%3.1f.eps'%(Lon0, Lat0, Radius)
O3StatFileName_h5 = 'O3-%5.1f-%4.1f-%3.1f.h5'%(Lon0, Lat0, Radius)

# Ozone profiles, their errors and tropopause heights from the dataset.
O3 = pds.read_hdf(DB,'O3')
O3_Err = pds.read_hdf(DB,'O3Err')
TH = pds.read_hdf(DB,'TH')
# compute per-season statistics relative to the ground
# (means/stddevs; O3 rescaled by 1e12, errors converted from percent).
O3seasons = O3.groupby(seasons).mean().T / 1.0e12
O3seasons_s = O3.groupby(seasons).std().T / 1.0e12
O3seasons_Err = O3_Err.groupby(seasons).mean().T / 100.00
THseasons = TH.groupby(seasons).agg([np.mean, np.std]).T
# Altitude grid: 0.5..70 km in 0.5 km steps (140 levels) — assumed to
# match the dataset's profile length; TODO confirm against the HDF5 files.
X = np.linspace(0.5, 70, 140)
# Collect mean/error/stddev columns per season and persist them.
StatO3 = pds.DataFrame(index=X)
for iseason in ['Winter','Spring','Summer','Fall']:
    StatO3[iseason+'_m'] = O3seasons[iseason]
    StatO3[iseason+'_e'] = O3seasons_Err[iseason]
    StatO3[iseason+'_s'] = O3seasons_s[iseason]
store = pds.HDFStore(O3StatFileName_h5,'w')
store.put('Statistics',StatO3)
store.close()
import pylab as plt

# Four-panel figure: one subplot per season.  Each panel shows the mean
# ozone profile (left axis), the mean retrieval error in percent (right
# axis, red), and an annotated vertical line at the mean tropopause height.
# NOTE(review): in every panel the tropopause line is plotted twice with
# identical arguments — the second ax.plot call looks like a copy/paste
# leftover.  Also `ax.spines['right']` is recolored on the base axis while
# the red error series lives on the twin axis `ax2` — verify intent.
plt.figure(1)
plt.clf()
ax = plt.subplot(2,2,1)
ax.plot(X, O3seasons['Winter'])
ax.set_xlim((0,40))
ax.set_ylim((0,6))
ax.set_ylabel('$[O3]x10^{12}, cm^{-3}$' )
ax.set_title('Winter')
Ht0 = THseasons['Winter'][0]['mean']
Ht0e= THseasons['Winter'][0]['std']
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.annotate('$H_{tropo}=%3.1f\pm%3.1f km$'%(Ht0, Ht0e), xy=(Ht0, 4,),
            xycoords='data', xytext=(30, 4.0),
            arrowprops=dict(arrowstyle="->", lw=2),
            size=16
            )
ax.grid()
ax2 = ax.twinx()
ax2.plot(X,O3seasons_Err['Winter'],'r.--')
ax.spines['right'].set_color('red')
ax2.yaxis.label.set_color('red')
ax2.tick_params(axis='y', colors='red')
ax2.set_ylim((0,100))
# 2-nd plot
ax = plt.subplot(2,2,2)
ax.plot(X, O3seasons['Spring'],'g')
ax.set_xlim((0,40))
ax.set_ylim((0,6))
ax.set_title('Spring')
Ht0 = THseasons['Spring'][0]['mean']
Ht0e= THseasons['Spring'][0]['std']
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.annotate('$H_{tropo}=%3.1f\pm%3.1f km$'%(Ht0, Ht0e), xy=(Ht0, 4,),
            xycoords='data', xytext=(30, 4.0),
            arrowprops=dict(arrowstyle="->", lw=2),
            size=16
            )
ax.grid()
ax2 = ax.twinx()
ax2.plot(X,O3seasons_Err['Spring'],'r.--')
ax.spines['right'].set_color('red')
ax2.set_ylabel('Error,$\%$')
ax2.tick_params(axis='y', colors='red')
ax2.yaxis.label.set_color('red')
ax2.set_ylim((0,100))
#3-rd plot
ax = plt.subplot(2,2,3)
ax.plot(X, O3seasons['Summer'],'y')
ax.set_xlim((0,40))
ax.set_ylim((0,6))
ax.grid()
ax.set_xlabel('Altitude, km')
ax.set_ylabel('$[O3]x10^{12}, cm^{-3}$' )
ax.set_title('Summer')
Ht0 = THseasons['Summer'][0]['mean']
Ht0e= THseasons['Summer'][0]['std']
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.annotate('$H_{tropo}=%3.1f\pm%3.1f km$'%(Ht0, Ht0e), xy=(Ht0, 4,),
            xycoords='data', xytext=(30, 4.0),
            arrowprops=dict(arrowstyle="->", lw=2),
            size=16
            )
ax2 = ax.twinx()
ax2.plot(X,O3seasons_Err['Summer'],'r.--')
ax.spines['right'].set_color('red')
ax2.tick_params(axis='y', colors='red')
ax2.yaxis.label.set_color('red')
ax2.set_ylim((0,100))
#4-th plot
ax = plt.subplot(2,2,4)
ax.plot(X, O3seasons['Fall'],'k')
ax.set_xlim((0,40))
ax.set_ylim((0,6))
Ht0 = THseasons['Fall'][0]['mean']
Ht0e= THseasons['Fall'][0]['std']
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.plot([Ht0,Ht0],[0,6],
        'k.--',lw=2)
ax.annotate('$H_{tropo}=%3.1f\pm%3.1f km$'%(Ht0, Ht0e), xy=(Ht0, 4,),
            xycoords='data', xytext=(30, 4.0),
            arrowprops=dict(arrowstyle="->", lw=2),
            size=16
            )
ax.grid()
ax.set_xlabel('Altitude, km')
ax.set_title('Fall')
ax2 = ax.twinx()
ax2.plot(X,O3seasons_Err['Fall'],'r.--')
ax.spines['right'].set_color('red')
ax2.yaxis.label.set_color('red')
ax2.set_ylabel('Error,$\%$')
ax2.tick_params(axis='y', colors='red')
ax2.set_ylim((0,100))
plt.savefig(O3StatFileName_fig)
import glob
import os
from qgis.core import QgsApplication
from command import command, complete_with
from PyQt4.QtCore import QUrl
from PyQt4.QtGui import QDesktopServices
folder = os.path.join(QgsApplication.qgisSettingsDirPath(), "python",
"commandbar")
def packages(argname, data):
    """Completion provider: file names of the ``*.py`` packages in `folder`."""
    pattern = os.path.join(folder, "*.py")
    return [os.path.basename(match) for match in glob.glob(pattern)]
@command("Package name")
@complete_with(packagename=packages)
def edit_package(packagename):
    """
    Open an existing command-bar package file for editing.

    A ``.py`` suffix is appended when the supplied name lacks one.
    """
    filename = packagename if packagename.endswith(".py") else packagename + ".py"
    open_file(os.path.join(folder, filename))
def open_file(path):
    """Open `path` in $EDITOR when set, otherwise with the desktop handler."""
    import subprocess
    editor = os.environ.get('EDITOR')
    if editor is None:
        # No editor configured: fall back to the platform's default opener.
        QDesktopServices.openUrl(QUrl.fromLocalFile(path))
    else:
        subprocess.Popen([editor, path])
@command("Package name")
def define_package(packagename):
    """
    Define new command bar package file
    """
    # Spaces are not valid in Python module names; normalise to underscores.
    packagename = packagename.replace(" ", "_")
    packagepath = os.path.join(folder, packagename) + ".py"
    # Write a skeleton package; {0} below is substituted with the package
    # name by .format() — keep the template's braces intact.
    with open(packagepath, 'w') as f:
        f.write("""# Package file for QGIS command bar plugin
from qgis.core import QgsMapLayerRegistry, QgsVectorLayer, QgsApplication
from qgis.utils import iface
from qgiscommand.command import command, complete_with, check
from qgiscommand.qgis_commands import layer_by_name, layers
@command("Prompt")
def {0}_package_function(arg1):
    pass
""".format(packagename))
    # Immediately open the new file so the user can flesh it out.
    open_file(packagepath)
| NathanW2/qgiscommand | package_commands.py | Python | gpl-2.0 | 1,552 |
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from PDFManager.PDFMangerFacade import PDFMangerFacade
class PDFManager_UI:
    """Tkinter main window for PDFManager.

    Keeps the list of selected PDF paths (`self.files`) in sync with the
    rows of a Treeview and delegates every PDF operation (merge, split,
    watermark, encrypt, rotate, ...) to PDFMangerFacade.  All user-facing
    strings are Italian and must stay as they are.
    """

    def __init__(self):
        """Build the whole window: file list, operation buttons, inputs."""
        # Index of the last Treeview row; -1 means "no rows yet".
        self.i= -1;
        # Absolute file paths, kept parallel to the Treeview rows.
        self.files=[]
        self.root = Tk()
        self.root.title('PDFManager')
        self.root.wm_iconbitmap("ico.ico") # window icon
        self.frame = Frame(self.root,height=2,bd=2,relief=SUNKEN,bg='black',)
        self.root.resizable(False, False) # fixed-size window
        # center the window on the screen
        larghezza = self.root.winfo_screenwidth() # screen width in pixels
        altezza = self.root.winfo_screenheight() # screen height in pixels
        WIDTH = self.root.winfo_reqwidth()
        HEIGHT = self.root.winfo_reqheight()
        x = larghezza//2 - WIDTH
        y = altezza//2 - HEIGHT
        self.root.geometry("%dx%d+%d+%d" % (421,342 , x, y))
        # Operation buttons, each wired to one handler method below.
        self.button_merge = Button(self.root, text = 'Unisci', command=self.__unisci__)
        self.button_stitching = Button(self.root,text = 'Dividi',command=self.dividi)
        self.button_split = Button(self.root, text = 'Fusione', command=self.__fusione__)
        self.button_watermark = Button(self.root, text = 'Filigrana', command=self.__filigrana__)
        self.button_encript = Button(self.root, text = 'Cripta', command=self.__cripta__)
        self.button_rotate = Button(self.root, text='Ruota', command=self.__ruota__)
        self.button_clear =Button(self.root, text='Rimuovi tutto', command=self.__svuota__)
        self.password = Entry(self.root)
        # Rotation angle selector (degrees, read-only choices).
        self.combo_rotate = ttk.Combobox(self.root,state='readonly')
        self.combo_rotate['values'] = (0,90,180,270)
        lblPass = Label(self.root,text='Password :',anchor=E)
        lblGradi = Label(self.root,text='Gradi :',anchor=E)
        self.button_add = Button(self.root, text='Aggiungi PDF', command=self.__aggiungi__)
        self.button_delete = Button(self.root, text='Rimuovi selezionato', command=self.__rimuovi__)
        # File list: one row per PDF with its page count.
        self.list_file = ttk.Treeview(self.root)
        self.list_file['columns'] =('NumeroPagine')
        self.list_file.heading("#0",text='NomeFile')
        self.list_file.column('#0',anchor=W)
        self.list_file.heading('NumeroPagine',text = 'Numero pagine')
        self.list_file.column('NumeroPagine',anchor='center',width=100)
        # Grid layout.
        self.button_add.grid(row=0, column= 0,columnspan=2,sticky=(W,E))
        self.button_delete.grid(row=1,column=0,columnspan=2,sticky=(W,E))
        self.button_clear.grid(row = 2,column=0,columnspan=2,sticky=(W,E))
        self.list_file.grid(row=0,column=2,columnspan=3,rowspan=3)
        self.frame.grid(row=3,column=0,columnspan=5,sticky=(W,E),pady=5)
        self.button_merge.grid(row=4,column=0,columnspan=2,sticky=(W,E))
        self.button_stitching.grid(row=4,column=3,columnspan=2,sticky=(W,E))
        self.button_split.grid(row=5,column=0,columnspan=2,sticky=(W,E))
        self.button_watermark.grid(row=5,column=3,columnspan=2,sticky=(W,E))
        self.button_encript.grid(row=6,column=0,columnspan=2,sticky=(W,E))
        lblPass.grid(row=6,column=2)
        self.password.grid(row=6,column=3,columnspan=2,sticky=(W,E))
        self.button_rotate.grid(row=7,column=0,columnspan=2,sticky=(W,E))
        lblGradi.grid(row=7,column=2)
        self.combo_rotate.grid(row=7,column=3,columnspan=2,sticky=(W,E))
        # All operations start disabled until files are added
        # (see __controlla__ for the enabling rules).
        self.button_stitching.config(state=DISABLED)
        self.button_encript.config(state=DISABLED)
        self.button_watermark.config(state=DISABLED)
        self.button_merge.config(state=DISABLED)
        self.button_split.config(state=DISABLED)
        self.button_rotate.config(state=DISABLED)

    def __aggiungi__(self):
        """Ask for one or more PDFs and append the ones not already listed."""
        filelist = filedialog.askopenfilenames(filetypes=[("PDF file",".pdf")])
        for file in filelist:
            if(file in self.files):
                # skip duplicates
                continue
            self.i = self.i+1
            self.files.append(file)
            # Show only the file name in the list, not the full path.
            split = file.split("/").pop()
            self.list_file.insert("",self.i,text=split,values=(PDFMangerFacade.pagescount(file)))
        self.__controlla__()

    def __rimuovi__(self):
        """Remove the selected row (and its path); warn if nothing selected."""
        try:
            pos = self.list_file.selection()[0]
            posizione = self.list_file.index(pos)
            del(self.files[posizione])
            self.list_file.delete(pos)
            self.i= self.i-1
            # NOTE(review): leftover debug output.
            print(self.files)
        except IndexError:
            messagebox.showwarning("Attenzione","Nessun elemento selezionato")
        self.__controlla__()

    def __unisci__(self):
        """Merge every listed PDF into a single output file."""
        try:
            name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
            if(name.endswith('.pdf') == False):
                name = name+'.pdf'
            PDFMangerFacade.merge(*self.files, filenameOut=name)
        except Exception as e:
            messagebox.showwarning("Attenzione",e)

    def __svuota__(self):
        """Clear both the path list and the Treeview."""
        # NOTE(review): self.i is not reset here, unlike after __rimuovi__
        # — confirm whether that is intentional.
        self.files = []
        self.list_file.delete(*self.list_file.get_children())

    def dividi(self):
        """Split the selected PDF into files inside a chosen directory."""
        try:
            pos = self.list_file.selection()[0]
            posizione = self.list_file.index(pos)
            phat = filedialog.askdirectory()
            # Use the file name without extension as the output prefix.
            prefisso = (self.files[posizione].split("/").pop()).split('.')[0]
            PDFMangerFacade.stitching(self.files[posizione], phat + '/' + prefisso)
        except IndexError:
            messagebox.showwarning("Attenzione","Elemento non selezionato")

    def __fusione__(self):
        """Combine all listed PDFs via the facade's `splitting` operation."""
        try:
            name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
            PDFMangerFacade.splitting(*self.files,filenameOut = name)
        except IndexError as e:
            messagebox.showwarning("Attenzione",e)

    def __filigrana__(self):
        """Apply a watermark PDF to the selected file, saving a new PDF."""
        try:
            pos = self.list_file.selection()[0]
            posizione = self.list_file.index(pos)
            print(self.files[posizione])  # NOTE(review): leftover debug output.
            name_filigrana = filedialog.askopenfilename(filetypes=[("PDF file",".pdf")])
            name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
            PDFMangerFacade.watermark(self.files[posizione], name_filigrana, name)
        except IndexError:
            messagebox.showwarning("Attenzione","Elemento non selezionato.")

    def __cripta__(self):
        """Encrypt the selected PDF with the password typed in the entry."""
        try:
            pos = self.list_file.selection()[0]
            posizione = self.list_file.index(pos)
            password = self.password.get()
            if(password == ""):
                messagebox.showwarning("Attenzione","Inserire una password.")
                return
            name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
            PDFMangerFacade.encrypt(self.files[posizione], password, name);
            # Clear the password field after use.
            self.password.delete(0,'end')
        except IndexError:
            messagebox.showwarning("Attenzione","Elemento non selezionato.")

    def __ruota__(self):
        """Rotate the selected PDF by the angle chosen in the combobox."""
        try:
            pos = self.list_file.selection()[0]
            posizione = self.list_file.index(pos)
            # ValueError if the combobox is still empty.
            gradi = int(self.combo_rotate.get())
            name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
            PDFMangerFacade.rotatePage(self.files[posizione],name,gradi);
        except IndexError:
            messagebox.showwarning("Attenzione","Elemento non selezionato.")
        except ValueError:
            messagebox.showwarning("Attenzione","Selezionare il grado di rotazione.")

    def start(self):
        """Enter the Tk main loop."""
        self.root.mainloop()

    def __controlla__(self):
        """Enable/disable the operation buttons from the file count:
        0 files -> all disabled; 1 file -> single-file ops only;
        2+ files -> everything enabled."""
        if((self.i+1) == 0):
            self.button_stitching.config(state=DISABLED)
            self.button_encript.config(state=DISABLED)
            self.button_watermark.config(state=DISABLED)
            self.button_merge.config(state=DISABLED)
            self.button_split.config(state=DISABLED)
            self.button_rotate.config(state=DISABLED)
        if((self.i+1) ==1):
            self.button_stitching.config(state=NORMAL)
            self.button_encript.config(state=NORMAL)
            self.button_watermark.config(state=NORMAL)
            self.button_merge.config(state=DISABLED)
            self.button_split.config(state=DISABLED)
            self.button_rotate.config(state=NORMAL)
        if((self.i+1) >1):
            self.button_stitching.config(state=NORMAL)
            self.button_encript.config(state=NORMAL)
            self.button_watermark.config(state=NORMAL)
            self.button_merge.config(state=NORMAL)
            self.button_split.config(state=NORMAL)
            self.button_rotate.config(state=NORMAL)
from django.test import TestCase
# No tests
| HughMcGrade/eusay | votes/tests.py | Python | gpl-2.0 | 45 |
from django.test import TestCase
from ..models import l_artist_release
class test_l_artist_release(TestCase):
    """Unit tests for the l_artist_release link model."""

    def setUp(self):
        """
        Set up the test subject.
        """
        self.subject = l_artist_release()

    def test__l_artist_release__instance(self):
        """The subject is an l_artist_release instance."""
        self.assertIsInstance(self.subject, l_artist_release)

    def test__l_artist_release__str(self):
        """str() of the model returns 'L Artist Release'."""
        self.assertEqual(str(self.subject), 'L Artist Release')
| marios-zindilis/musicbrainz-django-models | musicbrainz_django_models/tests/test_l_artist_release.py | Python | gpl-2.0 | 451 |
# -*- coding: utf-8 -*-
"""
Usage: (python) parse_full_xml_report.py <.xml files>
Example:
$ python parse_full_xml_report.py ncbi_full_report.xml
"""
import sys
# NCBI identifier constants — presumably dbSNP individual ids (Watson,
# Venter) and 1000 Genomes population ids; not referenced elsewhere in
# this module.  TODO confirm against the NCBI report schema.
WATSON_INDId = 30346
VENTER_INDId = 30342
THOUSAND_GENOME_popId = [13148, 13149, 13150, 16651, 16652, 16653, 16654, 16655];
def parse_genotype_report(full_report):
    """Extract gene/allele pairs from an NCBI full XML report.

    Scans `full_report` line by line for ``<FxnSet geneId=...`` entries
    that carry an ``allele`` attribute and appends one tab-separated
    ``symbol<TAB>allele`` line per *distinct* gene symbol to
    ``phenotype_full_xml.txt`` in the current working directory.

    Fixes over the previous version: the input handle is now closed, the
    output file is opened once instead of once per matching line, and the
    inner ``with ... as f`` no longer shadows the input file handle.
    """
    def _attr(line, name):
        # Value of XML attribute `name`: the text between the first pair
        # of double quotes that follows the attribute name.
        start = line.find(name)
        start_quote = line.index('"', start)
        end_quote = line.index('"', start_quote + 1)
        return line[start_quote + 1:end_quote]

    # Tracks symbols already written so each gene is reported only once.
    phenotype_dict = {}
    with open(full_report) as report:
        # Text append mode; the original used 'ab', which is equivalent
        # for this ASCII output on Python 2.
        with open('phenotype_full_xml.txt', 'a') as out:
            for line in report:
                if "<FxnSet geneId=" in line and "allele=" in line:
                    symbol = _attr(line, "symbol")
                    allele = _attr(line, "allele")
                    if symbol not in phenotype_dict:
                        phenotype_dict[symbol] = allele
                        out.write(symbol + "\t" + allele + "\n")
if __name__ == '__main__':
    # NOTE(review): this accepts any argv length other than 1 (extra
    # arguments are silently ignored); `len(sys.argv) == 2` was probably
    # intended.
    if len(sys.argv) != 1:
        full_report = sys.argv[1]
        parse_genotype_report(full_report)
    else:
        # Python 2 print statements (module-wide convention).
        print 'Usage: (python) parse_full_xml_report.py <.xml files>'
        print 'Example: \
        python parse_full_xml_report.py ncbi_full_report.xml'
# -*- coding: UTF-8 -*-
# Pluma External Tools plugin
# Copyright (C) 2005-2006 Steve Frécinaux <steve@istique.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Public API of the externaltools package.
__all__ = ('ExternalToolsPlugin', 'ExternalToolsWindowHelper',
           'Manager', 'OutputPanel', 'Capture', 'UniqueById')
import pluma
import gtk
from manager import Manager
from library import ToolLibrary
from outputpanel import OutputPanel
from capture import Capture
from functions import *
class ToolMenu(object):
    """Builds and maintains the Tools > External Tools submenu.

    Mirrors the tool library tree into GTK UI-manager entries, wires each
    tool to its activation handler, keeps accelerators in sync with the
    accel map, and filters menu items by document state/language.
    """

    # Keys used to stash per-action data on the gtk.Action objects.
    ACTION_HANDLER_DATA_KEY = "ExternalToolActionHandlerData"
    ACTION_ITEM_DATA_KEY = "ExternalToolActionItemData"

    def __init__(self, library, window, menupath):
        super(ToolMenu, self).__init__()
        self._library = library
        self._window = window
        self._menupath = menupath
        # 0 means "no UI currently merged".
        self._merge_id = 0
        self._action_group = gtk.ActionGroup("ExternalToolsPluginToolActions")
        # Accel-map signal handler ids, disconnected in remove().
        self._signals = []
        self.update()

    def deactivate(self):
        """Tear down the menu when the plugin is deactivated."""
        self.remove()

    def remove(self):
        """Remove all merged UI, actions and accel-map handlers."""
        if self._merge_id != 0:
            self._window.get_ui_manager().remove_ui(self._merge_id)
            self._window.get_ui_manager().remove_action_group(self._action_group)
            self._merge_id = 0
        for action in self._action_group.list_actions():
            handler = action.get_data(self.ACTION_HANDLER_DATA_KEY)
            if handler is not None:
                action.disconnect(handler)
            action.set_data(self.ACTION_ITEM_DATA_KEY, None)
            action.set_data(self.ACTION_HANDLER_DATA_KEY, None)
            self._action_group.remove_action(action)
        accelmap = gtk.accel_map_get()
        for s in self._signals:
            accelmap.disconnect(s)
        self._signals = []

    def _insert_directory(self, directory, path):
        """Recursively merge a library directory as menus and menu items."""
        manager = self._window.get_ui_manager()
        for item in directory.subdirs:
            # Action names are derived from the object's id() to be unique.
            action_name = 'ExternalToolDirectory%X' % id(item)
            # '_' is escaped as '__' so it is not treated as a mnemonic.
            action = gtk.Action(action_name, item.name.replace('_', '__'), None, None)
            self._action_group.add_action(action)
            manager.add_ui(self._merge_id, path,
                           action_name, action_name,
                           gtk.UI_MANAGER_MENU, False)
            self._insert_directory(item, path + '/' + action_name)
        for item in directory.tools:
            action_name = 'ExternalToolTool%X' % id(item)
            action = gtk.Action(action_name, item.name.replace('_', '__'), item.comment, None)
            handler = action.connect("activate", capture_menu_action, self._window, item)
            action.set_data(self.ACTION_ITEM_DATA_KEY, item)
            action.set_data(self.ACTION_HANDLER_DATA_KEY, handler)
            # Make sure to replace accel
            accelpath = '<Actions>/ExternalToolsPluginToolActions/%s' % (action_name, )
            if item.shortcut:
                key, mod = gtk.accelerator_parse(item.shortcut)
                gtk.accel_map_change_entry(accelpath, key, mod, True)
                # Track accel edits so the tool file can be updated.
                self._signals.append(gtk.accel_map_get().connect('changed::%s' % (accelpath,), self.on_accelmap_changed, item))
            self._action_group.add_action_with_accel(action, item.shortcut)
            manager.add_ui(self._merge_id, path,
                           action_name, action_name,
                           gtk.UI_MANAGER_MENUITEM, False)

    def on_accelmap_changed(self, accelmap, path, key, mod, tool):
        """Persist a shortcut change made through the accel map."""
        tool.shortcut = gtk.accelerator_name(key, mod)
        tool.save()
        self._window.get_data("ExternalToolsPluginWindowData").update_manager(tool)

    def update(self):
        """Rebuild the whole submenu from the current library contents."""
        self.remove()
        self._merge_id = self._window.get_ui_manager().new_merge_id()
        self._insert_directory(self._library.tree, self._menupath)
        self._window.get_ui_manager().insert_action_group(self._action_group, -1)
        self.filter(self._window.get_active_document())

    def filter_language(self, language, item):
        """Return True when `item` applies to the document's language.

        An empty language list means "applies to everything"; 'plain'
        matches documents with no highlighting language.
        """
        if not item.languages:
            return True
        if not language and 'plain' in item.languages:
            return True
        if language and (language.get_id() in item.languages):
            return True
        else:
            return False

    def filter(self, document):
        """Show/hide tool items according to the document's state."""
        if document is None:
            return
        titled = document.get_uri() is not None
        remote = not document.is_local()
        # Maps each tool 'applicability' value to whether it holds now.
        states = {
            'all' : True,
            'local': titled and not remote,
            'remote': titled and remote,
            'titled': titled,
            'untitled': not titled,
        }
        language = document.get_language()
        for action in self._action_group.list_actions():
            item = action.get_data(self.ACTION_ITEM_DATA_KEY)
            if item is not None:
                action.set_visible(states[item.applicability] and self.filter_language(language, item))
class ExternalToolsWindowHelper(object):
    """Per-window state of the External Tools plugin.

    Installs the menu entries (tools submenu + manager item) into the
    window's UI manager and adds the shell-output panel to the bottom
    panel.
    """

    def __init__(self, plugin, window):
        super(ExternalToolsWindowHelper, self).__init__()

        self._window = window
        self._plugin = plugin
        self._library = ToolLibrary()

        manager = window.get_ui_manager()

        self._action_group = gtk.ActionGroup('ExternalToolsPluginActions')
        self._action_group.set_translation_domain('pluma')
        self._action_group.add_actions([('ExternalToolManager',
                                         None,
                                         _('Manage _External Tools...'),
                                         None,
                                         _("Opens the External Tools Manager"),
                                         lambda action: plugin.open_dialog()),
                                        ('ExternalTools',
                                         None,
                                         _('External _Tools'),
                                         None,
                                         _("External tools"),
                                         None)])
        manager.insert_action_group(self._action_group, -1)

        # Merge points for the tools submenu and the manager menu item.
        ui_string = """
            <ui>
              <menubar name="MenuBar">
                <menu name="ToolsMenu" action="Tools">
                  <placeholder name="ToolsOps_4">
                    <separator/>
                    <menu name="ExternalToolsMenu" action="ExternalTools">
                      <placeholder name="ExternalToolPlaceholder"/>
                    </menu>
                    <separator/>
                  </placeholder>
                  <placeholder name="ToolsOps_5">
                    <menuitem name="ExternalToolManager" action="ExternalToolManager"/>
                  </placeholder>
                </menu>
              </menubar>
            </ui>"""

        self._merge_id = manager.add_ui_from_string(ui_string)

        self.menu = ToolMenu(self._library, self._window,
                             "/MenuBar/ToolsMenu/ToolsOps_4/ExternalToolsMenu/ExternalToolPlaceholder")
        manager.ensure_update()

        # Create output console
        self._output_buffer = OutputPanel(self._plugin.get_data_dir(), window)

        bottom = window.get_bottom_panel()
        bottom.add_item(self._output_buffer.panel,
                        _("Shell Output"),
                        gtk.STOCK_EXECUTE)

    def update_ui(self):
        """Refilter tool visibility for the now-active document."""
        self.menu.filter(self._window.get_active_document())
        self._window.get_ui_manager().ensure_update()

    def deactivate(self):
        """Undo everything __init__ installed into the window."""
        manager = self._window.get_ui_manager()
        self.menu.deactivate()
        manager.remove_ui(self._merge_id)
        manager.remove_action_group(self._action_group)
        manager.ensure_update()

        bottom = self._window.get_bottom_panel()
        bottom.remove_item(self._output_buffer.panel)

    def update_manager(self, tool):
        """Forward a tool change to the (possibly open) manager dialog."""
        self._plugin.update_manager(tool)
class ExternalToolsPlugin(pluma.Plugin):
    """Plugin entry point: one helper per window, one shared manager dialog."""

    # Key under which the per-window helper is stashed on the window.
    WINDOW_DATA_KEY = "ExternalToolsPluginWindowData"

    def __init__(self):
        super(ExternalToolsPlugin, self).__init__()

        self._manager = None
        # Remembered dialog size, restored on next open.
        self._manager_default_size = None

        ToolLibrary().set_locations(os.path.join(self.get_data_dir(), 'tools'))

    def activate(self, window):
        """Install this plugin's UI into `window`."""
        helper = ExternalToolsWindowHelper(self, window)
        window.set_data(self.WINDOW_DATA_KEY, helper)

    def deactivate(self, window):
        """Remove this plugin's UI from `window`."""
        window.get_data(self.WINDOW_DATA_KEY).deactivate()
        window.set_data(self.WINDOW_DATA_KEY, None)

    def update_ui(self, window):
        window.get_data(self.WINDOW_DATA_KEY).update_ui()

    def create_configure_dialog(self):
        # The plugin's configure dialog is the tools manager itself.
        return self.open_dialog()

    def open_dialog(self):
        """Show the (lazily created, shared) tools manager dialog."""
        if not self._manager:
            self._manager = Manager(self.get_data_dir())

            if self._manager_default_size:
                self._manager.dialog.set_default_size(*self._manager_default_size)

            self._manager.dialog.connect('destroy', self.on_manager_destroy)

        window = pluma.app_get_default().get_active_window()
        self._manager.run(window)

        return self._manager.dialog

    def update_manager(self, tool):
        """Notify the open manager dialog that `tool` changed elsewhere."""
        if not self._manager:
            return

        self._manager.tool_changed(tool, True)

    def on_manager_destroy(self, dialog):
        # Remember the size the user left the dialog at, then drop it.
        self._manager_default_size = [dialog.allocation.width, dialog.allocation.height]
        self._manager = None
# ex:ts=4:et:
| monsta/pluma | plugins/externaltools/tools/__init__.py | Python | gpl-2.0 | 10,313 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import Sitemap
from . import models
class BlogSitemap(Sitemap):
    """Sitemap section covering published blog posts."""

    changefreq = "daily"
    priority = 0.5

    def items(self):
        """Return only posts that are not drafts."""
        published = models.Post.objects.filter(is_draft=False)
        return published

    def lastmod(self, obj):
        """A post's modification date is its last update time."""
        return obj.update_time
class PageSitemap(Sitemap):
    """Sitemap section covering published static pages."""

    changefreq = "monthly"
    priority = 0.5

    def items(self):
        """Return only pages that are not drafts."""
        published = models.Page.objects.filter(is_draft=False)
        return published

    def lastmod(self, obj):
        """A page's modification date is its last update time."""
        return obj.update_time
# class CategorySitemap(Sitemap):
# changefreq = "weekly"
# priority = 0.6
# def items(self):
# return models.Category.objects.all()
# class TagSitemap(Sitemap):
# changefreq = "weekly"
# priority = 0.6
# def items(self):
# return models.Tag.objects.all()
# Registry handed to django.contrib.sitemaps views: section name -> Sitemap class.
sitemaps = {
    'blog': BlogSitemap,
    'page': PageSitemap,
    # 'category': CategorySitemap,
    # 'tag': TagSitemap,
}
| flyhigher139/mayblog | blog/main/sitemaps.py | Python | gpl-2.0 | 968 |
import urllib
from django.http import HttpResponseRedirect
from django.http.response import HttpResponseBadRequest, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.core.urlresolvers import reverse
from rest_framework import viewsets
from csinterop.forms import InteropServiceForm
from csinterop.models import SharingProposal, User, Folder
from csinterop.serializers import SharingProposalSerializer
class SharingProposalViewSet(viewsets.ModelViewSet):
    """REST endpoint exposing CRUD operations on SharingProposal objects."""
    # NOTE: removed an import-time debug `print` statement left over from
    # development; it ran every time this module was imported.
    model = SharingProposal
    serializer_class = SharingProposalSerializer
def url_with_querystring(path, **kwargs):
    """Append the keyword arguments to *path* as a URL-encoded query string."""
    query = urllib.urlencode(kwargs)
    return '%s?%s' % (path, query)
def proposal_select(request, key):
    """Let the proposal owner choose the remote interop service.

    Only proposals in status CREATED may be sent.  On a valid POST the
    proposal is marked SENT and the browser is redirected to the remote
    service's share endpoint carrying the interop protocol parameters.
    """
    proposal = get_object_or_404(SharingProposal, key=key)
    if proposal.status != 'CREATED':
        return HttpResponseBadRequest('Proposal status is not valid')
    error = False
    if request.POST:
        form = InteropServiceForm(request.POST)
        if form.is_valid():
            service = form.cleaned_data['services']
            proposal.service = service
            proposal.status = 'SENT'
            proposal.save()
            permission = 'read-write' if proposal.write_access else 'read-only'
            params = {'share_id': proposal.key,
                      'resource_url': proposal.resource_url,
                      'owner_name': proposal.owner.name,
                      'owner_email': proposal.owner.email,
                      'folder_name': proposal.folder.name,
                      'permission': permission,
                      'recipient': proposal.recipient,
                      'callback': proposal.callback,
                      'protocol_version': '1.0'
                      }
            url = '%s?%s' % (service.endpoint_share, urllib.urlencode(params))
            return HttpResponseRedirect(url)
        # BUG FIX: keep the bound (invalid) form so the template can display
        # field errors; the original replaced it with a fresh unbound form.
        error = True
    else:
        form = InteropServiceForm()
    return render(request, 'proposal_select.html', {'proposal': proposal, 'form': form, 'error': error})
def proposal_view(request, key):
    """Show a PENDING proposal and record the recipient's accept/decline."""
    #TODO: user must be logged in to see the proposal
    proposal = get_object_or_404(SharingProposal, key=key)
    if proposal.status != 'PENDING':
        return HttpResponseBadRequest('Proposal status is not valid')
    if request.POST:
        # The submit button pressed determines the decision.
        accepted = 'accept' in request.POST
        proposal.status = 'ACCEPTED' if accepted else 'DECLINED'
        proposal.save()
        redirect_url = url_with_querystring(proposal.callback, accepted=accepted, share_id=proposal.key)
        return HttpResponseRedirect(redirect_url)
    return render(request, 'proposal_view.html', {'proposal': proposal})
def proposal_share(request):
    """Receive an incoming sharing proposal from a remote interop service.

    Validates the interop protocol query parameters, stores the proposal
    locally with status PENDING and redirects the recipient to the
    accept/decline page.
    """
    # Every protocol parameter is mandatory; reject the request naming the
    # first missing one (error messages kept identical to the original API).
    values = {}
    for name in ('share_id', 'resource_url', 'owner_name', 'owner_email',
                 'folder_name', 'permission', 'recipient', 'callback',
                 'protocol_version'):
        value = request.GET.get(name)
        if value is None:
            return HttpResponseBadRequest('%s is missing' % name)
        values[name] = value
    if values['protocol_version'] != '1.0':
        return HttpResponseBadRequest('Wrong protocol version. Must be 1.0.')
    proposal = SharingProposal()
    proposal.key = values['share_id']
    proposal.is_local = False
    proposal.resource_url = values['resource_url']
    proposal.recipient = values['recipient']
    proposal.callback = values['callback']
    proposal.protocol_version = values['protocol_version']
    owner = User()
    owner.name = values['owner_name']
    owner.email = values['owner_email']
    proposal.owner = owner
    folder = Folder()
    folder.name = values['folder_name']
    proposal.folder = folder
    # BUG FIX: the original used `is`, which compares object identity, so the
    # test was effectively always False and write access was never granted;
    # `==` compares the string contents.
    proposal.write_access = values['permission'].lower() == 'read-write'
    proposal.status = 'PENDING'
    proposal.save()
    #TODO: check if the proposal was successfully saved
    url = reverse('proposal_view', args=(), kwargs={'key': proposal.key})
    return HttpResponseRedirect(url)
def proposal_result(request):
    """Callback from the remote service carrying the recipient's decision."""
    share_id = request.GET.get('share_id')
    accepted = request.GET.get('accepted')
    if share_id is None or accepted is None:
        return HttpResponseBadRequest(content='Some parameters are missing')
    # BUG FIX: `accepted` arrives as text ('True'/'False'); any non-empty
    # string is truthy, so the original recorded declines as ACCEPTED and
    # sent credentials anyway.  Parse the flag explicitly.
    accepted = accepted.strip().lower() in ('true', '1', 'yes')
    proposal = get_object_or_404(SharingProposal, key=share_id)
    proposal.status = 'ACCEPTED' if accepted else 'DECLINED'
    proposal.save()
    #TODO: create a thread to process acceptance/denial
    if accepted:
        # Send the credentials
        url = url_with_querystring(proposal.service.endpoint_credentials, share_id=proposal.key, auth_protocol='oauth',
                                   auth_protocol_version='1.0a', oauth_access_token='2j42342')
        return HttpResponseRedirect(url)
    else:
        return HttpResponse(content='Proposal was denied')
def proposal_credentials(request):
    """Receive the authentication credentials for an accepted share."""
    share_id = request.GET.get('share_id')
    auth_protocol = request.GET.get('auth_protocol')
    auth_protocol_version = request.GET.get('auth_protocol_version')
    # All three identifying parameters are required.
    for name, value in (('share_id', share_id),
                        ('auth_protocol', auth_protocol),
                        ('auth_protocol_version', auth_protocol_version)):
        if value is None:
            return HttpResponseBadRequest('%s is missing' % name)
    if auth_protocol == 'oauth' and auth_protocol_version == '1.0a':
        # TODO: get oauth 1.0a parameters and save them for future use
        # oauth_consumer_key
        # oauth_token
        # oauth_signature_method
        # oauth_signature
        # oauth_timestamp
        # oauth_nonce
        # (oauth_version)
        return HttpResponse('Oauth parameters received and stored')
    # TODO: add other authentication methods
    return HttpResponseBadRequest(content='Authentication method not supported')
| cloudspaces/interop-protocol | csinterop/csinterop/views.py | Python | gpl-2.0 | 6,787 |
# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# __init__.py - Startup and module loading logic for Mercurial.
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import os
import sys
# Allow 'from mercurial import demandimport' to keep working.
from edenscm import hgdemandimport
# pyre-fixme[11]: Annotation `hgdemandimport` is not defined as a type.
demandimport = hgdemandimport
__all__ = []
# Per-platform location of the system-wide Mercurial configuration directory.
# (getattr-style attribute access mirrors the file's existing lint-avoidance
# pattern for os.environ / sys.platform.)
if getattr(sys, "platform") == "win32":
    configdir = os.path.join(
        # PROGRAMDATA is normally set on Windows; fall back to the
        # conventional path when it is absent.
        getattr(os, "environ").get("PROGRAMDATA") or "\ProgramData",
        "Facebook",
        "Mercurial",
    )
else:
    configdir = "/etc/mercurial"
def shoulduselegacy(name):
    """Return True if the legacy feature *name* is enabled.

    The HGLEGACY environment variable (a whitespace-separated list of
    feature names) wins when set; otherwise a marker file named
    ``legacy.<name>`` in the system config directory enables the feature.
    """
    legacy = getattr(os, "environ").get("HGLEGACY")
    if legacy is None:
        marker = os.path.join(configdir, "legacy.%s" % name)
        return os.path.lexists(marker)
    return name in legacy.split()
| facebookexperimental/eden | eden/hg-server/edenscm/mercurial/__init__.py | Python | gpl-2.0 | 1,173 |
# This file is part of PooPyLab.
#
# PooPyLab is a simulation software for biological wastewater treatment processes using International Water Association
# Activated Sludge Models.
#
# Copyright (C) Kai Zhang
#
# PooPyLab is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with PooPyLab. If not, see
# <http://www.gnu.org/licenses/>.
#
#
# This is the definition of the ASM1 model to be imported as part of the Reactor object
#
#
"""Definition of the IWA Activated Sludge Model #2d.
Reference:
Grady Jr. et al, 1999: Biological Wastewater Treatment, 2nd Ed.
IWA Task Group on Math. Modelling for Design and Operation of Biological
Wastewater Treatment, 2000. Activated Sludge Model No. 1, in Activated
Sludge Models ASM1, ASM2, ASM2d, and ASM 3.
"""
## @namespace asm_2d
## @file asm_2d.py
from ..ASMModel import constants
from .asmbase import asm_model
class ASM_2d(asm_model):
    """
    Kinetics and stoichiometrics of the IWA ASM 2d model.
    """
    # NOTE(review): the component list and stoichiometry below mirror the 13
    # ASM1 components (see _NUM_ASM1_COMPONENTS) -- confirm this is the
    # intended placeholder for the ASM2d implementation.

    # class-wide instance counter
    __id = 0

    def __init__(self, ww_temp=20, DO=2):
        """
        Initialize the model with water temperature and dissolved O2.

        Args:
            ww_temp: wastewater temperature, degC;
            DO: dissolved oxygen, mg/L

        Return:
            None

        See:
            _set_ideal_kinetics_20C();
            _set_params();
            _set_stoichs().
        """
        asm_model.__init__(self)
        self.__class__.__id += 1

        self._set_ideal_kinetics_20C_to_defaults()

        # wastewater temperature used in the model, degC
        self._temperature = ww_temp
        # mixed liquor bulk dissolved oxygen, mg/L
        self._bulk_DO = DO

        # temperature difference b/t what's used and baseline (20C), degC
        self._delta_t = self._temperature - 20

        # recompute kinetic params/stoichs for the given temperature and DO
        self.update(ww_temp, DO)

        # The Components the ASM components IN THE REACTOR
        # For ASM #2d:
        #
        #    self._comps[0]: S_DO as COD
        #    self._comps[1]: S_I
        #    self._comps[2]: S_S
        #    self._comps[3]: S_NH
        #    self._comps[4]: S_NS
        #    self._comps[5]: S_NO
        #    self._comps[6]: S_ALK
        #    self._comps[7]: X_I
        #    self._comps[8]: X_S
        #    self._comps[9]: X_BH
        #    self._comps[10]: X_BA
        #    self._comps[11]: X_D
        #    self._comps[12]: X_NS
        #
        # ASM model components
        self._comps = [0.0] * constants._NUM_ASM1_COMPONENTS

        return None

    def _set_ideal_kinetics_20C_to_defaults(self):
        """
        Set the kinetic params/consts @ 20C to default ideal values.

        See:
            update();
            _set_params();
            _set_stoichs().
        """
        # Ideal Growth Rate of Heterotrophs (u_max_H, 1/DAY)
        self._kinetics_20C['u_max_H'] = 6.0
        # Decay Rate of Heterotrophs (b_H, 1/DAY)
        self._kinetics_20C['b_LH'] = 0.62
        # Ideal Growth Rate of Autotrophs (u_max_A, 1/DAY)
        self._kinetics_20C['u_max_A'] = 0.8
        # Decay Rate of Autotrophs (b_A, 1/DAY)
        # A wide range exists. Table 6.3 on Grady 1999 shows 0.096 (1/d). IWA's
        # ASM report did not even show b_A on its table for typical value. ASIM
        # software show a value of "0.000", probably cut off by the print
        # function. I can only assume it was < 0.0005 (1/d) at 20C.
        #self._kinetics_20C['b_LA'] = 0.096
        self._kinetics_20C['b_LA'] = 0.0007
        # Half Growth Rate Concentration of Heterotrophs (K_s, mgCOD/L)
        self._kinetics_20C['K_S'] = 20.0
        # Switch Coefficient for Dissolved O2 of Hetero. (K_OH, mgO2/L)
        self._kinetics_20C['K_OH'] = 0.2
        # Association Conc. for Dissolved O2 of Auto. (K_OA, mgN/L)
        self._kinetics_20C['K_OA'] = 0.4
        # Association Conc. for NH3-N of Auto. (K_NH, mgN/L)
        self._kinetics_20C['K_NH'] = 1.0
        # Association Conc. for NOx of Hetero. (K_NO, mgN/L)
        self._kinetics_20C['K_NO'] = 0.5
        # Hydrolysis Rate (k_h, mgCOD/mgBiomassCOD-day)
        self._kinetics_20C['k_h'] = 3.0
        # Half Rate Conc. for Hetero. Growth on Part. COD
        # (K_X, mgCOD/mgBiomassCOD)
        self._kinetics_20C['K_X'] = 0.03
        # Ammonification of Org-N in biomass (k_a, L/mgBiomassCOD-day)
        self._kinetics_20C['k_a'] = 0.08
        # Yield of Hetero. Growth on COD (Y_H, mgBiomassCOD/mgCODremoved)
        self._kinetics_20C['Y_H'] = 0.67
        # Yield of Auto. Growth on TKN (Y_A, mgBiomassCOD/mgTKNoxidized)
        self._kinetics_20C['Y_A'] = 0.24
        # Fract. of Debris in Lysed Biomass(f_D, gDebrisCOD/gBiomassCOD)
        self._kinetics_20C['f_D'] = 0.08
        # Correction Factor for Hydrolysis (cf_h, unitless)
        self._kinetics_20C['cf_h'] = 0.4
        # Correction Factor for Anoxic Heterotrophic Growth (cf_g, unitless)
        self._kinetics_20C['cf_g'] = 0.8
        # Ratio of N in Active Biomass (i_N_XB, mgN/mgActiveBiomassCOD)
        self._kinetics_20C['i_N_XB'] = 0.086
        # Ratio of N in Debris Biomass (i_N_XD, mgN/mgDebrisBiomassCOD)
        self._kinetics_20C['i_N_XD'] = 0.06
        return None

    def _set_params(self):
        """
        Set the kinetic parameters/constants @ project temperature.

        This function updates the self._params based on the model temperature
        and DO.

        See:
            update();
            _set_ideal_kinetics_20C();
            _set_stoichs().
        """
        # Arrhenius-style temperature corrections use pow(theta, delta_t)
        # with theta values from the literature.

        # Ideal Growth Rate of Heterotrophs (u_max_H, 1/DAY)
        self._params['u_max_H'] = self._kinetics_20C['u_max_H']\
                * pow(1.072, self._delta_t)
        # Decay Rate of Heterotrophs (b_H, 1/DAY)
        self._params['b_LH'] = self._kinetics_20C['b_LH']\
                * pow(1.12, self._delta_t)
        # Ideal Growth Rate of Autotrophs (u_max_A, 1/DAY)
        self._params['u_max_A'] = self._kinetics_20C['u_max_A']\
                * pow(1.103, self._delta_t)
        # Decay Rate of Autotrophs (b_A, 1/DAY)
        self._params['b_LA'] = self._kinetics_20C['b_LA']\
                * pow(1.114, self._delta_t)
        # Half Growth Rate Concentration of Heterotrophs (K_s, mgCOD/L)
        self._params['K_S'] = self._kinetics_20C['K_S']
        # Switch Coefficient for Dissolved O2 of Hetero. (K_OH, mgO2/L)
        self._params['K_OH'] = self._kinetics_20C['K_OH']
        # Association Conc. for Dissolved O2 of Auto. (K_OA, mgN/L)
        self._params['K_OA'] = self._kinetics_20C['K_OA']
        # Association Conc. for NH3-N of Auto. (K_NH, mgN/L)
        self._params['K_NH'] = self._kinetics_20C['K_NH']
        # Association Conc. for NOx of Hetero. (K_NO, mgN/L)
        self._params['K_NO'] = self._kinetics_20C['K_NO']
        # Hydrolysis Rate (k_h, mgCOD/mgBiomassCOD-day)
        self._params['k_h'] = self._kinetics_20C['k_h']\
                * pow(1.116, self._delta_t)
        # Half Rate Conc. for Hetero. Growth on Part. COD
        # (K_X, mgCOD/mgBiomassCOD)
        self._params['K_X'] = self._kinetics_20C['K_X']\
                * pow(1.116, self._delta_t)
        # Ammonification of Org-N in biomass (k_a, L/mgBiomassCOD-day)
        self._params['k_a'] = self._kinetics_20C['k_a']\
                * pow(1.072, self._delta_t)
        # Yield of Hetero. Growth on COD (Y_H, mgBiomassCOD/mgCODremoved)
        self._params['Y_H'] = self._kinetics_20C['Y_H']
        # Yield of Auto. Growth on TKN (Y_A, mgBiomassCOD/mgTKNoxidized)
        self._params['Y_A'] = self._kinetics_20C['Y_A']
        # Fract. of Debris in Lysed Biomass(f_D, gDebrisCOD/gBiomassCOD)
        self._params['f_D'] = self._kinetics_20C['f_D']
        # Correction Factor for Hydrolysis (cf_h, unitless)
        self._params['cf_h'] = self._kinetics_20C['cf_h']
        # Correction Factor for Anoxic Heterotrophic Growth (cf_g, unitless)
        self._params['cf_g'] = self._kinetics_20C['cf_g']
        # Ratio of N in Active Biomass (i_N_XB, mgN/mgActiveBiomassCOD)
        self._params['i_N_XB'] = self._kinetics_20C['i_N_XB']
        # Ratio of N in Debris Biomass (i_N_XD, mgN/mgDebrisBiomassCOD)
        self._params['i_N_XD'] = self._kinetics_20C['i_N_XD']
        return None

    # STOCHIOMETRIC MATRIX
    def _set_stoichs(self):
        """
        Set the stoichiometrics for the model.

        Note:
            Make sure to match the .csv model template file in the
            model_builder folder, Sep 04, 2019):

            _stoichs['x_y'] ==> x is process rate id, and y is component id

        See:
            _set_params();
            _set_ideal_kinetics_20C();
            update().
        """
        # S_O for aerobic hetero. growth, as O2
        self._stoichs['0_0'] = (self._params['Y_H'] - 1.0) \
                / self._params['Y_H']

        # S_O for aerobic auto. growth, as O2
        self._stoichs['2_0'] = (self._params['Y_A'] - 4.57) \
                / self._params['Y_A']

        # S_S for aerobic hetero. growth, as COD
        self._stoichs['0_2'] = -1.0 / self._params['Y_H']

        # S_S for anoxic hetero. growth, as COD
        self._stoichs['1_2'] = -1.0 / self._params['Y_H']

        # S_S for hydrolysis of part. substrate
        self._stoichs['6_2'] = 1.0

        # S_NH required for aerobic hetero. growth, as N
        self._stoichs['0_3'] = -self._params['i_N_XB']

        # S_NH required for anoxic hetero. growth, as N
        self._stoichs['1_3'] = -self._params['i_N_XB']

        # S_NH required for aerobic auto. growth, as N
        self._stoichs['2_3'] = -self._params['i_N_XB'] \
                - 1.0 / self._params['Y_A']

        # S_NH from ammonification, as N
        self._stoichs['5_3'] = 1.0

        # S_NS used by ammonification, as N
        self._stoichs['5_4'] = -1.0

        # S_NS from hydrolysis of part.TKN, as N
        self._stoichs['7_4'] = 1.0

        # S_NO for anoxic hetero. growth, as N
        self._stoichs['1_5'] = (self._params['Y_H'] - 1.0) \
                / (2.86 * self._params['Y_H'])

        # S_NO from nitrification, as N
        self._stoichs['2_5'] = 1.0 / self._params['Y_A']

        # S_ALK consumed by aerobic hetero. growth, as mM CaCO3
        self._stoichs['0_6'] = -self._params['i_N_XB'] / 14.0

        # S_ALK generated by anoxic hetero. growth, as mM CaCO3
        self._stoichs['1_6'] = (1.0 - self._params['Y_H']) \
                / (14.0 * 2.86 * self._params['Y_H']) \
                - self._params['i_N_XB'] / 14.0

        # S_ALK consumed by aerobic auto. growth, as mM CaCO3
        self._stoichs['2_6'] = -self._params['i_N_XB'] / 14 \
                - 1.0 / (7.0 * self._params['Y_A'])

        # S_ALK generated by ammonification, as mM CaCO3
        self._stoichs['5_6'] = 1.0 / 14.0

        # X_S from hetero. decay, as COD
        self._stoichs['3_8'] = 1.0 - self._params['f_D']

        # X_S from auto. decay, as COD
        self._stoichs['4_8'] = 1.0 - self._params['f_D']

        # X_S consumed by hydrolysis of biomass
        self._stoichs['6_8'] = -1.0

        # X_BH from aerobic hetero. growth, as COD
        self._stoichs['0_9'] = 1.0

        # X_BH from anoxic hetero. growth, as COD
        self._stoichs['1_9'] = 1.0

        # X_BH lost in hetero. decay, as COD
        self._stoichs['3_9'] = -1.0

        # X_BA from aerobic auto. growth, as COD
        self._stoichs['2_10'] = 1.0

        # X_BA lost in auto. decay, as COD
        self._stoichs['4_10'] = -1.0

        # X_D from hetero. decay, as COD
        self._stoichs['3_11'] = self._params['f_D']

        # X_D from auto. decay, as COD
        self._stoichs['4_11'] = self._params['f_D']

        # X_NS from hetero. decay, as N
        self._stoichs['3_12'] = self._params['i_N_XB'] - self._params['f_D'] \
                * self._params['i_N_XD']

        # X_NS from auto. decay, as COD
        self._stoichs['4_12'] = self._params['i_N_XB'] - self._params['f_D'] \
                * self._params['i_N_XD']

        # X_NS consumed in hydrolysis of part. TKN, as N
        self._stoichs['7_12'] = -1.0

        return None

    # PROCESS RATE DEFINITIONS (Rj, M/L^3/T):
    #
    # self._monod() below is the Monod switching term inherited from the
    # asm_model base class (see asmbase).

    def _r0_AerGH(self, comps):
        """
        Aerobic Growth Rate of Heterotrophs (mgCOD/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._params['u_max_H'] \
                * self._monod(comps[2], self._params['K_S']) \
                * self._monod(comps[0], self._params['K_OH']) \
                * comps[9]

    def _r1_AxGH(self, comps):
        """
        Anoxic Growth Rate of Heterotrophs (mgCOD/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        # Note the inverted Monod term: low DO switches this process ON.
        return self._params['u_max_H'] \
                * self._monod(comps[2], self._params['K_S']) \
                * self._monod(self._params['K_OH'], comps[0]) \
                * self._monod(comps[5], self._params['K_NO']) \
                * self._params['cf_g'] \
                * comps[9]

    def _r2_AerGA(self, comps):
        """
        Aerobic Growth Rate of Autotrophs (mgCOD/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._params['u_max_A'] \
                * self._monod(comps[3], self._params['K_NH']) \
                * self._monod(comps[0], self._params['K_OA']) \
                * comps[10]

    def _r3_DLH(self, comps):
        """
        Death and Lysis Rate of Heterotrophs (mgCOD/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._params['b_LH'] * comps[9]

    def _r4_DLA(self, comps):
        """
        Death and Lysis Rate of Autotrophs (mgCOD/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._params['b_LA'] * comps[10]

    def _r5_AmmSN(self, comps):
        """
        Ammonification Rate of Soluable Organic N (mgN/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._params['k_a'] \
                * comps[4] \
                * comps[9]

    def _r6_HydX(self, comps):
        """
        Hydrolysis Rate of Particulate Organics (mgCOD/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        # NOTE(review): comps[8] / comps[9] divides by heterotroph biomass;
        # a zero X_BH would raise ZeroDivisionError -- confirm callers
        # guarantee non-zero biomass.
        return self._params['k_h'] \
                * self._monod(comps[8] / comps[9],
                                self._params['K_X']) \
                * (self._monod(comps[0], self._params['K_OH'])
                        + self._params['cf_h']
                        * self._monod(self._params['K_OH'], comps[0])
                        * self._monod(comps[5], self._params['K_NO'])) \
                * comps[9]

    def _r7_HydXN(self, comps):
        """
        Hydrolysis Rate of Particulate Organic N (mgN/L/day).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        # Scales particulate hydrolysis by the N fraction of X_S.
        return self._r6_HydX(comps) * comps[12] / comps[8]

    # OVERALL PROCESS RATE EQUATIONS FOR INDIVIDUAL COMPONENTS

    def _rate0_S_DO(self, comps):
        """
        Overall process rate for dissolved O2 (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['0_0'] * self._r0_AerGH(comps)\
                + self._stoichs['2_0'] * self._r2_AerGA(comps)

    def _rate1_S_I(self, comps):
        """
        Overall process rate for inert soluble COD (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            0.0
        """
        return 0.0

    def _rate2_S_S(self, comps):
        """
        Overall process rate for soluble biodegradable COD (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['0_2'] * self._r0_AerGH(comps)\
                + self._stoichs['1_2'] * self._r1_AxGH(comps)\
                + self._stoichs['6_2'] * self._r6_HydX(comps)

    def _rate3_S_NH(self, comps):
        """
        Overall process rate for ammonia nitrogen (mgN/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['0_3'] * self._r0_AerGH(comps)\
                + self._stoichs['1_3'] * self._r1_AxGH(comps)\
                + self._stoichs['2_3'] * self._r2_AerGA(comps)\
                + self._stoichs['5_3'] * self._r5_AmmSN(comps)

    def _rate4_S_NS(self, comps):
        """
        Overall process rate for soluble organic nitrogen (mgN/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['5_4'] * self._r5_AmmSN(comps)\
                + self._stoichs['7_4'] * self._r7_HydXN(comps)

    def _rate5_S_NO(self, comps):
        """
        Overall process rate for nitrite/nitrate nitrogen (mgN/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['1_5'] * self._r1_AxGH(comps)\
                + self._stoichs['2_5'] * self._r2_AerGA(comps)

    def _rate6_S_ALK(self, comps):
        """
        Overall process rate for alkalinity (mg/L/d as CaCO3)

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['0_6'] * self._r0_AerGH(comps)\
                + self._stoichs['1_6'] * self._r1_AxGH(comps)\
                + self._stoichs['2_6'] * self._r2_AerGA(comps)\
                + self._stoichs['5_6'] * self._r5_AmmSN(comps)

    def _rate7_X_I(self, comps):
        """
        Overall process rate for inert particulate COD (mgCOD/L/d)

        Args:
            comps: list of current model components (concentrations).

        Return:
            0.0
        """
        return 0.0

    def _rate8_X_S(self, comps):
        """
        Overall process rate for particulate biodegradable COD (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['3_8'] * self._r3_DLH(comps)\
                + self._stoichs['4_8'] * self._r4_DLA(comps)\
                + self._stoichs['6_8'] * self._r6_HydX(comps)

    def _rate9_X_BH(self, comps):
        """
        Overall process rate for heterotrophic biomass (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['0_9'] * self._r0_AerGH(comps)\
                + self._stoichs['1_9'] * self._r1_AxGH(comps)\
                + self._stoichs['3_9'] * self._r3_DLH(comps)

    def _rate10_X_BA(self, comps):
        """
        Overall process rate for autotrophic biomass (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['2_10'] * self._r2_AerGA(comps)\
                + self._stoichs['4_10'] * self._r4_DLA(comps)

    def _rate11_X_D(self, comps):
        """
        Overall process rate for biomass debris (mgCOD/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['3_11'] * self._r3_DLH(comps)\
                + self._stoichs['4_11'] * self._r4_DLA(comps)

    def _rate12_X_NS(self, comps):
        """
        Overall process rate for particulate organic nitrogen (mgN/L/d).

        Args:
            comps: list of current model components (concentrations).

        Return:
            float
        """
        return self._stoichs['3_12'] * self._r3_DLH(comps)\
                + self._stoichs['4_12'] * self._r4_DLA(comps)\
                + self._stoichs['7_12'] * self._r7_HydXN(comps)

    def _dCdt(self, t, mo_comps, vol, flow, in_comps, fix_DO, DO_sat_T):
        '''
        Defines dC/dt for the reactor based on mass balance.

        Overall mass balance:
        dComp/dt == InfFlow / Actvol * (in_comps - mo_comps) + GrowthRate
                 == (in_comps - mo_comps) / HRT + GrowthRate

        Args:
            t:          time for use in ODE integration routine, d
            mo_comps:   list of model component for mainstream outlet, mg/L.
            vol:        reactor's active volume, m3;
            flow:       reactor's total inflow, m3/d
            in_comps:   list of model components for inlet, mg/L;
            fix_DO:     whether to use a fix DO setpoint, bool
            DO_sat_T:   saturation DO of the project elev. and temp, mg/L

        Return:
            dC/dt of the system ([float])

        ASM1 Components:
            0_S_DO, 1_S_I, 2_S_S, 3_S_NH, 4_S_NS, 5_S_NO, 6_S_ALK,
            7_X_I, 8_X_S, 9_X_BH, 10_X_BA, 11_X_D, 12_X_NS
        '''
        _HRT = vol / flow

        # set DO rate to zero since DO is set to a fix conc., which is
        # recommended for steady state simulation; alternatively, use the given
        # KLa to dynamically estimate residual DO
        if fix_DO or self._bulk_DO == 0:
            result = [0.0]
        else:  #TODO: what if the user provides a fix scfm of air?
            result = [(in_comps[0] - mo_comps[0]) / _HRT
                        + self._KLa * (DO_sat_T - mo_comps[0])
                        + self._rate0_S_DO(mo_comps)]

        result.append((in_comps[1] - mo_comps[1]) / _HRT
                        + self._rate1_S_I(mo_comps))

        result.append((in_comps[2] - mo_comps[2]) / _HRT
                        + self._rate2_S_S(mo_comps))

        result.append((in_comps[3] - mo_comps[3]) / _HRT
                        + self._rate3_S_NH(mo_comps))

        result.append((in_comps[4] - mo_comps[4]) / _HRT
                        + self._rate4_S_NS(mo_comps))

        result.append((in_comps[5] - mo_comps[5]) / _HRT
                        + self._rate5_S_NO(mo_comps))

        result.append((in_comps[6] - mo_comps[6]) / _HRT
                        + self._rate6_S_ALK(mo_comps))

        result.append((in_comps[7] - mo_comps[7]) / _HRT
                        + self._rate7_X_I(mo_comps))

        result.append((in_comps[8] - mo_comps[8]) / _HRT
                        + self._rate8_X_S(mo_comps))

        result.append((in_comps[9] - mo_comps[9]) / _HRT
                        + self._rate9_X_BH(mo_comps))

        result.append((in_comps[10] - mo_comps[10]) / _HRT
                        + self._rate10_X_BA(mo_comps))

        result.append((in_comps[11] - mo_comps[11]) / _HRT
                        + self._rate11_X_D(mo_comps))

        result.append((in_comps[12] - mo_comps[12]) / _HRT
                        + self._rate12_X_NS(mo_comps))

        # return a shallow copy so callers can't mutate internal state
        return result[:]
| toogad/PooPyLab_Project | PooPyLab/ASMModel/asm_2d.py | Python | gpl-3.0 | 24,192 |
#!/usr/bin/env python
import sys
from utils.file_system import *
from calculations.beatmap_metadata import BeatmapMetadata
from database.database_wrapper import DatabaseWrapper
def hello(msg):
    """Print a hello-world greeting followed by *msg*."""
    greeting = "Hello world! " + msg
    print(greeting)
def print_help():
    """Print usage information and terminate the program with status 1."""
    usage = (
        "Usage: ./beatmap_processor.py [songs_folder]",
        "This script initializes beatmap database with pp calculations",
        "of all standard maps located on [songs_folder]",
    )
    for line in usage:
        print(line)
    exit(1)
# --- main script ----------------------------------------------------------
# Scans the songs folder, computes pp for every osu! standard beatmap and
# stores the results in the database.

if len(sys.argv) != 2:
    print_help()

songs_abs_path = abs_path(sys.argv[1])
print(songs_abs_path)

files = list_files(songs_abs_path)
osu_files = [x for x in files if x.endswith(".osu")]

if not dir_exists("thumbnails"):
    mkdir("thumbnails")

db_wrapper = DatabaseWrapper()

added = 0        # beatmaps actually written to the database
errors = 0       # beatmaps that failed diff calculation
other_modes = 0  # non-standard (taiko/ctb/mania) beatmaps skipped

print("\n\nSTARTING PP CALCULATIONS.....\n")
for u in osu_files:
    print("Calculating pp for '" + u + "'.....")
    b = BeatmapMetadata(u)
    if not b.is_standard_map():
        print("Not a osu! standard map. Skipping...")
        other_modes += 1
        continue
    success = b.calculate_diff_values()
    if not success:
        print("Beatmap has errors. Skipping...")
        errors += 1
        continue
    b.path = path_subtraction(u, songs_abs_path)
    if db_wrapper.already_exists(b.beatmap_id):  # not like this peppy....
        print("Beatmap " + b.path + " already exists on the database; skipping...")
    else:
        db_wrapper.store_beatmap(b)
        # BUG FIX: only count maps that were actually stored; the original
        # also incremented 'added' for duplicates it had just skipped.
        added += 1

print(str(added) + " beatmaps added to the database!")
print(str(errors) + " beatmaps with errors found!")
print(str(other_modes) + " beatmaps from other modes were skipped.")
| altur13/osu-Assistant | beatmap_processor.py | Python | gpl-3.0 | 1,817 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema for the schoolwidget app: one widget listing absent
    # teachers plus per-hour entries attached to it.

    dependencies = [
    ]

    operations = [
        # One row per lesson hour on a MissingTeacherWidget.
        migrations.CreateModel(
            name='MissingTeacherEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('hour', models.PositiveIntegerField(verbose_name='Heure de cours')),
                ('content', models.TextField(verbose_name='contenu', blank=True)),
                ('visible', models.BooleanField(default=True, verbose_name='visible ?')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='date de cr\xe9ation')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='date de modification')),
            ],
            options={
                'verbose_name': 'heure de cours',
                'verbose_name_plural': 'heures de cours',
            },
            bases=(models.Model,),
        ),
        # Dashboard widget holding the list of absent teachers.
        migrations.CreateModel(
            name='MissingTeacherWidget',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text="Le nom est obligatoire et permet d'identifier votre widget facilement.", max_length=100, verbose_name='nom')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='date de cr\xe9ation')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='date de modification')),
                ('missing', models.TextField(help_text='Un enseignant par ligne.', verbose_name='Enseignants absents', blank=True)),
                ('hide_empty', models.BooleanField(default=True, help_text="Masque les heures de cours pour lesquelles aucuneinformation n'a \xe9t\xe9 entr\xe9e.", verbose_name='Cacher les \xe9l\xe9ments vides')),
            ],
            options={
                'verbose_name': 'enseignant absent',
                'verbose_name_plural': 'enseignants absents',
            },
            bases=(models.Model,),
        ),
        # FK added afterwards because both models are created in this
        # migration; entries hang off their widget via related_name='hours'.
        migrations.AddField(
            model_name='missingteacherentry',
            name='widget',
            field=models.ForeignKey(related_name='hours', to='schoolwidget.MissingTeacherWidget'),
            preserve_default=True,
        ),
    ]
| AlexandreDecan/Dashbird | widgets/schoolwidget/migrations/0001_initial.py | Python | gpl-3.0 | 2,474 |
# -*- coding: utf-8 -*-
# -*- mode: python -*-
"""Sources of random data
Copyright (C) 2013 Dan Meliza <dmeliza@uchicago.edu>
Created Wed May 29 14:50:02 2013
"""
from mspikes import util
from mspikes.types import DataBlock, Source, Node, tag_set
from numpy.random import RandomState
class rand_samples(Source):
    """Generates random values from N(0,1)"""

    # Class-level defaults used by options(); util.set_option_attributes in
    # __init__ presumably copies the same keys from **options onto the
    # instance -- TODO confirm against mspikes.util.
    seed = 1
    nsamples = 4096

    def __init__(self, **options):
        # Bind 'seed' and 'nsamples' from options (falling back to defaults).
        util.set_option_attributes(self, options, seed=1, nsamples=4096)
        self.chunk_size = 1024      # samples emitted per DataBlock
        self.channel = "random"     # id stamped on every emitted chunk
        self.sampling_rate = 1
        # dedicated RNG so the stream is reproducible for a given seed
        self._randg = RandomState(self.seed)

    @classmethod
    def options(cls, addopt_f, **defaults):
        # Register command-line options; caller-supplied defaults override
        # the class-level ones.
        addopt_f("--seed",
                 help="seed for random number generator",
                 type=int,
                 metavar='INT',
                 default=defaults.get('seed', cls.seed))
        addopt_f("--nsamples",
                 help="number of samples to generate",
                 type=int,
                 metavar='INT',
                 default=defaults.get('nsamples', cls.nsamples))

    def data(self, t=0):
        """Generates a data chunk"""
        # One chunk of N(0,1) samples, time-stamped with offset t.
        return DataBlock(id=self.channel, offset=t, ds=self.sampling_rate,
                         data=self._randg.randn(self.chunk_size),
                         tags=tag_set("samples"))

    def __iter__(self):
        # Emit chunks until nsamples is covered, pushing each downstream
        # via Node.send before yielding it.
        t = 0
        while t < self.nsamples:
            data = self.data(t)
            Node.send(self, data)
            yield data
            t += self.chunk_size
## TODO random_events
# Variables:
# End:
| melizalab/mspikes | mspikes/modules/random_sources.py | Python | gpl-3.0 | 1,590 |
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), "README.md")) as readme:
    README = readme.read()

# Requirement lines may be VCS URLs of the form "...#egg=<name>"; setuptools
# needs the bare distribution name, so keep only the part after "#egg=".
# Each line is stripped because readlines() keeps the trailing newline,
# which would otherwise end up inside the requirement strings; blank
# lines are skipped entirely.
with open(os.path.join(os.path.dirname(__file__), "requirements.in")) as requirements:
    REQUIREMENTS = [
        req.split("#egg=")[1].strip() if "#egg=" in req else req.strip()
        for req in requirements.readlines()
        if req.strip()
    ]

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name="django-cineclub",
    version="3.0.0",
    packages=["cine"],
    install_requires=REQUIREMENTS,
    include_package_data=True,
    license="GPL License",
    description="A Django app to manage a cineclub.",
    long_description=README,
    url="https://saurel.me/",
    author="Guilhem Saurel",
    author_email="webmaster@saurel.me",
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GPL License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
)
| nim65s/django-cineclub | setup.py | Python | gpl-3.0 | 1,266 |
# Put libraries such as Divisi in the PYTHONPATH.
import sys, pickle, os
sys.path = ['/stuff/openmind'] + sys.path
from csc.divisi.cnet import *
from csc.divisi.graphics import output_svg
from vendor_db import iter_info
from csamoa.corpus.models import *
from csamoa.conceptnet.models import *
# Load the OMCS language model
en = Language.get('en')
en_nl = get_nl('en')

# Load OMCS stopwords.  Use a context manager so the file handle is
# closed promptly instead of leaking until interpreter exit.
with open('stopwords.txt', 'r') as sw:
    swords = [x.strip() for x in sw.readlines()]

# Parameters
factor = 1  # weight added to the matrix for each (vendor, term) observation
wsize = 2   # word-window size used when extracting candidate concepts
def check_concept(concept):
    """Return True if *concept* exists in the English ConceptNet, else False.

    A bare ``except`` would also swallow KeyboardInterrupt/SystemExit;
    catching Exception keeps the lookup best-effort while letting real
    interrupts propagate.
    """
    try:
        Concept.get(concept, 'en')
        return True
    except Exception:
        return False
def english_window(text):
    """Return candidate ConceptNet concepts found in *text*.

    Splits the text into lowercase words (ampersands become 'and',
    stopwords removed), then collects every wsize-word window and every
    single word that is a known concept.
    """
    windows = []
    words = [x for x in text.lower().replace('&', 'and').split() if x not in swords]
    for x in range(len(words)-wsize+1):
        pair = " ".join(words[x:x+wsize])
        if check_concept(pair): windows.append(pair)
        if check_concept(words[x]): windows.append(words[x])
    # NOTE(review): this loop re-checks the *leading* wsize-1 words, which
    # the loop above already covered when len(words) >= wsize; it looks as
    # if it was meant to check the trailing words (words[-(wsize-1):]) that
    # the main loop never tests singly -- confirm intent before changing.
    for c in range(wsize-1):
        if check_concept(words[c]): windows.append(words[c])
    return windows
# Build (or reload) the vendor/term co-occurrence matrix.  The matrix is
# cached in vendor_only.pickle so repeated runs skip the expensive
# concept-extraction pass over the vendor data.
if 'vendor_only.pickle' in os.listdir('.'):
    print "Loading saved matrix."
    matrix = pickle.load(open("vendor_only.pickle"))
else:
    print "Creating New Tensor"
    matrix = SparseLabeledTensor(ndim=2)
    print "Adding Vendors"
    for co, englist in iter_info('CFB_Cities'):
        print co
        for phrase in englist:
            parts = english_window(phrase)
            print parts
            # Record the symmetric relation in both directions so the SVD
            # sees vendors and terms as rows as well as columns.
            for part in parts:
                matrix[co, ('sells', part)] += factor
                matrix[part, ('sells_inv', co)] += factor
    pickle.dump(matrix, open("vendor_only.pickle", 'w'))
print "Normalizing."
matrix = matrix.normalized()
print "Matrix constructed. Running SVD."
# Reduce to 10 latent dimensions and render a 2-D projection of the
# left singular vectors.
svd = matrix.svd(k=10)
svd.summarize()
output_svg(svd.u, "vendorplain.svg", xscale=3000, yscale=3000, min=0.03)
| commonsense/divisi | doc/demo/vendor_only_svd.py | Python | gpl-3.0 | 1,871 |
#-*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from filer import settings as filer_settings
# Route requests for private media through view functions so access
# control can be applied.  The URL prefixes are derived from each
# storage's base_url with the leading slash removed.
urlpatterns = patterns('filer.server.views',
    # Full-size protected files (note the trailing slash in this pattern).
    url(r'^' + filer_settings.FILER_PRIVATEMEDIA_STORAGE.base_url.lstrip('/') + r'(?P<path>.*)/$',
        'serve_protected_file',),
    # Protected thumbnails (no trailing slash).
    url(r'^' + filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE.base_url.lstrip('/') + r'(?P<path>.*)$',
        'serve_protected_thumbnail',),
)
| hzlf/openbroadcast | website/__filer/server/urls.py | Python | gpl-3.0 | 449 |
import os
import subprocess
# This is an example for using Kataja to launch a visualisation from a python script that doesn't use kataja
# structures, but can output bracket trees. Kataja is launched as a separate process so it doesn't stop the
# main script.
def send_to_kataja(tree, image_file=''):
    """Launch Kataja as a separate process to visualise *tree*.

    If *image_file* is given, Kataja is asked to render the tree into that
    file via its ``-image_out`` option.  Returns a process handle on POSIX,
    and on Windows when detaching is supported; otherwise returns None.
    """
    command = ['python', 'Kataja.py']
    if image_file:
        command += ['-image_out', image_file]
    command.append(tree)
    if os.name == 'posix':
        # Start Kataja in its own process group so it outlives this script.
        return subprocess.Popen(command, preexec_fn=os.setpgrp,
                                stdout=subprocess.DEVNULL)
    if os.name == 'nt' and hasattr(os, 'P_DETACH'):
        return os.spawnv(os.P_DETACH, 'python', command)
# python Kataja.py -image_out test.pdf "[ [ A {word} ] [.T did [.V happen ] ] ]"
# tree = """[.{CP} [.{DP(0)} [.{D'} [.{D} which ] [.{NP} [.{N'} [.N wine ] ] ] ] ] [.{C'} [.C \epsilon [.{VP} [.{DP} [.{D'} [.D the ] [.{NP} [.{N'} [.N queen ] ] ] ] ] [.{V'} [.V prefers ] [.{DP} t(0) ] ] ] ] ] ]
# """
# Example invocation: a Finnish clause written as a labelled bracket tree
# (qtree-style notation), rendered to test.pdf by the detached Kataja process.
tree = """[.{FP} {Graham Greene_i} [.{F'} on_j [.{TP} t_i [.{T'} t_j [.{AuxP} t_j [.{PrtP} kirjoittanut_k [.{VP} t_i [.{V'} t_k [.{DP} tämän kirjan ] ] ] ] ] ] ] ] ]
"""
send_to_kataja(tree, 'test.pdf')
print(f"I just sent {tree} to kataja.")
print("thanks, I'm done now!")
| jpurma/Kataja | kataja/runner.py | Python | gpl-3.0 | 1,380 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2011, Max Leuthaeuser
License: GPL (see LICENSE.txt for details)
"""
__author__ = 'Max Leuthaeuser'
__license__ = 'GPL'
from CodeGeneration.AbstractMetaModel.AbstractMetaClass import MethodInvocationNotAllowedError
class Modifier:
    """Enumeration-style base for the modifiers available in a language.

    This class (and every subclass) only enumerates modifier keywords for
    code generation; it is never meant to be instantiated.
    """

    def __init__(self):
        # Instantiating this class or any subclass is explicitly forbidden.
        raise MethodInvocationNotAllowedError("Instantiation of class Modifier or all subclasses is not allowed!")
#!/usr/bin/env python
#
###############################################################################
# Copyright (C) 2016-2018 Cortney T. Buffington, N0MJS <n0mjs@me.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
import ConfigParser
import sys
from socket import AF_INET, AF_INET6, getaddrinfo, IPPROTO_UDP
# Does anybody read this stuff? There's a PEP somewhere that says I should do this.
__author__ = 'Cortney T. Buffington, N0MJS'
__copyright__ = 'Copyright (c) 2016-2018 Cortney T. Buffington, N0MJS and the K0USY Group'
__license__ = 'GNU GPLv3'
__maintainer__ = 'Cort Buffington, N0MJS'
__email__ = 'n0mjs@me.com'
def get_address(_config):
    """Resolve *_config* (hostname or address literal) to an IP string.

    Prefers an IPv4 result, falls back to IPv6, and returns the string
    'invalid address' when the lookup yields neither family.  A name that
    cannot be resolved at all still raises socket.gaierror, unchanged
    from the original behaviour.
    """
    ipv4 = ''
    ipv6 = ''
    socket_info = getaddrinfo(_config, None, 0, 0, IPPROTO_UDP)
    for family, _socktype, _proto, _canonname, sockaddr in socket_info:
        # Compare against the named constants: the raw numbers used before
        # (2 and 30) are platform-specific -- AF_INET6 is 30 on macOS but
        # 10 on Linux, so the old test silently missed IPv6 results there.
        if family == AF_INET:
            ipv4 = sockaddr[0]
        elif family == AF_INET6:
            ipv6 = sockaddr[0]
    if ipv4:
        return ipv4
    if ipv6:
        return ipv6
    return 'invalid address'
def build_config(_config_file):
    """Parse the dmrlink INI file at *_config_file* into a nested dict.

    Returns a dict with keys GLOBAL, REPORTS, LOGGER, ALIASES and SYSTEMS;
    every other enabled INI section becomes an IPSC system entry under
    SYSTEMS with LOCAL/MASTER/PEERS sub-dicts.  The single-byte MODE field
    and the 4-byte FLAGS field are assembled bit-by-bit from the boolean
    options.  Exits the process on an unreadable or unparsable file.
    """
    config = ConfigParser.ConfigParser()
    if not config.read(_config_file):
        sys.exit('Configuration file \''+_config_file+'\' is not a valid configuration file! Exiting...')
    CONFIG = {}
    CONFIG['GLOBAL'] = {}
    CONFIG['REPORTS'] = {}
    CONFIG['LOGGER'] = {}
    CONFIG['ALIASES'] = {}
    CONFIG['SYSTEMS'] = {}
    try:
        for section in config.sections():
            if section == 'GLOBAL':
                CONFIG['GLOBAL'].update({
                    'PATH': config.get(section, 'PATH')
                })
            elif section == 'REPORTS':
                CONFIG['REPORTS'].update({
                    'REPORT_NETWORKS': config.get(section, 'REPORT_NETWORKS'),
                    'REPORT_RCM': config.get(section, 'REPORT_RCM'),
                    'REPORT_INTERVAL': config.getint(section, 'REPORT_INTERVAL'),
                    'REPORT_PORT': config.get(section, 'REPORT_PORT'),
                    'REPORT_CLIENTS': config.get(section, 'REPORT_CLIENTS').split(','),
                    'PRINT_PEERS_INC_MODE': config.getboolean(section, 'PRINT_PEERS_INC_MODE'),
                    'PRINT_PEERS_INC_FLAGS': config.getboolean(section, 'PRINT_PEERS_INC_FLAGS')
                })
                # Port and RCM are read as strings so an empty value can mean
                # "disabled"; convert only when something was supplied.
                if CONFIG['REPORTS']['REPORT_PORT']:
                    CONFIG['REPORTS']['REPORT_PORT'] = int(CONFIG['REPORTS']['REPORT_PORT'])
                if CONFIG['REPORTS']['REPORT_RCM']:
                    CONFIG['REPORTS']['REPORT_RCM'] = bool(CONFIG['REPORTS']['REPORT_RCM'])
            elif section == 'LOGGER':
                CONFIG['LOGGER'].update({
                    'LOG_FILE': config.get(section, 'LOG_FILE'),
                    'LOG_HANDLERS': config.get(section, 'LOG_HANDLERS'),
                    'LOG_LEVEL': config.get(section, 'LOG_LEVEL'),
                    'LOG_NAME': config.get(section, 'LOG_NAME')
                })
            elif section == 'ALIASES':
                CONFIG['ALIASES'].update({
                    'TRY_DOWNLOAD': config.getboolean(section, 'TRY_DOWNLOAD'),
                    'PATH': config.get(section, 'PATH'),
                    'PEER_FILE': config.get(section, 'PEER_FILE'),
                    'SUBSCRIBER_FILE': config.get(section, 'SUBSCRIBER_FILE'),
                    'TGID_FILE': config.get(section, 'TGID_FILE'),
                    'LOCAL_FILE': config.get(section, 'LOCAL_FILE'),
                    'PEER_URL': config.get(section, 'PEER_URL'),
                    'SUBSCRIBER_URL': config.get(section, 'SUBSCRIBER_URL'),
                    # STALE_DAYS is configured in days; stored in seconds.
                    'STALE_TIME': config.getint(section, 'STALE_DAYS') * 86400,
                })
            elif config.getboolean(section, 'ENABLED'):
                CONFIG['SYSTEMS'].update({section: {'LOCAL': {}, 'MASTER': {}, 'PEERS': {}}})
                CONFIG['SYSTEMS'][section]['LOCAL'].update({
                    # In case we want to keep config, but not actually connect to the network
                    'ENABLED': config.getboolean(section, 'ENABLED'),
                    # These items are used to create the MODE byte
                    'PEER_OPER': config.getboolean(section, 'PEER_OPER'),
                    'IPSC_MODE': config.get(section, 'IPSC_MODE'),
                    'TS1_LINK': config.getboolean(section, 'TS1_LINK'),
                    'TS2_LINK': config.getboolean(section, 'TS2_LINK'),
                    'MODE': '',
                    # These items are used to create the multi-byte FLAGS field
                    'AUTH_ENABLED': config.getboolean(section, 'AUTH_ENABLED'),
                    'CSBK_CALL': config.getboolean(section, 'CSBK_CALL'),
                    'RCM': config.getboolean(section, 'RCM'),
                    'CON_APP': config.getboolean(section, 'CON_APP'),
                    'XNL_CALL': config.getboolean(section, 'XNL_CALL'),
                    'XNL_MASTER': config.getboolean(section, 'XNL_MASTER'),
                    'DATA_CALL': config.getboolean(section, 'DATA_CALL'),
                    'VOICE_CALL': config.getboolean(section, 'VOICE_CALL'),
                    'MASTER_PEER': config.getboolean(section, 'MASTER_PEER'),
                    'FLAGS': '',
                    # Things we need to know to connect and be a peer in this IPSC
                    # (RADIO_ID and AUTH_KEY are packed into raw byte strings;
                    # Python-2-only str.decode('hex'))
                    'RADIO_ID': hex(int(config.get(section, 'RADIO_ID')))[2:].rjust(8,'0').decode('hex'),
                    'IP': config.get(section, 'IP'),
                    'PORT': config.getint(section, 'PORT'),
                    'ALIVE_TIMER': config.getint(section, 'ALIVE_TIMER'),
                    'MAX_MISSED': config.getint(section, 'MAX_MISSED'),
                    'AUTH_KEY': (config.get(section, 'AUTH_KEY').rjust(40,'0')).decode('hex'),
                    'GROUP_HANGTIME': config.getint(section, 'GROUP_HANGTIME'),
                    'NUM_PEERS': 0,
                })
                # Master means things we need to know about the master peer of the network
                CONFIG['SYSTEMS'][section]['MASTER'].update({
                    'RADIO_ID': '\x00\x00\x00\x00',
                    'MODE': '\x00',
                    'MODE_DECODE': '',
                    'FLAGS': '\x00\x00\x00\x00',
                    'FLAGS_DECODE': '',
                    'STATUS': {
                        'CONNECTED': False,
                        'PEER_LIST': False,
                        'KEEP_ALIVES_SENT': 0,
                        'KEEP_ALIVES_MISSED': 0,
                        'KEEP_ALIVES_OUTSTANDING': 0,
                        'KEEP_ALIVES_RECEIVED': 0,
                        'KEEP_ALIVE_RX_TIME': 0
                    },
                    'IP': '',
                    'PORT': ''
                })
                # Only a non-master peer needs the master's address resolved.
                if not CONFIG['SYSTEMS'][section]['LOCAL']['MASTER_PEER']:
                    CONFIG['SYSTEMS'][section]['MASTER'].update({
                        'IP': get_address(config.get(section, 'MASTER_IP')),
                        'PORT': config.getint(section, 'MASTER_PORT')
                    })
                # Temporary locations for building MODE and FLAG data
                MODE_BYTE = 0
                FLAG_1 = 0
                FLAG_2 = 0
                # Construct and store the MODE field
                if CONFIG['SYSTEMS'][section]['LOCAL']['PEER_OPER']:
                    MODE_BYTE |= 1 << 6
                if CONFIG['SYSTEMS'][section]['LOCAL']['IPSC_MODE'] == 'ANALOG':
                    MODE_BYTE |= 1 << 4
                elif CONFIG['SYSTEMS'][section]['LOCAL']['IPSC_MODE'] == 'DIGITAL':
                    MODE_BYTE |= 1 << 5
                if CONFIG['SYSTEMS'][section]['LOCAL']['TS1_LINK']:
                    MODE_BYTE |= 1 << 3
                else:
                    MODE_BYTE |= 1 << 2
                if CONFIG['SYSTEMS'][section]['LOCAL']['TS2_LINK']:
                    MODE_BYTE |= 1 << 1
                else:
                    MODE_BYTE |= 1 << 0
                CONFIG['SYSTEMS'][section]['LOCAL']['MODE'] = chr(MODE_BYTE)
                # Construct and store the FLAGS field
                if CONFIG['SYSTEMS'][section]['LOCAL']['CSBK_CALL']:
                    FLAG_1 |= 1 << 7
                if CONFIG['SYSTEMS'][section]['LOCAL']['RCM']:
                    FLAG_1 |= 1 << 6
                if CONFIG['SYSTEMS'][section]['LOCAL']['CON_APP']:
                    FLAG_1 |= 1 << 5
                if CONFIG['SYSTEMS'][section]['LOCAL']['XNL_CALL']:
                    FLAG_2 |= 1 << 7
                if CONFIG['SYSTEMS'][section]['LOCAL']['XNL_CALL'] and CONFIG['SYSTEMS'][section]['LOCAL']['XNL_MASTER']:
                    FLAG_2 |= 1 << 6
                elif CONFIG['SYSTEMS'][section]['LOCAL']['XNL_CALL'] and not CONFIG['SYSTEMS'][section]['LOCAL']['XNL_MASTER']:
                    FLAG_2 |= 1 << 5
                if CONFIG['SYSTEMS'][section]['LOCAL']['AUTH_ENABLED']:
                    FLAG_2 |= 1 << 4
                if CONFIG['SYSTEMS'][section]['LOCAL']['DATA_CALL']:
                    FLAG_2 |= 1 << 3
                if CONFIG['SYSTEMS'][section]['LOCAL']['VOICE_CALL']:
                    FLAG_2 |= 1 << 2
                if CONFIG['SYSTEMS'][section]['LOCAL']['MASTER_PEER']:
                    FLAG_2 |= 1 << 0
                CONFIG['SYSTEMS'][section]['LOCAL']['FLAGS'] = '\x00\x00'+chr(FLAG_1)+chr(FLAG_2)
    except ConfigParser.Error, err:
        print(err)
        sys.exit('Could not parse configuration file, exiting...')
    return CONFIG
# Used to run this file directly and print the config,
# which might be useful for debugging
if __name__ == '__main__':
    import sys
    import os
    import argparse
    from pprint import pprint
    # Change the current directory to the location of the application
    os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
    # CLI argument parser - handles picking up the config file from the command line, and sending a "help" message
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', action='store', dest='CONFIG_FILE', help='/full/path/to/config.file (usually dmrlink.cfg)')
    cli_args = parser.parse_args()
    # Ensure we have a path for the config file, if one wasn't specified, then use the execution directory
    if not cli_args.CONFIG_FILE:
        cli_args.CONFIG_FILE = os.path.dirname(os.path.abspath(__file__))+'/../dmrlink.cfg'
    pprint(build_config(cli_args.CONFIG_FILE))
| n0mjs710/DMRlink | ipsc/dmrlink_config.py | Python | gpl-3.0 | 11,481 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the comment tables and adjust Node/Question.

        Drops 'forum_likedcomment' and 'comment' (comments become Nodes),
        adds the nullable Node.abs_parent FK, and makes
        Question.last_activity_by nullable.
        """
        # Deleting model 'LikedComment'
        db.delete_table('forum_likedcomment')
        # Deleting model 'Comment'
        db.delete_table(u'comment')
        # Adding field 'Node.abs_parent'
        db.add_column('forum_node', 'abs_parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='all_children', null=True, to=orm['forum.Node']), keep_default=False)
        # Changing field 'Question.last_activity_by'
        db.alter_column(u'question', 'last_activity_by_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['forum.User']))
    def backwards(self, orm):
        """Reverse the migration: recreate the comment tables and restore fields.

        Recreates 'forum_likedcomment' and 'comment', removes
        Node.abs_parent, and makes Question.last_activity_by non-nullable
        again.  NOTE: the dropped tables are recreated empty; the original
        comment rows are not recoverable.
        """
        # Adding model 'LikedComment'
        db.create_table('forum_likedcomment', (
            ('comment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.Comment'])),
            ('canceled', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.User'])),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('forum', ['LikedComment'])
        # Adding model 'Comment'
        db.create_table(u'comment', (
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=300)),
            ('node', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', null=True, to=orm['forum.Node'])),
            ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='deleted_comments', null=True, to=orm['forum.User'], blank=True)),
            ('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['forum.User'])),
            ('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('forum', ['Comment'])
        # Deleting field 'Node.abs_parent'
        db.delete_column('forum_node', 'abs_parent_id')
        # Changing field 'Question.last_activity_by'
        db.alter_column(u'question', 'last_activity_by_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.User']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']"})
},
'forum.anonymousnode': {
'Meta': {'object_name': 'AnonymousNode', '_ormbases': ['forum.Node']},
'convertible_to': ('django.db.models.fields.CharField', [], {'default': "'node'", 'max_length': '16'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Node']", 'unique': 'True', 'primary_key': 'True'}),
'validation_hash': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_content'", 'to': "orm['forum.Node']"})
},
'forum.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'accepted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']", 'null': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Node']", 'unique': 'True', 'primary_key': 'True'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.authkeyuserassociation': {
'Meta': {'object_name': 'AuthKeyUserAssociation'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['forum.User']"})
},
'forum.award': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user', 'badge'),)", 'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['forum.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'awards'", 'to': "orm['forum.User']"})
},
'forum.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['forum.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'forum.favoritequestion': {
'Meta': {'unique_together': "(('question', 'user'),)", 'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['forum.User']"})
},
'forum.flaggeditem': {
'Meta': {'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'null': 'True', 'to': "orm['forum.Node']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['forum.User']"})
},
'forum.keyvalue': {
'Meta': {'object_name': 'KeyValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'value': ('forum.models.utils.PickledObjectField', [], {})
},
'forum.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['forum.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['forum.User']"})
},
'forum.node': {
'Meta': {'object_name': 'Node'},
'abs_parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_children'", 'null': 'True', 'to': "orm['forum.Node']"}),
'active_revision': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'active'", 'unique': 'True', 'null': 'True', 'to': "orm['forum.NodeRevision']"}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nodes'", 'to': "orm['forum.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_nodes'", 'null': 'True', 'to': "orm['forum.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_nodes'", 'null': 'True', 'to': "orm['forum.User']"}),
'node_type': ('django.db.models.fields.CharField', [], {'default': "'node'", 'max_length': '16'}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['forum.Node']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'nodes'", 'to': "orm['forum.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'forum.noderevision': {
'Meta': {'unique_together': "(('node', 'revision'),)", 'object_name': 'NodeRevision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'noderevisions'", 'to': "orm['forum.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Node']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'forum.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'accepted_answer': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'question_accepting'", 'unique': 'True', 'null': 'True', 'to': "orm['forum.Answer']"}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['forum.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['forum.User']"}),
'favourite_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'null': 'True', 'to': "orm['forum.User']"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Node']", 'unique': 'True'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subscriptions'", 'through': "'QuestionSubscription'", 'to': "orm['forum.User']"}),
'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.questionsubscription': {
'Meta': {'object_name': 'QuestionSubscription'},
'auto_subscription': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_view': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 17, 1, 11, 40, 975000)'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']"})
},
'forum.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reputes'", 'null': 'True', 'to': "orm['forum.Node']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reputes'", 'to': "orm['forum.User']"}),
'user_previous_rep': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'forum.subscriptionsettings': {
'Meta': {'object_name': 'SubscriptionSettings'},
'all_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'all_questions_watched_tags': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'enable_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_joins': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'new_question': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '1'}),
'new_question_watched_tags': ('django.db.models.fields.CharField', [], {'default': "'i'", 'max_length': '1'}),
'notify_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'notify_answers': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'notify_comments_own_post': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_reply_to_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'questions_answered': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'questions_asked': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'questions_commented': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'questions_viewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subscribed_questions': ('django.db.models.fields.CharField', [], {'default': "'i'", 'max_length': '1'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'subscription_settings'", 'unique': 'True', 'to': "orm['forum.User']"})
},
'forum.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['forum.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['forum.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marked_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'marked_tags'", 'through': "'MarkedTag'", 'to': "orm['forum.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'forum.user': {
'Meta': {'object_name': 'User', '_ormbases': ['auth.User']},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'forum.validationhash': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'},
'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 18, 1, 11, 41, 269000)'}),
'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.User']"})
},
'forum.vote': {
'Meta': {'object_name': 'Vote', 'db_table': "u'vote'"},
'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'null': 'True', 'to': "orm['forum.Node']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['forum.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
}
}
complete_apps = ['forum']
| CLLKazan/iCQA | qa-engine/forum/migrations/0019_auto__del_likedcomment__del_comment__add_field_node_abs_parent__chg_fi.py | Python | gpl-3.0 | 27,368 |
# A file to contain exclusively dependencies of the NeuroML package.
# See:
# https://github.com/NeuralEnsemble/libNeuroML
# http://neuroml.org
from __future__ import print_function
from collections import defaultdict
try:
from neuroml import Cell, Segment, SegmentParent, Morphology, \
NeuroMLDocument, Point3DWithDiam
except ImportError:
print("NeuroML module could not be loaded.")
def neuroml_single_cell(skeleton_id, nodes, pre, post):
    """ Encapsulate a single skeleton into a NeuroML Cell instance.

        skeleton_id: the ID of the skeleton to which all nodes belong.
        nodes: a dictionary of node ID vs tuple of node parent ID, location as a tuple of 3 floats, and radius. In nanometers.
        pre: a dictionary of node ID vs list of connector ID
        post: a dictionary of node ID vs list of connector ID

        Returns a Cell with id=skeleton_id.
    """

    # Collect the children of every node
    successors = defaultdict(list)  # parent node ID vs list of children node IDs
    rootID = None
    for nodeID, props in nodes.items():  # items() works on both Python 2 and 3
        parentID = props[0]
        if not parentID:
            rootID = nodeID
            continue
        successors[parentID].append(nodeID)

    # Cache of Point3DWithDiam
    points = {}

    def asPoint(nodeID):
        """ Return the node as a Point3DWithDiam, in micrometers. """
        p = points.get(nodeID)
        if not p:
            props = nodes[nodeID]
            radius = props[2]
            if radius < 0:
                radius = 0.1  # FUTURE Will have to change
            loc = props[1]
            # Point in micrometers
            p = Point3DWithDiam(loc[0] / 1000.0, loc[1] / 1000.0, loc[2] / 1000.0, radius)
            points[nodeID] = p
        return p

    # Starting from the root node, iterate towards the end nodes, adding a
    # segment for each parent-child pair.
    segments = []
    segment_id = 1
    todo = [rootID]
    # Map of node ID vs the Segment whose distal point is that node.  This is
    # needed to pick the correct parent segment: the previous code used the
    # *last appended* segment (`segments[-1]`) as the parent, which is wrong
    # as soon as the skeleton branches, because the stack-based traversal
    # interleaves branches.
    segment_at = {}
    # VERY CONFUSINGLY, the Segment.parent is a SegmentParent with the same id
    # as the parent Segment. An unseemly overheady way to reference the parent
    # Segment.
    while todo:
        nodeID = todo.pop()
        children = successors[nodeID]
        if not children:
            continue
        p1 = asPoint(nodeID)
        parent = segment_at.get(nodeID)  # None for the root node
        segment_parent = SegmentParent(segments=parent.id) if parent else None
        for childID in children:
            p2 = asPoint(childID)
            segment_id += 1
            segment = Segment(proximal=p1, distal=p2, parent=segment_parent)
            segment.id = segment_id
            segment.name = "%s-%s" % (nodeID, childID)
            segments.append(segment)
            segment_at[childID] = segment
            todo.append(childID)

    # Pack the segments into a Cell
    morphology = Morphology()
    morphology.segments.extend(segments)
    morphology.id = "Skeleton #%s" % skeleton_id

    # Synapses: TODO requires input from Padraig Gleeson

    cell = Cell()
    cell.name = 'Cell'
    cell.id = skeleton_id
    cell.morphology = morphology
    return cell
def neuroml_network(cells, response):
    """ Write a list of Cell instances as a NeuroML document.

        cells: a list of Cell instances.
        response: somewhere to write to, like an HttpResponse

        Returns the response.
    """
    document = NeuroMLDocument()
    document.cells.extend(cells)
    document.id = "NeuroMLDocument"

    # Assemble the XML namespace declarations, space-separated.
    namespacedef = ' '.join((
        'xmlns="http://www.neuroml.org/schema/neuroml2"',
        'xmlns:xi="http://www.w3.org/2001/XInclude"',
        'xmlns:xs="http://www.w3.org/2001/XMLSchema"',
        'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"',
        'xsi:schemaLocation="http://www.w3.org/2001/XMLSchema"'))

    document.export(response, 0, name_="neuroml", namespacedef_=namespacedef)

    return response
| dwitvliet/CATMAID | django/applications/catmaid/control/exportneuroml.py | Python | gpl-3.0 | 3,874 |
from vec2d import vec2d
from math import e, exp, pi, cos, sin, sqrt, atan2
class Goal:
    """A goal location in the plane.

    pos is expected to expose .x and .y attributes (e.g. a vec2d).
    """

    def __init__(self, pos):
        self.pos = pos
        self.size = 8
        # Bearing of the goal as seen from the origin, in radians.
        self.theta = atan2(pos.y, pos.x)
class Landmark:
    """A point landmark, initially placed at the origin."""

    def __init__(self):
        self.pos = vec2d(0, 0)
        self.size = 4
class Pipe:
    """A straight pipe described by its two endpoints and a width."""

    def __init__(self):
        self.pos0 = vec2d(0, 0)
        self.pos1 = vec2d(0, 0)
        self.width = 3
from ase import Atom, Atoms
from gpaw import GPAW
from gpaw.test import equal
# fcc Al: lattice parameter and the edge of the tetragonal 2-atom cell.
a = 4.05
d = a / 2**0.5
bulk = Atoms([Atom('Al', (0, 0, 0)),
              Atom('Al', (0.5, 0.5, 0.5))], pbc=True)
# Positions above are fractional; scale_atoms makes them cartesian.
bulk.set_cell((d, d, a), scale_atoms=True)
h = 0.25  # NOTE(review): unused below -- presumably a leftover grid spacing
# Reference calculation: plane-wave mode, default eigensolver.
calc = GPAW(mode='pw',
            nbands=2*8,
            kpts=(2, 2, 2),
            convergence={'eigenstates': 7.2e-9, 'energy': 1e-5})
bulk.set_calculator(calc)
e0 = bulk.get_potential_energy()
niter0 = calc.get_number_of_iterations()
# Second calculation: Davidson eigensolver, converging only 5 bands.
calc = GPAW(mode='pw',
            nbands=2*8,
            kpts=(2, 2, 2),
            convergence={'eigenstates': 7.2e-9,
                         'energy': 1e-5,
                         'bands': 5},
            eigensolver='dav')
bulk.set_calculator(calc)
e1 = bulk.get_potential_energy()
niter1 = calc.get_number_of_iterations()
# Both eigensolvers must agree on the total energy.
equal(e0, e1, 5.0e-6)

# Regression checks against a known-good reference energy and the
# expected iteration-count windows for each solver.
energy_tolerance = 0.00004
niter_tolerance = 0  # NOTE(review): unused; the asserts below use ranges
equal(e0, -6.97798, energy_tolerance)
assert 10 <= niter0 <= 14, niter0
equal(e1, -6.97798, energy_tolerance)
assert 10 <= niter1 <= 24, niter1
| robwarm/gpaw-symm | gpaw/test/pw/davidson_pw.py | Python | gpl-3.0 | 1,034 |
from test_support import *

# Run GNATprove in flow-analysis-only mode for this testsuite case.
do_flow(opt=["--mode=flow"])
| ptroja/spark2014 | testsuite/gnatprove/tests/PC07-016__flow_write_object_to_ali/test.py | Python | gpl-3.0 | 56 |
####################################################################################################
#
# Patro - A Python library to make patterns for fashion design
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
####################################################################################################
"""Module to implement rectangle.
"""
####################################################################################################
__all__ = ['Rectangle2D']
####################################################################################################
import math
from .Path import Path2D
from .Primitive import Primitive2P, ClosedPrimitiveMixin, PathMixin, PolygonMixin, Primitive2DMixin
from .Segment import Segment2D
####################################################################################################
class Rectangle2D(Primitive2DMixin, ClosedPrimitiveMixin, PathMixin, PolygonMixin, Primitive2P):

    """Class to implements 2D Rectangle."""

    ##############################################

    def __init__(self, p0, p1):

        # A degenerate rectangle (p1 == p0) is currently tolerated.
        # if p1 == p0:
        #     raise ValueError('Rectangle reduced to a point')

        Primitive2P.__init__(self, p0, p1)

    ##############################################

    # Fixme: these two constructors were declared with ``self`` as the first
    # parameter while their bodies used ``cls``, so calling either of them
    # raised NameError.  The first parameter is now correctly named ``cls``.

    @classmethod
    def from_point_and_offset(cls, p0, v):
        """Build a rectangle from a corner *p0* and a diagonal offset *v*."""
        return cls(p0, p0+v)

    @classmethod
    def from_point_and_radius(cls, p0, v):
        """Build a rectangle centred on *p0* with half-diagonal *v*."""
        return cls(p0-v, p0+v)

    ##############################################

    @property
    def is_closed(self):
        """A rectangle is always a closed primitive."""
        return True

    ##############################################

    @property
    def p01(self):
        """Corner (p0.x, p1.y)."""
        return self.__vector_cls__(self._p0.x, self._p1.y)

    @property
    def p10(self):
        """Corner (p1.x, p0.y)."""
        return self.__vector_cls__(self._p1.x, self._p0.y)

    @property
    def edges(self):
        """The four edge segments, walking p0 -> p01 -> p1 -> p10 -> p0."""
        p0 = self._p0
        p1 = self.p01
        p2 = self._p1
        p3 = self.p10

        return (
            Segment2D(p0, p1),
            Segment2D(p1, p2),
            Segment2D(p2, p3),
            Segment2D(p3, p0),
        )

    ##############################################

    @property
    def diagonal(self):
        """Diagonal vector p1 - p0."""
        return self._p1 - self._p0

    ##############################################

    @property
    def perimeter(self):
        """Perimeter of the rectangle."""
        d = self.diagonal
        return 2*(abs(d.x) + abs(d.y))

    ##############################################

    @property
    def area(self):
        """Area of the rectangle."""
        d = self.diagonal
        return abs(d.x * d.y)

    ##############################################

    def is_point_inside(self, point):
        """Return True if *point* lies within the rectangle's bounding box."""
        bounding_box = self.bounding_box
        return (point.x in bounding_box.x and
                point.y in bounding_box.y)

    ##############################################

    def distance_to_point(self, point):
        raise NotImplementedError
| FabriceSalvaire/PyValentina | Patro/GeometryEngine/Rectangle.py | Python | gpl-3.0 | 3,505 |
#!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for defining ardupilot's submodules, so that they are kept up to date.
Submodules can be considered dynamic sources, since they are updated during the
build. Furthermore, they can be used to generate other dynamic sources (mavlink
headers generation, for example). Thus, the correct use of this tool should
have three build groups: first one for updating the submodules, second for
generating any dynamic source from them, and the last one for the build. And
post_mode should be set to POST_LAZY. Example::
def build(bld):
bld.post_mode = waflib.Build.POST_LAZY
bld.add_group('git_submodules')
# gtest submodule
bld(
features='git_submodule'
git_submodule='gtest',
)
# mavlink submodule with syntactic sugar
bld.git_submodule('mavlink')
...
# now, for the dynamic sources
bld.add_group('dynamic_sources')
...
# now, below go the task generators for normal build process
bld.add_group('build')
...
"""
from waflib import Context, Task, Utils
from waflib.Configure import conf
from waflib.TaskGen import before_method, feature, taskgen_method
import os.path
class update_submodule(Task.Task):
    """Waf task that runs ``git submodule update --init`` for one submodule."""
    color = 'BLUE'
    # Command template expanded by waf from the task's env variables.
    run_str = '${GIT} -C ${SRC_ROOT} submodule update --init -- ${SUBMODULE_PATH}'

    def runnable_status(self):
        # Decide whether to run by asking git directly, instead of waf's
        # default input/output signature comparison.
        e = self.env.get_flat
        cmd = e('GIT'), '-C', e('SRC_ROOT'), 'submodule', 'status', '--', e('SUBMODULE_PATH')
        out = self.generator.bld.cmd_and_log(cmd, quiet=Context.BOTH)

        # git submodule status uses a blank prefix for submodules that are up
        # to date
        if out[0] != ' ':
            return Task.RUN_ME
        return Task.SKIP_ME

    def uid(self):
        # The task's identity depends only on its class and the submodule
        # path, so re-creating the task for the same submodule dedupes.
        if not hasattr(self, 'uid_'):
            m = Utils.md5()
            m.update(self.__class__.__name__)
            m.update(self.env.get_flat('SUBMODULE_PATH'))
            self.uid_ = m.digest()

        return self.uid_

    def __str__(self):
        # 'submodule' is supplied as a keyword when the task is created in
        # git_submodule_update().
        return 'Submodule update: %s' % self.submodule
def configure(cfg):
    """Waf configure hook: locate the git executable (stored in cfg.env.GIT)."""
    cfg.find_program('git')
# Cache of update_submodule tasks keyed by submodule name, so that each
# submodule gets at most one update task per build.
_submodules_tasks = {}
@taskgen_method
def git_submodule_update(self, name):
    """Create (at most once) and return the update task for submodule *name*."""
    task = _submodules_tasks.get(name)
    if task is None:
        module_node = self.bld.srcnode.make_node(os.path.join('modules', name))

        task = self.create_task('update_submodule', submodule=name)
        task.env.SRC_ROOT = self.bld.srcnode.abspath()
        task.env.SUBMODULE_PATH = module_node.abspath()

        _submodules_tasks[name] = task
    return task
@feature('git_submodule')
@before_method('process_source')
def process_module_dependencies(self):
    """Task-generator hook: schedule the update of self.git_submodule."""
    name = getattr(self, 'git_submodule', '')
    self.git_submodule = name
    if not name:
        self.bld.fatal('git_submodule: empty or missing git_submodule argument')
    self.git_submodule_update(name)
@conf
def git_submodule(bld, git_submodule, **kw):
    """Syntactic sugar so build scripts can write ``bld.git_submodule('name')``."""
    features = Utils.to_list(kw.get('features', ''))
    features.append('git_submodule')
    kw['features'] = features
    kw['git_submodule'] = git_submodule
    return bld(**kw)
| aesaae/ardupilot_str | Tools/ardupilotwaf/git_submodule.py | Python | gpl-3.0 | 3,169 |
#!/usr/bin/env python
#coding:utf-8
##
# @file longestPalindrome.py
# @brief Find longest palindrome string
# @author unlessbamboo
# @version 1.0
# @date 2016-02-15
def secondHelper(str1, left, right):
    """Expand around the centre (left, right) and return the widest
    palindromic substring of str1 found there.

    :param str1: source string
    :param left: left index of the centre (== right for odd-length centres)
    :param right: right index of the centre (left + 1 for even-length centres)
    """
    lenStr1 = len(str1)
    # Grow outwards while both indices are in bounds and the chars match.
    # NOTE: the bound must be `right < lenStr1`; the previous `right <=
    # lenStr1` indexed one past the end of the string and raised IndexError
    # whenever the expansion (or the initial even centre) reached the last
    # character.
    while left >= 0 and right < lenStr1 and str1[left] == str1[right]:
        left -= 1
        right += 1
    # The loop overshoots by one on each side, hence the +1 / open end.
    return str1[left + 1:right]
def longestPalindromeSecond(str1):
    """Return the longest palindromic substring of str1 (method 2).

    Analysis:
        primitive operation: testing a string for symmetry, O(n)
        method 1: check every substring -> O(n^3) overall
        method 2 (this one): expand a palindrome outwards from every centre,
            testing both odd ("bab") and even ("aa") centres; O(n) per
            centre -> O(n^2) overall
        method 3: Manacher's algorithm achieves O(n) by trading space for time

    :param str1: source string
    """

    def _expand(left, right):
        # Widest palindrome obtained by growing outwards from the centre.
        # The `right < n` bound keeps indexing in range (the module-level
        # helper previously allowed right == n, which crashed).
        n = len(str1)
        while left >= 0 and right < n and str1[left] == str1[right]:
            left -= 1
            right += 1
        return str1[left + 1:right]

    res = ""
    for i in range(len(str1)):  # range (not xrange): works on Python 2 and 3
        # odd-length candidate centred on i
        tmp = _expand(i, i)
        if len(tmp) > len(res):
            res = tmp
        # even-length candidate centred between i and i+1
        tmp = _expand(i, i + 1)
        if len(tmp) > len(res):
            res = tmp
    return res
def lengthOfLongestSubstring(str1):
    """Length of the longest substring of str1 without repeating characters.

    Sliding-window technique: keep `window_start` at the left edge of the
    current duplicate-free window and remember, for every character, the
    index of its most recent occurrence.  When the current character already
    appears inside the window, move the left edge just past that occurrence;
    otherwise the window grew, so update the running maximum.

    O(n) time, O(k) extra space for k distinct characters.

    :param str1: source string
    """
    longest = 0
    window_start = 0
    last_seen = {}
    for idx, ch in enumerate(str1):
        if ch in last_seen and window_start <= last_seen[ch]:
            # ch repeats inside the current window: shrink from the left.
            window_start = last_seen[ch] + 1
        else:
            longest = max(longest, idx - window_start + 1)
        last_seen[ch] = idx
    return longest
| unlessbamboo/grocery-shop | algorithm/search/stringHandlePy.py | Python | gpl-3.0 | 2,790 |
#!/usr/bin/env python
##################################################
# Parallel MLMC: Config class #
# #
# Jun Nie #
# Last modification: 19-09-2017 #
##################################################
import sys, os
import numpy as np
class Config:
    """
    Config class which is used for the fvm solver, mlmc & parallelization.
    TODO: adding read config parameters from file.
    """
    def __init__(self, config_file):
        # === fvm solver parameters
        self.DIM = 2
        self.ORDER = 1
        self.case = 'vayu_burgers' # 'vayu_ls89', 'su2_ls89'
        # Coarsest/finest mesh resolution (nodes per direction).
        self.mesh_ncoarsest = 8+1
        self.mesh_nfinest = 128+1
        self.mesh_filename = '/home/jun/vayu/TestMatrix/Burgers.Test/mesh/' + \
            'cartesian_tube_0009x0009x2.BlockMesh'

        # === mlmc parameters
        # eps/alpha/beta/gamma: MLMC accuracy and convergence-rate
        # parameters; 0. means "to be estimated" -- TODO confirm.
        self.eps = 0.
        self.alpha = 0.
        self.beta = 0.
        self.gamma = 0.
        self.L = 2 # highest level
        self.ML = 8 # number of samples on finest level
        self.M = 2 # refinement factor
        self.SAMPLES_FACTOR = 1
        self.mlmc_convergence_test = True
        # Mutually exclusive strategies for choosing sample counts --
        # presumably only one of the three flags should be True; verify.
        self.READ_NUMBER_OF_SAMPLES_FROM_FILE = False
        self.USE_OPTIMAL_NUMBER_OF_SAMPLES = False
        self.USE_EQUIDISTRIBUTED_NUMBER_OF_SAMPLES = True
        self.COMPUTE_IN_DIFFERENCE = True

        # === qoi
        # Statistics to compute for the quantity of interest.
        self.STATS = 'MEAN_VAR'

        # === parallelization parameters
        self.multi = 'mpi' # 'mpi' for parallel, 'single' for serial
        self.MULTIN = 1 # number of processes for fvm solver, 1 or multiples of 2
        self.MULTIM = 4 # number of samplers (processor group)
        self.MULTI_CORES = 0

        # === update
        self.update(config_file)

    def update(self, config_file):
        ''' read config file and update parameters'''
        # NOTE(review): not implemented yet -- the hard-coded defaults above
        # are used regardless of config_file.
        pass
# No demo/CLI behaviour yet; the guard keeps imports side-effect free.
if __name__ == '__main__':
    pass
| henrynj/PMLMC | v0.0/config.py | Python | gpl-3.0 | 2,156 |
""" Telewall. This application connects to asterisk and provides Statis-Apps.
It has a user interface using a button, display and led. It also provides
a web interface.
The Application should be run using python 2.7, because ARI (Asterisk REST Interface) does not support python 3.
"""
| synox/telewall | telewall/telewall/__init__.py | Python | gpl-3.0 | 290 |
# coding: utf-8
__author__ = "@strizhechenko"
import sys
from morpher import Morpher
from twitterbot_utils import Twibot
from apscheduler.schedulers.blocking import BlockingScheduler
# Module-level singletons: the blocking scheduler drives the periodic jobs
# below; Twibot wraps the Twitter API; Morpher generates the phrases.
sched = BlockingScheduler()
bot = Twibot()
morphy = Morpher()
def tweets2words(tweets):
    """Join the texts of *tweets* and split the result into morphological words."""
    joined = " ".join(tweet.text for tweet in tweets)
    return morphy.process_to_words(joined)
@sched.scheduled_job('interval', minutes=15)
def do_tweets():
    # Every 15 minutes: pull tweets from the source Twitter list, extract
    # words from them and post one generated phrase per word.
    print 'New tick'
    words = tweets2words(bot.fetch_list(list_id=217926157))
    for word in words:
        tweet = morphy.word2phrase(word)
        bot.tweet(tweet)
        print 'post', tweet.encode('utf-8')
@sched.scheduled_job('interval', hours=24)
def do_wipe():
    # Once a day: delete the bot's previously posted tweets.
    print 'Wipe time'
    bot.wipe()
if __name__ == '__main__':
    # Post one batch immediately, then hand control to the scheduler.
    do_tweets()
    if '--test' in sys.argv:
        # Smoke-test mode: post once and exit without starting the scheduler.
        exit(0)
    sched.start()
| strizhechenko/twitterbots | memes_zaebali.py | Python | gpl-3.0 | 857 |
from sqlalchemy import Column, String, Integer
from BusTrack.repository import Base, session
from BusTrack.repository.models import STRING_LEN_SMALL
class UserType(Base):
    """Lookup table of user roles (Driver / Parent / Admin)."""

    __tablename__ = 'user_type'

    id = Column(Integer, primary_key=True)
    role_name = Column(String(STRING_LEN_SMALL))

    @staticmethod
    def __create_default_role__():
        # Seed the table exactly once: bail out if any role already exists.
        if session.query(UserType).count() != 0:
            return
        for name in ('Driver', 'Parent', 'Admin'):
            role = UserType()
            role.role_name = name
            session.add(role)
        session.commit()
        session.close()
| Rjtsahu/School-Bus-Tracking | BusTrack/repository/models/UserType.py | Python | gpl-3.0 | 743 |
# -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numbers
import warnings
import numpy as np
import traits.api as t
from scipy import constants
from hyperspy._signals.spectrum import Spectrum
from hyperspy.misc.elements import elements as elements_db
import hyperspy.axes
from hyperspy.decorators import only_interactive
from hyperspy.gui.eels import TEMParametersUI
from hyperspy.defaults_parser import preferences
import hyperspy.gui.messages as messagesui
from hyperspy.misc.progressbar import progressbar
from hyperspy.components import PowerLaw
from hyperspy.misc.utils import isiterable, closest_power_of_two, underline
from hyperspy.misc.utils import without_nans
class EELSSpectrum(Spectrum):
_signal_type = "EELS"
def __init__(self, *args, **kwards):
    Spectrum.__init__(self, *args, **kwards)
    # Attributes defaults
    self.subshells = set()  # subshell labels in range, e.g. 'C_K'
    self.elements = set()   # chemical symbols declared for the sample
    self.edges = list()
    # If the file carried an elemental composition, register it now so the
    # matching ionisation subshells are generated.
    if hasattr(self.metadata, 'Sample') and \
            hasattr(self.metadata.Sample, 'elements'):
        print('Elemental composition read from file')
        self.add_elements(self.metadata.Sample.elements)
    # EELS spectra are counts per energy channel, hence binned data.
    self.metadata.Signal.binned = True
def add_elements(self, elements, include_pre_edges=False):
    """Declare the elemental composition of the sample.

    The ionisation edges of the elements present in the current
    energy range will be added automatically.

    Parameters
    ----------
    elements : tuple of strings
        The symbol of the elements. Note this input must always be
        in the form of a tuple. Meaning: add_elements(('C',)) will
        work, while add_elements(('C')) will NOT work.
    include_pre_edges : bool
        If True, the ionization edges with an onset below the lower
        energy limit of the SI will be incluided

    Examples
    --------

    >>> s = signals.EELSSpectrum(np.arange(1024))
    >>> s.add_elements(('C', 'O'))
    Adding C_K subshell
    Adding O_K subshell

    Raises
    ------
    ValueError

    """
    # Reject a bare string early: iterating 'CO' would silently look up
    # the "elements" 'C' and 'O'.  (basestring: this module targets
    # Python 2.)
    if not isiterable(elements) or isinstance(elements, basestring):
        raise ValueError(
            "Input must be in the form of a tuple. For example, "
            "if `s` is the variable containing this EELS spectrum:\n "
            ">>> s.add_elements(('C',))\n"
            "See the docstring for more information.")

    for element in elements:
        if element in elements_db:
            self.elements.add(element)
        else:
            raise ValueError(
                "%s is not a valid symbol of a chemical element"
                % element)
    # Mirror the declared composition into the metadata tree.
    if not hasattr(self.metadata, 'Sample'):
        self.metadata.add_node('Sample')
    self.metadata.Sample.elements = list(self.elements)
    if self.elements:
        self.generate_subshells(include_pre_edges)
def generate_subshells(self, include_pre_edges=False):
    """Calculate the subshells for the current energy range for the
    elements present in self.elements

    Parameters
    ----------
    include_pre_edges : bool
        If True, the ionization edges with an onset below the lower
        energy limit of the SI will be included

    """
    Eaxis = self.axes_manager.signal_axes[0].axis
    if not include_pre_edges:
        start_energy = Eaxis[0]
    else:
        start_energy = 0.
    end_energy = Eaxis[-1]
    for element in self.elements:
        # Hoist the repeated triple-nested lookup of the binding-energy
        # table (it was evaluated three times per shell).
        binding_energies = \
            elements_db[element]['Atomic_properties']['Binding_energies']
        for shell in binding_energies:
            # Shell names ending in 'a' are skipped (same rule as before).
            if shell[-1] != 'a':
                onset = binding_energies[shell]['onset_energy (eV)']
                if start_energy <= onset <= end_energy:
                    subshell = '%s_%s' % (element, shell)
                    if subshell not in self.subshells:
                        # print() with a single argument behaves the same
                        # on Python 2 and 3.
                        print("Adding %s subshell" % subshell)
                        self.subshells.add(subshell)
    # Note: the previous implementation also accumulated the subshells in a
    # per-element local list (e_shells) that was never read; it was removed.
def estimate_zero_loss_peak_centre(self, mask=None):
    """Estimate the posision of the zero-loss peak.

    This function provides just a coarse estimation of the position
    of the zero-loss peak centre by computing the position of the maximum
    of the spectra. For subpixel accuracy use `estimate_shift1D`.

    Parameters
    ----------
    mask : Signal of bool data type.
        It must have signal_dimension = 0 and navigation_shape equal to the
        current signal. Where mask is True the shift is not computed
        and set to nan.

    Returns
    -------
    zlpc : Signal subclass
        The estimated position of the maximum of the ZLP peak.

    Notes
    -----
    This function only works when the zero-loss peak is the most
    intense feature in the spectrum. If it is not in most cases
    the spectrum can be cropped to meet this criterium.
    Alternatively use `estimate_shift1D`.

    See Also
    --------
    estimate_shift1D, align_zero_loss_peak

    """
    self._check_signal_dimension_equals_one()
    self._check_navigation_mask(mask)
    # Coarse estimate: axis value of the channel with the maximum counts.
    zlpc = self.valuemax(-1)
    # Re-wrap the result so it matches the navigation layout of self.
    if self.axes_manager.navigation_dimension == 1:
        zlpc = zlpc.as_spectrum(0)
    elif self.axes_manager.navigation_dimension > 1:
        zlpc = zlpc.as_image((0, 1))
    if mask is not None:
        # Masked positions are flagged with NaN rather than dropped.
        zlpc.data[mask.data] = np.nan
    return zlpc
def align_zero_loss_peak(
        self,
        calibrate=True,
        also_align=[],
        print_stats=True,
        subpixel=True,
        mask=None,
        **kwargs):
    """Align the zero-loss peak.

    This function first aligns the spectra using the result of
    `estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
    proceeds to align with subpixel accuracy using `align1D`. The offset
    is automatically correct if `calibrate` is True.

    Parameters
    ----------
    calibrate : bool
        If True, set the offset of the spectral axis so that the
        zero-loss peak is at position zero.
    also_align : list of signals
        A list containing other spectra of identical dimensions to
        align using the shifts applied to the current spectrum.
        If `calibrate` is True, the calibration is also applied to
        the spectra in the list.
    print_stats : bool
        If True, print summary statistics the ZLP maximum before
        the aligment.
    subpixel : bool
        If True, perform the alignment with subpixel accuracy
        using cross-correlation.
    mask : Signal of bool data type.
        It must have signal_dimension = 0 and navigation_shape equal to the
        current signal. Where mask is True the shift is not computed
        and set to nan.

    See Also
    --------
    estimate_zero_loss_peak_centre, align1D, estimate_shift1D.

    Notes
    -----
    Any extra keyword arguments are passed to `align1D`. For
    more information read its docstring.

    """
    # NOTE(review): also_align=[] is a mutable default; harmless here
    # because it is never mutated (only used in `also_align + [self]`).

    def substract_from_offset(value, signals):
        # Shift the energy-axis origin of every signal by -value.
        for signal in signals:
            signal.axes_manager[-1].offset -= value

    # Coarse pass: shift every spectrum so all ZLP maxima coincide with
    # the mean ZLP position.
    zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
    mean_ = without_nans(zlpc.data).mean()
    if print_stats is True:
        print  # blank line (Python 2 print statement)
        print(underline("Initial ZLP position statistics"))
        zlpc.print_summary_statistics()
    for signal in also_align + [self]:
        signal.shift1D(-zlpc.data + mean_)
    if calibrate is True:
        # Re-estimate after shifting and move the axis origin onto the ZLP.
        zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
        substract_from_offset(without_nans(zlpc.data).mean(),
                              also_align + [self])

    if subpixel is False:
        return
    # Fine pass: cross-correlation alignment restricted to a window of
    # +/- 3 axis units around the (possibly uncalibrated) ZLP position.
    left, right = -3., 3.
    if calibrate is False:
        mean_ = without_nans(self.estimate_zero_loss_peak_centre(
            mask=mask).data).mean()
        left += mean_
        right += mean_

    # Clamp the window to the available axis range.
    left = (left if left > self.axes_manager[-1].axis[0]
            else self.axes_manager[-1].axis[0])
    right = (right if right < self.axes_manager[-1].axis[-1]
             else self.axes_manager[-1].axis[-1])
    self.align1D(left, right, also_align=also_align, **kwargs)
    zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
    if calibrate is True:
        # Final recalibration after the subpixel alignment.
        substract_from_offset(without_nans(zlpc.data).mean(),
                              also_align + [self])
def estimate_elastic_scattering_intensity(self,
                                          threshold):
    """Rough estimation of the elastic scattering intensity by
    truncation of a EELS low-loss spectrum.

    Parameters
    ----------
    threshold : {Signal, float, int}
        Truncation energy to estimate the intensity of the
        elastic scattering. The
        threshold can be provided as a signal of the same dimension
        as the input spectrum navigation space containing the
        threshold value in the energy units. Alternatively a constant
        threshold can be specified in energy/index units by passing
        float/int.

    Returns
    -------
    I0: Signal
        The elastic scattering intensity.

    See Also
    --------
    estimate_elastic_scattering_threshold

    """
    # TODO: Write units tests
    self._check_signal_dimension_equals_one()
    if isinstance(threshold, numbers.Number):
        # Constant threshold: a single slice + integration does the job.
        I0 = self.isig[:threshold].integrate1D(-1)
        I0.axes_manager.set_signal_dimension(
            min(2, self.axes_manager.navigation_dimension))
    else:
        # Per-pixel threshold signal: iterate over navigation space,
        # temporarily flattening the axes to scalar signals.  The original
        # 'navigate' flags are saved and restored afterwards.
        bk_threshold_navigate = (
            threshold.axes_manager._get_axis_attribute_values('navigate'))
        threshold.axes_manager.set_signal_dimension(0)
        I0 = self._get_navigation_signal()
        bk_I0_navigate = (
            I0.axes_manager._get_axis_attribute_values('navigate'))
        I0.axes_manager.set_signal_dimension(0)
        pbar = hyperspy.misc.progressbar.progressbar(
            maxval=self.axes_manager.navigation_size)
        for i, s in enumerate(self):
            threshold_ = threshold[self.axes_manager.indices].data[0]
            if np.isnan(threshold_):
                # NaN threshold (e.g. masked pixel) propagates to NaN I0.
                I0[self.axes_manager.indices] = np.nan
            else:
                I0[self.axes_manager.indices].data[:] = (
                    s[:threshold_].integrate1D(-1).data)
            pbar.update(i)
        pbar.finish()
        threshold.axes_manager._set_axis_attribute_values(
            'navigate',
            bk_threshold_navigate)
        I0.axes_manager._set_axis_attribute_values(
            'navigate',
            bk_I0_navigate)
    # Carry over descriptive metadata to the result.
    I0.metadata.General.title = (
        self.metadata.General.title + ' elastic intensity')
    if self.tmp_parameters.has_item('filename'):
        I0.tmp_parameters.filename = (
            self.tmp_parameters.filename +
            '_elastic_intensity')
        I0.tmp_parameters.folder = self.tmp_parameters.folder
        I0.tmp_parameters.extension = \
            self.tmp_parameters.extension
    return I0
def estimate_elastic_scattering_threshold(self,
                                          window=10.,
                                          tol=None,
                                          number_of_points=5,
                                          polynomial_order=3,
                                          start=1.):
    """Calculate the first inflexion point of the spectrum derivative
    within a window.
    This method assumes that the zero-loss peak is located at position zero
    in all the spectra. Currently it looks for an inflexion point, that can
    be a local maximum or minimum. Therefore, to estimate the elastic
    scattering threshold `start` + `window` must be less than the first
    maximum for all spectra (often the bulk plasmon maximum). If there is
    more than one inflexion point in energy the window it selects the
    smoother one what, often, but not always, is a good choice in this
    case.
    Parameters
    ----------
    window : {None, float}
        If None, the search for the local inflexion point is performed
        using the full energy range. A positive float will restrict
        the search to the (0,window] energy window, where window is given
        in the axis units. If no inflexion point is found in this
        spectral range the window value is returned instead.
    tol : {None, float}
        The threshold tolerance for the derivative. If None it is
        automatically calculated as the minimum value that guarantees
        finding an inflexion point in all the spectra in given energy
        range.
    number_of_points : int
        If non zero performs order three Savitzky-Golay smoothing
        to the data to avoid falling in local minima caused by
        the noise.
    polynomial_order : int
        Savitzky-Golay filter polynomial order.
    start : float
        Position from the zero-loss peak centre from where to start
        looking for the inflexion point.
    Returns
    -------
    threshold : Signal
        A Signal of the same dimension as the input spectrum
        navigation space containing the estimated threshold. Where the
        threshold couldn't be estimated the value is set to nan.
    See Also
    --------
    estimate_elastic_scattering_intensity,align_zero_loss_peak,
    find_peaks1D_ohaver, fourier_ratio_deconvolution.
    Notes
    -----
    The main purpose of this method is to be used as input for
    `estimate_elastic_scattering_intensity`. Indeed, for currently
    achievable energy resolutions, there is not such a thing as a elastic
    scattering threshold. Therefore, please be aware of the limitations of
    this method when using it.
    """
    self._check_signal_dimension_equals_one()
    # Create threshold with the same shape as the navigation dims.
    threshold = self._get_navigation_signal()
    threshold.axes_manager.set_signal_dimension(0)
    # Restrict the search to the (start, start + window] energy range.
    axis = self.axes_manager.signal_axes[0]
    min_index, max_index = axis.value_range_to_indices(start,
                                                       start + window)
    if max_index < min_index + 10:
        raise ValueError("Please select a bigger window")
    s = self.isig[min_index:max_index].deepcopy()
    if number_of_points:
        # Smoothing + first derivative in one Savitzky-Golay pass.
        s.smooth_savitzky_golay(polynomial_order=polynomial_order,
                                number_of_points=number_of_points,
                                differential_order=1)
    else:
        s = s.diff(-1)
    if tol is None:
        # Smallest tolerance that guarantees finding an inflexion point
        # in every spectrum of the dataset.
        tol = np.max(np.abs(s.data).min(axis.index_in_array))
    saxis = s.axes_manager[-1]
    # First channel where |derivative| drops below the tolerance.
    inflexion = (np.abs(s.data) <= tol).argmax(saxis.index_in_array)
    threshold.data[:] = saxis.index2value(inflexion)
    if isinstance(inflexion, np.ndarray):
        # argmax == 0 means no channel satisfied the condition.
        threshold.data[inflexion == 0] = np.nan
    else:  # Single spectrum
        if inflexion == 0:
            threshold.data[:] = np.nan
    del s
    if np.isnan(threshold.data).any():
        # FIX: grammar of the user-facing warning ("could we found" ->
        # "could be found").
        warnings.warn("No inflexion point could be found in some positions "
                      "that have been marked with nans.")
    # Create spectrum image, stop and return value
    threshold.metadata.General.title = (
        self.metadata.General.title +
        ' ZLP threshold')
    if self.tmp_parameters.has_item('filename'):
        threshold.tmp_parameters.filename = (
            self.tmp_parameters.filename +
            '_ZLP_threshold')
        threshold.tmp_parameters.folder = self.tmp_parameters.folder
        threshold.tmp_parameters.extension = \
            self.tmp_parameters.extension
    threshold.axes_manager.set_signal_dimension(
        min(2, self.axes_manager.navigation_dimension))
    return threshold
def estimate_thickness(self,
                       threshold,
                       zlp=None,):
    """Estimates the thickness (relative to the mean free path)
    of a sample using the log-ratio method.
    The current EELS spectrum must be a low-loss spectrum containing
    the zero-loss peak. The hyperspectrum must be well calibrated
    and aligned.
    Parameters
    ----------
    threshold : {Signal, float, int}
        Truncation energy to estimate the intensity of the
        elastic scattering. The
        threshold can be provided as a signal of the same dimension
        as the input spectrum navigation space containing the
        threshold value in the energy units. Alternatively a constant
        threshold can be specified in energy/index units by passing
        float/int.
    zlp : {None, EELSSpectrum}
        If not None the zero-loss
        peak intensity is calculated from the ZLP spectrum
        supplied by integration using Simpson's rule. If None estimates
        the zero-loss peak intensity using
        `estimate_elastic_scattering_intensity` by truncation.
    Returns
    -------
    s : Signal
        The thickness relative to the MFP. It returns a Spectrum,
        Image or a Signal, depending on the currenct spectrum navigation
        dimensions.
    Notes
    -----
    For details see: Egerton, R. Electron Energy-Loss
    Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
    """
    # TODO: Write units tests
    self._check_signal_dimension_equals_one()
    axis = self.axes_manager.signal_axes[0]
    # Total integrated intensity of the whole spectrum (I_t).
    total_intensity = self.integrate1D(axis.index_in_array).data
    if zlp is not None:
        # Zero-loss intensity from the user-supplied ZLP spectrum.
        I0 = zlp.integrate1D(axis.index_in_array).data
    else:
        # Estimate the zero-loss intensity by truncation at `threshold`.
        I0 = self.estimate_elastic_scattering_intensity(
            threshold=threshold,).data
    # Log-ratio method: t / lambda = ln(I_t / I_0).
    t_over_lambda = np.log(total_intensity / I0)
    s = self._get_navigation_signal()
    s.data = t_over_lambda
    s.metadata.General.title = (self.metadata.General.title +
                                ' $\\frac{t}{\\lambda}$')
    if self.tmp_parameters.has_item('filename'):
        s.tmp_parameters.filename = (
            self.tmp_parameters.filename +
            '_relative_thickness')
        s.tmp_parameters.folder = self.tmp_parameters.folder
        s.tmp_parameters.extension = \
            self.tmp_parameters.extension
    return s
def fourier_log_deconvolution(self,
                              zlp,
                              add_zlp=False,
                              crop=False):
    """Performs fourier-log deconvolution.
    Parameters
    ----------
    zlp : EELSSpectrum
        The corresponding zero-loss peak.
    add_zlp : bool
        If True, adds the ZLP to the deconvolved spectrum
    crop : bool
        If True crop the spectrum to leave out the channels that
        have been modified to decay smoothly to zero at the sides
        of the spectrum.
    Returns
    -------
    An EELSSpectrum containing the current data deconvolved.
    Notes
    -----
    For details see: Egerton, R. Electron Energy-Loss
    Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
    """
    self._check_signal_dimension_equals_one()
    s = self.deepcopy()
    zlp_size = zlp.axes_manager.signal_axes[0].size
    self_size = self.axes_manager.signal_axes[0].size
    # Taper the spectrum edges to zero to limit FFT edge artefacts; the
    # number of modified channels is used below when `crop` is True.
    tapped_channels = s.hanning_taper()
    # Conservative new size to solve the wrap-around problem
    size = zlp_size + self_size - 1
    # Increase to the closest power of two to enhance the FFT
    # performance
    size = closest_power_of_two(size)
    axis = self.axes_manager.signal_axes[0]
    z = np.fft.rfft(zlp.data, n=size, axis=axis.index_in_array)
    j = np.fft.rfft(s.data, n=size, axis=axis.index_in_array)
    # Fourier-log formula: S1 = Z * log(J / Z).  nan_to_num guards the
    # result against non-finite values produced by the log.
    j1 = z * np.nan_to_num(np.log(j / z))
    sdata = np.fft.irfft(j1, axis=axis.index_in_array)
    # Keep only the original number of channels (drop the FFT padding).
    s.data = sdata[s.axes_manager._get_data_slice(
        [(axis.index_in_array, slice(None, self_size)), ])]
    if add_zlp is True:
        if self_size >= zlp_size:
            s.data[s.axes_manager._get_data_slice(
                [(axis.index_in_array, slice(None, zlp_size)), ])
            ] += zlp.data
        else:
            s.data += zlp.data[s.axes_manager._get_data_slice(
                [(axis.index_in_array, slice(None, self_size)), ])]
    s.metadata.General.title = (s.metadata.General.title +
                                ' after Fourier-log deconvolution')
    if s.tmp_parameters.has_item('filename'):
        s.tmp_parameters.filename = (
            self.tmp_parameters.filename +
            '_after_fourier_log_deconvolution')
    if crop is True:
        # Remove the channels altered by the Hanning taper.
        s.crop(axis.index_in_axes_manager,
               None, int(-tapped_channels))
    return s
def fourier_ratio_deconvolution(self, ll,
                                fwhm=None,
                                threshold=None,
                                extrapolate_lowloss=True,
                                extrapolate_coreloss=True):
    """Performs Fourier-ratio deconvolution.
    The core-loss should have the background removed. To reduce
    the noise amplication the result is convolved with a
    Gaussian function.
    Parameters
    ----------
    ll: EELSSpectrum
        The corresponding low-loss (ll) EELSSpectrum.
    fwhm : float or None
        Full-width half-maximum of the Gaussian function by which
        the result of the deconvolution is convolved. It can be
        used to select the final SNR and spectral resolution. If
        None, the FWHM of the zero-loss peak of the low-loss is
        estimated and used.
    threshold : {None, float}
        Truncation energy to estimate the intensity of the
        elastic scattering. If None the threshold is taken as the
        first minimum after the ZLP centre.
    extrapolate_lowloss, extrapolate_coreloss : bool
        If True the signals are extrapolated using a power law,
    Notes
    -----
    For details see: Egerton, R. Electron Energy-Loss
    Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
    """
    self._check_signal_dimension_equals_one()
    orig_cl_size = self.axes_manager.signal_axes[0].size
    if threshold is None:
        threshold = ll.estimate_elastic_scattering_threshold()
    # Power-law extrapolation reduces FFT wrap-around artefacts.
    if extrapolate_coreloss is True:
        cl = self.power_law_extrapolation(
            window_size=20,
            extrapolation_size=100)
    else:
        cl = self.deepcopy()
    if extrapolate_lowloss is True:
        ll = ll.power_law_extrapolation(
            window_size=100,
            extrapolation_size=100)
    else:
        ll = ll.deepcopy()
    ll.hanning_taper()
    cl.hanning_taper()
    ll_size = ll.axes_manager.signal_axes[0].size
    cl_size = self.axes_manager.signal_axes[0].size
    # Conservative new size to solve the wrap-around problem
    size = ll_size + cl_size - 1
    # Increase to the closest multiple of two to enhance the FFT
    # performance
    size = int(2 ** np.ceil(np.log2(size)))
    axis = ll.axes_manager.signal_axes[0]
    if fwhm is None:
        # Signal objects are callable (see e.g. `psf()` in
        # richardson_lucy_deconvolution); the trailing () fetches the
        # data of the estimated-width signal at the current indices.
        fwhm = float(ll.get_current_signal().estimate_peak_width()())
        print("FWHM = %1.2f" % fwhm)
    I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)
    I0 = I0.data
    if ll.axes_manager.navigation_size > 0:
        # Add a length-1 signal axis so I0 broadcasts over the data.
        I0_shape = list(I0.shape)
        I0_shape.insert(axis.index_in_array, 1)
        I0 = I0.reshape(I0_shape)
    from hyperspy.components import Gaussian
    g = Gaussian()
    g.sigma.value = fwhm / 2.3548
    g.A.value = 1
    g.centre.value = 0
    # Sampled Gaussian used as the reconvolution kernel.
    zl = g.function(
        np.linspace(axis.offset,
                    axis.offset + axis.scale * (size - 1),
                    size))
    z = np.fft.rfft(zl)
    jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array)
    jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array)
    zshape = [1, ] * len(cl.data.shape)
    zshape[axis.index_in_array] = jk.shape[axis.index_in_array]
    # Fourier-ratio formula: SSD = IFFT(G * J_core / J_low), scaled by
    # the elastic intensity I0.
    cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl,
                           axis=axis.index_in_array)
    cl.data *= I0
    cl.crop(-1, None, int(orig_cl_size))
    cl.metadata.General.title = (self.metadata.General.title +
                                 ' after Fourier-ratio deconvolution')
    if cl.tmp_parameters.has_item('filename'):
        cl.tmp_parameters.filename = (
            self.tmp_parameters.filename +
            'after_fourier_ratio_deconvolution')
    return cl
def richardson_lucy_deconvolution(self, psf, iterations=15,
                                  mask=None):
    """1D Richardson-Lucy Poissonian deconvolution of
    the spectrum by the given kernel.
    Parameters
    ----------
    iterations: int
        Number of iterations of the deconvolution. Note that
        increasing the value will increase the noise amplification.
    psf: EELSSpectrum
        It must have the same signal dimension as the current
        spectrum and a spatial dimension of 0 or the same as the
        current spectrum.
    Notes:
    -----
    For details on the algorithm see Gloter, A., A. Douiri,
    M. Tence, and C. Colliex. “Improving Energy Resolution of
    EELS Spectra: An Alternative to the Monochromator Solution.”
    Ultramicroscopy 96, no. 3–4 (September 2003): 385–400.
    """
    # NOTE(review): the `mask` parameter is currently unused in this
    # implementation.
    self._check_signal_dimension_equals_one()
    ds = self.deepcopy()
    ds.data = ds.data.copy()
    ds.metadata.General.title += (
        ' after Richardson-Lucy deconvolution %i iterations' %
        iterations)
    if ds.tmp_parameters.has_item('filename'):
        ds.tmp_parameters.filename += (
            '_after_R-L_deconvolution_%iiter' % iterations)
    psf_size = psf.axes_manager.signal_axes[0].size
    # `kernel` is the PSF data at the current navigation position;
    # `imax` is the index of its maximum (the kernel centre).
    kernel = psf()
    imax = kernel.argmax()
    j = 0
    maxval = self.axes_manager.navigation_size
    if maxval > 0:
        pbar = progressbar(maxval=maxval)
    for D in self:
        D = D.data.copy()
        if psf.axes_manager.navigation_dimension != 0:
            # Spatially varying PSF: fetch the kernel for this position.
            kernel = psf(axes_manager=self.axes_manager)
            imax = kernel.argmax()
        s = ds(axes_manager=self.axes_manager)
        mimax = psf_size - 1 - imax
        O = D.copy()
        # Richardson-Lucy multiplicative update:
        # O <- O * (K~ conv (D / (K conv O))), K~ = mirrored kernel.
        for i in xrange(iterations):
            first = np.convolve(kernel, O)[imax: imax + psf_size]
            O = O * (np.convolve(kernel[::-1],
                                 D / first)[mimax: mimax + psf_size])
        s[:] = O
        j += 1
        if maxval > 0:
            pbar.update(j)
    if maxval > 0:
        pbar.finish()
    return ds
def _are_microscope_parameters_missing(self):
    """Check whether the EELS acquisition parameters required to
    calculate the GOS are present in metadata.
    Returns False when all parameters are set.  When parameters are
    missing: in interactive mode an information dialog is shown and,
    if accepted, the parameter wizard is launched (the method then
    returns None, as the original implementation did); otherwise
    True is returned.
    """
    required = (
        'Acquisition_instrument.TEM.convergence_angle',
        'Acquisition_instrument.TEM.beam_energy',
        'Acquisition_instrument.TEM.Detector.EELS.collection_angle',)
    missing_parameters = [item for item in required
                          if self.metadata.has_item(item) is False]
    if not missing_parameters:
        return False
    if preferences.General.interactive is not True:
        return True
    par_str = "The following parameters are missing:\n"
    par_str += ''.join('%s\n' % par for par in missing_parameters)
    par_str += 'Please set them in the following wizard'
    if messagesui.information(par_str):
        # User accepted: open the wizard.  No explicit return value
        # (None), matching the historical behaviour of this branch.
        self._set_microscope_parameters()
    else:
        return True
def set_microscope_parameters(self,
                              beam_energy=None,
                              convergence_angle=None,
                              collection_angle=None):
    """Store the microscope parameters that are necessary to calculate
    the GOS.
    Any argument left as None is not written to metadata.  After
    storing, the method checks whether parameters are still missing
    and, in interactive mode, raises a UI wizard to fill them.
    Parameters
    ----------
    beam_energy: float
        The energy of the electron beam in keV
    convergence_angle : float
        In mrad.
    collection_angle : float
        In mrad.
    """
    updates = (
        ("Acquisition_instrument.TEM.beam_energy", beam_energy),
        ("Acquisition_instrument.TEM.convergence_angle",
         convergence_angle),
        ("Acquisition_instrument.TEM.Detector.EELS.collection_angle",
         collection_angle),
    )
    for item_path, value in updates:
        if value is not None:
            self.metadata.set_item(item_path, value)
    self._are_microscope_parameters_missing()
@only_interactive
def _set_microscope_parameters(self):
    """Open a traitsui wizard (TEMParametersUI) to edit the EELS
    acquisition parameters and write the edited values back to
    metadata.  Only runs in interactive mode (see decorator)."""
    tem_par = TEMParametersUI()
    # Map metadata paths to the wizard's attribute names (as strings,
    # consumed by exec below).
    mapping = {
        'Acquisition_instrument.TEM.convergence_angle': 'tem_par.convergence_angle',
        'Acquisition_instrument.TEM.beam_energy': 'tem_par.beam_energy',
        'Acquisition_instrument.TEM.Detector.EELS.collection_angle': 'tem_par.collection_angle', }
    # HACK: exec is used to assign to the attribute named by `value`,
    # pre-filling the wizard with any values already in metadata.
    for key, value in mapping.iteritems():
        if self.metadata.has_item(key):
            exec('%s = self.metadata.%s' % (value, key))
    tem_par.edit_traits()
    mapping = {
        'Acquisition_instrument.TEM.convergence_angle': tem_par.convergence_angle,
        'Acquisition_instrument.TEM.beam_energy': tem_par.beam_energy,
        'Acquisition_instrument.TEM.Detector.EELS.collection_angle': tem_par.collection_angle, }
    # Copy back only the values the user actually set (t.Undefined
    # marks fields left blank in the UI).
    for key, value in mapping.iteritems():
        if value != t.Undefined:
            self.metadata.set_item(key, value)
    # Re-check: warns again (or re-opens the wizard) if still missing.
    self._are_microscope_parameters_missing()
def power_law_extrapolation(self,
                            window_size=20,
                            extrapolation_size=1024,
                            add_noise=False,
                            fix_neg_r=False):
    """Extrapolate the spectrum to the right using a powerlaw
    Parameters
    ----------
    window_size : int
        The number of channels from the right side of the
        spectrum that are used to estimate the power law
        parameters.
    extrapolation_size : int
        Size of the extrapolation in number of channels
    add_noise : bool
        If True, add poissonian noise to the extrapolated spectrum.
    fix_neg_r : bool
        If True, the negative values for the "components.PowerLaw"
        parameter r will be flagged and the extrapolation will be
        done with a constant zero-value.
    Returns
    -------
    A new spectrum, with the extrapolation.
    """
    # NOTE(review): `add_noise` is documented but currently unused in
    # this body.
    self._check_signal_dimension_equals_one()
    axis = self.axes_manager.signal_axes[0]
    s = self.deepcopy()
    s.metadata.General.title += (
        ' %i channels extrapolated' %
        extrapolation_size)
    if s.tmp_parameters.has_item('filename'):
        s.tmp_parameters.filename += (
            '_%i_channels_extrapolated' % extrapolation_size)
    # Allocate a wider data array and copy the original data into the
    # left part; the right part is filled by the extrapolation below.
    new_shape = list(self.data.shape)
    new_shape[axis.index_in_array] += extrapolation_size
    s.data = np.zeros((new_shape))
    s.get_dimensions_from_data()
    s.data[..., :axis.size] = self.data
    # Fit A * x**-r to the last `window_size` channels.
    pl = PowerLaw()
    pl._axes_manager = self.axes_manager
    pl.estimate_parameters(
        s, axis.index2value(axis.size - window_size),
        axis.index2value(axis.size - 1))
    if fix_neg_r is True:
        # Zero the amplitude wherever the fitted exponent is negative,
        # so those positions extrapolate to a constant zero.
        _r = pl.r.map['values']
        _A = pl.A.map['values']
        _A[_r <= 0] = 0
        pl.A.map['values'] = _A
    # If the signal is binned we need to bin the extrapolated power law
    # what, in a first approximation, can be done by multiplying by the
    # axis step size.
    if self.metadata.Signal.binned is True:
        factor = s.axes_manager[-1].scale
    else:
        factor = 1
    s.data[..., axis.size:] = (
        factor * pl.A.map['values'][..., np.newaxis] *
        s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size:] ** (
            -pl.r.map['values'][..., np.newaxis]))
    return s
def kramers_kronig_analysis(self,
                            zlp=None,
                            iterations=1,
                            n=None,
                            t=None,
                            delta=0.5,
                            full_output=False):
    """Calculate the complex
    dielectric function from a single scattering distribution (SSD) using
    the Kramers-Kronig relations.
    It uses the FFT method as in [Egerton2011]_. The SSD is an
    EELSSpectrum instance containing SSD low-loss EELS with no zero-loss
    peak. The internal loop is devised to approximately subtract the
    surface plasmon contribution supposing an unoxidized planar surface and
    neglecting coupling between the surfaces. This method does not account
    for retardation effects, instrumental broadening and surface plasmon
    excitation in particles.
    Note that either refractive index or thickness are required.
    If both are None or if both are provided an exception is raised.
    Parameters
    ----------
    zlp: {None, number, Signal}
        ZLP intensity. It is optional (can be None) if `t` is None and `n`
        is not None and the thickness estimation is not required. If `t`
        is not None, the ZLP is required to perform the normalization and
        if `t` is not None, the ZLP is required to calculate the thickness.
        If the ZLP is the same for all spectra, the integral of the ZLP
        can be provided as a number. Otherwise, if the ZLP intensity is not
        the same for all spectra, it can be provided as i) a Signal
        of the same dimensions as the current signal containing the ZLP
        spectra for each location ii) a Signal of signal dimension 0
        and navigation_dimension equal to the current signal containing the
        integrated ZLP intensity.
    iterations: int
        Number of the iterations for the internal loop to remove the
        surface plasmon contribution. If 1 the surface plasmon contribution
        is not estimated and subtracted (the default is 1).
    n: {None, float}
        The medium refractive index. Used for normalization of the
        SSD to obtain the energy loss function. If given the thickness
        is estimated and returned. It is only required when `t` is None.
    t: {None, number, Signal}
        The sample thickness in nm. Used for normalization of the
        SSD to obtain the energy loss function. It is only required when
        `n` is None. If the thickness is the same for all spectra it can be
        given by a number. Otherwise, it can be provided as a Signal with
        signal dimension 0 and navigation_dimension equal to the current
        signal.
    delta : float
        A small number (0.1-0.5 eV) added to the energy axis in
        specific steps of the calculation the surface loss correction to
        improve stability.
    full_output : bool
        If True, return a dictionary that contains the estimated
        thickness if `t` is None and the estimated surface plasmon
        excitation and the spectrum corrected from surface plasmon
        excitations if `iterations` > 1.
    Returns
    -------
    eps: DielectricFunction instance
        The complex dielectric function results,
            $\epsilon = \epsilon_1 + i*\epsilon_2$,
        contained in an DielectricFunction instance.
    output: Dictionary (optional)
        A dictionary of optional outputs with the following keys:
        ``thickness``
            The estimated thickness in nm calculated by normalization of
            the SSD (only when `t` is None)
        ``surface plasmon estimation``
            The estimated surface plasmon excitation (only if
            `iterations` > 1.)
    Raises
    ------
    ValueError
        If both `n` and `t` are undefined (None).
    AttributeError
        If the beam_energy or the collection angle are not defined in
        metadata.
    Notes
    -----
    This method is based in Egerton's Matlab code [Egerton2011]_ with some
    minor differences:
    * The integrals are performed using the Simpson rule instead of using
    a summation.
    * The wrap-around problem when computing the ffts is workarounded by
    padding the signal instead of substracting the reflected tail.
    .. [Egerton2011] Ray Egerton, "Electron Energy-Loss
    Spectroscopy in the Electron Microscope", Springer-Verlag, 2011.
    """
    output = {}
    if iterations == 1:
        # In this case s.data is not modified so there is no need to make
        # a deep copy.
        s = self.isig[0.:]
    else:
        s = self.isig[0.:].deepcopy()
        sorig = self.isig[0.:]
    # Avoid singularity at 0
    if s.axes_manager.signal_axes[0].axis[0] == 0:
        s = s.isig[1:]
        sorig = self.isig[1:]
    # Constants and units
    me = constants.value(
        'electron mass energy equivalent in MeV') * 1e3  # keV
    # Mapped parameters
    # NOTE(review): the bare `except:` clauses below also swallow
    # KeyboardInterrupt/SystemExit; consider narrowing them.
    try:
        e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
    except:
        raise AttributeError("Please define the beam energy."
                             "You can do this e.g. by using the "
                             "set_microscope_parameters method")
    try:
        beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle
    except:
        raise AttributeError("Please define the collection angle."
                             "You can do this e.g. by using the "
                             "set_microscope_parameters method")
    axis = s.axes_manager.signal_axes[0]
    eaxis = axis.axis.copy()
    # Normalise the ZLP input to an integrated intensity array `i0`.
    if isinstance(zlp, hyperspy.signal.Signal):
        if (zlp.axes_manager.navigation_dimension ==
                self.axes_manager.navigation_dimension):
            if zlp.axes_manager.signal_dimension == 0:
                i0 = zlp.data
            else:
                i0 = zlp.integrate1D(axis.index_in_axes_manager).data
        else:
            raise ValueError('The ZLP signal dimensions are not '
                             'compatible with the dimensions of the '
                             'low-loss signal')
        # Insert a length-1 signal axis so i0 broadcasts over the data.
        i0 = i0.reshape(
            np.insert(i0.shape, axis.index_in_array, 1))
    elif isinstance(zlp, numbers.Number):
        i0 = zlp
    else:
        raise ValueError('The zero-loss peak input is not valid.')
    # Normalise the thickness input to a broadcastable array.
    if isinstance(t, hyperspy.signal.Signal):
        if (t.axes_manager.navigation_dimension ==
                self.axes_manager.navigation_dimension) and (
                t.axes_manager.signal_dimension == 0):
            t = t.data
            t = t.reshape(
                np.insert(t.shape, axis.index_in_array, 1))
        else:
            raise ValueError('The thickness signal dimensions are not '
                             'compatible with the dimensions of the '
                             'low-loss signal')
    elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):
        raise ValueError("thickness must be a HyperSpy signal or a number,"
                         " not a numpy array.")
    # Slicer to get the signal data from 0 to axis.size
    slicer = s.axes_manager._get_data_slice(
        [(axis.index_in_array, slice(None, axis.size)), ])
    # Kinetic definitions
    ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2
    tgt = e0 * (2 * me + e0) / (me + e0)
    rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)
    for io in range(iterations):
        # Calculation of the ELF by normalization of the SSD
        # Norm(SSD) = Imag(-1/epsilon) (Energy Loss Funtion, ELF)
        # We start by the "angular corrections"
        Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale
        if n is None and t is None:
            raise ValueError("The thickness and the refractive index are "
                             "not defined. Please provide one of them.")
        elif n is not None and t is not None:
            raise ValueError("Please provide the refractive index OR the "
                             "thickness information, not both")
        elif n is not None:
            # normalize using the refractive index.
            K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale
            K = (K / (np.pi / 2) / (1 - 1. / n ** 2)).reshape(
                np.insert(K.shape, axis.index_in_array, 1))
            # Calculate the thickness only if possible and required
            if zlp is not None and (full_output is True or
                                    iterations > 1):
                te = (332.5 * K * ke / i0)
                if full_output is True:
                    output['thickness'] = te
        elif t is not None:
            if zlp is None:
                raise ValueError("The ZLP must be provided when the "
                                 "thickness is used for normalization.")
            # normalize using the thickness
            K = t * i0 / (332.5 * ke)
            te = t
        Im = Im / K
        # Kramers Kronig Transform:
        # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT
        # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490
        # Use a size that is a power of two to speed up the fft and
        # make it double the closest upper value to workaround the
        # wrap-around problem.
        esize = 2 * closest_power_of_two(axis.size)
        q = -2 * np.fft.fft(Im, esize,
                            axis.index_in_array).imag / esize
        q[slicer] *= -1
        q = np.fft.fft(q, axis=axis.index_in_array)
        # Final touch, we have Re(1/eps)
        Re = q[slicer].real + 1
        # Egerton does this to correct the wrap-around problem, but in our
        # case this is not necessary because we compute the fft on an
        # extended and padded spectrum to avoid this problem.
        # Re=real(q)
        # Tail correction
        # vm=Re[axis.size-1]
        # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /
        # (axis.size*2-arange(0,axis.size-1)))**2)
        # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /
        # (axis.size+arange(0,axis.size)))**2)
        # Epsilon appears:
        # We calculate the real and imaginary parts of the CDF
        e1 = Re / (Re ** 2 + Im ** 2)
        e2 = Im / (Re ** 2 + Im ** 2)
        if iterations > 1 and zlp is not None:
            # Surface losses correction:
            # Calculates the surface ELF from a vaccumm border effect
            # A simulated surface plasmon is subtracted from the ELF
            Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im
            adep = (tgt / (eaxis + delta) *
                    np.arctan(beta * tgt / axis.axis) -
                    beta / 1000. /
                    (beta ** 2 + axis.axis ** 2. / tgt ** 2))
            Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale
            s.data = sorig.data - Srfint
            print 'Iteration number: ', io + 1, '/', iterations
            if iterations == io + 1 and full_output is True:
                sp = sorig._deepcopy_with_new_data(Srfint)
                sp.metadata.General.title += (
                    " estimated surface plasmon excitation.")
                output['surface plasmon estimation'] = sp
                del sp
            del Srfint
    # Package the result as a DielectricFunction signal.
    eps = s._deepcopy_with_new_data(e1 + e2 * 1j)
    del s
    eps.set_signal_type("DielectricFunction")
    eps.metadata.General.title = (self.metadata.General.title +
                                  'dielectric function '
                                  '(from Kramers-Kronig analysis)')
    if eps.tmp_parameters.has_item('filename'):
        eps.tmp_parameters.filename = (
            self.tmp_parameters.filename +
            '_CDF_after_Kramers_Kronig_transform')
    if 'thickness' in output:
        # Replace the raw thickness array by a navigation signal.
        thickness = eps._get_navigation_signal()
        thickness.metadata.General.title = (
            self.metadata.General.title + ' thickness '
            '(calculated using Kramers-Kronig analysis)')
        thickness.data = te[
            self.axes_manager._get_data_slice([(
                axis.index_in_array, 0)])]
        output['thickness'] = thickness
    if full_output is False:
        return eps
    else:
        return eps, output
| pburdet/hyperspy | hyperspy/_signals/eels.py | Python | gpl-3.0 | 48,505 |
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Vector Impact on
Buildings using QGIS.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from collections import OrderedDict
from qgis.core import (
QgsField,
QgsSpatialIndex,
QgsVectorLayer,
QgsFeature,
QgsRectangle,
QgsFeatureRequest,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem,
QgsGeometry)
from PyQt4.QtCore import QVariant
from safe.impact_functions.bases.classified_vh_classified_ve import \
ClassifiedVHClassifiedVE
from safe.impact_functions.inundation.flood_vector_building_impact.\
metadata_definitions import FloodPolygonBuildingFunctionMetadata
from safe.utilities.i18n import tr
from safe.storage.vector import Vector
from safe.common.exceptions import GetDataError
from safe.impact_reports.building_exposure_report_mixin import (
BuildingExposureReportMixin)
class FloodPolygonBuildingFunction(
        ClassifiedVHClassifiedVE,
        BuildingExposureReportMixin):
    # noinspection PyUnresolvedReferences
    """Impact function for inundation (polygon-polygon)."""
    _metadata = FloodPolygonBuildingFunctionMetadata()
    def __init__(self):
        super(FloodPolygonBuildingFunction, self).__init__()
        # The 'wet' variable
        self.wet = 'wet'
    def notes(self):
        """Return the notes section of the report.
        :return: The notes that should be attached to this impact report.
        :rtype: list
        """
        return [
            {
                'content': tr('Notes'),
                'header': True
            },
            {
                'content': tr(
                    'Buildings are said to be inundated when in a region with '
                    'field "%s" in "%s" .') % (
                    self.hazard_class_attribute,
                    ', '.join([
                        unicode(hazard_class) for
                        hazard_class in self.hazard_class_mapping[self.wet]
                    ]))
            }
        ]
    def run(self):
        """Experimental impact function.
        Marks every exposure building polygon as inundated (1) or not (0)
        in `self.target_field` of a new memory layer, depending on whether
        it intersects any hazard polygon classified as wet, then builds
        the impact report and returns the styled impact layer.
        """
        self.validate()
        self.prepare()
        # Get parameters from layer's keywords
        self.hazard_class_attribute = self.hazard.keyword('field')
        self.hazard_class_mapping = self.hazard.keyword('value_map')
        self.exposure_class_attribute = self.exposure.keyword(
            'structure_class_field')
        # Prepare Hazard Layer
        hazard_provider = self.hazard.layer.dataProvider()
        # Check affected field exists in the hazard layer
        affected_field_index = hazard_provider.fieldNameIndex(
            self.hazard_class_attribute)
        if affected_field_index == -1:
            message = tr(
                'Field "%s" is not present in the attribute table of the '
                'hazard layer. Please change the Affected Field parameter in '
                'the IF Option.') % self.hazard_class_attribute
            raise GetDataError(message)
        srs = self.exposure.layer.crs().toWkt()
        exposure_provider = self.exposure.layer.dataProvider()
        exposure_fields = exposure_provider.fields()
        # Check self.exposure_class_attribute exists in exposure layer
        building_type_field_index = exposure_provider.fieldNameIndex(
            self.exposure_class_attribute)
        if building_type_field_index == -1:
            message = tr(
                'Field "%s" is not present in the attribute table of '
                'the exposure layer. Please change the Building Type '
                'Field parameter in the IF Option.'
            ) % self.exposure_class_attribute
            raise GetDataError(message)
        # If target_field does not exist, add it:
        if exposure_fields.indexFromName(self.target_field) == -1:
            exposure_provider.addAttributes(
                [QgsField(self.target_field, QVariant.Int)])
        target_field_index = exposure_provider.fieldNameIndex(
            self.target_field)
        exposure_fields = exposure_provider.fields()
        # Create layer to store the lines from E and extent
        building_layer = QgsVectorLayer(
            'Polygon?crs=' + srs, 'impact_buildings', 'memory')
        building_provider = building_layer.dataProvider()
        # Set attributes
        building_provider.addAttributes(exposure_fields.toList())
        building_layer.startEditing()
        building_layer.commitChanges()
        # Filter geometry and data using the requested extent
        requested_extent = QgsRectangle(*self.requested_extent)
        # This is a hack - we should be setting the extent CRS
        # in the IF base class via safe/engine/core.py:calculate_impact
        # for now we assume the extent is in 4326 because it
        # is set to that from geo_extent
        # See issue #1857
        transform = QgsCoordinateTransform(
            QgsCoordinateReferenceSystem(
                'EPSG:%i' % self._requested_extent_crs),
            self.hazard.layer.crs()
        )
        projected_extent = transform.transformBoundingBox(requested_extent)
        request = QgsFeatureRequest()
        request.setFilterRect(projected_extent)
        # Split building_layer by H and save as result:
        # 1) Filter from H inundated features
        # 2) Mark buildings as inundated (1) or not inundated (0)
        # make spatial index of affected polygons
        hazard_index = QgsSpatialIndex()
        hazard_geometries = {}  # key = feature id, value = geometry
        has_hazard_objects = False
        for feature in self.hazard.layer.getFeatures(request):
            value = feature[affected_field_index]
            # Only hazard polygons classified as wet are indexed.
            if value not in self.hazard_class_mapping[self.wet]:
                continue
            hazard_index.insertFeature(feature)
            hazard_geometries[feature.id()] = QgsGeometry(feature.geometry())
            has_hazard_objects = True
        if not has_hazard_objects:
            message = tr(
                'There are no objects in the hazard layer with %s '
                'value in %s. Please check your data or use another '
                'attribute.') % (
                    self.hazard_class_attribute,
                    ', '.join(self.hazard_class_mapping[self.wet]))
            raise GetDataError(message)
        features = []
        for feature in self.exposure.layer.getFeatures(request):
            building_geom = feature.geometry()
            affected = False
            # get tentative list of intersecting hazard features
            # only based on intersection of bounding boxes
            ids = hazard_index.intersects(building_geom.boundingBox())
            for fid in ids:
                # run (slow) exact intersection test
                if hazard_geometries[fid].intersects(building_geom):
                    affected = True
                    break
            f = QgsFeature()
            f.setGeometry(building_geom)
            f.setAttributes(feature.attributes())
            f[target_field_index] = 1 if affected else 0
            features.append(f)
            # every once in a while commit the created features
            # to the output layer
            if len(features) == 1000:
                (_, __) = building_provider.addFeatures(features)
                features = []
        # Flush the remaining (< 1000) buffered features.
        (_, __) = building_provider.addFeatures(features)
        building_layer.updateExtents()
        # Generate simple impact report
        self.buildings = {}
        self.affected_buildings = OrderedDict([
            (tr('Flooded'), {})
        ])
        buildings_data = building_layer.getFeatures()
        building_type_field_index = building_layer.fieldNameIndex(
            self.exposure_class_attribute)
        # Tally totals and affected counts per building type.
        for building in buildings_data:
            record = building.attributes()
            building_type = record[building_type_field_index]
            if building_type in [None, 'NULL', 'null', 'Null']:
                building_type = 'Unknown type'
            if building_type not in self.buildings:
                self.buildings[building_type] = 0
                for category in self.affected_buildings.keys():
                    self.affected_buildings[category][
                        building_type] = OrderedDict([
                            (tr('Buildings Affected'), 0)])
            self.buildings[building_type] += 1
            if record[target_field_index] == 1:
                self.affected_buildings[tr('Flooded')][building_type][
                    tr('Buildings Affected')] += 1
        # Lump small entries and 'unknown' into 'other' category
        self._consolidate_to_other()
        impact_summary = self.generate_html_report()
        map_title = tr('Buildings inundated')
        style_classes = [
            dict(label=tr('Not Inundated'), value=0, colour='#1EFC7C',
                 transparency=0, size=0.5),
            dict(label=tr('Inundated'), value=1, colour='#F31A1C',
                 transparency=0, size=0.5)]
        style_info = dict(
            target_field=self.target_field,
            style_classes=style_classes,
            style_type='categorizedSymbol')
        # Convert QgsVectorLayer to inasafe layer and return it.
        building_layer = Vector(
            data=building_layer,
            name=tr('Flooded buildings'),
            keywords={
                'impact_summary': impact_summary,
                'map_title': map_title,
                'target_field': self.target_field,
                'buildings_total': self.total_buildings,
                'buildings_affected': self.total_affected_buildings},
            style_info=style_info)
        self._impact = building_layer
        return building_layer
| MariaSolovyeva/inasafe | safe/impact_functions/inundation/flood_vector_building_impact/impact_function.py | Python | gpl-3.0 | 10,048 |
from django.conf.urls import url
from .views import HomePage, PlayerList, PlayerDetail, SignUpForm
# URL routes for the players app.
urlpatterns = [
    # Landing page.
    url(r'^$', HomePage.as_view(),
        name='home'),
    # Full player roster.
    url(r'^list$', PlayerList.as_view(),
        name='player_list'),
    # Registration form.  BUG FIX: the pattern was missing the '^' anchor, so
    # it matched 'signup/' anywhere in the path (e.g. '/foo/signup/'); anchor
    # it like every sibling pattern.
    url(r'^signup/$', SignUpForm.as_view(),
        name="signup"),
    # Player detail page, keyed by numeric primary key.
    url(r'^(?P<pk>[0-9]+)/$', PlayerDetail.as_view(), name='player_detail'),
]
| biddellns/litsl | players/urls.py | Python | gpl-3.0 | 437 |
# -*- coding: utf-8 -*-
from django import template
from CanteenWebsite.models import Category
from CanteenWebsite.utils.functions import setting_get
register = template.Library()
@register.simple_tag
def get_setting(name, default=None):
    """Template tag: look up the site setting *name*, falling back to *default*."""
    value = setting_get(name, default)
    return value
@register.inclusion_tag('CanteenWebsite/inclusions/sidebar_category_list.html', takes_context=True)
def sidebar_category_list(context):
    """Render the sidebar category list, highlighting the current category.

    Reads ``current_category`` from the template context when present;
    ``current`` is None when it is absent.
    """
    categories = Category.objects.all()
    # BUG FIX: only a missing context key is expected here; the original bare
    # ``except:`` also hid unrelated errors (and would even swallow
    # KeyboardInterrupt/SystemExit).
    try:
        current_category = context['current_category']
    except KeyError:
        current_category = None
    return {
        'current': current_category,
        'categories': categories,
    }
@register.inclusion_tag('CanteenWebsite/inclusions/pagination.html')
def show_pagination(page):
    """Build the context for the pagination widget.

    Up to 10 pages are listed in full.  For longer paginations an elided
    range is produced (mirroring Django admin's pagination): the first
    ON_ENDS pages, an ellipsis, a window of ON_EACH_SIDE pages before the
    current page, the current page, a window after it, another ellipsis and
    the last ON_ENDS pages.  Page numbers here are 1-based.
    """
    pagination = page.paginator
    page_range = list()
    if pagination.num_pages <= 10:
        page_range = pagination.page_range
    else:
        ON_EACH_SIDE = 2
        ON_ENDS = 2
        DOT = '...'
        if page.number > (ON_EACH_SIDE + ON_ENDS):
            # BUG FIX: range() excludes its end point, so range(1, ON_ENDS)
            # showed only one leading page; use ON_ENDS + 1 to show ON_ENDS.
            page_range.extend(range(1, ON_ENDS + 1))
            page_range.append(DOT)
            page_range.extend(range(page.number - ON_EACH_SIDE, page.number + 1))
        else:
            page_range.extend(range(1, page.number + 1))
        if page.number < (pagination.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
            page_range.extend(range(page.number + 1, page.number + ON_EACH_SIDE + 1))
            page_range.append(DOT)
            # BUG FIX: show exactly the last ON_ENDS pages; the previous
            # bound (num_pages - ON_ENDS) included one page too many,
            # asymmetric with the leading end.
            page_range.extend(range(pagination.num_pages - ON_ENDS + 1,
                                    pagination.num_pages + 1))
        else:
            page_range.extend(range(page.number + 1, pagination.num_pages + 1))
    return {
        'page': page,
        'pages': page_range
    }
@register.assignment_tag
def define(val=None):
    """Assignment tag: bind an arbitrary value to a template variable."""
    result = val
    return result
| jinyu121/Canteen | CanteenWebsite/templatetags/canteen_website_tags.py | Python | gpl-3.0 | 1,773 |
"""Command-line driver: optimize grind strategy with the MOABC optimizer."""
import sys
import argparse
import time

sys.path.append('./bin/moabc')
import optimizer

# --- argument handling ---
# build the parser
parser = argparse.ArgumentParser(description='This script optimizes bayesian network graph structure by MOABC')
# grind success-rate table (input file)
parser.add_argument("infile_pt", type=str)
# cost of a single grind attempt
parser.add_argument("-pg", "--pricegrind", type=int)
# number of runs of the grind simulator to average over
parser.add_argument("-na", "--num_average", type=int, default=100)
# output directory for the results
#   no trailing slash:
#   OK: test, ./test
#   NG: test/, ./test/
parser.add_argument("out_dir", type=str)
# save intermediate results while learning
group_sp = parser.add_mutually_exclusive_group()
group_sp.add_argument('-sp', '--saveprogress', action='store_true')
group_sp.add_argument('-n-sp', '--no-saveprogress', action='store_false')
parser.set_defaults(saveprogress=False)
# number of worker processes for parallelization
parser.add_argument("-np", "--num_proc", type=int, default=1)
# whether to produce image output
#   pass the "no" variant when running over ssh without a display
group_wi = parser.add_mutually_exclusive_group()
group_wi.add_argument('-wi', '--withimage', action='store_true')
group_wi.add_argument('-n-wi', '--no-with_image', action='store_false')
parser.set_defaults(withimage=True)
# bee colony sizes
parser.add_argument('-me', '--m_employed', type=int, help='収穫蜂の数', default=40)
parser.add_argument('-mo', '--m_onlooker', type=int, help='追従蜂の数', default=40)
parser.add_argument('-li', '--limit', type=int, help='偵察蜂の閾値', default=3)
# number of iterations
parser.add_argument('-n', type=int, help='ループ数', default=50)
# ALPHA
parser.add_argument('-a', '--alpha', type=float, help='ALPHAの値', default=1)
# parse into a dict (vars() turns the Namespace into a plain dict)
args = vars(parser.parse_args())
print("parsed argments from argparse\n%s\n" % str(args))
# output directory
out_dir = args['out_dir']
# whether to save progress while running
save_progress = args['saveprogress']
# create the optimizer instance
infile_pt = args['infile_pt']
input_price_grind = args['pricegrind']
op = optimizer.MOABC(infile_pt, input_price_grind)
# hyper-parameter setup
op.M_employed = args['m_employed']
op.M_onlooker = args['m_onlooker']
op.LIMIT = args['limit']
op.N = args['n']
op.weight_h = args['alpha']
op.proc = args['num_proc']
op.num_average = args['num_average']
# apply the parameters
op.gen.calculate_weights()
# run the learning
dir_save_progress = ''
if save_progress:
    dir_save_progress = out_dir
start = time.time()
op.learn(out_dirname=dir_save_progress)
end = time.time()
# report elapsed time
# BUG FIX: the original built a tuple ("time: ", "<secs>") so print() showed
# the tuple repr; build a single string (the log-file content is unchanged,
# since writelines() on the tuple concatenated the same two pieces).
str_time = "time: {0}".format(end - start)
print(str_time)
# context manager guarantees the log file is closed even on write failure
with open('%s/time.log' % out_dir, 'w') as f:
    f.write(str_time)
# write the learning results
op.save_result(out_dir, prefix='total', with_image=args['withimage'])
| curiburn/pso2_grind_optimizer | py/main.py | Python | gpl-3.0 | 2,887 |
# coding: utf-8
# Copyright 2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
Unit-tests for the Beam class.
Run as python testBeamObject.py in console or via travis
'''
# General imports
# -----------------
from __future__ import division, print_function
import unittest
import numpy
from scipy.constants import physical_constants
# BLonD imports
# --------------
from blond.beam.beam import Particle, Proton, Electron
from blond.input_parameters.ring import Ring
from blond.input_parameters.rf_parameters import RFStation
from blond.beam.beam import Beam
from blond.beam.distributions import matched_from_distribution_function
from blond.trackers.tracker import FullRingAndRF, RingAndRFTracker
import blond.utils.exceptions as blExcept
class testParticleClass(unittest.TestCase):
    """Basic sanity checks on the generic Particle type."""

    def setUp(self):
        self.test_particle = Particle(1, 2)

    def test_particle_attributes(self):
        """Every documented physics attribute must be present."""
        expected_attributes = ('mass', 'charge', 'radius_cl', 'C_gamma', 'C_q')
        for attribute in expected_attributes:
            self.assertTrue(hasattr(self.test_particle, attribute),
                            msg=f"Particle: no '{attribute}' attribute")

    def test_attribute_types(self):
        """Mass and charge are stored as plain floats."""
        for attribute in ('mass', 'charge'):
            value = getattr(self.test_particle, attribute)
            self.assertIsInstance(value, float,
                                  msg=f"Particle: {attribute} is not a float")

    def test_negative_restmass_exception(self):
        """A negative rest mass is rejected at construction time."""
        with self.assertRaises(RuntimeError):
            Particle(-1, 2)
class testElectron(unittest.TestCase):
    """Numerical constants of the predefined Electron particle."""

    def setUp(self):
        self.electron = Electron()

    def test_classical_electron_radius(self):
        """Must agree with the CODATA classical electron radius."""
        reference = physical_constants['classical electron radius'][0]
        self.assertAlmostEqual(self.electron.radius_cl, reference,
                               delta=1e-24,
                               msg='Electron: wrong classical elctron radius')

    def test_Sand_radiation_constant(self):
        # value from S. Lee: Accelerator Physics, 2nd ed., eq (4.5)
        # convert from GeV^3 to eV^3
        reference = 8.846e-5 / (1e9)**3
        self.assertAlmostEqual(self.electron.C_gamma, reference, delta=1e-35,
                               msg='Electron: wrong radiation constant')

    def test_quantum_radiation_constant(self):
        # value from A. Wolski: Beam Dynamics in High Energy Accelerators, p. 233
        self.assertAlmostEqual(self.electron.C_q, 3.832e-13, delta=1e-16,
                               msg='Electron: wrong quantum excitation constant')
class testProton(unittest.TestCase):
    """Numerical constants of the predefined Proton particle."""

    def setUp(self):
        self.proton = Proton()

    def test_classical_proton_radius(self):
        # value from S. Lee: Accelerator Physics, 2nd ed., p. 560
        reference = 1.5346986e-18
        self.assertAlmostEqual(self.proton.radius_cl, reference, delta=1e-24,
                               msg='Proton: wrong classical proton radius')

    def test_Sand_radiation_constant(self):
        # value from S. Lee: Accelerator Physics, 2nd ed., eq (4.5)
        # convert from GeV^3 to eV^3
        reference = 7.783e-18 / (1e9)**3
        self.assertAlmostEqual(self.proton.C_gamma, reference, delta=1e-48,
                               msg='Proton: wrong radiation constant')
class testBeamClass(unittest.TestCase):
    """Tests for the Beam class: attribute types, statistics, the three
    loss-marking methods, and particle/beam addition."""

    # Run before every test
    def setUp(self):
        """Build the shared fixtures: an SPS-like Ring, a Beam and an RFStation."""
        # Bunch parameters
        # -----------------
        N_turn = 200
        N_b = 1e9            # Intensity
        N_p = int(2e6)       # Macro-particles
        # Machine parameters
        # --------------------
        C = 6911.5038        # Machine circumference [m]
        p = 450e9            # Synchronous momentum [eV/c]
        gamma_t = 17.95142852  # Transition gamma
        alpha = 1./gamma_t**2  # First order mom. comp. factor
        # Define general parameters
        # --------------------------
        self.general_params = Ring(C, alpha, p, Proton(), N_turn)
        # Define beam
        # ------------
        self.beam = Beam(self.general_params, N_p, N_b)
        # Define RF section
        # -----------------
        self.rf_params = RFStation(self.general_params, [4620], [7e6], [0.])

    # Run after every test
    def tearDown(self):
        """Drop the fixtures so each test starts from a fresh state."""
        del self.general_params
        del self.beam
        del self.rf_params

    def test_variables_types(self):
        """Every public Beam attribute must have its documented type."""
        self.assertIsInstance(self.beam.beta, float,
                              msg='Beam: beta is not a float')
        self.assertIsInstance(self.beam.gamma, float,
                              msg='Beam: gamma is not a float')
        self.assertIsInstance(self.beam.energy, float,
                              msg='Beam: energy is not a float')
        self.assertIsInstance(self.beam.momentum, float,
                              msg='Beam: momentum is not a float')
        self.assertIsInstance(self.beam.mean_dt, float,
                              msg='Beam: mean_dt is not a float')
        self.assertIsInstance(self.beam.mean_dE, float,
                              msg='Beam: mean_dE is not a float')
        self.assertIsInstance(self.beam.sigma_dt, float,
                              msg='Beam: sigma_dt is not a float')
        self.assertIsInstance(self.beam.sigma_dE, float,
                              msg='Beam: sigma_dE is not a float')
        self.assertIsInstance(self.beam.intensity, float,
                              msg='Beam: intensity is not a float')
        self.assertIsInstance(self.beam.n_macroparticles, int,
                              msg='Beam: n_macroparticles is not an int')
        self.assertIsInstance(self.beam.ratio, float,
                              msg='Beam: ratio is not a float')
        self.assertIsInstance(self.beam.id, numpy.ndarray,
                              msg='Beam: id is not a numpy.array')
        # numpy integer dtypes have names like 'int64'; substring check
        # covers all widths.
        self.assertIn('int', type(self.beam.id[0]).__name__,
                      msg='Beam: id array does not contain int')
        self.assertIsInstance(self.beam.n_macroparticles_lost, int,
                              msg='Beam: n_macroparticles_lost is not an int')
        self.assertIsInstance(self.beam.n_macroparticles_alive, int,
                              msg='Beam: n_macroparticles_alive is not an int')
        self.assertIsInstance(self.beam.dt, numpy.ndarray,
                              msg='Beam: dt is not a numpy.array')
        self.assertIsInstance(self.beam.dE, numpy.ndarray,
                              msg='Beam: dE is not a numpy.array')
        self.assertIn('float', type(self.beam.dt[0]).__name__,
                      msg='Beam: dt does not contain float')
        self.assertIn('float', type(self.beam.dE[0]).__name__,
                      msg='Beam: dE does not contain float')

    def test_beam_statistic(self):
        """statistics() recovers mean/sigma of normally distributed coordinates."""
        sigma_dt = 1.
        sigma_dE = 1.
        self.beam.dt = sigma_dt*numpy.random.randn(self.beam.n_macroparticles)
        self.beam.dE = sigma_dE*numpy.random.randn(self.beam.n_macroparticles)
        self.beam.statistics()
        # 2e6 samples keep the statistical error well below the 1e-2 delta.
        self.assertAlmostEqual(self.beam.sigma_dt, sigma_dt, delta=1e-2,
                               msg='Beam: Failed statistic sigma_dt')
        self.assertAlmostEqual(self.beam.sigma_dE, sigma_dE, delta=1e-2,
                               msg='Beam: Failed statistic sigma_dE')
        self.assertAlmostEqual(self.beam.mean_dt, 0., delta=1e-2,
                               msg='Beam: Failed statistic mean_dt')
        self.assertAlmostEqual(self.beam.mean_dE, 0., delta=1e-2,
                               msg='Beam: Failed statistic mean_dE')

    def test_losses_separatrix(self):
        """losses_separatrix() marks (id=0) only particles outside the bucket."""
        longitudinal_tracker = RingAndRFTracker(self.rf_params, self.beam)
        full_tracker = FullRingAndRF([longitudinal_tracker])
        try:
            matched_from_distribution_function(self.beam,
                                               full_tracker,
                                               distribution_exponent=1.5,
                                               distribution_type='binomial',
                                               bunch_length=1.65e-9,
                                               bunch_length_fit='fwhm',
                                               distribution_variable='Hamiltonian')
        except TypeError as te:
            self.skipTest("Skipped because of known bug in deepcopy. Exception message %s"
                          % str(te))
        # A matched bunch lies entirely inside the separatrix: no losses.
        self.beam.losses_separatrix(self.general_params, self.rf_params)
        self.assertEqual(len(self.beam.id[self.beam.id == 0]), 0,
                         msg='Beam: Failed losses_sepatrix, first')
        # A 1 GeV energy offset pushes every particle out of the bucket.
        self.beam.dE += 10e8
        self.beam.losses_separatrix(self.general_params, self.rf_params)
        self.assertEqual(len(self.beam.id[self.beam.id == 0]),
                         self.beam.n_macroparticles,
                         msg='Beam: Failed losses_sepatrix, second')

    def test_losses_longitudinal_cut(self):
        """losses_longitudinal_cut() marks particles outside a dt window."""
        longitudinal_tracker = RingAndRFTracker(self.rf_params, self.beam)
        full_tracker = FullRingAndRF([longitudinal_tracker])
        try:
            matched_from_distribution_function(self.beam,
                                               full_tracker,
                                               distribution_exponent=1.5,
                                               distribution_type='binomial',
                                               bunch_length=1.65e-9,
                                               bunch_length_fit='fwhm',
                                               distribution_variable='Hamiltonian')
        except TypeError as te:
            self.skipTest("Skipped because of known bug in deepcopy. Exception message %s"
                          % str(te))
        # The matched bunch sits inside [0, 5 ns]: nothing lost.
        self.beam.losses_longitudinal_cut(0., 5e-9)
        self.assertEqual(len(self.beam.id[self.beam.id == 0]), 0,
                         msg='Beam: Failed losses_longitudinal_cut, first')
        # A 10 ns shift moves everything outside the window.
        self.beam.dt += 10e-9
        self.beam.losses_longitudinal_cut(0., 5e-9)
        self.assertEqual(len(self.beam.id[self.beam.id == 0]),
                         self.beam.n_macroparticles,
                         msg='Beam: Failed losses_longitudinal_cut, second')

    def test_losses_energy_cut(self):
        """losses_energy_cut() marks particles outside a dE window."""
        longitudinal_tracker = RingAndRFTracker(self.rf_params, self.beam)
        full_tracker = FullRingAndRF([longitudinal_tracker])
        try:
            matched_from_distribution_function(self.beam,
                                               full_tracker,
                                               distribution_exponent=1.5,
                                               distribution_type='binomial',
                                               bunch_length=1.65e-9,
                                               bunch_length_fit='fwhm',
                                               distribution_variable='Hamiltonian')
        except TypeError as te:
            self.skipTest("Skipped because of known bug in deepcopy. Exception message %s"
                          % str(te))
        # The matched bunch fits inside +-300 MeV: nothing lost.
        self.beam.losses_energy_cut(-3e8, 3e8)
        self.assertEqual(len(self.beam.id[self.beam.id == 0]), 0,
                         msg='Beam: Failed losses_energy_cut, first')
        # A 1 GeV offset moves everything outside the window.
        self.beam.dE += 10e8
        self.beam.losses_energy_cut(-3e8, 3e8)
        self.assertEqual(len(self.beam.id[self.beam.id == 0]),
                         self.beam.n_macroparticles,
                         msg='Beam: Failed losses_energy_cut, second')

    def test_addition(self):
        """add_particles/add_beam/__iadd__ grow the beam and keep coordinates/ids."""
        np = numpy
        # Start from a known 2e6-particle distribution.
        testdEs = np.linspace(-1E6, 1E6, 2000000)
        testdts = np.linspace(0, 10E-9, 2000000)
        self.beam.dE = testdEs
        self.beam.dt = testdts
        # Add 1e5 extra particles with a wider spread.
        testdEs = np.linspace(-2E6, 2E6, 100000)
        testdts = np.linspace(-1E-9, 12E-9, 100000)
        self.beam.add_particles([testdts, testdEs])
        self.assertEqual(self.beam.n_macroparticles, 2100000,
                         msg="n_macroparticles not incremented correctly")
        # Add a second Beam whose first 100 particles are flagged lost (id=0).
        testBeam = Beam(self.general_params, 200, 0)
        testBeam.id[:100] = 0
        self.beam.add_beam(testBeam)
        self.assertEqual(self.beam.id[2100000:2100100].tolist(), [0]*100,
                         msg="particle ids not applied correctly")
        self.assertEqual(self.beam.n_macroparticles, 2100200,
                         msg="Added macroparticles not incremented n_macro correctly")
        # += accepts either another Beam ...
        self.beam += testBeam
        self.assertEqual(self.beam.n_macroparticles, 2100400,
                         msg="Added macroparticles not incremented n_macro correctly")
        # ... or a (dt, dE) coordinate pair.
        self.beam += (testdts, testdEs)
        self.assertEqual(self.beam.n_macroparticles, 2200400,
                         msg="Added macroparticles not incremented n_macro correctly")
        self.assertEqual(-2E6, np.min(self.beam.dE),
                         msg="coordinates of added beam not used correctly")
        self.assertEqual(2E6, np.max(self.beam.dE),
                         msg="coordinates of added beam not used correctly")
        self.assertEqual(-1E-9, np.min(self.beam.dt),
                         msg="coordinates of added beam not used correctly")
        self.assertEqual(12E-9, np.max(self.beam.dt),
                         msg="coordinates of added beam not used correctly")
        with self.assertRaises(blExcept.ParticleAdditionError,
                               msg="""Unequal length time and energy should raise exception"""):
            self.beam += ([1, 2, 3], [4, 5])
        with self.assertRaises(blExcept.ParticleAdditionError,
                               msg="""Mising time/energy should raise exception"""):
            self.beam += ([1, 2, 3])
        with self.assertRaises(TypeError, msg='Wrong type should raise exception'):
            self.beam.add_beam(([1], [2]))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| blond-admin/BLonD | unittests/beams/test_beam_object.py | Python | gpl-3.0 | 14,105 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set et sts=4 sw=4:
__all__ = (
"WeaselSession",
"WeaselService",
"service",
)
import logging
import logging.config
import os
import time
import threading
logfile = os.path.join(os.path.dirname(__file__), "logging.conf")
logging.config.fileConfig(logfile)
logger = logging.getLogger("weasel")
import ibus
from core import *
from engine import *
import storage
def add_text(actions, msg, field, text):
    """Serialize one text widget into the wire protocol.

    *text* is a (string, attributes, cursor) triple; lines of the form
    ``ctx.<field>...`` are appended to *msg* and the 'ctx' action is
    recorded in *actions*.
    """
    actions.add(u'ctx')
    s, attrs, cursor = text
    msg.append(u'ctx.%s=%s\n' % (field, s))
    if attrs:
        msg.append(u'ctx.%s.attr.length=%d\n' % (field, len(attrs)))
        # One range/type line pair per attribute, indexed in order.
        for i, (extent, type_) in enumerate(attrs):
            msg.append(u'ctx.%s.attr.%d.range=%d,%d\n' % (field, i, extent[0], extent[1]))
            msg.append(u'ctx.%s.attr.%d.type=%s\n' % (field, i, type_))
    if cursor:
        msg.append(u'ctx.%s.cursor=%d,%d\n' % (field, cursor[0], cursor[1]))
def add_cand(actions, msg, cand_info):
    """Serialize the candidate list into the wire protocol.

    *cand_info* is (current_page, total_pages, cursor, candidates); lines of
    the form ``ctx.cand...`` are appended to *msg* and the 'ctx' action is
    recorded in *actions*.
    """
    actions.add(u'ctx')
    current_page, total_pages, cursor, cands = cand_info
    msg.append(u'ctx.cand.length=%d\n' % len(cands))
    # Only the candidate text (first tuple element) is transmitted.
    for i, cand in enumerate(cands):
        msg.append(u'ctx.cand.%d=%s\n' % (i, cand[0]))
    msg.append(u'ctx.cand.cursor=%d\n' % cursor)
    msg.append(u'ctx.cand.page=%d/%d\n' % (current_page, total_pages))
class WeaselSession:
    '''Weasel session.

    Mediates between the Rime algorithm engine and the Weasel IME frontend:
    forwards key events to the backend engine and accumulates the response
    (commit string, preedit, aux text, candidate page) to be serialized by
    get_response().
    '''
    def __init__(self, params=None):
        logger.info("init weasel session: %s", params)
        # Page size comes from the settings DB; fall back to 5 when unset.
        self.__page_size = storage.DB.read_setting(u'Option/PageSize') or 5
        self.__lookup_table = ibus.LookupTable(self.__page_size)
        self.__clear()
        self.__backend = Engine(self, params)
    def __clear(self):
        # Reset the per-key-event response state.
        self.__commit = None
        self.__preedit = None
        self.__aux = None
        self.__cand = None
    def process_key_event(self, keycode, mask):
        '''Handle a keyboard event; return True if the backend consumed it.'''
        logger.debug("process_key_event: '%s'(%x), %08x" % \
                         (keysyms.keycode_to_name(keycode), keycode, mask))
        self.__clear()
        taken = self.__backend.process_key_event(KeyEvent(keycode, mask))
        return taken
    def get_response(self):
        '''Build the textual response message for the frontend.

        The message is a list of "key=value" lines prefixed by an
        "action=..." line and terminated by a single "." line.
        '''
        actions = set()
        msg = list()
        if self.__commit:
            actions.add(u'commit')
            msg.append(u'commit=%s\n' % u''.join(self.__commit))
        if self.__preedit:
            add_text(actions, msg, u'preedit', self.__preedit)
        if self.__aux:
            add_text(actions, msg, u'aux', self.__aux)
        if self.__cand:
            add_cand(actions, msg, self.__cand)
        #self.__clear()
        if not actions:
            return u'action=noop\n.\n'
        else:
            # starts with an action list
            msg.insert(0, u'action=%s\n' % u','.join(sorted(actions)))
            # ends with a single dot
            msg.append(u'.\n')
            return u''.join(msg)
    # implement a frontend proxy for rime engine
    def commit_string(self, s):
        '''Commit text to the application (accumulates within one event).'''
        logger.debug(u'commit: [%s]' % s)
        if self.__commit:
            self.__commit.append(s)
        else:
            self.__commit = [s]
    def update_preedit(self, s, start=0, end=0):
        '''Update the preedit (composition) string.

        [start, end) defines the highlighted span within the string.
        '''
        if start < end:
            logger.debug(u'preedit: [%s[%s]%s]' % (s[:start], s[start:end], s[end:]))
        else:
            logger.debug(u'preedit: [%s]' % s)
        #attrs = [((start, end), u'HIGHLIGHTED')] if start < end else None
        #self.__preedit = (s, attrs)
        cursor = (start, end) if start < end else None
        self.__preedit = (s, None, cursor)
    def update_aux(self, s, start=0, end=0):
        '''Update the auxiliary string.

        [start, end) defines the highlighted span within the string.
        '''
        if start < end:
            logger.debug(u'aux: [%s[%s]%s]' % (s[:start], s[start:end], s[end:]))
        else:
            logger.debug(u'aux: [%s]' % s)
        cursor = (start, end) if start < end else None
        self.__aux = (s, None, cursor)
    def update_candidates(self, candidates):
        '''Update the candidate list and refresh the current page.'''
        self.__lookup_table.clean()
        self.__lookup_table.show_cursor(False)
        if not candidates:
            self.__cand = (0, 0, 0, [])
        else:
            for c in candidates:
                self.__lookup_table.append_candidate(ibus.Text(c[0]))
            self.__update_page()
    def __update_page(self):
        # Snapshot the lookup table's current page into self.__cand.
        candidates = self.__lookup_table.get_candidates_in_current_page()
        n = self.__lookup_table.get_number_of_candidates()
        c = self.__lookup_table.get_cursor_pos()
        p = self.__lookup_table.get_page_size()
        # NOTE(review): Python 2 integer division is relied on here; under
        # Python 3 these would become floats -- confirm before porting.
        current_page = c / p
        total_pages = (n + p - 1) / p
        cands = [(x.get_text(), None) for x in candidates]
        self.__cand = (current_page, total_pages, c % p, cands)
    def page_up(self):
        # Returns True when the page actually changed.
        if self.__lookup_table.page_up():
            #print u'page up.'
            self.__update_page()
            return True
        return False
    def page_down(self):
        if self.__lookup_table.page_down():
            #print u'page down.'
            self.__update_page()
            return True
        return False
    def cursor_up(self):
        if self.__lookup_table.cursor_up():
            #print u'cursor up.'
            self.__update_page()
            return True
        return False
    def cursor_down(self):
        if self.__lookup_table.cursor_down():
            #print u'cursor down.'
            self.__update_page()
            return True
        return False
    def get_candidate_index(self, number):
        # Translate a page-relative number (e.g. a digit key) into an
        # absolute candidate index; -1 when outside the current page.
        if number >= self.__page_size:
            return -1
        index = number + self.__lookup_table.get_current_page_start()
        #print u'cand index = %d' % index
        return index
    def get_highlighted_candidate_index(self):
        # Absolute index of the highlighted candidate.
        index = self.__lookup_table.get_cursor_pos()
        #print u'highlighted cand index = %d' % index
        return index
class WeaselService:
    '''Weasel algorithm service.

    Manages a set of sessions.  Each session object holds one algorithm
    engine instance and serves the input requests of one IME frontend.
    Idle sessions are reaped by a periodic timer.
    '''
    SESSION_EXPIRE_TIME = 3 * 60 # 3 min.
    def __init__(self):
        self.__sessions = dict()
        self.__timer = None
    def cleanup(self):
        '''Drop all sessions and cancel the expiry timer.'''
        logger.info("cleaning up %d remaining sessions." % len(self.__sessions))
        self.cancel_check()
        self.__sessions.clear()
    def schedule_next_check(self):
        # (Re)arm the one-shot expiry timer; the extra 10s gives sessions a
        # grace period past SESSION_EXPIRE_TIME.
        self.cancel_check()
        self.__timer = threading.Timer(WeaselService.SESSION_EXPIRE_TIME + 10, \
                                       lambda: self.check_stale_sessions())
        self.__timer.start()
    def cancel_check(self):
        # Stop a pending expiry check, if any.
        if self.__timer:
            self.__timer.cancel()
            self.__timer = None
    def check_stale_sessions(self):
        '''Destroy sessions that have been inactive too long.'''
        logger.info("check_stale_sessions...")
        expire_time = time.time() - WeaselService.SESSION_EXPIRE_TIME
        # NOTE(review): deleting while iterating keys() is safe on Python 2
        # (keys() returns a list copy) but would break on Python 3.
        for sid in self.__sessions.keys():
            if self.__sessions[sid].last_active_time < expire_time:
                logger.info("removing stale session #%x." % sid)
                self.destroy_session(sid)
        # sessions remain: schedule the next check
        self.__timer = None
        if self.__sessions:
            self.schedule_next_check()
    def has_session(self, sid):
        '''Return True when a session with the given id exists.'''
        if sid in self.__sessions:
            return True
        else:
            return False
    def get_session(self, sid):
        '''Look up a session by id (e.g. to forward key events).

        Refreshes the session's last-active timestamp; returns None when
        the id is unknown.
        '''
        if sid in self.__sessions:
            session = self.__sessions[sid]
            session.last_active_time = time.time()
            return session
        else:
            return None
    def create_session(self):
        '''Create a session.

        Called when an IME frontend activates the input method.
        Returns the session id (a positive integer) or None on failure.
        '''
        try:
            session = WeaselSession()
            session.last_active_time = time.time()
        except Exception, e:
            logger.error("create_session: error creating session: %s" % e)
            return None
        # The object's id() doubles as the session id.
        sid = id(session)
        self.__sessions[sid] = session
        logger.info("create_session: session #%x, total %d active sessions." % \
                        (sid, len(self.__sessions)))
        # start the stale-session check when the first session appears
        if self.__sessions and not self.__timer:
            self.schedule_next_check()
        return sid
    def destroy_session(self, sid):
        '''Destroy the given session.

        Called when an IME frontend deactivates the input method.
        Returns False when the id is unknown.
        '''
        if sid not in self.__sessions:
            logger.warning("destroy_session: invalid session #%x." % sid)
            return False
        del self.__sessions[sid]
        logger.info("destroy_session: session #%x, %d active sessions left." % \
                        (sid, len(self.__sessions)))
        # no sessions left: stop the stale-session check
        if not self.__sessions and self.__timer:
            self.cancel_check()
        return True
service = WeaselService()
| lotem/rime.py | weasel/weasel.py | Python | gpl-3.0 | 9,656 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'spellcheck.ui'
#
# Created: Thu Jul 30 01:27:24 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 API-version compatibility shims (emitted by pyuic4): newer PyQt4
# builds drop QString.fromUtf8 and QApplication.UnicodeUTF8, so fall back
# to identity / the shorter translate() signature when they are absent.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated UI class for the spellcheck dialog (pyuic4 output from
    spellcheck.ui).  Do not edit by hand -- regenerate from the .ui file."""
    def setupUi(self, Form):
        """Build the widget tree, layouts and styles on *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(579, 352)
        self.gridLayout_2 = QtGui.QGridLayout(Form)
        self.gridLayout_2.setMargin(0)
        self.gridLayout_2.setSpacing(0)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        # Main frame with the gradient background.
        self.fr_main = QtGui.QFrame(Form)
        self.fr_main.setStyleSheet(_fromUtf8("QFrame#fr_main {\n"
        " background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0 rgba(82, 82, 82, 255), stop:0.0590909 rgba(111, 111, 111, 255), stop:0.922727 rgba(99, 99, 99, 255), stop:1 rgba(151, 151, 151, 255));\n"
        "border-radius:8px;\n"
        "}"))
        self.fr_main.setFrameShape(QtGui.QFrame.NoFrame)
        self.fr_main.setFrameShadow(QtGui.QFrame.Raised)
        self.fr_main.setObjectName(_fromUtf8("fr_main"))
        self.gridLayout_4 = QtGui.QGridLayout(self.fr_main)
        self.gridLayout_4.setMargin(6)
        self.gridLayout_4.setSpacing(4)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        # Read-only text area holding the text being spellchecked.
        self.te_text = QtGui.QPlainTextEdit(self.fr_main)
        self.te_text.setReadOnly(True)
        self.te_text.setObjectName(_fromUtf8("te_text"))
        self.gridLayout_4.addWidget(self.te_text, 2, 0, 1, 4)
        # Bottom button row: change counter, cancel, apply.
        self.frame = QtGui.QFrame(self.fr_main)
        self.frame.setFrameShape(QtGui.QFrame.NoFrame)
        self.frame.setFrameShadow(QtGui.QFrame.Raised)
        self.frame.setObjectName(_fromUtf8("frame"))
        self.gridLayout = QtGui.QGridLayout(self.frame)
        self.gridLayout.setMargin(0)
        self.gridLayout.setHorizontalSpacing(8)
        self.gridLayout.setVerticalSpacing(4)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        spacerItem = QtGui.QSpacerItem(313, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 0, 1, 1, 1)
        self.b_cancel = QtGui.QPushButton(self.frame)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("../../style/img/close_hover.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.b_cancel.setIcon(icon)
        self.b_cancel.setObjectName(_fromUtf8("b_cancel"))
        self.gridLayout.addWidget(self.b_cancel, 0, 2, 1, 1)
        self.b_ok = QtGui.QPushButton(self.frame)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.b_ok.setFont(font)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8("../../style/img/checkmark.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.b_ok.setIcon(icon1)
        self.b_ok.setObjectName(_fromUtf8("b_ok"))
        self.gridLayout.addWidget(self.b_ok, 0, 3, 1, 1)
        self.l_count = QtGui.QLabel(self.frame)
        self.l_count.setStyleSheet(_fromUtf8("color: rgb(184, 215, 255);"))
        self.l_count.setObjectName(_fromUtf8("l_count"))
        self.gridLayout.addWidget(self.l_count, 0, 0, 1, 1)
        self.gridLayout_4.addWidget(self.frame, 3, 0, 1, 4)
        # Header row: title, undo/redo buttons, hint label.
        self.frame_2 = QtGui.QFrame(self.fr_main)
        self.frame_2.setFrameShape(QtGui.QFrame.NoFrame)
        self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
        self.frame_2.setObjectName(_fromUtf8("frame_2"))
        self.gridLayout_3 = QtGui.QGridLayout(self.frame_2)
        self.gridLayout_3.setSpacing(4)
        self.gridLayout_3.setContentsMargins(4, 0, 4, 0)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.label_2 = QtGui.QLabel(self.frame_2)
        font = QtGui.QFont()
        font.setItalic(True)
        self.label_2.setFont(font)
        self.label_2.setStyleSheet(_fromUtf8("color: rgb(250, 255, 187);"))
        self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_3.addWidget(self.label_2, 0, 5, 1, 1)
        self.b_redo = QtGui.QPushButton(self.frame_2)
        self.b_redo.setMaximumSize(QtCore.QSize(26, 24))
        self.b_redo.setText(_fromUtf8(""))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8("../../style/img/redo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.b_redo.setIcon(icon2)
        self.b_redo.setObjectName(_fromUtf8("b_redo"))
        self.gridLayout_3.addWidget(self.b_redo, 0, 3, 1, 1)
        self.b_undo = QtGui.QPushButton(self.frame_2)
        self.b_undo.setMaximumSize(QtCore.QSize(26, 24))
        self.b_undo.setText(_fromUtf8(""))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8("../../style/img/undo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.b_undo.setIcon(icon3)
        self.b_undo.setObjectName(_fromUtf8("b_undo"))
        self.gridLayout_3.addWidget(self.b_undo, 0, 2, 1, 1)
        self.label = QtGui.QLabel(self.frame_2)
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setStyleSheet(_fromUtf8("color:white;"))
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_3.addWidget(self.label, 0, 0, 1, 1)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_3.addItem(spacerItem1, 0, 4, 1, 1)
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
        self.gridLayout_3.addItem(spacerItem2, 0, 1, 1, 1)
        self.gridLayout_4.addWidget(self.frame_2, 1, 0, 1, 4)
        self.gridLayout_2.addWidget(self.fr_main, 0, 0, 1, 1)
        self.retranslateUi(Form)
        # Wire undo/redo buttons straight to the text widget's slots.
        QtCore.QObject.connect(self.b_undo, QtCore.SIGNAL(_fromUtf8("clicked()")), self.te_text.undo)
        QtCore.QObject.connect(self.b_redo, QtCore.SIGNAL(_fromUtf8("clicked()")), self.te_text.redo)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Set all user-visible (translatable) strings."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.b_cancel.setText(_translate("Form", "cancel", None))
        self.b_ok.setText(_translate("Form", "apply changes", None))
        self.l_count.setText(_translate("Form", "0 changes", None))
        self.label_2.setText(_translate("Form", "Right click on word for spelling suggestions", None))
        self.b_redo.setToolTip(_translate("Form", "redo", None))
        self.b_undo.setToolTip(_translate("Form", "undo", None))
        self.label.setText(_translate("Form", "Spellcheck", None))
| lucidlylogicole/scope | plugins/spellcheck/spellcheck_ui.py | Python | gpl-3.0 | 7,193 |
import pytest
from cli_config.tag import tag
from utility.nix_error import NixError
def test_tag_show_no_tag(capsys):
    """Running `show` without a tag must trigger argparse's SystemExit(2)."""
    with pytest.raises(SystemExit) as _excinfo:
        tag.tag("nixconfig", ["show"])
    captured = capsys.readouterr()
    _out, _err = captured
    assert "2" in str(_excinfo.value), "Exception doesn't contain expected string"
    assert not _out, "StdOut should be empty, contains: {}".format(_out)
    assert "the following arguments are required: tag" in _err, "StdErr doesn't contain expected string"
def test_tag_show_invalid_tag(capsys):
    """An unknown tag name must raise NixError and produce no output."""
    with pytest.raises(NixError) as _excinfo:
        tag.tag("nixconfig", ["show", "badtag"])
    _out, _err = capsys.readouterr()
    assert "Unknown tag: badtag" in str(_excinfo.value)
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    # BUG FIX: `len(_err) is 0` relied on CPython's small-int caching; compare
    # with == like the sibling assertion above.
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_tag_show_good_tag(capsys):
    """A valid tag lists exactly its member scripts on stdout."""
    tag.tag("nixconfig", ["show", "tag1"])
    _out, _err = capsys.readouterr()
    assert "script1" in _out, "'script1' should be in output"
    assert "script2" in _out, "'script2' should be in output"
    assert "script3" not in _out, "'script2' should be in output"
    # BUG FIX: `len(_err) is 0` relied on CPython's small-int caching; use ==.
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
| mbiciunas/nix | test/cli_config/tag/test_tag_show.py | Python | gpl-3.0 | 1,291 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import re
import sys
from io import BytesIO
from PIL import Image
from triptools import config, DB
from triptools.common import Trackpoint, tp_dist, distance, get_names
from triptools.exif_support import get_location
logging.basicConfig(level=logging.INFO)
def get_thumbnail(filename):
    """Return a PNG thumbnail of the image *filename* as a bytes object.

    The image is scaled to fit inside the Photo/thumbwidth x Photo/thumbheight
    box from the configuration, preserving the aspect ratio.
    """
    max_x = config.getint("Photo", "thumbwidth")
    max_y = config.getint("Photo", "thumbheight")
    im = Image.open(filename)
    x, y = im.size
    # Scale factor that makes the larger dimension fit its bound.
    scale = max(x / max_x, y / max_y)
    # NOTE(review): Image.ANTIALIAS is deprecated (removed in Pillow 10) in
    # favour of Image.LANCZOS -- confirm the pinned Pillow version.
    im.thumbnail((int(x / scale), int(y / scale)), Image.ANTIALIAS)
    buffer = BytesIO()
    im.save(buffer, "PNG")
    # getvalue() returns the full buffer contents, replacing the manual
    # seek(0)/read() pair.
    return buffer.getvalue()
if __name__ == "__main__":
    # Import every photo matching the configured Photo/name + Photo/mask
    # patterns into the trip database, attaching a PNG thumbnail to each.
    with DB() as db:
        for filename in get_names(config.get("Photo", "name"), config.get("Photo", "mask")):
            try:
                logging.getLogger(__name__).info("Processing %s" % filename)
                # Skip photos already in the DB unless a refresh is requested.
                if db.get_photo(filename) and not config.getboolean("Photo", "refresh"):
                    logging.getLogger(__name__).info("Photo %s already imported." % filename)
                    continue
                location = get_location(filename)
                # NOTE(review): the thumbnail is attached before the
                # truthiness check below -- presumably get_location() always
                # returns an object; confirm it cannot return None.
                location.add("thumbnail", get_thumbnail(filename))
                if location:
                    # add_photo() apparently reports the number of rows added.
                    if db.add_photo(location) == 1:
                        logging.getLogger(__name__).info("Photo %s added." % filename)
            except Exception as e:
                # Best-effort import: log the failure, continue with the next file.
                logging.getLogger(__name__).error(e)
| Eierkopp/triparchive | triptools/photoimport.py | Python | gpl-3.0 | 1,597 |
# Environment configuration
# Copyright (c) 2016, Tieto Corporation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
#
# Currently static definition, in the future this could be a config file,
# or even common database with host management.
#
import logging
logger = logging.getLogger()
#
# You can put your settings in cfg.py file with setup_params, devices
# definitions in the format as below. In other case HWSIM cfg will be used.
#
# Default static settings: paths of the tools/scripts driven by the test
# runner plus global test parameters (country code, log dir, IPv4 test net).
setup_params = {"setup_hw" : "./tests/setup_hw.sh",
                "hostapd" : "./tests/hostapd",
                "wpa_supplicant" : "./tests/wpa_supplicant",
                "iperf" : "iperf",
                "wlantest" : "./tests/wlantest",
                "wlantest_cli" : "./tests/wlantest_cli",
                "country" : "US",
                "log_dir" : "/tmp/",
                "ipv4_test_net" : "192.168.12.0",
                "trace_start" : "./tests/trace_start.sh",
                "trace_stop" : "./tests/trace_stop.sh",
                "perf_start" : "./tests/perf_start.sh",
                "perf_stop" : "./tests/perf_stop.sh"}
#
#devices = [{"hostname": "192.168.254.58", "ifname" : "wlan0", "port": "9877", "name" : "t2-ath9k", "flags" : "AP_HT40 STA_HT40"},
#           {"hostname": "192.168.254.58", "ifname" : "wlan1", "port": "9877", "name" : "t2-ath10k", "flags" : "AP_VHT80"},
#           {"hostname": "192.168.254.58", "ifname" : "wlan3", "port": "9877", "name" : "t2-intel7260", "flags" : "STA_VHT80"},
#           {"hostname": "192.168.254.55", "ifname" : "wlan0, wlan1, wlan2", "port": "", "name" : "t3-monitor"},
#           {"hostname": "192.168.254.50", "ifname" : "wlan0", "port": "9877", "name" : "t1-ath9k"},
#           {"hostname": "192.168.254.50", "ifname" : "wlan1", "port": "9877", "name" : "t1-ath10k"}]
#
# HWSIM - ifaces available after modprobe mac80211_hwsim
#
# Each entry: control hostname/interface/port of a device plus its
# capability flags (consumed by get_device() below).
devices = [{"hostname": "localhost", "ifname": "wlan0", "port": "9868", "name": "hwsim0", "flags": "AP_VHT80 STA_VHT80"},
           {"hostname": "localhost", "ifname": "wlan1", "port": "9878", "name": "hwsim1", "flags": "AP_VHT80 STA_VHT80"},
           {"hostname": "localhost", "ifname": "wlan2", "port": "9888", "name": "hwsim2", "flags": "AP_VHT80 STA_VHT80"},
           {"hostname": "localhost", "ifname": "wlan3", "port": "9898", "name": "hwsim3", "flags": "AP_VHT80 STA_VHT80"},
           {"hostname": "localhost", "ifname": "wlan4", "port": "9908", "name": "hwsim4", "flags": "AP_VHT80 STA_VHT80"}]
def get_setup_params(filename="cfg.py"):
    """Return ``setup_params`` from the user config module *filename*,
    falling back to the static defaults defined above."""
    try:
        mod = __import__(filename.split(".")[0])
        return mod.setup_params
    # A missing user config is the expected case; catching only
    # import/lookup failures avoids the original bare except, which
    # also swallowed KeyboardInterrupt and SystemExit.
    except (ImportError, AttributeError):
        logger.debug("__import__(" + filename + ") failed, using static settings")
    return setup_params
def get_devices(filename="cfg.py"):
    """Return ``devices`` from the user config module *filename*,
    falling back to the static list defined above."""
    try:
        mod = __import__(filename.split(".")[0])
        return mod.devices
    # Narrow except: a bare except here would also swallow
    # KeyboardInterrupt/SystemExit (and hide syntax errors in cfg.py
    # behind a debug message).
    except (ImportError, AttributeError):
        logger.debug("__import__(" + filename + ") failed, using static settings")
    return devices
def get_device(devices, name=None, flags=None, lock=False):
    """Find a device entry by exact *name*, or failing that by a
    *flags* substring match.

    :param devices: list of device dicts (see the static list above)
    :param name: exact match against the ``name`` key
    :param flags: substring searched for inside the ``flags`` key
    :param lock: accepted for API compatibility; not used here
    :raises Exception: when no entry matches
    """
    if name is None and flags is None:
        raise Exception("Failed to get device")
    for device in devices:
        if device['name'] == name:
            return device
    if flags is not None:
        for device in devices:
            # Entries without a 'flags' key simply never match
            # (the original achieved this via a blanket try/except).
            if flags in device.get('flags', ''):
                return device
    # str() keeps the message well-formed when name is None; the
    # original '+' concatenation raised TypeError in that case.
    raise Exception("Failed to get device " + str(name))
def put_device(devices, name):
    # Placeholder: releasing a device back to the pool is not
    # implemented; get_device() does not actually lock, so there is
    # nothing to undo yet.
    pass
| s0lst1c3/eaphammer | local/hostapd-eaphammer/tests/remote/config.py | Python | gpl-3.0 | 3,543 |
#!/usr/bin/python
from pyliferisk import MortalityTable
from pyliferisk.mortalitytables import GKM95
import numpy as np

# Term-insurance example: expected yearly death benefit for a
# 40-year-old over a 20-year horizon, with mid-year discounting.
mt = MortalityTable(nt=GKM95)

x = 40      # age
n = 20      # horizon
C = 10000   # capital
i = 0.03    # interest rate

# Expected payment in year t: probability of death between ages
# x+t and x+t+1, times the insured capital.
payments = [(mt.lx[x + t] - mt.lx[x + t + 1]) / mt.lx[x] * C
            for t in range(n)]

# Mid-year discount factors 1 / (1+i)^(t+0.5).
discount_factor = [1 / (1 + i) ** (t + 0.5) for t in range(n)]

print('{0:5} {1:10} {2:10}'.format(' t', 'factor', 'payment'))
for t in range(n):
    print('{0:2} {1:10} {2:10}'.format(t, np.around(discount_factor[t], 5),
                                       np.around(payments[t], 4)))
| franciscogarate/pyliferisk | Examples/Example_2_2_3b.py | Python | gpl-3.0 | 608 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Inspection(models.Model):
    """Maintenance/inspection record performed on a property."""
    _name = 'property.inspection'
    _order = 'date desc'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    property_id = fields.Many2one('property', string='Property ID', required=True, readonly=True)

    ### inspection ###
    # What was done, when, and by whom.
    date = fields.Date(string='Date', required=True,)
    inspector_id = fields.Many2one('res.users', string='Inspector')
    act_type = fields.Selection([
        ('inspect', 'Tenken'),
        ('routine_inspection', 'Teikitenken'),
        ('change', 'Koukan'),
        ('repair', 'Syuri'),
        ('coordinate', 'Tyousei'),
        ('others', 'Other'),],
        string='Act type')
    inspection_note = fields.Text(string='Note')
    product_memo = fields.Text(string='product_memo', help='Koukan sita kiki wo kaitene')

    ### request ###
    # Read-only fields mirrored (related) from the originating request.
    request_id = fields.Many2one('property.inspection.request', string='Request')
    request_date = fields.Date(string='request_date', related='request_id.date', readonly=True)
    requester_name = fields.Char(string='requester_name', related='request_id.partner_id.name', readonly=True)
    request_note = fields.Text(string='request_note', related='request_id.request_note', readonly=True)
    responder_name = fields.Char(string='responder_name', related='request_id.user_id.name', readonly=True)

    ### ###
    # Workflow state of the inspection.
    state = fields.Selection([
        ('ongoing', 'Taioutyu'),
        ('arranging', 'Tehaityu'),
        ('finishing', 'Kanryo'),],
        string='state')
class InspectionRequest(models.Model):
    """Incoming request that an inspection be carried out."""
    _name = 'property.inspection.request'
    _order = 'date desc'

    date = fields.Date(string='Date', required=True, copy=False,)
    partner_id = fields.Many2one('res.partner', string='partner_id',)
    request_note = fields.Text(string='request_note',)
    user_id = fields.Many2one('res.users', string='user_id', required=True, help='hosyu no irai wo uketahitoy')

    @api.model
    def create(self, vals):
        # Assign the next 'inspection.request' sequence number unless a
        # name was explicitly supplied by the caller.
        # NOTE(review): no 'name' field is declared on this model in
        # this file -- presumably provided elsewhere; confirm.
        if vals.get('name', 'New') == 'New':
            vals['name'] = self.env['ir.sequence'].next_by_code('inspection.request') or 'New'
        result = super(InspectionRequest, self).create(vals)
        return result
| ichi23de5/ichi_Repo | property/models/inspection.py | Python | gpl-3.0 | 2,328 |
import re
import sys
class SrtSection:
    """
    This class is used to stock a section from a srt file (subtitle frames).
    - self.beginning is the time (in seconds) where the subframe begins
    - self.duration is the duration (in seconds) of the subframe
    - self.content is the content of the subframe
    """

    def __init__(self, beginning, duration, content):
        self.beginning = beginning
        self.duration = duration
        self.content = content

    def __repr__(self):
        return '({0}, {1}), "{2}"'.format(self.beginning, self.duration, self.content.encode("unicode_escape").decode())

    def export(self):
        """
        Exports the section to a formatted string (time line followed
        by the subtitle content).
        """
        return self.__export_tdata() + '\n' + self.content

    def __export_tdata(self):
        """
        Writes the time section in the srt syntax
        (``HH:MM:SS,mmm --> HH:MM:SS,mmm``) from (beginning, duration).
        """
        times = []
        for temps in (self.beginning, self.beginning + self.duration):
            # Work in integer milliseconds so rounding can never produce
            # an out-of-range field (the old per-field rounding could
            # yield 1000 ms for values like 1.9996s), then split into
            # hours / minutes / seconds / milliseconds.
            total_ms = int(round(temps * 1000))
            hours, rest = divmod(total_ms, 3600 * 1000)
            minutes, rest = divmod(rest, 60 * 1000)
            seconds, miliseconds = divmod(rest, 1000)
            # Zero-pad every field: '0:1:5,40' is not valid srt,
            # '00:01:05,040' is.
            times.append('{0:02d}:{1:02d}:{2:02d},{3:03d}'.format(
                hours, minutes, seconds, miliseconds))
        return ' --> '.join(times)
class SrtSubs:
    """
    This class is used to stock and manipulate sections from a srt file.
    self.sections, where all the datas are stored, is a list of SrtSections.
    """

    def __init__(self, string):
        """
        string is the content of the srt file.
        """
        self.rawsections = [s.strip() for s in string.split("\n\n") if s != '']
        self.sections = self.__extract_sections()

    def __extract_sections(self):
        """
        Extracts all the informations from a list containing all the
        sections of the file, returning a list of SrtSections with
        beginning and duration in seconds and content the sub to show.
        """
        sections = []
        for section in self.rawsections:
            lines = section.split('\n')
            # lines[0] is the running index, lines[1] the time range,
            # the rest is the (possibly multi-line) subtitle text.
            beginning, duration = self.__extract_tdata(lines[1])
            content = "\n".join(lines[2:])
            sections.append(SrtSection(beginning, duration, content))
        return sections

    def export_sections(self):
        """
        Writes the sections to a string to be written to the subs file
        """
        secs = []
        for number, section in enumerate(self.sections):
            sec = str(number + 1) + '\n'
            sec += section.export()
            secs.append(sec)
        return '\n\n'.join(secs)

    def __extract_tdata(self, timesection):
        """
        Returns a tuple (beginning, duration) from
        the %H:%M:%S,%ms --> %H:%M:%S,%ms line.
        """
        tparts = timesection.split(" --> ")
        beginning_end = []
        for sec in tparts:
            hours, minutes, seconds, miliseconds = tuple(map(int, re.split("[:,]", sec)))
            beginning_end.append(3600 * hours + 60 * minutes + seconds + miliseconds / 1000)
        beginning, end = tuple(beginning_end)
        duration = end - beginning
        # Keep millisecond precision: round(duration) with no digits
        # truncated every sub-second duration to whole seconds, which
        # corrupted timings on re-export.
        return beginning, round(duration, 3)
| octarin/PySrt | PySrt.py | Python | gpl-3.0 | 3,556 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import utool as ut
from os.path import join, basename, splitext, exists, dirname
# from utool import util_inject
# print, rrr, profile = util_inject.inject2(__file__)
@ut.reloadable_class
class SourceDir(ut.NiceRepr):
    """
    Index of the files below a directory, with lazily-computed cached
    per-file properties (sizes, hashes, ...) used to find duplicates
    and merge media collections across drives.
    """

    def __init__(self, dpath):
        self.dpath = dpath
        self.rel_fpath_list = None

    def populate(self):
        """Scan the directory tree and (re)build the cached attribute table."""
        self.rel_fpath_list = ut.glob(self.dpath, '*', recursive=True,
                                      fullpath=False, with_dirs=False)
        self.attrs = {
            'fname': list(map(basename, self.rel_fpath_list)),
            'dname': list(map(dirname, self.rel_fpath_list)),
            'ext': list(map(lambda p: splitext(p)[1].lower().replace('.jpeg', '.jpg'), self.rel_fpath_list)),
        }

    def __len__(self):
        return len(self.rel_fpath_list)

    def index(self):
        """Compute md5 uuids for every file (lazily; self.uuids is a generator)."""
        fpaths = self.fpaths()
        prog = ut.ProgIter(fpaths, length=len(self), label='building uuid')
        self.uuids = self._md5(prog)

    # --- per-file property generators; get_prop() dispatches to these
    # --- by name ('_' + attrname), one value per input fpath.

    def _nbytes(self, fpaths):
        return (ut.get_file_nBytes(fpath) for fpath in fpaths)

    def _full_path(self, fpaths):
        return fpaths

    def _md5(self, fpaths):
        import hashlib
        return (ut.get_file_hash(fpath, hasher=hashlib.md5()) for fpath in fpaths)

    def _md5_stride(self, fpaths):
        # Cheaper approximate hash: only every 1024th block is hashed.
        import hashlib
        return (ut.get_file_hash(fpath, hasher=hashlib.md5(), stride=1024) for fpath in fpaths)

    def _crc32(self, fpaths):
        return (ut.cmd2('crc32 "%s"' % fpath)['out'] for fpath in fpaths)

    def _abs(self, rel_paths):
        for rel_path in rel_paths:
            yield join(self.dpath, rel_path)

    def get_prop(self, attrname, idxs=None):
        """
        Caching getter: returns attrs[attrname] for the requested idxs,
        computing and caching any values that are still None.
        """
        if attrname not in self.attrs:
            self.attrs[attrname] = [None for _ in range(len(self))]
        prop_list = self.attrs[attrname]
        if idxs is None:
            idxs = list(range(len(prop_list)))
            props = prop_list
        else:
            props = ut.take(prop_list, idxs)
        miss_flags = ut.flag_None_items(props)
        if any(miss_flags):
            miss_idxs = ut.compress(idxs, miss_flags)
            miss_fpaths = self._abs(ut.take(self.rel_fpath_list, miss_idxs))
            miss_iter = getattr(self, '_' + attrname)(miss_fpaths)
            miss_iter = ut.ProgIter(miss_iter, length=len(miss_idxs),
                                    label='Compute %s' % (attrname,))
            for idx, val in zip(miss_idxs, miss_iter):
                prop_list[idx] = val
            props = ut.take(prop_list, idxs)
        return props

    def find_needsmove_to_other(self, other):
        """Print the files present here (by strided md5) but missing in *other*."""
        hash1 = self.get_prop('md5_stride')
        hash2 = other.get_prop('md5_stride')
        idxs1 = list(range(len(hash1)))
        hash_to_idxs = ut.group_items(idxs1, hash1)
        # Find what we have that other doesnt have and move it there
        other_missing = set(hash1).difference(hash2)
        missing_idxs1 = ut.flatten(ut.take(hash_to_idxs, other_missing))
        data = ut.ColumnLists({
            'idx': missing_idxs1,
            'fname': self.get_prop('fname', missing_idxs1),
            'dname': self.get_prop('dname', missing_idxs1),
            'full_path': self.get_prop('full_path', missing_idxs1),
            'nbytes': self.get_prop('nbytes', missing_idxs1),
        })
        data = data.compress([f != 'Thumbs.db' for f in data['fname']])
        data['ext'] = self.get_prop('ext', data['idx'])
        ut.dict_hist(data['ext'])
        data.print(ignore=['full_path', 'dname'])

    def find_internal_duplicates(self):
        """Return files within this tree that share both size and md5."""
        # First find which files take up the same amount of space
        nbytes = self.get_prop('nbytes')
        dups = ut.find_duplicate_items(nbytes)
        # Now evaluate the hashes of these candidates
        cand_idxs = ut.flatten(dups.values())
        data = ut.ColumnLists({
            'idx': cand_idxs,
            'fname': self.get_prop('fname', cand_idxs),
            'dname': self.get_prop('dname', cand_idxs),
            'full_path': self.get_prop('full_path', cand_idxs),
            'nbytes': self.get_prop('nbytes', cand_idxs),
        })
        data.ignore = ['full_path', 'dname']
        data.print(ignore=['full_path', 'dname'])
        data['hash'] = self.get_prop('md5', data['idx'])
        data.print(ignore=['full_path', 'hash'])
        data.print(ignore=['full_path', 'dname'])
        multis = data.get_multis('hash')
        multis.print(ignore=data.ignore)
        return multis

    def analyze_internal_duplicats(self):
        """Count how often each pair of directories shares duplicate files."""
        multis = self.find_internal_duplicates()
        unique_dnames = set([])
        associations = ut.ddict(lambda: 0)
        for sub in multis.group_items('hash').values():
            dnames = sub['dname']
            unique_dnames.update(dnames)
            for dn1, dn2 in ut.combinations(dnames, 2):
                key = tuple(sorted([dn1, dn2]))
                associations[key] += 1
            print(sub['dname'])

    def find_nonunique_names(self):
        """Inspect files sharing a basename, grouped by content uuid."""
        fnames = map(basename, self.rel_fpath_list)
        duplicate_map = ut.find_duplicate_items(fnames)
        groups = []
        for dupname, idxs in duplicate_map.items():
            # NOTE(review): get_prop('uuids') / get_prop('abs') dispatch
            # to _uuids/_abs; only _abs exists as a generator method --
            # confirm the intended attr names (probably 'md5').
            uuids = self.get_prop('uuids', idxs)
            fpaths = self.get_prop('abs', idxs)
            groups = ut.group_items(fpaths, uuids)
            if len(groups) > 1:
                if all(x == 1 for x in map(len, groups.values())):
                    # All groups are different, this is an simpler case
                    print(ut.repr2(groups, nl=3))
                else:
                    # Need to handle the multi-item groups first
                    pass

    def consolodate_duplicates(self):
        """Group same-name files by uuid and take the multi-member groups."""
        fnames = map(basename, self.rel_fpath_list)
        duplicate_map = ut.find_duplicate_items(fnames)
        groups = []
        for dupname, idxs in duplicate_map.items():
            uuids = self.get_prop('uuids', idxs)
            unique_uuids, groupxs = ut.group_indices(uuids)
            groups.extend(ut.apply_grouping(idxs, groupxs))
        multitons = [g for g in groups if len(g) > 1]
        ut.unflat_take(list(self.fpaths()), multitons)

    def duplicates(self):
        """Return groups of relative paths sharing the same uuid (needs index())."""
        uuid_to_dupxs = ut.find_duplicate_items(self.uuids)
        dup_fpaths = [ut.take(self.rel_fpath_list, idxs) for idxs in uuid_to_dupxs.values()]
        return dup_fpaths

    def nbytes(self):
        return sum(self.attrs['nbytes'])

    def ext_hist(self):
        return ut.dict_hist(self.attrs['ext'])

    def fpaths(self):
        return self._abs(self.rel_fpath_list)

    def __nice__(self):
        return self.dpath

    def isect_info(self, other):
        """Print set statistics comparing this tree's paths with *other*'s."""
        set1 = set(self.rel_fpath_list)
        set2 = set(other.rel_fpath_list)
        set_comparisons = ut.odict([
            ('s1', set1),
            ('s2', set2),
            ('union', set1.union(set2)),
            ('isect', set1.intersection(set2)),
            ('s1 - s2', set1.difference(set2)),
            # BUG FIX: was set1.difference(set1), which is always empty.
            ('s2 - s1', set2.difference(set1)),
        ])
        stat_stats = ut.map_vals(len, set_comparisons)
        print(ut.repr4(stat_stats))
        return set_comparisons

        if False:
            idx_lookup1 = ut.make_index_lookup(self.rel_fpath_list)
            idx_lookup2 = ut.make_index_lookup(other.rel_fpath_list)
            uuids1 = ut.take(self.uuids, ut.take(idx_lookup1, set_comparisons['union']))
            uuids2 = ut.take(other.uuids, ut.take(idx_lookup2, set_comparisons['union']))
            uuids1 == uuids2

    def make_merge_bash_script(self, dest):
        """Return (and print) a bash one-liner moving all files into *dest*."""
        import subprocess
        # find $SOURCE_DIR -name '*' -type f -exec mv -f {} $TARGET_DIR \;
        bash_cmd = subprocess.list2cmdline(
            ['find', self.dpath, '-name', '\'*\'', '-type', 'f', '-exec', 'mv', '-f', '{}', dest.dpath, '\;'])
        print(bash_cmd)
        return bash_cmd

    def merge_into(self, dest):
        """Move every file in this tree to the same relative path under *dest*."""
        import shutil
        print('Preparing merge %r into %r' % (self, dest))
        move_tasks = [
            (join(self.dpath, rel_fpath), join(dest.dpath, rel_fpath))
            for rel_fpath in self.rel_fpath_list
        ]
        # Refuse the whole merge up-front rather than clobber anything.
        for src, dst in move_tasks:
            if exists(dst):
                raise Exception('dont overwrite yet')

        def trymove(src, dst):
            try:
                shutil.move(src, dst)
            except OSError:
                return 1
            return 0

        error_list = [
            trymove(src, dst)
            for (src, dst) in ut.ProgIter(move_tasks, lbl='moving')
        ]
        assert not any(error_list), 'error merging'
        return error_list

    def find_empty_dirs(self):
        """ find dirs with only dirs in them """
        self.rel_dpath_list = ut.glob(self.dpath, '*', recursive=True,
                                      fullpath=False, with_dirs=True, with_files=False)
        counts = {dpath: 0 for dpath in self.rel_dpath_list}
        for fpath in self.rel_fpath_list:
            tmp = dirname(fpath)
            while tmp:
                counts[tmp] += 1
                tmp = dirname(tmp)
        empty_dpaths = [dpath for dpath, count in counts.items() if count == 0]
        return empty_dpaths

    def delete_empty_directories(self):
        """
        Remove every subdirectory that contains no files, e.g.::

            ut.ensuredir(self.dpath + '/foo')
            ut.ensuredir(self.dpath + '/foo/bar')
            ut.ensuredir(self.dpath + '/foo/bar/baz')
            self.delete_empty_directories()
        """
        import os
        if True:
            # Find all directories with no files
            subdirs = ut.glob(self.dpath, '*', recursive=True, fullpath=False,
                              with_files=False, with_dirs=True)
            freq = {d: 0 for d in subdirs + ['']}
            for path in self.rel_fpath_list:
                while True:
                    path = dirname(path)
                    freq[path] += 1
                    if not path:
                        break
            to_delete = [k for k, v in freq.items() if v == 0]
            # Remove deep dirs first
            to_delete = ut.sortedby(to_delete, map(len, to_delete))[::-1]
            for d in to_delete:
                dpath = join(self.dpath, d)
                print('Remove %s' % dpath)
                os.rmdir(dpath)
def turtles2():
    """
    from utool.experimental.file_organizer import *
    """
    # Scratch entry point: index a single hard-coded HotSpotter export.
    self = SourceDir('/raid/raw/RotanTurtles/Roatan HotSpotter Nov_21_2016')
    self.populate()
"""
Goal:
Organize media on a computer over multiple drives.
Fix duplicate strategy:
make graph where each file/directory is a node
make a directed edge whenever <path1> -- contains --> <path2>
Find all files with the same contents
    make an undirected edge whenever <file1.uuid> == <file2.uuid>
For each pair of the same files we need to assign them both to either
directory 1 or directory 2. Maybe do a min-st cut between directory 1 and
directory 2. for each pair of directories with the same file.
In this case dir1 should be merged into dir2 (cut all edges from dir1 to
its files)
dir1 dir2
---- ----
a.jpg --- a.jpg
b.jpg --- b.jpg
c.jpg --- c.jpg
d.jpg
e.jpg
We also need to know the association of each file with every other file in
its directory (or more precisely every other file in the system, but we can
ignore most, setting their association to 0)
"""
def turtles():
    """
    One-shot cleanup/merge script for the turtle photo drives.
    NOTE(review): experimental scratch code -- it deletes files,
    contains a deliberate `assert False` breakpoint in the overlap
    branch, and rebinds `self` as a loop variable.
    """
    # NOTE(review): 'recusrive' looks like a typo for 'recursive';
    # confirm whether ut.glob ignores or rejects unknown kwargs.
    source_dpaths = sorted(ut.glob('/raid/raw/RotanTurtles/', '*',
                                   recusrive=False, with_dirs=True,
                                   with_files=False))
    sources = [SourceDir(dpath) for dpath in source_dpaths]
    for self in ut.ProgIter(sources, label='populate'):
        self.populate()

    # Purge cached/computed artifacts (flann indexes, chip thumbnails).
    import fnmatch
    del_ext = set(['.npy', '.flann', '.npz'])
    for self in ut.ProgIter(sources, label='populate'):
        flags = [ext in del_ext for ext in self.attrs['ext']]
        to_delete = ut.compress(list(self.fpaths()), flags)
        ut.remove_file_list(to_delete)

        flags = [fnmatch.fnmatch(fpath, '*/_hsdb/computed/chips/*.png') for fpath in self.rel_fpath_list]
        to_delete = ut.compress(list(self.fpaths()), flags)
        ut.remove_file_list(to_delete)

        self.populate()

    for self in ut.ProgIter(sources, label='del empty'):
        self.populate()
        self.delete_empty_directories()

    print(ut.byte_str2(sum([self.nbytes() for self in sources])))
    # [ut.byte_str2(self.nbytes()) for self in sources]

    # import numpy as np
    # num_isect = np.zeros((len(sources), len(sources)))
    # num_union = np.zeros((len(sources), len(sources)))

    # Stop (assert False) at the first pair of trees sharing any path,
    # leaving s1/s2 bound for interactive inspection.
    for i, j in ut.combinations(range(len(sources)), 2):
        s1 = sources[i]
        s2 = sources[j]
        isect = set(s1.rel_fpath_list).intersection(s2.rel_fpath_list)
        # union = set(s1.rel_fpath_list).union(s2.rel_fpath_list)
        if isect:
            s1.isect_info(s2)
            print((i, j))
            print(s1.dpath)
            print(s2.dpath)
            self = s1
            other = s2
            assert False
        # print(isect)
        # break
        # num_isect[i, j] = len(isect)
        # num_union[i, j] = len(union)

    # for self in ut.ProgIter(sources, label='index'):
    #     self.index()

    for self in ut.ProgIter(sources, label='populate'):
        self.populate()

    dest = sources[0]
    others = sources[1:]

    # Merge others into dest
    bash_script = '\n'.join([o.make_merge_bash_script(dest) for o in others])
    print(bash_script)

    other = self
    for other in others:
        other.merge_into(dest)
    # [ut.byte_str2(self.nbytes()) for self in sources]

    # for self in sources:
    #     pass
if __name__ == '__main__':
    r"""
    CommandLine:
        python -m utool.experimental.file_organizer
        python -m utool.experimental.file_organizer --allexamples
    """
    import multiprocessing
    # Required so doctest subprocesses work under Windows' spawn model.
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
| Erotemic/local | misc/file_organizer.py | Python | gpl-3.0 | 15,764 |
#!/usr/bin/python
"""
An interface to the web-based PROvider Direct Access (PRODA) system
of Medicare Australia
"""
import mechanize # available via PIP
import re
m = mechanize.Browser()
m.open("https://proda.humanservices.gov.au/prodalogin/pages/public/login.jsf?TAM_OP=login&USER")
m.select_form(name="loginFormAndStuff")
m['loginFormAndStuff:inputPassword'] = "Drc232crq838"
m['loginFormAndStuff:username'] = 'ihaywood'
m.submit()
m.select_form(nr=0)
m['otp.user.otp'] = raw_input("Emailed code")
m.submit()
print m.reply()
#m.open("https://www2.medicareaustralia.gov.au:5447/pcert/hpos/home.do")
#m.select_form(name="termsAndConditionsForm")
#m['action'] = "I agree"
#m.submit()
#m.follow_link(text_regex=re.compile("Claims"))
#m.follow_link(text_regex=re.compile("Make a new claim"))
#m.follow_link(text_regex=re.compile("Medicare Bulk Bill Webclaim"))
print m.read() | ihaywood3/easygp | db/proda.py | Python | gpl-3.0 | 882 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**database_operations.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines the :class:`DatabaseOperations` Component Interface class and others helper objects.
**Others:**
"""
from __future__ import unicode_literals
import os
from PyQt4.QtCore import QString
from PyQt4.QtGui import QGridLayout
from PyQt4.QtGui import QMessageBox
import foundations.common
import foundations.data_structures
import foundations.exceptions
import foundations.verbose
import sibl_gui.components.core.database.operations
import umbra.engine
import umbra.ui.widgets.message_box as message_box
from manager.QWidget_component import QWidgetComponentFactory
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["LOGGER", "COMPONENT_UI_FILE", "DatabaseType", "DatabaseOperations"]
LOGGER = foundations.verbose.install_logger()
COMPONENT_UI_FILE = os.path.join(os.path.dirname(__file__), "ui", "Database_Operations.ui")
class DatabaseType(foundations.data_structures.Structure):
    """
    | Defines a storage object for manipulation methods associated to a given Database type.
    | See :mod:`sibl_gui.components.core.database.types` module for more informations
    about the available Database types.
    """

    def __init__(self, **kwargs):
        """
        Initializes the storage structure with the given attributes.

        :param kwargs: type, get_method, update_content_method, remove_method, model_container, update_location_method
        :type kwargs: dict
        """

        LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

        super(DatabaseType, self).__init__(**kwargs)
class DatabaseOperations(QWidgetComponentFactory(ui_file=COMPONENT_UI_FILE)):
"""
| Defines the :mod:`sibl_gui.components.addons.database_operations.database_operations` Component Interface class.
| It provides various methods to operate on the Database.
"""
    def __init__(self, parent=None, name=None, *args, **kwargs):
        """
        Initializes the class.

        :param parent: Object parent.
        :type parent: QObject
        :param name: Component name.
        :type name: unicode
        :param \*args: Arguments.
        :type \*args: \*
        :param \*\*kwargs: Keywords arguments.
        :type \*\*kwargs: \*\*
        """

        LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

        super(DatabaseOperations, self).__init__(parent, name, *args, **kwargs)

        # --- Setting class attributes. ---
        self.deactivatable = True

        # Engine / settings handles, resolved in activate().
        self.__engine = None
        self.__settings = None
        self.__settings_section = None

        # Sibling Components this one drives, resolved in activate().
        self.__preferences_manager = None
        self.__ibl_sets_outliner = None
        self.__templates_outliner = None

        # Tuple of DatabaseType entries describing the operable kinds.
        self.__types = None
    # ------------------------------------------------------------------
    # Read-only attribute accessors. Each attribute exposes a getter
    # only; the setter and deleter deliberately raise ProgrammingError
    # so external code cannot rebind or delete Component state.
    # ------------------------------------------------------------------

    @property
    def engine(self):
        """
        Property for **self.__engine** attribute.

        :return: self.__engine.
        :rtype: QObject
        """

        return self.__engine

    @engine.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def engine(self, value):
        """
        Setter for **self.__engine** attribute.

        :param value: Attribute value.
        :type value: QObject
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "engine"))

    @engine.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def engine(self):
        """
        Deleter for **self.__engine** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "engine"))

    @property
    def settings(self):
        """
        Property for **self.__settings** attribute.

        :return: self.__settings.
        :rtype: QSettings
        """

        return self.__settings

    @settings.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def settings(self, value):
        """
        Setter for **self.__settings** attribute.

        :param value: Attribute value.
        :type value: QSettings
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "settings"))

    @settings.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def settings(self):
        """
        Deleter for **self.__settings** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "settings"))

    @property
    def settings_section(self):
        """
        Property for **self.__settings_section** attribute.

        :return: self.__settings_section.
        :rtype: unicode
        """

        return self.__settings_section

    @settings_section.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def settings_section(self, value):
        """
        Setter for **self.__settings_section** attribute.

        :param value: Attribute value.
        :type value: unicode
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "settings_section"))

    @settings_section.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def settings_section(self):
        """
        Deleter for **self.__settings_section** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "settings_section"))

    @property
    def preferences_manager(self):
        """
        Property for **self.__preferences_manager** attribute.

        :return: self.__preferences_manager.
        :rtype: QWidget
        """

        return self.__preferences_manager

    @preferences_manager.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def preferences_manager(self, value):
        """
        Setter for **self.__preferences_manager** attribute.

        :param value: Attribute value.
        :type value: QWidget
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "preferences_manager"))

    @preferences_manager.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def preferences_manager(self):
        """
        Deleter for **self.__preferences_manager** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "preferences_manager"))

    @property
    def ibl_sets_outliner(self):
        """
        Property for **self.__ibl_sets_outliner** attribute.

        :return: self.__ibl_sets_outliner.
        :rtype: QWidget
        """

        return self.__ibl_sets_outliner

    @ibl_sets_outliner.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def ibl_sets_outliner(self, value):
        """
        Setter for **self.__ibl_sets_outliner** attribute.

        :param value: Attribute value.
        :type value: QWidget
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "ibl_sets_outliner"))

    @ibl_sets_outliner.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def ibl_sets_outliner(self):
        """
        Deleter for **self.__ibl_sets_outliner** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "ibl_sets_outliner"))

    @property
    def templates_outliner(self):
        """
        Property for **self.__templates_outliner** attribute.

        :return: self.__templates_outliner.
        :rtype: QWidget
        """

        return self.__templates_outliner

    @templates_outliner.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def templates_outliner(self, value):
        """
        Setter for **self.__templates_outliner** attribute.

        :param value: Attribute value.
        :type value: QWidget
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "templates_outliner"))

    @templates_outliner.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def templates_outliner(self):
        """
        Deleter for **self.__templates_outliner** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "templates_outliner"))

    @property
    def types(self):
        """
        Property for **self.__types** attribute.

        :return: self.__types.
        :rtype: tuple
        """

        return self.__types

    @types.setter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def types(self, value):
        """
        Setter for **self.__types** attribute.

        :param value: Attribute value.
        :type value: tuple
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "types"))

    @types.deleter
    @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
    def types(self):
        """
        Deleter for **self.__types** attribute.
        """

        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "types"))
    def activate(self, engine):
        """
        Activates the Component.

        :param engine: Engine to attach the Component to.
        :type engine: QObject
        :return: Method success.
        :rtype: bool
        """

        LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))

        self.__engine = engine
        self.__settings = self.__engine.settings
        self.__settings_section = self.name

        # Resolve the sibling Components this one operates on.
        self.__preferences_manager = self.__engine.components_manager["factory.preferences_manager"]
        self.__ibl_sets_outliner = self.__engine.components_manager["core.ibl_sets_outliner"]
        self.__templates_outliner = self.__engine.components_manager["core.templates_outliner"]

        # One DatabaseType per operable Database object kind, bundling
        # its get / update / remove operations with the ui container to
        # refresh and the "locate it again" ui helper.
        self.__types = (DatabaseType(type="Ibl Set",
                                     get_method=sibl_gui.components.core.database.operations.get_ibl_sets,
                                     update_content_method=sibl_gui.components.core.database.operations.update_ibl_set_content,
                                     remove_method=sibl_gui.components.core.database.operations.remove_ibl_set,
                                     model_container=self.__ibl_sets_outliner,
                                     update_location_method=self.__ibl_sets_outliner.update_ibl_set_location_ui),
                        DatabaseType(type="Template",
                                     get_method=sibl_gui.components.core.database.operations.get_templates,
                                     update_content_method=sibl_gui.components.core.database.operations.update_template_content,
                                     remove_method=sibl_gui.components.core.database.operations.remove_template,
                                     model_container=self.__templates_outliner,
                                     update_location_method=self.__templates_outliner.update_template_location_ui))

        self.activated = True
        return True
def deactivate(self):
"""
Deactivates the Component.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Deactivating '{0}' Component.".format(self.__class__.__name__))
self.__engine = None
self.__settings = None
self.__settings_section = None
self.__preferences_manager = None
self.__ibl_sets_outliner = None
self.__templates_outliner = None
self.activated = False
return True
	def initialize_ui(self):
		"""
		Initializes the Component ui, connecting the update / cleanup buttons.
		Connections are skipped entirely when the Database is read only.

		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))

		# Signals / Slots.
		if not self.__engine.parameters.database_read_only:
			self.Update_Database_pushButton.clicked.connect(self.__Update_Database_pushButton__clicked)
			self.Remove_Invalid_Data_pushButton.clicked.connect(self.__Remove_Invalid_Data_pushButton__clicked)
		else:
			LOGGER.info(
			"{0} | Database Operations capabilities deactivated by '{1}' command line parameter value!".format(
			self.__class__.__name__, "database_read_only"))

		self.initialized_ui = True
		return True
	def uninitialize_ui(self):
		"""
		Uninitializes the Component ui, mirroring :meth:`initialize_ui` by
		disconnecting the button slots (only if they were connected).

		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Uninitializing '{0}' Component ui.".format(self.__class__.__name__))

		# Signals / Slots.
		if not self.__engine.parameters.database_read_only:
			self.Update_Database_pushButton.clicked.disconnect(self.__Update_Database_pushButton__clicked)
			self.Remove_Invalid_Data_pushButton.clicked.disconnect(self.__Remove_Invalid_Data_pushButton__clicked)

		self.initialized_ui = False
		return True
def add_widget(self):
"""
Adds the Component Widget to the engine.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Adding '{0}' Component Widget.".format(self.__class__.__name__))
self.__preferences_manager.Others_Preferences_gridLayout.addWidget(self.Database_Operations_groupBox)
return True
	def remove_widget(self):
		"""
		Removes the Component Widget from the engine.

		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Removing '{0}' Component Widget.".format(self.__class__.__name__))

		# NOTE(review): ``removeWidget(self)`` removes the Component itself
		# from the layout although :meth:`add_widget` inserted
		# ``Database_Operations_groupBox`` -- reparenting the group box below
		# is what actually detaches it; verify the asymmetry is intended.
		self.__preferences_manager.findChild(QGridLayout, "Others_Preferences_gridLayout").removeWidget(self)
		self.Database_Operations_groupBox.setParent(None)
		return True
	def __Update_Database_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Update_Database_pushButton** Widget
		when clicked: delegates to :meth:`update_database`.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.update_database()
	def __Remove_Invalid_Data_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Remove_Invalid_Data_pushButton**
		Widget when clicked: delegates to :meth:`remove_invalid_data`.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.remove_invalid_data()
	@umbra.engine.show_processing("Updating Database ...")
	def update_database(self):
		"""
		| Updates the Database.
		| Each type defined by :meth:`DatabaseOperations.sibl_gui.components.core.database.types` attribute
		will have its instances checked and updated by their associated methods.
		Items whose file is missing trigger a prompt to relocate them; the
		custom "No To All" button (index 0) aborts the current type's loop.

		:return: Method success.
		:rtype: bool
		"""

		for type in self.__types:
			for item in type.get_method():
				if foundations.common.path_exists(item.path):
					if type.update_content_method(item):
						LOGGER.info("{0} | '{1}' {2} has been updated!".format(self.__class__.__name__,
						item.name,
						type.type))
				else:
					choice = message_box.message_box("Question", "Error",
					"{0} | '{1}' {2} file is missing, would you like to update it's location?".format(
					self.__class__.__name__, item.name, type.type),
					QMessageBox.Critical, QMessageBox.Yes | QMessageBox.No,
					custom_buttons=((QString("No To All"), QMessageBox.RejectRole),))

					# "No To All" custom button is reported as 0.
					if choice == 0:
						break

					if choice == QMessageBox.Yes:
						type.update_location_method(item)
				# Keep the ui responsive during long updates.
				self.__engine.process_events()
			type.model_container.refresh_nodes.emit()
		self.__engine.stop_processing()
		self.__engine.notifications_manager.notify("{0} | Database update done!".format(self.__class__.__name__))
		return True
	@umbra.engine.show_processing("Removing Invalid Data ...")
	def remove_invalid_data(self):
		"""
		Removes invalid data from the Database: after user confirmation, every
		item whose backing file no longer exists on disk is deleted.

		:return: Method success.
		:rtype: bool
		"""

		if message_box.message_box("Question", "Question",
		"Are you sure you want to remove invalid data from the Database?",
		buttons=QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes:
			for type in self.__types:
				for item in type.get_method():
					# Items with an existing file are valid, keep them.
					if foundations.common.path_exists(item.path):
						continue

					LOGGER.info(
					"{0} | Removing non existing '{1}' {2} from the Database!".format(self.__class__.__name__,
					item.name,
					type.type))
					type.remove_method(item.id)

					# Keep the ui responsive during long removals.
					self.__engine.process_events()
				type.model_container.refresh_nodes.emit()
			self.__engine.stop_processing()
			self.__engine.notifications_manager.notify(
			"{0} | Invalid data removed from Database!".format(self.__class__.__name__))
		return True
| KelSolaar/sIBL_GUI | sibl_gui/components/addons/database_operations/database_operations.py | Python | gpl-3.0 | 19,077 |
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse_lazy, reverse
from django.views.generic import ListView, DetailView, FormView, CreateView, \
UpdateView
from django.views.generic.base import TemplateView, View
from django.contrib.auth.mixins import LoginRequiredMixin
from . import xml_hero
from .models import Group, Hero
from .forms import GroupForm, HeroAddForm, CharsheetUploadForm
# Create your views here.
class HomeView(TemplateView):
    """Static landing page."""
    template_name = "home.html"
class GroupView(LoginRequiredMixin, ListView):
    """List every gaming group of the logged-in user, split by role."""

    model = Group
    template_name = 'groups_all.html'

    def get_context_data(self, **kwargs):
        context = super(GroupView, self).get_context_data(**kwargs)
        current_user = self.request.user
        # Groups the user runs as game master vs. groups they play in.
        context.update({
            'gm_groups': current_user.gaming_group_master.all(),
            'player_groups': current_user.gaming_group.all(),
        })
        return context
class GroupDetailView(LoginRequiredMixin, DetailView):
    """Show one group; players without a hero are redirected to hero creation.

    Fix: removed a leftover ``print(self.object.players.all())`` debug
    statement that evaluated the queryset twice and spammed stdout.
    """

    model = Group
    template_name = 'groups_detail.html'

    def get_object(self, queryset=None):
        # Groups are addressed by name in the URL instead of the default pk.
        if queryset is None:
            queryset = self.get_queryset()
        return queryset.get(name=self.kwargs['group'])

    def render_to_response(self, context, **response_kwargs):
        user = self.request.user
        # A player who has not yet created a hero for this group is sent to
        # the hero creation form before seeing the group page.
        if user in self.object.players.all() and not user.hero_set.filter(
                group=self.object).exists():
            return redirect(
                reverse_lazy('hero:group_add_hero', kwargs=self.kwargs))
        else:
            return super(GroupDetailView, self).render_to_response(context)
class GroupAdminView(LoginRequiredMixin, DetailView):
    """Administration page for a single group.

    Fix: ``get_context_data`` called ``super(GroupView, self)`` although
    ``GroupView`` is not in this class' MRO, which raises ``TypeError``
    ("obj must be an instance or subtype of type") at request time.
    """

    model = Group
    template_name = 'groups_all.html'

    def get_context_data(self, **kwargs):
        context = super(GroupAdminView, self).get_context_data(**kwargs)
        user = self.request.user
        context['gm_groups'] = user.gaming_group_master.all()
        context['player_groups'] = user.gaming_group.all()
        return context
class GroupAddView(LoginRequiredMixin, CreateView):
    """Create a new gaming group owned (GM'd) by the current user.

    Fixes: the many-to-many ``players`` field was passed to the ``Group``
    constructor (Django forbids direct m2m assignment before the instance
    has a primary key), and ``self.object`` was never set although
    ``get_success_url`` dereferences it.
    """

    template_name = 'groups_add.html'
    form_class = GroupForm

    def form_valid(self, form):
        # Build the group manually so the current user can be attached as
        # game master (the form does not expose that field).
        group = Group(
            name=form.cleaned_data['name'],
            rule_version=form.cleaned_data['rule_version'],
            description=form.cleaned_data['description'],
            game_master=self.request.user
        )
        group.save()
        # M2M relations can only be assigned once the row exists.
        group.players.set(form.cleaned_data['players'])
        # get_success_url expects self.object to be populated.
        self.object = group
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse_lazy('hero:group_detail', args=(self.object.name,))
class PlayerDetailView(LoginRequiredMixin, DetailView):
    """Placeholder: player detail page (not implemented yet)."""
    pass
class GroupAddHeroView(LoginRequiredMixin, CreateView):
    """Form for a player to create their hero inside a given group."""

    template_name = "groups_add_hero.html"
    form_class = HeroAddForm

    def get_context_data(self, **kwargs):
        context = super(CreateView, self).get_context_data(**kwargs)
        # Expose the group name from the URL to the template.
        context['group_name'] = self.kwargs['group']
        return context

    def get_form_kwargs(self):
        # pass "user" keyword argument with the current user to your form
        kwargs = super(CreateView, self).get_form_kwargs()
        kwargs['player'] = self.request.user
        kwargs['group'] = Group.objects.get(name=self.kwargs['group'])
        return kwargs

    def form_valid(self, form):
        # NOTE(review): the form is never saved here (no form.save() / call
        # to super().form_valid()) -- presumably HeroAddForm persists the
        # hero itself; verify, otherwise no Hero row is ever created.
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse('hero:group_detail', args=(self.kwargs['group'],))
class ProfileView(LoginRequiredMixin, DetailView):
    """Placeholder: user profile page (not implemented yet)."""
    pass
class HeroCharsheetView(DetailView):
    """Render a hero's character sheet parsed from its uploaded XML file.

    Fix: ``get`` forwarded ``self`` explicitly through the already-bound
    ``super(...).get`` call, shifting every positional argument by one
    (``request`` ended up in ``*args``).
    """

    model = Hero
    template_name = "hero_charsheet.html"

    def get_object(self, queryset=None):
        # Heroes are addressed by (name, group) from the URL, not by pk.
        if queryset is None:
            queryset = self.get_queryset()
        return queryset.get(name=self.kwargs['hero'],
                            group=self.kwargs['group'])

    def get_context_data(self, **kwargs):
        # NOTE(review): super(DetailView, self) deliberately(?) skips
        # DetailView's own get_context_data -- confirm.
        context = super(DetailView, self).get_context_data(**kwargs)
        try:
            context['charsheet'] = xml_hero.get_hero(
                self.object.group.rule_version,
                self.object.char_sheet.read())
        finally:
            # The bare return in ``finally`` swallows any parsing error so
            # the page still renders without a charsheet.
            return context

    def get(self, request, *args, **kwargs):
        if not self.get_object().char_sheet:
            self.template_name = "no_charsheet.html"
        return super(HeroCharsheetView, self).get(request, *args, **kwargs)
class GroupHeroView(LoginRequiredMixin, DetailView):
    """Placeholder: hero-within-group page (not implemented yet)."""
    pass
class HeroAddCharsheetView(LoginRequiredMixin, UpdateView):
    """Upload (or replace) the XML character sheet of an existing hero.

    Fix: removed a leftover ``print(self.kwargs['group'])`` debug statement
    from ``get_success_url``.
    """

    model = Hero
    fields = ['char_sheet']
    template_name = 'charsheet_upload.html'

    def get_object(self, queryset=None):
        # Heroes are addressed by (name, group) from the URL, not by pk.
        if queryset is None:
            queryset = self.get_queryset()
        return queryset.get(name=self.kwargs['hero'],
                            group=self.kwargs['group'])

    def get_success_url(self):
        return reverse('hero:group_detail', args=(self.kwargs['group'],))
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.generic import DetailView, ListView, CreateView
from .models import DiaryEntry, Adventure
from hero.models import Group, Hero
class AdventureView(LoginRequiredMixin, DetailView):
    """Overview page for one adventure, looked up by (group, adventure) name."""

    model = Adventure
    template_name = "adventure_overview.html"

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        # Expose the owning group to the template.
        context['group'] = Group.objects.get(name=self.kwargs['group'])
        return context

    def get_object(self, queryset=None):
        queryset = self.get_queryset() if queryset is None else queryset
        owning_group = Group.objects.get(name=self.kwargs['group'])
        return queryset.get(group=owning_group, name=self.kwargs['adventure'])
class DiaryView(LoginRequiredMixin, ListView):
    """List diary entries; exposes the hero's group to the template."""

    model = DiaryEntry
    template_name = 'diary_overview.html'

    def get_context_data(self, **kwargs):
        context = super(ListView, self).get_context_data(**kwargs)
        # Resolve the hero named in the URL, then surface its group.
        owning_hero = Hero.objects.get(name=self.kwargs['hero'])
        context['group'] = owning_hero.group
        return context
class DiaryEntryView(LoginRequiredMixin, DetailView):
    """Render a single diary entry."""

    model = DiaryEntry
    template_name = "diary_entry.html"

    def get_object(self, queryset=None):
        if queryset is None:
            queryset = self.get_queryset()
        # NOTE(review): this returns the whole *queryset* instead of a single
        # DiaryEntry instance (no filtering by URL kwargs) -- looks like a
        # bug; confirm against the URL configuration and template usage.
        return queryset
class AddDiaryEntryView(LoginRequiredMixin, CreateView):
    """Create a diary entry attached to the adventure named in the URL."""

    model = DiaryEntry
    fields = ('name', 'date', 'entry', 'hero')
    template_name = 'add_diary_entry.html'

    def get_context_data(self, **kwargs):
        context = super(CreateView, self).get_context_data(**kwargs)
        context['adventure'] = Adventure.objects.get(
            name=self.kwargs['adventure'])
        return context

    def form_valid(self, form):
        # The adventure is taken from the URL, not from the form.
        adventure = Adventure.objects.get(name=self.kwargs['adventure'])
        diary_entry = DiaryEntry(
            name=form.cleaned_data['name'],
            date=form.cleaned_data['date'],
            entry=form.cleaned_data['entry'],
            hero=form.cleaned_data['hero'],
            adventure=adventure,
        )
        diary_entry.save()
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form):
        # NOTE(review): invalid forms are silently redirected to the success
        # URL (and errors only printed to stdout) -- the user never sees the
        # validation errors; confirm this is intended.
        print(form.errors)
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse('adventure:hero_adventure_diary',
                       args=(self.kwargs['hero'], self.kwargs['adventure']))
| Iliricon/hero-manager | src/hero/views.py | Python | gpl-3.0 | 7,791 |
# -*- coding: utf-8 -*-
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2015 Andrew Colin Kissa <andrew@topdog.za.net>
# vim: ai ts=4 sts=4 et sw=4
"status tasks"
import os
import datetime
import psutil
from StringIO import StringIO
from pylons import config
from celery.task import task
from sqlalchemy.pool import NullPool
from eventlet.green import subprocess
from sqlalchemy import desc
from sqlalchemy import engine_from_config
from sqlalchemy.exc import DatabaseError
from sphinxapi import SphinxClient, SPH_MATCH_EXTENDED2
from reportlab.lib import colors
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Table
from reportlab.lib.styles import ParagraphStyle
from reportlab.platypus import Paragraph, Image, Spacer, TableStyle
from baruwa.model.meta import Session
from baruwa.lib.graphs import PIE_TABLE
from baruwa.lib.net import system_hostname
from baruwa.lib.misc import extract_sphinx_opts
from baruwa.lib.query import clean_sphinx_q
from baruwa.lib.mail.queue.exim import EximQueue
from baruwa.lib.mail.message import PreviewMessage
from baruwa.lib.mail.queue.convert import Exim2Mbox
from baruwa.lib.mail.queue.search import search_queue
from baruwa.model.status import AuditLog, CATEGORY_MAP
from baruwa.commands.queuestats import update_queue_stats
from baruwa.lib.regex import EXIM_MSGID_RE, BAYES_INFO_RE
from baruwa.lib.outputformats import build_csv, BaruwaPDFTemplate
from baruwa.lib.misc import get_processes, get_config_option, wrap_string, _
STYLES = getSampleStyleSheet()  # shared ReportLab paragraph styles
# Celery workers may import this module outside a configured Pylons app, so
# bind the SQLAlchemy session lazily if it has not been configured yet.
# NullPool: tasks are short lived, keep no pooled connections around.
if not Session.registry.has():
    try:
        engine = engine_from_config(config, 'sqlalchemy.', poolclass=NullPool)
        Session.configure(bind=engine)
    except KeyError:
        # No sqlalchemy.* keys in config: leave the session unbound.
        pass
@task(name="get-system-status")
def systemstatus():
    """Collect a host health snapshot (CPU, memory, load, network, disks and
    liveness of the exim / MailScanner / clamd processes) for the status page.
    Returns a plain dict so it serializes cleanly over the task queue."""
    logger = systemstatus.get_logger()
    logger.info("Checking system status")
    stats = dict(mem=None,
                cpu=None,
                load=None,
                net=[],
                mta=None,
                scanners=None,
                time=None,
                uptime=None,
                av=None,
                partitions=[])

    def _obj2dict(obj):
        "convert object attribs to dict"
        # psutil returns namedtuples; flatten them for serialization.
        val = {}
        for key in obj._fields:
            val[key] = getattr(obj, key)
        return val

    pipe = subprocess.Popen(["uptime"], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    upt = pipe.communicate()[0].split()
    pipe.wait(timeout=2)
    pipe = subprocess.Popen(["date"], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stats['time'] = pipe.communicate()[0]
    pipe.wait(timeout=2)
    # NOTE(review): fields 2 and 3 of `uptime` output are assumed to be the
    # "up N days," tokens -- this is locale/format dependent; verify.
    stats['uptime'] = "%s %s" % (upt[2], upt[3].rstrip(','))
    stats['mem'] = _obj2dict(psutil.virtual_memory())
    stats['mem']['percent'] = ((stats['mem']['used']
                                / float(stats['mem']['total'])) * 100)
    stats['cpu'] = psutil.cpu_percent()
    stats['load'] = os.getloadavg()
    # Per-interface network I/O counters, flattened per interface.
    net = psutil.network_io_counters(True)
    infs = {}
    for inf in net:
        infs[inf] = _obj2dict(net[inf])
    stats['net'] = infs
    # Physical partitions only (all=False), merged with their usage figures.
    partitions = []
    for part in psutil.disk_partitions(all=False):
        usage = psutil.disk_usage(part.mountpoint)
        dpart = _obj2dict(part)
        dpart.update(_obj2dict(usage))
        partitions.append(dpart)
    stats['partitions'] = partitions
    stats['mta'] = get_processes('exim')
    stats['scanners'] = get_processes('MailScanner')
    stats['av'] = get_processes('clamd')
    return stats
@task(name="spamassassin-lint")
def salint():
    """Run ``spamassassin --lint`` in debug mode and return its output lines."""
    logger = salint.get_logger()
    logger.info("Running Spamassassin lint checks")
    process = subprocess.Popen(['spamassassin',
                                '-x',
                                '-D',
                                '--lint'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # The verbose lint diagnostics are written to stderr; drain it to EOF.
    lint = process.stderr.readlines()
    process.wait(timeout=2)
    return lint
@task(name="get-bayes-info")
def bayesinfo():
    """Parse ``sa-learn --dump magic`` output into a dict of Bayes database
    statistics (version, ham/spam counts, token counts and key timestamps)."""
    logger = bayesinfo.get_logger()
    logger.info("Generating Bayesian stats")
    info = {}
    saprefs = config.get(
        'ms.saprefs',
        '/etc/MailScanner/spam.assassin.prefs.conf'
    )
    pipe1 = subprocess.Popen(['sa-learn',
                              '-p',
                              saprefs,
                              '--dump',
                              'magic'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    while True:
        line = pipe1.stdout.readline()
        if not line:
            break
        # BAYES_INFO_RE: group(3) is the value, group(5) the magic token name.
        match = BAYES_INFO_RE.match(line)
        if match:
            if match.group(5) == 'bayes db version':
                info['version'] = match.group(3)
            elif match.group(5) == 'nspam':
                info['spam'] = match.group(3)
            elif match.group(5) == 'nham':
                info['ham'] = match.group(3)
            elif match.group(5) == 'ntokens':
                info['tokens'] = match.group(3)
            # atime values are unix timestamps; convert to datetime.
            elif match.group(5) == 'oldest atime':
                info['otoken'] = datetime.datetime\
                                .fromtimestamp(float(match.group(3)))
            elif match.group(5) == 'newest atime':
                info['ntoken'] = datetime.datetime\
                                .fromtimestamp(float(match.group(3)))
            elif match.group(5) == 'last journal sync atime':
                info['ljournal'] = datetime.datetime\
                                .fromtimestamp(float(match.group(3)))
            elif match.group(5) == 'last expiry atime':
                info['expiry'] = datetime.datetime\
                                .fromtimestamp(float(match.group(3)))
            elif match.group(5) == 'last expire reduction count':
                info['rcount'] = match.group(3)
    pipe1.wait(timeout=2)
    return info
@task(name="preview-queued-msg")
def preview_queued_msg(msgid, direction, attachid=None, imgid=None):
    """Preview a queued Exim message: returns the rendered preview dict, a
    single attachment (if ``attachid``) or an inline image (if ``imgid``).
    Returns ``{}`` on bad input and ``None`` when the message is missing."""
    try:
        logger = preview_queued_msg.get_logger()
        # Locate the spool files and convert the Exim queue format to mbox.
        header = search_queue(msgid, int(direction))
        convertor = Exim2Mbox(header)
        mbox = convertor()
        msgfile = StringIO(mbox)
        previewer = PreviewMessage(msgfile)
        if attachid:
            logger.info("Download attachment: %(attachid)s of "
                        "message: %(id)s",
                        dict(id=msgid, attachid=attachid))
            return previewer.attachment(attachid)
        if imgid:
            logger.info("Image access: %(img)s", dict(img=imgid))
            return previewer.img(imgid)
        logger.info("Preview of message: %(id)s", dict(id=msgid))
        return previewer.preview()
    except TypeError, type_error:
        logger.info("Error occured: %s" % str(type_error))
        return {}
    except (AssertionError, IOError), error:
        logger.info("Accessing message: %(id)s, Failed: %(error)s",
                    dict(id=msgid, error=error))
        return None
    finally:
        # msgfile only exists if conversion succeeded; close it either way.
        if 'msgfile' in locals():
            msgfile.close()
@task(name='process-queued-msgs', ignore_result=True)
def process_queued_msgs(msgids, action, direction, *args):
    """Apply ``action`` (bounce/delete/...) to queued messages via the Exim
    queue command, then refresh the local queue statistics."""
    try:
        logger = process_queued_msgs.get_logger()
        # Direction 2 = outbound queue, uses the configured Sendmail2 binary.
        eximcmd = get_config_option('Sendmail2') if direction == 2 else 'exim'
        if 'exim' not in eximcmd:
            logger.info("Invalid exim command: %s" % eximcmd)
            return
        # Inbound queue only supports bounce and delete.
        if direction == 1 and action not in ['bounce', 'delete']:
            logger.info("Invalid action: %s" % action)
            return
        exim_user = config.get('baruwa.mail.user', 'exim')
        queue = EximQueue('sudo -u %s %s' % (exim_user, eximcmd))
        func = getattr(queue, action)
        # Drop anything that does not look like an Exim message id.
        msgids = [msgid for msgid in msgids if EXIM_MSGID_RE.match(msgid)]
        func(msgids, *args)
        for result in queue.results:
            logger.info("STDOUT: %s" % result)
        if queue.errors:
            for errmsg in queue.errors:
                logger.info("STDERR: %s" % errmsg)
        hostname = system_hostname()
        update_queue_stats(hostname)
    except TypeError, error:
        logger.info("Invalid input: %s" % error)
    except AttributeError:
        # getattr failed: the requested action is not a queue method.
        logger.info("Invalid action: %s" % action)
@task(name='update-audit-log', ignore_result=True)
def update_audit_log(username,
                    category,
                    info,
                    hostname,
                    remoteip,
                    timestamp=None):
    """Append one entry to the audit log table; failures are logged, never
    raised (auditing must not break the calling request)."""
    logger = update_audit_log.get_logger()
    try:
        entry = AuditLog(username,
                        category,
                        info,
                        hostname,
                        remoteip)
        # Allow callers to back-date entries (e.g. replayed events).
        if timestamp:
            entry.timestamp = timestamp
        Session.add(entry)
        Session.commit()
        logger.info("Audit Log update for: %s from: %s" %
                    (username, remoteip))
    except DatabaseError, err:
        logger.error("Audit Log FAILURE: %s %s %s %s %s %s Error: %s" %
                    (username,
                    category,
                    info,
                    hostname,
                    remoteip,
                    timestamp,
                    err))
    finally:
        Session.close()
def build_pdf(rows):
    """Render audit-log ``rows`` (sequences of Paragraphs) into a PDF and
    return the document bytes."""
    pdffile = StringIO()
    doc = BaruwaPDFTemplate(pdffile, topMargin=50, bottomMargin=18)
    # Locate the bundled logo relative to the installed baruwa package.
    import baruwa
    here = os.path.dirname(
        os.path.dirname(os.path.abspath(baruwa.__file__))
    )
    logo = os.path.join(here, 'baruwa', 'public', 'imgs', 'logo.png')
    img = Image(logo)
    logobj = [(img, _('Audit Log exported report'))]
    logo_table = Table(logobj, [2.0 * inch, 5.4 * inch])
    logo_table.setStyle(PIE_TABLE)
    parts = [logo_table]
    parts.append(Spacer(1, 20))
    parts.append(Paragraph(_('Audit Logs'), STYLES['Heading1']))
    # Column headers are prepended as the first table row.
    heading = ((Paragraph(_('Date/Time'), STYLES["Heading6"]),
                Paragraph(_('Username'), STYLES["Heading6"]),
                Paragraph(_('Info'), STYLES["Heading6"]),
                Paragraph(_('Hostname'), STYLES["Heading6"]),
                Paragraph(_('Remote IP'), STYLES["Heading6"]),
                Paragraph(_('Action'), STYLES["Heading6"]), ))
    rows.insert(0, heading)
    # Fixed column widths tuned for A4-ish page width.
    table = Table(rows, [1.10 * inch, 1.23 * inch,
                        1.96 * inch, 1.69 * inch,
                        0.95 * inch, 0.45 * inch, ])
    table.setStyle(TableStyle([
        ('FONTSIZE', (0, 0), (-1, -1), 8),
        ('FONT', (0, 0), (-1, -1), 'Helvetica'),
        ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
    ]))
    parts.append(table)
    doc.title = _('Baruwa Audit log export')
    doc.build(parts)
    return pdffile.getvalue()
@task(name='export-audit-log')
def export_auditlog(format, query):
    """Export the audit log to CSV or PDF, optionally narrowed by a Sphinx
    full-text ``query``; returns a dict with file bytes and metadata."""
    logger = export_auditlog.get_logger()
    filename = 'auditlog-%s.%s' % (export_auditlog.request.id, format)
    content_type = 'text/csv' if format == 'csv' else 'application/pdf'
    results = dict(id=export_auditlog.request.id,
                   f=None,
                   content_type=content_type,
                   filename=filename,
                   errormsg='')
    try:
        dbquery = Session.query(AuditLog)
        if query:
            # Resolve matching row ids via Sphinx, then filter the DB query.
            conn = SphinxClient()
            sphinxopts = extract_sphinx_opts(config['sphinx.url'])
            conn.SetServer(sphinxopts.get('host', '127.0.0.1'))
            conn.SetMatchMode(SPH_MATCH_EXTENDED2)
            # Hard cap of 500 exported rows when searching.
            conn.SetLimits(0, 500, 500)
            query = clean_sphinx_q(query)
            qresults = conn.Query(query, 'auditlog, auditlog_rt')
            if qresults and qresults['matches']:
                ids = [hit['id'] for hit in qresults['matches']]
                dbquery = dbquery.filter(AuditLog.id.in_(ids))
        dbquery = dbquery.order_by(desc('timestamp')).all()
        if format == 'pdf':
            PS = ParagraphStyle('auditlogp',
                                fontName='Helvetica',
                                fontSize=8,
                                borderPadding=(2, 2, 2, 2))
            # Wrap long fields so they fit the fixed PDF column widths.
            rows = [(Paragraph(item.timestamp.strftime('%Y-%m-%d %H:%M'), PS),
                    Paragraph(wrap_string(item.username, 27), PS),
                    Paragraph(wrap_string(item.info, 33), PS),
                    Paragraph(wrap_string(item.hostname, 27), PS),
                    Paragraph(wrap_string(item.remoteip, 15), PS),
                    Paragraph(CATEGORY_MAP[item.category], PS))
                    for item in dbquery]
            pdf = build_pdf(rows)
            results['f'] = pdf
        elif format == 'csv':
            rows = [item.tojson() for item in dbquery]
            keys = ('timestamp',
                    'username',
                    'info',
                    'hostname',
                    'remoteip',
                    'category')
            results['f'] = build_csv(rows, keys)
        logger.info("Audit Log export complete: %s" % results['filename'])
        return results
    except (DatabaseError), err:
        results['errormsg'] = str(err)
        logger.info("Audit Log export FAILURE: %s" % str(err))
        return results
    finally:
        Session.close()
| akissa/baruwa2 | baruwa/tasks/status.py | Python | gpl-3.0 | 13,729 |
#!/usr/bin/env python
import json
import argparse
from webapollo import WAAuth, WebApolloInstance, AssertUser, accessible_organisms

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="List all organisms available in an Apollo instance"
    )
    WAAuth(parser)
    parser.add_argument("email", help="User Email")
    args = parser.parse_args()

    # Connect with admin credentials, then resolve the requesting user.
    wa = WebApolloInstance(args.apollo, args.username, args.password)
    gx_user = AssertUser(wa.users.loadUsers(email=args.email))

    all_orgs = wa.organisms.findAllOrganisms()
    # NOTE(review): `orgs` (the organisms accessible to this user) is computed
    # but never used -- the loop below iterates over *all* organisms. Confirm
    # whether the iteration should be over `orgs` instead of `all_orgs`.
    orgs = accessible_organisms(gx_user, all_orgs)

    # Keep only the summary fields useful downstream.
    cleanedOrgs = []
    for organism in all_orgs:
        org = {
            "name": organism["commonName"],
            "id": organism["id"],
            "annotations": organism["annotationCount"],
            "sequences": organism["sequences"],
        }
        cleanedOrgs.append(org)
    print(json.dumps(cleanedOrgs, indent=2))
| TAMU-CPT/galaxy-tools | tools/webapollo/list_organism_data.py | Python | gpl-3.0 | 951 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Console entry point for gobspy: delegate to the package's main()."""
from gobspy import main

# Guard so importing this module does not launch the interpreter.
if __name__ == '__main__':
    main()
| arypbatista/gobspy | gobspy.py | Python | gpl-3.0 | 75 |
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import matplotlib.cm as cm
from scipy.optimize import curve_fit
def func(x, a, b, c):
	"""Exponential decay with offset, ``a * exp(-b * x) + c`` (curve_fit model)."""
	return c + a * np.exp(-b * x)
main_dir = "/mnt/DataGuillaume/corr_pop/"
files = os.listdir(main_dir)
files = list(np.sort(files))

##############################################################################################################
# CROSS_CORR OF POPULATION CORRELATION
##############################################################################################################
# Time-lag bins (seconds) used to average the population correlations.
bins = np.arange(0, 5.1, 0.1)
# 1 per session
# 2 per shank
# 3 per nucleus
space = pd.read_hdf("../figures/figures_articles/figure1/space.hdf5")
# Binned correlation profiles per grouping level and epoch
# (wak/rem/rip); 'shank' is declared but never filled below.
cross_pop = {
	'session': {
		'wak': pd.DataFrame(index = bins[0:-1], columns = files, data = 0),
		'rem': pd.DataFrame(index = bins[0:-1], columns = files, data = 0),
		'rip': pd.DataFrame(index = bins[0:-1], columns = files, data = 0)
	},
	'shank': {
		'wak': None,
		'rem': None,
		'rip': None
	},
	'nucleus': {
		'wak': pd.DataFrame(index = bins[0:-1], columns = np.unique(space['nucleus']), data = 0),
		'rem': pd.DataFrame(index = bins[0:-1], columns = np.unique(space['nucleus']), data = 0),
		'rip': pd.DataFrame(index = bins[0:-1], columns = np.unique(space['nucleus']), data = 0)
	}
}
# Exponential-fit parameters (a, b, c of func) plus mean anatomical
# position (mx, my) per session / nucleus and epoch.
lambdaa = {
	'session': {
		'wak': pd.DataFrame(index = files, columns = ['a', 'b', 'c', 'mx', 'my'], data = 0),
		'rem': pd.DataFrame(index = files, columns = ['a', 'b', 'c', 'mx', 'my'], data = 0)
	},
	'nucleus': {
		'wak': pd.DataFrame(index = np.unique(space['nucleus']), columns = ['a', 'b', 'c'], data = 0),
		'rem': pd.DataFrame(index = np.unique(space['nucleus']), columns = ['a', 'b', 'c'], data = 0)
	}
}
# Average |lag| correlations into the bins, per session and epoch.
# NOTE(review): the HDF store is never closed in this loop.
for f in files:
	store = pd.HDFStore("/mnt/DataGuillaume/corr_pop/"+f)
	for m in ['allwak_corr', 'allrem_corr', 'allrip_corr']:
		tmp = store[m]
		tmp = tmp[np.abs(tmp.index.values)<bins.max()]
		idx = np.digitize(np.abs(tmp.index.values), bins)-1
		tmp2 = np.array([np.nanmean(tmp.values.flatten()[idx == i]) for i in np.arange(len(bins)-1)])
		cross_pop['session'][m[3:6]][f] = tmp2
# Fit the exponential decay model on the smoothed 0.1-3.1 s window.
for ep in ['wak', 'rem']:
	tmp = cross_pop['session'][ep].loc[0.1:3.1]
	tmp = tmp.rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=1.0)
	predit = []
	for ses in tmp.columns:
		popt, pcov = curve_fit(func, tmp.index.values, tmp[ses].values)
		predit.append(func(tmp.index.values, popt[0], popt[1], popt[2]))
		lambdaa['session'][ep].loc[ses, ['a', 'b', 'c']] = popt
# mean x and y postion of neurons on the same session
for ses in files:
	nme = ses.split(".")[0]
	idx = space.index.values[space.index.str.contains(nme)]
	for ep in ['wak', 'rem']:
		lambdaa['session'][ep].loc[ses, ['mx','my']] = space.loc[idx, ['x', 'y']].mean().values
# Quick look: decay constant b vs. mean dorso-ventral position, per mouse.
subplot(211)
for m in ['12', '17', '20', '32']:
	tmp = lambdaa['session']['wak'][lambdaa['session']['wak'].index.str.contains('Mouse'+m)]
	plot(tmp['my'], tmp['b'], 'o-', label = m)
subplot(212)
for m in ['12', '17', '20', '32']:
	tmp = lambdaa['session']['rem'][lambdaa['session']['rem'].index.str.contains('Mouse'+m)]
	plot(tmp['my'], tmp['b'], 'o-', label = m)
# Exploratory script: stop here on purpose.
sys.exit()
###############################################################################################################
# TO LOAD
###############################################################################################################
# Debug/inspection section (unreachable after the sys.exit above): restrict
# to Mouse12 sessions and replot the binned correlation histograms.
files = [f for f in files if 'Mouse12' in f]
files = list(np.sort(files))
bins = np.arange(0, 5.1, 0.1)
hist_corr = {n:pd.DataFrame(index = bins[0:-1], columns = np.arange(len(files)), data = 0) for n in ['allwak_corr', 'allrem_corr', 'allrip_corr']}
for f in files:
	store = pd.HDFStore("/mnt/DataGuillaume/corr_pop/"+f)
	for m in ['allwak_corr', 'allrem_corr', 'allrip_corr']:
		tmp = store[m]
		idx = np.digitize(np.abs(tmp.index.values), bins)-1
		for i in np.arange(len(bins)-1):
			hist_corr[m][files.index(f)].iloc[i] = np.nanmean(tmp.values.flatten()[idx == i])
	store.close()
# One panel per epoch, one colored trace per session.
ct = 1
colors = cm.jet(np.linspace(0, 1, len(files)))
for m in ['allwak_corr', 'allrem_corr', 'allrip_corr']:
	subplot(1,3,ct)
	ct+=1
	tmp = hist_corr[m]
	for i in tmp.columns:
		plot(hist_corr[m][i], color = colors[i])
legend()
show()
sys.exit()
# Unreachable: per-session correlation matrices shown one figure at a time.
ct2 = 1
for f in files:
	store = pd.HDFStore("/mnt/DataGuillaume/corr_pop/"+f)
	ct = 1
	for m in ['wak_corr', 'rem_corr', 'rip_corr']:
		subplot(1, 3, ct)
		imshow(store[m], interpolation = 'none', origin = 'lower', aspect = 'equal')
		title(f+" "+str(ct2)+"/"+str(len(files)))
		ct+=1
	store.close()
	show()
	ct2+=1
sys.exit()
# Legacy section (unreachable): per-session mean correlation vs. lag for the
# wake/REM/ripple epochs, later averaged for the figure below.
ywak = []
yrem = []
yrip = []
toplot = {}
tt = 3.0
dt = 0.1
bins = np.arange(dt, tt+dt, dt)
mean_rip = pd.DataFrame(index = [f.split(".")[0] for f in files], columns = bins[1:] - dt)
mean_rem = pd.DataFrame(index = [f.split(".")[0] for f in files], columns = bins[1:] - dt)
mean_wak = pd.DataFrame(index = [f.split(".")[0] for f in files], columns = bins[1:] - dt)
for f in files:
	store = pd.HDFStore(main_dir+f)
	theta_wake_corr = store['wak_corr']
	theta_rem_corr = store['rem_corr']
	rip_corr = store['rip_corr']
	rip = store['allrip_corr']
	wake = store['allwak_corr']
	rem = store['allrem_corr']
	store.close()
	##############################################
	# TO MAKE THE EXEMPLES
	#############################################
	# Lower-triangle (lag, correlation) pairs of the ripple matrix.
	index = np.tril_indices(len(rip_corr))
	rip = np.vstack([rip_corr[0][index],rip_corr[1][index]]).transpose()
	if f == 'Mouse12-120809.pickle':
		toplot['wake'] = theta_wake_corr[0][1]
		toplot['rem'] = theta_rem_corr[0][1]
		toplot['rip'] = rip_corr[1]
	# Stack the per-episode theta matrices into (lag, corr) pair arrays.
	wake = []
	for i in range(len(theta_wake_corr)):
		np.fill_diagonal(theta_wake_corr[i][1], 1.0)
		index = np.tril_indices(len(theta_wake_corr[i][0]))
		wake.append(np.vstack([theta_wake_corr[i][0][index],theta_wake_corr[i][1][index]]).transpose())
	wake = np.vstack(wake)
	rem = []
	for i in range(len(theta_rem_corr)):
		np.fill_diagonal(theta_rem_corr[i][1], 1.0)
		index = np.tril_indices(len(theta_rem_corr[i][0]))
		rem.append(np.vstack([theta_rem_corr[i][0][index],theta_rem_corr[i][1][index]]).transpose())
	rem = np.vstack(rem)
	# remove nan
	# rem = rem[~np.isnan(rem[:,1])]
	# wake = wake[~np.isnan(wake[:,1])]
	# rip = rip[~np.isnan(rip[:,1])]
	# restrict to less than 3 second
	# NOTE(review): the arrays were just rebuilt as numpy arrays, yet
	# `.index.values` / `.loc` below are pandas accessors -- this section
	# looks stale; verify before reuse.
	rem = rem[rem.index.values <= tt]
	wake = wake[wake.index.values <= tt]
	rip = rip[rip.index.values <= tt]
	rem = rem[rem.index.values > 0]
	wake = wake[wake.index.values > 0]
	rip = rip[rip.index.values > 0]
	index_rip = np.digitize(rip.index.values, bins)
	index_wake = np.digitize(wake.index.values, bins)
	index_rem = np.digitize(rem.index.values, bins)
	# Per-session mean correlation in each lag bin.
	for i in range(len(bins)-1):
		mean_rip.loc[f.split(".")[0]].iloc[i] = np.mean(rip.loc[index_rip == i])[0]
		mean_wak.loc[f.split(".")[0]].iloc[i] = np.mean(wake.loc[index_wake == i])[0]
		mean_rem.loc[f.split(".")[0]].iloc[i] = np.mean(rem.loc[index_rem == i])[0]
# Average the per-session profiles and mirror them around lag 0 for display.
xt = np.array(list(mean_rip.columns))
meanywak = mean_wak.mean(0).values
meanyrem = mean_rem.mean(0).values
meanyrip = mean_rip.mean(0).values
stdywak = mean_wak.std(0).values
stdyrem = mean_rem.std(0).values
stdyrip = mean_rip.std(0).values
#########
# TO SAVE
#########
# tosave = { 'xt': xt,
# 			'meanywak': meanywak,
# 			'meanyrem': meanyrem,
# 			'meanyrip': meanyrip,
# 			'toplot' : toplot,
# 			'varywak' : varywak,
# 			'varyrem' : varyrem,
# 			'varyrip' : varyrip,
# 		}
# cPickle.dump(tosave, open('../data/to_plot_corr_pop.pickle', 'wb'))
# plot(xt, meanywak, 'o-', label = 'theta(wake)')
# plot(xt, meanyrem, 'o-', label = 'theta(rem)')
# plot(xt, meanyrip, 'o-', label = 'ripple')
figure()
# subplot(1,4,1)
# imshow(toplot['wake'])
# title('wake')
# subplot(1,4,2)
# imshow(toplot['rem'][100:,100:])
# title('REM')
# subplot(1,4,3)
# imshow(toplot['rip'][0:200,0:200])
# title('RIPPLES')
subplot(1,1,1)
# Build symmetric lag axis [-xt..0..xt]; lag 0 has correlation 1 and std 0.
xtsym = np.array(list(xt[::-1]*-1.0)+[0.0]+list(xt))
meanywak = np.array(list(meanywak[::-1])+[1.0]+list(meanywak))
meanyrem = np.array(list(meanyrem[::-1])+[1.0]+list(meanyrem))
meanyrip = np.array(list(meanyrip[::-1])+[1.0]+list(meanyrip))
stdywak = np.array(list(stdywak[::-1])+[0.0]+list(stdywak))
stdyrem = np.array(list(stdyrem[::-1])+[0.0]+list(stdyrem))
stdyrip = np.array(list(stdyrip[::-1])+[0.0]+list(stdyrip))
colors = ['red', 'blue', 'green']
plot(xtsym, meanywak, '-', color = colors[0], label = 'theta(wake)')
plot(xtsym, meanyrem, '-', color = colors[1], label = 'theta(rem)')
plot(xtsym, meanyrip, '-', color = colors[2], label = 'ripple')
legend()
# Shaded bands show +/- one standard deviation across sessions.
fill_between(xtsym, meanywak+stdywak, meanywak-stdywak, color = colors[0], alpha = 0.4)
fill_between(xtsym, meanyrem+stdyrem, meanyrem-stdyrem, color = colors[1], alpha = 0.4)
fill_between(xtsym, meanyrip+stdyrip, meanyrip-stdyrip, color = colors[2], alpha = 0.4)
legend()
xlabel('s')
ylabel('r')
show()
| gviejo/ThalamusPhysio | python/main_pop_corr_fig.py | Python | gpl-3.0 | 9,516 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from httplib import FORBIDDEN, INTERNAL_SERVER_ERROR, OK
from json import dumps, loads
from logging import getLogger
from traceback import format_exc
# gevent
from gevent import sleep, spawn
# huTools
from huTools.structured import dict2xml
# Zato
from zato.common import DATA_FORMAT, PUB_SUB, ZATO_ERROR, ZATO_NONE, ZATO_OK
from zato.common.pubsub import ItemFull, PermissionDenied
from zato.common.util import get_basic_auth_credentials
from zato.server.connection.http_soap import BadRequest, Forbidden, TooManyRequests, Unauthorized
from zato.server.service import AsIs, Bool, Int, Service
from zato.server.service.internal import AdminService
logger_overflown = getLogger('zato_pubsub_overflown')
# ################################################################################################################################
class DeleteExpired(AdminService):
    """ Invoked when a server is starting - periodically spawns a greenlet deleting expired messages.
    """
    def _delete_expired(self):
        # Delegates the actual deletion to the pub/sub implementation and logs
        # whatever it reports back (presumably a count of deleted messages --
        # confirm against the impl's delete_expired contract).
        self.logger.debug('Deleted expired messages %s', self.pubsub.impl.delete_expired())

    def handle(self):
        """ Loops forever, spawning a deletion greenlet every
        `delete_expired_interval` seconds (taken from server config).
        """
        interval = float(self.server.fs_server_config.pubsub.delete_expired_interval)
        while True:
            self.logger.debug('Deleting expired messages, interval %rs', interval)
            # Spawn so a slow deletion cannot delay the next cycle.
            spawn(self._delete_expired)
            sleep(interval)
# ################################################################################################################################
class InvokeCallbacks(AdminService):
    """ Invoked when a server is starting - periodically spawns a greenlet invoking consumer URL callbacks.
    """
    def _reject(self, msg_ids, sub_key, consumer, reason):
        # Returns the given messages to the consumer's queue and records why
        # the delivery attempt failed.
        self.pubsub.reject(sub_key, msg_ids)
        self.logger.error('Could not deliver messages `%s`, sub_key `%s` to `%s`, reason `%s`', msg_ids, sub_key, consumer, reason)

    def _invoke_callbacks(self):
        """ Fetches pending messages for each callback consumer and POSTs them
        to the consumer's configured outgoing HTTP connection. Messages are
        acknowledged only on a 200 OK, otherwise rejected back to the queue.
        """
        callback_consumers = list(self.pubsub.impl.get_callback_consumers())
        self.logger.debug('Callback consumers found `%s`', callback_consumers)
        for consumer in callback_consumers:
            # Serialize deliveries per subscription key.
            with self.lock(consumer.sub_key):
                msg_ids = []
                out = {
                    'status': ZATO_OK,
                    'results_count': 0,
                    'results': []
                }
                messages = self.pubsub.get(consumer.sub_key, get_format=PUB_SUB.GET_FORMAT.JSON.id)
                for msg in messages:
                    msg_ids.append(msg['metadata']['msg_id'])
                    out['results_count'] += 1
                    out['results'].append(msg)
                # messages is a generator so we still don't know if we had anything.
                if msg_ids:
                    outconn = self.outgoing.plain_http[consumer.callback_name]
                    # Serialize the batch in the consumer's preferred data format.
                    if outconn.config['data_format'] == DATA_FORMAT.XML:
                        out = dict2xml(out)
                        content_type = 'application/xml'
                    else:
                        out = dumps(out)
                        content_type = 'application/json'
                    try:
                        response = outconn.conn.post(self.cid, data=out, headers={'content-type': content_type})
                    except Exception, e:
                        # Transport-level failure - put the whole batch back.
                        self._reject(msg_ids, consumer.sub_key, consumer, format_exc(e))
                    else:
                        # Only HTTP 200 acknowledges the batch.
                        if response.status_code == OK:
                            self.pubsub.acknowledge(consumer.sub_key, msg_ids)
                        else:
                            self._reject(
                                msg_ids, consumer.sub_key, consumer, '`{}` `{}`'.format(response.status_code, response.text))

    def handle(self):
        # TODO: self.logger's name should be 'zato_pubsub' so it got logged to the same location
        # the rest of pub/sub does.
        interval = float(self.server.fs_server_config.pubsub.invoke_callbacks_interval)
        while True:
            self.logger.debug('Invoking pub/sub callbacks, interval %rs', interval)
            spawn(self._invoke_callbacks)
            sleep(interval)
# ################################################################################################################################
class MoveToTargetQueues(AdminService):
    """ Invoked when a server is starting - periodically spawns a greenlet moving published messages to recipient queues.
    """
    def _move_to_target_queues(self):
        overflown = []
        for item in self.pubsub.impl.move_to_target_queues():
            for result, target_queue, msg_id in item:
                # A message did not fit into the consumer's max backlog.
                if result == PUB_SUB.MOVE_RESULT.OVERFLOW:
                    self.logger.warn('Message overflow, queue:`%s`, msg_id:`%s`', target_queue, msg_id)
                    # Keep only what follows the last ':' of the queue name
                    # (presumably the consumer's sub_key - confirm against the
                    # impl's queue-naming scheme).
                    overflown.append((target_queue[target_queue.rfind(':')+1:], msg_id))
        if overflown:
            # Hand the (sub_key, msg_id) pairs off asynchronously for
            # logging/acknowledgement by StoreOverflownMessages.
            self.invoke_async(StoreOverflownMessages.get_name(), overflown, to_json_string=True)
        self.logger.debug('Messages moved to target queues')

    def handle(self):
        interval = float(self.server.fs_server_config.pubsub.move_to_target_queues_interval)
        while True:
            self.logger.debug('Moving messages to target queues, interval %rs', interval)
            spawn(self._move_to_target_queues)
            sleep(interval)
# ################################################################################################################################
class StoreOverflownMessages(AdminService):
    """ Stores on filesystem messages that were above a consumer's max backlog and marks them as rejected by the consumer.
    """
    def handle(self):
        """ Payload is a JSON list of (sub_key, msg_id) pairs produced by
        MoveToTargetQueues. Each message is logged to the overflow logger and
        then acknowledged, grouped per subscription key.
        """
        acks = {}
        for sub_key, msg_id in loads(self.request.payload):
            logger_overflown.warn('%s - %s - %s', msg_id, self.pubsub.get_consumer_by_sub_key(sub_key).name,
                self.pubsub.get_message(msg_id))
            # Group message IDs by their subscription key.
            msg_ids = acks.setdefault(sub_key, [])
            msg_ids.append(msg_id)
        # Acknowledge each batch under its own subscription key.
        # (Fixed: previously this called acknowledge(sub_key, msg_id) with the
        # leftover variables of the loop above, i.e. it acknowledged only the
        # very last pair instead of every grouped batch.)
        for consumer_sub_key, grouped_msg_ids in acks.iteritems():
            self.pubsub.acknowledge(consumer_sub_key, grouped_msg_ids)
# ################################################################################################################################
class RESTHandler(Service):
    """ Handles calls to pub/sub from REST clients.
    """
    class SimpleIO(object):
        # item_type says whether the URL addresses a topic or messages,
        # item is the topic name or (for DELETE) a message ID.
        input_required = ('item_type', 'item')
        input_optional = ('max', 'dir', 'format', 'mime_type', Int('priority'), Int('expiration'), AsIs('msg_id'),
            Bool('ack'), Bool('reject'))
        default = ZATO_NONE
        use_channel_params_only = True

    # ################################################################################################################################

    def _raise_unauthorized(self):
        # 401 with the 'Zato pub/sub' realm.
        raise Unauthorized(self.cid, 'You are not authorized to access this resource', 'Zato pub/sub')

    def validate_input(self):
        """ Authenticates the caller via HTTP Basic Auth, validates the URL's
        item type, checks topic access rights, and stashes the validated
        values in self.environ for the handlers below.
        """
        username, password = get_basic_auth_credentials(self.wsgi_environ.get('HTTP_AUTHORIZATION'))
        if not username:
            self._raise_unauthorized()
        # Look the client up among all HTTP Basic Auth security definitions.
        for item in self.server.worker_store.request_dispatcher.url_data.basic_auth_config.values():
            if item.config.username == username and item.config.password == password:
                client = item
                break
        else:
            self._raise_unauthorized()
        if self.request.input.item_type not in PUB_SUB.URL_ITEM_TYPE:
            raise BadRequest(self.cid, 'None of the supported resources `{}` found in URL path'.format(
                ', '.join(PUB_SUB.URL_ITEM_TYPE)))
        sub_key = self.wsgi_environ.get('HTTP_X_ZATO_PUBSUB_KEY', ZATO_NONE)
        is_consumer = self.request.input.item_type == PUB_SUB.URL_ITEM_TYPE.MESSAGES.id
        # Deletes don't access topics, they operate on messages.
        if self.wsgi_environ['REQUEST_METHOD'] != 'DELETE':
            if not self.pubsub.can_access_topic(client.config.id, self.request.input.item, is_consumer):
                raise Forbidden(self.cid, 'You are not authorized to access this resource')
        self.environ['sub_key'] = sub_key
        self.environ['client_id'] = client.config.id
        self.environ['format'] = self.request.input.format if self.request.input.format else PUB_SUB.GET_FORMAT.DEFAULT.id
        self.environ['is_json'] = self.environ['format'] == PUB_SUB.GET_FORMAT.JSON.id

    # ################################################################################################################################

    def _set_payload_data(self, out, status_code=OK):
        # Serializes `out` in the client's requested format and fills in the
        # response payload, content type and HTTP status code.
        if self.environ['is_json']:
            content_type = 'application/json'
            out = dumps(out)
        else:
            content_type = 'application/xml'
            out = dict2xml(out)
        self.response.headers['Content-Type'] = content_type
        self.response.payload = out
        self.response.status_code = status_code

    # ################################################################################################################################

    def _handle_POST_topic(self):
        """ Publishes a message on a topic.
        """
        pub_data = {
            'payload': self.request.raw_request,
            # Fall back to the request's own Content-Type header.
            'mime_type': self.request.input.mime_type or self.wsgi_environ['CONTENT_TYPE'],
            'topic': self.request.input.item,
            'priority': int(self.request.input.priority or PUB_SUB.DEFAULT_PRIORITY),
            'expiration': int(self.request.input.expiration or PUB_SUB.DEFAULT_EXPIRATION),
            'msg_id': self.request.input.msg_id,
            'client_id': self.environ['client_id'],
        }
        self._set_payload_data({
            'status': ZATO_OK,
            'msg_id':self.pubsub.publish(**pub_data).msg.msg_id
        })

    # ################################################################################################################################

    def _handle_POST_msg(self):
        """ Returns messages from topics, either in JSON or XML.
        """
        out = {
            'status': ZATO_OK,
            'results_count': 0,
            'results': []
        }
        max_batch_size = int(self.request.input.max) if self.request.input.max else PUB_SUB.DEFAULT_GET_MAX_BATCH_SIZE
        # FIFO is the default direction when none was requested.
        is_fifo = True if (self.request.input.dir == PUB_SUB.GET_DIR.FIFO or not self.request.input.dir) else False
        try:
            for item in self.pubsub.get(self.environ['sub_key'], max_batch_size, is_fifo, self.environ['format']):
                if self.environ['is_json']:
                    out_item = item
                else:
                    out_item = {'metadata': item.to_dict()}
                    out_item['payload'] = item.payload
                out['results'].append(out_item)
                out['results_count'] += 1
        except ItemFull, e:
            # Client exceeded its in-flight message limit.
            raise TooManyRequests(self.cid, e.msg)
        else:
            self._set_payload_data(out)

    # ################################################################################################################################

    def handle_POST(self):
        # Dispatches to _handle_POST_topic or _handle_POST_msg depending on
        # the item type parsed out of the URL.
        try:
            getattr(self, '_handle_POST_{}'.format(self.request.input.item_type))()
        except Exception, e:
            # Permission problems map to 403, anything else to 500.
            details, status_code = ('Permission denied', FORBIDDEN) if isinstance(e, PermissionDenied) else (e.message, INTERNAL_SERVER_ERROR)
            self.logger.warn('Could not handle POST pub/sub (%s %s), e:`%s`', self.cid, details, format_exc(e))
            self._set_payload_data({'status': ZATO_ERROR, 'details':details}, status_code)

    def handle_DELETE(self):
        """ Acknowledges or rejects a message - exactly one of ack/reject must
        be given on input.
        """
        actions = ('ack', 'reject')
        try:
            self.request.input.require_any(*actions)
        except ValueError:
            raise BadRequest(self.cid, 'Missing state to set, should be one of `{}`'.format(', '.join(actions)))
        if self.request.input.ack and self.request.input.reject:
            raise BadRequest(self.cid, 'Cannot both acknowledge and reject a message')
        func = self.pubsub.acknowledge if self.request.input.ack else self.pubsub.reject
        result = func(self.environ['sub_key'], self.request.input.item)
        # Success iff the message ID appears in the returned collection.
        if self.request.input.item in result:
            status = ZATO_OK
            details = ''
        else:
            status = ZATO_ERROR
            details = 'Message not found `{}`'.format(self.request.input.item)
        self._set_payload_data({'status': status, 'details':details})
# ################################################################################################################################
| alirizakeles/zato | code/zato-server/src/zato/server/service/internal/pubsub/__init__.py | Python | gpl-3.0 | 12,954 |
"""
Legalese
--------
Copyright (c) 2015, 2016 Genome Research Ltd.
Author: Colin Nolan <cn13@sanger.ac.uk>
This file is part of Cookie Monster.
Cookie Monster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from unittest.mock import MagicMock
from hgicommon.mixable import Priority
from cookiemonster.cookiejar import CookieJar
from cookiemonster.cookiejar.in_memory_cookiejar import InMemoryCookieJar
from cookiemonster.processor.models import Rule
def create_mock_rule(priority: int=Priority.MIN_PRIORITY) -> Rule:
    """
    Creates a mock `Rule` object.
    :param priority: (optional) the priority of the rule
    :return: the created rule
    """
    # Both the matching predicate and the action unconditionally return True,
    # so the rule always matches and always claims to have acted.
    return Rule(
        lambda file_update, data_environment: True,
        lambda file_update, data_environment: True,
        "my_rule",
        priority=priority
    )
def create_magic_mock_cookie_jar() -> CookieJar:
    """
    Creates a magic mock CookieJar - has the implementation of a CookieJar, all
    methods are implemented using magic mocks and therefore their usage is
    recorded while still delegating to the real in-memory implementation.
    :return: the created magic mock
    """
    cookie_jar = InMemoryCookieJar()

    # Keep references to the real implementations so the mocks can delegate.
    original_get_next_for_processing = cookie_jar.get_next_for_processing
    original_enrich_cookie = cookie_jar.enrich_cookie
    # Fixed: this previously bound `mark_as_complete`, so calls to
    # `mark_as_failed` silently *completed* cookies instead of failing them.
    original_mark_as_failed = cookie_jar.mark_as_failed
    original_mark_as_complete = cookie_jar.mark_as_complete
    original_mark_for_processing = cookie_jar.mark_for_processing

    cookie_jar.get_next_for_processing = MagicMock(side_effect=original_get_next_for_processing)
    cookie_jar.enrich_cookie = MagicMock(side_effect=original_enrich_cookie)
    cookie_jar.mark_as_failed = MagicMock(side_effect=original_mark_as_failed)
    cookie_jar.mark_as_complete = MagicMock(side_effect=original_mark_as_complete)
    cookie_jar.mark_for_processing = MagicMock(side_effect=original_mark_for_processing)

    return cookie_jar
| wtsi-hgi/cookie-monster | cookiemonster/tests/processor/_mocks.py | Python | gpl-3.0 | 2,448 |
import uuid
from unithelper import DBTestCase
from unithelper import mocker
from unithelper import requestor
from unithelper import hashable_dict
from bc import database
from bc import metrics
from bc_wapi import wapi_metrics
class Test(DBTestCase):
    # NOTE: this is Python 2 code ('0L' long literals, '01' octal literal).

    def test_metric_get(self):
        """Check getting metric with metricGet"""
        # A fully-populated metrics row.
        data = {
            'id': str(uuid.uuid4()),
            'type': str(uuid.uuid4())[:10],
            'formula': metrics.constants.FORMULA_SPEED,
            'aggregate': 0L,
        }
        with database.DBConnect() as db:
            db.insert('metrics', data)
        # Existing metric is returned as-is with an 'ok' status.
        self.assertEquals(wapi_metrics.metricGet({'id': data['id']}),
            requestor({'metric': data}, 'ok'))
        # Unknown id -> 'error' status.
        self.assertEquals(wapi_metrics.metricGet({'id':''}),
            requestor({'message': 'Metric not found' }, 'error'))
        # Backend failure (get() patched to raise) -> 'servererror' status.
        with mocker([('bc.metrics.get', mocker.exception),
                ('bc_wapi.wapi_metrics.LOG.error', mocker.passs)]):
            self.assertEquals(wapi_metrics.metricGet({'id':''}),
                requestor({'message': 'Unable to obtain metric' }, 'servererror'))

    def test_metric_get_list(self):
        """Check getting metrics with metricList"""
        data = []
        for i in range(2, 10):
            d = {
                'id': str(uuid.uuid4()),
                'type': str(uuid.uuid4())[:10],
                'formula': metrics.constants.FORMULA_SPEED,
                'aggregate': 0L,
            }
            with database.DBConnect() as db:
                db.insert('metrics', d)
            data.append(d)
        ans = wapi_metrics.metricList('')
        # NOTE(review): '01 << 2' is octal one shifted left, i.e. 4 --
        # presumably a wapi status/flag constant; confirm against requestor().
        self.assertEquals(ans[0], (01 << 2))
        self.assertEquals(ans[1]['status'], 'ok')
        # Ordering is not guaranteed, so compare as sets of hashable dicts.
        self.assertEquals(set(map(lambda x: hashable_dict(x), ans[1]['metrics'])),
            set(map(lambda x: hashable_dict(x), data)))
        with mocker([('bc.metrics.get_all', mocker.exception),
                ('bc_wapi.wapi_metrics.LOG.error', mocker.passs)]):
            self.assertEquals(wapi_metrics.metricList({'id':''}),
                requestor({'message': 'Unable to obtain metric list' }, 'servererror'))

    def test_metric_add(self):
        """Check the creating metric with metricAdd"""
        data = {
            'id': str(uuid.uuid4()),
            'type': str(uuid.uuid4())[:10],
            'formula': metrics.constants.FORMULA_SPEED,
            'aggregate': 0L,
        }
        # Pass a copy -- metricAdd may presumably mutate its argument.
        ans = wapi_metrics.metricAdd(data.copy())
        self.assertEquals(ans, requestor({'id':data['id']}, 'ok'))
        # Verify the row actually landed in the database.
        with database.DBConnect() as db:
            t1 = db.find('metrics').one()
        self.assertEquals(data['id'], t1['id'])
        self.assertEquals(data['type'], t1['type'])
        with mocker([('bc.metrics.add', mocker.exception),
                ('bc_wapi.wapi_metrics.LOG.error', mocker.passs)]):
            self.assertEquals(wapi_metrics.metricAdd({'id':''}),
                requestor({'message': 'Unable to add new metric' }, 'servererror'))
| legionus/billing | tests/test_wapi_metrics.py | Python | gpl-3.0 | 2,622 |
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from . import views
app_name = 'web'
# URL routes for the 'web' app: auth pages plus CRUD views for products,
# brands and components.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name="front.html"), name='front'),
    url(r'^index$', views.IndexView.as_view(), name='index'),
    # Accounts
    url('^accounts/login/',
        auth_views.login,
        {'template_name': 'login.html'},
        name="login"),
    url('^accounts/logout/', auth_views.logout, name="logout"),
    url('^accounts/change/',
        auth_views.password_change,
        name="password_change"),
    url('^accounts/register/', views.UserRegister.as_view(), name="register"),
    # url('^accounts/profile/', views.UserProfile.as_view(), name="user_profile"),
    # Product
    # NOTE(review): Django matches URLconf patterns against the path only --
    # the query string never reaches the resolver, so the optional
    # `(\?type=.*)?` group can never match; confirm and simplify to
    # r'^products/$'.
    url(r'^products/(\?type=.*)?$', views.ProductListView.as_view(),
        name='product_list'),
    url(r'^product/(?P<pk>\d+)/$', views.ProductView.as_view(),
        name='product'),
    url(r'^product/new/$', views.ProductCreate.as_view(),
        name='product_create'),
    url(r'^product/update/(?P<pk>\d+)/$', views.ProductUpdate.as_view(),
        name='product_update'),
    url(r'^product/delete/(?P<pk>\d+)/$', views.ProductDelete.as_view(),
        name='product_delete'),
    # Brand
    url(r'^brands/$', views.BrandListView.as_view(),
        name='brand_list'),
    url(r'^brand/(?P<pk>\d+)/$', views.BrandView.as_view(),
        name='brand'),
    url(r'^brand/new/$', views.BrandCreate.as_view(),
        name='brand_create'),
    url(r'^brand/update/(?P<pk>\d+)/$', views.BrandUpdate.as_view(),
        name='brand_update'),
    url(r'^brand/delete/(?P<pk>\d+)/$', views.BrandDelete.as_view(),
        name='brand_delete'),
    # Component
    url(r'^components/$', views.ComponentListView.as_view(),
        name='component_list'),
    url(r'^component/(?P<pk>\d+)/$', views.ComponentView.as_view(),
        name='component'),
    url(r'^component/new/$', views.ComponentCreate.as_view(),
        name='component_create'),
    url(r'^component/update/(?P<pk>\d+)/$', views.ComponentUpdate.as_view(),
        name='component_update'),
    url(r'^component/delete/(?P<pk>\d+)/$', views.ComponentDelete.as_view(),
        name='component_delete'),
]
| FacundoAcevedo/gondolero | gondolero/web/urls.py | Python | gpl-3.0 | 2,275 |
from django.http import HttpResponseRedirect
def anonymous_required(view, redirect_to= None):
    """Decorator factory: wrap *view* so that authenticated users are
    redirected to *redirect_to* (defaults to settings.LOGIN_REDIRECT_URL)
    instead of seeing the view."""
    return AnonymousRequired(view, redirect_to)
class AnonymousRequired(object):
    """Callable view wrapper that turns away already-authenticated users.

    Anonymous visitors are passed through to the wrapped view; logged-in
    users receive a redirect to ``redirect_to``.
    """

    def __init__(self, view, redirect_to):
        if redirect_to is None:
            # Imported lazily so the module can load before settings are
            # configured; falls back to the project's login redirect.
            from django.conf import settings
            redirect_to = settings.LOGIN_REDIRECT_URL
        self.view = view
        self.redirect_to = redirect_to

    def __call__(self, request, *args, **kwargs):
        user = request.user
        authenticated = user is not None and user.is_authenticated
        if not authenticated:
            # Anonymous visitor: serve the wrapped view as usual.
            return self.view(request, *args, **kwargs)
        return HttpResponseRedirect(self.redirect_to)
| ZwaConnection/TheGuildWeb | apps/member/decorators.py | Python | gpl-3.0 | 646 |
#
#
# Copyright (c) 2009-2019 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Generate fake data used by the tests.
The idea is to create a deterministic database that reports
can be run against, resulting in predictable, expected results"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import with_statement
import logging
import math
import os
import time
import weecfg.database
import weedb
import weewx.manager
log = logging.getLogger(__name__)
# Pin the timezone so the generated data (and thus the expected test
# results) are deterministic regardless of the host's locale.
os.environ['TZ'] = 'America/Los_Angeles'
time.tzset()
# The start of the 'solar year' for 2009-2010
year_start_tt = (2009, 12, 21, 9, 47, 0, 0, 0, 0)
year_start = int(time.mktime(year_start_tt))
# Roughly nine months of data:
start_tt = (2010, 1, 1, 0, 0, 0, 0, 0, -1)  # 2010-01-01 00:00
stop_tt = (2010, 9, 3, 11, 0, 0, 0, 0, -1)  # 2010-09-03 11:00
alt_start_tt = (2010, 8, 30, 0, 0, 0, 0, 0, -1)
start_ts = int(time.mktime(start_tt))
stop_ts = int(time.mktime(stop_tt))
alt_start = int(time.mktime(alt_start_tt))
# At one half-hour archive intervals (value is in seconds):
interval = 1800
altitude_vt = (700, 'foot', 'group_altitude')
latitude = 45
longitude = -125
# Parameters of the synthetic climate (US units):
daily_temp_range = 40.0
annual_temp_range = 80.0
avg_temp = 40.0
# Four day weather cycle:
weather_cycle = 3600 * 24.0 * 4
weather_baro_range = 2.0
weather_wind_range = 10.0
weather_rain_total = 0.5  # This is inches per weather cycle
avg_baro = 30.0
def configDatabases(config_dict, database_type):
    """Build both test databases for the given backend type.

    config_dict: the weewx configuration dictionary to modify in place.
    database_type: backend suffix used in the database names
        (e.g. 'sqlite' or 'mysql' -- confirm against the test harness).
    """
    config_dict['DataBindings']['wx_binding']['database'] = "archive_" + database_type
    configDatabase(config_dict, 'wx_binding')
    # The alternate database covers a shorter span at half the amplitude.
    config_dict['DataBindings']['alt_binding']['database'] = "alt_" + database_type
    configDatabase(config_dict, 'alt_binding', start_ts=alt_start, amplitude=0.5)
def configDatabase(config_dict, binding, start_ts=start_ts, stop_ts=stop_ts, interval=interval, amplitude=1.0,
                   day_phase_offset=math.pi / 4.0, annual_phase_offset=0.0,
                   weather_phase_offset=0.0, year_start=start_ts):
    """Configures the archive databases.

    Creates (or reuses) a synthetic archive database for the given binding,
    then backfills daily summaries and patches in derived observation types.

    NOTE(review): the `year_start`, `day_phase_offset` default of pi/4 and the
    other phase parameters are accepted here, but the genFakeRecords() call
    below passes `year_start=start_ts` -- i.e. the `year_start` argument is
    silently ignored. Confirm whether that is intentional.
    """
    # Check to see if it already exists and is configured correctly.
    try:
        with weewx.manager.open_manager_with_config(config_dict, binding) as manager:
            if manager.firstGoodStamp() == start_ts and manager.lastGoodStamp() == stop_ts:
                # Before weewx V4, the test database had interval in seconds.
                # Check that this one has been corrected.
                last_record = manager.getRecord(stop_ts)
                if last_record['interval'] == interval / 60:
                    # Database exists, and it has the right value for interval. We're done.
                    return
                else:
                    log.info("Interval value is wrong. Rebuilding test databases.")
    except weedb.DatabaseError:
        pass
    # Delete anything that might already be there.
    try:
        log.info("Dropping database %s" % config_dict['DataBindings'][binding]['database'])
        weewx.manager.drop_database_with_config(config_dict, binding)
    except weedb.DatabaseError:
        pass
    # Need to build a new synthetic database. General strategy is to create the
    # archive data, THEN backfill with the daily summaries. This is faster than
    # creating the daily summaries on the fly.
    # First, we need to modify the configuration dictionary that was passed in
    # so it uses the DBManager, instead of the daily summary manager
    monkey_dict = config_dict.dict()
    monkey_dict['DataBindings'][binding]['manager'] = 'weewx.manager.Manager'
    with weewx.manager.open_manager_with_config(monkey_dict, binding, initialize=True) as archive:
        log.info("Creating synthetic database %s" % config_dict['DataBindings'][binding]['database'])
        # Because this can generate voluminous log information,
        # suppress all but the essentials:
        logging.disable(logging.INFO)
        # Now generate and add the fake records to populate the database:
        t1 = time.time()
        archive.addRecord(genFakeRecords(start_ts=start_ts, stop_ts=stop_ts,
                                         interval=interval,
                                         amplitude=amplitude,
                                         day_phase_offset=day_phase_offset,
                                         annual_phase_offset=annual_phase_offset,
                                         weather_phase_offset=weather_phase_offset,
                                         year_start=start_ts,
                                         db_manager=archive))
        t2 = time.time()
        delta = t2 - t1
        print("\nTime to create synthetic database '%s' = %6.2fs"
              % (config_dict['DataBindings'][binding]['database'], delta))
        # Restore the logging
        logging.disable(logging.NOTSET)
    with weewx.manager.open_manager_with_config(config_dict, binding, initialize=True) as archive:
        # Backfill with daily summaries:
        t1 = time.time()
        nrecs, ndays = archive.backfill_day_summary()
        tdiff = time.time() - t1
        if nrecs:
            print("\nProcessed %d records to backfill %d day summaries in database '%s' in %.2f seconds"
                  % (nrecs, ndays, config_dict['DataBindings'][binding]['database'], tdiff))
        else:
            print("Daily summaries in database '%s' up to date."
                  % config_dict['DataBindings'][binding]['database'])
        # Finally, compute derived observation types over the same span.
        t1 = time.time()
        patch_database(config_dict, binding)
        tdiff = time.time() - t1
        print("\nTime to patch database with derived types: %.2f seconds" % tdiff)
def genFakeRecords(start_ts=start_ts, stop_ts=stop_ts, interval=interval,
                   amplitude=1.0, day_phase_offset=0.0, annual_phase_offset=0.0,
                   weather_phase_offset=0.0, year_start=start_ts, db_manager=None):
    """Yield synthetic archive records from start_ts to stop_ts, inclusive.

    start_ts: Starting timestamp in unix epoch time. This timestamp will be included in the results.
    stop_ts: Stopping timestamp in unix epoch time. This timestamp will be included in the results.
    interval: The interval between timestamps IN SECONDS!
    """
    # Observation types that get periodically nulled, then rounded.
    obs_types = ['barometer', 'outTemp', 'windDir', 'windGust', 'windGustDir', 'windSpeed']
    obs_counter = 0
    for stamp in range(start_ts, stop_ts + interval, interval):
        # Phase angles of the daily, annual and synthetic-weather cycles.
        theta_day = ((stamp - year_start) * 2.0 * math.pi) / (3600 * 24.0) + day_phase_offset
        theta_year = ((stamp - year_start) * 2.0 * math.pi) / (3600 * 24.0 * 365.0) + annual_phase_offset
        theta_weather = ((stamp - year_start) * 2.0 * math.pi) / weather_cycle + weather_phase_offset
        sin_weather = math.sin(theta_weather)

        record = {'dateTime': stamp, 'usUnits': weewx.US, 'interval': int(interval / 60)}
        record['outTemp'] = 0.5 * amplitude * (-daily_temp_range * math.sin(theta_day)
                                               - annual_temp_range * math.cos(theta_year)) + avg_temp
        record['barometer'] = -0.5 * amplitude * weather_baro_range * sin_weather + avg_baro
        record['windSpeed'] = abs(amplitude * weather_wind_range * (1.0 + sin_weather))
        record['windDir'] = math.degrees(theta_weather) % 360.0
        record['outHumidity'] = 40 * sin_weather + 50
        record['windGust'] = 1.2 * record['windSpeed']
        record['windGustDir'] = record['windDir']

        # Rain falls only near the crest of the weather cycle.
        if sin_weather > 0.98:
            record['rain'] = 0.08 * amplitude
        elif sin_weather > 0.95:
            record['rain'] = 0.04 * amplitude
        else:
            record['rain'] = 0.0

        record['radiation'] = max(amplitude * 800 * math.sin(theta_day - math.pi / 2.0), 0)
        record['radiation'] *= 0.5 * (math.cos(theta_year + math.pi) + 1.5)

        # Null every 71st observation (71 is prime, so the nulls are spread
        # deterministically across the types); round the survivors slightly
        # so SQL arithmetic does not produce small assertion errors.
        for obs_type in obs_types:
            obs_counter += 1
            if obs_counter % 71 == 0:
                record[obs_type] = None
            elif record[obs_type] is not None:
                record[obs_type] = round(record[obs_type], 6)

        yield record
def patch_database(config_dict, binding='wx_binding'):
    """Run weecfg's CalcMissing over the synthetic span so derived
    observation types get filled in for the given binding."""
    options = {
        'name': 'Patch gen_fake_data',
        'binding': binding,
        'start_ts': start_ts,
        'stop_ts': stop_ts,
        'dry_run': False,
    }
    weecfg.database.CalcMissing(config_dict, options).run()
if __name__ == '__main__':
    # Smoke test: dump the generated records as a table, repeating the
    # header every 30 rows.
    count = 0
    for rec in genFakeRecords():
        if count % 30 == 0:
            print("Time outTemp windSpeed barometer rain radiation")
        count += 1
        # Format each value, substituting N/A for observations nulled by
        # the every-71st rule.
        outTemp = "%10.1f" % rec['outTemp'] if rec['outTemp'] is not None else " N/A"
        windSpeed = "%10.1f" % rec['windSpeed'] if rec['windSpeed'] is not None else " N/A"
        barometer = "%10.1f" % rec['barometer'] if rec['barometer'] is not None else " N/A"
        rain = "%10.2f" % rec['rain'] if rec['rain'] is not None else " N/A"
        radiation = "%10.0f" % rec['radiation'] if rec['radiation'] is not None else " N/A"
        # (6 * "%s") builds the six-column format string before the % applies.
        print(6 * "%s" % (time.ctime(rec['dateTime']), outTemp, windSpeed, barometer, rain, radiation))
| gjr80/weewx | bin/weewx/tests/gen_fake_data.py | Python | gpl-3.0 | 9,588 |
import datetime
from rest_framework.authentication import TokenAuthentication
from rest_framework import exceptions
from factotum.environment import env
class ExpiringTokenAuthentication(TokenAuthentication):
    """DRF token authentication that additionally rejects tokens older than
    env.FACTOTUM_WS_TOKEN_TTL milliseconds. Uses the 'Bearer' keyword in the
    Authorization header instead of DRF's default 'Token'."""
    keyword = "Bearer"

    def authenticate_credentials(self, key):
        """Return (user, token) for *key*, or raise AuthenticationFailed when
        the token is unknown, its user is inactive, or it has expired."""
        model = self.get_model()
        try:
            token = model.objects.get(key=key)
        except model.DoesNotExist:
            raise exceptions.AuthenticationFailed("Invalid token")

        if not token.user.is_active:
            raise exceptions.AuthenticationFailed("User inactive or deleted")

        # NOTE(review): datetime.now() is naive; if Django's USE_TZ is on,
        # token.created would be timezone-aware and this comparison would
        # raise TypeError -- confirm against the project's settings.
        now = datetime.datetime.now()
        # TTL is expressed in milliseconds.
        if token.created < now - datetime.timedelta(
            milliseconds=env.FACTOTUM_WS_TOKEN_TTL
        ):
            raise exceptions.AuthenticationFailed("Token has expired")

        return token.user, token
| HumanExposure/factotum | apps_api/core/authentication.py | Python | gpl-3.0 | 854 |
# -*- coding: utf-8 -*-
import argparse
import six
# Top-level CLI parser; each command below gets its own sub-parser.
# None of the sub-parsers define any arguments yet -- only descriptions
# and help stubs.
parser = argparse.ArgumentParser(description="Minecraft Package Manager")
sub = parser.add_subparsers(help="command help")
# package commands
sync_parser = sub.add_parser("sync",
    description="Synchronize local mod archive.",
    help="sync --help")
show_parser = sub.add_parser("show",
    description="Show mod informations.",
    help="show --help")
search_parser = sub.add_parser("search",
    description="Search mod archive.",
    help="search --help")
update_parser = sub.add_parser("update",
    description="Update mods.",
    help="update --help")
install_parser = sub.add_parser("install",
    description="Install mods.",
    help="install --help")
remove_parser = sub.add_parser("remove",
    description="Remove mods.",
    help="remove --help")
# repo commands
repo_add_parser = sub.add_parser("addrepo",
    description="Add mod repository.",
    help="addrepo --help")
repo_del_parser = sub.add_parser("rmrepo",
    description="Remove mod repository.",
    help="rmrepo --help")
repo_show_parser = sub.add_parser("lsrepo",
    description="Show mod repository informations.",
    help="lsrepo --help")
if __name__ == "__main__":
    # NOTE(review): the parsed namespace is unused beyond forcing argument
    # validation -- command dispatch is presumably still to be written.
    cmd = parser.parse_args()
    six.print_("Done")
| qwattash/mpm | mpm/cli/mpm.py | Python | gpl-3.0 | 1,756 |
import contextlib
import logging
import os.path
import sqlalchemy
from sqlalchemy import schema as sql_schema
from sqlalchemy import types as sql_types
from sqlalchemy.ext import declarative as sql_declarative
from sqlalchemy.orm import session as sql_session
from dancebooks.config import config
_Base = sql_declarative.declarative_base()
class Backup(_Base):
    """ORM model for a row of the service.backups table."""
    __tablename__ = "backups"
    __table_args__ = {"schema": "service"}

    id = sql_schema.Column(sql_types.BigInteger, primary_key=True)
    # Filesystem path of the backup file (basename is exposed via `name`).
    path = sql_schema.Column(sql_types.String, nullable=False)
    provenance = sql_schema.Column(sql_types.String, nullable=False)
    # Aspect-ratio and pixel-size components; x/y pairing inferred from the
    # column names -- confirm semantics against the writers of this table.
    aspect_ratio_x = sql_schema.Column(sql_types.BigInteger, nullable=False)
    aspect_ratio_y = sql_schema.Column(sql_types.BigInteger, nullable=False)
    image_size_x = sql_schema.Column(sql_types.BigInteger, nullable=False)
    image_size_y = sql_schema.Column(sql_types.BigInteger, nullable=False)
    note = sql_schema.Column(sql_types.String, nullable=False)

    @property
    def name(self):
        """Base file name of the backup, derived from `path`."""
        return os.path.basename(self.path)
# Engine built from the application config; `options` is forwarded as
# DB-API connect_args.
_engine = sqlalchemy.create_engine(
    config.db.connection_url,
    connect_args=config.db.options
)
# Session factory bound to the engine; sessions are obtained via
# make_transaction() below.
_session_maker = sql_session.sessionmaker(bind=_engine)
@contextlib.contextmanager
def make_transaction():
    """Context manager yielding a fresh SQLAlchemy session.

    On an exception inside the `with` body the session is rolled back (and
    the exception logged); the session is always closed on exit.
    """
    # Create the session *outside* the try block: previously a failure in
    # _session_maker() reached the finally clause with `txn` unbound,
    # raising NameError and masking the real error.
    txn = _session_maker()
    try:
        yield txn
    except Exception:
        logging.exception("Rolling session back due to exception")
        txn.rollback()
        # NOTE(review): not re-raising here makes @contextmanager suppress
        # the exception, so callers proceed as if the body succeeded --
        # confirm this best-effort behaviour is intended; otherwise add
        # `raise` after the rollback.
    finally:
        txn.close()
| georgthegreat/dancebooks-bibtex | dancebooks/db.py | Python | gpl-3.0 | 1,449 |
#!/usr/bin/env python
# -*-coding:utf-8-*-
'''
Author : ming
date : 2016/11/27 上午12:20
role : Version Update
'''
from tornado import web
from tornado.web import HTTPError
class IndexHandler(web.RequestHandler):
    """Renders the room page; a room literally named 'get' triggers a 500."""

    def get(self, room):
        if room != 'get':
            # Remember the room and render the page for it.
            self.room = room
            self.render('index.html', room=self.room, host=self.request.host)
        else:
            # Demo path: deliberately produce an internal server error.
            raise HTTPError(500)
| mk8310/im_demo | views/index.py | Python | gpl-3.0 | 405 |
class Membro(object):
    """A project-team member: a name (`nome`) and a role (`cargo`)."""

    # Class-level defaults kept for backward compatibility with any code
    # that reads these attributes on the class itself.
    nome = None
    cargo = None

    def __init__(self, nome, cargo):
        """
        :param nome: the member's name
        :param cargo: the member's role/position in the team
        """
        self.nome = nome
        self.cargo = cargo

    def __repr__(self):
        # Added for debuggability; does not change any existing behaviour.
        return '{}(nome={!r}, cargo={!r})'.format(type(self).__name__, self.nome, self.cargo)
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# pylint:disable=too-many-lines
from __future__ import unicode_literals
import ast
import base64
import ctypes
import datetime
import hashlib
import io
import operator
import os
import platform
import random
import re
import shutil
import socket
import ssl
import stat
import time
import traceback
import urllib
import uuid
import xml.etree.ElementTree as ET
import zipfile
from contextlib import closing
from itertools import cycle, izip
import adba
import certifi
import cfscrape
import requests
from cachecontrol import CacheControl
from requests.utils import urlparse
import sickbeard
from sickbeard import classes, db, logger
from sickbeard.common import USER_AGENT
from sickrage.helper import MEDIA_EXTENSIONS, SUBTITLE_EXTENSIONS, episode_num, pretty_file_size
from sickrage.helper.encoding import ek
from sickrage.show.Show import Show
# pylint: disable=protected-access
# Access to a protected member of a client class
urllib._urlopener = classes.SickBeardURLopener()
def indentXML(elem, level=0):
    """
    Does our pretty printing, makes Matt very happy

    Recursively inserts newline/indent whitespace into an ElementTree element,
    in place, so the serialized XML is human readable.

    :param elem: ElementTree element to indent in place
    :param level: current nesting depth, used to size the indent
    """
    i = "\n" + level * " "
    # an Element is truthy when it has child elements
    if elem:
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop variable deliberately shadows `elem`; after the loop it
        # refers to the *last child*, whose tail is then set to this level's indent
        for elem in elem:
            indentXML(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def remove_non_release_groups(name):
    """
    Remove non release groups from name

    Strips known tracker tags, uploader suffixes and other junk appended to
    release names, using the pattern table below.

    :param name: release name to clean
    :return: the cleaned name (unchanged when `name` is falsy)
    """
    if not name:
        return name

    # Do not remove all [....] suffixes, or it will break anime releases ## Need to verify this is true now
    # Check your database for funky release_names and add them here, to improve failed handling, archiving, and history.
    # select release_name from tv_episodes WHERE LENGTH(release_name);
    # [eSc], [SSG], [GWC] are valid release groups for non-anime
    # Each entry maps a pattern to how it is applied: 'searchre' entries are
    # case-insensitive regexes removed via re.sub; 'search' entries would be
    # removed as plain substrings (none are currently defined).
    removeWordsList = {
        r'\[rartv\]$': 'searchre',
        r'\[rarbg\]$': 'searchre',
        r'\.\[eztv\]$': 'searchre',
        r'\[eztv\]$': 'searchre',
        r'\[ettv\]$': 'searchre',
        r'\[cttv\]$': 'searchre',
        r'\.\[vtv\]$': 'searchre',
        r'\[vtv\]$': 'searchre',
        r'\[EtHD\]$': 'searchre',
        r'\[GloDLS\]$': 'searchre',
        r'\[silv4\]$': 'searchre',
        r'\[Seedbox\]$': 'searchre',
        r'\[PublicHD\]$': 'searchre',
        r'\.\[PublicHD\]$': 'searchre',
        r'\.\[NO.RAR\]$': 'searchre',
        r'\[NO.RAR\]$': 'searchre',
        r'-\=\{SPARROW\}\=-$': 'searchre',
        r'\=\{SPARR$': 'searchre',
        r'\.\[720P\]\[HEVC\]$': 'searchre',
        r'\[AndroidTwoU\]$': 'searchre',
        r'\[brassetv\]$': 'searchre',
        r'\[Talamasca32\]$': 'searchre',
        r'\(musicbolt\.com\)$': 'searchre',
        r'\.\(NLsub\)$': 'searchre',
        r'\(NLsub\)$': 'searchre',
        r'\.\[BT\]$': 'searchre',
        r' \[1044\]$': 'searchre',
        r'\.RiPSaLoT$': 'searchre',
        r'\.GiuseppeTnT$': 'searchre',
        r'\.Renc$': 'searchre',
        r'\.gz$': 'searchre',
        r'\.English$': 'searchre',
        r'\.German$': 'searchre',
        r'\.\.Italian$': 'searchre',
        r'\.Italian$': 'searchre',
        r'(?<![57])\.1$': 'searchre',
        r'-NZBGEEK$': 'searchre',
        r'-Siklopentan$': 'searchre',
        r'-Chamele0n$': 'searchre',
        r'-Obfuscated$': 'searchre',
        r'-BUYMORE$': 'searchre',
        r'-\[SpastikusTV\]$': 'searchre',
        r'-RP$': 'searchre',
        r'-20-40$': 'searchre',
        r'\.\[www\.usabit\.com\]$': 'searchre',
        r'^\[www\.Cpasbien\.pe\] ': 'searchre',
        r'^\[www\.Cpasbien\.com\] ': 'searchre',
        r'^\[ www\.Cpasbien\.pw \] ': 'searchre',
        r'^\.www\.Cpasbien\.pw': 'searchre',
        r'^\[www\.newpct1\.com\]': 'searchre',
        r'^\[ www\.Cpasbien\.com \] ': 'searchre',
        r'- \{ www\.SceneTime\.com \}$': 'searchre',
        r'^\{ www\.SceneTime\.com \} - ': 'searchre',
        r'^\]\.\[www\.tensiontorrent.com\] - ': 'searchre',
        r'^\]\.\[ www\.tensiontorrent.com \] - ': 'searchre',
        r'- \[ www\.torrentday\.com \]$': 'searchre',
        r'^\[ www\.TorrentDay\.com \] - ': 'searchre',
        r'\[NO-RAR\] - \[ www\.torrentday\.com \]$': 'searchre',
    }

    _name = name
    for remove_string, remove_type in removeWordsList.iteritems():
        if remove_type == 'search':
            _name = _name.replace(remove_string, '')
        elif remove_type == 'searchre':
            _name = re.sub(r'(?i)' + remove_string, '', _name)

    return _name
def isMediaFile(filename):
    """
    Check if named file may contain media

    :param filename: Filename to check
    :return: True if this is a known media file, False if not
    """

    # ignore samples
    try:
        # (?<!shomin.) presumably keeps titles like "Shomin Sample" from being
        # rejected as sample files - TODO confirm
        if re.search(r'(^|[\W_])(?<!shomin.)(sample\d*)[\W_]', filename, re.I):
            return False

        # ignore RARBG release intro
        if re.search(r'^RARBG\.(\w+\.)?(mp4|avi|txt)$', filename, re.I):
            return False

        # ignore MAC OS's retarded "resource fork" files
        if filename.startswith('._'):
            return False

        filname_parts = filename.rpartition(".")

        # "extra"/"extras" featurettes are not wanted as episodes
        if re.search('extras?$', filname_parts[0], re.I):
            return False

        # finally, the extension must be a known media extension
        return filname_parts[-1].lower() in MEDIA_EXTENSIONS
    except TypeError as error:  # Not a string
        logger.log('Invalid filename. Filename must be a string. {0}'.format(error), logger.DEBUG)  # pylint: disable=no-member
        return False
def isRarFile(filename):
    """
    Check if file is a RAR file, or part of a RAR set

    Only the first volume of a multi-part set (.rar or .part01.rar) matches;
    later volumes (.part02.rar, ...) are rejected so a set is handled once.

    :param filename: Filename to check
    :return: True if this is RAR/Part file, False if not
    """
    first_volume_pattern = r'(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
    return bool(re.search(first_volume_pattern, filename))
def isBeingWritten(filepath):
    """
    Check if file has been written in last 60 seconds

    :param filepath: Filename to check
    :return: True if file has been written recently, False if none
    """
    # Return True if file was modified within 60 seconds. it might still be being written to.
    # uses the newer of the creation and modification timestamps
    ctime = max(ek(os.path.getctime, filepath), ek(os.path.getmtime, filepath))
    if ctime > time.time() - 60:
        return True

    return False
def remove_file_failed(failed_file):
    """
    Remove file from filesystem (best-effort)

    :param failed_file: Path of the file to remove
    """

    try:
        ek(os.remove, failed_file)
    except Exception:
        # deliberate best-effort: the file may already be gone or be locked
        pass
def makeDir(path):
    """
    Make a directory (and any missing parents) on the filesystem

    :param path: directory to make
    :return: True if the directory exists or was created, False if failure
    """
    if not ek(os.path.isdir, path):
        try:
            ek(os.makedirs, path)
            # do the library update for synoindex
            sickbeard.notifiers.synoindex_notifier.addFolder(path)
        except OSError:
            return False
    return True
def searchIndexerForShowID(regShowName, indexer=None, indexer_id=None, ui=None):
    """
    Contacts indexer to check for information on shows by showid

    :param regShowName: Name of show
    :param indexer: Which indexer to use (all indexers when None)
    :param indexer_id: Which indexer ID to look for
    :param ui: Custom UI for indexer use
    :return: tuple of (series name, indexer, series id), or (None, None, None)
             when nothing matching is found
    """
    showNames = [re.sub('[. -]', ' ', regShowName)]

    # Query Indexers for each search term and build the list of results
    # Bug fix: the previous `int(indexer or [])` produced a bare int, which is
    # not iterable and raised TypeError whenever an indexer was supplied.
    for i in sickbeard.indexerApi().indexers if not indexer else [int(indexer)]:
        # Query Indexers for each search term and build the list of results
        lINDEXER_API_PARMS = sickbeard.indexerApi(i).api_params.copy()
        if ui is not None:
            lINDEXER_API_PARMS['custom_ui'] = ui
        t = sickbeard.indexerApi(i).indexer(**lINDEXER_API_PARMS)

        for name in showNames:
            logger.log("Trying to find " + name + " on " + sickbeard.indexerApi(i).name, logger.DEBUG)

            try:
                # look up by id when one was given, otherwise by name
                search = t[indexer_id] if indexer_id else t[name]
            except Exception:
                continue

            try:
                seriesname = search[0][b'seriesname']
            except Exception:
                seriesname = None

            try:
                series_id = search[0][b'id']
            except Exception:
                series_id = None

            if not (seriesname and series_id):
                continue
            ShowObj = Show.find(sickbeard.showList, int(series_id))
            # Check if we can find the show in our list (if not, it's not the right show)
            if (indexer_id is None) and (ShowObj is not None) and (ShowObj.indexerid == int(series_id)):
                return seriesname, i, int(series_id)
            elif (indexer_id is not None) and (int(indexer_id) == int(series_id)):
                return seriesname, i, int(indexer_id)

        if indexer:
            break

    return None, None, None
def listMediaFiles(path):
    """
    Get a list of files possibly containing media in a path (recursive)

    :param path: Path to check for files
    :return: list of full paths of media files; empty list for a missing or
             invalid path
    """
    # Bug fix: the original tested `not dir` - the *builtin* `dir`, which is
    # always truthy - so an empty/None path was never rejected here.
    if not path or not ek(os.path.isdir, path):
        return []

    files = []
    for curFile in ek(os.listdir, path):
        fullCurFile = ek(os.path.join, path, curFile)

        # if it's a folder do it recursively (skipping hidden and 'Extras' dirs)
        if ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
            files += listMediaFiles(fullCurFile)
        elif isMediaFile(curFile):
            files.append(fullCurFile)

    return files
def copyFile(srcFile, destFile):
    """
    Copy a file from source to destination, preserving permission bits

    :param srcFile: Path of source file
    :param destFile: Path of destination file
    """
    try:
        # SpecialFileError may be missing on some Python versions; alias it to
        # Error so the except clause below always works
        from shutil import SpecialFileError, Error
    except ImportError:
        from shutil import Error
        SpecialFileError = Error

    try:
        ek(shutil.copyfile, srcFile, destFile)
    except (SpecialFileError, Error) as error:
        logger.log('{0}'.format(error), logger.WARNING)
    except Exception as error:
        logger.log('{0}'.format(error), logger.ERROR)
    else:
        try:
            # copy the permission bits as well; ignore filesystems that refuse
            ek(shutil.copymode, srcFile, destFile)
        except OSError:
            pass
def moveFile(srcFile, destFile):
    """
    Move a file from source to destination, falling back to copy + delete
    when the direct move fails

    :param srcFile: Path of source file
    :param destFile: Path of destination file
    """
    try:
        ek(shutil.move, srcFile, destFile)
        fixSetGroupID(destFile)
    except OSError:
        # direct move failed; copy the file over and remove the original
        copyFile(srcFile, destFile)
        ek(os.unlink, srcFile)
def link(src, dst):
    """
    Create a file link from source to destination.
    TODO: Make this unicode proof

    :param src: Source file
    :param dst: Destination file
    """
    if platform.system() == 'Windows':
        # CreateHardLinkW returns 0 on failure
        if ctypes.windll.kernel32.CreateHardLinkW(ctypes.c_wchar_p(unicode(dst)), ctypes.c_wchar_p(unicode(src)), None) == 0:
            raise ctypes.WinError()
    else:
        ek(os.link, src, dst)
def hardlinkFile(srcFile, destFile):
    """
    Create a hard-link (inside filesystem link) between source and destination,
    falling back to a plain copy when linking fails

    :param srcFile: Source file
    :param destFile: Destination file
    """
    try:
        ek(link, srcFile, destFile)
        fixSetGroupID(destFile)
    except Exception as error:
        logger.log("Failed to create hardlink of {0} at {1}. Error: {2}. Copying instead".format
                   (srcFile, destFile, error), logger.WARNING)
        copyFile(srcFile, destFile)
def symlink(src, dst):
    """
    Create a soft/symlink between source and destination

    :param src: Source file
    :param dst: Destination file
    """
    if platform.system() == 'Windows':
        # last arg: 1 = directory link, 0 = file link
        # NOTE(review): both 0 and 1280 are treated as failure codes here -
        # confirm 1280 against the CreateSymbolicLinkW documentation
        if ctypes.windll.kernel32.CreateSymbolicLinkW(ctypes.c_wchar_p(unicode(dst)), ctypes.c_wchar_p(unicode(src)), 1 if ek(os.path.isdir, src) else 0) in [0, 1280]:
            raise ctypes.WinError()
    else:
        ek(os.symlink, src, dst)
def moveAndSymlinkFile(srcFile, destFile):
    """
    Move a file from source to destination, then create a symlink back from destination from source. If this fails, copy
    the file from source to destination

    :param srcFile: Source file
    :param destFile: Destination file
    """
    try:
        moveFile(srcFile, destFile)
        symlink(destFile, srcFile)
    except Exception as error:
        # fall back to leaving the original in place and just copying it over
        logger.log("Failed to create symlink of {0} at {1}. Error: {2}. Copying instead".format
                   (srcFile, destFile, error), logger.WARNING)
        copyFile(srcFile, destFile)
def make_dirs(path):
    """
    Creates any folders that are missing and assigns them the permissions of their
    parents

    :param path: full path of the directory tree to create
    :return: True when the path exists or was created, False on failure
    """

    logger.log("Checking if the path {0} already exists".format(path), logger.DEBUG)

    if not ek(os.path.isdir, path):
        # Windows, create all missing folders
        if platform.system() == 'Windows':
            try:
                logger.log("Folder {0} didn't exist, creating it".format(path), logger.DEBUG)
                ek(os.makedirs, path)
            except (OSError, IOError) as error:
                logger.log("Failed creating {0} : {1}".format(path, error), logger.ERROR)
                return False

        # not Windows, create all missing folders and set permissions
        else:
            sofar = ''
            folder_list = path.split(os.path.sep)

            # look through each subfolder and make sure they all exist
            for cur_folder in folder_list:
                sofar += cur_folder + os.path.sep

                # if it exists then just keep walking down the line
                if ek(os.path.isdir, sofar):
                    continue

                try:
                    logger.log("Folder {0} didn't exist, creating it".format(sofar), logger.DEBUG)
                    ek(os.mkdir, sofar)
                    # use normpath to remove end separator, otherwise checks permissions against itself
                    chmodAsParent(ek(os.path.normpath, sofar))
                    # do the library update for synoindex
                    sickbeard.notifiers.synoindex_notifier.addFolder(sofar)
                except (OSError, IOError) as error:
                    logger.log("Failed creating {0} : {1}".format(sofar, error), logger.ERROR)
                    return False

    return True
def rename_ep_file(cur_path, new_path, old_path_length=0):
    """
    Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
    left that are now empty.

    :param cur_path: The absolute path to the file you want to move/rename
    :param new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
    :param old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION
    :return: True on success, False when the move failed
    """

    # new_dest_dir, new_dest_name = ek(os.path.split, new_path)  # @UnusedVariable
    if old_path_length == 0 or old_path_length > len(cur_path):
        # approach from the right
        cur_file_name, cur_file_ext = ek(os.path.splitext, cur_path)  # @UnusedVariable
    else:
        # approach from the left
        cur_file_ext = cur_path[old_path_length:]
        cur_file_name = cur_path[:old_path_length]

    if cur_file_ext[1:] in SUBTITLE_EXTENSIONS:
        # Extract subtitle language from filename
        sublang = ek(os.path.splitext, cur_file_name)[1][1:]

        # Check if the language extracted from filename is a valid language
        if sublang in sickbeard.subtitles.subtitle_code_filter():
            cur_file_ext = '.' + sublang + cur_file_ext

    # put the extension on the incoming file
    new_path += cur_file_ext

    # ensure the destination directory tree exists
    make_dirs(ek(os.path.dirname, new_path))

    # move the file
    try:
        logger.log("Renaming file from {0} to {1}".format(cur_path, new_path))
        ek(shutil.move, cur_path, new_path)
    except (OSError, IOError) as error:
        logger.log("Failed renaming {0} to {1} : {2}".format(cur_path, new_path, error), logger.ERROR)
        return False

    # clean up any old folders that are empty
    delete_empty_folders(ek(os.path.dirname, cur_path))

    return True
def delete_empty_folders(check_empty_dir, keep_dir=None):
    """
    Walks backwards up the path and deletes any empty folders found.

    :param check_empty_dir: The path to clean (absolute path to a folder)
    :param keep_dir: Clean until this path is reached
    """

    # treat check_empty_dir as empty when it only contains these items
    ignore_items = []

    logger.log("Trying to clean any empty folders under " + check_empty_dir)

    # as long as the folder exists and doesn't contain any files, delete it
    while ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
        check_files = ek(os.listdir, check_empty_dir)

        if not check_files or (len(check_files) <= len(ignore_items) and all(
                check_file in ignore_items for check_file in check_files)):
            # directory is empty or contains only ignore_items
            try:
                logger.log("Deleting empty folder: " + check_empty_dir)
                # need shutil.rmtree when ignore_items is really implemented
                ek(os.rmdir, check_empty_dir)
                # do the library update for synoindex
                sickbeard.notifiers.synoindex_notifier.deleteFolder(check_empty_dir)
            except OSError as error:
                logger.log("Unable to delete {0}. Error: {1}".format(check_empty_dir, error), logger.WARNING)
                break
            # continue one level up the tree
            check_empty_dir = ek(os.path.dirname, check_empty_dir)
        else:
            break
def fileBitFilter(mode):
    """
    Strip special filesystem bits (executable, set-uid, set-gid) from a mode

    :param mode: mode to check and strip
    :return: required mode for media file
    """
    special_bits = (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID)
    for special_bit in special_bits:
        # masking with the complement clears the bit whether or not it was set
        mode &= ~special_bit
    return mode
def chmodAsParent(childPath):
    """
    Retain permissions of parent for childs
    (Does not work for Windows hosts)

    :param childPath: Child Path to change permissions to sync from parent
    """
    if platform.system() == 'Windows':
        return

    parentPath = ek(os.path.dirname, childPath)

    if not parentPath:
        logger.log("No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
        return

    # normalize the child path against its parent
    childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))

    parentPathStat = ek(os.stat, parentPath)
    parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE])

    childPathStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
    childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])

    # files do not inherit executable/setuid/setgid bits
    if ek(os.path.isfile, childPath):
        childMode = fileBitFilter(parentMode)
    else:
        childMode = parentMode

    if childPath_mode == childMode:
        return

    childPath_owner = childPathStat.st_uid  # pylint: disable=no-member
    user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX

    # only root or the file owner may change the mode
    if user_id not in (childPath_owner, 0):
        logger.log("Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
        return

    try:
        ek(os.chmod, childPath, childMode)
    except OSError:
        logger.log("Failed to set permission for {0} to {1:o}, parent directory has {2:o}".format(childPath, childMode, parentMode), logger.DEBUG)
def fixSetGroupID(childPath):
    """
    Inherit SGID from parent
    (does not work on Windows hosts)

    :param childPath: Path to inherit SGID permissions from parent
    """
    if platform.system() == 'Windows':
        return

    parentPath = ek(os.path.dirname, childPath)
    parentStat = ek(os.stat, parentPath)
    parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])

    # normalize the child path against its parent
    childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))

    # only act when the parent directory carries the set-group-ID bit
    if parentMode & stat.S_ISGID:
        parentGID = parentStat[stat.ST_GID]
        childStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
        childGID = childStat[stat.ST_GID]

        if childGID == parentGID:
            return

        childPath_owner = childStat.st_uid  # pylint: disable=no-member
        user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX

        # only root or the file owner may change the group
        if user_id != 0 and user_id != childPath_owner:
            logger.log("Not running as root or owner of " + childPath + ", not trying to set the set-group-ID",
                       logger.DEBUG)
            return

        try:
            ek(os.chown, childPath, -1, parentGID)  # @UndefinedVariable - only available on UNIX
            logger.log("Respecting the set-group-ID bit on the parent directory for {0}".format(childPath), logger.DEBUG)
        except OSError:
            logger.log(
                "Failed to respect the set-group-ID bit on the parent directory for {0} (setting group ID {1})".format(
                    childPath, parentGID), logger.ERROR)
def is_anime_in_show_list():
    """
    Check if any shows in list contain anime

    :return: True if global showlist contains Anime, False if not
    """
    return any(show.is_anime for show in sickbeard.showList)


def update_anime_support():
    """Check if we need to support anime, and if we do, enable the feature"""
    sickbeard.ANIMESUPPORT = is_anime_in_show_list()
def get_absolute_number_from_season_and_episode(show, season, episode):
    """
    Find the absolute number for a show episode

    :param show: Show object
    :param season: Season number
    :param episode: Episode number
    :return: The absolute number, or None when no unique match is found
    """
    absolute_number = None

    if season and episode:
        main_db_con = db.DBConnection()
        sql = "SELECT * FROM tv_episodes WHERE showid = ? and season = ? and episode = ?"
        sql_results = main_db_con.select(sql, [show.indexerid, season, episode])

        # only trust the lookup when exactly one row matches
        if len(sql_results) == 1:
            absolute_number = int(sql_results[0][b"absolute_number"])
            logger.log("Found absolute number {absolute} for show {show} {ep}".format
                       (absolute=absolute_number, show=show.name,
                        ep=episode_num(season, episode)), logger.DEBUG)
        else:
            logger.log("No entries for absolute number for show {show} {ep}".format
                       (show=show.name, ep=episode_num(season, episode)), logger.DEBUG)

    return absolute_number
def get_all_episodes_from_absolute_number(show, absolute_numbers, indexer_id=None):
    """
    Resolve a list of absolute episode numbers to a (season, episodes) pair.

    :param show: show object; looked up via indexer_id when falsy
    :param absolute_numbers: list of absolute episode numbers to resolve
    :param indexer_id: indexer id used to find the show when none is given
    :return: tuple of (season, list of episode numbers)
    """
    episodes = []
    season = None

    if len(absolute_numbers):
        if not show and indexer_id:
            show = Show.find(sickbeard.showList, indexer_id)

        for absolute_number in absolute_numbers if show else []:
            ep = show.getEpisode(None, None, absolute_number=absolute_number)
            if ep:
                episodes.append(ep.episode)
                season = ep.season  # this will always take the last found season so eps that cross the season border are not handeled well

    return season, episodes
def sanitizeSceneName(name, anime=False):
    """
    Takes a show name and returns the "scenified" version of it.

    :param anime: Some show have a ' in their name(Kuroko's Basketball) and is needed for search.
    :return: A string containing the scene version of the show name given.
    """
    if not name:
        return ''

    # apostrophes are kept for anime titles, stripped for everything else
    forbidden_chars = ',:()!?\u2019' + ('' if anime else "'")

    scene_name = name
    for forbidden in forbidden_chars:
        scene_name = scene_name.replace(forbidden, "")

    # tidy up stuff that doesn't belong in scene names
    scene_name = scene_name.replace("&", "and")
    scene_name = re.sub(r"[- /]+", ".", scene_name)
    scene_name = re.sub(r"[.]+", ".", scene_name)

    return scene_name[:-1] if scene_name.endswith('.') else scene_name
# AST binary operators allowed in arithmeticEval.
_binOps = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    # Bug fix: operator.div only exists on Python 2; getattr falls back to
    # truediv so importing this module no longer crashes on Python 3 while
    # Python 2 behavior is unchanged.
    ast.Div: getattr(operator, 'div', operator.truediv),
    ast.Mod: operator.mod
}


def arithmeticEval(s):
    """
    A safe eval supporting basic arithmetic operations (+, -, *, /, %).

    Only literals and the binary operators whitelisted in _binOps are allowed;
    names, calls and attribute access all raise.

    :param s: expression to evaluate
    :return: value
    """
    node = ast.parse(s, mode='eval')

    def _eval(node):
        # recursively fold the AST; whitelisting node types is what makes
        # this safe compared to eval()
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        elif isinstance(node, ast.Str):
            return node.s
        elif isinstance(node, ast.Num):
            return node.n
        elif isinstance(node, ast.BinOp):
            return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
        else:
            raise Exception('Unsupported type {0}'.format(node))

    return _eval(node.body)
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create self-signed HTTPS certificates and store in paths 'ssl_cert' and 'ssl_key'

    :param ssl_cert: Path of SSL certificate file to write
    :param ssl_key: Path of SSL keyfile to write
    :return: True on success, False on failure
    """
    # assert isinstance(ssl_key, unicode)
    # assert isinstance(ssl_cert, unicode)

    try:
        from OpenSSL import crypto  # @UnresolvedImport
        from certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, \
            serial  # @UnresolvedImport
    except Exception:
        logger.log("pyopenssl module missing, please install for https access", logger.WARNING)
        return False

    # Create the CA Certificate
    cakey = createKeyPair(TYPE_RSA, 1024)
    careq = createCertRequest(cakey, CN='Certificate Authority')
    cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years

    cname = 'SickRage'
    pkey = createKeyPair(TYPE_RSA, 1024)
    req = createCertRequest(pkey, CN=cname)
    cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years

    # Save the key and certificate to disk
    try:
        # pylint: disable=no-member
        # Module has no member
        # Fix: use context managers so both handles are flushed and closed even
        # when a write fails (the originals were never explicitly closed)
        with io.open(ssl_key, 'wb') as key_file:
            key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
        with io.open(ssl_cert, 'wb') as cert_file:
            cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    except Exception:
        logger.log("Error creating SSL key and certificate", logger.ERROR)
        return False

    return True
def backupVersionedFile(old_file, version):
    """
    Back up an old version of a file

    :param old_file: Original file, to take a backup from
    :param version: Version of file to store in backup
    :return: True if success, False if failure
    """

    numTries = 0

    # backup name: <old_file>.v<version>
    new_file = old_file + '.' + 'v' + str(version)

    while not ek(os.path.isfile, new_file):
        if not ek(os.path.isfile, old_file):
            logger.log("Not creating backup, {0} doesn't exist".format(old_file), logger.DEBUG)
            break

        try:
            logger.log("Trying to back up {0} to {1}".format(old_file, new_file), logger.DEBUG)
            shutil.copy(old_file, new_file)
            logger.log("Backup done", logger.DEBUG)
            break
        except Exception as error:
            # retry up to 10 times with a one-second pause between attempts
            logger.log("Error while trying to back up {0} to {1} : {2}".format(old_file, new_file, error), logger.WARNING)
            numTries += 1
            time.sleep(1)
            logger.log("Trying again.", logger.DEBUG)

        if numTries >= 10:
            logger.log("Unable to back up {0} to {1} please do it manually.".format(old_file, new_file), logger.ERROR)
            return False

    return True
def restoreVersionedFile(backup_file, version):
    """
    Restore a file version to original state

    :param backup_file: File to restore
    :param version: Version of file to restore
    :return: True on success, False on failure
    """

    numTries = 0

    new_file, ext_ = ek(os.path.splitext, backup_file)
    # versioned backup name: <new_file>.v<version>
    restore_file = new_file + '.' + 'v' + str(version)

    if not ek(os.path.isfile, new_file):
        logger.log("Not restoring, {0} doesn't exist".format(new_file), logger.DEBUG)
        return False

    try:
        # move the current file aside as <new_file>.r<version> before restoring
        logger.log("Trying to backup {0} to {1}.r{2} before restoring backup".format
                   (new_file, new_file, version), logger.DEBUG)

        shutil.move(new_file, new_file + '.' + 'r' + str(version))
    except Exception as error:
        logger.log("Error while trying to backup DB file {0} before proceeding with restore: {1}".format
                   (restore_file, error), logger.WARNING)
        return False

    while not ek(os.path.isfile, new_file):
        if not ek(os.path.isfile, restore_file):
            logger.log("Not restoring, {0} doesn't exist".format(restore_file), logger.DEBUG)
            break

        try:
            logger.log("Trying to restore file {0} to {1}".format(restore_file, new_file), logger.DEBUG)
            shutil.copy(restore_file, new_file)
            logger.log("Restore done", logger.DEBUG)
            break
        except Exception as error:
            # retry up to 10 times with a one-second pause between attempts
            logger.log("Error while trying to restore file {0}. Error: {1}".format(restore_file, error), logger.WARNING)
            numTries += 1
            time.sleep(1)
            logger.log("Trying again. Attempt #: {0}".format(numTries), logger.DEBUG)

        if numTries >= 10:
            logger.log("Unable to restore file {0} to {1}".format(restore_file, new_file), logger.WARNING)
            return False

    return True
def get_lan_ip():
    """Return the machine's first non-loopback IPv4 address, falling back to
    the hostname when none can be determined."""
    try:
        candidates = socket.gethostbyname_ex(socket.gethostname())[2]
        return next(ip for ip in candidates if not ip.startswith("127."))
    except Exception:
        return socket.gethostname()
def check_url(url):
    """
    Check if a URL exists without downloading the whole file.
    We only check the URL header.

    :param url: URL to probe with a HEAD request
    :return: True when the request succeeds, False otherwise
    """
    try:
        # NOTE(review): verify=False disables TLS certificate verification
        requests.head(url, verify=False).raise_for_status()
    except Exception as error:
        # handle_requests_exception is defined elsewhere in this module
        handle_requests_exception(error)
        return False
    return True
def anon_url(*url):
    """
    Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
    """
    if None in url:
        return ''
    return '{0}{1}'.format(sickbeard.ANON_REDIRECT, ''.join(str(s) for s in url))
"""
Encryption
==========
By Pedro Jose Pereira Vieito <pvieito@gmail.com> (@pvieito)
* If encryption_version==0 then return data without encryption
* The keys should be unique for each device
To add a new encryption_version:
1) Code your new encryption_version
2) Update the last encryption_version available in webserve.py
3) Remember to maintain old encryption versions and key generators for retrocompatibility
"""
# Key Generators
unique_key1 = hex(uuid.getnode() ** 2)  # Used in encryption v1; derived from the MAC address, so device-unique but not secret
# Encryption Functions
def encrypt(data, encryption_version=0, _decrypt=False):
    """
    Obfuscate *data* with a base64-wrapped XOR keystream.

    Version 1 keys on the device-unique value, version 2 on the configured
    encryption secret; any other version returns the data unchanged.
    (XOR is not real security - it only keeps values out of plain sight.)
    """
    if encryption_version == 1:
        key = unique_key1
    elif encryption_version == 2:
        key = sickbeard.ENCRYPTION_SECRET
    else:
        # Version 0: Plain text
        return data

    if _decrypt:
        return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(key)))
    return base64.encodestring(
        ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(key)))).strip()


def decrypt(data, encryption_version=0):
    """Inverse of encrypt() for the same encryption_version."""
    return encrypt(data, encryption_version, _decrypt=True)
def full_sanitizeSceneName(name):
    """Scene-sanitize *name* and normalize it to lowercase, space-separated words."""
    return re.sub('[. -]', ' ', sanitizeSceneName(name)).lower().strip()


def _check_against_names(nameInQuestion, show, season=-1):
    """Return True when the (already sanitized) name matches the show name or one of its scene exceptions."""
    showNames = []
    # the plain show name only applies to season 1 / whole-show (-1) checks
    if season in [-1, 1]:
        showNames = [show.name]

    showNames.extend(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=season))

    for showName in showNames:
        nameFromList = full_sanitizeSceneName(showName)
        if nameFromList == nameInQuestion:
            return True

    return False
def get_show(name, tryIndexers=False):
    """
    Find a show object by (scene) name.

    Checks the name cache first, then (optionally) the indexers, then the
    scene-exception table; successful non-cache lookups are cached.

    :param name: show name to look up
    :param tryIndexers: when True, also query the indexers
    :return: the show object, or None when it cannot be found
    """
    if not sickbeard.showList:
        return

    showObj = None
    fromCache = False

    if not name:
        return showObj

    try:
        # check cache for show
        cache = sickbeard.name_cache.retrieveNameFromCache(name)
        if cache:
            fromCache = True
            showObj = Show.find(sickbeard.showList, int(cache))

        # try indexers
        if not showObj and tryIndexers:
            showObj = Show.find(
                sickbeard.showList, searchIndexerForShowID(full_sanitizeSceneName(name), ui=classes.ShowListUI)[2])

        # try scene exceptions
        if not showObj:
            ShowID = sickbeard.scene_exceptions.get_scene_exception_by_name(name)[0]
            if ShowID:
                showObj = Show.find(sickbeard.showList, int(ShowID))

        # add show to cache
        if showObj and not fromCache:
            sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
    except Exception as error:
        logger.log("Error when attempting to find show: {0} in SickRage. Error: {1} ".format(name, error), logger.DEBUG)

    return showObj
def is_hidden_folder(folder):
    """
    Returns True if folder is hidden.
    On Linux based systems hidden folders start with . (dot)

    :param folder: Full path of folder to check
    """
    def is_hidden(filepath):
        # hidden when the final name is dot-prefixed, or the Windows hidden
        # attribute is set
        name = ek(os.path.basename, ek(os.path.abspath, filepath))
        return name.startswith('.') or has_hidden_attribute(filepath)

    def has_hidden_attribute(filepath):
        # only meaningful on Windows: elsewhere ctypes.windll is missing and
        # the AttributeError below makes this return False
        try:
            attrs = ctypes.windll.kernel32.GetFileAttributesW(ctypes.c_wchar_p(unicode(filepath)))
            assert attrs != -1
            result = bool(attrs & 2)  # 2 == FILE_ATTRIBUTE_HIDDEN
        except (AttributeError, AssertionError):
            result = False

        return result

    if ek(os.path.isdir, folder):
        if is_hidden(folder):
            return True

    return False
def real_path(path):
    """
    Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components.

    :param path: path to canonicalize
    """
    return ek(os.path.normpath, ek(os.path.normcase, ek(os.path.realpath, path)))
def validateShow(show, season=None, episode=None):
    """
    Look the show up on its indexer, optionally drilling down to one episode.

    :param show: show object to validate
    :param season: season number, or None
    :param episode: episode number, or None
    :return: the indexer object when season and episode are both None, the
             indexer's episode data otherwise; returns None implicitly when
             the season/episode is not found on the indexer
    """
    indexer_lang = show.lang

    try:
        lINDEXER_API_PARMS = sickbeard.indexerApi(show.indexer).api_params.copy()

        lINDEXER_API_PARMS['language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE

        if show.dvdorder:
            lINDEXER_API_PARMS['dvdorder'] = True

        t = sickbeard.indexerApi(show.indexer).indexer(**lINDEXER_API_PARMS)
        if season is None and episode is None:
            return t

        return t[show.indexerid][season][episode]
    except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
        pass
def set_up_anidb_connection():
    """
    Connect to anidb, creating and authenticating the shared connection if needed

    :return: True when an authenticated connection is available, False otherwise
    """
    if not sickbeard.USE_ANIDB:
        logger.log("Usage of anidb disabled. Skiping", logger.DEBUG)
        return False

    # NOTE(review): this only aborts when BOTH credentials are missing; with
    # just one of them set, the auth() call below is still attempted - confirm
    # whether `or` was intended here
    if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
        logger.log("anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
        return False

    if not sickbeard.ADBA_CONNECTION:
        def anidb_logger(msg):
            return logger.log("anidb: {0} ".format(msg), logger.DEBUG)

        try:
            sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)
        except Exception as error:
            logger.log("anidb exception msg: {0} ".format(error), logger.WARNING)
            return False

    try:
        if not sickbeard.ADBA_CONNECTION.authed():
            sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
        else:
            return True
    except Exception as error:
        logger.log("anidb exception msg: {0} ".format(error), logger.WARNING)
        return False

    return sickbeard.ADBA_CONNECTION.authed()
def makeZip(fileList, archive):
    """
    Create a ZIP of files

    :param fileList: A list of file names - full path each name
    :param archive: File name for the archive with a full path
    :return: True on success, False on failure
    """
    try:
        # closing() guarantees the archive handle is released even when one of
        # the writes raises (the original leaked the handle on error)
        with closing(zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)) as a:
            for f in fileList:
                a.write(f)
        return True
    except Exception as error:
        logger.log("Zip creation error: {0} ".format(error), logger.ERROR)
        return False
def extractZip(archive, targetDir):
    """
    Unzip a file to a directory (entries are flattened; directories skipped)

    :param archive: The file name for the archive with a full path
    :param targetDir: directory to extract into (created if missing)
    :return: True on success, False on failure
    """
    try:
        if not ek(os.path.exists, targetDir):
            ek(os.mkdir, targetDir)

        with closing(zipfile.ZipFile(archive, 'r', allowZip64=True)) as zip_file:
            for member in zip_file.namelist():
                filename = ek(os.path.basename, member)
                # skip directories
                if not filename:
                    continue

                # copy file (taken from zipfile's extract); io.open replaces the
                # Python-2-only file() builtin, and the context managers close
                # both handles even when the copy fails
                with closing(zip_file.open(member)) as source:
                    with io.open(ek(os.path.join, targetDir, filename), "wb") as target:
                        shutil.copyfileobj(source, target)

        return True
    except Exception as error:
        logger.log("Zip extraction error: {0} ".format(error), logger.ERROR)
        return False
def backupConfigZip(fileList, archive, arcname=None):
    """
    Store the config file as a ZIP

    :param fileList: List of files to store
    :param archive: ZIP file name
    :param arcname: Archive path (entries are stored relative to it)
    :return: True on success, False on failure
    """
    try:
        # closing() guarantees the archive handle is released even when one of
        # the writes raises (the original leaked the handle on error)
        with closing(zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)) as a:
            for f in fileList:
                a.write(f, ek(os.path.relpath, f, arcname))
        return True
    except Exception as error:
        logger.log("Zip creation error: {0} ".format(error), logger.ERROR)
        return False
def restoreConfigZip(archive, targetDir):
    """
    Restores a Config ZIP file back in place

    An existing targetDir is first moved aside as a timestamped backup.
    On failure, the (partially extracted) targetDir is deleted.

    :param archive: ZIP filename
    :param targetDir: Directory to restore to
    :return: True on success, False on failure
    """
    try:
        if not ek(os.path.exists, targetDir):
            ek(os.mkdir, targetDir)
        else:
            def path_leaf(path):
                # final path component, even when the path ends with a separator
                head, tail = ek(os.path.split, path)
                return tail or ek(os.path.basename, head)
            bakFilename = '{0}-{1}'.format(path_leaf(targetDir), datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
            shutil.move(targetDir, ek(os.path.join, ek(os.path.dirname, targetDir), bakFilename))

        zip_file = zipfile.ZipFile(archive, 'r', allowZip64=True)
        for member in zip_file.namelist():
            zip_file.extract(member, targetDir)
        zip_file.close()
        return True
    except Exception as error:
        logger.log("Zip extraction error: {0}".format(error), logger.ERROR)
        shutil.rmtree(targetDir)
        return False
def mapIndexersToShow(showObj):
    """
    Build a dict mapping each known indexer to this show's id on that indexer.

    The show's own indexer maps to its own id; every other indexer maps to the
    id found in the indexer_mapping DB table, or — on a cache miss — to the id
    obtained by looking the show up by name on that indexer (the result is then
    written back to the table). Unmapped indexers stay 0.

    :param showObj: show object exposing .indexer, .indexerid and .name
    :return: dict of {indexer: mapped indexer id or 0}
    """
    mapped = {}
    # init mapped indexers object
    for indexer in sickbeard.indexerApi().indexers:
        mapped[indexer] = showObj.indexerid if int(indexer) == int(showObj.indexer) else 0
    main_db_con = db.DBConnection()
    sql_results = main_db_con.select(
        "SELECT * FROM indexer_mapping WHERE indexer_id = ? AND indexer = ?",
        [showObj.indexerid, showObj.indexer])
    # for each mapped entry
    for curResult in sql_results:
        nlist = [i for i in curResult if i is not None]
        # Check if its mapped with both tvdb and tvrage.
        if len(nlist) >= 4:
            logger.log("Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
            mapped[int(curResult[b'mindexer'])] = int(curResult[b'mindexer_id'])
            break
    else:
        # for/else: only runs when no cached row broke out of the loop above,
        # i.e. the mapping must be computed by querying the other indexers
        sql_l = []
        for indexer in sickbeard.indexerApi().indexers:
            if indexer == showObj.indexer:
                mapped[indexer] = showObj.indexerid
                continue
            lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
            lINDEXER_API_PARMS['custom_ui'] = classes.ShowListUI
            t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
            try:
                mapped_show = t[showObj.name]
            except Exception:
                logger.log("Unable to map " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
                    indexer).name + " for show: " + showObj.name + ", skipping it", logger.DEBUG)
                continue
            # only accept an unambiguous (single) name match
            if mapped_show and len(mapped_show) == 1:
                logger.log("Mapping " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
                    indexer).name + " for show: " + showObj.name, logger.DEBUG)
                mapped[indexer] = int(mapped_show[0][b'id'])
                logger.log("Adding indexer mapping to DB for show: " + showObj.name, logger.DEBUG)
                sql_l.append([
                    "INSERT OR IGNORE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer) VALUES (?,?,?,?)",
                    [showObj.indexerid, showObj.indexer, int(mapped_show[0][b'id']), indexer]])
        if sql_l:
            main_db_con = db.DBConnection()
            main_db_con.mass_action(sql_l)
    return mapped
def touchFile(fname, atime=None):
    """
    Set a file's access and modification timestamps ("touch" it).

    :param fname: Filename to touch
    :param atime: Timestamp to apply to both atime and mtime (defaults to None)
    :return: True on success, False on failure
    """
    # Guard clause: nothing to do without a timestamp, a name and an existing file
    if not (atime and fname and ek(os.path.isfile, fname)):
        return False
    ek(os.utime, fname, (atime, atime))
    return True
def make_session():
    """
    Build the HTTP session used for outgoing requests: a requests.Session
    carrying the application User-Agent, passed through cfscrape.create_scraper
    and finally wrapped in CacheControl with ETag-based caching enabled.

    :return: a CacheControl-wrapped session object
    """
    session = requests.Session()
    session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
    session = cfscrape.create_scraper(sess=session)
    return CacheControl(sess=session, cache_etags=True)
def request_defaults(kwargs):
    """
    Pop request-related options out of *kwargs* and translate them into the
    keyword arguments shared by getURL/download_file.

    :param kwargs: options dict; consumes 'hooks', 'cookies', 'allow_proxy' and 'verify'
    :return: tuple (hooks, cookies, verify, proxies)
    """
    hooks = kwargs.pop('hooks', None)
    cookies = kwargs.pop('cookies', None)
    allow_proxy = kwargs.pop('allow_proxy', True)
    # Verify certificates only when both the global SSL_VERIFY setting and the
    # caller's 'verify' flag allow it; certifi.old_where() supplies the CA bundle
    verify = certifi.old_where() if all([sickbeard.SSL_VERIFY, kwargs.pop('verify', True)]) else False
    # request session proxies
    if allow_proxy and sickbeard.PROXY_SETTING:
        logger.log("Using global proxy: " + sickbeard.PROXY_SETTING, logger.DEBUG)
        parsed_url = urlparse(sickbeard.PROXY_SETTING)
        # default to http:// when the configured proxy has no scheme
        address = sickbeard.PROXY_SETTING if parsed_url.scheme else 'http://' + sickbeard.PROXY_SETTING
        proxies = {
            "http": address,
            "https": address,
        }
    else:
        proxies = None
    return hooks, cookies, verify, proxies
def getURL(url, post_data=None, params=None, headers=None,  # pylint:disable=too-many-arguments, too-many-return-statements, too-many-branches, too-many-locals
           timeout=30, session=None, **kwargs):
    """
    Returns data retrieved from the url provider.

    :param url: URL to request (GET, or POST when post_data is given)
    :param post_data: dict/list of POST parameters
    :param params: dict/list of query-string parameters
    :param headers: headers overriding those already on the session
    :param timeout: request timeout in seconds
    :param session: session object to issue the request on (required)
    :param kwargs: also accepts 'returns' ('text'/'content'/'json'/'response'),
        'stream', plus the options consumed by request_defaults()
    :return: the selected response representation, or None on any error
    """
    try:
        response_type = kwargs.pop('returns', 'text')
        stream = kwargs.pop('stream', False)
        hooks, cookies, verify, proxies = request_defaults(kwargs)
        # encode unicode (Python 2) parameter values to UTF-8 bytes
        if params and isinstance(params, (list, dict)):
            for param in params:
                if isinstance(params[param], unicode):
                    params[param] = params[param].encode('utf-8')
        if post_data and isinstance(post_data, (list, dict)):
            for param in post_data:
                if isinstance(post_data[param], unicode):
                    post_data[param] = post_data[param].encode('utf-8')
        resp = session.request(
            'POST' if post_data else 'GET', url, data=post_data or {}, params=params or {},
            timeout=timeout, allow_redirects=True, hooks=hooks, stream=stream,
            headers=headers, cookies=cookies, proxies=proxies, verify=verify
        )
        resp.raise_for_status()
    except Exception as error:
        handle_requests_exception(error)
        return None
    try:
        # 'response' (or None) hands back the raw response object, 'json' parses
        # the body, anything else is looked up as an attribute of the response
        return resp if response_type == 'response' or response_type is None else resp.json() if response_type == 'json' else getattr(resp, response_type, resp)
    except ValueError:
        logger.log('Requested a json response but response was not json, check the url: {0}'.format(url), logger.DEBUG)
        return None
def download_file(url, filename, session=None, headers=None, **kwargs):  # pylint:disable=too-many-return-statements
    """
    Downloads a file specified

    :param url: Source URL
    :param filename: Target file on filesystem
    :param session: request session to use
    :param headers: override existing headers in request session
    :return: True on success, False on failure
    """
    try:
        hooks, cookies, verify, proxies = request_defaults(kwargs)
        # stream the body so large downloads are written in 1 KiB chunks
        # instead of being buffered entirely in memory
        with closing(session.get(url, allow_redirects=True, stream=True,
                                 verify=verify, headers=headers, cookies=cookies,
                                 hooks=hooks, proxies=proxies)) as resp:
            resp.raise_for_status()
            try:
                with io.open(filename, 'wb') as fp:
                    for chunk in resp.iter_content(chunk_size=1024):
                        if chunk:
                            fp.write(chunk)
                            fp.flush()
                # give the new file the same permissions as its parent directory
                chmodAsParent(filename)
            except Exception as error:
                # write/permission problems are logged but still return True below;
                # only request-level failures return False
                logger.log("Problem downloading file, setting permissions or writing file to \"{0}\" - ERROR: {1}".format(filename, error), logger.WARNING)
    except Exception as error:
        handle_requests_exception(error)
        return False
    return True
def handle_requests_exception(requests_exception):  # pylint: disable=too-many-branches, too-many-statements
    """
    Log a requests exception at an appropriate level.

    Re-raises *requests_exception* internally so that each concrete requests
    exception class can be matched by an ``except`` clause; SSL and HTTP
    errors get special handling, everything else is logged uniformly.

    :param requests_exception: the exception instance raised by `requests`
    """
    default = "Request failed: {0}"
    try:
        raise requests_exception
    except requests.exceptions.SSLError as error:
        if ssl.OPENSSL_VERSION_INFO < (1, 0, 1, 5):
            logger.log("SSL Error requesting url: '{0}' You have {1}, try upgrading OpenSSL to 1.0.1e+".format(error.request.url, ssl.OPENSSL_VERSION))
        if sickbeard.SSL_VERIFY:
            # Bug fix: the original message contained a '{0}' placeholder but
            # never called .format(), leaking the literal placeholder into logs
            logger.log("SSL Error requesting url: '{0}' Try disabling Cert Verification on the advanced tab of /config/general".format(error.request.url))
        logger.log(default.format(error), logger.DEBUG)
        logger.log(traceback.format_exc(), logger.DEBUG)
    except requests.exceptions.HTTPError as error:
        # 404 responses served with 'X-Content-Type-Options: nosniff' are
        # treated as expected and not logged
        if not (hasattr(error, 'response') and error.response and
                hasattr(error.response, 'status_code') and error.response.status_code == 404 and
                hasattr(error.response, 'headers') and error.response.headers.get('X-Content-Type-Options') == 'nosniff'):
            logger.log(default.format(error))
    except requests.exceptions.ContentDecodingError as error:
        # this one additionally records a traceback at DEBUG level
        logger.log(default.format(error))
        logger.log(traceback.format_exc(), logger.DEBUG)
    except (requests.exceptions.TooManyRedirects,
            requests.exceptions.ConnectTimeout,
            requests.exceptions.ReadTimeout,
            requests.exceptions.ProxyError,
            requests.exceptions.ConnectionError,
            requests.exceptions.ChunkedEncodingError,
            requests.exceptions.InvalidURL,
            requests.exceptions.InvalidSchema,
            requests.exceptions.MissingSchema,
            requests.exceptions.RetryError,
            requests.exceptions.StreamConsumedError,
            requests.exceptions.URLRequired) as error:
        # all of these were previously separate clauses with identical bodies;
        # SSLError (a ConnectionError subclass) is already handled above
        logger.log(default.format(error))
    except Exception as error:
        logger.log(default.format(error), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)
def get_size(start_path='.'):
    """
    Recursively sum the sizes of all files below a directory.

    :param start_path: directory to walk
    :return: total size in bytes, or -1 when start_path is not a directory
    """
    if not ek(os.path.isdir, start_path):
        return -1
    total = 0
    for root, unused_dirs, files in ek(os.walk, start_path):
        for name in files:
            full_path = ek(os.path.join, root, name)
            try:
                total += ek(os.path.getsize, full_path)
            except OSError as error:
                # unreadable entries (broken symlinks, permission issues) are
                # logged and skipped rather than aborting the walk
                logger.log("Unable to get size for file {0} Error: {1}".format(full_path, error), logger.ERROR)
                logger.log(traceback.format_exc(), logger.DEBUG)
    return total
def generateApiKey():
    """
    Return a new randomized API_KEY.

    The key is the first 32 hex digits of a SHA-512 over the current time
    plus 4096 bits from the system CSPRNG.
    """
    logger.log("Generating New API key")
    # .encode() makes the input bytes, which hashlib requires on Python 3
    # and is a no-op for these ASCII strings on Python 2
    secure_hash = hashlib.sha512(str(time.time()).encode('utf-8'))
    secure_hash.update(str(random.SystemRandom().getrandbits(4096)).encode('utf-8'))
    return secure_hash.hexdigest()[:32]
def remove_article(text=''):
    """
    Strip a leading English article ("A", "An", "The", any case) from *text*.

    "A to Z"-style titles are preserved via a negative lookahead, and the
    whitespace between the article and the next word is removed as well.
    """
    pattern = r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)'
    return re.sub(pattern, r'\1', text)
def generateCookieSecret():
    """
    Generate a new cookie secret: 32 random bytes (two UUID4s), base64-encoded.
    """
    raw = uuid.uuid4().bytes + uuid.uuid4().bytes
    return base64.b64encode(raw)
def disk_usage(path):
    """
    Return the number of free bytes available at *path*.

    Uses GetDiskFreeSpaceExW via ctypes on Windows, `df -k` on macOS
    (falling back to statvfs if that fails) and os.statvfs on other POSIX
    systems.

    :param path: filesystem path to check
    :raises Exception: when no supported method exists for the current OS
    """
    if platform.system() == 'Windows':
        free = ctypes.c_ulonglong(0)
        if ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(unicode(path)), None, None, ctypes.pointer(free)) == 0:
            raise ctypes.WinError()
        return free.value
    elif hasattr(os, 'statvfs'):  # POSIX
        if platform.system() == 'Darwin':
            try:
                import subprocess
                call = subprocess.Popen(["df", "-k", path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                output = call.communicate()[0]
                # df -k: second line, fourth column is 'Avail' in 1K blocks
                return int(output.split("\n")[1].split()[3]) * 1024
            except Exception:
                pass
        st = ek(os.statvfs, path)
        return st.f_bavail * st.f_frsize  # pylint: disable=no-member
    else:
        raise Exception("Unable to determine free space on your OS")
def verify_freespace(src, dest, oldfile=None, method="copy"):
"""
Checks if the target system has enough free space to copy or move a file.
:param src: Source filename
:param dest: Destination path
:param oldfile: File to be replaced (defaults to None)
:return: True if there is enough space for the file, False if there isn't. Also returns True if the OS doesn't support this option
"""
if not isinstance(oldfile, list):
oldfile = [oldfile] if oldfile else []
logger.log("Trying to determine free space on destination drive", logger.DEBUG)
if not ek(os.path.isfile, src):
logger.log("A path to a file is required for the source. {0} is not a file.".format(src), logger.WARNING)
return True
# shortcut: if we are moving the file and the destination == src dir,
# then by definition there is enough space
if method == "move" and ek(os.stat, src).st_dev == ek(os.stat, dest if ek(os.path.exists, dest) else ek(os.path.dirname, dest)).st_dev: # pylint: disable=no-member
logger.log("Process method is 'move' and src and destination are on the same device, skipping free space check", logger.INFO)
return True
try:
diskfree = disk_usage(dest if ek(os.path.exists, dest) else ek(os.path.dirname, dest))
except Exception as error:
logger.log("Unable to determine free space, so I will assume there is enough.", logger.WARNING)
logger.log("Error: {error}".format(error=error), logger.DEBUG)
logger.log(traceback.format_exc(), logger.DEBUG)
return True
# Lets also do this for symlink and hardlink
if 'link' in method and diskfree > 1024**2:
return True
neededspace = ek(os.path.getsize, src)
if oldfile:
for f in oldfile:
if ek(os.path.isfile, f.location):
diskfree += ek(os.path.getsize, f.location)
if diskfree > neededspace:
return True
else:
logger.log("Not enough free space: Needed: {0} bytes ( {1} ), found: {2} bytes ( {3} )".format
(neededspace, pretty_file_size(neededspace), diskfree, pretty_file_size(diskfree)), logger.WARNING)
return False
def getDiskSpaceUsage(diskPath=None):
    """
    returns the free space in human readable bytes for a given path or False if no path given

    :param diskPath: the filesystem path being checked
    :return: pretty-printed free space string, or False when no path is given,
        the path does not exist, or the free space cannot be determined
    """
    if diskPath and ek(os.path.exists, diskPath):
        try:
            free = disk_usage(diskPath)
        except Exception as error:
            # on failure we fall through to the final 'return False'
            logger.log("Unable to determine free space", logger.WARNING)
            logger.log("Error: {error}".format(error=error), logger.DEBUG)
            logger.log(traceback.format_exc(), logger.DEBUG)
        else:
            return pretty_file_size(free)
    return False
# https://gist.github.com/thatalextaylor/7408395
def pretty_time_delta(seconds):
    """
    Format a duration in seconds as a compact string like '1d2h3m4s'.

    Negative durations are prefixed with '-'; zero-valued units are omitted.
    Bug fix: a zero-second delta now renders as '0s' instead of an empty string.

    :param seconds: duration in seconds (may be negative or fractional)
    :return: formatted duration string
    """
    sign_string = '-' if seconds < 0 else ''
    seconds = abs(int(seconds))
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    time_delta = sign_string
    if days > 0:
        time_delta += '{0}d'.format(days)
    if hours > 0:
        time_delta += '{0}h'.format(hours)
    if minutes > 0:
        time_delta += '{0}m'.format(minutes)
    if seconds > 0:
        time_delta += '{0}s'.format(seconds)
    # a delta of exactly zero would otherwise produce an empty string
    if time_delta == sign_string:
        time_delta += '0s'
    return time_delta
def isFileLocked(checkfile, writeLockCheck=False):
    """
    Checks to see if a file is locked. Performs three checks
        1. Checks if the file even exists
        2. Attempts to open the file for reading. This will determine if the file has a write lock.
            Write locks occur when the file is being edited or copied to, e.g. a file copy destination
        3. If the writeLockCheck parameter is True, attempts to rename the file. If this fails the
            file is open by some other process for reading. The file can be read, but not written to
            or deleted.

    :param checkfile: the file being checked
    :param writeLockCheck: when true will check if the file is locked for writing (prevents move operations)
    :return: True when the file is missing or locked, False otherwise
    """
    checkfile = ek(os.path.abspath, checkfile)
    # a missing file is reported as "locked" so callers don't try to use it
    if not ek(os.path.exists, checkfile):
        return True
    try:
        f = ek(io.open, checkfile, 'rb')
        f.close()  # pylint: disable=no-member
    except IOError:
        return True
    if writeLockCheck:
        lockFile = checkfile + ".lckchk"
        if ek(os.path.exists, lockFile):
            ek(os.remove, lockFile)
        try:
            # renaming the file away and back fails while another process
            # still holds it open; the sleep gives such a process time to show
            ek(os.rename, checkfile, lockFile)
            time.sleep(1)
            ek(os.rename, lockFile, checkfile)
        except (OSError, IOError):
            return True
    return False
def getTVDBFromID(indexer_id, indexer):  # pylint:disable=too-many-return-statements
    """
    Resolve a foreign indexer id (IMDB, ZAP2IT or TVMAZE) to a TheTVDB series id.

    :param indexer_id: the id of the show on the foreign indexer
    :param indexer: one of 'IMDB', 'ZAP2IT', 'TVMAZE'
    :return: the TVDB id, or '' when unknown / on failure
    """
    session = make_session()
    tvdb_id = ''

    def _tvdb_id_from_remote(url):
        # Shared IMDB/ZAP2IT path: query TheTVDB's GetSeriesByRemoteID
        # endpoint and pull <seriesid> out of the XML response.
        data = getURL(url, session=session, returns='content')
        if data is None:
            return ''
        result = ''
        try:
            tree = ET.fromstring(data)
            # Element.iter replaces getiterator, which was removed in py3.9
            for show in tree.iter("Series"):
                result = show.findtext("seriesid")
        except SyntaxError:
            # ET.ParseError subclasses SyntaxError; a broken response yields ''
            pass
        return result

    if indexer == 'IMDB':
        return _tvdb_id_from_remote(
            "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?imdbid={0}".format(indexer_id))
    elif indexer == 'ZAP2IT':
        return _tvdb_id_from_remote(
            "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?zap2it={0}".format(indexer_id))
    elif indexer == 'TVMAZE':
        url = "http://api.tvmaze.com/shows/{0}".format(indexer_id)
        data = getURL(url, session=session, returns='json')
        if data is None:
            return tvdb_id
        return data[b'externals'][b'thetvdb']
    else:
        return tvdb_id
def get_showname_from_indexer(indexer, indexer_id, lang='en'):
    """
    Look a show up on the given indexer and return its series name.

    :param indexer: indexer to query
    :param indexer_id: id of the show on that indexer
    :param lang: language code, falling back to the configured default
    :return: the 'seriesname' value, or None when unavailable
    """
    api_params = sickbeard.indexerApi(indexer).api_params.copy()
    api_params['language'] = lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
    logger.log('{0}: {1!r}'.format(sickbeard.indexerApi(indexer).name, api_params))
    indexer_api = sickbeard.indexerApi(indexer).indexer(**api_params)
    show = indexer_api[int(indexer_id)]
    return show.data.get('seriesname') if hasattr(show, 'data') else None
def is_ip_private(ip):
    """
    Return a truthy value when *ip* is a loopback (127/8) or RFC 1918 private
    address (10/8, 192.168/16, 172.16/12), falsy otherwise.

    Bug fix: the 192.168 and 172.16-31 patterns previously used unescaped
    '.' metacharacters, so strings like '192x168x1x1' also matched.

    :param ip: dotted-quad IPv4 address string
    """
    priv_lo = re.compile(r"^127\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
    priv_10 = re.compile(r"^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
    priv_192 = re.compile(r"^192\.168\.\d{1,3}\.\d{1,3}$")
    priv_172 = re.compile(r"^172\.(1[6-9]|2[0-9]|3[0-1])\.\d{1,3}\.\d{1,3}$")
    return priv_lo.match(ip) or priv_10.match(ip) or priv_192.match(ip) or priv_172.match(ip)
| jackkiej/SickRage | sickbeard/helpers.py | Python | gpl-3.0 | 59,002 |
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <fredrik314@gmail.com>,
# Stephan Ehlen <>
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
r""" Class for newforms in format which can be presented on the web easily
AUTHORS:
- Fredrik Stroemberg
- Stephan Ehlen
"""
from sage.all import ZZ, QQ, DirichletGroup, CuspForms, Gamma0, ModularSymbols, Newforms, trivial_character, is_squarefree, divisors, RealField, ComplexField, prime_range, I, join, gcd, Cusp, Infinity, ceil, CyclotomicField, exp, pi, primes_first_n, euler_phi, RR, prime_divisors, Integer, matrix,NumberField,PowerSeriesRing,cached_function,AlphabeticStrings
from sage.rings.power_series_poly import PowerSeries_poly
from sage.all import Parent, SageObject, dimension_new_cusp_forms, vector, dimension_modular_forms, dimension_cusp_forms, EisensteinForms, Matrix, floor, denominator, latex, is_prime, prime_pi, next_prime, previous_prime,primes_first_n, previous_prime, factor, loads,save,dumps,deepcopy
import re
import yaml
from flask import url_for
from wmf import wmf_logger
from lmfdb.modular_forms.elliptic_modular_forms import emf_version
from sage.rings.number_field.number_field_base import NumberField as NumberField_class
from lmfdb.modular_forms.elliptic_modular_forms.backend import connect_to_modularforms_db,get_files_from_gridfs
from lmfdb.modular_forms.elliptic_modular_forms.backend.web_modform_space import WebModFormSpace_class
def WebModFormSpace_computing(N=1, k=2, chi=1, cuspidal=1, prec=10, bitprec=53, data=None, verbose=0, **kwds):
    r"""
    Constructor for WebNewForms with added 'nicer' error message.

    INPUT:
    - ``N`` -- level
    - ``k`` -- weight
    - ``chi`` -- character number
    - ``cuspidal`` -- must be 1; only cuspidal spaces are in the database

    OUTPUT: a ``WebModFormSpace_class`` instance.
    """
    if data is None:
        data = {}
    # Python 2/3-compatible syntax replaces the old '<>' operator and
    # 'raise Class, msg' statement form.
    if cuspidal != 1:
        raise IndexError("We are very sorry. There are only cuspidal spaces currently in the database!")
    F = WebModFormSpace_class(N=N, k=k, chi=chi, cuspidal=cuspidal, prec=prec, bitprec=bitprec, data=data, verbose=verbose, **kwds)
    return F
from lmfdb.modular_forms.elliptic_modular_forms.backend import WebModFormSpace
class WebModFormSpace_computing_class(WebModFormSpace_class):
    r"""
    Space of cuspforms to be presented on the web (computing variant).
    Extends ``WebModFormSpace_class`` with the modular symbols, newform
    factors and dimension data needed to populate the MongoDB/GridFS store.
    EXAMPLES::
    sage: WS=WebModFormSpace(2,39)
    """
    def __init__(self, N=1, k=2, chi=1, cuspidal=1, prec=10, bitprec=53, data=None, verbose=0,get_from_db=True):
        r"""
        Init self.
        INPUT:
        - 'k' -- weight
        - 'N' -- level
        - 'chi' -- character
        - 'cuspidal' -- 1 if space of cuspforms, 0 if all modforms
        NOTE: the superclass is always called with get_from_db=False here;
        the get_from_db parameter of this method is not otherwise used.
        """
        wmf_logger.debug("WebModFormSpace with k,N,chi={0}".format( (k,N,chi)))
        super(WebModFormSpace_computing_class,self).__init__(N,k,chi,cuspidal,prec,bitprec,data, verbose,get_from_db=False)
        ## In this subclass we add properties which are not
        ## supposed to be used on the web or stored in the database
        self._dimension = None
        self._dimension_oldspace = None
        self._newforms = None
        self._modular_symbols = None
        self.compute_additional_properties()
        self.insert_into_db()
    def compute_additional_properties(self):
        r"""
        Compute additional properties: group, modular symbols, newform
        factors, Fourier coefficients, dimensions, Sturm bound and the
        oldspace decomposition; finally persist everything to the database.
        """
        ### Set / Compute / fetch everything we need
        if self._group is None:
            self._group = Gamma0(self._N)
        self.get_modular_symbols()
        self._newspace = self._modular_symbols.cuspidal_submodule().new_submodule()
        self.get_newform_factors()
        if self._newforms == {} and self._newspace.dimension()>0:
            for i in self.labels():
                self._newforms[i]=None
        if len(self._ap) == 0:
            self._ap = self._get_aps(prec=self._prec)
        self.set_dimensions()
        if self.dimension() == self.dimension_newspace():
            self._is_new = True
        else:
            self._is_new = False
        self.set_sturm_bound()
        self.set_oldspace_decomposition()
        self.insert_into_db()
    def newform_factors(self):
        r"""
        Return newform factors of self (cached on first access).
        """
        # NOTE(review): _get_newform_factors is not defined in this class --
        # possibly get_newform_factors was intended; confirm against superclass.
        if self._newform_factors is None:
            self._newform_factors = self._get_newform_factors()
        return self._newform_factors
    def character_orbit_rep(self,k=None):
        r"""
        Returns canonical representative of the Galois orbit nr. k acting on the ambient space of self.
        """
        # NOTE(review): WebChar is not imported in this module -- confirm it
        # is available in the runtime namespace before calling this method.
        if self._character_orbit_rep is None:
            x = self.character().character().galois_orbit()[0]
            self._character_orbit_rep = WebChar(x.modulus(),x.number())
        return self._character_orbit_rep
    ## Database fetching functions.
    def insert_into_db(self):
        r"""
        Insert a dictionary of data for self into the collection WebModularforms.files
        (replacing any existing record with the same name and version).
        """
        wmf_logger.debug("inserting self into db! name={0}".format(self._name))
        db = connect_to_modularforms_db('WebModformspace.files')
        fs = get_files_from_gridfs('WebModformspace')
        s = {'name':self._name,'version':emf_version}
        rec = db.find_one(s)
        if rec:
            id = rec.get('_id')
        else:
            id = None
        if id<>None:
            wmf_logger.debug("Removing self from db with id={0}".format(id))
            fs.delete(id)
        fname = "webmodformspace-{0:0>5}-{1:0>3}-{2:0>3}".format(self._N,self._k,self._chi)
        d = self.to_dict()
        d.pop('_ap',None) # Since the ap's are already in the database we don't need them here
        id = fs.put(dumps(d),filename=fname,N=int(self._N),k=int(self._k),chi=int(self._chi),name=self._name,version=emf_version)
        wmf_logger.debug("inserted :{0}".format(id))
    def get_from_db(self):
        r"""
        Fetch dictionary data from the database, keyed by name and version.
        Returns an empty dict when no matching record exists.
        """
        db = connect_to_modularforms_db('WebModformspace.files')
        s = {'name':self._name,'version':emf_version}
        wmf_logger.debug("Looking in DB for rec={0}".format(s))
        f = db.find_one(s)
        wmf_logger.debug("Found rec={0}".format(f))
        if f<>None:
            id = f.get('_id')
            fs = get_files_from_gridfs('WebModformspace')
            f = fs.get(id)
            wmf_logger.debug("Getting rec={0}".format(f))
            d = loads(f.read())
            return d
        return {}
    def _get_aps(self, prec=-1):
        r"""
        Get aps (Fourier coefficients at primes) from database if they exist.
        Records with precision > prec-1 are scanned in ascending precision order.
        """
        ap_files = connect_to_modularforms_db('ap.files')
        key = {'k': int(self._k), 'N': int(self._N), 'cchi': int(self._chi)}
        key['prec'] = {"$gt": int(prec - 1)}
        ap_from_db  = ap_files.find(key).sort("prec")
        wmf_logger.debug("finds={0}".format(ap_from_db))
        wmf_logger.debug("finds.count()={0}".format(ap_from_db.count()))
        fs = get_files_from_gridfs('ap')
        aplist = {}
        for i in range(len(self.labels())):
            aplist[self.labels()[i]]={}
        for rec in ap_from_db:
            wmf_logger.debug("rec={0}".format(rec))
            ni = rec.get('newform')
            if ni is None:
                for a in self.labels():
                    aplist[a][prec]=None
                return aplist
            a = self.labels()[ni]
            cur_prec = rec['prec']
            # NOTE(review): the cache is checked under key cur_prec but the
            # value is stored under key prec -- verify this is intentional.
            if aplist.get(a,{}).get(cur_prec,None) is None:
                aplist[a][prec]=loads(fs.get(rec['_id']).read())
            if cur_prec > prec and prec>0: # We are happy with these coefficients.
                return aplist
        return aplist
    def get_modular_symbols(self):
        r"""
        Get Modular Symbols from database if they exist; sets
        self._modular_symbols (None when not found) and self._id.
        """
        if not self._modular_symbols is None:
            return
        modular_symbols = connect_to_modularforms_db('Modular_symbols.files')
        key = {'k': int(self._k), 'N': int(self._N), 'cchi': int(self._chi)}
        modular_symbols_from_db  = modular_symbols.find_one(key)
        wmf_logger.debug("found ms={0}".format(modular_symbols_from_db))
        if modular_symbols_from_db is None:
            ms = None
        else:
            id = modular_symbols_from_db['_id']
            fs = get_files_from_gridfs('Modular_symbols')
            ms = loads(fs.get(id).read())
            self._id = id
        self._modular_symbols = ms
    def get_newform_factors(self):
        r"""
        Get New form factors from database if they exist, populating
        self._newforms keyed by orbit label.
        """
        if not self._newforms is None and self._newforms == []:
            return
        factors = connect_to_modularforms_db('Newform_factors.files')
        key = {'k': int(self._k), 'N': int(self._N), 'cchi': int(self._chi),}
        factors_from_db = factors.find(key).sort('newform',int(1))
        wmf_logger.debug("found factors={0}".format(factors_from_db))
        self._newforms = {}
        if factors_from_db.count()==0:
            raise ValueError,"Space is not in database!"
        else:
            # NOTE(review): 'facts' and self._labels are initialized but never
            # filled here; labels go into self._galois_orbits_labels instead.
            facts = []
            self._labels = []
            fs = get_files_from_gridfs('Newform_factors')
            for rec in factors_from_db:
                factor = loads(fs.get(rec['_id']).read())
                label = orbit_label(rec['newform'])
                self._galois_orbits_labels.append(label)
                self._newforms[label] = factor
    def __reduce__(self):
        r"""
        Used for pickling.
        """
        # NOTE(review): unpickle_wmfs_v1 is not defined in this module --
        # confirm it is importable where unpickling happens.
        data = self.to_dict()
        return(unpickle_wmfs_v1, (self._k, self._N, self._chi, self._cuspidal, self._prec, self._bitprec, data))
    def _repr_(self):
        r"""
        Return string representation of self.
        """
        s = 'Space of Cusp forms on ' + str(self.group()) + ' of weight ' + str(self._k)
        s += ' and dimension ' + str(self.dimension())
        return s
    def _computation_too_hard(self,comp='decomp'):
        r"""
        See if the supplied parameters make computation too hard or if we should try to do it on the fly.
        TODO: Actually check times.
        """
        if comp=='decomp':
            if self._N > 50:
                return True
            if self._chi > 1 and self._N > 100:
                return True
            if self._k+self._N  > 100:
                return True
            return False
    # internal methods to generate properties of self
    def galois_decomposition(self):
        r"""
        We compose the new subspace into galois orbits of new cusp forms.
        Results are cached in self._galois_decomposition and the orbits
        are labelled a, b, ..., z, a1, b1, ...
        """
        from sage.monoids.all import AlphabeticStrings
        if(len(self._galois_decomposition) != 0):
            return self._galois_decomposition
        if '_HeckeModule_free_module__decomposition' in self._newspace.__dict__:
            L = self._newspace.decomposition()
        else:
            decomp = self.newform_factors()
            if len(decomp)>0:
                L = filter(lambda x: x.is_new() and x.is_cuspidal(), decomp)
                wmf_logger.debug("found L:{0}".format(L))
            elif self._computation_too_hard():
                L = []
                # NOTE(review): the debug call below is unreachable after raise.
                raise IndexError,"No decomposition was found in the database!"
                wmf_logger.debug("no decomp in database!")
            else: # compute
                L = self._newspace.decomposition()
                wmf_logger.debug("newspace :".format(self._newspace))
                wmf_logger.debug("computed L:".format(L))
        self._galois_decomposition = L
        # we also label the compnents
        x = AlphabeticStrings().gens()
        for j in range(len(L)):
            if(j < 26):
                label = str(x[j]).lower()
            else:
                j1 = j % 26
                j2 = floor(QQ(j) / QQ(26))
                label = str(x[j1]).lower()
                label = label + str(j2)
            if label not in self._galois_orbits_labels:
                self._galois_orbits_labels.append(label)
        return L
    def galois_orbit_label(self, j):
        r"""
        Return the label of the Galois orbit nr. j
        """
        if(len(self._galois_orbits_labels) == 0):
            self.galois_decomposition()
        return self._galois_orbits_labels[j]
    ### Dimension formulas, calculates dimensions of subspaces of self.
    def set_dimensions(self):
        r"""
        Compute and cache the dimensions of self and its subspaces
        (ambient, cuspidal, new cuspidal, new, old).
        """
        if self._chi != 1:
            x = self.character().sage_character()
        else:
            x = self.level()
        k = self.weight()
        # Ambient modular formsspace
        if self._dimension_modular_forms is None:
            self._dimension_modular_forms = int(dimension_modular_forms(x,k))
        # Cuspidal subspace
        if self._dimension_cusp_forms is None:
            self._dimension_cusp_forms = int(dimension_cusp_forms(x,k))
        # New cuspidal subspace
        if self._dimension_new_cusp_forms is None:
            self._dimension_new_cusp_forms = int(dimension_new_cusp_forms(x,k))
        # New subspace of ambient space
        if self._dimension_newspace is None:
            if self._cuspidal == 1:
                self._dimension_newspace = self.dimension_new_cusp_forms()
            else:
                self._dimension_newspace = self._newspace.dimension()
        # Old subspace of self.
        if self._dimension_oldspace is None:
            if self._cuspidal == 1:
                self._dimension_oldspace = self.dimension_cusp_forms() - self.dimension_new_cusp_forms()
            else:
                self._dimension_oldspace = self.dimension_modular_forms() - self.dimension_newforms()
        if self._dimension is None:
            if self._cuspidal == 1:
                self._dimension = self.dimension_cusp_forms()
            elif self._cuspidal == 0:
                self._dimension = self.dimension_modular_forms()
    def set_sturm_bound(self):
        r""" Return the Sturm bound of S_k(N,xi), i.e. the number of coefficients necessary to determine a form uniquely in the space.
        """
        if self._sturm_bound is None:
            self._sturm_bound = self._modular_symbols.sturm_bound()
    def set_oldspace_decomposition(self):
        r"""
        Get decomposition of the oldspace in self into submodules,
        stored as tuples (level, character, multiplicity, dimension).
        A consistency check verifies the dimensions add up.
        """
        if not (self._oldspace_decomposition is None or self._oldspace_decomposition == []):
            return
        N = self._N
        k = self._k
        M = self._modular_symbols.cuspidal_submodule()
        L = list()
        L = []
        check_dim = self.dimension_newspace()
        if(check_dim == self.dimension()):
            return L
        if(self._verbose > 1):
            wmf_logger.debug("check_dim:={0}".format(check_dim))
        for d in divisors(N):
            if(d == 1):
                continue
            q = N.divide_knowing_divisible_by(d)
            if(self._verbose > 1):
                wmf_logger.debug("d={0}".format(d))
            # since there is a bug in the current version of sage
            # we have to try this...
            try:
                O = M.old_submodule(d)
            except AttributeError:
                O = M.zero_submodule()
            Od = O.dimension()
            if(self._verbose > 1):
                wmf_logger.debug("O={0}".format(O))
                wmf_logger.debug("Od={0}".format(Od))
            if(d == N and k == 2 or Od == 0):
                continue
            if self.character().is_trivial():
                # S=ModularSymbols(ZZ(N/d),k,sign=1).cuspidal_submodule().new_submodule(); Sd=S.dimension()
                wmf_logger.debug("q={0},{1}".format(q, type(q)))
                wmf_logger.debug("k={0},{1}".format(k, type(k)))
                Sd = dimension_new_cusp_forms(q, k)
                if(self._verbose > 1):
                    wmf_logger.debug("Sd={0}".format(Sd))
                if Sd > 0:
                    mult = len(divisors(ZZ(d)))
                    check_dim = check_dim + mult * Sd
                    L.append((q, 0, mult, Sd))
            else:
                xd = self.character().decomposition()
                for xx in xd:
                    if xx.modulus() == q:
                        Sd = dimension_new_cusp_forms(xx, k)
                        if Sd > 0:
                            # identify this character for internal storage... should be optimized
                            x_k = self.conrey_character(xx).number()
                            mult = len(divisors(ZZ(d)))
                            check_dim = check_dim + mult * Sd
                            L.append((q, x_k, mult, Sd))
                if(self._verbose > 1):
                    wmf_logger.debug("mult={0},N/d={1},Sd={2}".format(mult, ZZ(N / d), Sd))
                    wmf_logger.debug("check_dim={0}".format(check_dim))
        check_dim = check_dim - M.dimension()
        if(check_dim != 0):
            raise ArithmeticError("Something wrong! check_dim=%s" % check_dim)
        self._oldspace_decomposition = L
@cached_function
def orbit_label(j):
    r"""
    Return the alphabetic label of Galois orbit number ``j``:
    'a'..'z' for j < 26, then 'a1', 'b1', ... with the quotient appended.
    """
    letters = AlphabeticStrings().gens()
    if j < 26:
        return str(letters[j]).lower()
    quotient, remainder = divmod(j, 26)
    return str(letters[remainder]).lower() + str(quotient)
| sehlen/modforms-db | wmf/web_modform_space_computing.py | Python | gpl-3.0 | 18,193 |
import pytest
from united_states_of_browsers.db_merge.db_search import (
check_fts5_installed,
search,
)
# Skip every test in this module when the local SQLite build lacks the FTS5
# full-text-search extension, which search() depends on.
pytestmark = pytest.mark.skipif(
    not check_fts5_installed(),
    reason="FTS5 not available. Search disabled",
)
def test_search_with_keywords_and_dates(searchable_db_path):
    """Keyword queries constrained to a wide date window hit the expected records."""
    expected_search_results_rec_ids = {
        "circleci": [50, 51],
        "google": [32, 33, 45, 46, 48, 50],
        "gitlab": [42, 43, 44],
    }
    actual_search_results_rec_ids = {}
    for keyword in ("circleci", "google", "gitlab"):
        hits = search(
            searchable_db_path,
            word_query=keyword,
            date_start="2019-01-01",
            date_stop="2388-12-31",
        )
        actual_search_results_rec_ids[keyword] = sorted(row["rec_id"] for row in hits)
    assert expected_search_results_rec_ids == actual_search_results_rec_ids
def test_search_dates_specified(searchable_db_path):
    """A start/stop date range with no keyword selects records 39 through 51."""
    hits = search(
        searchable_db_path,
        date_start="2388-09-01",
        date_stop="2388-09-30",
    )
    actual_search_results_rec_ids = sorted(row["rec_id"] for row in hits)
    assert actual_search_results_rec_ids == list(range(39, 52))
def test_search_keywords(searchable_db_path):
    """A bare keyword query ('start page') matches records 13 and 31."""
    hits = search(searchable_db_path, word_query="start page")
    assert sorted(row["rec_id"] for row in hits) == [13, 31]
def test_search_dates_till_now(searchable_db_path):
    """With no filters at all, search returns the default record window."""
    hits = search(searchable_db_path)
    actual_search_results_rec_ids = sorted(row["rec_id"] for row in hits)
    assert actual_search_results_rec_ids == [*range(10, 20), *range(29, 39)]
def test_search_date_start(searchable_db_path):
    """A start date in the far future matches no records."""
    hits = search(searchable_db_path, date_start="2388-09-01")
    actual_rec_ids = sorted(row["rec_id"] for row in hits)
    assert actual_rec_ids == []
def test_search_date_stop(searchable_db_path):
    """A stop date after all records selects everything."""
    hits = search(searchable_db_path, date_stop="2019-09-04")
    actual_rec_ids = sorted(row["rec_id"] for row in hits)
    assert actual_rec_ids == [*range(10, 20), *range(29, 39)]
| kchawla-pi/united-states-of-browsers | tests/tests/test_db_search.py | Python | gpl-3.0 | 3,037 |
########################################################################
# $HeadURL $
# File: RequestDB.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/12/04 08:06:30
########################################################################
""" :mod: RequestDB
=======================
.. module: RequestDB
:synopsis: db holding Requests
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
db holding Request, Operation and File
"""
__RCSID__ = "$Id $"
# #
# @file RequestDB.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/12/04 08:06:51
# @brief Definition of RequestDB class.
# # imports
import random
import threading
# Get rid of the annoying Deprecation warning of the current MySQLdb
# FIXME: compile a newer MySQLdb version
import warnings
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', DeprecationWarning )
import MySQLdb.cursors
from MySQLdb import Error as MySQLdbError
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.List import stringListToString
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
########################################################################
class RequestDB( DB ):
"""
.. class:: RequestDB
db holding requests
"""
  def __init__( self, systemInstance = 'Default', maxQueueSize = 10 ):
    """c'tor

    :param self: self reference
    :param str systemInstance: instance name (not used here; kept for the standard DB c'tor signature)
    :param int maxQueueSize: size of the underlying connection queue, forwarded to DB
    """
    # lock kept for callers that serialise id generation
    self.getIdLock = threading.Lock()
    DB.__init__( self, "ReqDB", "RequestManagement/ReqDB", maxQueueSize )
def createTables( self, toCreate = None, force = False ):
""" create tables """
toCreate = toCreate if toCreate else []
if not toCreate:
return S_OK()
tableMeta = self.getTableMeta()
metaCreate = {}
for tableName in toCreate:
metaCreate[tableName] = tableMeta[tableName]
if metaCreate:
return self._createTables( metaCreate, force )
return S_OK()
@staticmethod
def getTableMeta():
""" get db schema in a dict format """
return dict( [ ( classDef.__name__, classDef.tableDesc() )
for classDef in ( Request, Operation, File ) ] )
def getTables( self ):
""" get tables """
showTables = self._query( "SHOW TABLES;" )
if not showTables["OK"]:
return showTables
return S_OK( [ table[0] for table in showTables["Value"] if table ] )
  def dictCursor( self, conn = None ):
    """ get dict cursor for connection :conn:

    :param conn: existing MySQLdb connection to reuse; a new one is taken
                 from the pool when None
    :return: S_OK( ( connection, cursor ) ) with a MySQLdb.cursors.DictCursor,
             or S_ERROR when no connection could be obtained
    """
    if not conn:
      retDict = self._getConnection()
      if not retDict["OK"]:
        self.log.error( retDict["Message"] )
        return retDict
      conn = retDict["Value"]
    # DictCursor makes fetch* return rows as column-name -> value dicts
    cursor = conn.cursor( cursorclass = MySQLdb.cursors.DictCursor )
    return S_OK( ( conn, cursor ) )
  def _transaction( self, queries ):
    """ execute queries in a single transaction

    :param queries: one SQL string or a list of SQL strings
    :return: S_OK with "Value" mapping each query string to its fetched rows and
             "lastrowid" from the cursor (NOTE(review): this reflects only the
             LAST executed query — confirm callers rely on that), or S_ERROR on
             any failure, in which case the whole transaction is rolled back
    """
    queries = [ queries ] if type( queries ) == str else queries
    # # get cursor and connection
    getCursorAndConnection = self.dictCursor()
    if not getCursorAndConnection["OK"]:
      self.log.error( getCursorAndConnection["Message"] )
      return getCursorAndConnection
    connection, cursor = getCursorAndConnection["Value"]
    # # this will be returned as query result
    ret = { "OK" : True }
    queryRes = { }
    # # switch off autocommit
    connection.autocommit( False )
    try:
      # # execute queries
      for query in queries:
        cursor.execute( query )
        queryRes[query] = list( cursor.fetchall() )
      # # commit
      connection.commit()
      # # save last row ID
      lastrowid = cursor.lastrowid
      # # close cursor
      cursor.close()
      ret["Value"] = queryRes
      ret["lastrowid"] = lastrowid
      connection.autocommit( True )
      return ret
    except MySQLdbError, error:
      self.log.exception( error )
      # # rollback
      connection.rollback()
      # # rever autocommit
      connection.autocommit( True )
      # # close cursor
      cursor.close()
      return S_ERROR( str( error ) )
def putRequest( self, request ):
""" update or insert request into db
:param Request request: Request instance
"""
query = "SELECT `RequestID` from `Request` WHERE `RequestName` = '%s'" % request.RequestName
exists = self._transaction( query )
if not exists["OK"]:
self.log.error( "putRequest: %s" % exists["Message"] )
return exists
exists = exists["Value"]
if exists[query] and exists[query][0]["RequestID"] != request.RequestID:
return S_ERROR( "putRequest: request '%s' already exists in the db (RequestID=%s)"\
% ( request.RequestName, exists[query][0]["RequestID"] ) )
reqSQL = request.toSQL()
if not reqSQL["OK"]:
return reqSQL
reqSQL = reqSQL["Value"]
putRequest = self._transaction( reqSQL )
if not putRequest["OK"]:
self.log.error( "putRequest: %s" % putRequest["Message"] )
return putRequest
lastrowid = putRequest["lastrowid"]
putRequest = putRequest["Value"]
cleanUp = request.cleanUpSQL()
if cleanUp:
dirty = self._transaction( cleanUp )
if not dirty["OK"]:
self.log.error( "putRequest: unable to delete dirty Operation records: %s" % dirty["Message"] )
return dirty
# # flag for a new request
isNew = False
# # set RequestID when necessary
if request.RequestID == 0:
isNew = True
request.RequestID = lastrowid
for operation in request:
cleanUp = operation.cleanUpSQL()
if cleanUp:
dirty = self._transaction( [ cleanUp ] )
if not dirty["OK"]:
self.log.error( "putRequest: unable to delete dirty File records: %s" % dirty["Message"] )
return dirty
opSQL = operation.toSQL()["Value"]
putOperation = self._transaction( opSQL )
if not putOperation["OK"]:
self.log.error( "putRequest: unable to put operation %d: %s" % ( request.indexOf( operation ),
putOperation["Message"] ) )
if isNew:
deleteRequest = self.deleteRequest( request.RequestName )
if not deleteRequest["OK"]:
self.log.error( "putRequest: unable to delete request '%s': %s"\
% ( request.RequestName, deleteRequest["Message"] ) )
return deleteRequest
return putOperation
lastrowid = putOperation["lastrowid"]
putOperation = putOperation["Value"]
if operation.OperationID == 0:
operation.OperationID = lastrowid
filesToSQL = [ opFile.toSQL()["Value"] for opFile in operation ]
if filesToSQL:
putFiles = self._transaction( filesToSQL )
if not putFiles["OK"]:
self.log.error( "putRequest: unable to put files for operation %d: %s" % ( request.indexOf( operation ),
putFiles["Message"] ) )
if isNew:
deleteRequest = self.deleteRequest( request.requestName )
if not deleteRequest["OK"]:
self.log.error( "putRequest: unable to delete request '%s': %s"\
% ( request.RequestName, deleteRequest["Message"] ) )
return deleteRequest
return putFiles
return S_OK( request.RequestID )
  def getScheduledRequest( self, operationID ):
    """ read scheduled request given its FTS operationID

    :param int operationID: Operation.OperationID the FTS system scheduled
    :return: S_OK( Request ) for the owning request (flips it to 'Assigned'
             via getRequest), S_OK() when no request matches, or S_ERROR
    """
    query = "SELECT `Request`.`RequestName` FROM `Request` JOIN `Operation` ON "\
            "`Request`.`RequestID`=`Operation`.`RequestID` WHERE `OperationID` = %s;" % operationID
    requestName = self._query( query )
    if not requestName["OK"]:
      self.log.error( "getScheduledRequest: %s" % requestName["Message"] )
      return requestName
    requestName = requestName["Value"]
    if not requestName:
      return S_OK()
    return self.getRequest( requestName[0][0] )
def getRequestName( self, requestID ):
""" get Request.RequestName for a given Request.RequestID """
query = "SELECT `RequestName` FROM `Request` WHERE `RequestID` = %s" % requestID
query = self._query( query )
if not query["OK"]:
self.log.error( "getRequestName: %s" % query["Message"] )
query = query["Value"]
if not query:
return S_ERROR( "getRequestName: no request found for RequestID=%s" % requestID )
return S_OK( query[0][0] )
  def getRequest( self, requestName = '', assigned = True ):
    """ read request for execution

    :param str requestName: request's name; when empty a random request among
                            the 100 least recently updated 'Waiting' ones is picked
    :param bool assigned: when True the selected request is flipped to 'Assigned'
    :return: S_OK( Request ) fully populated with Operations and Files,
             S_OK() when nothing is waiting, or S_ERROR
    """
    requestID = None
    if requestName:
      self.log.info( "getRequest: selecting request '%s'" % requestName )
      reqIDQuery = "SELECT `RequestID`, `Status` FROM `Request` WHERE `RequestName` = '%s';" % str( requestName )
      reqID = self._transaction( reqIDQuery )
      if not reqID["OK"]:
        self.log.error( "getRequest: %s" % reqID["Message"] )
        return reqID
      requestID = reqID["Value"][reqIDQuery][0]["RequestID"] if "RequestID" in reqID["Value"][reqIDQuery][0] else None
      status = reqID["Value"][reqIDQuery][0]["Status"] if "Status" in reqID["Value"][reqIDQuery][0] else None
      if not all( ( requestID, status ) ):
        return S_ERROR( "getRequest: request '%s' not exists" % requestName )
      # # already taken by somebody else
      if requestID and status and status == "Assigned" and assigned:
        return S_ERROR( "getRequest: status of request '%s' is 'Assigned', request cannot be selected" % requestName )
    else:
      reqIDsQuery = "SELECT `RequestID` FROM `Request` WHERE `Status` = 'Waiting' ORDER BY `LastUpdate` ASC LIMIT 100;"
      reqIDs = self._transaction( reqIDsQuery )
      if not reqIDs["OK"]:
        self.log.error( "getRequest: %s" % reqIDs["Message"] )
        return reqIDs
      reqIDs = reqIDs["Value"][reqIDsQuery]
      reqIDs = [ reqID["RequestID"] for reqID in reqIDs ]
      if not reqIDs:
        return S_OK()
      # # randomise to spread concurrent pollers over the candidate set
      random.shuffle( reqIDs )
      requestID = reqIDs[0]
    selectQuery = [ "SELECT * FROM `Request` WHERE `RequestID` = %s;" % requestID,
                    "SELECT * FROM `Operation` WHERE `RequestID` = %s;" % requestID ]
    selectReq = self._transaction( selectQuery )
    if not selectReq["OK"]:
      self.log.error( "getRequest: %s" % selectReq["Message"] )
      return S_ERROR( selectReq["Message"] )
    selectReq = selectReq["Value"]
    request = Request( selectReq[selectQuery[0]][0] )
    # # rebuild operations in their defined order
    for records in sorted( selectReq[selectQuery[1]], key = lambda k: k["Order"] ):
      # # order is ro, remove
      del records["Order"]
      operation = Operation( records )
      getFilesQuery = "SELECT * FROM `File` WHERE `OperationID` = %s;" % operation.OperationID
      getFiles = self._transaction( getFilesQuery )
      if not getFiles["OK"]:
        self.log.error( "getRequest: %s" % getFiles["Message"] )
        return getFiles
      getFiles = getFiles["Value"][getFilesQuery]
      for getFile in getFiles:
        # # skip NULL columns so File keeps its own defaults
        getFileDict = dict( [ ( key, value ) for key, value in getFile.items() if value != None ] )
        operation.addFile( File( getFileDict ) )
      request.addOperation( operation )
    if assigned:
      setAssigned = self._transaction( "UPDATE `Request` SET `Status` = 'Assigned' WHERE RequestID = %s;" % requestID )
      if not setAssigned["OK"]:
        self.log.error( "getRequest: %s" % setAssigned["Message"] )
        return setAssigned
    return S_OK( request )
def peekRequest( self, requestName ):
""" get request (ro), no update on states
:param str requestName: Request.RequestName
"""
return self.getRequest( requestName, False )
def getRequestNamesList( self, statusList = None, limit = None ):
""" select requests with status in :statusList: """
statusList = statusList if statusList else list( Request.FINAL_STATES )
limit = limit if limit else 100
query = "SELECT `RequestName`, `Status`, `LastUpdate` FROM `Request` WHERE "\
" `Status` IN (%s) ORDER BY `LastUpdate` DESC LIMIT %s;" % ( stringListToString( statusList ), limit )
reqNamesList = self._query( query )
if not reqNamesList["OK"]:
self.log.error( "getRequestNamesList: %s" % reqNamesList["Message"] )
return reqNamesList
reqNamesList = reqNamesList["Value"]
return S_OK( [ reqName for reqName in reqNamesList] )
  def deleteRequest( self, requestName ):
    """ delete request given its name, together with its Operations and Files

    :param str requestName: request.RequestName
    :return: S_OK() or S_ERROR
    """
    requestIDs = self._transaction(
      "SELECT r.RequestID, o.OperationID FROM `Request` r LEFT JOIN `Operation` o "\
      "ON r.RequestID = o.RequestID WHERE `RequestName` = '%s'" % requestName )
    if not requestIDs["OK"]:
      self.log.error( "deleteRequest: unable to read RequestID and OperationIDs: %s" % requestIDs["Message"] )
      return requestIDs
    requestIDs = requestIDs["Value"]
    trans = []
    requestID = None
    for records in requestIDs.values():
      for record in records:
        requestID = record["RequestID"] if record["RequestID"] else None
        operationID = record["OperationID"] if record["OperationID"] else None
        if operationID and requestID:
          # # children first: File rows reference Operation rows
          trans.append( "DELETE FROM `File` WHERE `OperationID` = %s;" % operationID )
          trans.append( "DELETE FROM `Operation` WHERE `RequestID` = %s AND `OperationID` = %s;" % ( requestID,
                                                                                                    operationID ) )
    # # last bit: request itself
    if requestID:
      trans.append( "DELETE FROM `Request` WHERE `RequestID` = %s;" % requestID )
    delete = self._transaction( trans )
    if not delete["OK"]:
      self.log.error( "deleteRequest: unable to delete request '%s': %s" % ( requestName, delete["Message"] ) )
      return delete
    return S_OK()
def getRequestProperties( self, requestName, columnNames ):
""" submit query """
return self._query( self._getRequestProperties( requestName, columnNames ) )
def _getRequestProperties( self, requestName, columnNames = None ):
""" select :columnNames: from Request table """
columnNames = columnNames if columnNames else Request.tableDesc()["Fields"].keys()
columnNames = ",".join( [ '`%s`' % str( columnName ) for columnName in columnNames ] )
return "SELECT %s FROM `Request` WHERE `RequestName` = '%s';" % ( columnNames, requestName )
def _getOperationProperties( self, operationID, columnNames = None ):
""" select :columnNames: from Operation table """
columnNames = columnNames if columnNames else Operation.tableDesc()["Fields"].keys()
columnNames = ",".join( [ '`%s`' % str( columnName ) for columnName in columnNames ] )
return "SELECT %s FROM `Operation` WHERE `OperationID` = %s;" % ( columnNames, int( operationID ) )
def _getFileProperties( self, fileID, columnNames = None ):
""" select :columnNames: from File table """
columnNames = columnNames if columnNames else File.tableDesc()["Fields"].keys()
columnNames = ",".join( [ '`%s`' % str( columnName ) for columnName in columnNames ] )
return "SELECT %s FROM `File` WHERE `FileID` = %s;" % ( columnNames, int( fileID ) )
  def getDBSummary( self ):
    """ get db summary: per-status counts of Request and File rows, and
    per-type/per-status counts of Operation rows

    :return: S_OK( { "Request": { status: count },
                     "Operation": { type: { status: count } },
                     "File": { status: count } } ) or S_ERROR
    """
    # # this will be returned
    retDict = { "Request" : {}, "Operation" : {}, "File" : {} }
    transQueries = { "SELECT `Status`, COUNT(`Status`) FROM `Request` GROUP BY `Status`;" : "Request",
                     "SELECT `Type`,`Status`,COUNT(`Status`) FROM `Operation` GROUP BY `Type`,`Status`;" : "Operation",
                     "SELECT `Status`, COUNT(`Status`) FROM `File` GROUP BY `Status`;" : "File" }
    ret = self._transaction( transQueries.keys() )
    if not ret["OK"]:
      self.log.error( "getDBSummary: %s" % ret["Message"] )
      return ret
    ret = ret["Value"]
    # # fold each query's rows into the matching branch of retDict
    for k, v in ret.items():
      if transQueries[k] == "Request":
        for aDict in v:
          status = aDict.get( "Status" )
          count = aDict.get( "COUNT(`Status`)" )
          if status not in retDict["Request"]:
            retDict["Request"][status] = 0
          retDict["Request"][status] += count
      elif transQueries[k] == "File":
        for aDict in v:
          status = aDict.get( "Status" )
          count = aDict.get( "COUNT(`Status`)" )
          if status not in retDict["File"]:
            retDict["File"][status] = 0
          retDict["File"][status] += count
      else: # # operation
        for aDict in v:
          status = aDict.get( "Status" )
          oType = aDict.get( "Type" )
          count = aDict.get( "COUNT(`Status`)" )
          if oType not in retDict["Operation"]:
            retDict["Operation"][oType] = {}
          if status not in retDict["Operation"][oType]:
            retDict["Operation"][oType][status] = 0
          retDict["Operation"][oType][status] += count
    return S_OK( retDict )
  def getRequestSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
    """ get db summary for the web portal

    :param dict selectDict: column -> value (or list of values) filters; the
                            special keys 'FromDate'/'ToDate' bound O.LastUpdate
    :param list sortList: [ ( column, direction ) ] — only the first entry is used
    :param int startItem: index of the first record in the page
    :param int maxItems: page size
    :return: S_OK( { ParameterNames, ColumnWidths, Records, TotalRecords } ) or S_ERROR
    """
    resultDict = {}
    rparameterList = [ 'RequestID', 'RequestName', 'JobID', 'OwnerDN', 'OwnerGroup']
    sparameterList = [ 'Type', 'Status', 'Operation']
    parameterList = rparameterList + sparameterList + [ "Error", "CreationTime", "LastUpdate"]
    # parameterList.append( 'Error' )
    # parameterList.append( 'CreationTime' )
    # parameterList.append( 'LastUpdateTime' )
    # NOTE(review): every other query in this class uses table `Request`, not
    # `Requests` — confirm this table name against the schema.
    # NOTE(review): O.Type is selected twice while parameterList expects an
    # 'Operation' column in that position — confirm the intended column.
    req = "SELECT R.RequestID, R.RequestName, R.JobID, R.OwnerDN, R.OwnerGroup,"
    req += "O.Type, O.Status, O.Type, O.Error, O.CreationTime, O.LastUpdate FROM Requests as R, Operation as O "
    new_selectDict = {}
    older = None
    newer = None
    # # qualify filter columns with their table alias
    for key, value in selectDict.items():
      if key in rparameterList:
        new_selectDict['R.' + key] = value
      elif key in sparameterList:
        new_selectDict['O.' + key] = value
      elif key == 'ToDate':
        older = value
      elif key == 'FromDate':
        newer = value
    condition = ''
    if new_selectDict or older or newer:
      condition = self.__buildCondition( new_selectDict, older = older, newer = newer )
      req += condition
    # # join condition between Request and Operation rows
    if condition:
      req += " AND R.RequestID=O.RequestID"
    else:
      req += " WHERE R.RequestID=O.RequestID"
    if sortList:
      req += " ORDER BY %s %s" % ( sortList[0][0], sortList[0][1] )
    result = self._query( req )
    if not result['OK']:
      return result
    if not result['Value']:
      resultDict['ParameterNames'] = parameterList
      resultDict['Records'] = []
      return S_OK( resultDict )
    nRequests = len( result['Value'] )
    # # clamp the requested page to the available rows
    if startItem <= len( result['Value'] ):
      firstIndex = startItem
    else:
      return S_ERROR( 'Requested index out of range' )
    if ( startItem + maxItems ) <= len( result['Value'] ):
      secondIndex = startItem + maxItems
    else:
      secondIndex = len( result['Value'] )
    records = []
    # # track the widest value per column for the portal's layout
    columnWidth = [ 0 for x in range( len( parameterList ) ) ]
    for i in range( firstIndex, secondIndex ):
      row = result['Value'][i]
      records.append( [ str( x ) for x in row] )
      for ind in range( len( row ) ):
        if len( str( row[ind] ) ) > columnWidth[ind]:
          columnWidth[ind] = len( str( row[ind] ) )
    resultDict['ParameterNames'] = parameterList
    resultDict['ColumnWidths'] = columnWidth
    resultDict['Records'] = records
    resultDict['TotalRecords'] = nRequests
    return S_OK( resultDict )
  def getRequestNamesForJobs( self, jobIDs ):
    """ read request names for jobs given jobIDs

    :param jobIDs: a single jobID or a list of jobIDs
    :return: S_OK( { "Successful": { jobID: requestName },
                     "Failed": { jobID: "Request not found" } } ) or S_ERROR
    """
    self.log.debug( "getRequestForJobs: got %s jobIDs to check" % str( jobIDs ) )
    if not jobIDs:
      return S_ERROR( "Must provide jobID list as argument." )
    # # accept a bare (py2) int/long as a one-element list
    if type( jobIDs ) in ( long, int ):
      jobIDs = [ jobIDs ]
    jobIDs = list( set( [ int( jobID ) for jobID in jobIDs ] ) )
    reqDict = { "Successful": {}, "Failed": {} }
    # # filter out 0
    jobIDsStr = ",".join( [ str( jobID ) for jobID in jobIDs if jobID ] )
    # # request names
    requestNames = "SELECT `RequestName`, `JobID` FROM `Request` WHERE `JobID` IN (%s);" % jobIDsStr
    requestNames = self._query( requestNames )
    if not requestNames["OK"]:
      self.log.error( "getRequestsForJobs: %s" % requestNames["Message"] )
      return requestNames
    requestNames = requestNames["Value"]
    for requestName, jobID in requestNames:
      reqDict["Successful"][jobID] = requestName
    # # everything not matched above has no request
    reqDict["Failed"] = dict.fromkeys( [ jobID for jobID in jobIDs if jobID not in reqDict["Successful"] ],
                                       "Request not found" )
    return S_OK( reqDict )
  def readRequestsForJobs( self, jobIDs = None ):
    """ read requests for jobs

    :param list jobIDs: list of JobIDs
    :return: S_OK( "Successful" : { jobID1 : Request, jobID2: Request, ... }
                   "Failed" : { jobID3: "error message", ... } )
    """
    self.log.debug( "readRequestForJobs: got %s jobIDs to check" % str( jobIDs ) )
    requestNames = self.getRequestNamesForJobs( jobIDs )
    if not requestNames["OK"]:
      self.log.error( "readRequestForJobs: %s" % requestNames["Message"] )
      return requestNames
    requestNames = requestNames["Value"]
    # # this will be returned
    retDict = { "Failed": requestNames["Failed"], "Successful": {} }
    self.log.debug( "readRequestForJobs: got %d request names" % len( requestNames["Successful"] ) )
    for jobID in requestNames['Successful']:
      # # read-only fetch: request states stay untouched
      request = self.peekRequest( requestNames['Successful'][jobID] )
      if not request["OK"]:
        retDict["Failed"][jobID] = request["Message"]
        continue
      retDict["Successful"][jobID] = request["Value"]
    return S_OK( retDict )
def getRequestStatus( self, requestName ):
""" get request status for a given request name """
self.log.debug( "getRequestStatus: checking status for '%s' request" % requestName )
query = "SELECT `Status` FROM `Request` WHERE `RequestName` = '%s'" % requestName
query = self._query( query )
if not query["OK"]:
self.log.error( "getRequestStatus: %s" % query["Message"] )
return query
requestStatus = query['Value'][0][0]
return S_OK( requestStatus )
  def getRequestFileStatus( self, requestName, lfnList ):
    """ get status for files in request given its name

    :param requestName: Request.RequestName, or Request.RequestID (resolved here)
    :param list lfnList: list of LFNs
    :return: S_OK( { lfn: status } ) with "UNKNOWN" for LFNs not present in
             the request, or S_ERROR
    """
    # # allow an integer RequestID instead of the name
    if type( requestName ) == int:
      requestName = self.getRequestName( requestName )
      if not requestName["OK"]:
        self.log.error( "getRequestFileStatus: %s" % requestName["Message"] )
        return requestName
      else:
        requestName = requestName["Value"]
    req = self.peekRequest( requestName )
    if not req["OK"]:
      self.log.error( "getRequestFileStatus: %s" % req["Message"] )
      return req
    req = req["Value"]
    res = dict.fromkeys( lfnList, "UNKNOWN" )
    for op in req:
      for opFile in op:
        if opFile.LFN in lfnList:
          res[opFile.LFN] = opFile.Status
    return S_OK( res )
  def getRequestInfo( self, requestName ):
    """ get request info given Request.RequestName or Request.RequestID

    :return: S_OK( ( RequestID, Status, RequestName, JobID, OwnerDN, OwnerGroup,
             DIRACSetup, SourceComponent, CreationTime, SubmitTime, lastUpdate ) )
             or S_ERROR
    """
    # # allow an integer RequestID instead of the name
    if type( requestName ) == int:
      requestName = self.getRequestName( requestName )
      if not requestName["OK"]:
        self.log.error( "getRequestInfo: %s" % requestName["Message"] )
        return requestName
      else:
        requestName = requestName["Value"]
    # NOTE(review): "lastUpdate" is lower-case here while the queries elsewhere
    # in this class use column `LastUpdate` — confirm against the table schema.
    requestInfo = self.getRequestProperties( requestName, [ "RequestID", "Status", "RequestName", "JobID",
                                                            "OwnerDN", "OwnerGroup", "DIRACSetup", "SourceComponent",
                                                            "CreationTime", "SubmitTime", "lastUpdate" ] )
    if not requestInfo["OK"]:
      self.log.error( "getRequestInfo: %s" % requestInfo["Message"] )
      return requestInfo
    requestInfo = requestInfo["Value"][0]
    return S_OK( requestInfo )
def getDigest( self, requestName ):
""" get digest for request given its name
:param str requestName: request name
"""
self.log.debug( "getDigest: will create digest for request '%s'" % requestName )
request = self.getRequest( requestName, False )
if not request["OK"]:
self.log.error( "getDigest: %s" % request["Message"] )
request = request["Value"]
if not isinstance( request, Request ):
self.log.info( "getDigest: request '%s' not found" )
return S_OK()
return request.getDigest()
  @staticmethod
  def __buildCondition( condDict, older = None, newer = None ):
    """ build SQL condition statement from provided condDict
    and other extra conditions

    blindly copied from old code, hope it works

    :param dict condDict: column -> value, or column -> list of values (IN clause)
    :param older: upper bound (exclusive) on O.LastUpdate
    :param newer: lower bound (inclusive) on O.LastUpdate
    :return: WHERE/AND clause string (empty when no conditions apply)
    """
    condition = ''
    # # first emitted condition gets WHERE, the rest get AND
    conjunction = "WHERE"
    if condDict != None:
      for attrName, attrValue in condDict.items():
        if type( attrValue ) == list:
          multiValue = ','.join( ['"' + x.strip() + '"' for x in attrValue] )
          condition = ' %s %s %s in (%s)' % ( condition,
                                              conjunction,
                                              str( attrName ),
                                              multiValue )
        else:
          condition = ' %s %s %s=\'%s\'' % ( condition,
                                             conjunction,
                                             str( attrName ),
                                             str( attrValue ) )
        conjunction = "AND"
    if older:
      condition = ' %s %s O.LastUpdate < \'%s\'' % ( condition,
                                                     conjunction,
                                                     str( older ) )
      conjunction = "AND"
    if newer:
      condition = ' %s %s O.LastUpdate >= \'%s\'' % ( condition,
                                                      conjunction,
                                                      str( newer ) )
    return condition
| avedaee/DIRAC | RequestManagementSystem/DB/RequestDB.py | Python | gpl-3.0 | 26,378 |
import shutil
import os
import errno
class file_exporter:
    """Copy a set of files into an export directory, preserving each file's
    layout relative to the longest directory prefix shared by all of them."""

    def __init__(self):
        # last human-readable failure; empty string means no error yet
        self.error_message = ''

    def get_error_message (self):
        """Return the message describing the most recent failure ('' if none)."""
        return self.error_message

    def tally_path_segments (self, file):
        """Count every ancestor directory of `file` into self.path_segments
        (accumulated across calls; a segment shared by all files ends up with
        a count equal to len(self.files))."""
        while (file != ''):
            (first, last) = os.path.split (file)
            if first == file:
                #### we've hit the top of the path, so bail out
                break
            if first not in self.path_segments:
                self.path_segments[first] = 0
            self.path_segments[first] += 1
            file = first

    def get_base_path (self):
        """Compute self.base_path: the longest directory common to all files."""
        self.path_segments = {}
        for file in self.files:
            self.tally_path_segments (file)
        max_path_len = 0
        max_path = ''
        # the common prefix is the longest segment seen by every file
        for segment in self.path_segments:
            if self.path_segments[segment] == len (self.files):
                if len (segment) > max_path_len:
                    max_path_len = len (segment)
                    max_path = segment
        #### use join() to append a final separator; this is important when
        #### we strip the base path from the full filenames
        self.base_path = os.path.join (max_path, '')

    def export (self, export_path, files):
        """Copy `files` under `export_path`, recreating their relative layout.

        Returns True on success; on failure sets self.error_message and
        returns False.
        """
        self.files = files
        self.base_path = ''
        print "  calculating base path..."
        self.get_base_path ()
        print "  base path : " + self.base_path.encode('utf-8')
        for file in self.files:
            print "  - exporting file '" + file.encode('utf-8') + "'..."
            basename = file.replace (self.base_path, '')
            export_file = os.path.join (export_path, basename)
            print "    writing to '" + export_file.encode('utf-8') + "'..."
            (first, last) = os.path.split (export_file)
            try:
                print "    making dir '" + first.encode('utf-8') + "'..."
                os.makedirs (first.encode ('utf-8'))
            except OSError as e:
                #### ignore directory already exists
                if e.errno == errno.EEXIST:
                    pass
                else:
                    self.error_message = "Could not copy '" + file.encode('utf-8') + "' to '" + export_file.encode('utf-8') + "': " + e.strerror
                    return False
            print "    copying file..."
            # NOTE(review): on Python 2, shutil.copy2 failures surface as
            # IOError, which is not a subclass of OSError — confirm this
            # except clause actually catches copy failures.
            try:
                shutil.copy2(file.encode ('utf-8'), export_file.encode ('utf-8'))
            except OSError as e:
                self.error_message = "Could not copy '" + file.encode ('utf-8') + "' to '" + export_file.encode ('utf-8') + "': " + e.strerror
                return False
        return True
| jpriebe/qooxtunes | addon/python/file_exporter.py | Python | gpl-3.0 | 2,670 |
"""
Django settings for asucourses project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'south',
'catalog'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'asucourses.urls'
WSGI_APPLICATION = 'asucourses.wsgi.application'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# FIX: a one-element tuple needs a trailing comma; without it TEMPLATE_DIRS
# was a plain string (which Django would iterate character by character).
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
execfile(os.path.join(BASE_DIR, 'asucourses', 'development_settings.py'))
| asucourses/asucourses.com | asucourses/settings.py | Python | gpl-3.0 | 1,495 |
# Package version string (PEP 440 pre-release: 0.0.2, alpha 3).
__version__ = "0.0.2a3"
| trueneu/swiss-knife | swk_plugins/swk_casp/swk_casp/version.py | Python | gpl-3.0 | 24 |
#!/usr/bin/env python
import socket
import sys
import os
import time
import random
import csv
import json
import random
from sentence_generator import make_sentence
from copy import deepcopy
from subprocess import check_output
# csv file columns are timestamp, pressure, CO2, ...
SAMPLE_DATA_DIR = os.path.join(os.path.dirname(__file__), "sample_data")
SAMPLE_DATA = os.path.join(SAMPLE_DATA_DIR, "1427199271-sample-breathing.csv")
# Unix-domain socket shared with the consuming process.
SOCKET_PATH = '/tmp/lucidity.socket'
# Playback speed multiplier for the simulation.
TIME_WARP = float(os.environ.get('TIME_WARP', 1.0))
MAX_LINES_AT_ONCE = int(os.environ.get('MAX_LINES_AT_ONCE', 1))
# NOTE(review): any non-empty string (including "0" or "false") is truthy here.
EMIT_RANDOM_MSGS = bool(os.environ.get('GIBBERISH', False))
class SocketNotFound(Exception):
    """Raised when the expected Unix socket path does not exist."""
# Read in data from the example csv file
datapoints = []
with open(SAMPLE_DATA, 'rb') as csvfile:
    datareader = csv.reader(csvfile)
    for row in datareader:
        # every column is numeric; parse the whole row to floats
        datapoints.append([float(x) for x in row])

# Try and connect to socket. If any error, print out error and output to stdout instead.
try:
    # Make sure the socket exists
    if not os.path.exists(SOCKET_PATH):
        raise SocketNotFound("No socket at %s" % SOCKET_PATH)

    # Create a UDS socket
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.setblocking(0) # important - don't block on reads
    sock.connect(SOCKET_PATH)
    # all outgoing messages go through this callable
    output = sock.sendall
except (SocketNotFound, socket.error), msg:
    print >>sys.stderr, "Error connecting to %s.\n\n%s." % (SOCKET_PATH, msg)
    sys.exit(1)
def receive(the_socket):
    """Generator over incoming newline-delimited JSON messages.

    Yields None when the non-blocking socket has nothing to read, otherwise
    yields one decoded object per complete line; partial lines stay buffered.
    """
    # Act as an iterator. Sometimes >1 message will have accumulated on the
    # socket by the time we come to read it.
    # Yield either None (if nothing received, buffer empty) or json decode line by line.
    rbuffer = ''
    while True:
        try:
            # NOTE(review): recv returning '' means the peer closed the
            # connection; this loop would then spin without yielding data.
            incoming = the_socket.recv(1024)
            rbuffer += incoming
        except socket.error:
            # nothing to read
            yield None
            continue
        while rbuffer.find("\n") != -1:
            line, rbuffer = rbuffer.split("\n", 1)
            try:
                yield json.loads(line)
            except ValueError, e:
                # bad JSON: report and keep consuming the buffer
                print >>sys.stderr, str(e)
                print >>sys.stderr, line
def enum(**enums):
    """Build a simple enumeration type whose attributes are the given keywords."""
    members = dict(enums)
    return type('Enum', (), members)
# Lifecycle states the simulated instrument reports.
STATES = enum(
    INITIALISING = "initialising",
    WAITING = "waiting",
    CALIBRATING = "calibrating",
    ANALYSING = "analysing",
    COLLECTING = "collecting",
)

# States in which the instrument is actively working; nudges advance
# through these in order before falling back to WAITING.
ACTIVE_STATES = [ STATES.CALIBRATING, STATES.ANALYSING, STATES.COLLECTING ]

# Factory-default settings; always deep-copied before use so runtime edits
# never mutate this template.
DEFAULT_SETTINGS = {
    "calibration_time": 3,
    "sample_collection_time": 3,
    "collection_control": "c",
    "auto_triggers": True,
    "blank_capture": False,
    "total_breath": False,
    "collection_rate": 4,
    "collection_limit": 5,
    "filename": "",
    # start/end of the capture window, as percent of breath with gradient direction
    "capture_window": {
        "start": {
            "percent": 85,
            "gradient": "rising"
        },
        "end": {
            "percent": 15,
            "gradient": "falling"
        },
    }
}
class Publisher:
    def __init__(self):
        """Initialise buffers and settings and announce the INITIALISING state."""
        self.lines_buffered = 0
        self.index = 0
        self.buffer = ""
        self.state = None
        # get own version (requires running inside a git checkout)
        self.version = check_output(['git','describe','--tags'])
        self.change_state(STATES.INITIALISING)
        # canned "user" settings, as if previously saved from the UI
        self.user_settings = {
            "calibration_time": 5,
            "sample_collection_time": 2,
            "collection_control": "p",
            "auto_triggers": False,
            "blank_capture": False,
            "total_breath": False,
            "collection_rate": 2,
            "collection_limit": 7,
            "filename": "myfile",
            "capture_window": {
                "start": {
                    "percent": 62,
                    "gradient": "rising"
                },
                "end": {
                    "percent": 9,
                    "gradient": "falling"
                },
            }
        }
        # live settings start from the factory defaults
        self.settings = deepcopy(DEFAULT_SETTINGS)
        self.set_completion(0,0)
def set_completion(self, by_volume, by_time):
self.collection_completion = {
"volume": min(100, by_volume),
"time": min(100, by_time),
}
self.emit(
collection_completion = self.collection_completion,
)
def change_state(self, new_state, message=None, severity=None):
if self.state != new_state:
message = "State changed to %s." % new_state
severity = "info"
self.state = new_state
self.emit(message=message, severity="info")
self.set_completion(0, 0)
    def emit(self, **kwargs):
        """Send one JSON line on the socket: current state/version plus kwargs."""
        h = {
            "state": self.state,
            "version": self.version,
            "is_simulation": True # DON'T include this member in a real publisher's messages
        }
        # extra fields override nothing above unless the caller reuses a key
        for key,val in kwargs.iteritems():
            h[key] = val
        output(json.dumps(h) + "\n")
def run(self):
# Wait a while to simulate initialisation
self.change_state(STATES.INITIALISING)
time.sleep(3.0 / TIME_WARP)
self.change_state(STATES.WAITING)
# Loop until user hits Ctrl+C
while True:
try:
# read from sock
received = receive(sock).next()
been_nudged = False
if received is not None and 'command' in received:
# act on information received
print "Received: %s" % received
do_what = received['command']
if do_what == "stop":
self.change_state(STATES.WAITING)
elif do_what == "start":
self.change_state(STATES.CALIBRATING)
self.emit(message="Using settings: " + json.dumps(received['settings']), severity="info", results_dir=SAMPLE_DATA_DIR)
self.emit(message="Got timestamp: " + json.dumps(received['timestamp']), severity="info")
elif do_what == "request_state":
self.emit()
elif do_what == "request_settings_current":
self.emit(settings=self.settings, results_dir=SAMPLE_DATA_DIR)
elif do_what == "apply_settings_default":
self.settings = deepcopy(DEFAULT_SETTINGS)
self.emit(settings=self.settings, message="Loaded default settings.", severity="info")
elif do_what == "apply_settings_user":
self.settings = deepcopy(self.user_settings)
self.emit(settings=self.settings, message="Loaded user settings.", severity="info")
elif do_what == "save_settings":
self.user_settings = received['settings']
self.settings = deepcopy(self.user_settings)
self.emit(settings=self.settings, message="Saved user settings.", severity="info")
elif do_what == "nudge":
been_nudged = True
# While running...
if self.state in ACTIVE_STATES:
# ...cycle through active states to simulate instrument doing things
if been_nudged:
current = ACTIVE_STATES.index(self.state)
next = current + 1
if next >= len(ACTIVE_STATES):
self.change_state(STATES.WAITING)
else:
self.change_state(ACTIVE_STATES[next])
# Emit incrementing completion data during simulated collection
if self.state == STATES.COLLECTING:
self.set_completion(
by_volume = self.collection_completion["volume"] + random.random() * 5,
by_time = self.collection_completion["time"] + 2.5,
)
# Get data (ultimately this comes from the sample file)
datapoint = datapoints[self.index]
# Replace the first member of datapoint with the current timestamp
datapoint[0] = time.time()
# Fourth column of data should be zero unless we are in collecting state
if self.state != STATES.COLLECTING:
datapoint[3] = 0
# Put comma-separated line of data into the buffer
self.buffer += ",".join([str(x) for x in datapoint]) + "\n"
self.lines_buffered += 1
# Output data if the 'buffer' is full, or on a random spin.
if self.lines_buffered >= MAX_LINES_AT_ONCE or random.random() < 0.3:
if self.state in ACTIVE_STATES:
output( self.buffer )
self.buffer = ""
self.lines_buffered = 0
# Move to next data point. Increment self.index and loop back round
self.index += 1
if self.index >= len(datapoints):
self.index = 0
# Emit some random debugging every now and then
if EMIT_RANDOM_MSGS:
if self.state == STATES.WAITING:
if random.random() < 0.1:
self.emit(message="Waiting" + "." * random.randint(2,5))
else:
x = random.random()
if x < 0.05:
self.emit(message="ERROR: " + make_sentence(), severity="error")
elif x < 0.1:
self.emit(message="WARNING: " + make_sentence(), severity="warning")
elif x < 0.5:
self.emit(message=make_sentence())
time.sleep(0.2 / TIME_WARP)
except KeyboardInterrupt:
break
# Script entry: run the simulated publisher until Ctrl+C, then release the
# socket opened at module level.
p = Publisher()
p.run()
sock.close()
print "Finished." | breathe-free/breathe-see | example_publisher/__main__.py | Python | gpl-3.0 | 10,411 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Repacking Instaseis databases.
Requires click, h5py, and numpy.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2016
Simon Stähler (staehler@geophysik.uni-muenchen.de), 2016
:license:
GNU Lesser General Public License, Version 3 [non-commercial/academic use]
(http://www.gnu.org/copyleft/lgpl.html)
"""
import os
import click
import numpy as np
def maybe_encode(string, encoding='ascii'):
    """Best-effort encode of *string* to bytes.

    Returns ``string.encode(encoding)`` when possible; values that are
    already bytes (no usable ``encode``) or that cannot be represented in
    the target encoding are returned unchanged.
    """
    try:
        encoded = string.encode(encoding)
    except (AttributeError, UnicodeEncodeError):
        return string
    return encoded
def unroll_and_merge_netcdf4(filenames, output_folder):
    """
    Completely unroll and merge both files.

    Takes the per-source-type AxiSEM outputs (2 files for a force source:
    PZ/PX; 4 files for a moment-tensor source: MZZ, MXX_P_MYY, MXZ_MYZ,
    MXY_MXX_M_MYY), re-sorts all elements along a KD-tree ordering of the
    element midpoints and writes one contiguous 5-D "merged_snapshots"
    variable into ``merged_instaseis_db.nc4`` inside *output_folder*.
    """
    import netCDF4
    from scipy.spatial import cKDTree
    # Find MZZ, MXX_P_MYY, MXZ_MYZ, MXY_MXX_M_MYY directories
    if len(filenames) == 4:
        filenames = [os.path.normpath(_i) for _i in filenames]
        mzz = [_i for _i in filenames if "MZZ" in _i]
        mxx = [_i for _i in filenames if "MXX_P_MYY" in _i]
        mxz = [_i for _i in filenames if "MXZ_MYZ" in _i]
        mxy = [_i for _i in filenames if "MXY_MXX_M_MYY" in _i]
        # Exactly one file per moment-tensor component must be present.
        assert len(mzz) == 1
        assert len(mxx) == 1
        assert len(mxz) == 1
        assert len(mxy) == 1
        mzz = mzz[0]
        mxx = mxx[0]
        mxz = mxz[0]
        mxy = mxy[0]
        assert os.path.exists(mzz)
        assert os.path.exists(mxx)
        assert os.path.exists(mxz)
        assert os.path.exists(mxy)
        f_in_1 = netCDF4.Dataset(mzz, 'r')
        f_in_2 = netCDF4.Dataset(mxx, 'r')
        f_in_3 = netCDF4.Dataset(mxz, 'r')
        f_in_4 = netCDF4.Dataset(mxy, 'r')
    elif len(filenames) == 2:
        # Force source: one vertical (PZ) and one horizontal (PX) run.
        pz = [_i for _i in filenames if "PZ" in _i]
        px = [_i for _i in filenames if "PX" in _i]
        assert len(pz) == 1
        assert len(px) == 1
        pz = pz[0]
        px = px[0]
        assert os.path.exists(pz)
        assert os.path.exists(px)
        f_in_1 = netCDF4.Dataset(pz, 'r')
        f_in_2 = netCDF4.Dataset(px, 'r')
    else:
        print('Wrong number of simulations: ', len(filenames))
        assert False
    output_filename = os.path.join(output_folder, "merged_instaseis_db.nc4")
    # Refuse to overwrite an existing merged database.
    assert not os.path.exists(output_filename)
    # Get sorting order
    # Build a KD-tree over the element midpoints (Z, S); its internal
    # index order (ctree.indices) is used to re-sort the elements.
    r = np.array([f_in_1.groups['Mesh'].variables['mp_mesh_Z'][:],
                  f_in_1.groups['Mesh'].variables['mp_mesh_S'][:]]).transpose()
    ctree = cKDTree(r)
    inds = ctree.indices
    try:
        f_out = netCDF4.Dataset(output_filename, 'w', format='NETCDF4')
        # Copy attributes from the vertical file.
        for name in f_in_1.ncattrs():
            value = getattr(f_in_1, name)
            print(name, value)
            setattr(f_out, name, maybe_encode(value))
        # Record how many simulations were merged.
        f_out.setncattr('nsim', len(filenames))
        for name, dimension in f_in_1.dimensions.items():
            if not dimension.isunlimited():
                f_out.createDimension(name, len(dimension))
            else:
                f_out.createDimension(name, None)
        # Create Mesh group and copy mesh variables
        f_out.createGroup('Mesh')
        for name, dimension in f_in_1['Mesh'].dimensions.items():
            if not dimension.isunlimited():
                f_out['Mesh'].createDimension(name, len(dimension))
            else:
                f_out['Mesh'].createDimension(name, None)
        # Per-element mesh variables are rewritten in the KD-tree order so
        # they stay aligned with the reordered snapshot data below.
        for name, variable in f_in_1['Mesh'].variables.items():
            f_out['Mesh'].createVariable(name, variable.datatype,
                                         variable.dimensions)
            if ('elements',) == variable.dimensions:
                print('Resorting %s' % name)
                f_out['Mesh'].variables[name][:] = \
                    f_in_1['Mesh'].variables[name][inds]
            elif name == 'sem_mesh':
                print('Resorting first dim of %s' % name)
                f_out['Mesh'].variables[name][:, :, :] = \
                    f_in_1['Mesh'].variables[name][inds, :, :]
            elif name == 'fem_mesh':
                print('Resorting first dim of %s' % name)
                f_out['Mesh'].variables[name][:, :] = \
                    f_in_1['Mesh'].variables[name][inds, :]
            else:
                f_out['Mesh'].variables[name][:] = \
                    f_in_1['Mesh'].variables[name][:]
        # Copy source time function variables
        for name, variable in f_in_1['Snapshots'].variables.items():
            if name in ['stf_dump', 'stf_d_dump']:
                f_out.createVariable(name, variable.datatype,
                                     variable.dimensions)
                f_out.variables[name][:] = f_in_1['Snapshots'].variables[name][:]
        # Create a new array but this time in 5D. The first dimension
        # is the element number, the second and third are the GLL
        # points in both directions, the fourth is the time axis, and the
        # last the displacement axis.
        ndumps = f_in_1.getncattr("number of strain dumps")
        number_of_elements = f_in_1.getncattr("nelem_kwf_global")
        npol = f_in_1.getncattr("npol")
        # Get datasets and the dtype.
        if len(filenames) == 2:
            meshes = [
                f_in_1["Snapshots"]["disp_s"], # PZ
                f_in_1["Snapshots"]["disp_z"],
                f_in_2["Snapshots"]["disp_s"], # PX
                f_in_2["Snapshots"]["disp_p"],
                f_in_2["Snapshots"]["disp_z"]]
        elif len(filenames) == 4:
            meshes = [
                f_in_1["Snapshots"]["disp_s"], # MZZ
                f_in_1["Snapshots"]["disp_z"],
                f_in_2["Snapshots"]["disp_s"], # MXX + MYY
                f_in_2["Snapshots"]["disp_z"],
                f_in_3["Snapshots"]["disp_s"], # MXZ / MYZ
                f_in_3["Snapshots"]["disp_p"],
                f_in_3["Snapshots"]["disp_z"],
                f_in_4["Snapshots"]["disp_s"], # MXY / MXX - MYY
                f_in_4["Snapshots"]["disp_p"],
                f_in_4["Snapshots"]["disp_z"]]
        dtype = meshes[0].dtype
        nvars = len(meshes)
        dim_elements = f_out.createDimension('elements', number_of_elements)
        dim_ipol = f_out.createDimension('ipol', npol + 1)
        dim_jpol = f_out.createDimension('jpol', npol + 1)
        dim_nvars = f_out.createDimension('variables', nvars)
        dim_snaps = f_out.dimensions['snapshots']
        # contiguous=True disables chunking/compression, so one element's
        # complete record is a single linear stretch on disk.
        ds_o = f_out.createVariable(varname="merged_snapshots",
                                    dimensions=(dim_elements.name,
                                                dim_nvars.name,
                                                dim_jpol.name,
                                                dim_ipol.name,
                                                dim_snaps.name),
                                    datatype=dtype, contiguous=True)
        # Old order (Instaseis):
        # dimensions=(dim_elements.name,
        #             dim_snaps.name,
        #             dim_ipol.name,
        #             dim_jpol.name,
        #             dim_nvars.name),
        # Per-element staging buffer, reused across the loop below.
        utemp = np.zeros((nvars, npol + 1, npol + 1, ndumps),
                         dtype=dtype)
        # Now it becomes more interesting and very slow.
        sem_mesh = f_in_1["Mesh"]["sem_mesh"]
        with click.progressbar(range(number_of_elements),
                               length=number_of_elements,
                               label="\t ") as gll_idxs:
            for gll_idx in gll_idxs:
                gll_point_ids = sem_mesh[inds[gll_idx]]
                # Load displacement from all GLL points.
                for ivar, var in enumerate(meshes):
                    # The list of ids we have is unique but not sorted.
                    # Read with a sorted index (required/faster for HDF5
                    # fancy indexing), then map back to original order.
                    ids = gll_point_ids.flatten()
                    s_ids = np.sort(ids)
                    temp = var[:, s_ids]
                    for ipol in range(npol + 1):
                        for jpol in range(npol + 1):
                            idx = ipol * (npol + 1) + jpol
                            # ndumps, ipol, jpol, nvar (Fortran notation)
                            utemp[ivar, ipol, jpol, :] = \
                                temp[:, np.argwhere(s_ids == ids[idx])[0][0]]
                ds_o[gll_idx] = utemp
    finally:
        # Best-effort close of every handle; the bare excepts are
        # deliberate since f_in_3/f_in_4 (and f_out on early failure)
        # may not exist.
        try:
            f_in_1.close()
        except:
            pass
        try:
            f_in_2.close()
        except:
            pass
        try:
            f_in_3.close()
        except:
            pass
        try:
            f_in_4.close()
        except:
            pass
        try:
            f_out.close()
        except:
            pass
@click.command()
@click.argument("input_folder", type=click.Path(exists=True,
                                                file_okay=False,
                                                dir_okay=True))
@click.argument("output_folder", type=click.Path(exists=False))
def repack_database(input_folder, output_folder):
    # Comments (not a docstring) on purpose: click would surface a
    # docstring as the command's --help text and change CLI output.
    # Collect every AxiSEM "ordered_output.nc4" file below input_folder.
    found_filenames = []
    for root, _, filenames in os.walk(input_folder, followlinks=True):
        for filename in filenames:
            if filename != "ordered_output.nc4":
                continue
            found_filenames.append(os.path.join(root, filename))
    assert found_filenames, "No files named `ordered_output.nc4` found."
    # Fails if output_folder already exists (click checked exists=False).
    os.makedirs(output_folder)
    # The unrolled merge completely unrolls everything, dededuplicates the GLL
    # points, and merges both netCDF files into one big file.
    unroll_and_merge_netcdf4(filenames=found_filenames,
                             output_folder=output_folder)
# Command-line entry point (click parses sys.argv).
if __name__ == "__main__":
    repack_database()
| seismology/mc_kernel | UTILS/repack_database.py | Python | gpl-3.0 | 9,908 |
"""
Unit tests for the base mechanism class.
"""
import pytest
from azmq.mechanisms.base import Mechanism
from azmq.errors import ProtocolError
@pytest.mark.asyncio
async def test_expect_command(reader):
    """A short command frame (0x04) named FOO yields its payload."""
    reader.write(b'\x04\x09\x03FOOhello')
    reader.seek(0)
    result = await Mechanism._expect_command(reader=reader, name=b'FOO')
    assert result == b'hello'
@pytest.mark.asyncio
async def test_expect_command_large(reader):
    """A long command frame (0x06 with an 8-byte size) parses the same way."""
    reader.write(b'\x06\x00\x00\x00\x00\x00\x00\x00\x09\x03FOOhello')
    reader.seek(0)
    result = await Mechanism._expect_command(reader=reader, name=b'FOO')
    assert result == b'hello'
@pytest.mark.asyncio
async def test_expect_command_invalid_size_type(reader):
    """A leading octet that is not a command size type is a protocol error."""
    reader.write(b'\x03')
    reader.seek(0)
    with pytest.raises(ProtocolError):
        await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_expect_command_invalid_name_size(reader):
    """A declared name length different from the expected name's is rejected."""
    reader.write(b'\x04\x09\x04HELOhello')
    reader.seek(0)
    with pytest.raises(ProtocolError):
        await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_expect_command_invalid_name(reader):
    """A command whose name does not match the expected one is rejected."""
    reader.write(b'\x04\x08\x03BARhello')
    reader.seek(0)
    with pytest.raises(ProtocolError):
        await Mechanism._expect_command(reader=reader, name=b'FOO')
@pytest.mark.asyncio
async def test_read_frame(reader):
    """A short message frame is returned as (payload, last-frame flag);
    no command frame is present, so on_command must never fire."""
    reader.write(b'\x00\x03foo')
    reader.seek(0)
    async def on_command(name, data):
        assert False
    result = await Mechanism.read(reader=reader, on_command=on_command)
    assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_frame_large(reader):
    """A long message frame (0x02 with an 8-byte size) is returned the
    same way as a short one."""
    reader.write(b'\x02\x00\x00\x00\x00\x00\x00\x00\x03foo')
    reader.seek(0)
    async def on_command(name, data):
        assert False
    result = await Mechanism.read(reader=reader, on_command=on_command)
    assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_command(reader):
    """A command frame before a message frame is dispatched to on_command,
    then the following message frame is returned."""
    reader.write(b'\x04\x09\x03BARhello\x00\x03foo')
    reader.seek(0)
    async def on_command(name, data):
        assert name == b'BAR'
        assert data == b'hello'
    result = await Mechanism.read(reader=reader, on_command=on_command)
    assert result == (b'foo', True)
@pytest.mark.asyncio
async def test_read_invalid_size_type(reader):
    """An unknown frame size octet raises ProtocolError."""
    reader.write(b'\x09')
    reader.seek(0)
    async def on_command(name, data):
        assert False
    with pytest.raises(ProtocolError):
        await Mechanism.read(reader=reader, on_command=on_command)
| ereOn/azmq | tests/unit/test_mechanisms/test_base.py | Python | gpl-3.0 | 2,577 |
# -*- coding: utf-8 -*-
#
# Total Open Station documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 28 23:03:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../totalopenstation'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc pulls API documentation out of docstrings; napoleon lets those
# docstrings use the Google/NumPy styles instead of plain reST.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Total Open Station'
copyright = '2015-2020, Stefano Costa, Damien Gaignon and Luca Bianconi'
author = 'Stefano Costa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build',
'global.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
rst_prolog = """
.. include:: /global.rst
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# These are Alabaster options: a GitHub "star" badge for the project
# repository plus a tagline shown under the project name in the sidebar.
html_theme_options = {
    'github_user': 'totalopenstation',
    'github_repo': 'totalopenstation',
    'github_type': 'star',
    'github_count': 'true',
    'github_button': True,
    'description': 'Download and export field survey data from your total station'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "tops.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TotalOpenStationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TotalOpenStation.tex', 'Total Open Station Documentation',
'Stefano Costa, Damien Gaignon, Luca Bianconi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('totalopenstation-cli-parser',
'totalopenstation-cli-parser',
'Total Open Station command line converter',
['Stefano Costa, Luca Bianconi'],
1),
('totalopenstation-cli-connector',
'totalopenstation-cli-connector',
'Total Open Station command line downloader',
['Stefano Costa, Luca Bianconi'],
1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TotalOpenStation', 'Total Open Station Documentation',
'Stefano Costa, Damien Gaignon, Luca Bianconi', 'TotalOpenStation', 'Total Open Station downloads data from your total station into common formats',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Napoleon settings
# Accept both Google-style and NumPy-style docstrings; document instance
# attributes with :ivar:; render parameter lists as a single field list
# rather than one :param: role per argument.
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_use_ivar = True
napoleon_use_param = False
| steko/totalopenstation | docs/conf.py | Python | gpl-3.0 | 9,411 |
#!/usr/bin/env python3
'''Khronos OpenVG parent image binding for EGL.
http://www.khronos.org/registry/egl/extensions/KHR/EGL_KHR_vg_parent_image.txt
'''
# Copyright © 2012-13 Tim Pederick.
#
# This file is part of Pegl.
#
# Pegl is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pegl is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pegl. If not, see <http://www.gnu.org/licenses/>.
# Local imports.
from .khr_image import Image
# Extension image target type.
# Registers the EGL_KHR_vg_parent_image extension on the Image class,
# adding the VG_PARENT_IMAGE target (0x30BA) so an EGLImage can be
# created from an OpenVG parent image.
Image.extend('EGL_KHR_vg_parent_image', {'VG_PARENT_IMAGE': 0x30BA})
| perey/pegl | src/pegl/ext/khr_vgimage.py | Python | gpl-3.0 | 983 |
#!/usr/bin/env python
# encoding: utf-8
###
# Copyright 2011 University of Warsaw, Krzysztof Rusek
#
# This file is part of SegmEdit.
#
# SegmEdit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SegmEdit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SegmEdit. If not, see <http://www.gnu.org/licenses/>.
import os, sys
from configtypes import ShapeStyle, ZoneLabel
# Initial main-window geometry in pixels (width, height) and title.
MAIN_WINDOW_SIZE = (1024, 700)
WINDOW_TITLE = "SegmEdit"
# fonts sizes (points)
BUTTON_FONT_SIZE = 10
LABEL_FONT_SIZE = 9
LITTLE_BUTTON_FONT_SIZE = 7
# buttons sizes (pixels)
BUTTON_HEIGHT = 23
LITTLE_BUTTON_HEIGHT = 21
# zone labels
# Legacy label names, kept for compatibility with older documents.
OLD_ZONE_LABELS = ['Title', 'Author', 'Affiliation', 'Abstract',
                   'Body', 'Header', 'Footer', 'Unknown']
# One entry per zone type: ZoneLabel(internal name, display name, style).
# ShapeStyle takes an RGBA tuple; the alpha channel keeps the underlying
# page image visible through the zone overlay.
ZONE_LABELS = [
    ZoneLabel('abstract', 'Abstract', ShapeStyle((230, 230, 150, 127))),
    ZoneLabel('affiliation', 'Affiliation', ShapeStyle((230, 150, 230, 127))),
    ZoneLabel('author', 'Author', ShapeStyle((255, 180, 180, 90))),
    ZoneLabel('bib_info', 'Bibliographic info', ShapeStyle((30, 100, 30, 127))),
    ZoneLabel('body', 'Body', ShapeStyle((180, 180, 255, 90))),
    ZoneLabel('body_content', 'Body', ShapeStyle((180, 180, 255, 90))),
    ZoneLabel('copyright', 'Copyright/License', ShapeStyle((103, 255, 52, 127))),
    ZoneLabel('correspondence', 'Correspondence', ShapeStyle((30, 30, 100, 127))),
    ZoneLabel('dates', 'Dates', ShapeStyle((100, 90, 0, 127))),
    ZoneLabel('editor', 'Editor', ShapeStyle((90, 30, 90, 127))),
    ZoneLabel('equation', 'Equation', ShapeStyle((140, 170, 0, 127))),
    ZoneLabel('equation_label', 'Equation label', ShapeStyle((90, 110, 0, 127))),
    ZoneLabel('figure', 'Figure', ShapeStyle((180, 130, 0, 127))),
    ZoneLabel('figure_caption', 'Figure caption', ShapeStyle((120, 80, 0, 127))),
    #ZoneLabel('footer', 'Footer', ShapeStyle((100, 100, 100, 127))),
    #ZoneLabel('header', 'Header', ShapeStyle((180, 180, 180, 127))),
    ZoneLabel('keywords', 'Keywords', ShapeStyle((30, 90, 90, 127))),
    ZoneLabel('page_number', 'Page number', ShapeStyle((100, 100, 100, 127))),
    ZoneLabel('references', 'References', ShapeStyle((150, 230, 230, 127))),
    ZoneLabel('table', 'Table', ShapeStyle((140, 10, 170, 127))),
    ZoneLabel('table_caption', 'Table caption', ShapeStyle((80, 5, 100, 127))),
    ZoneLabel('title', 'Title', ShapeStyle((180, 255, 180, 90))),
    ZoneLabel('type', 'Type', ShapeStyle((100, 30, 30, 127))),
    ZoneLabel('unknown', 'Unknown', ShapeStyle((245, 245, 245, 0))),
]
# Newly created zones start as 'unknown' (the last entry above).
DEFAULT_ZONE_LABEL = ZONE_LABELS[-1]
#SCROLL_STEP = 20
SCROLL_STEP = 30
# directories and files
# Per-user cache for rendered page images plus its sqlite index.
# NOTE(review): os.environ['HOME'] is unset on native Windows even though
# CONVERT below special-cases win32 -- confirm the intended platforms.
VAR_DIRECTORY = os.environ['HOME'] + os.path.sep + "segmedit_cache"
INDEX_FILE = VAR_DIRECTORY + os.path.sep + "index.sqlite"
# ImageMagick is used to rasterise source documents at this resolution.
CONVERT_DPI = 300
CONVERT = "convert" if sys.platform == "win32" else "/usr/bin/convert"
CONVERT_OPTIONS = "-density " + str(CONVERT_DPI)
XML_PAGE_DELTA_X = 0
# XML coordinates are in points (72 per inch); scale to rendered pixels.
XML_UNIT_RATIO = CONVERT_DPI / 72.0
#XML_PAGE_HEIGHT = 3299
#XML_PAGE_DELTA_X = 0
#XML_UNIT_RATIO = CONVERT_DPI / 300.0
UNDO_HISTORY_SIZE = None #unlimited
# Zoom limits/default (percent).
MAX_ZOOM = 200
DEFAULT_ZOOM = 52
# Overlay styles for zones on layers above, at, and below the active one,
# plus the dashed rubber-band selection outline.
HIGHER_LAYER_STYLE = ShapeStyle((64, 64, 64, 32), strokeWidth=0)
CURRENT_LAYER_STYLE = ShapeStyle((0, 255, 0, 64), (255, 0, 0), 1)
LOWER_LAYER_STYLE = ShapeStyle((32, 0, 255, 32), (0, 0, 64, 32), 1)
SELECTION_STYLE = ShapeStyle(None, strokeWidth=1, dashes=[1, 2])
NETWORK_ADDRESS = 'ciemniak.icm.edu.pl:7171'
if __name__ == "__main__":
    # Guard against accidental execution: this module only defines settings.
    # Parenthesized print works identically on Python 2 (single argument)
    # and fixes the SyntaxError the statement form raises on Python 3.
    print("This is a module, don't run this as a program")
| CeON/SegmEdit | SegmEditGUI/config.py | Python | gpl-3.0 | 3,901 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# rn.py - load network of routes from OSM
#
# Copyright (C) 2012, Stefan Schramm <mail@stefanschramm.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import stat
import datetime
from imposm.parser import OSMParser
from mako.lookup import TemplateLookup
class RouteNetwork(object):
    """In-memory model of a route network loaded from an OSM .pbf file.

    Holds the relations accepted by a filter function together with all
    their member ways and nodes, plus a reverse (child -> parent
    relations) index for lookups.
    """

    # dummy profile (overridden by concrete network configurations)
    profile = {
        'name': '',
        'shortname': '',
        'filter_text': '',
        'datasource': '',
        'stopplan': False,
        'maps': {}
    }
    pbf = ""
    mtime = None  # modification time of the data source
    relation_filter = lambda r: True
    makolookup = TemplateLookup(directories=[os.path.dirname(__file__) + '/templates'])

    def __init__(self):
        # the interesting objects will be stored in these 3 dicts:
        # dict of relations; index: relation id
        # each relation consists of (relation_id, tags, members)
        # where members consists of (member_id, member_type, role)
        self.relations = {}
        # dict of ways; index: way id
        # each way consists of (way_id, tags, node_ids)
        self.ways = {}
        # dict of nodes; index: node id
        # each node consists of (node_id, tags, coordinates)
        self.nodes = {}
        # additionally information about parent-relations is collected:
        # dict of parent relations; index: id of relation to get parent relations for
        self.parents = {}

    def load_network(self, pbf, filterfunction=lambda r: True):
        """Read the network from `pbf` in three parser passes: relations
        first, then the ways and nodes those relations reference."""
        # read data of public transport network
        # required for validating and displaying
        self.relation_filter = filterfunction
        # get modification time of data source
        # TODO: how to determine time when reading from multiple sources?
        self.mtime = datetime.datetime.fromtimestamp(os.stat(pbf)[stat.ST_MTIME])
        # first pass:
        # collect all interesting relations
        # (parenthesized print behaves identically on Python 2 for a
        # single value and keeps the file importable on Python 3)
        print("Collecting relations...")
        p = OSMParser(concurrency=4, relations_callback=self.relations_cb)
        p.parse(pbf)
        # second pass:
        # collect ways for these relations
        print("Collecting %i ways..." % len(self.ways))
        p = OSMParser(concurrency=4, ways_callback=self.ways_cb)
        p.parse(pbf)
        # collect nodes for collected relations and ways
        print("Collecting %i nodes..." % len(self.nodes))
        p = OSMParser(concurrency=4, nodes_callback=self.nodes_cb)
        p.parse(pbf)

    def relations_cb(self, relations):
        # callback: collect routes to validate
        for relation in relations:
            rid, tags, members = relation
            if self.relation_filter(relation):
                self.relations[rid] = relation
                for member in members:
                    mid, typ, role = member
                    # mark members as wanted for the later passes...
                    if typ == "node":
                        self.nodes[mid] = None
                    if typ == "way":
                        self.ways[mid] = None
                    # ...and record the reverse (parent) link
                    if (typ, mid) not in self.parents:
                        self.parents[(typ, mid)] = [("relation", rid)]
                    else:
                        self.parents[(typ, mid)].append(("relation", rid))

    def ways_cb(self, ways):
        # callback: collect interesting ways (only those marked wanted and
        # not yet filled in); `is None` replaces the non-idiomatic `== None`
        for way in ways:
            wid, tags, nodes = way
            if wid in self.ways and self.ways[wid] is None:
                self.ways[wid] = way
                for nid in nodes:
                    self.nodes[nid] = None

    def nodes_cb(self, nodes):
        # callback: collect interesting nodes
        for node in nodes:
            nid, tags, coords = node
            if nid in self.nodes and self.nodes[nid] is None:
                self.nodes[nid] = node

    def get_sortkey(self, relation):
        """Return a key "<route type>_<zero-padded ref>_<0|1>" so routes
        sort by type, then numerically by ref, with each route_master
        directly before its member routes of the same ref."""
        rid, tags, members = relation
        key = ""
        if "route_master" in tags:
            key += tags["route_master"]
        elif "route" in tags:
            key += tags["route"]
        key += "_"
        if "ref" in tags:
            # Zero-pad every number embedded in the ref so lexicographic
            # order matches numeric order ("2" < "10").  A single re.sub
            # pass replaces each match in place; the previous approach
            # (str.replace over set(re.findall(...))) corrupted overlapping
            # numbers (e.g. "1" inside "12") and made the key depend on the
            # set's iteration order.
            key += re.sub("[0-9]+",
                          lambda m: "%010i" % int(m.group(0)),
                          tags["ref"])
        key += "_"
        if "type" in tags and tags["type"] == "route_master":
            # for same refs put route_master at top
            key += "0"
        else:
            key += "1"
        return key
| stefanschramm/osm_oepnv_validator | rn.py | Python | gpl-3.0 | 4,325 |
#author :haiyfu
#date:April 14
#description:
#contact:haiyangfu512@gmail.com
"""
This little part is to check how many different values in
a column and store the unqiue values in a list.
For FCBF initially.
The last column is the class .
"""
from sys import argv
#only count the target file and return
#a list structure which contains the detail
#information,like [23, [[1,23],[11,233]], 34 ]
#Here is the correspond meanings
#[attribure_number,[ [first-column-different-values] [2nd-df-val] ],line_num]
def rc_gn(sn):
    """Scan the comma-separated file *sn* and collect each column's values.

    Returns ``[n_columns, per_column_values, n_lines]`` where
    ``per_column_values`` is a list (one entry per column) of sorted
    unique values.  Scanning stops at the first line with fewer fields
    than the header.
    """
    src = open(sn)
    n_cols = len(src.readline().split(","))
    # Rewind and seed every column's value list from the first line.
    src.seek(0, 0)
    first_fields = src.readline().strip("\r \n ").split(",")
    columns = [[field] for field in first_fields]
    lines_seen = 0
    for raw_line in src:
        fields = raw_line.strip("\n \r").split(",")
        # A short (truncated) line terminates the scan.
        if len(fields) < n_cols:
            break
        for position, value in enumerate(fields):
            if value not in columns[position]:
                columns[position].append(value)
                columns[position].sort()
        lines_seen += 1
    src.close()
    # +1 accounts for the header line read before the loop.
    return [n_cols, columns, lines_seen + 1]
def wrt_rc(rc,tn):
    """Write the column summary produced by rc_gn to file `tn`.

    Output format (FCBF-style): first line is the class column (the last
    column) as "<count>,<v1>,<v2>,...", second line the attribute count,
    then one "A<i> - v1,v2,..." line per attribute column.
    """
    #print rc
    ft=open(tn,"w")
    #class info
    ft.write(str(len(rc[1][-1]))+","+",".join(rc[1][-1])+".\n" )
    #attribure number
    ft.write(str( rc[0]-1 )+"\n")
    #every attribure info
    for x in range(rc[0]-1):
        sl="A"+str(x+1)+" - "+",".join(rc[1][x])+".\n"
        ft.write(sl)
    ft.close()
if __name__=="__main__":
    # usage: rc_generator.py <input csv> <output summary file>
    script_nm,src_file,out_file=argv
    wrt_rc(rc_gn(src_file),out_file)
| haiy/XF_PRISM | src/XF-Prism/rc_generator.py | Python | gpl-3.0 | 1,587 |
from paste.session import make_session_middleware
from webapp.app import *
from webapp.check import Check, environ_has
Request.session = property(lambda self: self.environ['paste.session.factory']())
def session_has(*args, **kwargs):
    """
    Check for the presence and, optionally, value of a session variable.

    Call with exactly one positional name to test presence, or exactly one
    keyword pair to compare the stored value with ``==``.

    NOTE(review): an earlier docstring claimed callable values would be
    invoked as (app, var, value); the implementation only compares
    literally, so that claim has been removed.
    """
    if len(args) > 1 or len(kwargs) > 1 or (len(args) and len(kwargs)) or (not len(args) and not len(kwargs)):
        raise ValueError("Must provide one and only one session variable to test. Consider using session_has_any or session_has_all.")
    elif len(args):
        var = args[0]
        return Check(lambda app: var in app.request.session)
    else:
        # Python 2 idiom: dict.items() returns a list, so [0] is valid here.
        var, value = kwargs.items()[0]
        return Check(lambda app: app.request.session.get(var) == value)
def environ_has_any(*args, **kwargs):
    """
    Check whether ANY of the given session variables is present (positional
    names) or matches its expected value (keyword pairs).

    NOTE(review): despite the name, this checks ``request.session`` -- not
    the WSGI environ -- and ``session_has``'s error message refers callers
    to ``session_has_any``.  The alias below supplies the expected name
    without breaking existing callers of ``environ_has_any``.
    """
    return Check(lambda app: any(arg in app.request.session for arg in args) or any(app.request.session.get(key) == value for (key, value) in kwargs.iteritems()))

# Backward-compatible alias with the name the rest of this module refers to.
session_has_any = environ_has_any
def session_has_all(*args, **kwargs):
    """
    Check that every positional name is present in the session and that
    every keyword pair matches the stored value.
    """
    def _check(app):
        session = app.request.session
        if not all(name in session for name in args):
            return False
        return all(session.get(key) == value for (key, value) in kwargs.iteritems())
    return Check(_check)
class Sessioned(object):
    """Mixin that wraps the WSGI app in Paste's session middleware, which
    backs the ``Request.session`` property defined at module level."""

    def __middleware_factory__(self, app):
        # Wrap with paste.session before delegating to the next factory in the MRO.
        return super(Sessioned, self).__middleware_factory__(make_session_middleware(app, {}))
| sj26/webapp | build/lib/webapp/session.py | Python | gpl-3.0 | 1,477 |
from xivo_admin import BasePlugin
from views import home
class XiVOHome(BasePlugin):
    """Admin plugin that registers the 'home' views blueprint on the app."""

    def load(self, app):
        # Attach the blueprint imported from views.home to the application.
        app.register_blueprint(home)
| sboily/xivo-admin-ivr-plugins | xivo_ivr/plugins/home/load.py | Python | gpl-3.0 | 148 |
#!/usr/bin/env python3
import sys
import os
import deadpool_dfa
import phoenixAES
import binascii
def processinput(iblock, blocksize):
    """Write the input block (hex-decoded, repeated 4x) to the file 'foo'
    and return the extra argv the target binary needs to read it.

    :param iblock: integer plaintext/ciphertext block
    :param blocksize: block size in bytes (hex width is 2*blocksize)
    :return: (stdin, extra argv list) as expected by deadpool_dfa
    """
    #p=b'%0*x' % (2*blocksize, iblock) # Requires python3.5
    p = ('%0*x' % (2 * blocksize, iblock)).encode('utf8')
    # 'with' closes the handle before the target reads the file; the
    # original open(...).write(...) leaked the file object.
    with open('foo', 'wb') as payload:
        payload.write(binascii.unhexlify(p) * 4)
    return (None, ['-f', '-E', 'foo'])
def processoutput(output, blocksize):
    """Interpret the first 16 bytes of the target's output as a big-endian
    integer (the AES output block)."""
    leading_block = output[:16]
    return int(binascii.hexlify(leading_block), 16)
# Patch drmless to always return decrypted version:
# flip the byte at offset 0x6C18 to 0x01 in a one-off golden copy
# (only done once; the .gold file is reused on later runs).
if not os.path.isfile('drmless.gold'):
    with open('drmless', 'rb') as finput, open('drmless.gold', 'wb') as foutput:
        foutput.write(finput.read(0x6C18)+b'\x01'+finput.read()[1:])
# Fault-injection campaign: NOP-out instructions in the target and collect
# faulty ciphertexts until phoenixAES has enough traces for the DFA.
engine=deadpool_dfa.Acquisition(targetbin='./drmless', targetdata='./drmless', goldendata='drmless.gold',
    dfa=phoenixAES, processinput=processinput, processoutput=processoutput, maxleaf=2048, faults=[('nop', lambda x: 0x90)], verbose=2)
tracefiles_sets=engine.run()
# tracefiles_sets[1] holds the traces faulted at the right round; stop at
# the first one that yields the key.
for trace in tracefiles_sets[1]:
    if phoenixAES.crack_file(trace, encrypt=False):
        break
| SideChannelMarvels/Deadpool | wbs_aes_plaidctf2013/DFA/attack_plaidctf.py | Python | gpl-3.0 | 1,049 |
import math

# Sentinel marking a slot whose key was removed (CLRS exercise 11.4-2).
Deleted = math.inf

def hash_delete(T, k, h):
    """Remove key k from open-addressing table T using probe function h.

    Probes h(k, 0, m), h(k, 1, m), ... and stops at an empty slot or after
    m probes; a found key is overwritten with the Deleted sentinel.
    """
    m = T.length
    for i in range(m):
        j = h(k, i, m)
        if T[j] == k:
            T[j] = Deleted
            return
        if T[j] is None:
            # an empty slot means k was never inserted along this probe path
            return
def hash_insert_(T, k, h):
    """Insert key k into open-addressing table T using probe function h.

    Reuses slots marked with the Deleted sentinel.  Returns the slot index
    used; raises RuntimeError after m unsuccessful probes (table full).
    """
    m = T.length
    for i in range(m):
        j = h(k, i, m)
        slot = T[j]
        if slot is None or slot is Deleted:
            T[j] = k
            return j
    raise RuntimeError('hash table overflow')
| wojtask/CormenPy | src/chapter11/exercise11_4_2.py | Python | gpl-3.0 | 565 |
#!/usr/bin/python
import numpy as np
from sklearn.metrics import mutual_info_score
import MDAnalysis
import os
import re
import math
import sys
from itertools import combinations_with_replacement,permutations
from concurrent.futures import ProcessPoolExecutor, Future, wait
usecpus = 10#how many cores to use
# number of trajectory frames to analyse per residue pair
frms_num = 10000
# module-level trajectory, shared (read-only) by the worker processes
u = MDAnalysis.Universe('ini.pdb','allpdb.trr')
# final merged output table
f = open('CA-out.txt', 'w')
# 352x352 coupling matrix filled in by main()
b = np.zeros((352,352))
#for i in range(0,352):
#    for j in range(0,352):
#        b[i][j] = 100
def new_dihedral(p):
    """Return the dihedral (torsion) angle in degrees for four points.

    :param p: sequence of four 3-D coordinates (p[0]..p[3])
    :return: signed angle in (-180, 180]
    """
    b0 = p[0] - p[1]
    b1 = p[2] - p[1]
    b2 = p[3] - p[2]
    # project b0 and b2 onto the plane perpendicular to the central bond
    b1 = b1 / np.linalg.norm(b1)
    v = b0 - np.dot(b0, b1) * b1
    w = b2 - np.dot(b2, b1) * b1
    cos_term = np.dot(v, w)
    sin_term = np.dot(np.cross(b1, v), w)
    return np.degrees(np.arctan2(sin_term, cos_term))
"""get a list of transition time points in sequential order"""
def trans_time(X):
    """Return the basin-crossing times of series X (via t_time on its peaks)."""
    # 1-D neighbour search radius used to filter out thermal fluctuations;
    # assumes symmetric peaks (an asymmetric case would need separate
    # left/right radii).
    nbr = 10
    peaks = []
    for i in range(nbr, len(X) - nbr):
        is_peak = all(
            X[i] >= X[i - j] and X[i] >= X[i + j] for j in range(1, nbr + 1)
        )
        if is_peak:
            peaks.append(i + 1)
    return t_time(X, peaks)
def t_time(X, t_t):
    """Return the indices i where the nearest peak (from t_t) changes
    between i and i+1, i.e. the basin-boundary crossing times."""
    rg = 1
    crossings = []
    for i in range(len(X)):
        nearest_here = min(t_t, key=lambda t: abs(t - i))
        nearest_next = min(t_t, key=lambda t: abs(t - (i + rg)))
        if nearest_here != nearest_next:
            crossings.append(i)
    return crossings
"""
transition time function T(X, n) to get the time of
N th transition of a time series X time unit is 100 picosecond
"""
def trans_time_n(X, n, tx):
    """Return the time of the n-th transition (1-based) from the
    transition-time list tx; X is unused but kept for interface parity."""
    return tx[n - 1]
"""
waiting time function W(X, t) to get the time interval
from t until the next transition/peak of X
"""
def wait_time(X, t, tx):
    """Return the interval from time t until the next transition in tx.

    Returns 0 when t is at or past the last transition.  X is unused but
    kept for interface parity with the other helpers.
    """
    #if t < 0 :
    #    sys.exit("Error: time needs to be a positive number")
    for i in range(len(tx) - 1):
        # t inside [tx[i], tx[i+1]): the next transition is tx[i+1]
        if tx[i] <= t < tx[i + 1]:
            return tx[i + 1] - t
        # t before this transition entirely
        if t < tx[i]:
            return tx[i] - t
    return 0
"""
get mean persistence time of X
"""
def tau_p(X, tx):
    """Return the mean persistence time of series X given its transition
    times tx (observation window ends at the last transition)."""
    observation_time = tx[-1]
    total_wait = 0
    for n in range(1, len(tx) + 1):
        total_wait += wait_time(X, trans_time_n(X, n, tx), tx)
    return math.pow(float(total_wait), 2) / float(2 * observation_time)
"""
get mean exchange time of X following the (i+1)th transition in Y
in the cases whereas after the transition time of Y,
no transtion occurred in X, wait time is assigned to 0
"""
def tau_ex(X, Y, tx, ty):
    """Return the mean exchange time of X following transitions in Y.

    Waits that find no later transition in X contribute 0 (see wait_time).
    """
    observation_time = ty[-1]
    total = 0
    for i in range(len(ty) - 1):
        # NOTE(review): at i == 0, trans_time_n(Y, 0, ty) indexes ty[-1]
        # (the last transition); preserved exactly as in the original.
        w1 = wait_time(X, trans_time_n(Y, i + 1, ty), tx)
        w2 = wait_time(Y, trans_time_n(Y, i, ty), ty)
        total += w1 * w2
    return float(total) / float(observation_time)
def get_ij_ca(res_i,res_j,fs):
    """Compute the CA coupling between the phi dihedrals of residues res_i
    and res_j over the first fs trajectory frames, and write one
    tab-separated line to the file '<res_i>-<res_j>.ax'.

    Reads the module-level MDAnalysis universe `u`; runs in a worker
    process (see main()).
    """
    protein = u.select_atoms('backbone')
    phi_sel = protein.residues[res_i].phi_selection()
    phi_sel2 = protein.residues[res_j].phi_selection()
    resi_phi = []
    resj_phi = []
    # collect the two phi time series frame by frame
    for ts in u.trajectory:
        frame = ts.frame
        if frame >= fs:
            break
        k = new_dihedral(phi_sel.positions)
        resi_phi.append(k)
        p = new_dihedral(phi_sel2.positions)
        resj_phi.append(p)
    #get TX, TY, lists of transition time and pass them
    X = np.array(resi_phi)
    Y = np.array(resj_phi)
    TX = trans_time(X)
    TY = trans_time(Y)
    # CA = -ln(tau_ex / tau_p): coupling of X's exchange to Y's transitions
    CA = (-1) * math.log(tau_ex(X, Y, TX, TY)/tau_p(X, TX))
    #CA = get_ca(np.array(resi_phi),np.array(resj_phi),TX,TY)
    pair = str(res_i) + '-' + str(res_j) + '.ax'
    all = str(res_i) + '\t' + str(res_j) + '\t' + str(CA) + '\n'
    f1 = open(pair, 'w')
    f1.write(all)
    f1.close()
def main():
    """Fan the per-pair CA computations out over worker processes, then
    merge the per-pair .ax files into the dense 352x352 table CA-out.txt.

    Uses the module-level globals usecpus, frms_num, b (matrix) and f
    (output file).
    """
    with ProcessPoolExecutor(max_workers=usecpus) as executer:
        a = []
        # one task per ordered residue pair (CA is not symmetric)
        for i, j in permutations(range(2,8), 2):
            future = executer.submit(get_ij_ca, i, j, frms_num)
            a.append(future)
        wait(a)
    #join small files together
    os.system('cat *.ax > temp-all')
    f2 = open("temp-all")
    for line in f2.readlines():
        a = re.split('\t|\n',line)
        s0 = int(a[0])
        s1 = int(a[1])
        s2 = float(a[2])
        b[s0][s1] = s2
        #b[s1][s0] = s2
    f2.close()
    # dump the full (mostly zero) matrix as "i<TAB>j<TAB>value" lines
    for i in range(0,352):
        for j in range(0,352):
            p = str(i) + '\t' + str(j) + '\t' + str(b[i][j]) + '\n'
            f.write(p)
    f.close()
    # keep the per-pair files for debugging, drop the merged temp file
    os.system('mv *.ax crap/')
    os.system('rm temp-all')

if __name__ == '__main__':
    main()
| id4zs2008/blob-dyn | ca-multicore.py | Python | gpl-3.0 | 4,857 |
# -*- coding: utf-8 -*-
#
# tm1640-rpi documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 12 19:52:17 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/python/'))
# hack for readthedocs to cause it to run doxygen first
# (RTD only runs Sphinx, but the breathe extension below needs the
# doxygen XML output to exist before the build starts)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    from subprocess import call
    call('doxygen')
    # drop the name again so it does not leak into the Sphinx config namespace
    del call
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'breathe']
breathe_projects = {'tm1640-rpi': 'doxygen-xml/'}
breathe_default_project = 'tm1640-rpi'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tm1640-rpi'
copyright = u'2013, Michael Farrell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tm1640-rpidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tm1640-rpi.tex', u'tm1640-rpi Documentation',
u'Michael Farrell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
[u'Michael Farrell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
u'Michael Farrell', 'tm1640-rpi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'both'
| micolous/tm1640-rpi | doc/source/conf.py | Python | gpl-3.0 | 8,144 |
###############################################################################
import numpy
import time
###############################################################################
def matrixFactorization(R, P, Q, K, epochMax=1000, alpha=0.0002, beta=0.02):
    """Factorize R ~ P.Q^T by stochastic gradient descent with L2 regularization.

    Only non-zero entries of R are treated as observations.  P and Q are
    updated in place.

    :param R: observed ratings matrix (N x M); zeros mean "unknown"
    :param P: initial user factors (N x K), modified in place
    :param Q: initial item factors (M x K), modified in place
    :param K: number of latent factors
    :param epochMax: maximum number of SGD sweeps
    :param alpha: learning rate
    :param beta: L2 regularization weight
    :return: (P, Q) with Q back in its original (M x K) layout
    """
    # FIX: xrange is Python 2 only while the rest of this module uses
    # Python 3 print(); range behaves identically here on both versions.
    Q = Q.T
    for step in range(epochMax):
        for i in range(len(R)):
            for j in range(len(R[i])):
                if R[i][j] > 0:
                    eij = R[i][j] - numpy.dot(P[i,:],Q[:,j])
                    for k in range(K):
                        P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])
                        Q[k][j] = Q[k][j] + alpha * (2 * eij * P[i][k] - beta * Q[k][j])
        # total regularized squared error over the observed entries
        e = 0
        for i in range(len(R)):
            for j in range(len(R[i])):
                if R[i][j] > 0:
                    e = e + pow(R[i][j] - numpy.dot(P[i,:],Q[:,j]), 2)
                    for k in range(K):
                        e = e + (beta/2) * ( pow(P[i][k],2) + pow(Q[k][j],2) )
        if e < 0.001:
            break
    return P, Q.T
###########################################################################
def createUserRow(utp):
    """Build the current user's 1 x nextSongIndex play-count row.

    Every song from the user's feed that has a known index gets a
    hard-coded estimated play count of 20.
    """
    user_row = numpy.zeros((1, utp.nextSongIndex), dtype=float)
    feed = utp.user_feed_artists_tracks + utp.user_feed_tracks
    for song_id in feed:
        if song_id in utp.echoSongIdToIdxMap:
            # hardcoded estimated playcount for the current user
            user_row[0][utp.echoSongIdToIdxMap[song_id]] = 20
    return user_row
###############################################################################
def createPreferenceMatrix(utp):
    """Build the (users+1) x songs play-count matrix R.

    Rows 0..nextUserIndex-1 come from utp.user_track_like; the extra last
    row is the current user's row from createUserRow().
    """
    R = numpy.zeros(shape=(utp.nextUserIndex+1, utp.nextSongIndex), dtype=float)
    print( "Create preference matrix R %d %d" %(utp.nextUserIndex+1, utp.nextSongIndex) )
    # FIX: time.clock() was removed in Python 3.8; time.time() exists on
    # both Python 2 and 3 and is fine for this coarse progress timing.
    t0 = time.time()
    for userId in utp.user_track_like:
        for songId in utp.user_track_like[userId]:
            R[userId][songId] = utp.user_track_like[userId][songId]
    print("Time spent %s" % str(time.time() - t0))
    # add user row as last
    R[utp.nextUserIndex][:] = createUserRow(utp)
    return R
###############################################################################
def matrixFactorize(utp):
    """Run the full recommendation pipeline for `utp`.

    Builds the preference matrix, factorizes it with K=2 latent factors,
    and stores the current user's predicted scores above the threshold in
    utp.user_recommendations_idxs (song index -> predicted score).
    """
    R = createPreferenceMatrix(utp)
    N = len(R)
    M = len(R[0])
    K = 2
    P = numpy.random.rand(N,K).astype('f')
    Q = numpy.random.rand(M,K).astype('f')
    print("Matrix factorization")
    # FIX: time.clock() was removed in Python 3.8; use time.time() instead.
    t0 = time.time()
    nP, nQ = matrixFactorization(R, P, Q, K)
    print("Time spent %s" % str(time.time() - t0))
    nR = numpy.dot(nP, nQ.T)
    print(nR[utp.nextUserIndex][:]) # last user is the goal user
    songIdx = 0
    utp.user_recommendations_idxs.clear()
    for element in nR[utp.nextUserIndex]:
        if element > 1: # greater than threshold TODO ask user?
            utp.user_recommendations_idxs[songIdx] = element
        else:
            print("Skip songIdx %d with value %d" %(songIdx, element))
        songIdx += 1
    print(utp.user_recommendations_idxs)
############################################################################### | mgodek/music_recommendation_system | matrixFactor.py | Python | gpl-3.0 | 3,252 |
import json
from bson.errors import InvalidId
from flask import Blueprint, jsonify, request, g
item_api = Blueprint('itemApi', __name__)
def get_item_as_object(item) -> dict:
    """Convert a Mongo item document into a JSON-serializable dict.

    The ObjectId is stringified; 'isRecommended' is copied only when the
    document actually has it.
    """
    plain_fields = ("name", "description", "imageURL", "price",
                    "calories", "category", "tags")
    serialized = {"_id": str(item['_id'])}
    for field in plain_fields:
        serialized[field] = item[field]
    if 'isRecommended' in item:
        serialized['isRecommended'] = item['isRecommended']
    return serialized
@item_api.route('/item', methods=['GET'])
def get_all_items() -> tuple:
    """
    swagger_from_file: ../swagger/item/getItems.yml
    returns all the items as a json array
    :return:
    """
    from hopkin.models.items import Item
    serialized = [get_item_as_object(item) for item in Item.get_all()]
    return jsonify({'data': {'items': serialized}})
@item_api.route('/item/id/<item_id>', methods=['GET'])
def get_item_by_id(item_id) -> tuple:
    """
    swagger_from_file: ../swagger/item/getItem.yml
    returns one item as a json array
    :return:
    """
    from hopkin.models.items import Item
    found = Item.get_by_id(item_id)
    return jsonify({'data': {'item': get_item_as_object(found)}})
@item_api.route('/item/category/<category>', methods=['GET'])
def get_item_by_category(category) -> tuple:
    """
    swagger_from_file: ../swagger/item/getItemsByCategory.yml
    returns all the items in a category as a json array
    :return:
    """
    from hopkin.models.items import Item
    matches = [get_item_as_object(item) for item in Item.get_by_category(category)]
    return jsonify({'data': {'items': matches}})
@item_api.route('/item/category/<category>/count', methods=['GET'])
def get_category_count(category) -> tuple:
    """
    swagger_from_file: ../swagger/item/getNumItemsInCat.yml
    Returns the number items in that category
    :param category:
    :return:
    """
    # Reuse the category endpoint and count the items in its JSON payload.
    response = get_item_by_category(category)
    payload = json.loads(response.data)
    return jsonify({'data': {'count': len(payload['data']['items'])}})
@item_api.route('/item/search', methods=['GET'])
def search_item() -> tuple:
    """
    swagger_from_file: ../swagger/item/searchItem.yml
    Searches items: queries of up to 3 characters search names only,
    longer queries also search tags.  Results are de-duplicated by id.
    :return:
    """
    from hopkin.models.items import Item
    query: str = request.args['q']
    if not query:
        return jsonify({'error': 'no search results provided'})
    # (the previous query.title() call was dead code: only query.lower()
    # was ever passed on, and title() does not change its result)
    items = list(Item.get_by_name_search(query.lower()))
    if len(query) > 3:
        items = items + list(Item.get_by_tag_starts_with(query.lower()))
    items_list = []
    seen_ids = set()  # set membership is O(1) vs the previous O(n) list scan
    for item in items:
        item_id = str(item['_id'])
        if item_id not in seen_ids:
            # FIX: building the dict inline crashed with KeyError for items
            # without 'isRecommended'; get_item_as_object handles it as an
            # optional field, consistent with the other endpoints.
            items_list.append(get_item_as_object(item))
            seen_ids.add(item_id)
    return jsonify({'data': {'items': items_list}})
@item_api.route('/rate/item/<itemid>', methods=['GET'])
def get_rating(itemid: str) -> tuple:
    """
    swagger_from_file: ../swagger/item/getItemRating.yml
    Gets a user rating of an item
    :param itemid:
    :return:
    """
    from hopkin.models.ratings import Rating
    rating = Rating.get_rating(itemid, str(g.user_id))
    if rating is None:
        # NOTE(review): the doubly nested 'error' key looks unintentional,
        # but is preserved because clients may depend on this exact shape.
        return jsonify({
            'error': {'error': {'message': 'No Rating for item'}}
        })
    return jsonify({'data': {'rating': {
        'item_id': rating['item_id'],
        'rating': rating['rating'],
    }}})
@item_api.route('/rate/item', methods=['POST'])
def rate_item() -> tuple:
    """
    Adds or updates the authenticated user's rating of an item.
    :return:
    """
    from hopkin.models.items import Item
    from hopkin.models.ratings import Rating
    if request.json is None:
        return jsonify({'error': 'invalid request'})
    try:
        item_id = Item.get_by_id(request.json['itemid'])
        if item_id is None:
            return jsonify({'error': f"No item with id: {request.json['itemid']} found"}), 400
        elif request.json['rating'] > 5:
            return jsonify({'error': 'rating can\'t be grater than 5'}), 400
    except InvalidId:
        return jsonify({'error': 'Invalid item id format'}), 400
    user_id = str(g.user_id)
    rating = Rating.get_rating(request.json['itemid'], user_id)
    if rating is None:
        Rating.save({
            'item_id': request.json['itemid'],
            'user_id': user_id,
            'rating': request.json['rating']
        })
        return jsonify({'data': {'success': True, 'message': 'new rating added'}})
    rating['item_id'] = request.json['itemid']
    rating['user_id'] = user_id
    # BUG FIX: the new rating value was never written before update(), so
    # "rating updated" responses left the stored rating unchanged.
    rating['rating'] = request.json['rating']
    Rating.update(rating)
    return jsonify({'data': {'success': True, 'message': 'rating updated'}})
@item_api.route('/admin/item/add', methods=['POST'])
def add_new_item() -> tuple:
    """
    swagger_from_file: ../swagger/item/itemAdd.yml
    adds an item to the database and returns it in a JSON object
    :return:
    """
    from hopkin.models.items import Item
    if request.json is not None and g.is_admin:
        new_item = {
            'name': request.json['name'],
            'description': request.json['description'],
            'imageURL': request.json['imageURL'],
            'price': request.json['price'],
            'calories': request.json['calories'],
            'category': request.json['category'],
            'tags': request.json['tags'],
            "isRecommended": request.json['isRecommended']
        }
        new_item_id = Item.insert(new_item)
        return jsonify({'data': {'item': request.json, 'itemId': str(new_item_id)}})
    # BUG FIX: 'invalid item' + request.json raised TypeError (str + dict,
    # or str + None); return the offending payload as its own field instead.
    return jsonify({'error': 'invalid item', 'item': request.json}), 403
@item_api.route('/admin/item/delete/<item_id>', methods=['POST'])
def delete_item(item_id):
    """
    swagger_from_file: ../swagger/item/deleteItem.yml
    deletes the selected item from the database
    :return:
    """
    from hopkin.models.items import Item
    item = Item.get_by_id(str(item_id))
    # guard clause: unknown id or non-admin caller gets the error payload
    if item is None or not g.is_admin:
        return jsonify({'error': 'No item found with id ' + item_id})
    Item.remove(item_id)
    return jsonify({'data': {'success': True}})
@item_api.route('/admin/item/update', methods=['POST'])
def update_item():
    """
    swagger_from_file: ../swagger/item/updateItem.yml
    updated the selected item in the database
    :return:
    """
    from hopkin.models.items import Item
    if request.json is not None:
        # NOTE(review): unlike the other /admin routes, this one never
        # checks g.is_admin before writing -- confirm whether intended.
        item_update = Item.get_by_id(request.json['_id'])
        item_update['calories'] = request.json['calories']
        item_update['category'] = request.json['category']
        item_update['description'] = request.json['description']
        # will be updated to get base64 image
        item_update['imageURL'] = request.json['imageURL']
        item_update['name'] = request.json['name']
        item_update['price'] = request.json['price']
        item_update['tags'] = request.json['tags']
        item_update['isRecommended'] = request.json['isRecommended']
        Item.save(item_update)
        return jsonify({'data': {'message': 'Updated with item id: ' + str(item_update['_id']),
                                 'mongo_id': str(item_update['_id'])}
                        })
    return jsonify({'error': 'item not updated'})
@item_api.route('/item/recommendations', methods=['GET'])
def get_recommendations() -> tuple:
    """
    swagger_from_file: ../swagger/item/getRecommended.yml
    returns all the items as a json array
    :return:
    """
    from hopkin.models.items import Item
    recommended = [get_item_as_object(item) for item in Item.get_recommended()]
    return jsonify({'data': {'items': recommended}})
| project-hopkins/Westworld | hopkin/routes/items.py | Python | gpl-3.0 | 8,707 |
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import Counter, deque
from functools import partial
import logging
from typing import Counter as CounterT
from typing import Deque, Dict, Iterable, List, Optional
from swh.model.model import BaseModel
from swh.storage import get_storage
logger = logging.getLogger(__name__)
class RateQueue:
    """Sliding window of the last `size` insertion outcomes; the limit is
    reached once it holds more than `max_errors` failures."""

    def __init__(self, size: int, max_errors: int):
        assert size > max_errors
        self._size = size
        self._max_errors = max_errors
        # deque with maxlen drops the oldest outcome automatically
        self._errors: Deque[bool] = deque(maxlen=size)

    def add_ok(self, n_ok: int = 1) -> None:
        """Record n_ok successful insertions."""
        self._errors.extend(False for _ in range(n_ok))

    def add_error(self, n_error: int = 1) -> None:
        """Record n_error failed insertions."""
        self._errors.extend(True for _ in range(n_error))

    def limit_reached(self) -> bool:
        """True when the window holds more than max_errors failures."""
        return sum(self._errors) > self._max_errors

    def reset(self):
        # mainly for testing purpose
        self._errors.clear()
class TenaciousProxyStorage:
    """Storage proxy that have a tenacious insertion behavior.

    When an xxx_add method is called, it's first attempted as is against the backend
    storage. If a failure occurs, split the list of inserted objects in pieces until
    erroneous objects have been identified, so all the valid objects are guaranteed to
    be inserted.

    Also provides a error-rate limit feature: if more than n errors occurred during the
    insertion of the last p (window_size) objects, stop accepting any insertion.

    The number of insertion retries for a single object can be specified via
    the 'retries' parameter.

    This proxy is mainly intended to be used in a replayer configuration (aka a
    mirror stack), where insertion errors are mostly unexpected (which explains
    the low default ratio errors/window_size).

    Conversely, it should not be used in a loader configuration, as it may
    drop objects without stopping the loader, which leads to holes in the graph.

    Deployments using this proxy should carefully monitor their logs to check any
    failure is expected (because the failed object is corrupted),
    not because of transient errors or issues with the storage backend.

    Sample configuration use case for tenacious storage:

    .. code-block:: yaml

        storage:
          cls: tenacious
          storage:
            cls: remote
            args: http://storage.internal.staging.swh.network:5002/
          error-rate-limit:
            errors: 10
            window_size: 1000
    """

    # maps proxied add-method name -> object type used in result counters
    tenacious_methods: Dict[str, str] = {
        "content_add": "content",
        "content_add_metadata": "content",
        "skipped_content_add": "skipped_content",
        "directory_add": "directory",
        "revision_add": "revision",
        "extid_add": "extid",
        "release_add": "release",
        "snapshot_add": "snapshot",
        "origin_add": "origin",
    }

    def __init__(
        self,
        storage,
        error_rate_limit: Optional[Dict[str, int]] = None,
        retries: int = 3,
    ):
        self.storage = get_storage(**storage)
        if error_rate_limit is None:
            error_rate_limit = {"errors": 10, "window_size": 1000}
        assert "errors" in error_rate_limit
        assert "window_size" in error_rate_limit
        self.rate_queue = RateQueue(
            size=error_rate_limit["window_size"], max_errors=error_rate_limit["errors"],
        )
        self._single_object_retries: int = retries

    def __getattr__(self, key):
        # intercept the known add-methods; everything else goes straight
        # to the wrapped storage
        if key in self.tenacious_methods:
            return partial(self._tenacious_add, key)
        return getattr(self.storage, key)

    def _tenacious_add(self, func_name, objects: Iterable[BaseModel]) -> Dict[str, int]:
        """Enqueue objects to write to the storage. This checks if the queue's
        threshold is hit. If it is actually write those to the storage.

        """
        add_function = getattr(self.storage, func_name)
        object_type = self.tenacious_methods[func_name]

        # list of lists of objects; note this to_add list is consumed from the tail
        to_add: List[List[BaseModel]] = [list(objects)]
        n_objs: int = len(to_add[0])

        results: CounterT[str] = Counter()
        retries: int = self._single_object_retries

        while to_add:
            if self.rate_queue.limit_reached():
                # FIX: use the module logger (not the root logger via
                # logging.error) and build the shared message only once.
                msg = (
                    "Too many insertion errors have been detected; "
                    "disabling insertions"
                )
                logger.error(msg)
                raise RuntimeError(msg)
            objs = to_add.pop()
            try:
                results.update(add_function(objs))
                self.rate_queue.add_ok(len(objs))
            except Exception as exc:
                if len(objs) > 1:
                    logger.info(
                        f"{func_name}: failed to insert a batch of "
                        f"{len(objs)} {object_type} objects, splitting"
                    )
                    # reinsert objs split in 2 parts at the end of to_add
                    to_add.append(objs[(len(objs) // 2) :])
                    to_add.append(objs[: (len(objs) // 2)])
                    # each time we append a batch in the to_add bag, reset the
                    # one-object-batch retries counter
                    retries = self._single_object_retries
                else:
                    retries -= 1
                    if retries:
                        logger.info(
                            f"{func_name}: failed to insert an {object_type}, retrying"
                        )
                        # give it another chance
                        to_add.append(objs)
                    else:
                        logger.error(
                            f"{func_name}: failed to insert an object, "
                            f"excluding {objs} (from a batch of {n_objs})"
                        )
                        logger.exception(f"Exception was: {exc}")
                        results.update({f"{object_type}:add:errors": 1})
                        self.rate_queue.add_error()
                        # reset the retries counter (needed in case the next
                        # batch is also 1 element only)
                        retries = self._single_object_retries
        return dict(results)

    def reset(self):
        self.rate_queue.reset()
| SoftwareHeritage/swh-storage | swh/storage/proxies/tenacious.py | Python | gpl-3.0 | 6,681 |
"""
WSGI config for ShroomsAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ShroomsAPI.settings")
application = get_wsgi_application()
| AtelierSoude/shrooms | ShroomsAPI/ShroomsAPI/wsgi.py | Python | gpl-3.0 | 398 |
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: UHD FFT Qt
# Author: Johannes Demel
# Generated: Wed Jan 29 13:51:16 2014
##################################################
from PyQt4 import Qt
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import PyQt4.Qwt5 as Qwt
import sip
import sys
import threading
import time
class uhd_fft_qt(gr.top_block, Qt.QWidget):
    """GRC-generated flow graph: a UHD (USRP) source feeding a Qt GUI sink
    with FFT, waterfall, time-domain and constellation plots, plus toolbar
    controls for sample rate, gain and center frequency.

    NOTE(review): this file was auto-generated by GNU Radio Companion (see
    file header).  The repetitive variable getters/setters below mirror the
    .grc variable-dependency graph and are normally regenerated from the
    .grc file rather than edited by hand.
    """

    def __init__(self):
        gr.top_block.__init__(self, "UHD FFT Qt")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("UHD FFT Qt")
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        # Scrollable top-level Qt layout that hosts every widget below.
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)
        # Restore the window geometry persisted by closeEvent() on a prior run.
        self.settings = Qt.QSettings("GNU Radio", "uhd_fft_qt")
        self.restoreGeometry(self.settings.value("geometry").toByteArray())
        ##################################################
        # Variables
        ##################################################
        # z_info starts as placeholder strings; it is overwritten further down
        # with the real usrp_source.get_usrp_info() result once the device is
        # instantiated, and a probe thread then pushes it through set_z_info().
        self.z_info = z_info = {"mboard_id":"id","mboard_serial":"serial","rx_serial":"rx","rx_subdev_name":"subname", "rx_subdev_spec":"spec","rx_antenna":"antenna"}
        self.usrp_serial = usrp_serial = z_info["mboard_serial"]
        self.usrp_id = usrp_id = z_info["mboard_id"]
        self.db_spec = db_spec = z_info["rx_subdev_spec"]
        self.db_serial = db_serial = z_info["rx_serial"]
        self.db_name = db_name = z_info["rx_subdev_name"]
        self.db_antenna = db_antenna = z_info["rx_antenna"]
        self.catch_result = catch_result = uhd.tune_result()
        self.usrp_type = usrp_type = "usrp2"
        self.usrp_text = usrp_text = usrp_id + " (" + usrp_serial + ")"
        self.master_clock_rate = master_clock_rate = 40e6
        self.db_text = db_text = db_name + " (" + db_serial + " ," + db_spec + " ," + db_antenna + ")"
        self.actual_rf = actual_rf = catch_result.actual_rf_freq
        self.actual_dsp = actual_dsp = catch_result.actual_dsp_freq
        self.uhd_version = uhd_version = uhd.get_version_string()
        self.samp_rate = samp_rate = 10e6
        self.rf_label = rf_label = actual_rf
        self.myzero = myzero = 0
        self.gain = gain = 50
        self.dsp_label = dsp_label = actual_dsp
        self.dev_args = dev_args = "type=" + usrp_type + ",master_clock_rate=" + str(master_clock_rate) + ", recv_buff_size=32768e6"
        self.center_freq = center_freq = 900e6
        self.a_usrp = a_usrp = usrp_text
        self.a_db_label = a_db_label = db_text
        ##################################################
        # Blocks
        ##################################################
        # Sample-rate text-entry toolbar (engineering notation accepted).
        self._samp_rate_tool_bar = Qt.QToolBar(self)
        self._samp_rate_tool_bar.addWidget(Qt.QLabel("Sample Rate"+": "))
        self._samp_rate_line_edit = Qt.QLineEdit(str(self.samp_rate))
        self._samp_rate_tool_bar.addWidget(self._samp_rate_line_edit)
        self._samp_rate_line_edit.returnPressed.connect(
            lambda: self.set_samp_rate(eng_notation.str_to_num(self._samp_rate_line_edit.text().toAscii())))
        self.top_grid_layout.addWidget(self._samp_rate_tool_bar, 4, 0, 1, 3)
        # Gain control: a Qwt counter and slider kept in sync via set_gain().
        self._gain_layout = Qt.QVBoxLayout()
        self._gain_tool_bar = Qt.QToolBar(self)
        self._gain_layout.addWidget(self._gain_tool_bar)
        self._gain_tool_bar.addWidget(Qt.QLabel("Gain"+": "))
        self._gain_counter = Qwt.QwtCounter()
        self._gain_counter.setRange(0, 100, 1)
        self._gain_counter.setNumButtons(2)
        self._gain_counter.setValue(self.gain)
        self._gain_tool_bar.addWidget(self._gain_counter)
        self._gain_counter.valueChanged.connect(self.set_gain)
        self._gain_slider = Qwt.QwtSlider(None, Qt.Qt.Horizontal, Qwt.QwtSlider.BottomScale, Qwt.QwtSlider.BgSlot)
        self._gain_slider.setRange(0, 100, 1)
        self._gain_slider.setValue(self.gain)
        self._gain_slider.setMinimumWidth(200)
        self._gain_slider.valueChanged.connect(self.set_gain)
        self._gain_layout.addWidget(self._gain_slider)
        self.top_grid_layout.addLayout(self._gain_layout, 5, 0, 1, 5)
        # Center-frequency text-entry toolbar.
        self._center_freq_tool_bar = Qt.QToolBar(self)
        self._center_freq_tool_bar.addWidget(Qt.QLabel("Center Frequency"+": "))
        self._center_freq_line_edit = Qt.QLineEdit(str(self.center_freq))
        self._center_freq_tool_bar.addWidget(self._center_freq_line_edit)
        self._center_freq_line_edit.returnPressed.connect(
            lambda: self.set_center_freq(eng_notation.str_to_num(self._center_freq_line_edit.text().toAscii())))
        self.top_grid_layout.addWidget(self._center_freq_tool_bar, 4, 3, 1, 2)
        # The UHD source block.  NOTE(review): the calibration-file path is an
        # absolute, machine-specific path baked in by the .grc file.
        self.usrp_dev = uhd.usrp_source(
            device_addr=dev_args,
            stream_args=uhd.stream_args(
                cpu_format="fc32",
                args="calibration-file=/home/johannes/tests/calibration-rx_B210_150N15_FE-RX2_integrated_TX-RX_1387571801.csv",
                channels=range(1),
            ),
        )
        self.usrp_dev.set_samp_rate(samp_rate)
        self.usrp_dev.set_center_freq(center_freq, 0)
        self.usrp_dev.set_gain(gain, 0)
        self.usrp_dev.set_antenna("RX2", 0)
        # Fetch the real device info and feed it into the GUI via a polling
        # thread (GRC "variable probe" idiom: retry until set_z_info succeeds).
        self.z_info = val = self.usrp_dev.get_usrp_info(0)
        def _z_info_probe():
            notset = True
            while notset:
                try:
                    self.set_z_info(self.z_info)
                    notset = False
                except:
                    notset = True
                time.sleep(1.0/10.0)
        self._z_info_thread = threading.Thread(target=_z_info_probe)
        self._z_info_thread.daemon = True
        self._z_info_thread.start()
        # Static UHD version label.
        self._uhd_version_tool_bar = Qt.QToolBar(self)
        self._uhd_version_tool_bar.addWidget(Qt.QLabel("UHD"+": "))
        self._uhd_version_label = Qt.QLabel(str(self.uhd_version))
        self._uhd_version_tool_bar.addWidget(self._uhd_version_label)
        self.top_grid_layout.addWidget(self._uhd_version_tool_bar, 3, 0, 1, 1)
        # Label showing the actual RF frequency reported by the last tune.
        self._rf_label_tool_bar = Qt.QToolBar(self)
        self._rf_label_tool_bar.addWidget(Qt.QLabel("RF Freq"+": "))
        self._rf_label_label = Qt.QLabel(str(self.rf_label))
        self._rf_label_tool_bar.addWidget(self._rf_label_label)
        self.top_grid_layout.addWidget(self._rf_label_tool_bar, 3, 3, 1, 1)
        # Combined Qt GUI sink: FFT, waterfall, time and constellation plots.
        self.qtgui_sink_x_0 = qtgui.sink_c(
            1024, #fftsize
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            center_freq, #fc
            samp_rate, #bw
            "QT GUI Plot", #name
            True, #plotfreq
            True, #plotwaterfall
            True, #plottime
            True, #plotconst
        )
        self.qtgui_sink_x_0.set_update_time(1.0/10)
        self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_sink_x_0_win, 0, 0, 3, 5)
        # Label showing the actual DSP (digital) frequency from the last tune.
        self._dsp_label_tool_bar = Qt.QToolBar(self)
        self._dsp_label_tool_bar.addWidget(Qt.QLabel("DSP Freq"+": "))
        self._dsp_label_label = Qt.QLabel(str(self.dsp_label))
        self._dsp_label_tool_bar.addWidget(self._dsp_label_label)
        self.top_grid_layout.addWidget(self._dsp_label_tool_bar, 3, 4, 1, 1)
        # Capture the tune_result of the initial tune and propagate it to the
        # RF/DSP labels via another variable-probe polling thread.
        self.catch_result = val = self.usrp_dev.set_center_freq(center_freq, myzero)
        def _catch_result_probe():
            notset = True
            while notset:
                try:
                    self.set_catch_result(self.catch_result)
                    notset = False
                except:
                    notset = True
                time.sleep(1.0/10.0)
        self._catch_result_thread = threading.Thread(target=_catch_result_probe)
        self._catch_result_thread.daemon = True
        self._catch_result_thread.start()
        # USRP identification label (id + serial).
        self._a_usrp_tool_bar = Qt.QToolBar(self)
        self._a_usrp_tool_bar.addWidget(Qt.QLabel("USRP"+": "))
        self._a_usrp_label = Qt.QLabel(str(self.a_usrp))
        self._a_usrp_tool_bar.addWidget(self._a_usrp_label)
        self.top_grid_layout.addWidget(self._a_usrp_tool_bar, 3, 1, 1, 1)
        # Daughterboard identification label (name, serial, spec, antenna).
        self._a_db_label_tool_bar = Qt.QToolBar(self)
        self._a_db_label_tool_bar.addWidget(Qt.QLabel("Daughterboard"+": "))
        self._a_db_label_label = Qt.QLabel(str(self.a_db_label))
        self._a_db_label_tool_bar.addWidget(self._a_db_label_label)
        self.top_grid_layout.addWidget(self._a_db_label_tool_bar, 3, 2, 1, 1)
        ##################################################
        # Connections
        ##################################################
        self.connect((self.usrp_dev, 0), (self.qtgui_sink_x_0, 0))

    # QT sink close method reimplementation
    def closeEvent(self, event):
        """Persist the window geometry on close so __init__ can restore it."""
        self.settings = Qt.QSettings("GNU Radio", "uhd_fft_qt")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()

    # ------------------------------------------------------------------
    # GRC variable accessors.  Each setter propagates the new value to the
    # dependent variables/widgets, mirroring the .grc dependency graph.
    # ------------------------------------------------------------------
    def get_z_info(self):
        return self.z_info

    def set_z_info(self, z_info):
        # Fans the device-info dict out into the individual label variables.
        self.z_info = z_info
        self.set_db_name(self.z_info["rx_subdev_name"])
        self.set_db_antenna(self.z_info["rx_antenna"])
        self.set_db_serial(self.z_info["rx_serial"])
        self.set_db_spec(self.z_info["rx_subdev_spec"])
        self.set_usrp_serial(self.z_info["mboard_serial"])
        self.set_usrp_id(self.z_info["mboard_id"])

    def get_usrp_serial(self):
        return self.usrp_serial

    def set_usrp_serial(self, usrp_serial):
        self.usrp_serial = usrp_serial
        self.set_usrp_text(self.usrp_id + " (" + self.usrp_serial + ")")

    def get_usrp_id(self):
        return self.usrp_id

    def set_usrp_id(self, usrp_id):
        self.usrp_id = usrp_id
        self.set_usrp_text(self.usrp_id + " (" + self.usrp_serial + ")")

    def get_db_spec(self):
        return self.db_spec

    def set_db_spec(self, db_spec):
        self.db_spec = db_spec
        self.set_db_text(self.db_name + " (" + self.db_serial + " ," + self.db_spec + " ," + self.db_antenna + ")")

    def get_db_serial(self):
        return self.db_serial

    def set_db_serial(self, db_serial):
        self.db_serial = db_serial
        self.set_db_text(self.db_name + " (" + self.db_serial + " ," + self.db_spec + " ," + self.db_antenna + ")")

    def get_db_name(self):
        return self.db_name

    def set_db_name(self, db_name):
        self.db_name = db_name
        self.set_db_text(self.db_name + " (" + self.db_serial + " ," + self.db_spec + " ," + self.db_antenna + ")")

    def get_db_antenna(self):
        return self.db_antenna

    def set_db_antenna(self, db_antenna):
        self.db_antenna = db_antenna
        self.set_db_text(self.db_name + " (" + self.db_serial + " ," + self.db_spec + " ," + self.db_antenna + ")")

    def get_catch_result(self):
        return self.catch_result

    def set_catch_result(self, catch_result):
        # catch_result is a uhd.tune_result; update both frequency readouts.
        self.catch_result = catch_result
        self.set_actual_rf(self.catch_result.actual_rf_freq)
        self.set_actual_dsp(self.catch_result.actual_dsp_freq)

    def get_usrp_type(self):
        return self.usrp_type

    def set_usrp_type(self, usrp_type):
        # NOTE: changing dev_args after construction does not re-open the
        # device; the new value only affects the stored string.
        self.usrp_type = usrp_type
        self.set_dev_args("type=" + self.usrp_type + ",master_clock_rate=" + str(self.master_clock_rate) + ", recv_buff_size=32768e6")

    def get_usrp_text(self):
        return self.usrp_text

    def set_usrp_text(self, usrp_text):
        self.usrp_text = usrp_text
        self.set_a_usrp(self.usrp_text)

    def get_master_clock_rate(self):
        return self.master_clock_rate

    def set_master_clock_rate(self, master_clock_rate):
        self.master_clock_rate = master_clock_rate
        self.set_dev_args("type=" + self.usrp_type + ",master_clock_rate=" + str(self.master_clock_rate) + ", recv_buff_size=32768e6")

    def get_db_text(self):
        return self.db_text

    def set_db_text(self, db_text):
        self.db_text = db_text
        self.set_a_db_label(self.db_text)

    def get_actual_rf(self):
        return self.actual_rf

    def set_actual_rf(self, actual_rf):
        self.actual_rf = actual_rf
        self.set_rf_label(self.actual_rf)

    def get_actual_dsp(self):
        return self.actual_dsp

    def set_actual_dsp(self, actual_dsp):
        self.actual_dsp = actual_dsp
        self.set_dsp_label(self.actual_dsp)

    def get_uhd_version(self):
        return self.uhd_version

    def set_uhd_version(self, uhd_version):
        self.uhd_version = uhd_version
        self._uhd_version_label.setText(str(self.uhd_version))

    def get_samp_rate(self):
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        # Updates the text box, the GUI sink's frequency axis and the device.
        self.samp_rate = samp_rate
        self._samp_rate_line_edit.setText(eng_notation.num_to_str(self.samp_rate))
        self.qtgui_sink_x_0.set_frequency_range(self.center_freq, self.samp_rate)
        self.usrp_dev.set_samp_rate(self.samp_rate)

    def get_rf_label(self):
        return self.rf_label

    def set_rf_label(self, rf_label):
        self.rf_label = rf_label
        self._rf_label_label.setText(eng_notation.num_to_str(self.rf_label))

    def get_myzero(self):
        return self.myzero

    def set_myzero(self, myzero):
        # myzero is the channel index (always 0); setting it re-tunes and
        # refreshes the RF/DSP labels via set_catch_result.
        self.myzero = myzero
        self.set_catch_result(self.usrp_dev.set_center_freq(self.center_freq, self.myzero))

    def get_gain(self):
        return self.gain

    def set_gain(self, gain):
        self.gain = gain
        self._gain_counter.setValue(self.gain)
        self._gain_slider.setValue(self.gain)
        self.usrp_dev.set_gain(self.gain, 0)

    def get_dsp_label(self):
        return self.dsp_label

    def set_dsp_label(self, dsp_label):
        self.dsp_label = dsp_label
        self._dsp_label_label.setText(eng_notation.num_to_str(self.dsp_label))

    def get_dev_args(self):
        return self.dev_args

    def set_dev_args(self, dev_args):
        self.dev_args = dev_args

    def get_center_freq(self):
        return self.center_freq

    def set_center_freq(self, center_freq):
        # NOTE(review): generated code tunes the device twice here (once via
        # set_catch_result, once directly); harmless but redundant.
        self.center_freq = center_freq
        self.set_catch_result(self.usrp_dev.set_center_freq(self.center_freq, self.myzero))
        self._center_freq_line_edit.setText(eng_notation.num_to_str(self.center_freq))
        self.qtgui_sink_x_0.set_frequency_range(self.center_freq, self.samp_rate)
        self.usrp_dev.set_center_freq(self.center_freq, 0)

    def get_a_usrp(self):
        return self.a_usrp

    def set_a_usrp(self, a_usrp):
        self.a_usrp = a_usrp
        self._a_usrp_label.setText(repr(self.a_usrp))

    def get_a_db_label(self):
        return self.a_db_label

    def set_a_db_label(self, a_db_label):
        self.a_db_label = a_db_label
        self._a_db_label_label.setText(str(self.a_db_label))
if __name__ == '__main__':
    import ctypes
    import os
    if os.name == 'posix':
        try:
            # Tell Xlib early that multiple threads will use it; without this
            # the threaded Qt GUI can crash on POSIX/X11 systems.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    (options, args) = parser.parse_args()
    qapp = Qt.QApplication(sys.argv)
    tb = uhd_fft_qt()
    tb.start()
    tb.show()
    # Stop the flow graph cleanly when the Qt application is about to quit.
    def quitting():
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()
    tb = None #to clean up Qt widgets
| jdemel/gr-misc | examples/uhd_fft_qt.py | Python | gpl-3.0 | 16,000 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# single_thread_download.py
import os
import urllib.request
import urllib.error
import shutil
# Single-threaded download helper.
def single_thread_download(url, file_name=None, overwrite=False):
    """Download ``url`` to ``file_name`` in the current thread.

    Args:
        url: Source URL (any scheme urllib supports, e.g. http/https/file).
        file_name: Target path; defaults to the last '/'-separated component
            of ``url``.
        overwrite: When False (default), an existing ``file_name`` is left
            untouched and the function returns immediately.

    The payload is written to a temporary ``<file_name>.part`` file and then
    atomically renamed into place with os.replace().  This fixes the bug noted
    in the original code: an interrupted download used to leave a truncated
    ``file_name`` behind, which a later call with overwrite=False would keep.
    URL errors are reported to stdout (best-effort semantics preserved).
    """
    if file_name is None:
        # Derive the target name from the final URL path component.
        file_name = url.rpartition('/')[-1]
    if os.path.exists(file_name) and (not overwrite):
        return
    part_name = file_name + '.part'
    try:
        with urllib.request.urlopen(url) as response, open(part_name, 'wb') as out_stream:
            shutil.copyfileobj(response, out_stream)
        # Atomic on POSIX and Windows: file_name is never seen half-written.
        os.replace(part_name, file_name)
    except urllib.error.URLError as e:
        print(e.errno, '\n', e.reason, '\n')
        # Best-effort cleanup of the partial file; never raise from here.
        try:
            os.remove(part_name)
        except OSError:
            pass
# single_thread_download("http://screencasts.b0.upaiyun.com/podcasts/nil_podcast_1.m4a")
# single_thread_download("http://screencasts.b0.upaiyun.com/podcasts/nil_podcast_1.m4a")
| zeekvfu/sip_tea | single_thread_download.py | Python | gpl-3.0 | 813 |
# Tests:
# assign ::= expr store
# Minimal decompiler test fixture: a module whose only statement is `pass`.
# Presumably exercises the grammar rule noted above in the round-trip test
# harness -- verify against the test runner before changing.
pass
| rocky/python-uncompyle6 | test/simple_source/stmts/00_pass.py | Python | gpl-3.0 | 38 |
# This file is part of django-doubleoptin-contactform.
# django-doubleoptin-contactform is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# django-doubleoptin-contactform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
# URL routes for the double-opt-in contact flow:
#   /            -> contact form
#   /received/   -> "message received" confirmation page
#   /<id>/verify/-> double-opt-in verification endpoint
#   /verified/   -> "address verified" confirmation page
urlpatterns = patterns('',
    (r'^$', 'doptincf.views.contact'),
    (r'^received/$', direct_to_template, {'template': 'contact/received.html'}),
    (r'^(?P<contact_id>\d+)/verify/$', 'doptincf.views.verify'),
    # Bug fix: this pattern was r'^received/$', duplicating the entry above.
    # Django dispatch stops at the first match, so contact/verified.html was
    # unreachable; the template name indicates the intended URL is /verified/.
    (r'^verified/$', direct_to_template, {'template': 'contact/verified.html'}),
)
| MyersGer/django-doubleoptin-contactform | doptincf/urls.py | Python | gpl-3.0 | 1,171 |
#
# Copyright 2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, filter
import math
import cmath
class fm_deemph(gr.hier_block2):
    r"""
    FM Deemphasis IIR filter.

    Args:
        fs: sampling frequency in Hz (float)
        tau: Time constant in seconds (75us in US, 50us in EUR) (float)

    An analog deemphasis filter:

               R
    o------/\/\/\/---+----o
                     |
                    = C
                     |
                    ---

    Has this transfer function:

                1            1
               ----         ---
                RC          tau
    H(s) = ---------- = ----------
                  1            1
             s + ----     s + ---
                  RC          tau

    And has its -3 dB response, due to the pole, at

    |H(j w_c)|^2 = 1/2  =>  s = j w_c = j (1/(RC))

    Historically, this corner frequency of analog audio deemphasis filters
    has been specified by the RC time constant used, called tau.
    So w_c = 1/tau.

    FWIW, for standard tau values, some standard analog components would be:
    tau = 75 us = (50K)(1.5 nF) = (50 ohms)(1.5 uF)
    tau = 50 us = (50K)(1.0 nF) = (50 ohms)(1.0 uF)

    In specifying tau for this digital deemphasis filter, tau specifies
    the *digital* corner frequency, w_c, desired.

    The digital deemphasis filter design below uses the
    "bilinear transformation" method of designing digital filters:

    1. Convert digital specifications into the analog domain, by prewarping
       digital frequency specifications into analog frequencies.

       w_a = (2/T)tan(wT/2)

    2. Use an analog filter design technique to design the filter.

    3. Use the bilinear transformation to convert the analog filter design to a
       digital filter design.

       H(z) = H(s)|
                   s = (2/T)(1-z^-1)/(1+z^-1)

               w_ca      1                1 - (-1) z^-1
       H(z) = ---- * ----------- * -----------------------
              2 fs        -w_ca              -w_ca
                     1 - -----          1 + -----
                          2 fs               2 fs
                                   1 - ----------- z^-1
                                             -w_ca
                                        1 - -----
                                             2 fs

    We use this design technique, because it is an easy way to obtain a filter
    design with the -6 dB/octave roll-off required of the deemphasis filter.

    Jackson, Leland B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
    Kluwer Academic Publishers, 1989, pp 201-212

    Orfanidis, Sophocles J., _Introduction_to_Signal_Processing_, Prentice Hall,
    1996, pp 573-583
    """
    def __init__(self, fs, tau=75e-6):
        gr.hier_block2.__init__(self, "fm_deemph",
                                gr.io_signature(1, 1, gr.sizeof_float),  # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float))  # Output signature
        # Digital corner frequency
        w_c = 1.0 / tau
        # Prewarped analog corner frequency
        w_ca = 2.0 * fs * math.tan(w_c / (2.0 * fs))
        # Resulting digital pole, zero, and gain term from the bilinear
        # transformation of H(s) = w_ca / (s + w_ca) to
        # H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1)
        k = -w_ca / (2.0 * fs)
        z1 = -1.0
        p1 = (1.0 + k) / (1.0 - k)
        b0 = -k / (1.0 - k)
        btaps = [b0 * 1.0, b0 * -z1]
        ataps = [1.0, -p1]
        # Since H(s = 0) = 1.0, then H(z = 1) = 1.0 and has 0 dB gain at DC.
        # (A dead "if 0:" debug-plot block was removed here: it referenced the
        # undefined name `gru` and would have raised NameError if ever enabled.)
        deemph = filter.iir_filter_ffd(btaps, ataps, False)
        self.connect(self, deemph, self)
class fm_preemph(gr.hier_block2):
    r"""
    FM Preemphasis IIR filter.

    Args:
        fs: sampling frequency in Hz (float)
        tau: Time constant in seconds (75us in US, 50us in EUR) (float)
        fh: High frequency at which to flatten out (< 0 means default of
            0.925*fs/2.0) (float)

    An analog preemphasis filter, that flattens out again at the high end:

                 C
         +-----||------+
         |             |
    o----+             +-----+--------o
         |   R1        |     |
         +----/\/\/\/--+     \
                             /
                             \ R2
                             /
                             \
                             |
    o------------------------+--------o

    (This fine ASCII rendition is based on Figure 5-15
    in "Digital and Analog Communication Systems", Leon W. Couch II)

    Has this transfer function:

                 1
            s + ---
                R1C
    H(s) = ------------------
             1       R1
            s + --- (1 + --)
                R1C      R2

    It has a corner due to the numerator, where the rise starts, at

    |Hn(j w_cl)|^2 = 2*|Hn(0)|^2  =>  s = j w_cl = j (1/(R1C))

    It has a corner due to the denominator, where it levels off again, at

    |Hn(j w_ch)|^2 = 1/2*|Hd(0)|^2  =>  s = j w_ch = j (1/(R1C) * (1 + R1/R2))

    Historically, the corner frequency of analog audio preemphasis filters
    has been specified by the R1C time constant used, called tau.
    So

    w_cl = 1/tau  = 1/R1C;          f_cl = 1/(2*pi*tau)  = 1/(2*pi*R1*C)
    w_ch = 1/tau2 = (1+R1/R2)/R1C;  f_ch = 1/(2*pi*tau2) = (1+R1/R2)/(2*pi*R1*C)

    and note f_ch = f_cl * (1 + R1/R2).

    For broadcast FM audio, tau is 75us in the United States and 50us in
    Europe.  f_ch should be higher than our digital audio bandwidth.

    The Bode plot looks like this:

                     /----------------
                    /
                   /  <-- slope = 20dB/decade
                  /
    -------------/
               f_cl  f_ch

    In specifying tau for this digital preemphasis filter, tau specifies
    the *digital* corner frequency, w_cl, desired.

    The digital preemphasis filter design below uses the
    "bilinear transformation" method of designing digital filters:

    1. Convert digital specifications into the analog domain, by prewarping
       digital frequency specifications into analog frequencies.

       w_a = (2/T)tan(wT/2)

    2. Use an analog filter design technique to design the filter.

    3. Use the bilinear transformation to convert the analog filter design to a
       digital filter design.

       H(z) = H(s)|
                   s = (2/T)(1-z^-1)/(1+z^-1)

                               -w_cla
                          1 + ------
                               2 fs
                     1 - ------------ z^-1
               -w_cla          -w_cla
          1 - ------      1 - ------
               2 fs            2 fs
    H(z) = ------------ * -----------------------
               -w_cha          -w_cha
          1 - ------      1 + ------
               2 fs            2 fs
                     1 - ------------ z^-1
                               -w_cha
                          1 - ------
                               2 fs

    We use this design technique, because it is an easy way to obtain a filter
    design with the 6 dB/octave rise required of the preemphasis filter.

    Jackson, Leland B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
    Kluwer Academic Publishers, 1989, pp 201-212

    Orfanidis, Sophocles J., _Introduction_to_Signal_Processing_, Prentice Hall,
    1996, pp 573-583
    """
    def __init__(self, fs, tau=75e-6, fh=-1.0):
        gr.hier_block2.__init__(self, "fm_preemph",
                                gr.io_signature(1, 1, gr.sizeof_float),  # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float))  # Output signature
        # Set fh to something sensible, if needed.
        # N.B. fh == fs/2.0 or fh == 0.0 results in a pole on the unit circle
        # at z = -1.0 or z = 1.0 respectively. That makes the filter unstable
        # and useless.
        if fh <= 0.0 or fh >= fs / 2.0:
            fh = 0.925 * fs/2.0
        # Digital corner frequencies
        w_cl = 1.0 / tau
        w_ch = 2.0 * math.pi * fh
        # Prewarped analog corner frequencies
        w_cla = 2.0 * fs * math.tan(w_cl / (2.0 * fs))
        w_cha = 2.0 * fs * math.tan(w_ch / (2.0 * fs))
        # Resulting digital pole, zero, and gain term from the bilinear
        # transformation of H(s) = (s + w_cla) / (s + w_cha) to
        # H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1)
        kl = -w_cla / (2.0 * fs)
        kh = -w_cha / (2.0 * fs)
        z1 = (1.0 + kl) / (1.0 - kl)
        p1 = (1.0 + kh) / (1.0 - kh)
        b0 = (1.0 - kl) / (1.0 - kh)
        # Since H(s = infinity) = 1.0, then H(z = -1) = 1.0 and
        # this filter has 0 dB gain at fs/2.0.
        # That isn't what users are going to expect, so adjust with a
        # gain, g, so that H(z = 1) = 1.0 for 0 dB gain at DC.
        w_0dB = 2.0 * math.pi * 0.0
        g =        abs(1.0 - p1 * cmath.rect(1.0, -w_0dB)) \
            / (b0 * abs(1.0 - z1 * cmath.rect(1.0, -w_0dB)))
        btaps = [g * b0 * 1.0, g * b0 * -z1]
        ataps = [1.0, -p1]
        # (A dead "if 0:" debug-plot block was removed here: it referenced the
        # undefined name `gru` and would have raised NameError if ever enabled.)
        preemph = filter.iir_filter_ffd(btaps, ataps, False)
        self.connect(self, preemph, self)
| trabucayre/gnuradio | gr-analog/python/analog/fm_emph.py | Python | gpl-3.0 | 9,632 |